# medical_balloon/Defect_Detection/test_0501_joe.py
import os
import sys
import time
import threading
import traceback
import ctypes as C

import numpy as np
import cv2
import torch
import serial  # serial-port communication
from pyueye import ueye

from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QWidget, QFileDialog, QMainWindow, QLabel, QSizePolicy, QApplication, QAction, QHBoxLayout
from PyQt5.QtCore import *
from PyQt5 import QtCore, QtGui, QtWidgets

from test_0415_ui import Ui_MainWindow
from Class.Camera import Camera_class
from Class.Motor import Motor_class
from Class.Pan import Pan_class
from Class.Yolo import yolo_class
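
# Defect-detection test script for the medical-balloon inspection rig.
# Images from ./image_folder are run through a YOLO model (Class.Yolo.yolo_class);
# detections classified as 'fisheyes', 'gel' or 'scratches' are cropped,
# measured, and saved to ./img_2, and the Qt GUI shows the three
# highest-confidence results in view_2..view_4.
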
class img_yolo(QtCore.QThread):
    """Worker thread: load images from ./image_folder and run YOLO defect detection."""
    sinOut = pyqtSignal(str)  # declare a signal that carries a string argument

    def __init__(self, yolo_model, parent=None):
        super().__init__(parent)
        self.yolo = yolo_model

    def normalize_image(self, img):
        # Stretch the intensity range to 0-255 (min-max normalisation)
        if img is not None:
            normalized_image = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
            return normalized_image
        return None
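
    # process_image runs one YOLO inference on an image, logs the confidence of
    # the top detection, and dispatches to the handler matching the predicted
    # class name ('fisheyes' / 'gel' / 'scratches').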
    def process_image(self, img, filename):
        t1 = time.time()
        results = self.yolo.YoloDetect(img)
        t2 = time.time()
        print(f'Inference time = {t2 - t1}')
        print(f'{filename}')
        # If there is at least one prediction, print its confidence value
        if results.pred[0] is not None and len(results.pred[0]) > 0:
            conf = results.pred[0][0, 4]  # confidence value
            print(f'Confidence = {conf}')
        if results.pred[0] is None or len(results.pred[0]) == 0:
            return
        predicted_classes = results.names[int(results.pred[0][0, -1])]
        if predicted_classes == 'fisheyes':
            processed_img = self.fisheyes_method(results, filename, img)
        elif predicted_classes == 'gel':
            processed_img = self.gel_method(results, filename, img)
        elif predicted_classes == 'scratches':
            processed_img = self.scratches_method(results, filename, img)
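
    # fisheyes_method binarises the detection crop, closes small gaps, fits an
    # ellipse to the largest contour and measures the bright area inside it.
    # The 5.5 factor in the area conversions below appears to be the camera
    # pixel pitch; the formula is kept exactly as in the original code.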
    # Fisheye defect handling
    def fisheyes_method(self, results, filename, img):
        conf = results.pred[0][0, 4]
        predicted_classes = results.names[int(results.pred[0][0, -1])]
        box = results.pred[0][0, :4].cpu().numpy().astype(int)
        roi = img[box[1]:box[3], box[0]:box[2]]
        normalized_roi = self.normalize_image(roi)
        gray_roi = cv2.cvtColor(normalized_roi, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=0.0, tileGridSize=(1, 1))
        clahe_image = clahe.apply(gray_roi)
        _, binary_image = cv2.threshold(clahe_image, 200, 255, cv2.THRESH_BINARY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (6, 6))
        closed_image = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
        contours, _ = cv2.findContours(closed_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        if len(contours) > 0:
            max_contour = max(contours, key=cv2.contourArea)
            if len(max_contour) > 5:
                ellipse = cv2.fitEllipse(max_contour)
                contour_image = roi.copy()
                cv2.ellipse(contour_image, ellipse, (0, 255, 0), 1)
                # Create a blank mask the same size as the binary image
                mask = np.zeros_like(binary_image)
                # Draw the filled ellipse on the mask
                cv2.ellipse(mask, ellipse, (255), thickness=cv2.FILLED)
                # Apply the mask to the binary image to keep only the pixels inside the ellipse
                ellipse_interior = cv2.bitwise_and(binary_image, binary_image, mask=mask)
                # The number of white pixels inside the ellipse is its area
                interior_area_pixels = np.sum(ellipse_interior == 255)
                interior_area_mm2 = interior_area_pixels * (5.5 ** 2) / 1000  # convert pixels to square millimetres
                print(f"Ellipse interior area (mm^2): {interior_area_mm2}")
                # (Major-axis length measurement is disabled in this version.)
        self.save_images(filename, roi, results, conf)
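
    # gel_method thresholds the detection crop, collects every contour in the
    # hierarchy (cv2.RETR_TREE), sums their areas and reports the total using
    # the same pixel-to-mm^2 conversion as above.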
    # Gel defect handling
    def gel_method(self, results, filename, img):
        conf = results.pred[0][0, 4]
        predicted_classes = results.names[int(results.pred[0][0, -1])]
        box = results.pred[0][0, :4].cpu().numpy().astype(int)
        roi = img[box[1]:box[3], box[0]:box[2]]
        normalized_roi = self.normalize_image(roi)
        gray_roi = cv2.cvtColor(normalized_roi, cv2.COLOR_BGR2GRAY)
        clahe = cv2.createCLAHE(clipLimit=0.0, tileGridSize=(1, 1))
        clahe_image = clahe.apply(gray_roi)
        _, binary_image = cv2.threshold(clahe_image, 170, 255, cv2.THRESH_BINARY)
        # Use cv2.RETR_TREE to retrieve the full contour hierarchy
        contours, _ = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        total_area_pixels = 0
        for i, contour in enumerate(contours):
            # Compute each contour's area and add it to the total
            contour_area_pixels = cv2.contourArea(contour)
            total_area_pixels += contour_area_pixels
            contour_area_mm2 = contour_area_pixels * (5.5 ** 2) / 1000  # convert pixels to square millimetres
        # Convert the total area to square millimetres
        total_area_mm2 = total_area_pixels * (5.5 ** 2) / 1000
        print(f"gel area (mm^2): {total_area_mm2}")
        # Draw the contours on a copy of the crop
        contour_image = roi.copy()
        cv2.drawContours(contour_image, contours, -1, (0, 255, 0), 1)
        self.save_images(filename, roi, results, conf)
        # (An earlier variant fitted an ellipse to the largest contour and
        # measured its major axis here, as in fisheyes_method.)
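
    # scratches_method uses the same contour-area measurement as gel_method,
    # but thresholds the raw grayscale crop (no normalisation / CLAHE).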
    # Scratch defect handling
    def scratches_method(self, results, filename, img):
        # (An earlier variant normalised the crop and applied CLAHE before thresholding.)
        conf = results.pred[0][0, 4]
        predicted_classes = results.names[int(results.pred[0][0, -1])]
        box = results.pred[0][0, :4].cpu().numpy().astype(int)
        roi = img[box[1]:box[3], box[0]:box[2]]
        gray_roi = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
        _, binary_image = cv2.threshold(gray_roi, 45, 240, cv2.THRESH_BINARY)
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))
        closed_image = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)
        # Note: contours are taken from binary_image; closed_image is not used further
        contours, _ = cv2.findContours(binary_image, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
        total_area_pixels = 0
        for i, contour in enumerate(contours):
            # Compute each contour's area and add it to the total
            contour_area_pixels = cv2.contourArea(contour)
            total_area_pixels += contour_area_pixels
            contour_area_mm2 = contour_area_pixels * (5.5 ** 2) / 1000  # convert pixels to square millimetres
        # Convert the total area to square millimetres
        total_area_mm2 = total_area_pixels * (5.5 ** 2) / 1000
        print(f"scratches area (mm^2): {total_area_mm2}")
        # Draw the contours on a copy of the crop
        contour_image = roi.copy()
        cv2.drawContours(contour_image, contours, -1, (0, 255, 0), 1)
        self.save_images(filename, roi, results, conf)
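
    # save_images keeps only high-confidence detections (conf > 0.8) and writes
    # the crop to ./img_2 with a "_2" suffix appended to the base file name.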
    def save_images(self, filename, contour_image, results, conf):
        if conf > 0.8:
            base_name, ext = os.path.splitext(filename)
            new_file_name = f'{base_name}_2{ext}'
            print(f'Saving {new_file_name}')
            cv2.imwrite(f'img_2/{new_file_name}', contour_image)
        # (A variant that saved the full rendered detection at conf > 0.6 is disabled.)
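
    # run is the QThread entry point: it loads every .jpg / .bmp in
    # ./image_folder into memory, then processes the images one by one.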
    def run(self):
        print('run')
        self.img_list = []
        self.filename_list = []
        for filename in os.listdir(r"./image_folder"):
            if filename.endswith('.jpg') or filename.endswith('.bmp'):
                img = cv2.imread('image_folder' + "/" + filename)
                self.img_list.append(img)
                self.filename_list.append(filename)
        print('Image loading finished')
        for i in range(0, len(self.filename_list)):
            img = self.img_list[i]
            file_name = self.filename_list[i]
            self.process_image(img, file_name)


def img_to_view(img):  # convert a BGR frame for Qt display
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Qt expects RGB channel order
    Ny, Nx, _ = img.shape
    img = QtGui.QImage(img.data, Nx, Ny, Nx * 3, QtGui.QImage.Format_RGB888)  # format must match the data layout
    return img
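
# MainWindow wires up the Qt UI from test_0415_ui.Ui_MainWindow: bt_1 starts a
# motor scan pass, bt_3 launches the img_yolo worker, view_1 shows the live
# camera frame and view_2..view_4 show the top detection crops. The camera,
# motor and pan threads are commented out in this test version.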
class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):  # button setup
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        # self.view_1.setScaledContents(True)
        # self.view_2.setScaledContents(True)
        # self.view_3.setScaledContents(True)
        # self.view_4.setScaledContents(True)
        self.bt_1.clicked.connect(self.bt_1_clicked)
        # self.bt_2.clicked.connect(self.bt_2_clicked)  # connect the button to the new function
        self.bt_3.clicked.connect(self.bt_3_clicked)
        # self.cam = Camera_class()
        # self.cam.start()
        # self.cam.rawdata.connect(self.show_img)
        #
        # self.run_flag = True
        # self.com5 = Motor_class()
        # self.com5.start()
        # self.com5.sinOut.connect(self.com5_str)
        #
        # self.com4 = Pan_class()
        # self.com4.start()
        # self.com4.sinOut.connect(self.com4_str)
        # self.count = 0
        # self.img_count = 0
        self.yolo = yolo_class('weight/best.pt')
        # self.yolo.start()
        # self.img_test_2()

    def show_img(self, img):
        self.img = img
        h, w, _ = self.img.shape
        img = img_to_view(img)
        self.view_1.setPixmap(QtGui.QPixmap.fromImage(img))

    def bt_1_clicked(self):
        self.time_2 = time.time()
        self.com5.start_run()
        self.run_flag = True
        self.time_1 = time.time()
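
    # com5_str / com4_str handle status strings emitted by the motor (com5) and
    # pan (com4) threads: 'move' advances the position counter, '2' saves the
    # current camera frame, 'move_end' restarts the motor, and the emergency
    # stop message ends the run and triggers detection via bt_3_clicked.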
    def com5_str(self, str_1):
        if str_1 == 'move':
            self.count = self.count + 1
            if self.count < 6:
                print(self.count)
            if str_1 == 'move':
                self.img_count = 0
                self.com4.move()
            if self.count == 5:
                self.run_flag = False
        if str_1 == '2':
            self.img_count = self.img_count + 1
            cv2.imwrite(f'image_folder\\{str(self.count)}_{self.img_count}.jpg', self.img)

    def com4_str(self, str_1):
        if str_1 == 'move_end' and self.run_flag == True:
            self.com5.start_run()
        if str_1 == 'Motor emergency stop 010':
            print('010')
            self.run_flag = False
            self.count = 0
            time_2 = time.time()
            print(f'all_time - {time_2 - self.time_1}')
            self.bt_3_clicked()

    def img_test_2(self):
        # Test helper (currently unused): loads the image folder and starts one
        # img_yolo thread per image. Note that img is passed as the parent
        # argument of img_yolo here.
        img_list = []
        for filename in os.listdir(r"./image_folder"):
            if filename.endswith('.jpg'):
                img = cv2.imread('image_folder' + "/" + filename)
                img_list.append(img)
        print('Image loading finished')
        run_list = []
        for img in img_list:
            run_list.append(img_yolo(self.yolo, img))
        for i in range(0, len(run_list)):
            run_list[i].start()
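
    # bt_3_clicked starts the img_yolo worker; when it finishes,
    # on_yolo_finished reloads the saved crops from ./img_2, re-scores them
    # with YOLO and displays the three highest-confidence images.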
    def bt_3_clicked(self):
        self.T1 = img_yolo(self.yolo)
        self.T1.finished.connect(self.on_yolo_finished)  # connect to the finished signal
        self.T1.start()

    def on_yolo_finished(self):
        img_confidence = {}  # confidence value per image file
        # Load images from the img_2 folder
        for filename in os.listdir(r"./img_2"):
            if filename.endswith('.jpg'):
                img_path = os.path.join("./img_2", filename)
                img = cv2.imread(img_path)
                img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
                h, w, _ = img_rgb.shape
                qimg = QtGui.QImage(img_rgb.data, w, h, w * 3, QtGui.QImage.Format_RGB888)
                # Run YOLO detection and record the confidence value
                results = self.yolo.YoloDetect(img)
                if results.pred[0] is not None and len(results.pred[0]) > 0:
                    conf = results.pred[0][0, 4]  # confidence value
                else:
                    conf = 0
                img_confidence[filename] = conf
        # Sort the images by confidence in descending order
        sorted_imgs = sorted(img_confidence.items(), key=lambda x: x[1], reverse=True)
        count = 0  # number of images shown so far
        # Display the images in view_2, view_3 and view_4 by confidence
        for filename, _ in sorted_imgs:
            img_path = os.path.join("./img_2", filename)
            img = cv2.imread(img_path)
            img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            h, w, _ = img_rgb.shape
            qimg = QtGui.QImage(img_rgb.data, w, h, w * 3, QtGui.QImage.Format_RGB888)
            if not self.view_2.pixmap():
                self.view_2.setPixmap(QtGui.QPixmap.fromImage(qimg))
                count += 1
            elif not self.view_3.pixmap():
                self.view_3.setPixmap(QtGui.QPixmap.fromImage(qimg))
                count += 1
            elif not self.view_4.pixmap():
                self.view_4.setPixmap(QtGui.QPixmap.fromImage(qimg))
                count += 1
            # Stop once three images have been shown
            if count == 3:
                break
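
# Launch the Qt application and show the main window.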
if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())