medical_balloon/Defect_Detection/img_2/備份.txt

import os
import sys
import time
import threading
import traceback
import ctypes as C

import numpy as np
import cv2
import torch
import serial  # serial-port communication
from pyueye import ueye

from PyQt5.QtGui import QPixmap, QImage
from PyQt5.QtWidgets import QWidget, QFileDialog, QMainWindow, QLabel, QSizePolicy, QApplication, QAction, QHBoxLayout
from PyQt5.QtCore import *
from PyQt5 import QtCore, QtGui, QtWidgets

from test_0415_ui import Ui_MainWindow
from Class.Camera import Camera_class
from Class.Motor import Motor_class
from Class.Pan import Pan_class
from Class.Yolo import yolo_class
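
# Note: Camera_class, Motor_class, Pan_class and yolo_class are project-specific
# QThread wrappers (camera grabbing, motor control over the serial port, pan stage
# control, and the YOLO detector). Their exact interfaces are assumed here from how
# they are used below: .start(), .rawdata / .sinOut signals, .start_run(), .move(),
# and YoloDetect(img).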


class img_yolo(QtCore.QThread):
    sinOut = pyqtSignal(str)  # signal carrying a string argument

    def __init__(self, yolo_model, parent=None):
        super().__init__(parent)
        self.yolo = yolo_model

    def normalize_image(self, img):
        # Stretch the intensity range to 0-255 (min-max normalization)
        if img is not None:
            normalized_image = cv2.normalize(img, None, alpha=0, beta=255, norm_type=cv2.NORM_MINMAX)
            return normalized_image
        return None
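
    # run() pipeline (per image in ./image_folder):
    #   read image -> YoloDetect -> take the top detection -> crop its bounding box ->
    #   min-max normalize -> grayscale -> CLAHE -> save the crop to img_2/<name>_2.<ext>.
    # Assumption: YoloDetect() returns a YOLOv5-style Detections object, where each row of
    # results.pred[0] is [x1, y1, x2, y2, conf, cls] and results.names maps class ids to
    # labels; the indexing below relies on that layout.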
    def run(self):
        print('run')
        self.img_list = []
        self.filename_list = []
        for filename in os.listdir(r"./image_folder"):
            if filename.endswith('.jpg') or filename.endswith('.bmp'):
                img = cv2.imread('image_folder' + "/" + filename)
                self.img_list.append(img)
                self.filename_list.append(filename)
                # self.img_test(img, filename)
        print('Image loading complete')
        os.makedirs('img_2', exist_ok=True)  # make sure the output folder exists
        for i in range(0, len(self.filename_list)):
            img = self.img_list[i]
            file_name = self.filename_list[i]
            t1 = time.time()
            results = self.yolo.YoloDetect(img)
            t2 = time.time()
            print(f'Inference time = {t2 - t1}')
            # print(results)
            print(f'{file_name}')
            # Skip images with no predictions
            if results.pred[0] is None or len(results.pred[0]) == 0:
                continue
            # Confidence and class of the top (highest-confidence) detection
            conf = results.pred[0][0, 4]
            predicted_classes = results.names[int(results.pred[0][0, -1])]
            # Bounding-box coordinates of the top detection
            box = results.pred[0][0, :4].cpu().numpy().astype(int)
            # Extract the ROI (region of interest)
            roi = img[box[1]:box[3], box[0]:box[2]]
            if roi.size == 0:
                continue  # skip degenerate boxes
            # Normalize the ROI image
            normalized_roi = self.normalize_image(roi)
            # Convert the normalized ROI to grayscale
            gray_roi = cv2.cvtColor(normalized_roi, cv2.COLOR_BGR2GRAY)
            # Create a CLAHE object (contrast-limited adaptive histogram equalization)
            clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(3, 3))
            # Apply CLAHE to the grayscale image
            clahe_image = clahe.apply(gray_roi)
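            # The commented-out sections below keep earlier processing experiments
            # (binarization, morphology, contour/ellipse fitting, confidence-gated
            # saving of the rendered detections); only the CLAHE crop is saved now.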
            # # Binarization
            # _, binary_image = cv2.threshold(sharpened_image, 30, 255, cv2.THRESH_BINARY)
            # Morphological dilation
            # kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (8, 8))  # use this one
            # dilated_image = cv2.dilate(binary_image, kernel, iterations=2)
            # closed_image = cv2.morphologyEx(binary_image, cv2.MORPH_CLOSE, kernel)  # use this one
            # opened_image = cv2.morphologyEx(binary_image, cv2.MORPH_OPEN, kernel)
            # Convert the image to grayscale
            # gray_image = cv2.cvtColor(closed_image, cv2.COLOR_BGR2GRAY)
            # Find contours
            # contours, _ = cv2.findContours(closed_image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            # Get the original file name
            base_name, ext = os.path.splitext(file_name)
            # Save the processed ROI with '_2' appended to the file name
            new_file_name = f'{base_name}_2{ext}'
            print(f'Saving {new_file_name}')
            cv2.imwrite(f'img_2/{new_file_name}', clahe_image)
            # # Get the outermost contour
            # if len(contours) > 0:
            #     # Fit an ellipse to the outermost contour
            #     ellipse = cv2.fitEllipse(contours[0])
            #
            #     # Draw the ellipse on the original image
            #     contour_image = roi.copy()
            #     cv2.ellipse(contour_image, ellipse, (0, 255, 0), 1)  # draw the ellipse with a green line
            #
            #     # Get the original file name
            #     base_name, ext = os.path.splitext(file_name)
            #
            #     # Save the processed ROI with '_2' appended to the file name
            #     new_file_name = f'{base_name}_2{ext}'
            #     print(f'Saving {new_file_name}')
            #     cv2.imwrite(f'img_2/{new_file_name}', sharpened_image)
            #
            #     # Save the image if the confidence exceeds 0.6
            #     if conf > 0.6:
            #         resized_frame_high_conf = cv2.resize(results.render()[0], (1280, 1280))
            #         print(f'Saving {file_name}')
            #         cv2.imwrite(f'img_2/{file_name}', resized_frame_high_conf)
            # else:
            #     print("No contours found.")
            # # Save the image if the confidence exceeds 0.6
            # if conf > 0.6:
            #     # Get the bounding-box coordinates
            #     box = results.pred[0][0, :4].cpu().numpy().astype(int)
            #
            #     # Keep the original inference result and save the original image separately
            #     print(f'Saving {file_name}')
            #     cv2.imwrite(f'img_2/{file_name}', img)
            #
            #     # Draw the bounding box on the original image
            #     cv2.rectangle(img, (box[0], box[1]), (box[2], box[3]), (0, 255, 0), 2)  # drawn with a green line
            #
            #     # Resize the image
            #     resized_frame = cv2.resize(img, (1280, 1280))
            #
            #     # Get the original file name
            #     base_name, ext = os.path.splitext(file_name)
            #
            #     # Save the drawn image with '_2' appended to the file name
            #     new_file_name = f'{base_name}_2{ext}'
            #     print(f'Saving {new_file_name}')
            #     cv2.imwrite(f'img_2/{new_file_name}', resized_frame)
            #
            # else:
            #     # Keep the original inference result and save the rendered image
            #     resized_frame = cv2.resize(results.render()[0], (1280, 1280))
            #     print(f'Saving {file_name}')
            #     cv2.imwrite(f'img_2/{file_name}', resized_frame)


def img_to_view(img):  # convert a BGR frame for Qt display
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)  # Qt expects RGB channel order
    Ny, Nx, _ = img.shape
    # Copy so the QImage owns its buffer (the local RGB array goes out of scope on return)
    qimg = QtGui.QImage(img.data, Nx, Ny, Nx * 3, QtGui.QImage.Format_RGB888).copy()
    return qimg


class MainWindow(QMainWindow, Ui_MainWindow):
    def __init__(self, parent=None):  # window and button setup
        super(MainWindow, self).__init__(parent)
        self.setupUi(self)
        # self.view_1.setScaledContents(True)
        # self.view_2.setScaledContents(True)
        # self.view_3.setScaledContents(True)
        # self.view_4.setScaledContents(True)
        self.bt_1.clicked.connect(self.bt_1_clicked)
        # self.bt_2.clicked.connect(self.bt_2_clicked)  # connect the button to the new function
        self.bt_3.clicked.connect(self.bt_3_clicked)
        self.cam = Camera_class()
        self.cam.start()
        self.cam.rawdata.connect(self.show_img)
        self.run_flag = True
        self.com5 = Motor_class()
        self.com5.start()
        self.com5.sinOut.connect(self.com5_str)
        self.com4 = Pan_class()
        self.com4.start()
        self.com4.sinOut.connect(self.com4_str)
        self.count = 0
        self.img_count = 0
        self.yolo = yolo_class('weight/best.pt')
        # self.yolo.start()
        # self.img_test_2()

    def show_img(self, img):
        # Latest camera frame; keep a reference so it can be saved on the '2' signal
        self.img = img
        h, w, _ = self.img.shape
        img = img_to_view(img)
        self.view_1.setPixmap(QtGui.QPixmap.fromImage(img))

    def bt_1_clicked(self):
        self.time_2 = time.time()
        self.com5.start_run()
        self.run_flag = True
        self.time_1 = time.time()

    def com5_str(self, str_1):
        if str_1 == 'move':
            self.count = self.count + 1
            if self.count < 6:
                print(self.count)
            if str_1 == 'move':  # redundant check kept from the original
                self.img_count = 0
                self.com4.move()
            if self.count == 5:
                self.run_flag = False
        if str_1 == '2':
            # self.img_test()
            # self.time_1 = time.time()
            # print(f's={self.time_1 - self.time_2}')
            # print('save_img')
            self.img_count = self.img_count + 1
            cv2.imwrite(f'image_folder\\{str(self.count)}_{self.img_count}.jpg', self.img)
            # self.time_2 = time.time()

    def com4_str(self, str_1):
        if str_1 == 'move_end' and self.run_flag == True:
            self.com5.start_run()
        if str_1 == 'Motor emergency stop 010':
            print('010')
            self.run_flag = False
            self.count = 0
            time_2 = time.time()
            print(f'all_time - {time_2 - self.time_1}')
            self.bt_3_clicked()
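
    # Control flow (as wired above; the meaning of the serial messages is assumed from usage):
    #   bt_1 -> com5.start_run() starts the motor sequence.
    #   com5 'move'     -> one pass finished: advance the pan stage (com4.move()), reset img_count.
    #   com5 '2'        -> grab trigger: save the current camera frame into image_folder/.
    #   com4 'move_end' -> pan in position: start the next motor pass while run_flag is set.
    #   'Motor emergency stop 010' -> sequence done/aborted: reset counters, report the elapsed
    #   time, and call bt_3_clicked() to batch-process the captured images.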

    # def bt_3_clicked(self):
    #     options = QFileDialog.Options()
    #     options |= QFileDialog.ReadOnly
    #     filePath, _ = QFileDialog.getOpenFileName(self, "Select image", "",
    #                                               "Image files (*.bmp *.jpg *.png);;All files (*)",
    #                                               options=options)
    #     # print(filePath)
    #     img = cv2.imread(filePath)
    #     t1 = time.time()
    #     results = self.yolo.YoloDetect(img)
    #     t2 = time.time()
    #     # print(f'Inference time = {t2 - t1}')
    #     # print(results)
    #     # Check whether there are any predictions
    #     if results.pred[0] is None or len(results.pred[0]) == 0:
    #         # self.label1.setText("No defect found")
    #         return
    #
    #     # Get the predicted class (class of the highest-confidence detection)
    #     predicted_classes = results.names[int(results.pred[0][0, -1])]
    #     resized_frame = cv2.resize(results.render()[0], (1920, 1280))
    #     # print(resized_frame)
    #     img = img_to_view(resized_frame)
    #     self.view_2.setPixmap(QtGui.QPixmap.fromImage(img))
    #
    # def img_test(self, img, file_name):
    #     t1 = time.time()
    #     results = self.yolo.YoloDetect(img)
    #     t2 = time.time()
    #     print(f'Inference time = {t2 - t1}')
    #     # print(results)
    #     print(f'{file_name}')
    #
    #     # Check whether there are any predictions
    #     if results.pred[0] is None or len(results.pred[0]) == 0:
    #         # self.label1.setText("No defect found")
    #         return
    #
    #     # Get the predicted class (class of the highest-confidence detection)
    #     predicted_classes = results.names[int(results.pred[0][0, -1])]
    #     resized_frame = cv2.resize(results.render()[0], (1920, 1280))
    #     # print(resized_frame)
    #     # img = img_to_view(resized_frame)
    #     # self.view_2.setPixmap(QtGui.QPixmap.fromImage(img))
    #     print(f'{file_name}')
    #     cv2.imwrite(f'img_2/{file_name}', resized_frame)

    def img_test_2(self):
        img_list = []
        for filename in os.listdir(r"./image_folder"):
            if filename.endswith('.jpg'):
                img = cv2.imread('image_folder' + "/" + filename)
                img_list.append(img)
                # self.img_test(img, filename)
        print('Image loading complete')
        run_list = []
        for img in img_list:
            # img_yolo takes only the model; it scans image_folder itself
            run_list.append(img_yolo(self.yolo))
        for i in range(0, len(run_list)):
            run_list[i].start()
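
    # Note: img_test_2() appears to be legacy code - its call in __init__ is commented out,
    # and img_yolo.run() scans ./image_folder itself, so spawning one thread per image is
    # redundant. bt_3_clicked() below is the path actually used.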

    def bt_3_clicked(self):
        # Run the batch YOLO/CLAHE worker in a background thread
        self.T1 = img_yolo(self.yolo)
        self.T1.finished.connect(self.on_yolo_finished)  # connect to the finished signal
        self.T1.start()

    def on_yolo_finished(self):
        img_confidence = {}  # confidence value per processed image
        # Score the processed crops in the img_2 folder
        for filename in os.listdir(r"./img_2"):
            if filename.endswith('.jpg'):
                img_path = os.path.join("./img_2", filename)
                img = cv2.imread(img_path)
                # Perform YOLO detection and get the confidence value
                results = self.yolo.YoloDetect(img)
                if results.pred[0] is not None and len(results.pred[0]) > 0:
                    conf = results.pred[0][0, 4]  # confidence of the top detection
                else:
                    conf = 0
                img_confidence[filename] = conf
        # Sort images by confidence value in descending order
        sorted_imgs = sorted(img_confidence.items(), key=lambda x: x[1], reverse=True)
        count = 0  # number of displayed images
        # Display the highest-confidence images in view_2, view_3, and view_4
        for filename, _ in sorted_imgs:
            img_path = os.path.join("./img_2", filename)
            img = cv2.imread(img_path)
            img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
            h, w, _ = img_rgb.shape
            qimg = QtGui.QImage(img_rgb.data, w, h, w * 3, QtGui.QImage.Format_RGB888).copy()
            if not self.view_2.pixmap():
                self.view_2.setPixmap(QtGui.QPixmap.fromImage(qimg))
                count += 1
            elif not self.view_3.pixmap():
                self.view_3.setPixmap(QtGui.QPixmap.fromImage(qimg))
                count += 1
            elif not self.view_4.pixmap():
                self.view_4.setPixmap(QtGui.QPixmap.fromImage(qimg))
                count += 1
            # Stop once three images have been displayed
            if count == 3:
                break


if __name__ == "__main__":
    app = QApplication(sys.argv)
    window = MainWindow()
    window.show()
    sys.exit(app.exec_())