diff --git a/Yolo_predict.py b/Yolo_predict.py
new file mode 100644
index 0000000..bda7e97
--- /dev/null
+++ b/Yolo_predict.py
@@ -0,0 +1,56 @@
+import os
+import cv2
+import datetime
+import threading
+from ultralytics import YOLO
+
+class YoloPredict(threading.Thread):
+    """ YOLO inference worker thread """
+
+    def __init__(self, image, model_path, save_dir="results", callback=None):
+        super(YoloPredict, self).__init__()
+        self.image = image
+        self.model_path = model_path
+        self.save_dir = save_dir
+        self.callback = callback  # ✅ callback invoked when inference finishes
+        self.model = None
+
+        # ✅ make sure the `results/` folder exists
+        if not os.path.exists(self.save_dir):
+            os.makedirs(self.save_dir)
+
+    def run(self):
+        """ Run YOLO inference """
+        if self.image is None:
+            print("⚠️ No image available for inference")
+            return
+
+        try:
+            # ✅ load the YOLO model
+            if self.model is None:
+                self.model = YOLO(self.model_path)
+                print("✅ YOLO model loaded successfully")
+
+            # ✅ convert the image format (BGR → RGB)
+            image_rgb = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
+
+            # ✅ run inference with the YOLO model
+            results = self.model.predict(image_rgb, imgsz=640, conf=0.5)
+            print("✅ YOLO inference finished")
+
+            # ✅ get the annotated result
+            result_image = results[0].plot()
+
+            # ✅ save the image
+            timestamp = datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")
+            save_path = os.path.join(self.save_dir, f"{timestamp}.jpg")
+            result_image_bgr = cv2.cvtColor(result_image, cv2.COLOR_RGB2BGR)
+            cv2.imwrite(save_path, result_image_bgr)
+            print(f"✅ Inference result saved to: {save_path}")
+
+            # ✅ hand the result back to the main window (via the `callback` function)
+            if self.callback:
+                self.callback(result_image, save_path)
+
+        except Exception as e:
+            print(f"⚠️ Error during inference: {e}")
diff --git a/camera/old_version/Main.py b/camera/old_version/Main.py
deleted file mode 100644
index 60814be..0000000
--- a/camera/old_version/Main.py
+++ /dev/null
@@ -1,131 +0,0 @@
-# -*- coding: utf-8 -*-
-
-# Form implementation generated from reading ui file 'Main.ui'
-#
-# Created by: PyQt5 UI code generator 5.15.6
-#
-# WARNING: Any manual changes made to this file will be lost when pyuic5 is
-# run again. Do not edit this file unless you know what you are doing.
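For reference, a minimal sketch of how the YoloPredict worker defined in Yolo_predict.py above is meant to be driven; it mirrors the call site added to detection.py later in this diff. The input file name is a placeholder, and the join() is only there because this snippet has no Qt event loop; the GUI relies on the callback instead.

import cv2
from Yolo_predict import YoloPredict

def on_done(result_image, save_path):
    # Runs on the worker thread once inference has finished.
    print(f"annotated result saved to {save_path}, shape={result_image.shape}")

frame = cv2.imread("sample.bmp")  # placeholder input; detection.py passes its latest camera frame
worker = YoloPredict(image=frame, model_path="model/best.pt",
                     save_dir="results", callback=on_done)
worker.start()
worker.join()  # wait here only because this sketch has nothing else to do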
- - -from PyQt5 import QtCore, QtGui, QtWidgets - - -class Ui_MainWindow(object): - def setupUi(self, MainWindow): - MainWindow.setObjectName("MainWindow") - MainWindow.resize(1363, 916) - self.centralwidget = QtWidgets.QWidget(MainWindow) - self.centralwidget.setObjectName("centralwidget") - self.widget = QtWidgets.QWidget(self.centralwidget) - self.widget.setGeometry(QtCore.QRect(20, 140, 2008, 1002)) - self.widget.setObjectName("widget") - self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.widget) - self.horizontalLayout_3.setContentsMargins(0, 0, 0, 0) - self.horizontalLayout_3.setObjectName("horizontalLayout_3") - self.label_2 = QtWidgets.QLabel(self.widget) - self.label_2.setMinimumSize(QtCore.QSize(1000, 1000)) - self.label_2.setMaximumSize(QtCore.QSize(1000, 1000)) - self.label_2.setObjectName("label_2") - self.horizontalLayout_3.addWidget(self.label_2) - self.label = QtWidgets.QLabel(self.widget) - self.label.setMinimumSize(QtCore.QSize(1000, 1000)) - self.label.setMaximumSize(QtCore.QSize(1000, 1000)) - self.label.setObjectName("label") - self.horizontalLayout_3.addWidget(self.label) - self.widget1 = QtWidgets.QWidget(self.centralwidget) - self.widget1.setGeometry(QtCore.QRect(21, 20, 758, 116)) - self.widget1.setObjectName("widget1") - self.horizontalLayout_6 = QtWidgets.QHBoxLayout(self.widget1) - self.horizontalLayout_6.setContentsMargins(0, 0, 0, 0) - self.horizontalLayout_6.setObjectName("horizontalLayout_6") - self.verticalLayout_2 = QtWidgets.QVBoxLayout() - self.verticalLayout_2.setObjectName("verticalLayout_2") - self.horizontalLayout = QtWidgets.QHBoxLayout() - self.horizontalLayout.setObjectName("horizontalLayout") - self.bt_camera_connect = QtWidgets.QPushButton(self.widget1) - self.bt_camera_connect.setMinimumSize(QtCore.QSize(101, 51)) - self.bt_camera_connect.setMaximumSize(QtCore.QSize(101, 51)) - self.bt_camera_connect.setObjectName("bt_camera_connect") - self.horizontalLayout.addWidget(self.bt_camera_connect) - self.bt_OneShot = QtWidgets.QPushButton(self.widget1) - self.bt_OneShot.setMinimumSize(QtCore.QSize(101, 51)) - self.bt_OneShot.setMaximumSize(QtCore.QSize(101, 51)) - self.bt_OneShot.setObjectName("bt_OneShot") - self.horizontalLayout.addWidget(self.bt_OneShot) - self.bt_KeetShot = QtWidgets.QPushButton(self.widget1) - self.bt_KeetShot.setMinimumSize(QtCore.QSize(101, 51)) - self.bt_KeetShot.setMaximumSize(QtCore.QSize(101, 51)) - self.bt_KeetShot.setObjectName("bt_KeetShot") - self.horizontalLayout.addWidget(self.bt_KeetShot) - self.verticalLayout_2.addLayout(self.horizontalLayout) - self.horizontalLayout_4 = QtWidgets.QHBoxLayout() - self.horizontalLayout_4.setObjectName("horizontalLayout_4") - self.bt_open_image = QtWidgets.QPushButton(self.widget1) - self.bt_open_image.setMinimumSize(QtCore.QSize(101, 51)) - self.bt_open_image.setMaximumSize(QtCore.QSize(101, 51)) - self.bt_open_image.setObjectName("bt_open_image") - self.horizontalLayout_4.addWidget(self.bt_open_image) - self.bt_save_image = QtWidgets.QPushButton(self.widget1) - self.bt_save_image.setMinimumSize(QtCore.QSize(101, 51)) - self.bt_save_image.setMaximumSize(QtCore.QSize(101, 51)) - self.bt_save_image.setObjectName("bt_save_image") - self.horizontalLayout_4.addWidget(self.bt_save_image) - self.verticalLayout_2.addLayout(self.horizontalLayout_4) - self.horizontalLayout_6.addLayout(self.verticalLayout_2) - self.verticalLayout = QtWidgets.QVBoxLayout() - self.verticalLayout.setObjectName("verticalLayout") - self.horizontalLayout_5 = QtWidgets.QHBoxLayout() - 
-        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
-        self.label_3 = QtWidgets.QLabel(self.widget1)
-        self.label_3.setMinimumSize(QtCore.QSize(91, 31))
-        self.label_3.setMaximumSize(QtCore.QSize(91, 31))
-        self.label_3.setObjectName("label_3")
-        self.horizontalLayout_5.addWidget(self.label_3)
-        self.Ex_time = QtWidgets.QTextEdit(self.widget1)
-        self.Ex_time.setMinimumSize(QtCore.QSize(100, 30))
-        self.Ex_time.setMaximumSize(QtCore.QSize(100, 30))
-        self.Ex_time.setObjectName("Ex_time")
-        self.horizontalLayout_5.addWidget(self.Ex_time)
-        spacerItem = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
-        self.horizontalLayout_5.addItem(spacerItem)
-        self.verticalLayout.addLayout(self.horizontalLayout_5)
-        self.horizontalLayout_2 = QtWidgets.QHBoxLayout()
-        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
-        self.threshold = QtWidgets.QTextEdit(self.widget1)
-        self.threshold.setMinimumSize(QtCore.QSize(100, 30))
-        self.threshold.setMaximumSize(QtCore.QSize(100, 30))
-        self.threshold.setObjectName("threshold")
-        self.horizontalLayout_2.addWidget(self.threshold)
-        self.threshold_value = QtWidgets.QSlider(self.widget1)
-        self.threshold_value.setMinimumSize(QtCore.QSize(321, 16))
-        self.threshold_value.setMaximumSize(QtCore.QSize(321, 16))
-        self.threshold_value.setMaximum(255)
-        self.threshold_value.setOrientation(QtCore.Qt.Horizontal)
-        self.threshold_value.setObjectName("threshold_value")
-        self.horizontalLayout_2.addWidget(self.threshold_value)
-        self.verticalLayout.addLayout(self.horizontalLayout_2)
-        self.horizontalLayout_6.addLayout(self.verticalLayout)
-        MainWindow.setCentralWidget(self.centralwidget)
-        self.menubar = QtWidgets.QMenuBar(MainWindow)
-        self.menubar.setGeometry(QtCore.QRect(0, 0, 1363, 22))
-        self.menubar.setObjectName("menubar")
-        MainWindow.setMenuBar(self.menubar)
-        self.statusbar = QtWidgets.QStatusBar(MainWindow)
-        self.statusbar.setObjectName("statusbar")
-        MainWindow.setStatusBar(self.statusbar)
-
-        self.retranslateUi(MainWindow)
-        QtCore.QMetaObject.connectSlotsByName(MainWindow)
-
-    def retranslateUi(self, MainWindow):
-        _translate = QtCore.QCoreApplication.translate
-        MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
-        self.label_2.setText(_translate("MainWindow", "二值化"))
-        self.label.setText(_translate("MainWindow", "原圖"))
-        self.bt_camera_connect.setText(_translate("MainWindow", "相機連線"))
-        self.bt_OneShot.setText(_translate("MainWindow", "單張擷取"))
-        self.bt_KeetShot.setText(_translate("MainWindow", "連續取像"))
-        self.bt_open_image.setText(_translate("MainWindow", "讀入影像"))
-        self.bt_save_image.setText(_translate("MainWindow", "儲存影像"))
-        self.label_3.setText(_translate("MainWindow", "曝光時間"))
diff --git a/camera/old_version/Main.ui b/camera/old_version/Main.ui
deleted file mode 100644
index df836f0..0000000
--- a/camera/old_version/Main.ui
+++ /dev/null
@@ -1,299 +0,0 @@
[The 299 deleted lines here were the Qt Designer XML for the old main window (1363×916): two 1000×1000 preview labels 「二值化」 and 「原圖」, buttons 相機連線 / 單張擷取 / 連續取像 / 讀入影像 / 儲存影像, a 曝光時間 field, and a 0–255 threshold slider. The XML markup itself did not survive extraction and is not reproduced here.]
diff --git a/camera/old_version/camera.py b/camera/old_version/camera.py
deleted file mode 100644
index 909092b..0000000
--- a/camera/old_version/camera.py
+++ /dev/null
@@ -1,226 +0,0 @@
-import sys
-from PyQt5 import QtWidgets, QtGui, QtCore
-from PyQt5.QtCore import QTimer
-from PyQt5.QtWidgets import QFileDialog
-from pypylon import pylon
-import cv2
-import numpy as np
-import os
-from Main import Ui_MainWindow
-
-class CameraApp(QtWidgets.QMainWindow, Ui_MainWindow):
-    def __init__(self):
-        super(CameraApp, self).__init__()
-        self.setupUi(self)
-
-        # 連接按鈕事件
-        self.bt_camera_connect.clicked.connect(self.connect_camera)
-        self.bt_OneShot.clicked.connect(self.one_shot_capture)
-        self.bt_KeetShot.clicked.connect(self.keep_shot_capture)
-        self.bt_open_image.clicked.connect(self.open_image)
-        self.bt_save_image.clicked.connect(self.save_image) # 連接儲存影像按鈕事件
-
-        # 初始化變數
-        self.camera = None
-        self.timer = QTimer()
-        self.timer.timeout.connect(self.keep_shot_process)
-
-        # 設定 QSlider 範圍和事件
-        self.threshold_value.setMinimum(0)
-        self.threshold_value.setMaximum(255)
-        self.threshold_value.setValue(127) # 預設閥值
-        self.threshold_value.valueChanged.connect(self.update_threshold)
-
-        self.threshold.setText(str(self.threshold_value.value()))
-        self.current_image = None
-
-    def get_bgr_image(self):
-        """取得 BGR 格式的影像"""
-        if self.current_image is None:
-            return None
-
-        # 若影像為單通道或 Bayer 格式,進行轉換
-        if len(self.current_image.shape) == 2 or self.current_image.shape[2] == 1:
-            return cv2.cvtColor(self.current_image, cv2.COLOR_BayerBG2BGR)
-        else:
-            return self.current_image
-
-    def connect_camera(self):
-        try:
-            self.camera = pylon.InstantCamera(pylon.TlFactory.GetInstance().CreateFirstDevice())
-            self.camera.Open()
-            self.statusbar.showMessage("相機連線成功")
-        except Exception as e:
-            self.statusbar.showMessage(f"相機連線失敗: {e}")
-
-    def get_exposure_time(self):
-        """從 QTextEdit 取得曝光時間,若為空則使用預設值"""
-        exposure_time_text = self.Ex_time.toPlainText().strip()
-
-        # 當 QTextEdit 值為空時使用預設值
-        if not exposure_time_text:
-            return 5000 # 預設曝光時間(微秒)
-
-        # 驗證是否為數值
-        if exposure_time_text.isdigit():
-            return int(exposure_time_text)
-
-        self.statusbar.showMessage("請輸入有效的曝光時間(整數),使用預設值 5000 微秒")
-        return 5000
-
-    def one_shot_capture(self):
-        if not (self.camera and self.camera.IsOpen()):
-            self.statusbar.showMessage("請先連接相機")
-            return
-
-        try:
-            exposure_time = self.get_exposure_time()
-            self.camera.ExposureTime.SetValue(float(exposure_time))
-            self.statusbar.showMessage(f"曝光時間設定為 {exposure_time} 微秒")
-
-            if self.camera.IsGrabbing():
-                self.camera.StopGrabbing()
-
-            self.camera.StartGrabbing(1)
-            grab_result = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_Return)
-
-            if grab_result.GrabSucceeded():
-                self.current_image = grab_result.Array
-                self.display_original_image()
-                self.apply_threshold_and_display()
-                self.statusbar.showMessage("單張擷取成功")
-            else:
-                self.statusbar.showMessage("擷取失敗")
-
-            grab_result.Release()
-
-        except Exception as e:
-            self.statusbar.showMessage(f"擷取失敗: {e}")
-            if self.camera.IsGrabbing():
-                self.camera.StopGrabbing()
-
-    def display_original_image(self):
-        """顯示原始影像到 label 上"""
-        if self.current_image is not None:
-            # 判斷是否需要轉換 Bayer 格式
-            if len(self.current_image.shape) == 2 or self.current_image.shape[2] == 1:
-                #
影像來自相機(Bayer 格式) - image_bgr = cv2.cvtColor(self.current_image, cv2.COLOR_BayerBG2BGR) - else: - # 影像已經是 BGR 格式 - image_bgr = self.current_image - - # 顯示影像 - height, width, channel = image_bgr.shape - bytes_per_line = 3 * width - qimage = QtGui.QImage(image_bgr.data, width, height, bytes_per_line, QtGui.QImage.Format_BGR888) - pixmap = QtGui.QPixmap.fromImage(qimage).scaled(self.label.size(), QtCore.Qt.KeepAspectRatio) - self.label.setPixmap(pixmap) - - def apply_threshold_and_display(self): - """套用二值化處理並顯示影像到 label_2 上""" - if self.current_image is not None: - # 判斷是否需要轉換 Bayer 格式 - if len(self.current_image.shape) == 2 or self.current_image.shape[2] == 1: - # 影像來自相機(Bayer 格式) - image_bgr = cv2.cvtColor(self.current_image, cv2.COLOR_BayerBG2BGR) - else: - # 影像已經是 BGR 格式 - image_bgr = self.current_image - - # 轉換為灰階 - image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY) - - # 套用二值化處理 - threshold_value = self.threshold_value.value() - _, thresholded_image = cv2.threshold(image_gray, threshold_value, 255, cv2.THRESH_BINARY) - - # 顯示影像 - height, width = thresholded_image.shape - bytes_per_line = width - qimage = QtGui.QImage(thresholded_image.data, width, height, bytes_per_line, QtGui.QImage.Format_Grayscale8) - pixmap = QtGui.QPixmap.fromImage(qimage).scaled(self.label_2.size(), QtCore.Qt.KeepAspectRatio) - self.label_2.setPixmap(pixmap) - - def update_threshold(self): - """當 QSlider 值改變時更新影像""" - self.apply_threshold_and_display() - # 更新 QTextEdit 顯示的值 - self.threshold.setText(str(self.threshold_value.value())) - - def keep_shot_capture(self): - if self.camera and self.camera.IsOpen(): - if not self.timer.isActive(): - self.camera.StartGrabbing(pylon.GrabStrategy_LatestImageOnly) - self.timer.start(30) - self.statusbar.showMessage("開始連續取像") - else: - self.timer.stop() - self.camera.StopGrabbing() - self.statusbar.showMessage("停止連續取像") - else: - self.statusbar.showMessage("請先連接相機") - - def keep_shot_process(self): - if self.camera and self.camera.IsGrabbing(): - grab_result = self.camera.RetrieveResult(5000, pylon.TimeoutHandling_ThrowException) - if grab_result.GrabSucceeded(): - self.current_image = grab_result.Array - self.display_original_image() - self.apply_threshold_and_display() - grab_result.Release() - - def open_image(self): - options = QFileDialog.Options() - file_path, _ = QFileDialog.getOpenFileName(self, "打開影像檔", "", "Images (*.png *.xpm *.jpg *.bmp *.tiff)", options=options) - - if file_path: - if not os.path.exists(file_path): - self.statusbar.showMessage("檔案不存在或路徑錯誤") - return - - image_data = np.fromfile(file_path, dtype=np.uint8) - image = cv2.imdecode(image_data, cv2.IMREAD_COLOR) - - if image is not None: - self.current_image = image - self.display_original_image() - self.apply_threshold_and_display() - self.statusbar.showMessage("影像載入成功") - else: - self.statusbar.showMessage("無法讀取影像") - def save_image(self): - """將二值化影像儲存到檔案""" - if self.current_image is None: - self.statusbar.showMessage("沒有影像可儲存") - return - - # 使用 QFileDialog 取得儲存檔案路徑和名稱 - file_path, _ = QFileDialog.getSaveFileName(self, "儲存二值化影像", "", "Images (*.png *.jpg *.bmp *.tiff)") - - if file_path: - try: - # 取得 BGR 影像並生成二值化影像 - image_bgr = self.get_bgr_image() - if image_bgr is None: - self.statusbar.showMessage("無法取得影像") - return - - image_gray = cv2.cvtColor(image_bgr, cv2.COLOR_BGR2GRAY) - threshold_value = self.threshold_value.value() - _, thresholded_image = cv2.threshold(image_gray, threshold_value, 255, cv2.THRESH_BINARY) - - # 儲存影像 - cv2.imwrite(file_path, thresholded_image) - 
self.statusbar.showMessage(f"二值化影像成功儲存到 {file_path}") - - except Exception as e: - self.statusbar.showMessage(f"影像儲存失敗: {e}") - - - -if __name__ == "__main__": - app = QtWidgets.QApplication(sys.argv) - window = CameraApp() - window.show() - sys.exit(app.exec_()) diff --git a/detection.py b/detection.py index 6d35d58..be1e1e7 100644 --- a/detection.py +++ b/detection.py @@ -1,6 +1,8 @@ import sys import cv2 import numpy as np +import os +import datetime import multiprocessing from PyQt5 import QtWidgets, QtGui, QtCore from PyQt5.QtCore import QTimer @@ -10,6 +12,7 @@ from Detection_window import Ui_MainWindow from camera.camera_process import CameraProcess from read_ini import ConfigReader from log_handler import LogHandler +from Yolo_predict import YoloPredict class DetectionApp(QtWidgets.QMainWindow, Ui_MainWindow): def __init__(self): @@ -41,6 +44,10 @@ class DetectionApp(QtWidgets.QMainWindow, Ui_MainWindow): self.bt_StopKeepShot.clicked.connect(self.StopKeepShot) self.bt_detection.clicked.connect(self.detection) + # 鎖定檢測按鈕及停止相機連續取像 + self.bt_detection.setEnabled(False) + self.bt_StopKeepShot.setEnabled(False) + # ✅ 設定 QTimer,每 100ms 更新影像 self.timer = QTimer(self) self.timer.timeout.connect(self.update_view_origin) @@ -59,6 +66,9 @@ class DetectionApp(QtWidgets.QMainWindow, Ui_MainWindow): self.camera_process.start() self.log_handler.write_log("相機啟動") # ✅ 寫入 log: 相機啟動 self.statusbar.showMessage(f"開始擷取影像 (曝光時間: {self.exposure_time} 微秒)") + self.bt_KeepShot.setEnabled(False) + self.bt_StopKeepShot.setEnabled(True) + self.bt_detection.setEnabled(True) else: print("相機已經在擷取") @@ -70,7 +80,9 @@ class DetectionApp(QtWidgets.QMainWindow, Ui_MainWindow): self.camera_process = None print("已停止影像擷取") self.log_handler.write_log("相機停止") # ✅ 寫入 log: 相機停止 - + self.bt_KeepShot.setEnabled(True) + self.bt_StopKeepShot.setEnabled(False) + self.bt_detection.setEnabled(False) # ✅ 清空 Queue,確保新擷取不會讀取到舊影像 while not self.image_queue.empty(): try: @@ -98,31 +110,25 @@ class DetectionApp(QtWidgets.QMainWindow, Ui_MainWindow): self.log_handler.write_log(f"⚠️ 顯示影像時發生錯誤: {e}") def detection(self): - """ 使用 YOLO進行推論 """ + """ 啟動 YOLO 推論執行緒 """ if self.latest_image is None: self.log_handler.write_log("⚠️ 無影像可進行推論") return - if self.model is None: - self.log_handler.write_log("⚠️ YOLO 模型未載入,無法執行推論") - return + # ✅ 啟動 YOLO 推論執行緒 + self.yolo_thread = YoloPredict( + image=self.latest_image, + model_path="model/best.pt", + callback=self.on_detection_complete # ✅ 設定回呼函數 + ) + self.yolo_thread.start() + self.log_handler.write_log("YOLO 推論執行中...") - try: - # ✅ 轉換影像格式 (BGR → RGB) - image_rgb = cv2.cvtColor(self.latest_image, cv2.COLOR_BGR2RGB) - - # ✅ 使用 YOLO 模型進行推論 - results = self.model.predict(image_rgb, imgsz=640, conf=0.5) # 影像大小 640, 置信度閾值 0.5 - self.log_handler.write_log("YOLO 推論完成") - - # ✅ 取得標註結果 - result_image = results[0].plot() # `plot()` 會回傳畫出標註的影像 - - # ✅ 顯示結果 - self.display_image(result_image, self.view_predict) - - except Exception as e: - self.log_handler.write_log(f"⚠️ 推論時發生錯誤: {e}") + def on_detection_complete(self, result_image, save_path): + """ YOLO 推論完成後的回呼函數 """ + self.display_image(result_image, self.view_predict) + self.statusbar.showMessage(f"推論結果已儲存: {save_path}") + self.log_handler.write_log(f"推論結果儲存至: {save_path}") def closeEvent(self, event): reply = QtWidgets.QMessageBox.question( diff --git a/edge_detection.py b/edge_detection.py deleted file mode 100644 index 4b9e182..0000000 --- a/edge_detection.py +++ /dev/null @@ -1,74 +0,0 @@ -import cv2 -import numpy as np -import os -from tkinter import 
Tk, filedialog - -def select_image(): - Tk().withdraw() # 隱藏主視窗 - file_path = filedialog.askopenfilename( - title="選擇影像檔案", - filetypes=[("影像檔案", "*.png;*.jpg;*.jpeg;*.bmp")] - ) - return file_path - -def read_image_with_unicode_path(image_path): - with open(image_path, 'rb') as file: - binary_data = np.asarray(bytearray(file.read()), dtype=np.uint8) - image = cv2.imdecode(binary_data, cv2.IMREAD_COLOR) - return image - -def save_image(output_path, image): - # 儲存影像到指定路徑 - cv2.imwrite(output_path, image) - print(f"影像已儲存至:{output_path}") - -def detect_lines_with_hough(image): - # 二值化處理 - _, binary_image = cv2.threshold(image, 68, 255, cv2.THRESH_BINARY) - - # 儲存二值化影像 - binary_image_path = os.path.join(r"E:\AP", f"binary.png") - save_image(binary_image_path, binary_image) - - # 霍夫直線偵測 - lines = cv2.HoughLinesP(binary_image, 1, np.pi / 180, threshold=100, minLineLength=1300, maxLineGap=50) - - # 在原影像上繪製直線 - line_image = cv2.cvtColor(image, cv2.COLOR_GRAY2BGR) # 將灰階影像轉為 BGR 以繪製彩色直線 - if lines is not None: - for line in lines: - x1, y1, x2, y2 = line[0] - cv2.line(line_image, (x1, y1), (x2, y2), (0, 0, 255), 2) - - return line_image - -def process_image(image_path): - # 讀取影像 - image = read_image_with_unicode_path(image_path) - - if image is None: - print("無法讀取影像") - return - - # 轉換為灰階影像 - gray_image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY) - - # 執行霍夫直線偵測(前處理為二值化) - line_image = detect_lines_with_hough(gray_image) - - # 取得輸出路徑 - directory = os.path.dirname(r"E:\AP\img0120") - - gray_image_path = os.path.join(directory, f"gray.png") - lines_image_path = os.path.join(directory, f"lines.png") - - # 儲存影像 - save_image(gray_image_path, gray_image) - save_image(lines_image_path, line_image) - -if __name__ == "__main__": - file_path = select_image() - if file_path: - process_image(file_path) - else: - print("未選擇影像檔案") diff --git a/logging/2025-03-10.txt b/logging/2025-03-10.txt index 7d70db9..f676bfe 100644 --- a/logging/2025-03-10.txt +++ b/logging/2025-03-10.txt @@ -48,3 +48,14 @@ [2025-03-10 21:32:21] YOLO 推論完成 [2025-03-10 21:32:32] 相機停止 [2025-03-10 21:32:33] 程式關閉 +[2025-03-10 21:37:06] 程式啟動 +[2025-03-10 21:37:06] YOLO 模型載入成功 +[2025-03-10 21:37:08] 相機啟動 +[2025-03-10 21:37:21] 相機停止 +[2025-03-10 21:37:21] 相機啟動 +[2025-03-10 21:37:29] 相機停止 +[2025-03-10 21:37:31] 相機啟動 +[2025-03-10 21:37:43] YOLO 推論完成 +[2025-03-10 21:37:43] ⚠️ 推論時發生錯誤: 'set' object has no attribute 'flush' +[2025-03-10 21:38:15] 相機停止 +[2025-03-10 21:38:17] 程式關閉 diff --git a/predict.py b/predict.py deleted file mode 100644 index eb605c6..0000000 --- a/predict.py +++ /dev/null @@ -1,60 +0,0 @@ -from ultralytics import YOLO -from tkinter import Tk, filedialog -import cv2 -import numpy as np -import os - -# 載入模型 -model = YOLO('./runs/detect/train3/weights/best.pt') - -# 設定輸出資料夾 -results_dir = "./results" -os.makedirs(results_dir, exist_ok=True) # 確保結果資料夾存在 - - -# 使用 tkinter 開啟檔案選擇對話框 -def select_files(): - root = Tk() - root.withdraw() # 隱藏主視窗 - file_paths = filedialog.askopenfilenames( - title="Select Images", - filetypes=[("Image files", "*.bmp;*.png;*.jpg;*.jpeg")] - ) - return file_paths - - -# 選擇影像檔案 -image_paths = select_files() -if not image_paths: - print("No files selected.") -else: - # 執行推論 - for image_path in image_paths: - results = model( - source=image_path, # 輸入圖片路徑 - save=True, # 儲存推論結果 - device='0', # 使用 GPU 若發生錯誤改成CPU - conf=0.4 # 可以根據需要調整信心度開關 - ) - - # 傾向只顯示方框和信心度 - image = cv2.imread(image_path) - for r in results: - if r.boxes is not None: - for box in r.boxes: - if box.conf.item() > 0.4: # 過濾信心度 - x1, y1, x2, y2 = 
map(int, box.xyxy[0].tolist()) - confidence = box.conf.item() - - # 顯示方框 - cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2) - cv2.putText(image, f'{confidence:.2f}', (x1, y1 - 10), - cv2.FONT_HERSHEY_SIMPLEX, 2, (255, 0, 0), 2) - - # 設定輸出路徑到 results 資料夾 - output_filename = os.path.basename(image_path).replace(".jpg", "_output.jpg").replace(".png", "_output.png") - output_path = os.path.join(results_dir, output_filename) - cv2.imwrite(output_path, image) - print(f"Saved result to {output_path}") - - print("Inference completed!")
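With the standalone predict.py script removed above, occasional batch prediction over image files can still be reproduced with the new YoloPredict worker. The sketch below is illustrative only: the samples/ folder is hypothetical, and since every YoloPredict instance reloads the weights, a large batch is cheaper to run by calling ultralytics' YOLO once, the way the old script did.

import glob
import cv2
from Yolo_predict import YoloPredict

for path in sorted(glob.glob("samples/*.bmp")):  # hypothetical input folder
    image = cv2.imread(path)
    if image is None:
        print(f"skipping unreadable file: {path}")
        continue
    worker = YoloPredict(image=image, model_path="model/best.pt", save_dir="results",
                         callback=lambda img, saved, src=path: print(f"{src} -> {saved}"))
    worker.start()
    worker.join()  # run one file at a time so model loads and GPU use do not overlap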
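One design note on the change in detection.py: YoloPredict invokes its callback on the worker thread, so on_detection_complete updates view_predict and the status bar outside the Qt GUI thread. If that ever needs tightening, a pyqtSignal can relay the result back to the main thread; the sketch below only illustrates that pattern, and DetectionRelay and start_detection are invented names, not part of this change.

import numpy as np
from PyQt5 import QtCore
from Yolo_predict import YoloPredict

class DetectionRelay(QtCore.QObject):
    # Carries the annotated frame and its save path back to the GUI thread.
    finished = QtCore.pyqtSignal(np.ndarray, str)

def start_detection(app, latest_image):
    # `app` stands in for the DetectionApp instance; the wiring is illustrative only.
    relay = DetectionRelay()
    relay.finished.connect(app.on_detection_complete)  # delivered as a queued call on the GUI thread
    worker = YoloPredict(image=latest_image, model_path="model/best.pt",
                         callback=relay.finished.emit)
    worker.start()
    return relay, worker  # keep references alive until the signal has fired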