YOLOv8: Training a Model and Testing on a Video
- Artificial Intelligence
- 2025-09-14 02:18:01

First train YOLOv8 to produce a best.pt weights file, then use that trained model to run detection on a video.
The test video written by the code I originally used could not be opened; the file was apparently corrupted, or some frames were not saved properly.
After modifying the code as shown below, the generated video now opens and plays normally.
1. Training code (train.py)

```python
import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "3"  # likewise, select GPU 3
from ultralytics import YOLO

# Load a model
# model = YOLO("yolov8n.yaml")  # build a new model from YAML
# model = YOLO("yolov8n.pt")    # load a pretrained model (recommended for training)
# ffs = os.listdir("cfg1116/new_cfg")
# for ff in ffs:
model = YOLO(f"cfg1116/yolov8n.yaml")  # build from YAML and transfer weights

# Train the model
# results = model.train(data=r"/mnt/disk3/sunjiahui/CV-code/v8_all/data.yaml", epochs=5, imgsz=1280, workers=0, batch=2, device=[2])
results = model.train(
    data=r"/mnt/disk3/sunjiahui/CV-code/v8_all/data.yaml",
    epochs=500,
    imgsz=1280,
    workers=0,
    batch=2,
    device=[0],
    hsv_h=0.015,      # HSV hue augmentation
    hsv_s=0.7,        # HSV saturation augmentation
    hsv_v=0.4,        # HSV value (brightness) augmentation
    degrees=0.0,      # rotation angle
    translate=0.1,    # translation fraction
    scale=0.5,        # scaling factor
    shear=0.0,        # shear transform
    perspective=0.0,  # perspective transform
    flipud=0.0,       # vertical flip probability
    fliplr=0.5,       # horizontal flip probability
    mosaic=1.0,       # Mosaic augmentation probability
    mixup=0.0         # MixUp augmentation probability
)
model.val(imgsz=[1280, 1280])
```
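Before running the full video pipeline, it can be worth checking the trained weights on a single image. The snippet below is a minimal sketch and not part of the original scripts; the image path sample.jpg is a placeholder, and it assumes the weights were saved under runs/detect/train2/weights/best.pt as in the test script below.

```python
import cv2
from ultralytics import YOLO

# Hypothetical sanity check: run the trained weights on one image
# before processing a whole video. "sample.jpg" is a placeholder path.
model = YOLO("runs/detect/train2/weights/best.pt")
results = model.predict("sample.jpg", conf=0.15, imgsz=1280)

annotated = results[0].plot()            # BGR numpy array with boxes drawn
cv2.imwrite("sample_pred.jpg", annotated)
print(results[0].boxes)                  # detected boxes, classes, confidences
```

If the detections look reasonable here, problems with the output video are more likely caused by the writer/codec than by the model itself.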
2. Test code: running the model on a video

```python
from ultralytics import YOLO
import cv2
import os

os.environ["CUDA_VISIBLE_DEVICES"] = "2"  # select GPU 2


def process_video():
    # Initialize the model
    model = YOLO("runs/detect/train2/weights/best.pt")

    # Input and output paths
    input_path = "/mnt/disk3/sunjiahui/CV-code/v8_all/XIONG_AN/shipin.mp4"
    output_path = "/mnt/disk3/sunjiahui/CV-code/v8_all/XIONG_AN/output_video15.mp4"

    # Try several codecs in turn
    codec_options = ['mp4v', 'avc1', 'X264', 'MJPG']
    success = False

    for codec in codec_options:
        cap, out = None, None  # so the finally block is safe if setup fails
        try:
            cap = cv2.VideoCapture(input_path)
            fps = int(cap.get(cv2.CAP_PROP_FPS)) or 30  # fall back to 30 if fps reads as 0
            width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))

            fourcc = cv2.VideoWriter_fourcc(*codec)
            out = cv2.VideoWriter(output_path, fourcc, fps, (width, height))

            print(f"Trying codec {codec}...")
            while cap.isOpened():
                ret, frame = cap.read()
                if not ret:
                    break

                results = model.predict(frame, conf=0.15)
                annotated_frame = results[0].plot()

                # Make sure the frame size matches what the writer expects
                if annotated_frame.shape[:2] != (height, width):
                    annotated_frame = cv2.resize(annotated_frame, (width, height))

                out.write(annotated_frame)

            success = True
            break

        except Exception as e:
            print(f"Codec {codec} failed: {str(e)}")
            if os.path.exists(output_path):
                os.remove(output_path)
            continue

        finally:
            if cap is not None:
                cap.release()
            if out is not None:
                out.release()

    if success:
        print(f"Video generated successfully! Saved to: {os.path.abspath(output_path)}")
        print("If it still will not play, try the following:")
        print("1. Open it with the VLC player (best compatibility)")
        print("2. Re-encode it: ffmpeg -i output_video.mp4 -c:v libx264 final.mp4")
    else:
        print("All codecs failed, falling back to an image sequence...")
        save_as_image_sequence(model, input_path)


def save_as_image_sequence(model, input_path):
    """Fallback: save the annotated frames as an image sequence."""
    output_dir = "video_frames"
    os.makedirs(output_dir, exist_ok=True)

    cap = cv2.VideoCapture(input_path)
    frame_count = 0

    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        results = model.predict(frame)
        annotated_frame = results[0].plot()
        cv2.imwrite(f"{output_dir}/frame_{frame_count:04d}.jpg", annotated_frame)
        frame_count += 1

    cap.release()
    print(f"Image sequence saved to {output_dir}; assemble it into a video with:")
    print(f"ffmpeg -framerate 30 -i {output_dir}/frame_%04d.jpg -c:v libx264 output.mp4")


if __name__ == "__main__":
    process_video()
```
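As an alternative to the hand-rolled cv2.VideoWriter loop above, Ultralytics' own predict call can read a video file directly and save an annotated copy. Below is a minimal sketch, assuming the same weights and input path as the script above; the output location (a runs/detect/predict* folder) and codec are then chosen by the library.

```python
from ultralytics import YOLO

# Alternative sketch: let Ultralytics handle the video reading and writing.
# save=True writes the annotated video into a runs/detect/predict* folder.
model = YOLO("runs/detect/train2/weights/best.pt")
model.predict(
    source="/mnt/disk3/sunjiahui/CV-code/v8_all/XIONG_AN/shipin.mp4",
    conf=0.15,
    save=True,
)
```

If the saved video still does not open, re-encoding with ffmpeg and libx264 (the command printed by the script above) remains the most reliable fix.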