|
|
|
|
|
from ultralytics import YOLO |
|
from ultralytics.cfg import get_cfg |
|
from ultralytics.engine.exporter import Exporter |
|
from ultralytics.models.yolo import classify, detect, segment |
|
from ultralytics.utils import ASSETS, DEFAULT_CFG, WEIGHTS_DIR |
|
|
|
# Model configuration YAML for each task: detection, segmentation, classification.
CFG_DET = "yolov8n.yaml"

CFG_SEG = "yolov8n-seg.yaml"

CFG_CLS = "yolov8n-cls.yaml"

# Shared default config namespace; mutated per-test before building validators.
CFG = get_cfg(DEFAULT_CFG)

# Base weights path without extension; tests append ".pt" / "-seg.pt" as needed.
MODEL = WEIGHTS_DIR / "yolov8n"
|
|
|
|
|
def test_func(*args):
    """Callback stub registered by the tests; prints a marker proving the callback fired."""
    print("callback test passed")
|
|
|
|
|
def test_export():
    """Test model exporting functionality."""
    model_exporter = Exporter()
    # Register the callback and confirm it was stored under the right hook.
    model_exporter.add_callback("on_export_start", test_func)
    assert test_func in model_exporter.callbacks["on_export_start"], "callback test failed"
    # Export a fresh detection model, then reload the exported file and run it on the assets.
    exported_file = model_exporter(model=YOLO(CFG_DET).model)
    YOLO(exported_file)(ASSETS)
|
|
|
|
|
def test_detect():
    """Test object detection functionality: train, validate, predict, and resume."""
    overrides = {"data": "coco8.yaml", "model": CFG_DET, "imgsz": 32, "epochs": 1, "save": False}
    CFG.data = "coco8.yaml"
    CFG.imgsz = 32

    # Trainer: register callback, verify registration, run one short epoch.
    trainer = detect.DetectionTrainer(overrides=overrides)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator: validate the best checkpoint produced by training.
    val = detect.DetectionValidator(args=CFG)
    val.add_callback("on_val_start", test_func)
    assert test_func in val.callbacks["on_val_start"], "callback test failed"
    val(model=trainer.best)

    # Predictor: run inference on the bundled assets with pretrained weights.
    pred = detect.DetectionPredictor(overrides={"imgsz": [64, 64]})
    pred.add_callback("on_predict_start", test_func)
    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
    result = pred(source=ASSETS, model=f"{MODEL}.pt")
    assert len(result), "predictor test failed"

    # Resume from the last checkpoint: this is expected to raise, since the run
    # already completed its single epoch; reaching the end means resume failed to error.
    overrides["resume"] = trainer.last
    trainer = detect.DetectionTrainer(overrides=overrides)
    try:
        trainer.train()
    except Exception as e:
        print(f"Expected exception caught: {e}")
        return

    # BUG FIX: the original built the Exception but never raised it,
    # so a silently-successful resume would pass the test.
    raise Exception("Resume test failed!")
|
|
|
|
|
def test_segment():
    """Test image segmentation functionality: train, validate, predict, and resume."""
    overrides = {"data": "coco8-seg.yaml", "model": CFG_SEG, "imgsz": 32, "epochs": 1, "save": False}
    CFG.data = "coco8-seg.yaml"
    CFG.imgsz = 32

    # Trainer: register callback, verify registration, run one short epoch.
    trainer = segment.SegmentationTrainer(overrides=overrides)
    trainer.add_callback("on_train_start", test_func)
    assert test_func in trainer.callbacks["on_train_start"], "callback test failed"
    trainer.train()

    # Validator: validate the best checkpoint produced by training.
    val = segment.SegmentationValidator(args=CFG)
    val.add_callback("on_val_start", test_func)
    assert test_func in val.callbacks["on_val_start"], "callback test failed"
    val(model=trainer.best)

    # Predictor: run inference on the bundled assets with pretrained seg weights.
    pred = segment.SegmentationPredictor(overrides={"imgsz": [64, 64]})
    pred.add_callback("on_predict_start", test_func)
    assert test_func in pred.callbacks["on_predict_start"], "callback test failed"
    result = pred(source=ASSETS, model=f"{MODEL}-seg.pt")
    assert len(result), "predictor test failed"

    # Resume from the last checkpoint: this is expected to raise, since the run
    # already completed its single epoch; reaching the end means resume failed to error.
    overrides["resume"] = trainer.last
    trainer = segment.SegmentationTrainer(overrides=overrides)
    try:
        trainer.train()
    except Exception as e:
        print(f"Expected exception caught: {e}")
        return

    # BUG FIX: the original built the Exception but never raised it,
    # so a silently-successful resume would pass the test.
    raise Exception("Resume test failed!")
|
|
|
|
|
def test_classify():
    """Test image classification functionality."""
    train_args = {"data": "imagenet10", "model": CFG_CLS, "imgsz": 32, "epochs": 1, "save": False}
    CFG.data = "imagenet10"
    CFG.imgsz = 32

    # Trainer: attach a callback, confirm registration, then run one short epoch.
    classification_trainer = classify.ClassificationTrainer(overrides=train_args)
    classification_trainer.add_callback("on_train_start", test_func)
    assert test_func in classification_trainer.callbacks["on_train_start"], "callback test failed"
    classification_trainer.train()

    # Validator: validate the best checkpoint produced by training.
    validator = classify.ClassificationValidator(args=CFG)
    validator.add_callback("on_val_start", test_func)
    assert test_func in validator.callbacks["on_val_start"], "callback test failed"
    validator(model=classification_trainer.best)

    # Predictor: run inference on the bundled assets with the best checkpoint.
    predictor = classify.ClassificationPredictor(overrides={"imgsz": [64, 64]})
    predictor.add_callback("on_predict_start", test_func)
    assert test_func in predictor.callbacks["on_predict_start"], "callback test failed"
    outputs = predictor(source=ASSETS, model=classification_trainer.best)
    assert len(outputs), "predictor test failed"
|
|