From 984b97ca011f2c8bd42d05e73401e274d111239c Mon Sep 17 00:00:00 2001 From: StrickenSun <2086347760@qq.com> Date: Fri, 18 Apr 2025 21:06:11 +0800 Subject: [PATCH 1/4] try --- requirements.txt | 26 +++++++++++++------------- train.py | 10 +++++----- 2 files changed, 18 insertions(+), 18 deletions(-) diff --git a/requirements.txt b/requirements.txt index 22b51fc490e3..507177454265 100755 --- a/requirements.txt +++ b/requirements.txt @@ -1,24 +1,24 @@ # pip install -r requirements.txt # Base ---------------------------------------- -matplotlib>=3.2.2 -numpy>=1.18.5 -opencv-python>=4.1.2 -Pillow>=7.1.2 -PyYAML>=5.3.1 -requests>=2.23.0 -scipy>=1.4.1 -torch>=1.7.0 -torchvision>=0.8.1 -tqdm>=4.41.0 +matplotlib==3.2.2 +numpy==1.18.5 +opencv-python==4.1.2.30 +Pillow==7.1.2 +PyYAML==5.3.1 +requests==2.23.0 +scipy==1.4.1 +#torch>=1.7.0 +#torchvision>=0.8.1 +tqdm==4.41.0 # Logging ------------------------------------- -tensorboard>=2.4.1 +tensorboard==2.4.1 # wandb # Plotting ------------------------------------ -pandas>=1.1.4 -seaborn>=0.11.0 +pandas==1.1.4 +seaborn==0.11.0 # Export -------------------------------------- # coremltools>=4.1 # CoreML export diff --git a/train.py b/train.py index 29ae43e3bd37..2af27fee899d 100644 --- a/train.py +++ b/train.py @@ -433,12 +433,12 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=ROOT / 'yolov5s.pt', help='initial weights path') - parser.add_argument('--cfg', type=str, default='', help='model.yaml path') - parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path') + parser.add_argument('--weights', type=str, default=r'D:\lzy\yolov5\weight\yolov5n.pt', help='initial weights path') + parser.add_argument('--cfg', type=str, default=r'D:\lzy\yolov5\models\yolov5n.yaml', help='model.yaml path') + parser.add_argument('--data', type=str, default=r'D:\lzy\yolov5\data\my_data.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path') parser.add_argument('--epochs', type=int, default=300) - parser.add_argument('--batch-size', type=int, default=16, help='total batch size for all GPUs') + parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') parser.add_argument('--resume', nargs='?', const=True, default=False, help='resume most recent training') @@ -449,7 +449,7 @@ def parse_opt(known=False): parser.add_argument('--bucket', type=str, default='', help='gsutil bucket') parser.add_argument('--cache', type=str, nargs='?', const='ram', help='--cache images in "ram" (default) or "disk"') parser.add_argument('--image-weights', action='store_true', help='use weighted image selection for training') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--device', default='0', help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--multi-scale', action='store_true', help='vary img-size +/- 50%%') parser.add_argument('--single-cls', action='store_true', help='train multi-class data as single-class') parser.add_argument('--adam', action='store_true', help='use torch.optim.Adam() optimizer') From ef6a71d51c2375834e583ab939126ae199de5bb8 Mon Sep 17 00:00:00 2001 From: StrickenSun <2086347760@qq.com> Date: Sat, 19 Apr 2025 16:32:02 +0800 Subject: [PATCH 2/4] trained --- detect.py | 6 +++--- train.py | 6 ++++-- 2 files changed, 7 insertions(+), 5 deletions(-) diff --git a/detect.py b/detect.py index 4e497305668c..b6f0f1f45d3f 100644 --- a/detect.py +++ b/detect.py @@ -266,13 +266,13 @@ def wrap_frozen_graph(gd, inputs, outputs): def parse_opt(): parser = argparse.ArgumentParser() - parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model path(s)') - parser.add_argument('--source', type=str, default=ROOT / 'data/images', help='file/dir/URL/glob, 0 for webcam') + parser.add_argument('--weights', nargs='+', type=str, default=r'D:\lzy\yolov5\runs\train\exp6\weights\best.pt', help='model path(s)') + parser.add_argument('--source', type=str, default=r'D:\lzy\yolov5\data\yolo_dataset\test\images', help='file/dir/URL/glob, 0 for webcam') parser.add_argument('--imgsz', '--img', '--img-size', nargs='+', type=int, default=[640], help='inference size h,w') parser.add_argument('--conf-thres', type=float, default=0.25, help='confidence threshold') parser.add_argument('--iou-thres', type=float, default=0.45, help='NMS IoU threshold') parser.add_argument('--max-det', type=int, default=1000, help='maximum detections per image') - parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu') + parser.add_argument('--device', default='',help='cuda device, i.e. 
0 or 0,1,2,3 or cpu') parser.add_argument('--view-img', action='store_true', help='show results') parser.add_argument('--save-txt', action='store_true', help='save results to *.txt') parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels') diff --git a/train.py b/train.py index 2af27fee899d..f5f6b03d3e36 100644 --- a/train.py +++ b/train.py @@ -433,11 +433,13 @@ def train(hyp, # path/to/hyp.yaml or hyp dictionary def parse_opt(known=False): parser = argparse.ArgumentParser() - parser.add_argument('--weights', type=str, default=r'D:\lzy\yolov5\weight\yolov5n.pt', help='initial weights path') + parser.add_argument('--weights', type=str, default=r'D:\lzy\yolov5\runs\train\exp3\weights\best.pt', help='initial weights path') parser.add_argument('--cfg', type=str, default=r'D:\lzy\yolov5\models\yolov5n.yaml', help='model.yaml path') parser.add_argument('--data', type=str, default=r'D:\lzy\yolov5\data\my_data.yaml', help='dataset.yaml path') parser.add_argument('--hyp', type=str, default=ROOT / 'data/hyps/hyp.scratch.yaml', help='hyperparameters path') - parser.add_argument('--epochs', type=int, default=300) + parser.add_argument('--epochs', type=int, default=150 + + ) parser.add_argument('--batch-size', type=int, default=1, help='total batch size for all GPUs') parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='train, val image size (pixels)') parser.add_argument('--rect', action='store_true', help='rectangular training') From f796cf4764b0214e2f08f5d847fdc490eed6642c Mon Sep 17 00:00:00 2001 From: UltralyticsAssistant Date: Sat, 19 Apr 2025 09:16:14 +0000 Subject: [PATCH 3/4] Auto-format by https://ultralytics.com/actions --- .github/ISSUE_TEMPLATE/bug-report.md | 5 +- .github/ISSUE_TEMPLATE/feature-request.md | 7 +- .github/ISSUE_TEMPLATE/question.md | 5 +- CONTRIBUTING.md | 20 +- README.md | 108 ++--- detect.py | 215 +++++---- export.py | 299 +++++++----- hubconf.py | 72 +-- models/common.py | 112 +++-- models/experimental.py | 21 +- models/hub/anchors.yaml | 63 ++- models/hub/yolov3-spp.yaml | 73 +-- models/hub/yolov3-tiny.yaml | 55 +-- models/hub/yolov3.yaml | 73 +-- models/hub/yolov5-fpn.yaml | 55 +-- models/hub/yolov5-p2.yaml | 85 ++-- models/hub/yolov5-p6.yaml | 89 ++-- models/hub/yolov5-p7.yaml | 95 ++-- models/hub/yolov5-panet.yaml | 65 +-- models/hub/yolov5l6.yaml | 97 ++-- models/hub/yolov5m6.yaml | 97 ++-- models/hub/yolov5n6.yaml | 97 ++-- models/hub/yolov5s-ghost.yaml | 65 +-- models/hub/yolov5s-transformer.yaml | 65 +-- models/hub/yolov5s6.yaml | 97 ++-- models/hub/yolov5x6.yaml | 97 ++-- models/tf.py | 192 +++++--- models/yolo.py | 106 ++-- models/yolov5l.yaml | 65 +-- models/yolov5m.yaml | 65 +-- models/yolov5n.yaml | 65 +-- models/yolov5s.yaml | 65 +-- models/yolov5x.yaml | 65 +-- train.py | 563 +++++++++++++--------- utils/activations.py | 14 +- utils/augmentations.py | 47 +- utils/autoanchor.py | 89 ++-- utils/aws/resume.py | 16 +- utils/callbacks.py | 63 ++- utils/datasets.py | 505 +++++++++++-------- utils/downloads.py | 81 ++-- utils/flask_rest_api/README.md | 2 +- utils/flask_rest_api/example_request.py | 3 +- utils/flask_rest_api/restapi.py | 7 +- utils/general.py | 439 ++++++++++------- utils/google_app_engine/app.yaml | 2 +- utils/loggers/__init__.py | 77 +-- utils/loggers/wandb/README.md | 59 +-- utils/loggers/wandb/log_dataset.py | 16 +- utils/loggers/wandb/sweep.py | 4 +- utils/loggers/wandb/sweep.yaml | 10 +- utils/loggers/wandb/wandb_utils.py | 380 ++++++++------- utils/loss.py | 68 +-- 
utils/metrics.py | 124 ++--- utils/plots.py | 254 ++++++---- utils/torch_utils.py | 136 +++--- val.py | 241 +++++---- 57 files changed, 3302 insertions(+), 2653 deletions(-) diff --git a/.github/ISSUE_TEMPLATE/bug-report.md b/.github/ISSUE_TEMPLATE/bug-report.md index 62a02a3a6948..09f245108391 100644 --- a/.github/ISSUE_TEMPLATE/bug-report.md +++ b/.github/ISSUE_TEMPLATE/bug-report.md @@ -1,10 +1,9 @@ --- name: "πŸ› Bug report" about: Create a report to help us improve -title: '' +title: "" labels: bug -assignees: '' - +assignees: "" --- Before submitting a bug report, please be aware that your issue **must be reproducible** with all of the following, diff --git a/.github/ISSUE_TEMPLATE/feature-request.md b/.github/ISSUE_TEMPLATE/feature-request.md index 1fdf99045488..e0e7fbfed34d 100644 --- a/.github/ISSUE_TEMPLATE/feature-request.md +++ b/.github/ISSUE_TEMPLATE/feature-request.md @@ -1,10 +1,9 @@ --- name: "πŸš€ Feature request" about: Suggest an idea for this project -title: '' +title: "" labels: enhancement -assignees: '' - +assignees: "" --- ## πŸš€ Feature @@ -13,7 +12,7 @@ assignees: '' ## Motivation - ## Pitch diff --git a/.github/ISSUE_TEMPLATE/question.md b/.github/ISSUE_TEMPLATE/question.md index 2892cfe262fb..17d46cb1c44f 100644 --- a/.github/ISSUE_TEMPLATE/question.md +++ b/.github/ISSUE_TEMPLATE/question.md @@ -1,10 +1,9 @@ --- name: "❓Question" about: Ask a general question -title: '' +title: "" labels: question -assignees: '' - +assignees: "" --- ## ❔Question diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 38601775caeb..444d444b670a 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -18,16 +18,19 @@ Submitting a PR is easy! This example shows how to submit a PR for updating `req ### 1. Select File to Update Select `requirements.txt` to update by clicking on it in GitHub. +

PR_step1

### 2. Click 'Edit this file'

Button is in top-right corner.
+

PR_step2

### 3. Make Changes

Change `matplotlib` version from `3.2.2` to `3.3`.
+

PR_step3

### 4. Preview Changes and Submit PR

@@ -35,6 +38,7 @@ Change `matplotlib` version from `3.2.2` to `3.3`.
Click on the **Preview changes** tab to verify your updates. At the bottom of the screen select 'Create a **new branch** for this commit', assign your branch a descriptive name such as `fix/matplotlib_version` and click the green **Propose changes** button. All done, your PR is now submitted to YOLOv5 for review and approval πŸ˜ƒ!
+

PR_step4

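The four steps above use the GitHub web UI. For contributors who prefer working locally, a rough command-line equivalent is sketched below; the fork URL, branch name, and commit message are illustrative, not part of this guide.

```bash
# Sketch of the same requirements.txt update done locally (fork/branch/message are examples)
git clone https://github.com/<your-username>/yolov5.git  # clone your fork
cd yolov5
git checkout -b fix/matplotlib_version  # descriptive branch name, per the guide
sed -i 's/matplotlib>=3.2.2/matplotlib>=3.3/' requirements.txt  # the one-line edit from step 3
git commit -am "Update matplotlib to 3.3"
git push -u origin fix/matplotlib_version  # then open the PR from GitHub
```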
### PR recommendations @@ -49,20 +53,20 @@ To allow your work to be integrated as seamlessly as possible, we advise you to: ```bash git remote add upstream https://github.com/ultralytics/yolov5.git git fetch upstream -git checkout feature # <----- replace 'feature' with local branch name +git checkout feature # <----- replace 'feature' with local branch name git merge upstream/master git push -u origin -f ``` - βœ… Verify all Continuous Integration (CI) **checks are passing**. - βœ… Reduce changes to the absolute **minimum** required for your bug fix or feature addition. _"It is not daily increase - but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee + but daily decrease, hack away the unessential. The closer to the source, the less wastage there is."_ -Bruce Lee ## Submitting a Bug Report πŸ› If you spot a problem with YOLOv5 please submit a Bug Report! -For us to start investigating a possibel problem we need to be able to reproduce it ourselves first. We've created a few +For us to start investigating a possible problem we need to be able to reproduce it ourselves first. We've created a few short guidelines below to help users provide what we need in order to get started. When asking a question, people will be better able to provide help if you provide **code** that they can easily @@ -70,17 +74,17 @@ understand and use to **reproduce** the problem. This is referred to by communit a [minimum reproducible example](https://stackoverflow.com/help/minimal-reproducible-example). Your code that reproduces the problem should be: -* βœ… **Minimal** – Use as little code as possible that still produces the same problem -* βœ… **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself -* βœ… **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem +- βœ… **Minimal** – Use as little code as possible that still produces the same problem +- βœ… **Complete** – Provide **all** parts someone else needs to reproduce your problem in the question itself +- βœ… **Reproducible** – Test the code you're about to provide to make sure it reproduces the problem In addition to the above requirements, for [Ultralytics](https://ultralytics.com/) to provide assistance your code should be: -* βœ… **Current** – Verify that your code is up-to-date with current +- βœ… **Current** – Verify that your code is up-to-date with current GitHub [master](https://github.com/ultralytics/yolov5/tree/master), and if necessary `git pull` or `git clone` a new copy to ensure your problem has not already been resolved by previous commits. -* βœ… **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this +- βœ… **Unmodified** – Your problem must be reproducible without any modifications to the codebase in this repository. [Ultralytics](https://ultralytics.com/) does not provide support for custom code ⚠️. If you believe your problem meets all of the above criteria, please close this issue and raise a new one using the πŸ› ** diff --git a/README.md b/README.md index cdefaeab4e60..279d8fa7dfc9 100644 --- a/README.md +++ b/README.md @@ -46,7 +46,7 @@ YOLOv5 πŸš€ is a family of object detection architectures and models pretrained open-source research into future vision AI methods, incorporating lessons learned and best practices evolved over thousands of hours of research and development.

- @@ -65,6 +65,7 @@ See the [YOLOv5 Docs](https://docs.ultralytics.com) for full documentation on tr [**Python>=3.6.0**](https://www.python.org/) is required with all [requirements.txt](https://github.com/ultralytics/yolov5/blob/master/requirements.txt) installed including [**PyTorch>=1.7**](https://pytorch.org/get-started/locally/): + ```bash @@ -85,10 +86,10 @@ from the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) import torch # Model -model = torch.hub.load('ultralytics/yolov5', 'yolov5s') # or yolov5m, yolov5l, yolov5x, custom +model = torch.hub.load("ultralytics/yolov5", "yolov5s") # or yolov5m, yolov5l, yolov5x, custom # Images -img = 'https://ultralytics.com/images/zidane.jpg' # or file, Path, PIL, OpenCV, numpy, list +img = "https://ultralytics.com/images/zidane.jpg" # or file, Path, PIL, OpenCV, numpy, list # Inference results = model(img) @@ -99,8 +100,6 @@ results.print() # or .show(), .save(), .crop(), .pandas(), etc. - -
Inference with detect.py

@@ -109,12 +108,12 @@ the [latest YOLOv5 release](https://github.com/ultralytics/yolov5/releases) and
```bash
$ python detect.py --source 0 # webcam
- file.jpg # image
- file.mp4 # video
- path/ # directory
- path/*.jpg # glob
- 'https://youtu.be/NUsoVlDFqZg' # YouTube
- 'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
+file.jpg # image
+file.mp4 # video
+path/ # directory
+path/*.jpg # glob
+'https://youtu.be/NUsoVlDFqZg' # YouTube
+'rtsp://example.com/media.mp4' # RTSP, RTMP, HTTP stream
```
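Note that patches 1-2 earlier in this series hard-code absolute Windows paths (e.g. `D:\lzy\yolov5\runs\train\exp6\weights\best.pt`) as argparse defaults. The same effect is available without editing the source, by passing the flags that `parse_opt` already exposes; a sketch with repository-relative placeholder paths:

```bash
# Equivalent of patch 2's edited detect.py defaults, passed as CLI flags (paths are illustrative)
python detect.py --weights runs/train/exp6/weights/best.pt \
                 --source data/yolo_dataset/test/images \
                 --device 0
```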
@@ -129,32 +128,32 @@ largest `--batch-size` your GPU allows (batch sizes shown for 16 GB devices).
```bash
$ python train.py --data coco.yaml --cfg yolov5s.yaml --weights '' --batch-size 64
- yolov5m 40
- yolov5l 24
- yolov5x 16
+yolov5m 40
+yolov5l 24
+yolov5x 16
```
-
+
Tutorials -* [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  πŸš€ RECOMMENDED -* [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ +- [Train Custom Data](https://github.com/ultralytics/yolov5/wiki/Train-Custom-Data)  πŸš€ RECOMMENDED +- [Tips for Best Training Results](https://github.com/ultralytics/yolov5/wiki/Tips-for-Best-Training-Results)  ☘️ RECOMMENDED -* [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW -* [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW -* [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) -* [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW -* [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) πŸš€ -* [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) -* [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) -* [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) -* [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) -* [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW -* [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx) +- [Weights & Biases Logging](https://github.com/ultralytics/yolov5/issues/1289)  🌟 NEW +- [Roboflow for Datasets, Labeling, and Active Learning](https://github.com/ultralytics/yolov5/issues/4975)  🌟 NEW +- [Multi-GPU Training](https://github.com/ultralytics/yolov5/issues/475) +- [PyTorch Hub](https://github.com/ultralytics/yolov5/issues/36)  ⭐ NEW +- [TorchScript, ONNX, CoreML Export](https://github.com/ultralytics/yolov5/issues/251) πŸš€ +- [Test-Time Augmentation (TTA)](https://github.com/ultralytics/yolov5/issues/303) +- [Model Ensembling](https://github.com/ultralytics/yolov5/issues/318) +- [Model Pruning/Sparsity](https://github.com/ultralytics/yolov5/issues/304) +- [Hyperparameter Evolution](https://github.com/ultralytics/yolov5/issues/607) +- [Transfer Learning with Frozen Layers](https://github.com/ultralytics/yolov5/issues/1314)  ⭐ NEW +- [TensorRT Deployment](https://github.com/wang-xinyu/tensorrtx)
@@ -178,7 +177,7 @@ Get started in seconds with our verified environments. Click each icon below for details.
-
+
## Integrations
@@ -191,10 +190,9 @@ Get started in seconds with our verified environments. Click each icon below for -|Weights and Biases|Roboflow ⭐ NEW| -|:-:|:-:| -|Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme)|Label and automatically export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | - +| Weights and Biases | Roboflow ⭐ NEW | +| :-------------------------------------------------------------------------------------------------------------------------------------------------------: | :----------------------------------------------------------------------------------------------------------------------------------------: | +| Automatically track and visualize all your YOLOv5 training runs in the cloud with [Weights & Biases](https://wandb.ai/site?utm_campaign=repo_yolo_readme) | Label and automatically export your custom datasets directly to YOLOv5 for training with [Roboflow](https://roboflow.com/?ref=ultralytics) | runs/exp{sep}2, runs/exp{sep}3, ... etc. path = Path(path) # os-agnostic if path.exists() and not exist_ok: suffix = path.suffix - path = path.with_suffix('') + path = path.with_suffix("") dirs = glob.glob(f"{path}{sep}*") # similar paths matches = [re.search(rf"%s{sep}(\d+)" % path.stem, d) for d in dirs] i = [int(m.groups()[0]) for m in matches if m] # indices n = max(i) + 1 if i else 2 # increment number path = Path(f"{path}{sep}{n}{suffix}") # update path - dir = path if path.suffix == '' else path.parent # directory + dir = path if path.suffix == "" else path.parent # directory if not dir.exists() and mkdir: dir.mkdir(parents=True, exist_ok=True) # make directory return path diff --git a/utils/google_app_engine/app.yaml b/utils/google_app_engine/app.yaml index ac29d104b144..5056b7c1186d 100644 --- a/utils/google_app_engine/app.yaml +++ b/utils/google_app_engine/app.yaml @@ -11,4 +11,4 @@ manual_scaling: resources: cpu: 1 memory_gb: 4 - disk_size_gb: 20 \ No newline at end of file + disk_size_gb: 20 diff --git a/utils/loggers/__init__.py b/utils/loggers/__init__.py index 0750be6c8828..57a1de6ac5ed 100644 --- a/utils/loggers/__init__.py +++ b/utils/loggers/__init__.py @@ -1,7 +1,5 @@ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license -""" -Logging utils -""" +"""Logging utils.""" import warnings from threading import Thread @@ -14,17 +12,17 @@ from utils.plots import plot_images, plot_results from utils.torch_utils import de_parallel -LOGGERS = ('csv', 'tb', 'wandb') # text-file, TensorBoard, Weights & Biases +LOGGERS = ("csv", "tb", "wandb") # text-file, TensorBoard, Weights & Biases try: import wandb - assert hasattr(wandb, '__version__') # verify package import not local dir + assert hasattr(wandb, "__version__") # verify package import not local dir except (ImportError, AssertionError): wandb = None -class Loggers(): +class Loggers: # YOLOv5 Loggers class def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, include=LOGGERS): self.save_dir = save_dir @@ -33,31 +31,42 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, self.hyp = hyp self.logger = logger # for printing results to console self.include = include - self.keys = ['train/box_loss', 'train/obj_loss', 'train/cls_loss', # train loss - 'metrics/precision', 'metrics/recall', 'metrics/mAP_0.5', 'metrics/mAP_0.5:0.95', # metrics - 'val/box_loss', 'val/obj_loss', 'val/cls_loss', # val loss - 'x/lr0', 'x/lr1', 
'x/lr2'] # params + self.keys = [ + "train/box_loss", + "train/obj_loss", + "train/cls_loss", # train loss + "metrics/precision", + "metrics/recall", + "metrics/mAP_0.5", + "metrics/mAP_0.5:0.95", # metrics + "val/box_loss", + "val/obj_loss", + "val/cls_loss", # val loss + "x/lr0", + "x/lr1", + "x/lr2", + ] # params for k in LOGGERS: setattr(self, k, None) # init empty logger dictionary self.csv = True # always log to csv # Message if not wandb: - prefix = colorstr('Weights & Biases: ') + prefix = colorstr("Weights & Biases: ") s = f"{prefix}run 'pip install wandb' to automatically track and visualize YOLOv5 πŸš€ runs (RECOMMENDED)" print(emojis(s)) # TensorBoard s = self.save_dir - if 'tb' in self.include and not self.opt.evolve: - prefix = colorstr('TensorBoard: ') + if "tb" in self.include and not self.opt.evolve: + prefix = colorstr("TensorBoard: ") self.logger.info(f"{prefix}Start with 'tensorboard --logdir {s.parent}', view at http://localhost:6006/") self.tb = SummaryWriter(str(s)) # W&B - if wandb and 'wandb' in self.include: - wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith('wandb-artifact://') - run_id = torch.load(self.weights).get('wandb_id') if self.opt.resume and not wandb_artifact_resume else None + if wandb and "wandb" in self.include: + wandb_artifact_resume = isinstance(self.opt.resume, str) and self.opt.resume.startswith("wandb-artifact://") + run_id = torch.load(self.weights).get("wandb_id") if self.opt.resume and not wandb_artifact_resume else None self.opt.hyp = self.hyp # add hyperparameters self.wandb = WandbLogger(self.opt, run_id) else: @@ -65,7 +74,7 @@ def __init__(self, save_dir=None, weights=None, opt=None, hyp=None, logger=None, def on_pretrain_routine_end(self): # Callback runs on pre-train routine end - paths = self.save_dir.glob('*labels*.jpg') # training labels + paths = self.save_dir.glob("*labels*.jpg") # training labels if self.wandb: self.wandb.log({"Labels": [wandb.Image(str(x), caption=x.name) for x in paths]}) @@ -75,14 +84,14 @@ def on_train_batch_end(self, ni, model, imgs, targets, paths, plots, sync_bn): if ni == 0: if not sync_bn: # tb.add_graph() --sync known issue https://github.com/ultralytics/yolov5/issues/3754 with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress jit trace warning + warnings.simplefilter("ignore") # suppress jit trace warning self.tb.add_graph(torch.jit.trace(de_parallel(model), imgs[0:1], strict=False), []) if ni < 3: - f = self.save_dir / f'train_batch{ni}.jpg' # filename + f = self.save_dir / f"train_batch{ni}.jpg" # filename Thread(target=plot_images, args=(imgs, targets, paths, f), daemon=True).start() if self.wandb and ni == 10: - files = sorted(self.save_dir.glob('train*.jpg')) - self.wandb.log({'Mosaics': [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) + files = sorted(self.save_dir.glob("train*.jpg")) + self.wandb.log({"Mosaics": [wandb.Image(str(f), caption=f.name) for f in files if f.exists()]}) def on_train_epoch_end(self, epoch): # Callback runs on train epoch end @@ -97,18 +106,18 @@ def on_val_image_end(self, pred, predn, path, names, im): def on_val_end(self): # Callback runs on val end if self.wandb: - files = sorted(self.save_dir.glob('val*.jpg')) + files = sorted(self.save_dir.glob("val*.jpg")) self.wandb.log({"Validation": [wandb.Image(str(f), caption=f.name) for f in files]}) def on_fit_epoch_end(self, vals, epoch, best_fitness, fi): # Callback runs at the end of each fit (train+val) epoch x = {k: v for k, v in 
zip(self.keys, vals)} # dict if self.csv: - file = self.save_dir / 'results.csv' + file = self.save_dir / "results.csv" n = len(x) + 1 # number of cols - s = '' if file.exists() else (('%20s,' * n % tuple(['epoch'] + self.keys)).rstrip(',') + '\n') # add header - with open(file, 'a') as f: - f.write(s + ('%20.5g,' * n % tuple([epoch] + vals)).rstrip(',') + '\n') + s = "" if file.exists() else (("%20s," * n % tuple(["epoch"] + self.keys)).rstrip(",") + "\n") # add header + with open(file, "a") as f: + f.write(s + ("%20.5g," * n % tuple([epoch] + vals)).rstrip(",") + "\n") if self.tb: for k, v in x.items(): @@ -127,22 +136,26 @@ def on_model_save(self, last, epoch, final_epoch, best_fitness, fi): def on_train_end(self, last, best, plots, epoch): # Callback runs on training end if plots: - plot_results(file=self.save_dir / 'results.csv') # save results.png - files = ['results.png', 'confusion_matrix.png', *[f'{x}_curve.png' for x in ('F1', 'PR', 'P', 'R')]] + plot_results(file=self.save_dir / "results.csv") # save results.png + files = ["results.png", "confusion_matrix.png", *[f"{x}_curve.png" for x in ("F1", "PR", "P", "R")]] files = [(self.save_dir / f) for f in files if (self.save_dir / f).exists()] # filter if self.tb: import cv2 + for f in files: - self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats='HWC') + self.tb.add_image(f.stem, cv2.imread(str(f))[..., ::-1], epoch, dataformats="HWC") if self.wandb: self.wandb.log({"Results": [wandb.Image(str(f), caption=f.name) for f in files]}) # Calling wandb.log. TODO: Refactor this into WandbLogger.log_model if not self.opt.evolve: - wandb.log_artifact(str(best if best.exists() else last), type='model', - name='run_' + self.wandb.wandb_run.id + '_model', - aliases=['latest', 'best', 'stripped']) + wandb.log_artifact( + str(best if best.exists() else last), + type="model", + name="run_" + self.wandb.wandb_run.id + "_model", + aliases=["latest", "best", "stripped"], + ) self.wandb.finish_run() else: self.wandb.finish_run() diff --git a/utils/loggers/wandb/README.md b/utils/loggers/wandb/README.md index dd7dc1e46d45..ddf94b6a1537 100644 --- a/utils/loggers/wandb/README.md +++ b/utils/loggers/wandb/README.md @@ -1,60 +1,64 @@ πŸ“š This guide explains how to use **Weights & Biases** (W&B) with YOLOv5 πŸš€. UPDATED 29 September 2021. -* [About Weights & Biases](#about-weights-&-biases) -* [First-Time Setup](#first-time-setup) -* [Viewing runs](#viewing-runs) -* [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) -* [Reports: Share your work with the world!](#reports) + +- [About Weights & Biases](#about-weights-&-biases) +- [First-Time Setup](#first-time-setup) +- [Viewing runs](#viewing-runs) +- [Advanced Usage: Dataset Versioning and Evaluation](#advanced-usage) +- [Reports: Share your work with the world!](#reports) ## About Weights & Biases + Think of [W&B](https://wandb.ai/site?utm_campaign=repo_yolo_wandbtutorial) like GitHub for machine learning models. With a few lines of code, save everything you need to debug, compare and reproduce your models β€” architecture, hyperparameters, git commits, model weights, GPU usage, and even datasets and predictions. Used by top researchers including teams at OpenAI, Lyft, Github, and MILA, W&B is part of the new standard of best practices for machine learning. 
How W&B can help you optimize your machine learning workflows:

- * [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time
- * [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically
- * [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization
- * [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators
- * [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently
- * [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models
+- [Debug](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Free-2) model performance in real time
+- [GPU usage](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#System-4) visualized automatically
+- [Custom charts](https://wandb.ai/wandb/customizable-charts/reports/Powerful-Custom-Charts-To-Debug-Model-Peformance--VmlldzoyNzY4ODI) for powerful, extensible visualization
+- [Share insights](https://wandb.ai/wandb/getting-started/reports/Visualize-Debug-Machine-Learning-Models--VmlldzoyNzY5MDk#Share-8) interactively with collaborators
+- [Optimize hyperparameters](https://docs.wandb.com/sweeps) efficiently
+- [Track](https://docs.wandb.com/artifacts) datasets, pipelines, and production models

## First-Time Setup
+
Toggle Details When you first train, W&B will prompt you to create a new account and will generate an **API key** for you. If you are an existing user you can retrieve your key from https://wandb.ai/authorize. This key is used to tell W&B where to log your data. You only need to supply your key once, and then it is remembered on the same device. W&B will create a cloud **project** (default is 'YOLOv5') for your training runs, and each new training run will be provided a unique run **name** within that project as project/name. You can also manually set your project and run name as: - ```shell - $ python train.py --project ... --name ... - ``` +```shell +$ python train.py --project ... --name ... +``` YOLOv5 notebook example: Open In Colab Open In Kaggle Screen Shot 2021-09-29 at 10 23 13 PM -
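A minimal sketch of the first-time setup flow described above; the project and run names are examples, not defaults.

```bash
pip install wandb  # install the W&B client
wandb login        # paste the API key from https://wandb.ai/authorize when prompted
python train.py --project YOLOv5 --name exp-baseline  # --project/--name values are illustrative
```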
## Viewing Runs +
Toggle Details

Run information streams from your environment to the W&B cloud console as you train. This allows you to monitor and even cancel runs in realtime. All important information is logged:

- * Training & Validation losses
- * Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
- * Learning Rate over time
- * A bounding box debugging panel, showing the training progress over time
- * GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
- * System: Disk I/0, CPU utilization, RAM memory usage
- * Your trained model as W&B Artifact
- * Environment: OS and Python types, Git repository and state, **training command**
+- Training & Validation losses
+- Metrics: Precision, Recall, mAP@0.5, mAP@0.5:0.95
+- Learning Rate over time
+- A bounding box debugging panel, showing the training progress over time
+- GPU: Type, **GPU Utilization**, power, temperature, **CUDA memory usage**
+- System: Disk I/O, CPU utilization, RAM memory usage
+- Your trained model as W&B Artifact
+- Environment: OS and Python types, Git repository and state, **training command**

Weights & Biases dashboard

-
## Advanced Usage
+
You can leverage W&B artifacts and Tables integration to easily visualize and manage your datasets, models and training evaluations. Here are some quick examples to get you started.
+

1. Visualize and Version Datasets

Log, visualize, dynamically query, and understand your data with W&B Tables. You can use the following command to log your dataset as a W&B Table. This will generate a {dataset}_wandb.yaml file which can be used to train from dataset artifact.

@@ -87,7 +91,7 @@

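Judging from the argparse flags in the `utils/loggers/wandb/log_dataset.py` diff further down in this patch, the command referenced above presumably takes this form (the dataset path is that script's default, shown here explicitly):

```bash
# Log a dataset as a W&B artifact/Table; flags match log_dataset.py's parser
python utils/loggers/wandb/log_dataset.py --project YOLOv5 --data data/coco128.yaml
```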
4: Save model checkpoints as artifacts

- To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base cammand, where `n` represents checkpoint interval.
+ To enable saving and versioning checkpoints of your experiment, pass `--save_period n` with the base command, where `n` represents checkpoint interval.
You can also log both the dataset and model checkpoints simultaneously. If not passed, only the final model will be logged
@@ -123,13 +127,11 @@ Any run can be resumed using artifacts if the --resume argument sta
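As a concrete sketch of the flag described above: checkpointing every epoch for the custom-data runs configured in patches 1-2 might look like this, where the data and weights values are illustrative.

```bash
# Save and version a checkpoint artifact every epoch (n=1); other flag values are examples
python train.py --data data/my_data.yaml --weights yolov5n.pt --save_period 1
```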
-

Reports

-W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publically share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). +W&B Reports can be created from your saved runs for sharing online. Once a report is created you will receive a link you can use to publicly share your results. Here is an example report created from the COCO128 tutorial trainings of all four YOLOv5 models ([link](https://wandb.ai/glenn-jocher/yolov5_tutorial/reports/YOLOv5-COCO128-Tutorial-Results--VmlldzozMDI5OTY)). Weights & Biases Reports - ## Environments YOLOv5 may be run in any of the following up-to-date verified environments (with all dependencies including [CUDA](https://developer.nvidia.com/cuda)/[CUDNN](https://developer.nvidia.com/cudnn), [Python](https://www.python.org/) and [PyTorch](https://pytorch.org/) preinstalled): @@ -139,7 +141,6 @@ YOLOv5 may be run in any of the following up-to-date verified environments (with - **Amazon** Deep Learning AMI. See [AWS Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/AWS-Quickstart) - **Docker Image**. See [Docker Quickstart Guide](https://github.com/ultralytics/yolov5/wiki/Docker-Quickstart) Docker Pulls - ## Status ![CI CPU testing](https://github.com/ultralytics/yolov5/workflows/CI%20CPU%20testing/badge.svg) diff --git a/utils/loggers/wandb/log_dataset.py b/utils/loggers/wandb/log_dataset.py index 8447272cdb48..db68a2aa9cc9 100644 --- a/utils/loggers/wandb/log_dataset.py +++ b/utils/loggers/wandb/log_dataset.py @@ -2,20 +2,20 @@ from wandb_utils import WandbLogger -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' +WANDB_ARTIFACT_PREFIX = "wandb-artifact://" def create_dataset_artifact(opt): - logger = WandbLogger(opt, None, job_type='Dataset Creation') # TODO: return value unused + WandbLogger(opt, None, job_type="Dataset Creation") # TODO: return value unused -if __name__ == '__main__': +if __name__ == "__main__": parser = argparse.ArgumentParser() - parser.add_argument('--data', type=str, default='data/coco128.yaml', help='data.yaml path') - parser.add_argument('--single-cls', action='store_true', help='train as single-class dataset') - parser.add_argument('--project', type=str, default='YOLOv5', help='name of W&B Project') - parser.add_argument('--entity', default=None, help='W&B entity') - parser.add_argument('--name', type=str, default='log dataset', help='name of W&B run') + parser.add_argument("--data", type=str, default="data/coco128.yaml", help="data.yaml path") + parser.add_argument("--single-cls", action="store_true", help="train as single-class dataset") + parser.add_argument("--project", type=str, default="YOLOv5", help="name of W&B Project") + parser.add_argument("--entity", default=None, help="W&B entity") + parser.add_argument("--name", type=str, default="log dataset", help="name of W&B run") opt = parser.parse_args() opt.resume = False # Explicitly disallow resume check for dataset upload job diff --git a/utils/loggers/wandb/sweep.py b/utils/loggers/wandb/sweep.py index fdabec4eb63b..5b99dc705c8f 100644 --- a/utils/loggers/wandb/sweep.py +++ b/utils/loggers/wandb/sweep.py @@ -8,10 +8,10 @@ if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -from train import train, parse_opt +from train import parse_opt, train +from utils.callbacks import Callbacks from 
utils.general import increment_path from utils.torch_utils import select_device -from utils.callbacks import Callbacks def sweep(): diff --git a/utils/loggers/wandb/sweep.yaml b/utils/loggers/wandb/sweep.yaml index c3727de82d4a..c7790d75f6b2 100644 --- a/utils/loggers/wandb/sweep.yaml +++ b/utils/loggers/wandb/sweep.yaml @@ -1,17 +1,17 @@ # Hyperparameters for training -# To set range- +# To set range- # Provide min and max values as: # parameter: -# +# # min: scalar # max: scalar # OR # # Set a specific list of search space- -# parameter: +# parameter: # values: [scalar1, scalar2, scalar3...] -# -# You can use grid, bayesian and hyperopt search strategy +# +# You can use grid, bayesian and hyperopt search strategy # For more info on configuring sweeps visit - https://docs.wandb.ai/guides/sweeps/configuration program: utils/loggers/wandb/sweep.py diff --git a/utils/loggers/wandb/wandb_utils.py b/utils/loggers/wandb/wandb_utils.py index 92fdd27bb004..6913328da984 100644 --- a/utils/loggers/wandb/wandb_utils.py +++ b/utils/loggers/wandb/wandb_utils.py @@ -15,30 +15,29 @@ if str(ROOT) not in sys.path: sys.path.append(str(ROOT)) # add ROOT to PATH -from utils.datasets import LoadImagesAndLabels -from utils.datasets import img2label_paths +from utils.datasets import LoadImagesAndLabels, img2label_paths from utils.general import check_dataset, check_file -RANK = int(os.getenv('RANK', -1)) +RANK = int(os.getenv("RANK", -1)) try: import wandb - assert hasattr(wandb, '__version__') # verify package import not local dir - if pkg.parse_version(wandb.__version__) >= pkg.parse_version('0.12.2') and RANK in [0, -1]: + assert hasattr(wandb, "__version__") # verify package import not local dir + if pkg.parse_version(wandb.__version__) >= pkg.parse_version("0.12.2") and RANK in [0, -1]: wandb.login(timeout=30) except (ImportError, AssertionError): wandb = None -WANDB_ARTIFACT_PREFIX = 'wandb-artifact://' +WANDB_ARTIFACT_PREFIX = "wandb-artifact://" def remove_prefix(from_string, prefix=WANDB_ARTIFACT_PREFIX): - return from_string[len(prefix):] + return from_string[len(prefix) :] def check_wandb_config_file(data_config_file): - wandb_config = '_wandb.'.join(data_config_file.rsplit('.', 1)) # updated data.yaml path + wandb_config = "_wandb.".join(data_config_file.rsplit(".", 1)) # updated data.yaml path if Path(wandb_config).is_file(): return wandb_config return data_config_file @@ -47,13 +46,15 @@ def check_wandb_config_file(data_config_file): def check_wandb_dataset(data_file): is_trainset_wandb_artifact = False is_valset_wandb_artifact = False - if check_file(data_file) and data_file.endswith('.yaml'): - with open(data_file, errors='ignore') as f: + if check_file(data_file) and data_file.endswith(".yaml"): + with open(data_file, errors="ignore") as f: data_dict = yaml.safe_load(f) - is_trainset_wandb_artifact = (isinstance(data_dict['train'], str) and - data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX)) - is_valset_wandb_artifact = (isinstance(data_dict['val'], str) and - data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX)) + is_trainset_wandb_artifact = isinstance(data_dict["train"], str) and data_dict["train"].startswith( + WANDB_ARTIFACT_PREFIX + ) + is_valset_wandb_artifact = isinstance(data_dict["val"], str) and data_dict["val"].startswith( + WANDB_ARTIFACT_PREFIX + ) if is_trainset_wandb_artifact or is_valset_wandb_artifact: return data_dict else: @@ -65,7 +66,7 @@ def get_run_info(run_path): run_id = run_path.stem project = run_path.parent.stem entity = run_path.parent.parent.stem - 
model_artifact_name = 'run_' + run_id + '_model' + model_artifact_name = "run_" + run_id + "_model" return entity, project, run_id, model_artifact_name @@ -76,7 +77,7 @@ def check_wandb_resume(opt): if RANK not in [-1, 0]: # For resuming DDP runs entity, project, run_id, model_artifact_name = get_run_info(opt.resume) api = wandb.Api() - artifact = api.artifact(entity + '/' + project + '/' + model_artifact_name + ':latest') + artifact = api.artifact(entity + "/" + project + "/" + model_artifact_name + ":latest") modeldir = artifact.download() opt.weights = str(Path(modeldir) / "last.pt") return True @@ -84,55 +85,54 @@ def check_wandb_resume(opt): def process_wandb_config_ddp_mode(opt): - with open(check_file(opt.data), errors='ignore') as f: + with open(check_file(opt.data), errors="ignore") as f: data_dict = yaml.safe_load(f) # data dict train_dir, val_dir = None, None - if isinstance(data_dict['train'], str) and data_dict['train'].startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(data_dict["train"], str) and data_dict["train"].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() - train_artifact = api.artifact(remove_prefix(data_dict['train']) + ':' + opt.artifact_alias) + train_artifact = api.artifact(remove_prefix(data_dict["train"]) + ":" + opt.artifact_alias) train_dir = train_artifact.download() - train_path = Path(train_dir) / 'data/images/' - data_dict['train'] = str(train_path) + train_path = Path(train_dir) / "data/images/" + data_dict["train"] = str(train_path) - if isinstance(data_dict['val'], str) and data_dict['val'].startswith(WANDB_ARTIFACT_PREFIX): + if isinstance(data_dict["val"], str) and data_dict["val"].startswith(WANDB_ARTIFACT_PREFIX): api = wandb.Api() - val_artifact = api.artifact(remove_prefix(data_dict['val']) + ':' + opt.artifact_alias) + val_artifact = api.artifact(remove_prefix(data_dict["val"]) + ":" + opt.artifact_alias) val_dir = val_artifact.download() - val_path = Path(val_dir) / 'data/images/' - data_dict['val'] = str(val_path) + val_path = Path(val_dir) / "data/images/" + data_dict["val"] = str(val_path) if train_dir or val_dir: - ddp_data_path = str(Path(val_dir) / 'wandb_local_data.yaml') - with open(ddp_data_path, 'w') as f: + ddp_data_path = str(Path(val_dir) / "wandb_local_data.yaml") + with open(ddp_data_path, "w") as f: yaml.safe_dump(data_dict, f) opt.data = ddp_data_path -class WandbLogger(): - """Log training runs, datasets, models, and predictions to Weights & Biases. +class WandbLogger: + """ + Log training runs, datasets, models, and predictions to Weights & Biases. - This logger sends information to W&B at wandb.ai. By default, this information - includes hyperparameters, system configuration and metrics, model metrics, - and basic data metrics and analyses. + This logger sends information to W&B at wandb.ai. By default, this information includes hyperparameters, system + configuration and metrics, model metrics, and basic data metrics and analyses. - By providing additional command line arguments to train.py, datasets, - models and predictions can also be logged. + By providing additional command line arguments to train.py, datasets, models and predictions can also be logged. 
For more on how this logger is used, see the Weights & Biases documentation: https://docs.wandb.com/guides/integrations/yolov5 """ - def __init__(self, opt, run_id=None, job_type='Training'): + def __init__(self, opt, run_id=None, job_type="Training"): """ - Initialize WandbLogger instance - Upload dataset if opt.upload_dataset is True - - Setup trainig processes if job_type is 'Training' + - Setup training processes if job_type is 'Training'. - arguments: + Arguments: opt (namespace) -- Commandline arguments for this run run_id (str) -- Run ID of W&B run to be resumed - job_type (str) -- To set the job_type for this run + job_type (str) -- To set the job_type for this run - """ + """ # Pre-training routine -- self.job_type = job_type self.wandb, self.wandb_run = wandb, None if not wandb else wandb.run @@ -150,25 +150,29 @@ def __init__(self, opt, run_id=None, job_type='Training'): if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): entity, project, run_id, model_artifact_name = get_run_info(opt.resume) model_artifact_name = WANDB_ARTIFACT_PREFIX + model_artifact_name - assert wandb, 'install wandb to resume wandb runs' + assert wandb, "install wandb to resume wandb runs" # Resume wandb-artifact:// runs here| workaround for not overwriting wandb.config - self.wandb_run = wandb.init(id=run_id, - project=project, - entity=entity, - resume='allow', - allow_val_change=True) + self.wandb_run = wandb.init( + id=run_id, project=project, entity=entity, resume="allow", allow_val_change=True + ) opt.resume = model_artifact_name elif self.wandb: - self.wandb_run = wandb.init(config=opt, - resume="allow", - project='YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem, - entity=opt.entity, - name=opt.name if opt.name != 'exp' else None, - job_type=job_type, - id=run_id, - allow_val_change=True) if not wandb.run else wandb.run + self.wandb_run = ( + wandb.init( + config=opt, + resume="allow", + project="YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem, + entity=opt.entity, + name=opt.name if opt.name != "exp" else None, + job_type=job_type, + id=run_id, + allow_val_change=True, + ) + if not wandb.run + else wandb.run + ) if self.wandb_run: - if self.job_type == 'Training': + if self.job_type == "Training": if opt.upload_dataset: if not opt.resume: self.wandb_artifact_data_dict = self.check_and_upload_dataset(opt) @@ -184,29 +188,28 @@ def __init__(self, opt, run_id=None, job_type='Training'): self.wandb_artifact_data_dict = self.wandb_artifact_data_dict or self.data_dict # write data_dict to config. useful for resuming from artifacts. Do this only when not resuming. - self.wandb_run.config.update({'data_dict': self.wandb_artifact_data_dict}, - allow_val_change=True) + self.wandb_run.config.update({"data_dict": self.wandb_artifact_data_dict}, allow_val_change=True) self.setup_training(opt) - if self.job_type == 'Dataset Creation': + if self.job_type == "Dataset Creation": self.data_dict = self.check_and_upload_dataset(opt) def check_and_upload_dataset(self, opt): """ - Check if the dataset format is compatible and upload it as W&B artifact + Check if the dataset format is compatible and upload it as W&B artifact. - arguments: + Arguments: opt (namespace)-- Commandline arguments for current run - returns: + Returns: Updated dataset info dictionary where local dataset paths are replaced by WAND_ARFACT_PREFIX links. 
""" - assert wandb, 'Install wandb to upload dataset' - config_path = self.log_dataset_artifact(opt.data, - opt.single_cls, - 'YOLOv5' if opt.project == 'runs/train' else Path(opt.project).stem) + assert wandb, "Install wandb to upload dataset" + config_path = self.log_dataset_artifact( + opt.data, opt.single_cls, "YOLOv5" if opt.project == "runs/train" else Path(opt.project).stem + ) print("Created dataset config file ", config_path) - with open(config_path, errors='ignore') as f: + with open(config_path, errors="ignore") as f: wandb_data_dict = yaml.safe_load(f) return wandb_data_dict @@ -215,9 +218,9 @@ def setup_training(self, opt): Setup the necessary processes for training YOLO models: - Attempt to download model checkpoint and dataset artifacts if opt.resume stats with WANDB_ARTIFACT_PREFIX - Update data_dict, to contain info of previous run if resumed and the paths of dataset artifact if downloaded - - Setup log_dict, initialize bbox_interval + - Setup log_dict, initialize bbox_interval. - arguments: + Arguments: opt (namespace) -- commandline arguments for this run """ @@ -228,22 +231,29 @@ def setup_training(self, opt): if modeldir: self.weights = Path(modeldir) / "last.pt" config = self.wandb_run.config - opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = str( - self.weights), config.save_period, config.batch_size, config.bbox_interval, config.epochs, \ - config.hyp + opt.weights, opt.save_period, opt.batch_size, opt.bbox_interval, opt.epochs, opt.hyp = ( + str(self.weights), + config.save_period, + config.batch_size, + config.bbox_interval, + config.epochs, + config.hyp, + ) data_dict = self.data_dict if self.val_artifact is None: # If --upload_dataset is set, use the existing artifact, don't download - self.train_artifact_path, self.train_artifact = self.download_dataset_artifact(data_dict.get('train'), - opt.artifact_alias) - self.val_artifact_path, self.val_artifact = self.download_dataset_artifact(data_dict.get('val'), - opt.artifact_alias) + self.train_artifact_path, self.train_artifact = self.download_dataset_artifact( + data_dict.get("train"), opt.artifact_alias + ) + self.val_artifact_path, self.val_artifact = self.download_dataset_artifact( + data_dict.get("val"), opt.artifact_alias + ) if self.train_artifact_path is not None: - train_path = Path(self.train_artifact_path) / 'data/images/' - data_dict['train'] = str(train_path) + train_path = Path(self.train_artifact_path) / "data/images/" + data_dict["train"] = str(train_path) if self.val_artifact_path is not None: - val_path = Path(self.val_artifact_path) / 'data/images/' - data_dict['val'] = str(val_path) + val_path = Path(self.val_artifact_path) / "data/images/" + data_dict["val"] = str(val_path) if self.val_artifact is not None: self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") @@ -260,104 +270,115 @@ def setup_training(self, opt): def download_dataset_artifact(self, path, alias): """ - download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX + Download the model checkpoint artifact if the path starts with WANDB_ARTIFACT_PREFIX. 
- arguments: + Arguments: path -- path of the dataset to be used for training alias (str)-- alias of the artifact to be download/used for training - returns: - (str, wandb.Artifact) -- path of the downladed dataset and it's corresponding artifact object if dataset + Returns: + (str, wandb.Artifact) -- path of the downloaded dataset and it's corresponding artifact object if dataset is found otherwise returns (None, None) """ if isinstance(path, str) and path.startswith(WANDB_ARTIFACT_PREFIX): artifact_path = Path(remove_prefix(path, WANDB_ARTIFACT_PREFIX) + ":" + alias) dataset_artifact = wandb.use_artifact(artifact_path.as_posix().replace("\\", "/")) - assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn\'t exist'" + assert dataset_artifact is not None, "'Error: W&B dataset artifact doesn't exist'" datadir = dataset_artifact.download() return datadir, dataset_artifact return None, None def download_model_artifact(self, opt): """ - download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX + Download the model checkpoint artifact if the resume path starts with WANDB_ARTIFACT_PREFIX. - arguments: + Arguments: opt (namespace) -- Commandline arguments for this run """ if opt.resume.startswith(WANDB_ARTIFACT_PREFIX): model_artifact = wandb.use_artifact(remove_prefix(opt.resume, WANDB_ARTIFACT_PREFIX) + ":latest") - assert model_artifact is not None, 'Error: W&B model artifact doesn\'t exist' + assert model_artifact is not None, "Error: W&B model artifact doesn't exist" modeldir = model_artifact.download() - epochs_trained = model_artifact.metadata.get('epochs_trained') - total_epochs = model_artifact.metadata.get('total_epochs') + model_artifact.metadata.get("epochs_trained") + total_epochs = model_artifact.metadata.get("total_epochs") is_finished = total_epochs is None - assert not is_finished, 'training is finished, can only resume incomplete runs.' + assert not is_finished, "training is finished, can only resume incomplete runs." return modeldir, model_artifact return None, None def log_model(self, path, opt, epoch, fitness_score, best_model=False): """ - Log the model checkpoint as W&B artifact + Log the model checkpoint as W&B artifact. - arguments: + Arguments: path (Path) -- Path of directory containing the checkpoints opt (namespace) -- Command line arguments for this run epoch (int) -- Current epoch number - fitness_score (float) -- fitness score for current epoch + fitness_score (float) -- fitness score for current epoch best_model (boolean) -- Boolean representing if the current checkpoint is the best yet. 
""" - model_artifact = wandb.Artifact('run_' + wandb.run.id + '_model', type='model', metadata={ - 'original_url': str(path), - 'epochs_trained': epoch + 1, - 'save period': opt.save_period, - 'project': opt.project, - 'total_epochs': opt.epochs, - 'fitness_score': fitness_score - }) - model_artifact.add_file(str(path / 'last.pt'), name='last.pt') - wandb.log_artifact(model_artifact, - aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), 'best' if best_model else '']) + model_artifact = wandb.Artifact( + "run_" + wandb.run.id + "_model", + type="model", + metadata={ + "original_url": str(path), + "epochs_trained": epoch + 1, + "save period": opt.save_period, + "project": opt.project, + "total_epochs": opt.epochs, + "fitness_score": fitness_score, + }, + ) + model_artifact.add_file(str(path / "last.pt"), name="last.pt") + wandb.log_artifact( + model_artifact, aliases=["latest", "last", "epoch " + str(self.current_epoch), "best" if best_model else ""] + ) print("Saving model artifact on epoch ", epoch + 1) def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config=False): """ - Log the dataset as W&B artifact and return the new data file with W&B links + Log the dataset as W&B artifact and return the new data file with W&B links. - arguments: + Arguments: data_file (str) -- the .yaml file with information about the dataset like - path, classes etc. single_class (boolean) -- train multi-class data as single-class project (str) -- project name. Used to construct the artifact path - overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new + overwrite_config (boolean) -- overwrites the data.yaml file if set to true otherwise creates a new file with _wandb postfix. Eg -> data_wandb.yaml - returns: + Returns: the new .yaml file with artifact links. 
it can be used to start training directly from artifacts """ self.data_dict = check_dataset(data_file) # parse and check data = dict(self.data_dict) - nc, names = (1, ['item']) if single_cls else (int(data['nc']), data['names']) + nc, names = (1, ["item"]) if single_cls else (int(data["nc"]), data["names"]) names = {k: v for k, v in enumerate(names)} # to index dictionary - self.train_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['train'], rect=True, batch_size=1), names, name='train') if data.get('train') else None - self.val_artifact = self.create_dataset_table(LoadImagesAndLabels( - data['val'], rect=True, batch_size=1), names, name='val') if data.get('val') else None - if data.get('train'): - data['train'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'train') - if data.get('val'): - data['val'] = WANDB_ARTIFACT_PREFIX + str(Path(project) / 'val') + self.train_artifact = ( + self.create_dataset_table(LoadImagesAndLabels(data["train"], rect=True, batch_size=1), names, name="train") + if data.get("train") + else None + ) + self.val_artifact = ( + self.create_dataset_table(LoadImagesAndLabels(data["val"], rect=True, batch_size=1), names, name="val") + if data.get("val") + else None + ) + if data.get("train"): + data["train"] = WANDB_ARTIFACT_PREFIX + str(Path(project) / "train") + if data.get("val"): + data["val"] = WANDB_ARTIFACT_PREFIX + str(Path(project) / "val") path = Path(data_file).stem - path = (path if overwrite_config else path + '_wandb') + '.yaml' # updated data.yaml path - data.pop('download', None) - data.pop('path', None) - with open(path, 'w') as f: + path = (path if overwrite_config else path + "_wandb") + ".yaml" # updated data.yaml path + data.pop("download", None) + data.pop("path", None) + with open(path, "w") as f: yaml.safe_dump(data, f) - if self.job_type == 'Training': # builds correct artifact pipeline graph + if self.job_type == "Training": # builds correct artifact pipeline graph self.wandb_run.use_artifact(self.val_artifact) self.wandb_run.use_artifact(self.train_artifact) self.val_artifact.wait() - self.val_table = self.val_artifact.get('val') + self.val_table = self.val_artifact.get("val") self.map_val_table_path() else: self.wandb_run.log_artifact(self.train_artifact) @@ -367,6 +388,7 @@ def log_dataset_artifact(self, data_file, single_cls, project, overwrite_config= def map_val_table_path(self): """ Map the validation dataset Table like name of file -> it's id in the W&B Table. + Useful for - referencing artifacts for evaluation. """ self.val_table_path_map = {} @@ -374,45 +396,51 @@ def map_val_table_path(self): for i, data in enumerate(tqdm(self.val_table.data)): self.val_table_path_map[data[3]] = data[0] - def create_dataset_table(self, dataset, class_to_id, name='dataset'): + def create_dataset_table(self, dataset, class_to_id, name="dataset"): """ Create and return W&B artifact containing W&B Table of the dataset. 
-        arguments:
+        Arguments:
         dataset (LoadImagesAndLabels) -- instance of LoadImagesAndLabels class used to iterate over the data to build Table
         class_to_id (dict(int, str)) -- hash map that maps class ids to labels
         name (str) -- name of the artifact

-        returns:
+        Returns:
         dataset artifact to be logged or used
         """
-        # TODO: Explore multiprocessing to slpit this loop parallelly| This is essential for speeding up the the logging
+        # TODO: Explore multiprocessing to split this loop in parallel | This is essential for speeding up the logging
         artifact = wandb.Artifact(name=name, type="dataset")
         img_files = tqdm([dataset.path]) if isinstance(dataset.path, str) and Path(dataset.path).is_dir() else None
         img_files = tqdm(dataset.img_files) if not img_files else img_files
         for img_file in img_files:
             if Path(img_file).is_dir():
-                artifact.add_dir(img_file, name='data/images')
-                labels_path = 'labels'.join(dataset.path.rsplit('images', 1))
-                artifact.add_dir(labels_path, name='data/labels')
+                artifact.add_dir(img_file, name="data/images")
+                labels_path = "labels".join(dataset.path.rsplit("images", 1))
+                artifact.add_dir(labels_path, name="data/labels")
             else:
-                artifact.add_file(img_file, name='data/images/' + Path(img_file).name)
+                artifact.add_file(img_file, name="data/images/" + Path(img_file).name)
                 label_file = Path(img2label_paths([img_file])[0])
-                artifact.add_file(str(label_file),
-                                  name='data/labels/' + label_file.name) if label_file.exists() else None
+                artifact.add_file(
+                    str(label_file), name="data/labels/" + label_file.name
+                ) if label_file.exists() else None
         table = wandb.Table(columns=["id", "train_image", "Classes", "name"])
-        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in class_to_id.items()])
+        class_set = wandb.Classes([{"id": id, "name": name} for id, name in class_to_id.items()])
         for si, (img, labels, paths, shapes) in enumerate(tqdm(dataset)):
             box_data, img_classes = [], {}
             for cls, *xywh in labels[:, 1:].tolist():
                 cls = int(cls)
-                box_data.append({"position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
-                                 "class_id": cls,
-                                 "box_caption": "%s" % (class_to_id[cls])})
+                box_data.append(
+                    {
+                        "position": {"middle": [xywh[0], xywh[1]], "width": xywh[2], "height": xywh[3]},
+                        "class_id": cls,
+                        "box_caption": f"{class_to_id[cls]}",
+                    }
+                )
                 img_classes[cls] = class_to_id[cls]
             boxes = {"ground_truth": {"box_data": box_data, "class_labels": class_to_id}}  # inference-space
-            table.add_data(si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()),
-                           Path(paths).name)
+            table.add_data(
+                si, wandb.Image(paths, classes=class_set, boxes=boxes), list(img_classes.values()), Path(paths).name
+            )
         artifact.add(table, name)
         return artifact

@@ -420,59 +448,69 @@ def log_training_progress(self, predn, path, names):
         """
         Build evaluation Table. Uses reference from validation dataset table.
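For context on the box dictionaries built in create_dataset_table() above: W&B's "middle" position format takes normalized center-x, center-y, width and height, which is why the YOLO-format xywh labels can be passed through unchanged. A minimal sketch of one ground-truth box (file name and class map are illustrative):

    import wandb

    class_labels = {0: "person"}
    box = {
        "position": {"middle": [0.5, 0.5], "width": 0.2, "height": 0.3},  # normalized xywh
        "class_id": 0,
        "box_caption": "person",
    }
    image = wandb.Image("example.jpg", boxes={"ground_truth": {"box_data": [box], "class_labels": class_labels}})
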
-        arguments:
+        Arguments:
         predn (list): list of predictions in the native space in the format - [xmin, ymin, xmax, ymax, confidence, class]
-        path (str): local path of the current evaluation image 
+        path (str): local path of the current evaluation image
         names (dict(int, str)): hash map that maps class ids to labels
         """
-        class_set = wandb.Classes([{'id': id, 'name': name} for id, name in names.items()])
+        class_set = wandb.Classes([{"id": id, "name": name} for id, name in names.items()])
         box_data = []
         total_conf = 0
         for *xyxy, conf, cls in predn.tolist():
             if conf >= 0.25:
                 box_data.append(
-                    {"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
-                     "class_id": int(cls),
-                     "box_caption": "%s %.3f" % (names[cls], conf),
-                     "scores": {"class_score": conf},
-                     "domain": "pixel"})
+                    {
+                        "position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                        "class_id": int(cls),
+                        "box_caption": f"{names[cls]} {conf:.3f}",
+                        "scores": {"class_score": conf},
+                        "domain": "pixel",
+                    }
+                )
                 total_conf = total_conf + conf
         boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
         id = self.val_table_path_map[Path(path).name]
-        self.result_table.add_data(self.current_epoch,
-                                   id,
-                                   self.val_table.data[id][1],
-                                   wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
-                                   total_conf / max(1, len(box_data))
-                                   )
+        self.result_table.add_data(
+            self.current_epoch,
+            id,
+            self.val_table.data[id][1],
+            wandb.Image(self.val_table.data[id][1], boxes=boxes, classes=class_set),
+            total_conf / max(1, len(box_data)),
+        )

     def val_one_image(self, pred, predn, path, names, im):
         """
-        Log validation data for one image. updates the result Table if validation dataset is uploaded and log bbox media panel
+        Log validation data for one image. Updates the result Table if the validation dataset is uploaded and logs the
+        bbox media panel.

-        arguments:
+        Arguments:
         pred (list): list of scaled predictions in the format - [xmin, ymin, xmax, ymax, confidence, class]
         predn (list): list of predictions in the native space - [xmin, ymin, xmax, ymax, confidence, class]
-        path (str): local path of the current evaluation image 
+        path (str): local path of the current evaluation image
         """
         if self.val_table and self.result_table:  # Log Table if Val dataset is uploaded as artifact
             self.log_training_progress(predn, path, names)

         if len(self.bbox_media_panel_images) < self.max_imgs_to_log and self.current_epoch > 0:
             if self.current_epoch % self.bbox_interval == 0:
-                box_data = [{"position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
-                             "class_id": int(cls),
-                             "box_caption": "%s %.3f" % (names[cls], conf),
-                             "scores": {"class_score": conf},
-                             "domain": "pixel"} for *xyxy, conf, cls in pred.tolist()]
+                box_data = [
+                    {
+                        "position": {"minX": xyxy[0], "minY": xyxy[1], "maxX": xyxy[2], "maxY": xyxy[3]},
+                        "class_id": int(cls),
+                        "box_caption": f"{names[cls]} {conf:.3f}",
+                        "scores": {"class_score": conf},
+                        "domain": "pixel",
+                    }
+                    for *xyxy, conf, cls in pred.tolist()
+                ]
                 boxes = {"predictions": {"box_data": box_data, "class_labels": names}}  # inference-space
                 self.bbox_media_panel_images.append(wandb.Image(im, boxes=boxes, caption=path.name))

     def log(self, log_dict):
         """
-        save the metrics to the logging dictionary
+        Save the metrics to the logging dictionary.
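The log() method here and end_epoch() just below implement buffered logging: metrics accumulate in log_dict and are committed to W&B once per epoch. A hedged usage sketch, with `logger` standing in for the WandbLogger instance and illustrative metric values:

    logger.log({"train/box_loss": 0.045})  # buffered in self.log_dict, nothing sent yet
    logger.log({"metrics/mAP_0.5": 0.61})  # merged into the same pending dict
    logger.end_epoch(best_result=True)     # one wandb.log() commit, plus result Table/artifact upload
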
- arguments: + Arguments: log_dict (Dict) -- metrics/media to be logged in current step """ if self.wandb_run: @@ -481,9 +519,9 @@ def log(self, log_dict): def end_epoch(self, best_result=False): """ - commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. + Commit the log_dict, model artifacts and Tables to W&B and flush the log_dict. - arguments: + Arguments: best_result (boolean): Boolean representing if the result of this evaluation is best or not """ if self.wandb_run: @@ -494,18 +532,18 @@ def end_epoch(self, best_result=False): self.log_dict = {} self.bbox_media_panel_images = [] if self.result_artifact: - self.result_artifact.add(self.result_table, 'result') - wandb.log_artifact(self.result_artifact, aliases=['latest', 'last', 'epoch ' + str(self.current_epoch), - ('best' if best_result else '')]) + self.result_artifact.add(self.result_table, "result") + wandb.log_artifact( + self.result_artifact, + aliases=["latest", "last", "epoch " + str(self.current_epoch), ("best" if best_result else "")], + ) wandb.log({"evaluation": self.result_table}) self.result_table = wandb.Table(["epoch", "id", "ground truth", "prediction", "avg_confidence"]) self.result_artifact = wandb.Artifact("run_" + wandb.run.id + "_progress", "evaluation") def finish_run(self): - """ - Log metrics if any and finish the current W&B run - """ + """Log metrics if any and finish the current W&B run.""" if self.wandb_run: if self.log_dict: with all_logging_disabled(): @@ -515,7 +553,7 @@ def finish_run(self): @contextmanager def all_logging_disabled(highest_level=logging.CRITICAL): - """ source - https://gist.github.com/simon-weber/7853144 + """Source - https://gist.github.com/simon-weber/7853144 A context manager that will prevent any logging messages triggered during the body from being processed. :param highest_level: the maximum logging level in use. This would only need to be changed if a custom level greater than CRITICAL is defined. diff --git a/utils/loss.py b/utils/loss.py index fac432d0edc3..41a93794db7f 100644 --- a/utils/loss.py +++ b/utils/loss.py @@ -1,7 +1,5 @@ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license -""" -Loss functions -""" +"""Loss functions.""" import torch import torch.nn as nn @@ -18,8 +16,8 @@ def smooth_BCE(eps=0.1): # https://github.com/ultralytics/yolov3/issues/238#iss class BCEBlurWithLogitsLoss(nn.Module): # BCEwithLogitLoss() with reduced missing label effects. def __init__(self, alpha=0.05): - super(BCEBlurWithLogitsLoss, self).__init__() - self.loss_fcn = nn.BCEWithLogitsLoss(reduction='none') # must be nn.BCEWithLogitsLoss() + super().__init__() + self.loss_fcn = nn.BCEWithLogitsLoss(reduction="none") # must be nn.BCEWithLogitsLoss() self.alpha = alpha def forward(self, pred, true): @@ -35,12 +33,12 @@ def forward(self, pred, true): class FocalLoss(nn.Module): # Wraps focal loss around existing loss_fcn(), i.e. 
criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(FocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element + self.loss_fcn.reduction = "none" # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) @@ -54,9 +52,9 @@ def forward(self, pred, true): modulating_factor = (1.0 - p_t) ** self.gamma loss *= alpha_factor * modulating_factor - if self.reduction == 'mean': + if self.reduction == "mean": return loss.mean() - elif self.reduction == 'sum': + elif self.reduction == "sum": return loss.sum() else: # 'none' return loss @@ -65,12 +63,12 @@ def forward(self, pred, true): class QFocalLoss(nn.Module): # Wraps Quality focal loss around existing loss_fcn(), i.e. criteria = FocalLoss(nn.BCEWithLogitsLoss(), gamma=1.5) def __init__(self, loss_fcn, gamma=1.5, alpha=0.25): - super(QFocalLoss, self).__init__() + super().__init__() self.loss_fcn = loss_fcn # must be nn.BCEWithLogitsLoss() self.gamma = gamma self.alpha = alpha self.reduction = loss_fcn.reduction - self.loss_fcn.reduction = 'none' # required to apply FL to each element + self.loss_fcn.reduction = "none" # required to apply FL to each element def forward(self, pred, true): loss = self.loss_fcn(pred, true) @@ -80,9 +78,9 @@ def forward(self, pred, true): modulating_factor = torch.abs(true - pred_prob) ** self.gamma loss *= alpha_factor * modulating_factor - if self.reduction == 'mean': + if self.reduction == "mean": return loss.mean() - elif self.reduction == 'sum': + elif self.reduction == "sum": return loss.sum() else: # 'none' return loss @@ -96,22 +94,22 @@ def __init__(self, model, autobalance=False): h = model.hyp # hyperparameters # Define criteria - BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['cls_pw']], device=device)) - BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h['obj_pw']], device=device)) + BCEcls = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["cls_pw"]], device=device)) + BCEobj = nn.BCEWithLogitsLoss(pos_weight=torch.tensor([h["obj_pw"]], device=device)) # Class label smoothing https://arxiv.org/pdf/1902.04103.pdf eqn 3 - self.cp, self.cn = smooth_BCE(eps=h.get('label_smoothing', 0.0)) # positive, negative BCE targets + self.cp, self.cn = smooth_BCE(eps=h.get("label_smoothing", 0.0)) # positive, negative BCE targets # Focal loss - g = h['fl_gamma'] # focal loss gamma + g = h["fl_gamma"] # focal loss gamma if g > 0: BCEcls, BCEobj = FocalLoss(BCEcls, g), FocalLoss(BCEobj, g) det = model.module.model[-1] if is_parallel(model) else model.model[-1] # Detect() module - self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, .02]) # P3-P7 + self.balance = {3: [4.0, 1.0, 0.4]}.get(det.nl, [4.0, 1.0, 0.25, 0.06, 0.02]) # P3-P7 self.ssi = list(det.stride).index(16) if autobalance else 0 # stride 16 index self.BCEcls, self.BCEobj, self.gr, self.hyp, self.autobalance = BCEcls, BCEobj, 1.0, h, autobalance - for k in 'na', 'nc', 'nl', 'anchors': + for k in "na", "nc", "nl", "anchors": setattr(self, k, getattr(det, k)) def __call__(self, p, targets): # predictions, targets, model @@ -129,7 +127,7 @@ def __call__(self, p, targets): # predictions, targets, model ps = pi[b, a, gj, gi] # prediction subset corresponding to targets # Regression - pxy = ps[:, :2].sigmoid() * 2. 
- 0.5 + pxy = ps[:, :2].sigmoid() * 2.0 - 0.5 pwh = (ps[:, 2:4].sigmoid() * 2) ** 2 * anchors[i] pbox = torch.cat((pxy, pwh), 1) # predicted box iou = bbox_iou(pbox.T, tbox[i], x1y1x2y2=False, CIoU=True) # iou(prediction, target) @@ -159,9 +157,9 @@ def __call__(self, p, targets): # predictions, targets, model if self.autobalance: self.balance = [x / self.balance[self.ssi] for x in self.balance] - lbox *= self.hyp['box'] - lobj *= self.hyp['obj'] - lcls *= self.hyp['cls'] + lbox *= self.hyp["box"] + lobj *= self.hyp["obj"] + lcls *= self.hyp["cls"] bs = tobj.shape[0] # batch size return (lbox + lobj + lcls) * bs, torch.cat((lbox, lobj, lcls)).detach() @@ -175,10 +173,20 @@ def build_targets(self, p, targets): targets = torch.cat((targets.repeat(na, 1, 1), ai[:, :, None]), 2) # append anchor indices g = 0.5 # bias - off = torch.tensor([[0, 0], - [1, 0], [0, 1], [-1, 0], [0, -1], # j,k,l,m - # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm - ], device=targets.device).float() * g # offsets + off = ( + torch.tensor( + [ + [0, 0], + [1, 0], + [0, 1], + [-1, 0], + [0, -1], # j,k,l,m + # [1, 1], [1, -1], [-1, 1], [-1, -1], # jk,jm,lk,lm + ], + device=targets.device, + ).float() + * g + ) # offsets for i in range(self.nl): anchors = self.anchors[i] @@ -189,15 +197,15 @@ def build_targets(self, p, targets): if nt: # Matches r = t[:, :, 4:6] / anchors[:, None] # wh ratio - j = torch.max(r, 1. / r).max(2)[0] < self.hyp['anchor_t'] # compare + j = torch.max(r, 1.0 / r).max(2)[0] < self.hyp["anchor_t"] # compare # j = wh_iou(anchors, t[:, 4:6]) > model.hyp['iou_t'] # iou(3,n)=wh_iou(anchors(3,2), gwh(n,2)) t = t[j] # filter # Offsets gxy = t[:, 2:4] # grid xy gxi = gain[[2, 3]] - gxy # inverse - j, k = ((gxy % 1. < g) & (gxy > 1.)).T - l, m = ((gxi % 1. < g) & (gxi > 1.)).T + j, k = ((gxy % 1.0 < g) & (gxy > 1.0)).T + l, m = ((gxi % 1.0 < g) & (gxi > 1.0)).T j = torch.stack((torch.ones_like(j), j, k, l, m)) t = t.repeat((5, 1, 1))[j] offsets = (torch.zeros_like(gxy)[None] + off[:, None])[j] diff --git a/utils/metrics.py b/utils/metrics.py index 4f1b5e2d2c2d..2347195f170f 100644 --- a/utils/metrics.py +++ b/utils/metrics.py @@ -1,7 +1,5 @@ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license -""" -Model validation metrics -""" +"""Model validation metrics.""" import math import warnings @@ -18,8 +16,10 @@ def fitness(x): return (x[:, :4] * w).sum(1) -def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names=()): - """ Compute the average precision, given the recall and precision curves. +def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir=".", names=()): + """ + Compute the average precision, given the recall and precision curves. + Source: https://github.com/rafaelpadilla/Object-Detection-Metrics. # Arguments tp: True positives (nparray, nx1 or nx10). @@ -31,7 +31,6 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names # Returns The average precision as computed in py-faster-rcnn. 
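As background for the AP computation in this file (see compute_ap below), here is a self-contained numeric sketch of the scheme it uses: build a monotone precision envelope, then integrate it over 101 interpolated recall points, COCO style. The values are toy numbers, not from any real run:

    import numpy as np

    recall = np.array([0.2, 0.6, 1.0])
    precision = np.array([1.0, 0.8, 0.5])
    mrec = np.concatenate(([0.0], recall, [1.0]))         # sentinel endpoints
    mpre = np.concatenate(([1.0], precision, [0.0]))
    mpre = np.flip(np.maximum.accumulate(np.flip(mpre)))  # monotone precision envelope
    x = np.linspace(0, 1, 101)                            # 101-point interp (COCO)
    ap = np.trapz(np.interp(x, mrec, mpre), x)            # integrate -> scalar average precision
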
""" - # Sort by objectness i = np.argsort(-conf) tp, conf, pred_cls = tp[i], conf[i], pred_cls[i] @@ -72,24 +71,23 @@ def ap_per_class(tp, conf, pred_cls, target_cls, plot=False, save_dir='.', names # Compute F1 (harmonic mean of precision and recall) f1 = 2 * p * r / (p + r + 1e-16) if plot: - plot_pr_curve(px, py, ap, Path(save_dir) / 'PR_curve.png', names) - plot_mc_curve(px, f1, Path(save_dir) / 'F1_curve.png', names, ylabel='F1') - plot_mc_curve(px, p, Path(save_dir) / 'P_curve.png', names, ylabel='Precision') - plot_mc_curve(px, r, Path(save_dir) / 'R_curve.png', names, ylabel='Recall') + plot_pr_curve(px, py, ap, Path(save_dir) / "PR_curve.png", names) + plot_mc_curve(px, f1, Path(save_dir) / "F1_curve.png", names, ylabel="F1") + plot_mc_curve(px, p, Path(save_dir) / "P_curve.png", names, ylabel="Precision") + plot_mc_curve(px, r, Path(save_dir) / "R_curve.png", names, ylabel="Recall") i = f1.mean(0).argmax() # max F1 index - return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype('int32') + return p[:, i], r[:, i], ap, f1[:, i], unique_classes.astype("int32") def compute_ap(recall, precision): - """ Compute the average precision, given the recall and precision curves + """Compute the average precision, given the recall and precision curves # Arguments recall: The recall curve (list) precision: The precision curve (list) # Returns - Average precision, precision curve, recall curve + Average precision, precision curve, recall curve. """ - # Append sentinel values to beginning and end mrec = np.concatenate(([0.0], recall, [1.0])) mpre = np.concatenate(([1.0], precision, [0.0])) @@ -98,8 +96,8 @@ def compute_ap(recall, precision): mpre = np.flip(np.maximum.accumulate(np.flip(mpre))) # Integrate area under curve - method = 'interp' # methods: 'continuous', 'interp' - if method == 'interp': + method = "interp" # methods: 'continuous', 'interp' + if method == "interp": x = np.linspace(0, 1, 101) # 101-point interp (COCO) ap = np.trapz(np.interp(x, mrec, mpre), x) # integrate else: # 'continuous' @@ -119,13 +117,14 @@ def __init__(self, nc, conf=0.25, iou_thres=0.45): def process_batch(self, detections, labels): """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, + y2) format. + Arguments: detections (Array[N, 6]), x1, y1, x2, y2, conf, class labels (Array[M, 5]), class, x1, y1, x2, y2 Returns: - None, updates confusion matrix accordingly + None, updates confusion matrix accordingly. 
""" detections = detections[detections[:, 4] > self.conf] gt_classes = labels[:, 0].int() @@ -160,31 +159,38 @@ def process_batch(self, detections, labels): def matrix(self): return self.matrix - def plot(self, normalize=True, save_dir='', names=()): + def plot(self, normalize=True, save_dir="", names=()): try: import seaborn as sn - array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1E-6) if normalize else 1) # normalize columns + array = self.matrix / ((self.matrix.sum(0).reshape(1, -1) + 1e-6) if normalize else 1) # normalize columns array[array < 0.005] = np.nan # don't annotate (would appear as 0.00) fig = plt.figure(figsize=(12, 9), tight_layout=True) sn.set(font_scale=1.0 if self.nc < 50 else 0.8) # for label size labels = (0 < len(names) < 99) and len(names) == self.nc # apply names to ticklabels with warnings.catch_warnings(): - warnings.simplefilter('ignore') # suppress empty matrix RuntimeWarning: All-NaN slice encountered - sn.heatmap(array, annot=self.nc < 30, annot_kws={"size": 8}, cmap='Blues', fmt='.2f', square=True, - xticklabels=names + ['background FP'] if labels else "auto", - yticklabels=names + ['background FN'] if labels else "auto").set_facecolor((1, 1, 1)) - fig.axes[0].set_xlabel('True') - fig.axes[0].set_ylabel('Predicted') - fig.savefig(Path(save_dir) / 'confusion_matrix.png', dpi=250) + warnings.simplefilter("ignore") # suppress empty matrix RuntimeWarning: All-NaN slice encountered + sn.heatmap( + array, + annot=self.nc < 30, + annot_kws={"size": 8}, + cmap="Blues", + fmt=".2f", + square=True, + xticklabels=names + ["background FP"] if labels else "auto", + yticklabels=names + ["background FN"] if labels else "auto", + ).set_facecolor((1, 1, 1)) + fig.axes[0].set_xlabel("True") + fig.axes[0].set_ylabel("Predicted") + fig.savefig(Path(save_dir) / "confusion_matrix.png", dpi=250) plt.close() except Exception as e: - print(f'WARNING: ConfusionMatrix plot failure: {e}') + print(f"WARNING: ConfusionMatrix plot failure: {e}") def print(self): for i in range(self.nc + 1): - print(' '.join(map(str, self.matrix[i]))) + print(" ".join(map(str, self.matrix[i]))) def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps=1e-7): @@ -202,8 +208,9 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= b2_y1, b2_y2 = box2[1] - box2[3] / 2, box2[1] + box2[3] / 2 # Intersection area - inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * \ - (torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1)).clamp(0) + inter = (torch.min(b1_x2, b2_x2) - torch.max(b1_x1, b2_x1)).clamp(0) * ( + torch.min(b1_y2, b2_y2) - torch.max(b1_y1, b2_y1) + ).clamp(0) # Union Area w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps @@ -215,13 +222,14 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= cw = torch.max(b1_x2, b2_x2) - torch.min(b1_x1, b2_x1) # convex (smallest enclosing box) width ch = torch.max(b1_y2, b2_y2) - torch.min(b1_y1, b2_y1) # convex height if CIoU or DIoU: # Distance or Complete IoU https://arxiv.org/abs/1911.08287v1 - c2 = cw ** 2 + ch ** 2 + eps # convex diagonal squared - rho2 = ((b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + - (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2) / 4 # center distance squared + c2 = cw**2 + ch**2 + eps # convex diagonal squared + rho2 = ( + (b2_x1 + b2_x2 - b1_x1 - b1_x2) ** 2 + (b2_y1 + b2_y2 - b1_y1 - b1_y2) ** 2 + ) / 4 # center distance squared if DIoU: return iou - rho2 / c2 # DIoU elif CIoU: # 
https://github.com/Zzh-tju/DIoU-SSD-pytorch/blob/master/utils/box/box_utils.py#L47 - v = (4 / math.pi ** 2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) + v = (4 / math.pi**2) * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2) with torch.no_grad(): alpha = v / (v - iou + (1 + eps)) return iou - (rho2 / c2 + v * alpha) # CIoU @@ -235,14 +243,16 @@ def bbox_iou(box1, box2, x1y1x2y2=True, GIoU=False, DIoU=False, CIoU=False, eps= def box_iou(box1, box2): # https://github.com/pytorch/vision/blob/master/torchvision/ops/boxes.py """ - Return intersection-over-union (Jaccard index) of boxes. - Both sets of boxes are expected to be in (x1, y1, x2, y2) format. + Return intersection-over-union (Jaccard index) of boxes. Both sets of boxes are expected to be in (x1, y1, x2, y2) + format. + Arguments: box1 (Tensor[N, 4]) box2 (Tensor[M, 4]) + Returns: iou (Tensor[N, M]): the NxM matrix containing the pairwise - IoU values for every element in boxes1 and boxes2 + IoU values for every element in boxes1 and boxes2. """ def box_area(box): @@ -257,13 +267,15 @@ def box_area(box): return inter / (area1[:, None] + area2 - inter) # iou = inter / (area1 + area2 - inter) -def bbox_ioa(box1, box2, eps=1E-7): - """ Returns the intersection over box2 area given box1, box2. Boxes are x1y1x2y2 +def bbox_ioa(box1, box2, eps=1e-7): + """ + Returns the intersection over box2 area given box1, box2. + + Boxes are x1y1x2y2 box1: np.array of shape(4) box2: np.array of shape(nx4) - returns: np.array of shape(n) + returns: np.array of shape(n). """ - box2 = box2.transpose() # Get the coordinates of bounding boxes @@ -271,8 +283,9 @@ def bbox_ioa(box1, box2, eps=1E-7): b2_x1, b2_y1, b2_x2, b2_y2 = box2[0], box2[1], box2[2], box2[3] # Intersection area - inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * \ - (np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1)).clip(0) + inter_area = (np.minimum(b1_x2, b2_x2) - np.maximum(b1_x1, b2_x1)).clip(0) * ( + np.minimum(b1_y2, b2_y2) - np.maximum(b1_y1, b2_y1) + ).clip(0) # box2 area box2_area = (b2_x2 - b2_x1) * (b2_y2 - b2_y1) + eps @@ -291,20 +304,21 @@ def wh_iou(wh1, wh2): # Plots ---------------------------------------------------------------------------------------------------------------- -def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): + +def plot_pr_curve(px, py, ap, save_dir="pr_curve.png", names=()): # Precision-recall curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) py = np.stack(py, axis=1) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py.T): - ax.plot(px, y, linewidth=1, label=f'{names[i]} {ap[i, 0]:.3f}') # plot(recall, precision) + ax.plot(px, y, linewidth=1, label=f"{names[i]} {ap[i, 0]:.3f}") # plot(recall, precision) else: - ax.plot(px, py, linewidth=1, color='grey') # plot(recall, precision) + ax.plot(px, py, linewidth=1, color="grey") # plot(recall, precision) - ax.plot(px, py.mean(1), linewidth=3, color='blue', label='all classes %.3f mAP@0.5' % ap[:, 0].mean()) - ax.set_xlabel('Recall') - ax.set_ylabel('Precision') + ax.plot(px, py.mean(1), linewidth=3, color="blue", label=f"all classes {ap[:, 0].mean():.3f} mAP@0.5") + ax.set_xlabel("Recall") + ax.set_ylabel("Precision") ax.set_xlim(0, 1) ax.set_ylim(0, 1) plt.legend(bbox_to_anchor=(1.04, 1), loc="upper left") @@ -312,18 +326,18 @@ def plot_pr_curve(px, py, ap, save_dir='pr_curve.png', names=()): plt.close() -def plot_mc_curve(px, py, save_dir='mc_curve.png', names=(), 
xlabel='Confidence', ylabel='Metric'): +def plot_mc_curve(px, py, save_dir="mc_curve.png", names=(), xlabel="Confidence", ylabel="Metric"): # Metric-confidence curve fig, ax = plt.subplots(1, 1, figsize=(9, 6), tight_layout=True) if 0 < len(names) < 21: # display per-class legend if < 21 classes for i, y in enumerate(py): - ax.plot(px, y, linewidth=1, label=f'{names[i]}') # plot(confidence, metric) + ax.plot(px, y, linewidth=1, label=f"{names[i]}") # plot(confidence, metric) else: - ax.plot(px, py.T, linewidth=1, color='grey') # plot(confidence, metric) + ax.plot(px, py.T, linewidth=1, color="grey") # plot(confidence, metric) y = py.mean(0) - ax.plot(px, y, linewidth=3, color='blue', label=f'all classes {y.max():.2f} at {px[y.argmax()]:.3f}') + ax.plot(px, y, linewidth=3, color="blue", label=f"all classes {y.max():.2f} at {px[y.argmax()]:.3f}") ax.set_xlabel(xlabel) ax.set_ylabel(ylabel) ax.set_xlim(0, 1) diff --git a/utils/plots.py b/utils/plots.py index 00b8f88811e2..27cc52534cbf 100644 --- a/utils/plots.py +++ b/utils/plots.py @@ -1,7 +1,5 @@ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license -""" -Plotting utils -""" +"""Plotting utils.""" import math import os @@ -17,23 +15,43 @@ import torch from PIL import Image, ImageDraw, ImageFont -from utils.general import user_config_dir, is_ascii, is_chinese, xywh2xyxy, xyxy2xywh +from utils.general import is_ascii, is_chinese, user_config_dir, xywh2xyxy, xyxy2xywh from utils.metrics import fitness # Settings CONFIG_DIR = user_config_dir() # Ultralytics settings dir -RANK = int(os.getenv('RANK', -1)) -matplotlib.rc('font', **{'size': 11}) -matplotlib.use('Agg') # for writing to files only +RANK = int(os.getenv("RANK", -1)) +matplotlib.rc("font", **{"size": 11}) +matplotlib.use("Agg") # for writing to files only class Colors: # Ultralytics color palette https://ultralytics.com/ def __init__(self): # hex = matplotlib.colors.TABLEAU_COLORS.values() - hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - self.palette = [self.hex2rgb('#' + c) for c in hex] + hex = ( + "FF3838", + "FF9D97", + "FF701F", + "FFB21D", + "CFD231", + "48F90A", + "92CC17", + "3DDB86", + "1A9334", + "00D4BB", + "2C99A8", + "00C2FF", + "344593", + "6473FF", + "0018EC", + "8438FF", + "520085", + "CB38FF", + "FF95C8", + "FF37C7", + ) + self.palette = [self.hex2rgb("#" + c) for c in hex] self.n = len(self.palette) def __call__(self, i, bgr=False): @@ -42,21 +60,21 @@ def __call__(self, i, bgr=False): @staticmethod def hex2rgb(h): # rgb order (PIL) - return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4)) + return tuple(int(h[1 + i : 1 + i + 2], 16) for i in (0, 2, 4)) colors = Colors() # create instance for 'from utils.plots import colors' -def check_font(font='Arial.ttf', size=10): +def check_font(font="Arial.ttf", size=10): # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary font = Path(font) font = font if font.exists() else (CONFIG_DIR / font.name) try: return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception as e: # download if missing + except Exception: # download if missing url = "https://ultralytics.com/assets/" + font.name - print(f'Downloading {url} to {font}...') + print(f"Downloading {url} to {font}...") torch.hub.download_url_to_file(url, str(font), progress=False) return ImageFont.truetype(str(font), size) @@ -66,29 +84,36 @@ class Annotator: check_font() 
# download TTF if necessary # YOLOv5 Annotator for train/val mosaics and jpgs and detect/hub inference annotations - def __init__(self, im, line_width=None, font_size=None, font='Arial.ttf', pil=False, example='abc'): - assert im.data.contiguous, 'Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images.' + def __init__(self, im, line_width=None, font_size=None, font="Arial.ttf", pil=False, example="abc"): + assert im.data.contiguous, "Image not contiguous. Apply np.ascontiguousarray(im) to Annotator() input images." self.pil = pil or not is_ascii(example) or is_chinese(example) if self.pil: # use PIL self.im = im if isinstance(im, Image.Image) else Image.fromarray(im) self.draw = ImageDraw.Draw(self.im) - self.font = check_font(font='Arial.Unicode.ttf' if is_chinese(example) else font, - size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12)) + self.font = check_font( + font="Arial.Unicode.ttf" if is_chinese(example) else font, + size=font_size or max(round(sum(self.im.size) / 2 * 0.035), 12), + ) else: # use cv2 self.im = im self.lw = line_width or max(round(sum(im.shape) / 2 * 0.003), 2) # line width - def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): + def box_label(self, box, label="", color=(128, 128, 128), txt_color=(255, 255, 255)): # Add one xyxy box to image with label if self.pil or not is_ascii(label): self.draw.rectangle(box, width=self.lw, outline=color) # box if label: w, h = self.font.getsize(label) # text width, height outside = box[1] - h >= 0 # label fits outside box - self.draw.rectangle([box[0], - box[1] - h if outside else box[1], - box[0] + w + 1, - box[1] + 1 if outside else box[1] + h + 1], fill=color) + self.draw.rectangle( + [ + box[0], + box[1] - h if outside else box[1], + box[0] + w + 1, + box[1] + 1 if outside else box[1] + h + 1, + ], + fill=color, + ) # self.draw.text((box[0], box[1]), label, fill=txt_color, font=self.font, anchor='ls') # for PIL>8.0 self.draw.text((box[0], box[1] - h if outside else box[1]), label, fill=txt_color, font=self.font) else: # cv2 @@ -100,8 +125,16 @@ def box_label(self, box, label='', color=(128, 128, 128), txt_color=(255, 255, 2 outside = p1[1] - h - 3 >= 0 # label fits outside box p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 cv2.rectangle(self.im, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(self.im, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, self.lw / 3, txt_color, - thickness=tf, lineType=cv2.LINE_AA) + cv2.putText( + self.im, + label, + (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), + 0, + self.lw / 3, + txt_color, + thickness=tf, + lineType=cv2.LINE_AA, + ) def rectangle(self, xy, fill=None, outline=None, width=1): # Add rectangle to image (PIL-only) @@ -133,7 +166,7 @@ def butter_lowpass_filtfilt(data, cutoff=1500, fs=50000, order=5): def butter_lowpass(cutoff, fs, order): nyq = 0.5 * fs normal_cutoff = cutoff / nyq - return butter(order, normal_cutoff, btype='low', analog=False) + return butter(order, normal_cutoff, btype="low", analog=False) b, a = butter_lowpass(cutoff, fs, order=order) return filtfilt(b, a, data) # forward-backward filter @@ -148,7 +181,7 @@ def output_to_target(output): return np.array(targets) -def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max_size=1920, max_subplots=16): +def plot_images(images, targets, paths=None, fname="images.jpg", names=None, max_size=1920, max_subplots=16): # Plot image grid with labels if isinstance(images, torch.Tensor): images = 
images.cpu().float().numpy() @@ -158,7 +191,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max images *= 255.0 # de-normalise (optional) bs, _, h, w = images.shape # batch size, _, height, width bs = min(bs, max_subplots) # limit plot images - ns = np.ceil(bs ** 0.5) # number of subplots (square) + ns = np.ceil(bs**0.5) # number of subplots (square) # Build Image mosaic = np.full((int(ns * h), int(ns * w), 3), 255, dtype=np.uint8) # init @@ -167,7 +200,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max break x, y = int(w * (i // ns)), int(h * (i % ns)) # block origin im = im.transpose(1, 2, 0) - mosaic[y:y + h, x:x + w, :] = im + mosaic[y : y + h, x : x + w, :] = im # Resize (optional) scale = max_size / ns / max(h, w) @@ -187,7 +220,7 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max if len(targets) > 0: ti = targets[targets[:, 0] == i] # image targets boxes = xywh2xyxy(ti[:, 2:6]).T - classes = ti[:, 1].astype('int') + classes = ti[:, 1].astype("int") labels = ti.shape[1] == 6 # labels if no conf column conf = None if labels else ti[:, 6] # check for confidence presence (label vs pred) @@ -204,59 +237,59 @@ def plot_images(images, targets, paths=None, fname='images.jpg', names=None, max color = colors(cls) cls = names[cls] if names else cls if labels or conf[j] > 0.25: # 0.25 conf thresh - label = f'{cls}' if labels else f'{cls} {conf[j]:.1f}' + label = f"{cls}" if labels else f"{cls} {conf[j]:.1f}" annotator.box_label(box, label, color=color) annotator.im.save(fname) # save -def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=''): +def plot_lr_scheduler(optimizer, scheduler, epochs=300, save_dir=""): # Plot LR simulating training for full epochs optimizer, scheduler = copy(optimizer), copy(scheduler) # do not modify originals y = [] for _ in range(epochs): scheduler.step() - y.append(optimizer.param_groups[0]['lr']) - plt.plot(y, '.-', label='LR') - plt.xlabel('epoch') - plt.ylabel('LR') + y.append(optimizer.param_groups[0]["lr"]) + plt.plot(y, ".-", label="LR") + plt.xlabel("epoch") + plt.ylabel("LR") plt.grid() plt.xlim(0, epochs) plt.ylim(0) - plt.savefig(Path(save_dir) / 'LR.png', dpi=200) + plt.savefig(Path(save_dir) / "LR.png", dpi=200) plt.close() def plot_val_txt(): # from utils.plots import *; plot_val() # Plot val.txt histograms - x = np.loadtxt('val.txt', dtype=np.float32) + x = np.loadtxt("val.txt", dtype=np.float32) box = xyxy2xywh(x[:, :4]) cx, cy = box[:, 0], box[:, 1] fig, ax = plt.subplots(1, 1, figsize=(6, 6), tight_layout=True) ax.hist2d(cx, cy, bins=600, cmax=10, cmin=0) - ax.set_aspect('equal') - plt.savefig('hist2d.png', dpi=300) + ax.set_aspect("equal") + plt.savefig("hist2d.png", dpi=300) fig, ax = plt.subplots(1, 2, figsize=(12, 6), tight_layout=True) ax[0].hist(cx, bins=600) ax[1].hist(cy, bins=600) - plt.savefig('hist1d.png', dpi=200) + plt.savefig("hist1d.png", dpi=200) def plot_targets_txt(): # from utils.plots import *; plot_targets_txt() # Plot targets.txt histograms - x = np.loadtxt('targets.txt', dtype=np.float32).T - s = ['x targets', 'y targets', 'width targets', 'height targets'] + x = np.loadtxt("targets.txt", dtype=np.float32).T + s = ["x targets", "y targets", "width targets", "height targets"] fig, ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True) ax = ax.ravel() for i in range(4): - ax[i].hist(x[i], bins=100, label='%.3g +/- %.3g' % (x[i].mean(), x[i].std())) + ax[i].hist(x[i], bins=100, label=f"{x[i].mean():.3g} +/- 
{x[i].std():.3g}") ax[i].legend() ax[i].set_title(s[i]) - plt.savefig('targets.jpg', dpi=200) + plt.savefig("targets.jpg", dpi=200) -def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_val_study() +def plot_val_study(file="", dir="", x=None): # from utils.plots import *; plot_val_study() # Plot file=study.txt generated by val.py (or plot all study*.txt in dir) save_dir = Path(file).parent if file else Path(dir) plot2 = False # plot additional results @@ -265,59 +298,72 @@ def plot_val_study(file='', dir='', x=None): # from utils.plots import *; plot_ fig2, ax2 = plt.subplots(1, 1, figsize=(8, 4), tight_layout=True) # for f in [save_dir / f'study_coco_{x}.txt' for x in ['yolov5n6', 'yolov5s6', 'yolov5m6', 'yolov5l6', 'yolov5x6']]: - for f in sorted(save_dir.glob('study*.txt')): + for f in sorted(save_dir.glob("study*.txt")): y = np.loadtxt(f, dtype=np.float32, usecols=[0, 1, 2, 3, 7, 8, 9], ndmin=2).T x = np.arange(y.shape[1]) if x is None else np.array(x) if plot2: - s = ['P', 'R', 'mAP@.5', 'mAP@.5:.95', 't_preprocess (ms/img)', 't_inference (ms/img)', 't_NMS (ms/img)'] + s = ["P", "R", "mAP@.5", "mAP@.5:.95", "t_preprocess (ms/img)", "t_inference (ms/img)", "t_NMS (ms/img)"] for i in range(7): - ax[i].plot(x, y[i], '.-', linewidth=2, markersize=8) + ax[i].plot(x, y[i], ".-", linewidth=2, markersize=8) ax[i].set_title(s[i]) j = y[3].argmax() + 1 - ax2.plot(y[5, 1:j], y[3, 1:j] * 1E2, '.-', linewidth=2, markersize=8, - label=f.stem.replace('study_coco_', '').replace('yolo', 'YOLO')) - - ax2.plot(1E3 / np.array([209, 140, 97, 58, 35, 18]), [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], - 'k.-', linewidth=2, markersize=8, alpha=.25, label='EfficientDet') + ax2.plot( + y[5, 1:j], + y[3, 1:j] * 1e2, + ".-", + linewidth=2, + markersize=8, + label=f.stem.replace("study_coco_", "").replace("yolo", "YOLO"), + ) + + ax2.plot( + 1e3 / np.array([209, 140, 97, 58, 35, 18]), + [34.6, 40.5, 43.0, 47.5, 49.7, 51.5], + "k.-", + linewidth=2, + markersize=8, + alpha=0.25, + label="EfficientDet", + ) ax2.grid(alpha=0.2) ax2.set_yticks(np.arange(20, 60, 5)) ax2.set_xlim(0, 57) ax2.set_ylim(25, 55) - ax2.set_xlabel('GPU Speed (ms/img)') - ax2.set_ylabel('COCO AP val') - ax2.legend(loc='lower right') - f = save_dir / 'study.png' - print(f'Saving {f}...') + ax2.set_xlabel("GPU Speed (ms/img)") + ax2.set_ylabel("COCO AP val") + ax2.legend(loc="lower right") + f = save_dir / "study.png" + print(f"Saving {f}...") plt.savefig(f, dpi=300) -def plot_labels(labels, names=(), save_dir=Path('')): +def plot_labels(labels, names=(), save_dir=Path("")): # plot dataset labels - print('Plotting labels... ') + print("Plotting labels... 
") c, b = labels[:, 0], labels[:, 1:].transpose() # classes, boxes nc = int(c.max() + 1) # number of classes - x = pd.DataFrame(b.transpose(), columns=['x', 'y', 'width', 'height']) + x = pd.DataFrame(b.transpose(), columns=["x", "y", "width", "height"]) # seaborn correlogram - sn.pairplot(x, corner=True, diag_kind='auto', kind='hist', diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) - plt.savefig(save_dir / 'labels_correlogram.jpg', dpi=200) + sn.pairplot(x, corner=True, diag_kind="auto", kind="hist", diag_kws=dict(bins=50), plot_kws=dict(pmax=0.9)) + plt.savefig(save_dir / "labels_correlogram.jpg", dpi=200) plt.close() # matplotlib labels - matplotlib.use('svg') # faster + matplotlib.use("svg") # faster ax = plt.subplots(2, 2, figsize=(8, 8), tight_layout=True)[1].ravel() - y = ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) + ax[0].hist(c, bins=np.linspace(0, nc, nc + 1) - 0.5, rwidth=0.8) # [y[2].patches[i].set_color([x / 255 for x in colors(i)]) for i in range(nc)] # update colors bug #3195 - ax[0].set_ylabel('instances') + ax[0].set_ylabel("instances") if 0 < len(names) < 30: ax[0].set_xticks(range(len(names))) ax[0].set_xticklabels(names, rotation=90, fontsize=10) else: - ax[0].set_xlabel('classes') - sn.histplot(x, x='x', y='y', ax=ax[2], bins=50, pmax=0.9) - sn.histplot(x, x='width', y='height', ax=ax[3], bins=50, pmax=0.9) + ax[0].set_xlabel("classes") + sn.histplot(x, x="x", y="y", ax=ax[2], bins=50, pmax=0.9) + sn.histplot(x, x="width", y="height", ax=ax[3], bins=50, pmax=0.9) # rectangles labels[:, 1:3] = 0.5 # center @@ -326,49 +372,49 @@ def plot_labels(labels, names=(), save_dir=Path('')): for cls, *box in labels[:1000]: ImageDraw.Draw(img).rectangle(box, width=1, outline=colors(cls)) # plot ax[1].imshow(img) - ax[1].axis('off') + ax[1].axis("off") for a in [0, 1, 2, 3]: - for s in ['top', 'right', 'left', 'bottom']: + for s in ["top", "right", "left", "bottom"]: ax[a].spines[s].set_visible(False) - plt.savefig(save_dir / 'labels.jpg', dpi=200) - matplotlib.use('Agg') + plt.savefig(save_dir / "labels.jpg", dpi=200) + matplotlib.use("Agg") plt.close() -def profile_idetection(start=0, stop=0, labels=(), save_dir=''): +def profile_idetection(start=0, stop=0, labels=(), save_dir=""): # Plot iDetection '*.txt' per-image logs. 
from utils.plots import *; profile_idetection() ax = plt.subplots(2, 4, figsize=(12, 6), tight_layout=True)[1].ravel() - s = ['Images', 'Free Storage (GB)', 'RAM Usage (GB)', 'Battery', 'dt_raw (ms)', 'dt_smooth (ms)', 'real-world FPS'] - files = list(Path(save_dir).glob('frames*.txt')) + s = ["Images", "Free Storage (GB)", "RAM Usage (GB)", "Battery", "dt_raw (ms)", "dt_smooth (ms)", "real-world FPS"] + files = list(Path(save_dir).glob("frames*.txt")) for fi, f in enumerate(files): try: results = np.loadtxt(f, ndmin=2).T[:, 90:-30] # clip first and last rows n = results.shape[1] # number of rows x = np.arange(start, min(stop, n) if stop else n) results = results[:, x] - t = (results[0] - results[0].min()) # set t0=0s + t = results[0] - results[0].min() # set t0=0s results[0] = x for i, a in enumerate(ax): if i < len(results): - label = labels[fi] if len(labels) else f.stem.replace('frames_', '') - a.plot(t, results[i], marker='.', label=label, linewidth=1, markersize=5) + label = labels[fi] if len(labels) else f.stem.replace("frames_", "") + a.plot(t, results[i], marker=".", label=label, linewidth=1, markersize=5) a.set_title(s[i]) - a.set_xlabel('time (s)') + a.set_xlabel("time (s)") # if fi == len(files) - 1: # a.set_ylim(bottom=0) - for side in ['top', 'right']: + for side in ["top", "right"]: a.spines[side].set_visible(False) else: a.remove() except Exception as e: - print('Warning: Plotting error for %s; %s' % (f, e)) + print(f"Warning: Plotting error for {f}; {e}") ax[1].legend() - plt.savefig(Path(save_dir) / 'idetection_profile.png', dpi=200) + plt.savefig(Path(save_dir) / "idetection_profile.png", dpi=200) -def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; plot_evolve() +def plot_evolve(evolve_csv="path/to/evolve.csv"): # from utils.plots import *; plot_evolve() # Plot evolve.csv hyp evolution results evolve_csv = Path(evolve_csv) data = pd.read_csv(evolve_csv) @@ -377,30 +423,30 @@ def plot_evolve(evolve_csv='path/to/evolve.csv'): # from utils.plots import *; f = fitness(x) j = np.argmax(f) # max fitness index plt.figure(figsize=(10, 12), tight_layout=True) - matplotlib.rc('font', **{'size': 8}) + matplotlib.rc("font", **{"size": 8}) for i, k in enumerate(keys[7:]): v = x[:, 7 + i] mu = v[j] # best single result plt.subplot(6, 5, i + 1) - plt.scatter(v, f, c=hist2d(v, f, 20), cmap='viridis', alpha=.8, edgecolors='none') - plt.plot(mu, f.max(), 'k+', markersize=15) - plt.title('%s = %.3g' % (k, mu), fontdict={'size': 9}) # limit to 40 characters + plt.scatter(v, f, c=hist2d(v, f, 20), cmap="viridis", alpha=0.8, edgecolors="none") + plt.plot(mu, f.max(), "k+", markersize=15) + plt.title(f"{k} = {mu:.3g}", fontdict={"size": 9}) # limit to 40 characters if i % 5 != 0: plt.yticks([]) - print('%15s: %.3g' % (k, mu)) - f = evolve_csv.with_suffix('.png') # filename + print("%15s: %.3g" % (k, mu)) + f = evolve_csv.with_suffix(".png") # filename plt.savefig(f, dpi=200) plt.close() - print(f'Saved {f}') + print(f"Saved {f}") -def plot_results(file='path/to/results.csv', dir=''): +def plot_results(file="path/to/results.csv", dir=""): # Plot training results.csv. Usage: from utils.plots import *; plot_results('path/to/results.csv') save_dir = Path(file).parent if file else Path(dir) fig, ax = plt.subplots(2, 5, figsize=(12, 6), tight_layout=True) ax = ax.ravel() - files = list(save_dir.glob('results*.csv')) - assert len(files), f'No results.csv files found in {save_dir.resolve()}, nothing to plot.' 
+ files = list(save_dir.glob("results*.csv")) + assert len(files), f"No results.csv files found in {save_dir.resolve()}, nothing to plot." for fi, f in enumerate(files): try: data = pd.read_csv(f) @@ -409,26 +455,26 @@ def plot_results(file='path/to/results.csv', dir=''): for i, j in enumerate([1, 2, 3, 4, 5, 8, 9, 10, 6, 7]): y = data.values[:, j] # y[y == 0] = np.nan # don't show zero values - ax[i].plot(x, y, marker='.', label=f.stem, linewidth=2, markersize=8) + ax[i].plot(x, y, marker=".", label=f.stem, linewidth=2, markersize=8) ax[i].set_title(s[j], fontsize=12) # if j in [8, 9, 10]: # share train and val loss y axes # ax[i].get_shared_y_axes().join(ax[i], ax[i - 5]) except Exception as e: - print(f'Warning: Plotting error for {f}: {e}') + print(f"Warning: Plotting error for {f}: {e}") ax[1].legend() - fig.savefig(save_dir / 'results.png', dpi=200) + fig.savefig(save_dir / "results.png", dpi=200) plt.close() -def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detect/exp')): +def feature_visualization(x, module_type, stage, n=32, save_dir=Path("runs/detect/exp")): """ x: Features to be visualized module_type: Module type stage: Module stage within model n: Maximum number of feature maps to plot - save_dir: Directory to save results + save_dir: Directory to save results. """ - if 'Detect' not in module_type: + if "Detect" not in module_type: batch, channels, height, width = x.shape # batch, channels, height, width if height > 1 and width > 1: f = f"stage{stage}_{module_type.split('.')[-1]}_features.png" # filename @@ -440,8 +486,8 @@ def feature_visualization(x, module_type, stage, n=32, save_dir=Path('runs/detec plt.subplots_adjust(wspace=0.05, hspace=0.05) for i in range(n): ax[i].imshow(blocks[i].squeeze()) # cmap='gray' - ax[i].axis('off') + ax[i].axis("off") - print(f'Saving {save_dir / f}... ({n}/{channels})') - plt.savefig(save_dir / f, dpi=300, bbox_inches='tight') + print(f"Saving {save_dir / f}... ({n}/{channels})") + plt.savefig(save_dir / f, dpi=300, bbox_inches="tight") plt.close() diff --git a/utils/torch_utils.py b/utils/torch_utils.py index 352ecf572c9f..b8a902b50a67 100644 --- a/utils/torch_utils.py +++ b/utils/torch_utils.py @@ -1,7 +1,5 @@ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license -""" -PyTorch utils -""" +"""PyTorch utils.""" import datetime import logging @@ -30,9 +28,7 @@ @contextmanager def torch_distributed_zero_first(local_rank: int): - """ - Decorator to make all processes in distributed training wait for each local_master to do something. - """ + """Decorator to make all processes in distributed training wait for each local_master to do something.""" if local_rank not in [-1, 0]: dist.barrier(device_ids=[local_rank]) yield @@ -43,44 +39,44 @@ def torch_distributed_zero_first(local_rank: int): def date_modified(path=__file__): # return human-readable file modification date, i.e. '2021-3-26' t = datetime.datetime.fromtimestamp(Path(path).stat().st_mtime) - return f'{t.year}-{t.month}-{t.day}' + return f"{t.year}-{t.month}-{t.day}" def git_describe(path=Path(__file__).parent): # path must be a directory # return human-readable git description, i.e. 
v5.0-5-g3e25f1e https://git-scm.com/docs/git-describe - s = f'git -C {path} describe --tags --long --always' + s = f"git -C {path} describe --tags --long --always" try: return subprocess.check_output(s, shell=True, stderr=subprocess.STDOUT).decode()[:-1] - except subprocess.CalledProcessError as e: - return '' # not a git repository + except subprocess.CalledProcessError: + return "" # not a git repository -def select_device(device='', batch_size=None): +def select_device(device="", batch_size=None): # device = 'cpu' or '0' or '0,1,2,3' - s = f'YOLOv5 πŸš€ {git_describe() or date_modified()} torch {torch.__version__} ' # string - device = str(device).strip().lower().replace('cuda:', '') # to string, 'cuda:0' to '0' - cpu = device == 'cpu' + s = f"YOLOv5 πŸš€ {git_describe() or date_modified()} torch {torch.__version__} " # string + device = str(device).strip().lower().replace("cuda:", "") # to string, 'cuda:0' to '0' + cpu = device == "cpu" if cpu: - os.environ['CUDA_VISIBLE_DEVICES'] = '-1' # force torch.cuda.is_available() = False + os.environ["CUDA_VISIBLE_DEVICES"] = "-1" # force torch.cuda.is_available() = False elif device: # non-cpu device requested - os.environ['CUDA_VISIBLE_DEVICES'] = device # set environment variable - assert torch.cuda.is_available(), f'CUDA unavailable, invalid device {device} requested' # check availability + os.environ["CUDA_VISIBLE_DEVICES"] = device # set environment variable + assert torch.cuda.is_available(), f"CUDA unavailable, invalid device {device} requested" # check availability cuda = not cpu and torch.cuda.is_available() if cuda: - devices = device.split(',') if device else '0' # range(torch.cuda.device_count()) # i.e. 0,1,6,7 + devices = device.split(",") if device else "0" # range(torch.cuda.device_count()) # i.e. 
0,1,6,7 n = len(devices) # device count if n > 1 and batch_size: # check batch_size is divisible by device_count - assert batch_size % n == 0, f'batch-size {batch_size} not multiple of GPU count {n}' - space = ' ' * (len(s) + 1) + assert batch_size % n == 0, f"batch-size {batch_size} not multiple of GPU count {n}" + space = " " * (len(s) + 1) for i, d in enumerate(devices): p = torch.cuda.get_device_properties(i) - s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024 ** 2}MB)\n" # bytes to MB + s += f"{'' if i == 0 else space}CUDA:{d} ({p.name}, {p.total_memory / 1024**2}MB)\n" # bytes to MB else: - s += 'CPU\n' + s += "CPU\n" - LOGGER.info(s.encode().decode('ascii', 'ignore') if platform.system() == 'Windows' else s) # emoji-safe - return torch.device('cuda:0' if cuda else 'cpu') + LOGGER.info(s.encode().decode("ascii", "ignore") if platform.system() == "Windows" else s) # emoji-safe + return torch.device("cuda:0" if cuda else "cpu") def time_sync(): @@ -102,18 +98,20 @@ def profile(input, ops, n=10, device=None): results = [] logging.basicConfig(format="%(message)s", level=logging.INFO) device = device or select_device() - print(f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" - f"{'input':>24s}{'output':>24s}") + print( + f"{'Params':>12s}{'GFLOPs':>12s}{'GPU_mem (GB)':>14s}{'forward (ms)':>14s}{'backward (ms)':>14s}" + f"{'input':>24s}{'output':>24s}" + ) for x in input if isinstance(input, list) else [input]: x = x.to(device) x.requires_grad = True for m in ops if isinstance(ops, list) else [ops]: - m = m.to(device) if hasattr(m, 'to') else m # device - m = m.half() if hasattr(m, 'half') and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m - tf, tb, t = 0., 0., [0., 0., 0.] # dt forward, backward + m = m.to(device) if hasattr(m, "to") else m # device + m = m.half() if hasattr(m, "half") and isinstance(x, torch.Tensor) and x.dtype is torch.float16 else m + tf, tb, t = 0.0, 0.0, [0.0, 0.0, 0.0] # dt forward, backward try: - flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1E9 * 2 # GFLOPs + flops = thop.profile(m, inputs=(x,), verbose=False)[0] / 1e9 * 2 # GFLOPs except: flops = 0 @@ -127,14 +125,14 @@ def profile(input, ops, n=10, device=None): t[2] = time_sync() except Exception as e: # no backward method print(e) - t[2] = float('nan') + t[2] = float("nan") tf += (t[1] - t[0]) * 1000 / n # ms per op forward tb += (t[2] - t[1]) * 1000 / n # ms per op backward - mem = torch.cuda.memory_reserved() / 1E9 if torch.cuda.is_available() else 0 # (GB) - s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else 'list' - s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else 'list' + mem = torch.cuda.memory_reserved() / 1e9 if torch.cuda.is_available() else 0 # (GB) + s_in = tuple(x.shape) if isinstance(x, torch.Tensor) else "list" + s_out = tuple(y.shape) if isinstance(y, torch.Tensor) else "list" p = sum(list(x.numel() for x in m.parameters())) if isinstance(m, nn.Module) else 0 # parameters - print(f'{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}') + print(f"{p:12}{flops:12.4g}{mem:>14.3f}{tf:14.4g}{tb:14.4g}{str(s_in):>24s}{str(s_out):>24s}") results.append([p, flops, mem, tf, tb, s_in, s_out]) except Exception as e: print(e) @@ -177,7 +175,7 @@ def find_modules(model, mclass=nn.Conv2d): def sparsity(model): # Return global model sparsity - a, b = 0., 0. 
+ a, b = 0.0, 0.0 for p in model.parameters(): a += p.numel() b += (p == 0).sum() @@ -187,23 +185,30 @@ def sparsity(model): def prune(model, amount=0.3): # Prune model to requested global sparsity import torch.nn.utils.prune as prune - print('Pruning model... ', end='') + + print("Pruning model... ", end="") for name, m in model.named_modules(): if isinstance(m, nn.Conv2d): - prune.l1_unstructured(m, name='weight', amount=amount) # prune - prune.remove(m, 'weight') # make permanent - print(' %.3g global sparsity' % sparsity(model)) + prune.l1_unstructured(m, name="weight", amount=amount) # prune + prune.remove(m, "weight") # make permanent + print(f" {sparsity(model):.3g} global sparsity") def fuse_conv_and_bn(conv, bn): # Fuse convolution and batchnorm layers https://tehnokv.com/posts/fusing-batchnorm-and-conv/ - fusedconv = nn.Conv2d(conv.in_channels, - conv.out_channels, - kernel_size=conv.kernel_size, - stride=conv.stride, - padding=conv.padding, - groups=conv.groups, - bias=True).requires_grad_(False).to(conv.weight.device) + fusedconv = ( + nn.Conv2d( + conv.in_channels, + conv.out_channels, + kernel_size=conv.kernel_size, + stride=conv.stride, + padding=conv.padding, + groups=conv.groups, + bias=True, + ) + .requires_grad_(False) + .to(conv.weight.device) + ) # prepare filters w_conv = conv.weight.clone().view(conv.out_channels, -1) @@ -223,26 +228,29 @@ def model_info(model, verbose=False, img_size=640): n_p = sum(x.numel() for x in model.parameters()) # number parameters n_g = sum(x.numel() for x in model.parameters() if x.requires_grad) # number gradients if verbose: - print('%5s %40s %9s %12s %20s %10s %10s' % ('layer', 'name', 'gradient', 'parameters', 'shape', 'mu', 'sigma')) + print("%5s %40s %9s %12s %20s %10s %10s" % ("layer", "name", "gradient", "parameters", "shape", "mu", "sigma")) for i, (name, p) in enumerate(model.named_parameters()): - name = name.replace('module_list.', '') - print('%5g %40s %9s %12g %20s %10.3g %10.3g' % - (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std())) + name = name.replace("module_list.", "") + print( + "%5g %40s %9s %12g %20s %10.3g %10.3g" + % (i, name, p.requires_grad, p.numel(), list(p.shape), p.mean(), p.std()) + ) try: # FLOPs from thop import profile - stride = max(int(model.stride.max()), 32) if hasattr(model, 'stride') else 32 - img = torch.zeros((1, model.yaml.get('ch', 3), stride, stride), device=next(model.parameters()).device) # input - flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1E9 * 2 # stride GFLOPs + + stride = max(int(model.stride.max()), 32) if hasattr(model, "stride") else 32 + img = torch.zeros((1, model.yaml.get("ch", 3), stride, stride), device=next(model.parameters()).device) # input + flops = profile(deepcopy(model), inputs=(img,), verbose=False)[0] / 1e9 * 2 # stride GFLOPs img_size = img_size if isinstance(img_size, list) else [img_size, img_size] # expand if int/float - fs = ', %.1f GFLOPs' % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs + fs = ", %.1f GFLOPs" % (flops * img_size[0] / stride * img_size[1] / stride) # 640x640 GFLOPs except (ImportError, Exception): - fs = '' + fs = "" LOGGER.info(f"Model Summary: {len(list(model.modules()))} layers, {n_p} parameters, {n_g} gradients{fs}") -def load_classifier(name='resnet101', n=2): +def load_classifier(name="resnet101", n=2): # Loads a pretrained model reshaped to n-class output model = torchvision.models.__dict__[name](pretrained=True) @@ -268,7 +276,7 @@ def scale_img(img, ratio=1.0, 
same_shape=False, gs=32): # img(16,3,256,416) else: h, w = img.shape[2:] s = (int(h * ratio), int(w * ratio)) # new size - img = F.interpolate(img, size=s, mode='bilinear', align_corners=False) # resize + img = F.interpolate(img, size=s, mode="bilinear", align_corners=False) # resize if not same_shape: # pad/crop img h, w = [math.ceil(x * ratio / gs) * gs for x in (h, w)] return F.pad(img, [0, w - s[1], 0, h - s[0]], value=0.447) # value = imagenet mean @@ -277,7 +285,7 @@ def scale_img(img, ratio=1.0, same_shape=False, gs=32): # img(16,3,256,416) def copy_attr(a, b, include=(), exclude=()): # Copy attributes from b to a, options to only include [...] and to exclude [...] for k, v in b.__dict__.items(): - if (len(include) and k not in include) or k.startswith('_') or k in exclude: + if (len(include) and k not in include) or k.startswith("_") or k in exclude: continue else: setattr(a, k, v) @@ -288,7 +296,7 @@ class EarlyStopping: def __init__(self, patience=30): self.best_fitness = 0.0 # i.e. mAP self.best_epoch = 0 - self.patience = patience or float('inf') # epochs to wait after fitness stops improving to stop + self.patience = patience or float("inf") # epochs to wait after fitness stops improving to stop self.possible_stop = False # possible stop may occur next epoch def __call__(self, epoch, fitness): @@ -299,12 +307,12 @@ def __call__(self, epoch, fitness): self.possible_stop = delta >= (self.patience - 1) # possible stop may occur next epoch stop = delta >= self.patience # stop training if patience exceeded if stop: - LOGGER.info(f'EarlyStopping patience {self.patience} exceeded, stopping training.') + LOGGER.info(f"EarlyStopping patience {self.patience} exceeded, stopping training.") return stop class ModelEMA: - """ Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models + """Model Exponential Moving Average from https://github.com/rwightman/pytorch-image-models Keep a moving average of everything in the model state_dict (parameters and buffers). This is intended to allow functionality like https://www.tensorflow.org/api_docs/python/tf/train/ExponentialMovingAverage @@ -333,8 +341,8 @@ def update(self, model): for k, v in self.ema.state_dict().items(): if v.dtype.is_floating_point: v *= d - v += (1. - d) * msd[k].detach() + v += (1.0 - d) * msd[k].detach() - def update_attr(self, model, include=(), exclude=('process_group', 'reducer')): + def update_attr(self, model, include=(), exclude=("process_group", "reducer")): # Update EMA attributes copy_attr(self.ema, model, include, exclude) diff --git a/val.py b/val.py index 2fc547322a0a..838ebb74b696 100644 --- a/val.py +++ b/val.py @@ -1,6 +1,6 @@ # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license """ -Validate a trained YOLOv5 model accuracy on a custom dataset +Validate a trained YOLOv5 model accuracy on a custom dataset. 
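Looking back at the torch_utils.py hunks above: ModelEMA and EarlyStopping are meant to be driven from the training loop. A hedged sketch of that wiring, where `model`, `epochs` and `current_map` are assumed to exist in the caller:

    from utils.torch_utils import EarlyStopping, ModelEMA

    ema = ModelEMA(model)                 # shadow copy of the model's state_dict
    stopper = EarlyStopping(patience=30)  # stop after 30 epochs without a fitness gain
    for epoch in range(epochs):
        ...                               # per-batch: forward, backward, optimizer.step()
        ema.update(model)                 # EMA update: v = d*v + (1 - d)*msd[k] for float entries
        if stopper(epoch=epoch, fitness=current_map):
            break
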
diff --git a/val.py b/val.py
index 2fc547322a0a..838ebb74b696 100644
--- a/val.py
+++ b/val.py
@@ -1,6 +1,6 @@
 # YOLOv5 πŸš€ by Ultralytics, GPL-3.0 license
 """
-Validate a trained YOLOv5 model accuracy on a custom dataset
+Validate a trained YOLOv5 model accuracy on a custom dataset.

 Usage:
 $ python path/to/val.py --data coco128.yaml --weights yolov5s.pt --img 640
@@ -24,14 +24,28 @@
 ROOT = Path(os.path.relpath(ROOT, Path.cwd())) # relative

 from models.experimental import attempt_load
+from utils.callbacks import Callbacks
 from utils.datasets import create_dataloader
-from utils.general import coco80_to_coco91_class, check_dataset, check_img_size, check_requirements, \
- check_suffix, check_yaml, box_iou, non_max_suppression, scale_coords, xyxy2xywh, xywh2xyxy, set_logging, \
- increment_path, colorstr, print_args
-from utils.metrics import ap_per_class, ConfusionMatrix
+from utils.general import (
+ box_iou,
+ check_dataset,
+ check_img_size,
+ check_requirements,
+ check_suffix,
+ check_yaml,
+ coco80_to_coco91_class,
+ colorstr,
+ increment_path,
+ non_max_suppression,
+ print_args,
+ scale_coords,
+ set_logging,
+ xywh2xyxy,
+ xyxy2xywh,
+)
+from utils.metrics import ConfusionMatrix, ap_per_class
 from utils.plots import output_to_target, plot_images, plot_val_study
 from utils.torch_utils import select_device, time_sync
-from utils.callbacks import Callbacks


 def save_one_txt(predn, save_conf, shape, file):
@@ -40,8 +54,8 @@ def save_one_txt(predn, save_conf, shape, file):
 for *xyxy, conf, cls in predn.tolist():
 xywh = (xyxy2xywh(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh
 line = (cls, *xywh, conf) if save_conf else (cls, *xywh) # label format
- with open(file, 'a') as f:
- f.write(('%g ' * len(line)).rstrip() % line + '\n')
+ with open(file, "a") as f:
+ f.write(("%g " * len(line)).rstrip() % line + "\n")


 def save_one_json(predn, jdict, path, class_map):
@@ -50,20 +64,25 @@ def save_one_json(predn, jdict, path, class_map):
 box = xyxy2xywh(predn[:, :4]) # xywh
 box[:, :2] -= box[:, 2:] / 2 # xy center to top-left corner
 for p, b in zip(predn.tolist(), box.tolist()):
- jdict.append({'image_id': image_id,
- 'category_id': class_map[int(p[5])],
- 'bbox': [round(x, 3) for x in b],
- 'score': round(p[4], 5)})
+ jdict.append(
+ {
+ "image_id": image_id,
+ "category_id": class_map[int(p[5])],
+ "bbox": [round(x, 3) for x in b],
+ "score": round(p[4], 5),
+ }
+ )


 def process_batch(detections, labels, iouv):
 """
 Return correct predictions matrix. Both sets of boxes are in (x1, y1, x2, y2) format.
+
 Arguments:
 detections (Array[N, 6]), x1, y1, x2, y2, conf, class
 labels (Array[M, 5]), class, x1, y1, x2, y2
 Returns:
- correct (Array[N, 10]), for 10 IoU levels
+ correct (Array[N, 10]), for 10 IoU levels.
 """
 correct = torch.zeros(detections.shape[0], iouv.shape[0], dtype=torch.bool, device=iouv.device)
 iou = box_iou(labels[:, 1:], detections[:, :4])
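A quick worked example of the box conversion save_one_json() performs above: COCO stores boxes as [x_min, y_min, width, height], so the xy center coming out of xyxy2xywh() must be shifted to the top-left corner. Plain tensor ops below stand in for the utils.general helper.

    import torch

    xyxy = torch.tensor([[100.0, 50.0, 300.0, 250.0]])  # x1, y1, x2, y2
    wh = xyxy[:, 2:] - xyxy[:, :2]  # width, height
    box = torch.cat(((xyxy[:, :2] + xyxy[:, 2:]) / 2, wh), 1)  # center-based xywh
    box[:, :2] -= box[:, 2:] / 2  # xy center -> top-left corner
    print(box.tolist())  # [[100.0, 50.0, 200.0, 200.0]]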
@@ -81,32 +100,33 @@ def process_batch(detections, labels, iouv):
 @torch.no_grad()
-def run(data,
- weights=None, # model.pt path(s)
- batch_size=32, # batch size
- imgsz=640, # inference size (pixels)
- conf_thres=0.001, # confidence threshold
- iou_thres=0.6, # NMS IoU threshold
- task='val', # train, val, test, speed or study
- device='', # cuda device, i.e. 0 or 0,1,2,3 or cpu
- single_cls=False, # treat as single-class dataset
- augment=False, # augmented inference
- verbose=False, # verbose output
- save_txt=False, # save results to *.txt
- save_hybrid=False, # save label+prediction hybrid results to *.txt
- save_conf=False, # save confidences in --save-txt labels
- save_json=False, # save a COCO-JSON results file
- project=ROOT / 'runs/val', # save to project/name
- name='exp', # save to project/name
- exist_ok=False, # existing project/name ok, do not increment
- half=True, # use FP16 half-precision inference
- model=None,
- dataloader=None,
- save_dir=Path(''),
- plots=True,
- callbacks=Callbacks(),
- compute_loss=None,
- ):
+def run(
+ data,
+ weights=None, # model.pt path(s)
+ batch_size=32, # batch size
+ imgsz=640, # inference size (pixels)
+ conf_thres=0.001, # confidence threshold
+ iou_thres=0.6, # NMS IoU threshold
+ task="val", # train, val, test, speed or study
+ device="", # cuda device, i.e. 0 or 0,1,2,3 or cpu
+ single_cls=False, # treat as single-class dataset
+ augment=False, # augmented inference
+ verbose=False, # verbose output
+ save_txt=False, # save results to *.txt
+ save_hybrid=False, # save label+prediction hybrid results to *.txt
+ save_conf=False, # save confidences in --save-txt labels
+ save_json=False, # save a COCO-JSON results file
+ project=ROOT / "runs/val", # save to project/name
+ name="exp", # save to project/name
+ exist_ok=False, # existing project/name ok, do not increment
+ half=True, # use FP16 half-precision inference
+ model=None,
+ dataloader=None,
+ save_dir=Path(""),
+ plots=True,
+ callbacks=Callbacks(),
+ compute_loss=None,
+):
 # Initialize/load model and set device
 training = model is not None
 if training: # called by train.py
@@ -117,10 +137,10 @@ def run(data,
 # Directories
 save_dir = increment_path(Path(project) / name, exist_ok=exist_ok) # increment run
- (save_dir / 'labels' if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir
+ (save_dir / "labels" if save_txt else save_dir).mkdir(parents=True, exist_ok=True) # make dir

 # Load model
- check_suffix(weights, '.pt')
+ check_suffix(weights, ".pt")
 model = attempt_load(weights, map_location=device) # load FP32 model
 gs = max(int(model.stride.max()), 32) # grid size (max stride)
 imgsz = check_img_size(imgsz, s=gs) # check image size
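The check_img_size(imgsz, s=gs) call above forces the inference size to a multiple of the model's maximum stride, so every grid level divides the input evenly. A small sketch of that constraint (make_divisible mirrors the utils.general helper; 32 is the max stride of standard P5 models):

    import math

    def make_divisible(x, divisor):
        return math.ceil(x / divisor) * divisor  # round up to the nearest multiple

    gs = 32
    for imgsz in (640, 600, 1000):
        print(imgsz, "->", make_divisible(imgsz, gs))  # 640 -> 640, 600 -> 608, 1000 -> 1024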
@@ -133,30 +153,31 @@ def run(data,
 data = check_dataset(data) # check

 # Half
- half &= device.type != 'cpu' # half precision only supported on CUDA
+ half &= device.type != "cpu" # half precision only supported on CUDA
 model.half() if half else model.float()

 # Configure
 model.eval()
- is_coco = isinstance(data.get('val'), str) and data['val'].endswith('coco/val2017.txt') # COCO dataset
- nc = 1 if single_cls else int(data['nc']) # number of classes
+ is_coco = isinstance(data.get("val"), str) and data["val"].endswith("coco/val2017.txt") # COCO dataset
+ nc = 1 if single_cls else int(data["nc"]) # number of classes
 iouv = torch.linspace(0.5, 0.95, 10).to(device) # iou vector for mAP@0.5:0.95
 niou = iouv.numel()

 # Dataloader
 if not training:
- if device.type != 'cpu':
+ if device.type != "cpu":
 model(torch.zeros(1, 3, imgsz, imgsz).to(device).type_as(next(model.parameters()))) # run once
- pad = 0.0 if task == 'speed' else 0.5
- task = task if task in ('train', 'val', 'test') else 'val' # path to train/val/test images
- dataloader = create_dataloader(data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True,
- prefix=colorstr(f'{task}: '))[0]
+ pad = 0.0 if task == "speed" else 0.5
+ task = task if task in ("train", "val", "test") else "val" # path to train/val/test images
+ dataloader = create_dataloader(
+ data[task], imgsz, batch_size, gs, single_cls, pad=pad, rect=True, prefix=colorstr(f"{task}: ")
+ )[0]

 seen = 0
 confusion_matrix = ConfusionMatrix(nc=nc)
- names = {k: v for k, v in enumerate(model.names if hasattr(model, 'names') else model.module.names)}
+ names = {k: v for k, v in enumerate(model.names if hasattr(model, "names") else model.module.names)}
 class_map = coco80_to_coco91_class() if is_coco else list(range(1000))
- s = ('%20s' + '%11s' * 6) % ('Class', 'Images', 'Labels', 'P', 'R', 'mAP@.5', 'mAP@.5:.95')
+ s = ("%20s" + "%11s" * 6) % ("Class", "Images", "Labels", "P", "R", "mAP@.5", "mAP@.5:.95")
 dt, p, r, f1, mp, mr, map50, map = [0.0, 0.0, 0.0], 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0
 loss = torch.zeros(3, device=device)
 jdict, stats, ap, ap_class = [], [], [], []
@@ -218,16 +239,16 @@ def run(data,
 # Save/log
 if save_txt:
- save_one_txt(predn, save_conf, shape, file=save_dir / 'labels' / (path.stem + '.txt'))
+ save_one_txt(predn, save_conf, shape, file=save_dir / "labels" / (path.stem + ".txt"))
 if save_json:
 save_one_json(predn, jdict, path, class_map) # append to COCO-JSON dictionary
- callbacks.run('on_val_image_end', pred, predn, path, names, img[si])
+ callbacks.run("on_val_image_end", pred, predn, path, names, img[si])

 # Plot images
 if plots and batch_i < 3:
- f = save_dir / f'val_batch{batch_i}_labels.jpg' # labels
+ f = save_dir / f"val_batch{batch_i}_labels.jpg" # labels
 Thread(target=plot_images, args=(img, targets, paths, f, names), daemon=True).start()
- f = save_dir / f'val_batch{batch_i}_pred.jpg' # predictions
+ f = save_dir / f"val_batch{batch_i}_pred.jpg" # predictions
 Thread(target=plot_images, args=(img, output_to_target(out), paths, f, names), daemon=True).start()

 # Compute statistics
@@ -241,8 +262,8 @@ def run(data,
 nt = torch.zeros(1)

 # Print results
- pf = '%20s' + '%11i' * 2 + '%11.3g' * 4 # print format
- print(pf % ('all', seen, nt.sum(), mp, mr, map50, map))
+ pf = "%20s" + "%11i" * 2 + "%11.3g" * 4 # print format
+ print(pf % ("all", seen, nt.sum(), mp, mr, map50, map))

 # Print results per class
 if (verbose or (nc < 50 and not training)) and nc > 1 and len(stats):
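The mAP@.5:.95 column in the header printed above averages AP over the ten IoU thresholds held in the iouv vector from the earlier hunk; for reference:

    import torch

    iouv = torch.linspace(0.5, 0.95, 10)  # same vector as in run()
    print([round(v, 2) for v in iouv.tolist()])
    # [0.5, 0.55, 0.6, 0.65, 0.7, 0.75, 0.8, 0.85, 0.9, 0.95]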
@@ -250,33 +271,33 @@ def run(data,
 print(pf % (names[c], seen, nt[c], p[i], r[i], ap50[i], ap[i]))

 # Print speeds
- t = tuple(x / seen * 1E3 for x in dt) # speeds per image
+ t = tuple(x / seen * 1e3 for x in dt) # speeds per image
 if not training:
 shape = (batch_size, 3, imgsz, imgsz)
- print(f'Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}' % t)
+ print(f"Speed: %.1fms pre-process, %.1fms inference, %.1fms NMS per image at shape {shape}" % t)

 # Plots
 if plots:
 confusion_matrix.plot(save_dir=save_dir, names=list(names.values()))
- callbacks.run('on_val_end')
+ callbacks.run("on_val_end")

 # Save JSON
 if save_json and len(jdict):
- w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else '' # weights
- anno_json = str(Path(data.get('path', '../coco')) / 'annotations/instances_val2017.json') # annotations json
+ w = Path(weights[0] if isinstance(weights, list) else weights).stem if weights is not None else "" # weights
+ anno_json = str(Path(data.get("path", "../coco")) / "annotations/instances_val2017.json") # annotations json
 pred_json = str(save_dir / f"{w}_predictions.json") # predictions json
- print(f'\nEvaluating pycocotools mAP... saving {pred_json}...')
- with open(pred_json, 'w') as f:
+ print(f"\nEvaluating pycocotools mAP... saving {pred_json}...")
+ with open(pred_json, "w") as f:
 json.dump(jdict, f)

 try: # https://github.com/cocodataset/cocoapi/blob/master/PythonAPI/pycocoEvalDemo.ipynb
- check_requirements(['pycocotools'])
+ check_requirements(["pycocotools"])
 from pycocotools.coco import COCO
 from pycocotools.cocoeval import COCOeval

 anno = COCO(anno_json) # init annotations api
 pred = anno.loadRes(pred_json) # init predictions api
- eval = COCOeval(anno, pred, 'bbox')
+ eval = COCOeval(anno, pred, "bbox")
 if is_coco:
 eval.params.imgIds = [int(Path(x).stem) for x in dataloader.dataset.img_files] # image IDs to evaluate
 eval.evaluate()
@@ -284,12 +305,12 @@ def run(data,
 eval.summarize()
 map, map50 = eval.stats[:2] # update results (mAP@0.5:0.95, mAP@0.5)
 except Exception as e:
- print(f'pycocotools unable to run: {e}')
+ print(f"pycocotools unable to run: {e}")

 # Return results
 model.float() # for training
 if not training:
- s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ''
+ s = f"\n{len(list(save_dir.glob('labels/*.txt')))} labels saved to {save_dir / 'labels'}" if save_txt else ""
 print(f"Results saved to {colorstr('bold', save_dir)}{s}")
 maps = np.zeros(nc) + map
 for i, c in enumerate(ap_class):
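For anyone re-running the Save JSON branch above by hand, the pycocotools flow it wraps reduces to the sketch below; both file paths are placeholders, and stats[0]/stats[1] are mAP@0.5:0.95 and mAP@0.5 as consumed by run().

    from pycocotools.coco import COCO
    from pycocotools.cocoeval import COCOeval

    anno = COCO("annotations/instances_val2017.json")  # ground-truth annotations (placeholder path)
    pred = anno.loadRes("exp_predictions.json")  # detections written by save_one_json()
    e = COCOeval(anno, pred, "bbox")
    e.evaluate()
    e.accumulate()
    e.summarize()
    map5095, map50 = e.stats[:2]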
@@ -299,28 +320,28 @@ def parse_opt():
 parser = argparse.ArgumentParser()
- parser.add_argument('--data', type=str, default=ROOT / 'data/coco128.yaml', help='dataset.yaml path')
- parser.add_argument('--weights', nargs='+', type=str, default=ROOT / 'yolov5s.pt', help='model.pt path(s)')
- parser.add_argument('--batch-size', type=int, default=32, help='batch size')
- parser.add_argument('--imgsz', '--img', '--img-size', type=int, default=640, help='inference size (pixels)')
- parser.add_argument('--conf-thres', type=float, default=0.001, help='confidence threshold')
- parser.add_argument('--iou-thres', type=float, default=0.6, help='NMS IoU threshold')
- parser.add_argument('--task', default='val', help='train, val, test, speed or study')
- parser.add_argument('--device', default='', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
- parser.add_argument('--single-cls', action='store_true', help='treat as single-class dataset')
- parser.add_argument('--augment', action='store_true', help='augmented inference')
- parser.add_argument('--verbose', action='store_true', help='report mAP by class')
- parser.add_argument('--save-txt', action='store_true', help='save results to *.txt')
- parser.add_argument('--save-hybrid', action='store_true', help='save label+prediction hybrid results to *.txt')
- parser.add_argument('--save-conf', action='store_true', help='save confidences in --save-txt labels')
- parser.add_argument('--save-json', action='store_true', help='save a COCO-JSON results file')
- parser.add_argument('--project', default=ROOT / 'runs/val', help='save to project/name')
- parser.add_argument('--name', default='exp', help='save to project/name')
- parser.add_argument('--exist-ok', action='store_true', help='existing project/name ok, do not increment')
- parser.add_argument('--half', action='store_true', help='use FP16 half-precision inference')
+ parser.add_argument("--data", type=str, default=ROOT / "data/coco128.yaml", help="dataset.yaml path")
+ parser.add_argument("--weights", nargs="+", type=str, default=ROOT / "yolov5s.pt", help="model.pt path(s)")
+ parser.add_argument("--batch-size", type=int, default=32, help="batch size")
+ parser.add_argument("--imgsz", "--img", "--img-size", type=int, default=640, help="inference size (pixels)")
+ parser.add_argument("--conf-thres", type=float, default=0.001, help="confidence threshold")
+ parser.add_argument("--iou-thres", type=float, default=0.6, help="NMS IoU threshold")
+ parser.add_argument("--task", default="val", help="train, val, test, speed or study")
+ parser.add_argument("--device", default="", help="cuda device, i.e. 0 or 0,1,2,3 or cpu")
0 or 0,1,2,3 or cpu") + parser.add_argument("--single-cls", action="store_true", help="treat as single-class dataset") + parser.add_argument("--augment", action="store_true", help="augmented inference") + parser.add_argument("--verbose", action="store_true", help="report mAP by class") + parser.add_argument("--save-txt", action="store_true", help="save results to *.txt") + parser.add_argument("--save-hybrid", action="store_true", help="save label+prediction hybrid results to *.txt") + parser.add_argument("--save-conf", action="store_true", help="save confidences in --save-txt labels") + parser.add_argument("--save-json", action="store_true", help="save a COCO-JSON results file") + parser.add_argument("--project", default=ROOT / "runs/val", help="save to project/name") + parser.add_argument("--name", default="exp", help="save to project/name") + parser.add_argument("--exist-ok", action="store_true", help="existing project/name ok, do not increment") + parser.add_argument("--half", action="store_true", help="use FP16 half-precision inference") opt = parser.parse_args() opt.data = check_yaml(opt.data) # check YAML - opt.save_json |= opt.data.endswith('coco.yaml') + opt.save_json |= opt.data.endswith("coco.yaml") opt.save_txt |= opt.save_hybrid print_args(FILE.stem, opt) return opt @@ -328,30 +349,48 @@ def parse_opt(): def main(opt): set_logging() - check_requirements(exclude=('tensorboard', 'thop')) + check_requirements(exclude=("tensorboard", "thop")) - if opt.task in ('train', 'val', 'test'): # run normally + if opt.task in ("train", "val", "test"): # run normally run(**vars(opt)) - elif opt.task == 'speed': # speed benchmarks + elif opt.task == "speed": # speed benchmarks # python val.py --task speed --data coco.yaml --batch 1 --weights yolov5n.pt yolov5s.pt... for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]: - run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=opt.imgsz, conf_thres=.25, iou_thres=.45, - device=opt.device, save_json=False, plots=False) - - elif opt.task == 'study': # run over a range of settings and save/plot + run( + opt.data, + weights=w, + batch_size=opt.batch_size, + imgsz=opt.imgsz, + conf_thres=0.25, + iou_thres=0.45, + device=opt.device, + save_json=False, + plots=False, + ) + + elif opt.task == "study": # run over a range of settings and save/plot # python val.py --task study --data coco.yaml --iou 0.7 --weights yolov5n.pt yolov5s.pt... 
 x = list(range(256, 1536 + 128, 128)) # x axis (image sizes)
 for w in opt.weights if isinstance(opt.weights, list) else [opt.weights]:
- f = f'study_{Path(opt.data).stem}_{Path(w).stem}.txt' # filename to save to
+ f = f"study_{Path(opt.data).stem}_{Path(w).stem}.txt" # filename to save to
 y = [] # y axis
 for i in x: # img-size
- print(f'\nRunning {f} point {i}...')
- r, _, t = run(opt.data, weights=w, batch_size=opt.batch_size, imgsz=i, conf_thres=opt.conf_thres,
- iou_thres=opt.iou_thres, device=opt.device, save_json=opt.save_json, plots=False)
+ print(f"\nRunning {f} point {i}...")
+ r, _, t = run(
+ opt.data,
+ weights=w,
+ batch_size=opt.batch_size,
+ imgsz=i,
+ conf_thres=opt.conf_thres,
+ iou_thres=opt.iou_thres,
+ device=opt.device,
+ save_json=opt.save_json,
+ plots=False,
+ )
 y.append(r + t) # results and times
- np.savetxt(f, y, fmt='%10.4g') # save
- os.system('zip -r study.zip study_*.txt')
+ np.savetxt(f, y, fmt="%10.4g") # save
+ os.system("zip -r study.zip study_*.txt")
 plot_val_study(x=x) # plot

From e18ccb2be03cb07b3d867033ef75e51dfbb9e4c9 Mon Sep 17 00:00:00 2001
From: UltralyticsAssistant
Date: Sat, 19 Apr 2025 09:16:32 +0000
Subject: [PATCH 4/4] Auto-format by https://ultralytics.com/actions

---
 train.py | 1 +
 1 file changed, 1 insertion(+)

diff --git a/train.py b/train.py
index fb2952b747f9..be25b94ddbf4 100644
--- a/train.py
+++ b/train.py
@@ -212,6 +212,7 @@ def train(
 # Scheduler
 if opt.linear_lr:
+
 def lf(x):
 return (1 - x / (epochs - 1)) * (1.0 - hyp["lrf"]) + hyp["lrf"] # linear
 else:
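PATCH 4/4 only inserts a blank line before the lf definition, but that lambda is the whole linear-LR schedule: the multiplier falls linearly from 1.0 at epoch 0 to hyp["lrf"] at the last epoch. A sketch with illustrative values:

    epochs, lrf = 300, 0.1  # illustrative; train.py takes these from opt and hyp

    def lf(x):
        return (1 - x / (epochs - 1)) * (1.0 - lrf) + lrf  # linear decay: 1.0 -> lrf

    print(lf(0), lf((epochs - 1) / 2), lf(epochs - 1))  # 1.0 0.55 0.1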