palmprint-recognition/train_by_yolo.ipynb

{
"cells": [
{
"cell_type": "code",
"execution_count": 1,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Looking in indexes: https://mirrors.aliyun.com/pypi/simple\n",
"Requirement already satisfied: ultralytics in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (8.2.102)\n",
"Requirement already satisfied: numpy<2.0.0,>=1.23.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (1.26.4)\n",
"Requirement already satisfied: matplotlib>=3.3.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (3.9.2)\n",
"Requirement already satisfied: opencv-python>=4.6.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (4.10.0.84)\n",
"Requirement already satisfied: pillow>=7.1.2 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (10.4.0)\n",
"Requirement already satisfied: pyyaml>=5.3.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (6.0.2)\n",
"Requirement already satisfied: requests>=2.23.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (2.32.3)\n",
"Requirement already satisfied: scipy>=1.4.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (1.14.1)\n",
"Requirement already satisfied: torch>=1.8.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (2.4.1)\n",
"Requirement already satisfied: torchvision>=0.9.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (0.19.1)\n",
"Requirement already satisfied: tqdm>=4.64.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (4.66.5)\n",
"Requirement already satisfied: psutil in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (6.0.0)\n",
"Requirement already satisfied: py-cpuinfo in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (9.0.0)\n",
"Requirement already satisfied: pandas>=1.1.4 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (2.2.3)\n",
"Requirement already satisfied: seaborn>=0.11.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (0.13.2)\n",
"Requirement already satisfied: ultralytics-thop>=2.0.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from ultralytics) (2.0.8)\n",
"Requirement already satisfied: contourpy>=1.0.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (1.3.0)\n",
"Requirement already satisfied: cycler>=0.10 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (0.12.1)\n",
"Requirement already satisfied: fonttools>=4.22.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (4.54.1)\n",
"Requirement already satisfied: kiwisolver>=1.3.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (1.4.7)\n",
"Requirement already satisfied: packaging>=20.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (24.1)\n",
"Requirement already satisfied: pyparsing>=2.3.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (3.1.4)\n",
"Requirement already satisfied: python-dateutil>=2.7 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from matplotlib>=3.3.0->ultralytics) (2.9.0.post0)\n",
"Requirement already satisfied: pytz>=2020.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from pandas>=1.1.4->ultralytics) (2024.2)\n",
"Requirement already satisfied: tzdata>=2022.7 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from pandas>=1.1.4->ultralytics) (2024.2)\n",
"Requirement already satisfied: charset-normalizer<4,>=2 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from requests>=2.23.0->ultralytics) (3.3.2)\n",
"Requirement already satisfied: idna<4,>=2.5 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from requests>=2.23.0->ultralytics) (3.10)\n",
"Requirement already satisfied: urllib3<3,>=1.21.1 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from requests>=2.23.0->ultralytics) (2.2.3)\n",
"Requirement already satisfied: certifi>=2017.4.17 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from requests>=2.23.0->ultralytics) (2024.8.30)\n",
"Requirement already satisfied: filelock in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from torch>=1.8.0->ultralytics) (3.16.1)\n",
"Requirement already satisfied: typing-extensions>=4.8.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from torch>=1.8.0->ultralytics) (4.12.2)\n",
"Requirement already satisfied: sympy in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from torch>=1.8.0->ultralytics) (1.13.3)\n",
"Requirement already satisfied: networkx in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from torch>=1.8.0->ultralytics) (3.3)\n",
"Requirement already satisfied: jinja2 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from torch>=1.8.0->ultralytics) (3.1.4)\n",
"Requirement already satisfied: fsspec in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from torch>=1.8.0->ultralytics) (2024.9.0)\n",
"Requirement already satisfied: colorama in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from tqdm>=4.64.0->ultralytics) (0.4.6)\n",
"Requirement already satisfied: six>=1.5 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from python-dateutil>=2.7->matplotlib>=3.3.0->ultralytics) (1.16.0)\n",
"Requirement already satisfied: MarkupSafe>=2.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from jinja2->torch>=1.8.0->ultralytics) (2.1.5)\n",
"Requirement already satisfied: mpmath<1.4,>=1.1.0 in d:\\projects\\test\\palmprint-recognition\\.venv\\lib\\site-packages (from sympy->torch>=1.8.0->ultralytics) (1.3.0)\n",
"Note: you may need to restart the kernel to use updated packages.\n"
]
}
],
"source": [
"%pip install ultralytics"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {},
"outputs": [],
"source": [
"from ultralytics import YOLO\n"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {},
"outputs": [],
"source": [
"\n",
"# Load a model\n",
"model = YOLO(\"yolov8n-seg.pt\") # load a pretrained model (recommended for training)"
]
},
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Training"
]
},
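{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick optional check of the compute device: the trainer log below reports a CPU-only torch build (`torch-2.4.1+cpu`), which is why a single epoch takes about 12 minutes. With a CUDA-enabled torch install, `device=0` can be passed to `model.train()` (the `device` argument appears in the trainer settings below)."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import torch\n",
"\n",
"# Optional sanity check: this environment has a CPU-only torch build, so\n",
"# training runs on the CPU; with a CUDA build, pass device=0 to model.train().\n",
"print(\"torch:\", torch.__version__)\n",
"print(\"CUDA available:\", torch.cuda.is_available())"
]
},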
{
"cell_type": "code",
"execution_count": 6,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Ultralytics YOLOv8.2.102 Python-3.10.11 torch-2.4.1+cpu CPU (AMD Ryzen 7 5700G with Radeon Graphics)\n",
"\u001b[34m\u001b[1mengine\\trainer: \u001b[0mtask=segment, mode=train, model=yolov8n-seg.pt, data=data.yaml, epochs=1, time=None, patience=100, batch=16, imgsz=640, save=True, save_period=-1, cache=False, device=None, workers=8, project=None, name=train8, exist_ok=False, pretrained=True, optimizer=auto, verbose=True, seed=0, deterministic=True, single_cls=False, rect=False, cos_lr=False, close_mosaic=10, resume=False, amp=True, fraction=1.0, profile=False, freeze=None, multi_scale=False, overlap_mask=True, mask_ratio=4, dropout=0.0, val=True, split=val, save_json=False, save_hybrid=False, conf=None, iou=0.7, max_det=300, half=False, dnn=False, plots=True, source=None, vid_stride=1, stream_buffer=False, visualize=False, augment=False, agnostic_nms=False, classes=None, retina_masks=False, embed=None, show=False, save_frames=False, save_txt=False, save_conf=False, save_crop=False, show_labels=True, show_conf=True, show_boxes=True, line_width=None, format=torchscript, keras=False, optimize=False, int8=False, dynamic=False, simplify=True, opset=None, workspace=4, nms=False, lr0=0.01, lrf=0.01, momentum=0.937, weight_decay=0.0005, warmup_epochs=3.0, warmup_momentum=0.8, warmup_bias_lr=0.1, box=7.5, cls=0.5, dfl=1.5, pose=12.0, kobj=1.0, label_smoothing=0.0, nbs=64, hsv_h=0.015, hsv_s=0.7, hsv_v=0.4, degrees=0.0, translate=0.1, scale=0.5, shear=0.0, perspective=0.0, flipud=0.0, fliplr=0.5, bgr=0.0, mosaic=1.0, mixup=0.0, copy_paste=0.0, auto_augment=randaugment, erasing=0.4, crop_fraction=1.0, cfg=None, tracker=botsort.yaml, save_dir=d:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\n",
"\n",
" from n params module arguments \n",
" 0 -1 1 464 ultralytics.nn.modules.conv.Conv [3, 16, 3, 2] \n",
" 1 -1 1 4672 ultralytics.nn.modules.conv.Conv [16, 32, 3, 2] \n",
" 2 -1 1 7360 ultralytics.nn.modules.block.C2f [32, 32, 1, True] \n",
" 3 -1 1 18560 ultralytics.nn.modules.conv.Conv [32, 64, 3, 2] \n",
" 4 -1 2 49664 ultralytics.nn.modules.block.C2f [64, 64, 2, True] \n",
" 5 -1 1 73984 ultralytics.nn.modules.conv.Conv [64, 128, 3, 2] \n",
" 6 -1 2 197632 ultralytics.nn.modules.block.C2f [128, 128, 2, True] \n",
" 7 -1 1 295424 ultralytics.nn.modules.conv.Conv [128, 256, 3, 2] \n",
" 8 -1 1 460288 ultralytics.nn.modules.block.C2f [256, 256, 1, True] \n",
" 9 -1 1 164608 ultralytics.nn.modules.block.SPPF [256, 256, 5] \n",
" 10 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 11 [-1, 6] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 12 -1 1 148224 ultralytics.nn.modules.block.C2f [384, 128, 1] \n",
" 13 -1 1 0 torch.nn.modules.upsampling.Upsample [None, 2, 'nearest'] \n",
" 14 [-1, 4] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 15 -1 1 37248 ultralytics.nn.modules.block.C2f [192, 64, 1] \n",
" 16 -1 1 36992 ultralytics.nn.modules.conv.Conv [64, 64, 3, 2] \n",
" 17 [-1, 12] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 18 -1 1 123648 ultralytics.nn.modules.block.C2f [192, 128, 1] \n",
" 19 -1 1 147712 ultralytics.nn.modules.conv.Conv [128, 128, 3, 2] \n",
" 20 [-1, 9] 1 0 ultralytics.nn.modules.conv.Concat [1] \n",
" 21 -1 1 493056 ultralytics.nn.modules.block.C2f [384, 256, 1] \n",
" 22 [15, 18, 21] 1 1004275 ultralytics.nn.modules.head.Segment [1, 32, 64, [64, 128, 256]] \n",
"YOLOv8n-seg summary: 261 layers, 3,263,811 parameters, 3,263,795 gradients, 12.1 GFLOPs\n",
"\n",
"Transferred 417/417 items from pretrained weights\n",
"Freezing layer 'model.22.dfl.conv.weight'\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mtrain: \u001b[0mScanning images\\train\\Birjand University Mobile Palmprint Database (BMPD)\\001... 0 images, 1289 backgrounds, 0 corrupt: 100%|██████████| 1289/1289 [00:01<00:00, 880.89it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mtrain: \u001b[0mWARNING No labels found in images\\train\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache. See https://docs.ultralytics.com/datasets for dataset formatting guidance.\n",
"\u001b[34m\u001b[1mtrain: \u001b[0mNew cache created: images\\train\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache\n",
"WARNING No labels found in images\\train\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache, training may not work correctly. See https://docs.ultralytics.com/datasets for dataset formatting guidance.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
"\u001b[34m\u001b[1mval: \u001b[0mScanning images\\val\\Birjand University Mobile Palmprint Database (BMPD)\\001... 0 images, 323 backgrounds, 0 corrupt: 100%|██████████| 323/323 [00:00<00:00, 873.37it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mval: \u001b[0mWARNING No labels found in images\\val\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache. See https://docs.ultralytics.com/datasets for dataset formatting guidance.\n",
"\u001b[34m\u001b[1mval: \u001b[0mNew cache created: images\\val\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache\n",
"WARNING No labels found in images\\val\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache, training may not work correctly. See https://docs.ultralytics.com/datasets for dataset formatting guidance.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Plotting labels to d:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\\labels.jpg... \n",
"zero-size array to reduction operation maximum which has no identity\n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m 'optimizer=auto' found, ignoring 'lr0=0.01' and 'momentum=0.937' and determining best 'optimizer', 'lr0' and 'momentum' automatically... \n",
"\u001b[34m\u001b[1moptimizer:\u001b[0m AdamW(lr=0.002, momentum=0.9) with parameter groups 66 weight(decay=0.0), 77 weight(decay=0.0005), 76 bias(decay=0.0)\n",
"Image sizes 640 train, 640 val\n",
"Using 0 dataloader workers\n",
"Logging results to \u001b[1md:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\u001b[0m\n",
"Starting training for 1 epochs...\n",
"\n",
" Epoch GPU_mem box_loss seg_loss cls_loss dfl_loss Instances Size\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" 1/1 0G 0 0 89.74 0 0 640: 100%|██████████| 81/81 [12:26<00:00, 9.21s/it]\n",
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 11/11 [01:27<00:00, 8.00s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" all 323 0 0 0 0 0 0 0 0 0\n",
"WARNING no labels found in segment set, can not compute metrics without labels\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"1 epochs completed in 0.232 hours.\n",
"Optimizer stripped from d:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\\weights\\last.pt, 6.8MB\n",
"Optimizer stripped from d:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\\weights\\best.pt, 6.8MB\n",
"\n",
"Validating d:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\\weights\\best.pt...\n",
"Ultralytics YOLOv8.2.102 Python-3.10.11 torch-2.4.1+cpu CPU (AMD Ryzen 7 5700G with Radeon Graphics)\n",
"YOLOv8n-seg summary (fused): 195 layers, 3,258,259 parameters, 0 gradients, 12.0 GFLOPs\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 11/11 [01:19<00:00, 7.27s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" all 323 0 0 0 0 0 0 0 0 0\n",
"WARNING no labels found in segment set, can not compute metrics without labels\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Speed: 3.2ms preprocess, 127.8ms inference, 0.0ms loss, 4.8ms postprocess per image\n",
"Results saved to \u001b[1md:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train8\u001b[0m\n"
]
}
],
"source": [
"# Train the model\n",
"results = model.train(data=\"data.yaml\", epochs=1, imgsz=640)"
]
},
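{
"cell_type": "markdown",
"metadata": {},
"source": [
"The scan above found `0 images, 1289 backgrounds` in the train split and warned *No labels found*, so this run effectively trained on unlabeled background images and every metric comes out as zero. Ultralytics expects a `labels/` tree mirroring `images/`, with one `.txt` file per image containing normalized polygon rows (`class x1 y1 x2 y2 ...`) for segmentation, plus a dataset YAML naming the splits and classes. The sketch below writes a minimal example YAML and counts label files; the `images/` and `labels/` paths and the `palm` class name are assumptions, not values read from this project's `data.yaml`."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from pathlib import Path\n",
"\n",
"import yaml  # installed as an ultralytics dependency (pyyaml)\n",
"\n",
"# Sketch only: the paths and the \"palm\" class name are assumptions; adjust\n",
"# them to the real dataset layout before replacing data.yaml.\n",
"data = {\n",
"    \"path\": \".\",              # dataset root, relative to this notebook\n",
"    \"train\": \"images/train\",  # training images\n",
"    \"val\": \"images/val\",      # validation images\n",
"    \"names\": {0: \"palm\"},     # single segmentation class (assumed name)\n",
"}\n",
"Path(\"data_example.yaml\").write_text(yaml.safe_dump(data, sort_keys=False))\n",
"\n",
"# Every image needs a matching labels/<split>/<name>.txt polygon file;\n",
"# counting them explains the \"No labels found\" warning above.\n",
"for split in (\"train\", \"val\"):\n",
"    n_img = len(list(Path(f\"images/{split}\").rglob(\"*.jpg\")))\n",
"    n_lbl = len(list(Path(f\"labels/{split}\").rglob(\"*.txt\")))\n",
"    print(split, \"images:\", n_img, \"labels:\", n_lbl)"
]
},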
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Validation"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Ultralytics YOLOv8.2.102 Python-3.10.11 torch-2.4.1+cpu CPU (AMD Ryzen 7 5700G with Radeon Graphics)\n",
"YOLOv8n-seg summary (fused): 195 layers, 3,258,259 parameters, 0 gradients, 12.0 GFLOPs\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\u001b[34m\u001b[1mval: \u001b[0mScanning images\\val\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache... 0 images, 323 backgrounds, 0 corrupt: 100%|██████████| 323/323 [00:00<?, ?it/s]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"WARNING No labels found in images\\val\\Birjand University Mobile Palmprint Database (BMPD)\\001.cache, training may not work correctly. See https://docs.ultralytics.com/datasets for dataset formatting guidance.\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n",
" Class Images Instances Box(P R mAP50 mAP50-95) Mask(P R mAP50 mAP50-95): 100%|██████████| 21/21 [01:16<00:00, 3.66s/it]"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
" all 323 0 0 0 0 0 0 0 0 0\n",
"WARNING no labels found in segment set, can not compute metrics without labels\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"\n"
]
},
{
"name": "stdout",
"output_type": "stream",
"text": [
"Speed: 3.0ms preprocess, 116.4ms inference, 0.0ms loss, 4.8ms postprocess per image\n",
"Results saved to \u001b[1md:\\Projects\\test\\palmprint-recognition\\runs\\segment\\train82\u001b[0m\n"
]
},
{
"data": {
"text/plain": [
"array([], dtype=float64)"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"# Load a model\n",
"# model = YOLO(\"yolov8n-seg.pt\") # load an official model\n",
"# model = YOLO(\"path/to/best.pt\") # load a custom model\n",
"\n",
"# Validate the model\n",
"metrics = model.val()  # no arguments needed; dataset and settings are remembered\n",
"metrics.box.map    # mAP50-95(B)\n",
"metrics.box.map50  # mAP50(B)\n",
"metrics.box.map75  # mAP75(B)\n",
"metrics.box.maps   # per-category mAP50-95(B) as an array\n",
"metrics.seg.map    # mAP50-95(M)\n",
"metrics.seg.map50  # mAP50(M)\n",
"metrics.seg.map75  # mAP75(M)\n",
"metrics.seg.maps   # per-category mAP50-95(M) as an array"
]
},
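{
"cell_type": "markdown",
"metadata": {},
"source": [
"In a notebook only the last expression of the cell above is displayed, so the earlier metric accesses show nothing. A small sketch that prints the box and mask summaries explicitly; the values are all zero here because the validation split has no labels."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Print the validation summary explicitly instead of relying on the\n",
"# notebook displaying only the last expression of the previous cell.\n",
"print(\"box  mAP50-95:\", metrics.box.map)\n",
"print(\"box  mAP50   :\", metrics.box.map50)\n",
"print(\"mask mAP50-95:\", metrics.seg.map)\n",
"print(\"mask mAP50   :\", metrics.seg.map50)\n",
"print(\"per-class box  mAP50-95:\", metrics.box.maps)\n",
"print(\"per-class mask mAP50-95:\", metrics.seg.maps)"
]
},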
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Testing"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"\n",
"image 1/1 d:\\Projects\\test\\palmprint-recognition\\2.jpg: 640x480 (no detections), 64.5ms\n",
"Speed: 5.0ms preprocess, 64.5ms inference, 0.0ms postprocess per image at shape (1, 3, 640, 480)\n"
]
}
],
"source": [
"# Load a model\n",
"# model = YOLO(\"yolov8n-seg.pt\") # load an official model\n",
"# model = YOLO(\"path/to/best.pt\") # load a custom model\n",
"\n",
"# Predict with the model\n",
"results = model(\"2.jpg\") "
]
},
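{
"cell_type": "markdown",
"metadata": {},
"source": [
"`model(\"2.jpg\")` returns a list of `Results` objects; here it reports *(no detections)* because the model was trained without labels. The sketch below shows one common way to inspect the results and save an annotated copy of the prediction; the `pred_2.jpg` output name is arbitrary."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"import cv2  # opencv-python is installed as an ultralytics dependency\n",
"\n",
"# Inspect the Results objects; with this run nothing is detected, so the\n",
"# box/mask counts are zero and the annotated image matches the input.\n",
"for r in results:\n",
"    print(\"boxes:\", 0 if r.boxes is None else len(r.boxes))\n",
"    print(\"masks:\", 0 if r.masks is None else len(r.masks))\n",
"    annotated = r.plot()  # BGR ndarray with any boxes/masks drawn\n",
"    cv2.imwrite(\"pred_2.jpg\", annotated)  # output filename is arbitrary"
]
},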
{
"cell_type": "markdown",
"metadata": {},
"source": [
"## Save"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"model.save(\"export/palmprint_seg.pt\")"
]
}
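,
{
"cell_type": "markdown",
"metadata": {},
"source": [
"A quick check that the saved weights reload cleanly for inference, mirroring the commented-out \"load a custom model\" line earlier; the reloaded path comes from the `model.save()` call above."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Reload the weights written above and confirm they still run inference.\n",
"reloaded = YOLO(\"export/palmprint_seg.pt\")\n",
"check = reloaded(\"2.jpg\")\n",
"print(len(check), \"result object(s)\")"
]
}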
],
"metadata": {
"kernelspec": {
"display_name": ".venv",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.11"
}
},
"nbformat": 4,
"nbformat_minor": 2
}