
keypoint.ipynb
{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c691ce2d",
   "metadata": {},
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "import torch\n",
    "import cv2\n",
    "from torchvision import transforms\n",
    "import numpy as np\n",
    "from utils.datasets import letterbox\n",
    "from utils.general import non_max_suppression_kpt\n",
    "from utils.plots import output_to_keypoint, plot_skeleton_kpts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d17fb9ee",
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
  28. "weigths = torch.load('yolov7-w6-pose.pt')\n",
  29. "model = weigths['model']\n",
  30. "model = model.half().to(device)\n",
  31. "_ = model.eval()"
  32. ]
  33. },
  34. {
  35. "cell_type": "code",
  36. "execution_count": null,
  37. "id": "27396504",
  38. "metadata": {},
  39. "outputs": [],
  40. "source": [
  41. "image = cv2.imread('./person.jpg')\n",
  42. "image = letterbox(image, 960, stride=64, auto=True)[0]\n",
  43. "image_ = image.copy()\n",
  44. "image = transforms.ToTensor()(image)\n",
  45. "image = torch.tensor(np.array([image.numpy()]))\n",
  46. "image = image.to(device)\n",
  47. "image = image.half()\n",
  48. "\n",
  49. "output, _ = model(image)"
  50. ]
  51. },
  52. {
  53. "cell_type": "code",
  54. "execution_count": null,
  55. "id": "77662e7d",
  56. "metadata": {},
  57. "outputs": [],
  58. "source": [
  59. "output = non_max_suppression_kpt(output, 0.25, 0.65, nc=model.yaml['nc'], nkpt=model.yaml['nkpt'], kpt_label=True)\n",
  60. "output = output_to_keypoint(output)\n",
  61. "nimg = image[0].permute(1, 2, 0) * 255\n",
  62. "nimg = nimg.cpu().numpy().astype(np.uint8)\n",
  63. "nimg = cv2.cvtColor(nimg, cv2.COLOR_RGB2BGR)\n",
  64. "for idx in range(output.shape[0]):\n",
  65. " plot_skeleton_kpts(nimg, output[idx, 7:].T, 3)"
  66. ]
  67. },
  68. {
  69. "cell_type": "code",
  70. "execution_count": 5,
  71. "id": "7b0882c0",
  72. "metadata": {},
  73. "outputs": [
  74. {
  75. "data": {

With the development of artificial intelligence and big data, many fields have a growing need for automation tools. During the current epidemic prevention and control period, this project uses MindSpore to implement a YOLO model for object detection and semantic segmentation. It can run mask-wearing detection and pedestrian social-distancing detection on both images and video, enabling automated epidemic-control management of public spaces.
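
The detection code itself is not part of this preview, so the following is only a rough, framework-agnostic sketch of the social-distancing check described above: given per-pedestrian bounding boxes from a YOLO detector, it flags pairs standing too close. The function name, the pixel-to-metre scale, and the 1.5 m threshold are all hypothetical.

```python
import numpy as np

def flag_close_pairs(boxes, pixels_per_metre=100.0, min_distance_m=1.5):
    """Flag pedestrian pairs whose estimated ground distance is below the threshold.

    boxes: iterable of [x1, y1, x2, y2] person detections in pixel coordinates.
    """
    boxes = np.asarray(boxes, dtype=np.float32)
    # Use the bottom-centre of each box as a rough ground position for the person.
    feet = np.stack([(boxes[:, 0] + boxes[:, 2]) / 2, boxes[:, 3]], axis=1)
    violations = []
    for i in range(len(feet)):
        for j in range(i + 1, len(feet)):
            dist_m = float(np.linalg.norm(feet[i] - feet[j])) / pixels_per_metre
            if dist_m < min_distance_m:
                violations.append((i, j, dist_m))
    return violations

# Example: three detections; the first two stand roughly one metre apart.
boxes = [[100, 50, 180, 400], [200, 60, 280, 410], [700, 40, 780, 390]]
print(flag_close_pairs(boxes))  # -> [(0, 1, ~1.0)]
```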