Reorganize folder structure of multiedgetracking.
Remove sync function with edge for multiedgetracking controllers (not currently used).
Edit README.md and Dockerfiles.
Remove some explicit parameters from YAML files (use default value).
Add empty line at the end of the YAML and Python files.
Add comments to the multiedgeinference code.
Minor edits to the run.sh script.
Update Docker image version number in YAML files.

Signed-off-by: Vittorio Cozzolino <vittorio.cozzolino@huawei.com>
tags/v0.5.0
Vittorio Cozzolino 3 years ago
parent commit 5055543652
97 changed files with 552 additions and 385 deletions
1. +3 -3 examples/README.md
2. +8 -8 examples/build_image.sh
3. +2 -2 examples/multi-edge-inference-feature-extraction.Dockerfile
4. +2 -2 examples/multi-edge-inference-gpu-feature-extraction.Dockerfile
5. +2 -2 examples/multi-edge-inference-gpu-videoanalytics.Dockerfile
6. +2 -2 examples/multi-edge-inference-reid.Dockerfile
7. +2 -2 examples/multi-edge-inference-videoanalytics.Dockerfile
8. +18 -18 examples/multiedgeinference/pedestrian_tracking/README.md
9. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/__init__.py
10. +5 -5 examples/multiedgeinference/pedestrian_tracking/detection/model/__init__.py
11. +17 -4 examples/multiedgeinference/pedestrian_tracking/detection/model/bytetracker.py
12. +30 -10 examples/multiedgeinference/pedestrian_tracking/detection/worker.py
13. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/__init__.py
14. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/exps/__init__.py
15. +25 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/exps/yolox_s_mix_det.py
16. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/__init__.py
17. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/__init__.py
18. +3 -1 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/base_exp.py
19. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/build.py
20. +78 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/yolox_base.py
21. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/__init__.py
22. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/darknet.py
23. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/losses.py
24. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/network_blocks.py
25. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolo_fpn.py
26. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolo_head.py
27. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolo_pafpn.py
28. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolox.py
29. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/basetrack.py
30. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/byte_tracker.py
31. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/kalman_filter.py
32. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/matching.py
33. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/__init__.py
34. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/boxes.py
35. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/metric.py
36. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/model_utils.py
37. +0 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/setup_env.py
38. +0 -0 examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/IBNMeta.py
39. +0 -0 examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/MetaModules.py
40. +0 -0 examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/__init__.py
41. +0 -0 examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/resMeta.py
42. +12 -6 examples/multiedgeinference/pedestrian_tracking/feature_extraction/worker.py
43. +1 -1 examples/multiedgeinference/pedestrian_tracking/reid/store_result.py
44. +26 -13 examples/multiedgeinference/pedestrian_tracking/reid/worker.py
45. +0 -0 examples/multiedgeinference/pedestrian_tracking/tutorial/1.jpg
46. +0 -0 examples/multiedgeinference/pedestrian_tracking/tutorial/2.jpg
47. +0 -0 examples/multiedgeinference/pedestrian_tracking/tutorial/arch.png
48. +0 -0 examples/multiedgeinference/pedestrian_tracking/tutorial/cleanup.sh
49. +0 -0 examples/multiedgeinference/pedestrian_tracking/tutorial/deploy.sh
50. +2 -2 examples/multiedgeinference/pedestrian_tracking/tutorial/run.sh
51. +1 -1 examples/multiedgeinference/pedestrian_tracking/yaml/feature-extraction-service.yaml
52. +0 -0 examples/multiedgeinference/pedestrian_tracking/yaml/kafka/kafkabrk.yaml
53. +0 -0 examples/multiedgeinference/pedestrian_tracking/yaml/kafka/kafkasvc.yaml
54. +0 -0 examples/multiedgeinference/pedestrian_tracking/yaml/kafka/zoodeploy.yaml
55. +0 -0 examples/multiedgeinference/pedestrian_tracking/yaml/kafka/zooservice.yaml
56. +7 -7 examples/multiedgeinference/pedestrian_tracking/yaml/models/model_detection.yaml
57. +2 -2 examples/multiedgeinference/pedestrian_tracking/yaml/models/model_m3l.yaml
58. +0 -0 examples/multiedgeinference/pedestrian_tracking/yaml/pv/reid_volume.yaml
59. +0 -0 examples/multiedgeinference/pedestrian_tracking/yaml/pvc/reid-volume-claim.yaml
60. +1 -3 examples/multiedgeinference/pedestrian_tracking/yaml/reid-job.yaml
61. +4 -10 examples/multiedgeinference/pedestrian_tracking/yaml/video-analytics-job.yaml
62. +0 -13 examples/multiedgetracking/detection/yolox/exps/yolox_s_mix_det.py
63. +0 -41 examples/multiedgetracking/detection/yolox/yolox/exp/yolox_base.py
64. +2 -2 lib/requirements.dev.txt
65. +6 -6 lib/sedna/algorithms/__init__.py
66. +8 -2 lib/sedna/algorithms/optical_flow/__init__.py
67. +1 -1 lib/sedna/backend/torch/__init__.py
68. +3 -4 lib/sedna/core/base.py
69. +0 -0 lib/sedna/core/multi_edge_inference/__init__.py
70. +41 -6 lib/sedna/core/multi_edge_inference/components/__init__.py
71. +48 -5 lib/sedna/core/multi_edge_inference/components/detector.py
72. +90 -0 lib/sedna/core/multi_edge_inference/components/feature_extraction.py
73. +41 -5 lib/sedna/core/multi_edge_inference/components/reid.py
74. +4 -0 lib/sedna/core/multi_edge_inference/data_classes.py
75. +10 -20 lib/sedna/core/multi_edge_inference/plugins/__init__.py
76. +3 -3 lib/sedna/core/multi_edge_inference/plugins/registered.py
77. +0 -0 lib/sedna/core/multi_edge_inference/utils.py
78. +0 -47 lib/sedna/core/multi_edge_tracking/components/feature_extraction.py
79. +1 -1 lib/sedna/datasources/kafka/kafka_manager.py
80. +1 -1 lib/sedna/datasources/kafka/producer.py
81. +0 -0 lib/sedna/service/multi_edge_inference/__init__.py
82. +0 -0 lib/sedna/service/multi_edge_inference/interface/__init__.py
83. +1 -1 lib/sedna/service/multi_edge_inference/interface/detection_endpoint.py
84. +0 -0 lib/sedna/service/multi_edge_inference/interface/fe_endpoint.py
85. +2 -2 lib/sedna/service/multi_edge_inference/interface/reid_endpoint.py
86. +0 -0 lib/sedna/service/multi_edge_inference/server/__init__.py
87. +1 -1 lib/sedna/service/multi_edge_inference/server/detection.py
88. +0 -0 lib/sedna/service/multi_edge_inference/server/feature_extraction.py
89. +2 -2 lib/sedna/service/multi_edge_inference/server/reid.py
90. +2 -20 pkg/globalmanager/controllers/featureextraction/downstream.go
91. +5 -9 pkg/globalmanager/controllers/featureextraction/featureextractionservice.go
92. +1 -1 pkg/globalmanager/controllers/featureextraction/upstream.go
93. +2 -20 pkg/globalmanager/controllers/reid/downstream.go
94. +18 -18 pkg/globalmanager/controllers/reid/reidjob.go
95. +2 -15 pkg/globalmanager/controllers/reid/upstream.go
96. +2 -20 pkg/globalmanager/controllers/videoanalytics/downstream.go
97. +2 -15 pkg/globalmanager/controllers/videoanalytics/upstream.go

+3 -3 examples/README.md

@@ -7,7 +7,7 @@ This repository is home to the following features of examples:
* [Incremental Learning](#incremental-learning)
* [Federated Learning](#federated-learning)
* [Lifelong Learning](#lifelong-learning)
* [Pedestrians ReID](#pedestrian-reid)
* [Multi-Edge Inference](#multi-edge-inference)
* [Shared Storage](#shared-storage)
### Joint Inference
@@ -22,8 +22,8 @@ Example: [Using Federated Learning Job in Surface Defect Detection Scenario](./f
### Lifelong Learning
Example: [Using Lifelong Learning Job in Thermal Comfort Prediction Scenario](./lifelong_learning/atcii/README.md)
### Pedestrian ReID
Example: [Using ReID to Track an Infected COVID-19 Carrier in Pandemic Scenario](./multiedgetracking/tutorial/README.md)
### Multi-Edge Inference
Example: [Using ReID to Track an Infected COVID-19 Carrier in Pandemic Scenario](./multiedgeinference/pedestrian_tracking/README.md)
### Shared Storage
| Support Protocols |Support Features| Examples |Release|


+8 -8 examples/build_image.sh

@@ -57,15 +57,15 @@ fi

cd "$(dirname "${BASH_SOURCE[0]}")"

IMAGE_TAG=${IMAGE_TAG:-v0.4.0}
IMAGE_TAG=${GIT_TAG:-v0.5.0}
EXAMPLE_REPO_PREFIX=${IMAGE_REPO}/sedna-example-

dockerfiles_multiedgetracking=(
multi-edge-tracking-feature-extraction.Dockerfile
# multi-edge-tracking-gpu-feature-extraction.Dockerfile
# multi-edge-tracking-gpu-videoanalytics.Dockerfile
multi-edge-tracking-reid.Dockerfile
multi-edge-tracking-videoanalytics.Dockerfile
dockerfiles_multiedgeinference=(
multi-edge-inference-feature-extraction.Dockerfile
# multi-edge-inference-gpu-feature-extraction.Dockerfile
# multi-edge-inference-gpu-videoanalytics.Dockerfile
multi-edge-inference-reid.Dockerfile
multi-edge-inference-videoanalytics.Dockerfile
)

dockerfiles_federated_learning=(
@@ -92,7 +92,7 @@ incremental-learning-helmet-detection.Dockerfile
for tp in ${type[@]}; do
if [[ "$tp" == "all" ]]; then
dockerfiles+=(
"${dockerfiles_multiedgetracking[@]}"
"${dockerfiles_multiedgeinference[@]}"
"${dockerfiles_federated_learning[@]}"
"${dockerfiles_joint_inference[@]}"
"${dockerfiles_lifelong_learning[@]}"


examples/multi-edge-tracking-feature-extraction.Dockerfile → examples/multi-edge-inference-feature-extraction.Dockerfile

@@ -32,10 +32,10 @@ WORKDIR /home/work
COPY ./lib /home/lib

# Add M3L imports
COPY examples/multiedgetracking/feature_extraction /home/work
COPY examples/multiedgeinference/pedestrian_tracking/feature_extraction /home/work

ENV PYTHONPATH "${PYTHONPATH}:/home/work"
ENV LOG_LEVEL="INFO"

ENTRYPOINT ["python"]
CMD ["worker.py"]
CMD ["worker.py"]

examples/multi-edge-tracking-gpu-feature-extraction.Dockerfile → examples/multi-edge-inference-gpu-feature-extraction.Dockerfile

@@ -38,10 +38,10 @@ WORKDIR /home/work
COPY ./lib /home/lib

# Add M3L imports
COPY examples/multiedgetracking/feature_extraction /home/work
COPY examples/multiedgeinference/pedestrian_tracking/feature_extraction /home/work

ENV PYTHONPATH "${PYTHONPATH}:/home/work"
ENV LOG_LEVEL="INFO"

ENTRYPOINT ["python3.8"]
CMD ["worker.py"]
CMD ["worker.py"]

examples/multi-edge-tracking-gpu-videoanalytics.Dockerfile → examples/multi-edge-inference-gpu-videoanalytics.Dockerfile

@@ -98,9 +98,9 @@ RUN pip install onnx protobuf==3.16.0
WORKDIR /home/work
COPY ./lib /home/lib

COPY examples/multiedgetracking/detection/ /home/work/
COPY examples/multiedgeinference/pedestrian_tracking/detection/ /home/work/

ENV LOG_LEVEL="INFO"

ENTRYPOINT ["python"]
CMD ["worker.py"]
CMD ["worker.py"]

examples/multi-edge-tracking-reid.Dockerfile → examples/multi-edge-inference-reid.Dockerfile

@@ -28,9 +28,9 @@ ENV PYTHONPATH "${PYTHONPATH}:/home/lib"
WORKDIR /home/work
COPY ./lib /home/lib

COPY examples/multiedgetracking/reid /home/work/
COPY examples/multiedgeinference/pedestrian_tracking/reid /home/work/

ENV LOG_LEVEL="INFO"

ENTRYPOINT ["python"]
CMD ["worker.py"]
CMD ["worker.py"]

examples/multi-edge-tracking-videoanalytics.Dockerfile → examples/multi-edge-inference-videoanalytics.Dockerfile

@@ -36,9 +36,9 @@ ENV PYTHONPATH "${PYTHONPATH}:/home/lib"
WORKDIR /home/work
COPY ./lib /home/lib

COPY examples/multiedgetracking/detection /home/work/
COPY examples/multiedgeinference/pedestrian_tracking/detection /home/work/

ENV LOG_LEVEL="DEBUG"

ENTRYPOINT ["python"]
CMD ["worker.py"]
CMD ["worker.py"]

examples/multiedgetracking/tutorial/README.md → examples/multiedgeinference/pedestrian_tracking/README.md

@@ -5,8 +5,8 @@ Estimated completion time: ~60-100 mins.
Requirements:
- K8s cluster
- Sedna
- KubeEdge
- Internet connection to download the container images
- Optional: KubeEdge
- Optional: multi-node cluster

# Introduction
@@ -17,43 +17,43 @@ However, our ReID solution is much more advanced as it does not require a galler

The example images below show the ability of our system to re-identify a potential carrier of the virus and detect close contact proximity risk.

![image info](./1.jpg) ![image info](./2.jpg)
![image info](tutorial/1.jpg) ![image info](tutorial/2.jpg)

# System Architecture and Components

The image below shows the system architecture and its simplified workflow:

![image info](./arch.png)
![image info](tutorial/arch.png)

## Components

**ReID Job**: it performs the ReID.

- Available for CPU only.
- Folder with specific implementation `examples/multiedgetracking/reid`.
- Component specs in `lib/sedna/core/multi_edge_tracking/components/reid.py`.
- Defined by the Dockerfile `multi-edge-tracking-reid.Dockerfile`.
- Folder with specific implementation `examples/multiedgeinference/pedestrian_tracking/reid`.
- Component specs in `lib/sedna/core/multi_edge_inference/components/reid.py`.
- Defined by the Dockerfile `multi-edge-inference-reid.Dockerfile`.

**Feature Extraction Service**: it performs the extraction of the features necessary for the ReID step.

- Available for CPU and GPU.
- Folder with specific implementation details `examples/multiedgetracking/feature_extraction`.
- Component specs in `lib/sedna/core/multi_edge_tracking/components/feature_extraction.py`.
- Defined by the Dockerfile `multi-edge-tracking-feature-extraction.Dockerfile` or `multi-edge-tracking-gpu-feature-extraction.Dockerfile`.
- Folder with specific implementation details `examples/multiedgeinference/pedestrian_tracking/feature_extraction`.
- Component specs in `lib/sedna/core/multi_edge_inference/components/feature_extraction.py`.
- Defined by the Dockerfile `multi-edge-inference-feature-extraction.Dockerfile` or `multi-edge-inference-gpu-feature-extraction.Dockerfile`.
- It loads the model defined by the CRD in the YAML file `yaml/models/model_m3l.yaml`.

**VideoAnalytics Job**: it performs tracking of objects (pedestrians) in a video.

- Available for CPU and GPU.
- Folder with specific implementation details `examples/multiedgetracking/detection`.
- AI model code in `examples/multiedgetracking/detection/estimator/bytetracker.py`.
- Component specs in `lib/sedna/core/multi_edge_tracking/components/detection.py`.
- Defined by the Dockerfile `multi-edge-tracking-videoanalytics.Dockerfile` or `multi-edge-tracking-gpu-videoanalytics.Dockerfile`.
- Folder with specific implementation details `examples/multiedgeinference/pedestrian_tracking/detection`.
- AI model code in `examples/multiedgeinference/pedestrian_tracking/detection/model/bytetracker.py`.
- Component specs in `lib/sedna/core/multi_edge_inference/components/detector.py`.
- Defined by the Dockerfile `multi-edge-inference-videoanalytics.Dockerfile` or `multi-edge-inference-gpu-videoanalytics.Dockerfile`.
- It loads the model defined by the CRD in the YAML file `yaml/models/model_detection.yaml`.

# Build Phase

Go to the `sedna/examples` directory and run: `./build_image.sh -r <your-docker-private-repo> multiedgetracking` to build the Docker images. Remember to **push** the images to your own Docker repository!
Go to the `sedna/examples` directory and run: `./build_image.sh -r <your-docker-private-repo> multiedgeinference` to build the Docker images. Remember to **push** the images to your own Docker repository!

Run `make crds` in the `SEDNA_HOME` and then register the new CRD in the K8S cluster with `make install crds` or:
- `kubectl create -f sedna/build/crd/sedna.io_featureextractionservices.yaml`
@@ -161,7 +161,7 @@ The provided YAML files are configured to run the feature extraction and ReID po

Download the sample video and query images which will be placed in the NFS folder:
```
wget http://obs.eu-central-201.myhuaweicloud.com/bucket-sedna.obs.eu-central-201.myhuaweicloud.com:80/test_video.zip?AWSAccessKeyId=GWQRKFEFDMSUHM36WN7V&Expires=1652284916&Signature=IFO8%2B55%2BxsOuqozaRipSoCuFUYA%3D
wget https://drive.google.com/file/d/1HTpzY09bQJe-68d09We3fUrd7REXK5j5/view?usp=sharing -O test_video.zip
unzip test_video.zip
sudo cp -r test_video/query /data/network_shared/reid/query #copy sample images to query folder
sudo cp test_video/test_video.mp4 /data/network_shared/reid/video/test_video.mp4 #copy sample video to video folder
@@ -192,11 +192,11 @@ Following, the application workflow is divided in 2 parts: analysis of the video
# Cleanup

Don't forget to delete the jobs once they are completed:
- `k delete -f multiedgetracking/yaml/video-analytics-job.yaml`
- `k delete -f multiedgetracking/yaml/reid-job.yaml`
- `k delete -f multiedgeinference/pedestrian_tracking/yaml/video-analytics-job.yaml`
- `k delete -f multiedgeinference/pedestrian_tracking/yaml/reid-job.yaml`

To also delete the feature extraction service:
- `k delete -f multiedgetracking/yaml/feature-extraction.yaml`
- `k delete -f multiedgeinference/pedestrian_tracking/yaml/feature-extraction-service.yaml`

# **Automated Installation**


examples/multiedgetracking/detection/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/__init__.py


examples/multiedgetracking/detection/estimator/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/model/__init__.py

@@ -16,15 +16,15 @@ import importlib

from sedna.common.log import LOGGER

def str_to_estimator_class(module_name=".", estimator_class="ByteTracker"):
def str_to_class(module_name=".", class_name="ByteTracker"):
"""Return a class type from a string reference"""
LOGGER.info(f"Dynamically loading estimator class {estimator_class}")
LOGGER.info(f"Dynamically loading class {class_name}")
try:
module_ = importlib.import_module(module_name + estimator_class.lower(), package="estimator")
module_ = importlib.import_module(module_name + class_name.lower(), package="model")
try:
class_ = getattr(module_, estimator_class)
class_ = getattr(module_, class_name)
except AttributeError:
LOGGER.error('Estimator class does not exist')
LOGGER.error('Class does not exist')
except ImportError:
LOGGER.error('Module does not exist')
return class_ or None
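
For reference, a minimal sketch of how the renamed helper is meant to be called from a worker; the class name and `video_id` argument follow the worker diff below, the rest is illustrative:

```python
from model import str_to_class

# Resolve the class type from its string name, then instantiate it.
model_class = str_to_class(class_name="ByteTracker")
tracker = model_class(video_id=0)
```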

examples/multiedgetracking/detection/estimator/bytetracker.py → examples/multiedgeinference/pedestrian_tracking/detection/model/bytetracker.py

@@ -18,9 +18,9 @@ import torch
import numpy as np
import cv2

from sedna.core.multi_edge_tracking.plugins import PluggableModel
from sedna.core.multi_edge_tracking.data_classes import OP_MODE, DetTrackResult
from sedna.core.multi_edge_tracking.utils import get_parameters
from sedna.core.multi_edge_inference.plugins import PluggableModel
from sedna.core.multi_edge_inference.data_classes import OP_MODE, DetTrackResult
from sedna.core.multi_edge_inference.utils import get_parameters
from sedna.common.log import LOGGER

# YOLOX imports
@@ -103,7 +103,9 @@ class ByteTracker(PluggableModel):


def forward(self, data):
""" Data is of List images type"""
""" Perform forward pass on with the provided nerual network.
'data' is a List of images.
"""
img_info = {}

@@ -179,6 +181,10 @@ class ByteTracker(PluggableModel):

def detection(self, data, output, img_info, det_time, frame_nr):
"""
Performs object detection.
"""

result = None

# Prepare image with boxes overlaid
@@ -213,6 +219,10 @@ class ByteTracker(PluggableModel):


def tracking(self, data, output, img_info, det_time, frame_nr):
"""
Performs object tracking.
"""

# initialize placeholders for the tracking data
online_tlwhs = []
online_ids = []
@@ -272,6 +282,9 @@ class ByteTracker(PluggableModel):
return self.tracking(data, outputs, img_info, det_time, frame_nr)

def predict(self, data, **kwargs):
"""
Main prediction function.
"""
tresult = []

start = time.time()

examples/multiedgetracking/detection/worker.py → examples/multiedgeinference/pedestrian_tracking/detection/worker.py

@@ -18,24 +18,24 @@ import os
import time
import cv2
from urllib.request import Request, urlopen
from estimator import str_to_estimator_class
from model import str_to_class
from sedna.common.log import LOGGER
from sedna.core.multi_edge_tracking.utils import get_parameters
from sedna.core.multi_edge_inference.utils import get_parameters
from sedna.datasources.obs.connector import OBSClientWrapper
from sedna.core.multi_edge_tracking.components.detector import ObjectDetector
from sedna.core.multi_edge_inference.components.detector import ObjectDetector

class Bootstrapper():
def __init__(self) -> None:
LOGGER.info("Creating Detection/Tracking Bootstrapper module")

self.estimator_class = get_parameters('estimator_class', "ByteTracker")
self.model = get_parameters('model', "ByteTracker")
self.hostname = get_parameters('hostname', "unknown")
self.fps = float(get_parameters('fps', 25))
self.batch_size = int(get_parameters('batch_size', 1))
self.video_id = get_parameters('video_id', 0)
self.video_address = get_parameters('video_address', "")

self.eclass = str_to_estimator_class(estimator_class=self.estimator_class)
self.model_class = str_to_class(class_name=self.model)

self.enable_obs = bool(util.strtobool(get_parameters('ENABLE_OBS', "False")))
@@ -45,6 +45,11 @@ class Bootstrapper():
self.service = None
def run(self):
"""
Entry point for the component. It decides how to process the video
source based on its kind.
"""

protocol = self.video_address.split(":")[0]
LOGGER.info(f"Detected video source protocol {protocol} for video source {self.video_address}.")

@@ -66,6 +71,9 @@ class Bootstrapper():
self.close()

def download_video(self):
"""
Downloads a video given its address.
"""
try:
req = Request(self.video_address, headers={'User-Agent': 'Mozilla/5.0'})
LOGGER.info("Video download complete")
@@ -80,6 +88,9 @@ class Bootstrapper():
LOGGER.error(f"Unable to download video file {ex}")

def connect_to_camera(self, stream_address):
"""
Connects to the video source camera.
"""
camera = None
while camera == None or not camera.isOpened():
try:
@@ -92,8 +103,11 @@ class Bootstrapper():
return camera

def process_video_from_disk(self, filename, timeout=20):
selected_estimator=self.eclass(video_id=self.video_id)
self.service = ObjectDetector(models=[selected_estimator])
"""
Processes a video loaded from disk.
"""
model=self.model_class(video_id=self.video_id)
self.service = ObjectDetector(models=[model])

cap = cv2.VideoCapture(filename)
index = 0
@@ -115,8 +129,11 @@ class Bootstrapper():
cap.release()

def process_video_from_stream(self, timeout=20):
selected_estimator=self.eclass(video_id=self.video_id)
self.service=ObjectDetector(models=[selected_estimator], asynchronous=True)
"""
Processes a video accessible through a stream.
"""
model=self.model_class(video_id=self.video_id)
self.service=ObjectDetector(models=[model], asynchronous=True)

nframe = 0
grabbed = False
@@ -144,6 +161,9 @@ class Bootstrapper():
camera.release()
def close(self, timeout=20):
"""
Exits the worker.
"""
while (time.time() - self.service.heartbeat) <= timeout:
LOGGER.debug(f"Waiting for more data from the feature extraction service..")
time.sleep(1)
@@ -154,4 +174,4 @@ class Bootstrapper():

if __name__ == '__main__':
bs = Bootstrapper()
bs.run()
bs.run()
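
A standalone sketch of the protocol check that run() performs before choosing a processing path; the split on ":" is taken from the diff above, while the exact protocol strings handled are an assumption:

```python
# Mirrors: protocol = self.video_address.split(":")[0]
video_address = "rtsp://RTSP_SERVER_IP/video/0"
protocol = video_address.split(":")[0]

if protocol == "rtsp":
    route = "process_video_from_stream"  # live stream, asynchronous service
elif protocol in ("http", "https"):
    route = "download_video, then process_video_from_disk"
else:
    route = "process_video_from_disk"    # local file on the mounted volume
print(protocol, "->", route)
```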

examples/multiedgetracking/detection/yolox/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/__init__.py

examples/multiedgetracking/detection/yolox/exps/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/exps/__init__.py

+25 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/exps/yolox_s_mix_det.py

@@ -0,0 +1,25 @@
# encoding: utf-8
import os
from yolox.yolox.exp import Exp as MyExp


class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 1
self.depth = 0.33
self.width = 0.50
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]
self.train_ann = "train.json"
self.val_ann = "train.json"
self.input_size = (608, 1088)
self.test_size = (608, 1088)
self.random_size = (12, 26)
self.max_epoch = 80
self.print_interval = 20
self.eval_interval = 5
self.test_conf = 0.001
self.nmsthre = 0.7
self.no_aug_epochs = 10
self.basic_lr_per_img = 0.001 / 64.0
self.warmup_epochs = 1

examples/multiedgetracking/detection/yolox/yolox/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/__init__.py

examples/multiedgetracking/detection/yolox/yolox/exp/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/__init__.py

examples/multiedgetracking/detection/yolox/yolox/exp/base_exp.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/base_exp.py

@@ -16,6 +16,9 @@ class BaseExp(metaclass=ABCMeta):

def __init__(self):
self.seed = None
self.output_dir = "./YOLOX_outputs"
self.print_interval = 100
self.eval_interval = 10

@abstractmethod
def get_model(self) -> Module:
@@ -29,4 +32,3 @@ class BaseExp(metaclass=ABCMeta):
if not k.startswith("_")
]
return None


examples/multiedgetracking/detection/yolox/yolox/exp/build.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/build.py


+78 -0 examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/exp/yolox_base.py

@@ -0,0 +1,78 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.

import torch
import torch.distributed as dist
import torch.nn as nn

import os

from .base_exp import BaseExp


class Exp(BaseExp):
def __init__(self):
super().__init__()

# ---------------- model config ---------------- #
self.num_classes = 80
self.depth = 1.00
self.width = 1.00

# ---------------- dataloader config ---------------- #
# set worker to 4 for shorter dataloader init time
self.data_num_workers = 4
self.input_size = (640, 640)
self.random_size = (14, 26)
self.train_ann = "instances_train2017.json"
self.val_ann = "instances_val2017.json"

# --------------- transform config ----------------- #
self.degrees = 10.0
self.translate = 0.1
self.scale = (0.1, 2)
self.mscale = (0.8, 1.6)
self.shear = 2.0
self.perspective = 0.0
self.enable_mixup = True

# -------------- training config --------------------- #
self.warmup_epochs = 5
self.max_epoch = 300
self.warmup_lr = 0
self.basic_lr_per_img = 0.01 / 64.0
self.scheduler = "yoloxwarmcos"
self.no_aug_epochs = 15
self.min_lr_ratio = 0.05
self.ema = True

self.weight_decay = 5e-4
self.momentum = 0.9
self.print_interval = 10
self.eval_interval = 10
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]

# ----------------- testing config ------------------ #
self.test_size = (640, 640)
self.test_conf = 0.001
self.nmsthre = 0.65

def get_model(self):
from yolox.yolox.models import YOLOPAFPN, YOLOX, YOLOXHead

def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03

if getattr(self, "model", None) is None:
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels)
self.model = YOLOX(backbone, head)

self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
return self.model
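
A minimal sketch of how this experiment config is typically consumed (assumes the example's vendored yolox package is importable, as in the `from yolox.yolox.exp import Exp` import used elsewhere in this diff; loading weights from yolox.pth is omitted):

```python
from yolox.yolox.exp import Exp

exp = Exp()
model = exp.get_model()  # builds YOLOX with a YOLOPAFPN backbone and a YOLOXHead
model.eval()             # inference mode; weights are normally loaded separately
print(exp.test_size, exp.test_conf, exp.nmsthre)  # (640, 640) 0.001 0.65
```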

examples/multiedgetracking/detection/yolox/yolox/models/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/__init__.py

examples/multiedgetracking/detection/yolox/yolox/models/darknet.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/darknet.py

examples/multiedgetracking/detection/yolox/yolox/models/losses.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/losses.py

examples/multiedgetracking/detection/yolox/yolox/models/network_blocks.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/network_blocks.py

examples/multiedgetracking/detection/yolox/yolox/models/yolo_fpn.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolo_fpn.py

examples/multiedgetracking/detection/yolox/yolox/models/yolo_head.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolo_head.py

examples/multiedgetracking/detection/yolox/yolox/models/yolo_pafpn.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolo_pafpn.py

examples/multiedgetracking/detection/yolox/yolox/models/yolox.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/models/yolox.py

examples/multiedgetracking/detection/yolox/yolox/tracker/basetrack.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/basetrack.py

examples/multiedgetracking/detection/yolox/yolox/tracker/byte_tracker.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/byte_tracker.py

examples/multiedgetracking/detection/yolox/yolox/tracker/kalman_filter.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/kalman_filter.py

examples/multiedgetracking/detection/yolox/yolox/tracker/matching.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/tracker/matching.py

examples/multiedgetracking/detection/yolox/yolox/utils/__init__.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/__init__.py

examples/multiedgetracking/detection/yolox/yolox/utils/boxes.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/boxes.py

examples/multiedgetracking/detection/yolox/yolox/utils/metric.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/metric.py

examples/multiedgetracking/detection/yolox/yolox/utils/model_utils.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/model_utils.py

examples/multiedgetracking/detection/yolox/yolox/utils/setup_env.py → examples/multiedgeinference/pedestrian_tracking/detection/yolox/yolox/utils/setup_env.py

examples/multiedgetracking/feature_extraction/M3L/IBNMeta.py → examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/IBNMeta.py

examples/multiedgetracking/feature_extraction/M3L/MetaModules.py → examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/MetaModules.py

examples/multiedgetracking/feature_extraction/M3L/__init__.py → examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/__init__.py

examples/multiedgetracking/feature_extraction/M3L/resMeta.py → examples/multiedgeinference/pedestrian_tracking/feature_extraction/M3L/resMeta.py

examples/multiedgetracking/feature_extraction/worker.py → examples/multiedgeinference/pedestrian_tracking/feature_extraction/worker.py

@@ -24,17 +24,20 @@ from functools import reduce
from threading import Thread

from sedna.common.log import LOGGER
from sedna.core.multi_edge_tracking.utils import get_parameters
from sedna.core.multi_edge_tracking.plugins import PluggableModel
from sedna.core.multi_edge_tracking.data_classes import DetTrackResult, Target
from sedna.core.multi_edge_tracking.components.feature_extraction import FEService
from sedna.core.multi_edge_inference.utils import get_parameters
from sedna.core.multi_edge_inference.plugins import PluggableModel
from sedna.core.multi_edge_inference.data_classes import DetTrackResult, Target
from sedna.core.multi_edge_inference.components.feature_extraction import FEService

os.environ['BACKEND_TYPE'] = 'TORCH'

class FeatureExtractionAI(PluggableModel):

def __init__(self, **kwargs):
# Initialize feature extraction module
"""
Initialize feature extraction module
"""
self.model = None

# Device and input parameters
@@ -73,6 +76,9 @@ class FeatureExtractionAI(PluggableModel):
self.model.eval()

def extract_features(self, data : List[DetTrackResult] ):
"""
Extract ReID features from the provided image.
"""
input_batch = None
j = 0
offset = 0
@@ -201,4 +207,4 @@ class Bootstrapper(Thread):
# Starting the FE service.
if __name__ == '__main__':
bs = Bootstrapper()
bs.run()
bs.run()

examples/multiedgetracking/reid/store_result.py → examples/multiedgeinference/pedestrian_tracking/reid/store_result.py

@@ -18,7 +18,7 @@ import cv2
from PIL import Image

from sedna.common.log import LOGGER
from sedna.core.multi_edge_tracking.data_classes import DetTrackResult
from sedna.core.multi_edge_inference.data_classes import DetTrackResult
from sedna.algorithms.reid.close_contact_estimation import ContactTracker

def create_results_folder(folder):

examples/multiedgetracking/reid/worker.py → examples/multiedgeinference/pedestrian_tracking/reid/worker.py

@@ -13,6 +13,7 @@
# limitations under the License.

from distutils import util
import pathlib
import time
import torch
import numpy as np
@@ -23,11 +24,13 @@ from store_result import save_image
from sedna.algorithms.reid.multi_img_matching import match_query_to_targets
from sedna.algorithms.reid.close_contact_estimation import ContactTracker
from sedna.common.log import LOGGER
from sedna.core.multi_edge_tracking.components.reid import ReID
from sedna.core.multi_edge_tracking.data_classes import DetTrackResult, OP_MODE, Target
from sedna.core.multi_edge_tracking.utils import get_parameters
from sedna.core.multi_edge_inference.components.reid import ReID
from sedna.core.multi_edge_inference.data_classes import DetTrackResult, OP_MODE, Target
from sedna.core.multi_edge_inference.utils import get_parameters
from sedna.datasources.obs.connector import OBSClientWrapper

MOUNT_PATH="/data/network_shared/reid"

class ReIDWorker():

def __init__(self, **kwargs):
@@ -35,12 +38,12 @@ class ReIDWorker():
self.op_mode = OP_MODE(get_parameters('op_mode', 'covid19'))
self.threshold = get_parameters('match_threshold', 0.75)
self.user_id = get_parameters('user_id', "DEFAULT")
self.query_images = str(get_parameters('query_images', "/data/query/sample.png")).split("|")
self.query_images = str(get_parameters('query_images', f"{MOUNT_PATH}/query/sample.png")).split("|")

self.target = None
self.targets_list : List[Target] = []

self.results_base_folder = "/data/images/"
self.results_base_folder = f"{MOUNT_PATH}/images/"

self.CT = ContactTracker(draw_top_view=False)
@@ -67,6 +70,9 @@ class ReIDWorker():
return [data]

def update_target(self, ldata):
"""
Updates the target for the ReID.
"""
LOGGER.info(f"Target updated for user {ldata[0].userid} with {len(ldata[0].features)} feature vectors!")
self.targets_list = ldata

@@ -114,6 +120,10 @@ class ReIDWorker():
return None

def tracking_no_gallery(self, det_track : DetTrackResult):
"""
Performs ReID without a gallery, using the results from the
tracking and feature extraction components.
"""
det_track.targetID = [-1] * len(det_track.bbox_coord)
for target in self.targets_list:
@@ -141,6 +151,9 @@ class ReIDWorker():
return det_track

def store_result(self, det_track : DetTrackResult):
"""
Stores ReID result on disk (and OBS, if enabled).
"""
try:
filename = save_image(det_track, self.CT, folder=f"{self.results_base_folder}{det_track.userID}/")
if self.enable_obs:
@@ -153,7 +166,6 @@ class Bootstrapper(Thread):
super().__init__()

self.daemon = True
self.folder = "/data/"
self.retry = 3
self.job = ReID(models=[ReIDWorker()], asynchronous=False)

@@ -161,16 +173,17 @@ class Bootstrapper(Thread):
LOGGER.info("Loading data from disk.")

while self.retry > 0:
files = self.job.get_files_list(self.folder)
files = self.job.get_files_list(f"{MOUNT_PATH}/")

if files:
LOGGER.debug(f"Loaded {len(files)} files.")
for filename in files:
data = self.job.read_from_disk(filename)
if data:
LOGGER.debug(f"File {filename} loaded!")
self.job.put(data)
self.job.delete_from_disk(filename)
if pathlib.Path(filename).suffix == '.dat':
data = self.job.read_from_disk(filename)
if data:
LOGGER.debug(f"File {filename} loaded!")
self.job.put(data)
self.job.delete_from_disk(filename)
break
else:
LOGGER.warning("No data available to process!")
@@ -182,4 +195,4 @@ class Bootstrapper(Thread):
# Start the ReID job.
if __name__ == '__main__':
bs = Bootstrapper()
bs.run()
bs.run()
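
A standalone illustration of the new suffix filter in the Bootstrapper loop: only serialized '.dat' payloads found on the shared volume are read, enqueued, and deleted (the file names here are illustrative):

```python
import pathlib

files = [
    "/data/network_shared/reid/r1.dat",     # picked up
    "/data/network_shared/reid/notes.txt",  # skipped
]

for filename in files:
    if pathlib.Path(filename).suffix == ".dat":
        print("would read, enqueue, and delete", filename)
```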

examples/multiedgetracking/tutorial/1.jpg → examples/multiedgeinference/pedestrian_tracking/tutorial/1.jpg

examples/multiedgetracking/tutorial/2.jpg → examples/multiedgeinference/pedestrian_tracking/tutorial/2.jpg

examples/multiedgetracking/tutorial/arch.png → examples/multiedgeinference/pedestrian_tracking/tutorial/arch.png

examples/multiedgetracking/tutorial/cleanup.sh → examples/multiedgeinference/pedestrian_tracking/tutorial/cleanup.sh

examples/multiedgetracking/tutorial/deploy.sh → examples/multiedgeinference/pedestrian_tracking/tutorial/deploy.sh

examples/multiedgetracking/tutorial/run.sh → examples/multiedgeinference/pedestrian_tracking/tutorial/run.sh

@@ -86,7 +86,7 @@ do
sleep 0.2
done

echo -n "" && echo "🟢 VideoAnalytics job has completed."
echo "" && echo "🟢 VideoAnalytics job has completed."
echo "⚪ Clean-up VideoAnalytics job resources."
kubectl delete -f ../yaml/video-analytics-job.yaml

@@ -112,7 +112,7 @@ do
sleep 0.2
done

echo "" && echo "🟢 ReID job has completed."
echo "" && echo "🟢 ReID job has completed."
echo "⚪ Clean-up ReID job resources."
kubectl delete -f ../yaml/reid-job.yaml


examples/multiedgetracking/yaml/feature-extraction-service.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/feature-extraction-service.yaml

@@ -18,7 +18,7 @@ spec:
nodeSelector:
node-role.kubernetes.io/master: ''
containers:
- image: registry-cbu.huawei.com/kubeedge/sedna-example-multi-edge-tracking-feature-extraction:sase
- image: registry-cbu.huawei.com/kubeedge/sedna-example-multi-edge-inference-feature-extraction:v0.5.0
imagePullPolicy: Always
name: feature-extraction
env:

examples/multiedgetracking/yaml/kafka/kafkabrk.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/kafka/kafkabrk.yaml

examples/multiedgetracking/yaml/kafka/kafkasvc.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/kafka/kafkasvc.yaml

examples/multiedgetracking/yaml/kafka/zoodeploy.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/kafka/zoodeploy.yaml

examples/multiedgetracking/yaml/kafka/zooservice.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/kafka/zooservice.yaml

examples/multiedgetracking/yaml/models/model_detection.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/models/model_detection.yaml

@@ -6,10 +6,10 @@ metadata:
spec:
url: "/data/ai_models/object_detection/pedestrians/yolox.pth"
format: "pth"
# description: "YoloX model to detect pedestrians."
# purpose: "detection"
# classes:
# - "pedestrian"
# extra:
# - name: "model_size"
# value: "bytetrack_s_mot17"
description: "YoloX model to detect pedestrians."
purpose: "detection"
classes:
- "pedestrian"
extra:
- name: "model_size"
value: "bytetrack_s_mot17"

examples/multiedgetracking/yaml/models/model_m3l.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/models/model_m3l.yaml

@@ -6,5 +6,5 @@ metadata:
spec:
url: "/data/ai_models/m3l/m3l.pth"
format: "pth"
# description: "M3L to extract ReID features."
# purpose: "feature_extraction"
description: "M3L to extract ReID features."
purpose: "feature_extraction"

examples/multiedgetracking/yaml/pv/reid_volume.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/pv/reid_volume.yaml

examples/multiedgetracking/yaml/pvc/reid-volume-claim.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/pvc/reid-volume-claim.yaml

examples/multiedgetracking/yaml/reid-job.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/reid-job.yaml

@@ -10,14 +10,12 @@ spec:
nodeSelector:
node-role.kubernetes.io/master: ''
containers:
- image: registry-cbu.huawei.com/kubeedge/sedna-example-multi-edge-tracking-reid:sase
- image: registry-cbu.huawei.com/kubeedge/sedna-example-multi-edge-inference-reid:v0.5.0
name: reid
imagePullPolicy: Always
env:
- name: OBS_TOKEN
value: ""
- name: op_mode # can be one of covid19, tracking, or detection
value: covid19
- name: match_thresh # the matching threshold for the reid
value: "0.35"
- name: user_id # the user running the job

examples/multiedgetracking/yaml/video-analytics-job.yaml → examples/multiedgeinference/pedestrian_tracking/yaml/video-analytics-job.yaml

@@ -12,7 +12,7 @@ spec:
nodeSelector:
node-role.kubernetes.io/master: ''
containers:
- image: registry-cbu.huawei.com/kubeedge/sedna-example-multi-edge-tracking-videoanalytics:sase
- image: registry-cbu.huawei.com/kubeedge/sedna-example-multi-edge-inference-videoanalytics:v0.5.0
imagePullPolicy: Always
name: detection
env:
@@ -22,18 +22,12 @@ spec:
value: "MASTER_NODE_IP|kafka-service"
- name: KAFKA_BIND_PORTS # a list of Kafka brokers port, separated by pipe (|)
value: "9092|9092"
- name: "estimator_class" # only ByteTracker is currently supported
value: "ByteTracker"
- name: confidence_thr # ByteTracker NMS confidence threshold
value: "0.7"
- name: fps # video sampling rate, used only when processing a network stream
value: "5"
- name: "video_id" # the video id, can be any value
- name: video_id # the video id, can be any value
value: "0000-1111-2222"
- name: "video_address" # the video source can be rtsp (recommended), http, or a file loaded from NFS
- name: video_address # the video source can be rtsp (recommended), http, or a file loaded from NFS
value: "rtsp://RTSP_SERVER_IP/video/0" #rtsp://7.182.8.79/video/0
- name: op_mode # covid19
value: covid19
- name: hostname
valueFrom:
fieldRef:
@@ -51,4 +45,4 @@ spec:
volumes:
- name: vol1
persistentVolumeClaim:
claimName: reid-pvc
claimName: reid-pvc

+0 -13 examples/multiedgetracking/detection/yolox/exps/yolox_s_mix_det.py

@@ -1,13 +0,0 @@
# encoding: utf-8
import os
from yolox.yolox.exp import Exp as MyExp


class Exp(MyExp):
def __init__(self):
super(Exp, self).__init__()
self.num_classes = 1
self.depth = 0.33
self.width = 0.50
self.exp_name = os.path.split(os.path.realpath(__file__))[1].split(".")[0]


+0 -41 examples/multiedgetracking/detection/yolox/yolox/exp/yolox_base.py

@@ -1,41 +0,0 @@
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# Copyright (c) 2014-2021 Megvii Inc. All rights reserved.

import torch
import torch.distributed as dist
import torch.nn as nn

import os

from .base_exp import BaseExp


class Exp(BaseExp):
def __init__(self):
super().__init__()

# ---------------- model config ---------------- #
self.num_classes = 80
self.depth = 1.00
self.width = 1.00


def get_model(self):
from yolox.yolox.models import YOLOPAFPN, YOLOX, YOLOXHead

def init_yolo(M):
for m in M.modules():
if isinstance(m, nn.BatchNorm2d):
m.eps = 1e-3
m.momentum = 0.03

if getattr(self, "model", None) is None:
in_channels = [256, 512, 1024]
backbone = YOLOPAFPN(self.depth, self.width, in_channels=in_channels)
head = YOLOXHead(self.num_classes, self.width, in_channels=in_channels)
self.model = YOLOX(backbone, head)

self.model.apply(init_yolo)
self.model.head.initialize_biases(1e-2)
return self.model

+2 -2 lib/requirements.dev.txt

@@ -2,5 +2,5 @@
plato-learn~=0.26 # Apache-2.0
# lifelong_learning
scikit-learn~=0.24.1 # BSD
# multi_edge_tracking
kafka-python~=2.0.2 # Apache-2.0
# multi_edge_inference
kafka-python~=2.0.2 # Apache-2.0

+6 -6 lib/sedna/algorithms/__init__.py

@@ -12,9 +12,9 @@
# See the License for the specific language governing permissions and
# limitations under the License.

from . import aggregation # federated_learning
from . import hard_example_mining # joint_inference incremental_learning
from . import multi_task_learning # lifelong_learning
from . import unseen_task_detect # lifelong_learning
from . import optical_flow # multi_edge_tracking
from . import reid # multi_edge_tracking
from . import aggregation
from . import hard_example_mining
from . import multi_task_learning
from . import unseen_task_detect
from . import optical_flow
from . import reid

+8 -2 lib/sedna/algorithms/optical_flow/__init__.py

@@ -24,7 +24,7 @@ __all__ = ('LukasKanade')


class BaseFilter(metaclass=abc.ABCMeta):
"""The base class to define unified interface."""
"""The base class to define an unified interface."""

def __call__(self, old_frame=None, current_frame=None):
"""predict function, and it must be implemented by
@@ -38,6 +38,9 @@ class BaseFilter(metaclass=abc.ABCMeta):

@ClassFactory.register(ClassType.OF, alias="LukasKanadeOF")
class LukasKanade(BaseFilter, abc.ABC):
"""
Class to detect movement between two consecutive images.
"""
def __init__(self, **kwargs):
# Parameters for ShiTomasi corner detection
self.feature_params = dict(maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
@@ -83,6 +86,9 @@ class LukasKanade(BaseFilter, abc.ABC):
@ClassFactory.register(ClassType.OF, alias="LukasKanadeOF_CUDA")
class LukasKanadeCUDA(BaseFilter, abc.ABC):
"""
Class to detect movement between two consecutive images (GPU implementation).
"""
def __init__(self, **kwargs):
# Parameters for ShiTomasi corner detection
self.feature_params = dict(srcType= cv2.CV_8UC1, maxCorners=100, qualityLevel=0.3, minDistance=7, blockSize=7)
@@ -133,4 +139,4 @@ class LukasKanadeCUDA(BaseFilter, abc.ABC):
except Exception as ex:
LOGGER.error(f"Error during the execution of the optical flow estimation! [{ex}]")

return movement
return movement
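
A minimal sketch of how these filter classes are meant to be invoked; the two-frame call follows the BaseFilter signature shown in this diff, while the frame files are illustrative:

```python
import cv2
from sedna.algorithms.optical_flow import LukasKanade

flow_filter = LukasKanade()
old_frame = cv2.imread("frame_000.jpg")
current_frame = cv2.imread("frame_001.jpg")

# Truthy when movement is detected between the two consecutive frames.
movement = flow_filter(old_frame=old_frame, current_frame=current_frame)
print("movement detected:", bool(movement))
```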

+1 -1 lib/sedna/backend/torch/__init__.py

@@ -59,4 +59,4 @@ class TorchBackend(BackendBase):
else:
LOGGER.info("Path to model does not exists!")

self.has_load = True
self.has_load = True

+3 -4 lib/sedna/core/base.py

@@ -106,8 +106,6 @@ class ModelLoadingThread(threading.Thread):

class JobBase:
""" sedna feature base class """
parameters = Context

def __init__(self, estimator, config=None):
self.config = BaseConfig()
if config:
@@ -119,6 +117,7 @@ class JobBase:
self.worker_name = self.config.worker_name or self.job_name
self.namespace = self.config.namespace or self.job_name
self.lc_server = self.config.lc_server

if str(
self.get_parameters("MODEL_HOT_UPDATE", "False")
).lower() == "true":
@@ -159,7 +158,7 @@ class JobBase:
return callback_func(res) if callback_func else res

def get_parameters(self, param, default=None):
return self.parameters.get_parameters(param=param, default=default)
return Context.get_parameters(param=param, default=default)

def report_task_info(self, task_info, status, results=None, kind="train"):
message = {
@@ -177,4 +176,4 @@ class JobBase:
try:
LCClient.send(self.lc_server, self.worker_name, message)
except Exception as err:
self.log.error(err)
self.log.error(err)

lib/sedna/core/multi_edge_tracking/__init__.py → lib/sedna/core/multi_edge_inference/__init__.py


lib/sedna/core/multi_edge_tracking/components/__init__.py → lib/sedna/core/multi_edge_inference/components/__init__.py

@@ -24,15 +24,15 @@ import traceback
import uuid
from typing import List
from sedna.common.log import LOGGER
from sedna.core.multi_edge_tracking.plugins import PLUGIN, PluggableModel, PluggableNetworkService
from sedna.core.multi_edge_tracking.utils import get_parameters
from sedna.core.multi_edge_inference.plugins import PLUGIN, PluggableModel, PluggableNetworkService
from sedna.core.multi_edge_inference.utils import get_parameters
from sedna.datasources.kafka.kafka_manager import KafkaConsumerThread, KafkaProducer
from distutils import util
from collections import deque

class BaseService(ABC):
"""
Base wrapper for video analytics, feature extraction, and reid services
Base MultiEdgeInference wrapper for video analytics, feature extraction, and reid components.
"""

def __init__(self, consumer_topics = [], producer_topics = [], plugins : List[PluggableNetworkService] = [], models : List[PluggableModel] = [], timeout = 10, asynchronous = False):
@@ -69,6 +69,10 @@ class BaseService(ABC):
self._post_init()

def _init_kafka_connection(self, consumer_topics , producer_topics):
"""
Initializes the Kafka backend, if enabled.
"""

self.kafka_enabled = bool(util.strtobool(get_parameters("KAFKA_ENABLED", "False")))

if self.kafka_enabled:
@@ -94,12 +98,19 @@ class BaseService(ABC):


def _post_init(self):
"""
It starts the main data acquisition loop in a separate thread. It can be
overridden to add post-initialization calls.
"""
threading.Thread(target=self.fetch_data, daemon=True).start()
return

# Use asynchronous mode for ingesting a stream (e.g., RTSP).
# Use synchronous mode when reading from disk (e.g., a video file).
def put(self, data):
"""
Call this function to push data into the component. For example, after you extract a
frame from a video stream, you can call put(image). Depending on the value of the
'asynchronous' parameter, the data will be put into a different data structure.
"""
data = self.preprocess(data)
if data:
return self._put_data_asynchronous(data) if self.asynchronous else self._put_data_synchronous(data)
@@ -174,6 +185,11 @@ class BaseService(ABC):
return
def get_plugin(self, plugin_key : PLUGIN):
"""
This function selects the network service to communicate with based
on its name (given that it has been registered before). The list of
registered plugins can be found in plugins/registered.py.
"""
try:
ls = list(filter(lambda n: n.kind == plugin_key.name, self.plugins))[0]
except IndexError as ie:
@@ -189,22 +205,41 @@ class BaseService(ABC):

# Distributes the data in the queue to the models associated to this service
def distribute_data(self, data = [], **kwargs):
"""
This function sends the data to all the AI models passed to this
component during the initialization phase.
"""
for ai in self.models:
self.process_data(ai, data)
return

@abstractmethod
def process_data(self, ai, data, **kwargs):
"""
The user needs to implement this function to call the main processing
function of the AI model and decide what to do with the result.
"""
return
@abstractmethod
def update_operational_mode(self, status):
"""
The user needs to trigger updates to the AI model, if necessary.
"""
return

def preprocess(self, data, **kwargs):
"""
The user can override this function to inject a preprocessing
operation to be executed before the data is added to the data
structure by the 'put()' function.
"""
return data

class FileOperations:
"""
Class containing file operations to read/write from disk.
"""
def read_from_disk(self, path):
data = []
try:
@@ -229,4 +264,4 @@ class FileOperations:


def get_files_list(self, folder):
return [join(folder, f) for f in os.listdir(folder) if isfile(join(folder, f))]
return [join(folder, f) for f in os.listdir(folder) if isfile(join(folder, f))]
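
A minimal sketch of the ingestion pattern described by the put() docstring earlier in this diff (assumes OpenCV and a service built from one of these components, e.g. ObjectDetector(models=[...], asynchronous=True) for streams):

```python
import cv2

def ingest(service, address="rtsp://RTSP_SERVER_IP/video/0"):
    cap = cv2.VideoCapture(address)
    while True:
        grabbed, frame = cap.read()
        if not grabbed:
            break
        service.put(frame)  # preprocess() runs before the frame is queued
    cap.release()
```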

lib/sedna/core/multi_edge_tracking/components/detector.py → lib/sedna/core/multi_edge_inference/components/detector.py

@@ -19,14 +19,57 @@ import numpy as np
from sedna.algorithms.optical_flow import LukasKanade, LukasKanadeCUDA
from sedna.common.log import LOGGER

from sedna.core.multi_edge_tracking.plugins import PLUGIN, PluggableModel, PluggableNetworkService
from sedna.core.multi_edge_tracking.plugins.registered import Feature_Extraction_I, VideoAnalytics
from sedna.core.multi_edge_tracking.components import BaseService, FileOperations
from sedna.core.multi_edge_inference.plugins import PLUGIN, PluggableModel, PluggableNetworkService
from sedna.core.multi_edge_inference.plugins.registered import Feature_Extraction_I, VideoAnalytics
from sedna.core.multi_edge_inference.components import BaseService, FileOperations

class ObjectDetector(BaseService, FileOperations):
"""
Object Detection/Tracking service.
In MultiEdgeInference, the Object Detection/Tracking component
is deployed as a service at the edge and is used to detect or track objects
(for example, pedestrians) and send the result to the cloud for
further processing using Kafka or REST API.

Parameters
----------
consumer_topics : List
A list of Kafka topics used to communicate with the Feature
Extraction service (to receive data from it).
This is accessed only if the Kafka backend is in use.
producer_topics : List
A list of Kafka topics used to communicate with the Feature
Extraction service (to send data to it).
This is accessed only if the Kafka backend is in use.
plugins : List
A list of PluggableNetworkService. It can be left empty
as the ObjectDetector service is already preconfigured
to connect to the correct network services.
models : List
A list of PluggableModel. By passing a specific instance
of the model, it is possible to customize the ObjectDetector
to, for example, track different objects as long as the
PluggableModel interface is respected.
timeout: int
It sets a timeout condition to terminate the main fetch loop after the specified
number of seconds has passed since the last frame was received.
asynchronous: bool
If True, the AI processing will be decoupled from the data acquisition step.
If False, the processing will be sequential. In general, set it to True when
ingesting a stream (e.g., RTSP) and to False when reading from disk
(e.g., a video file).


Examples
--------
>>> model = ByteTracker() # A class implementing the PluggableModel abstract class (example in pedestrian_tracking/detection/model/bytetracker.py)
>>> objecttracking_service = ObjectDetector(models=[model], asynchronous=True)

Notes
-----
For the parameters described above, only 'models' has to be defined, while
for others the default value will work in most cases.
"""

def __init__(self, consumer_topics = ["enriched_object"], producer_topics=["object_detection"], plugins: List[PluggableNetworkService] = [], models: List[PluggableModel] = [], timeout = 10, asynchronous = False):
merged_plugins = [VideoAnalytics(wrapper=self), Feature_Extraction_I()] + plugins
super().__init__(consumer_topics, producer_topics, merged_plugins, models, timeout, asynchronous)
@@ -52,7 +95,7 @@ class ObjectDetector(BaseService, FileOperations):

# We change the preprocess function to add the optical flow analysis
def preprocess(self, data):
# This is risky, we should create a specific class to represent data processed by FE or create a flag in the detrack object.
# TODO: Improve this check, this is not reliable.
if isinstance(data, List):
self.data_counter += len(data)
LOGGER.info(f"Received data from FE module (counter={self.data_counter}). Writing to local storage")

+90 -0 lib/sedna/core/multi_edge_inference/components/feature_extraction.py

@@ -0,0 +1,90 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List
from sedna.core.multi_edge_inference.components import BaseService
from sedna.core.multi_edge_inference.plugins import PLUGIN, PluggableModel, PluggableNetworkService
from sedna.core.multi_edge_inference.plugins.registered import Feature_Extraction, VideoAnalytics_I

class FEService(BaseService):
"""
In MultiEdgeInference, the Feature Extraction component
is deployed at the edge or in the cloud and is used to extract
ReID features from frames received by the ObjectDetector component
and send the enriched data back to it using Kafka or a REST API.

Parameters
----------
consumer_topics : List
A list of Kafka topics used to communicate with the Object
Detector service (to receive data from it).
This is accessed only if the Kafka backend is in use.
producer_topics : List
A list of Kafka topics used to communicate with the Object
Detector service (to send data to it).
This is accessed only if the Kafka backend is in use.
plugins : List
A list of PluggableNetworkService. It can be left empty
as the FeatureExtraction service is already preconfigured
to connect to the correct network services.
models : List
A list of PluggableModel. By passing a specific instance
of the model, it is possible to customize the FeatureExtraction
component to, for example, extract the object features
differently.
timeout : int
Sets a timeout condition that terminates the main fetch loop after the
specified number of seconds has passed since the last frame was received.
asynchronous : bool
If True, the AI processing is decoupled from the data acquisition step.
If False, the processing is sequential. In general, set it to True when
ingesting a stream (e.g., RTSP) and to False when reading from disk
(e.g., a video file).


Examples
--------
>>> model = FeatureExtractionAI() # A class implementing the PluggableModel abstract class (example in pedestrian_tracking/feature_extraction/worker.py)
>>> fe_service = FEService(models=[model], asynchronous=False)

Notes
-----
Of the parameters described above, only 'models' has to be defined;
for the others, the default values will work in most cases.
"""


def __init__(self, consumer_topics = ["object_detection"], producer_topics=["enriched_object"], plugins: List[PluggableNetworkService] = [], models: List[PluggableModel] = [], timeout = 10, asynchronous = False):
merged_plugins = [VideoAnalytics_I(), Feature_Extraction(wrapper=self)] + plugins
super().__init__(consumer_topics, producer_topics, merged_plugins, models, timeout, asynchronous)

def process_data(self, ai, data, **kwargs):
# Note: the 'ai' argument is not used here; we iterate over all
# registered models instead (the loop variable is renamed so it
# no longer shadows the parameter).
for model in self.models:
result = model.inference(data)

if result != []:
if self.kafka_enabled:
# Kafka backend: publish each enriched item to the producer topics.
for d in result:
self.producer.write_result(d)
else:
# REST backend: send the result back through the VideoAnalytics_I plugin.
plg = self.get_plugin(PLUGIN.VIDEO_ANALYTICS_I)
plg.plugin_api.transmit(result, **kwargs)

def update_operational_mode(self, status):
pass

def get_target_features(self, ldata):
# TODO: Fix this workaround; we need a function to select a model by name.
fe_ai = self.models[0]
return fe_ai.get_target_features(ldata)
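
The default topics make the two services a loop: ObjectDetector produces to 'object_detection', which FEService consumes, and FEService produces to 'enriched_object', which ObjectDetector consumes. A minimal sketch, assuming the FeatureExtractionAI class referenced in the docstring example:

# Hypothetical sketch, not part of this commit.
from sedna.core.multi_edge_inference.components.feature_extraction import FEService

model = FeatureExtractionAI()  # implements PluggableModel (see docstring example)
fe = FEService(models=[model], asynchronous=False)
# Kafka wiring with the defaults:
#   ObjectDetector --"object_detection"--> FEService
#   FEService --"enriched_object"--> ObjectDetector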

lib/sedna/core/multi_edge_tracking/components/reid.py → lib/sedna/core/multi_edge_inference/components/reid.py View File

@@ -15,13 +15,49 @@
from typing import List
from sedna.common.log import LOGGER

-from sedna.core.multi_edge_tracking.components import BaseService, FileOperations
-from sedna.core.multi_edge_tracking.plugins import PLUGIN, PluggableModel, PluggableNetworkService
-from sedna.core.multi_edge_tracking.plugins.registered import Feature_Extraction_I, ReID_Server
+from sedna.core.multi_edge_inference.components import BaseService, FileOperations
+from sedna.core.multi_edge_inference.plugins import PLUGIN, PluggableModel, PluggableNetworkService
+from sedna.core.multi_edge_inference.plugins.registered import Feature_Extraction_I, ReID_Server

class ReID(BaseService, FileOperations):
"""
ReID service.
In MultiEdgeInference, the ReID component is deployed in the cloud
and is used to identify a target by comparing its features
with the ones generated by the Feature Extraction component.

Parameters
----------
consumer_topics : List
Leave empty.
producer_topics : List
Leave empty.
plugins : List
A list of PluggableNetworkService. It can be left empty
as the ReID component is already preconfigured
to connect to the correct network services.
models : List
A list of PluggableModel. Here we abuse the term 'model',
as ReID doesn't really use an AI model but rather
a wrapper for the ReID functions.
timeout : int
Sets a timeout condition that terminates the main fetch loop after the
specified number of seconds has passed since the last frame was received.
asynchronous : bool
If True, the AI processing is decoupled from the data acquisition step.
If False, the processing is sequential. In general, set it to True when
ingesting a stream (e.g., RTSP) and to False when reading from disk
(e.g., a video file).


Examples
--------
>>> model = ReIDWorker() # A class implementing the PluggableModel abstract class (example in pedestrian_tracking/reid/worker.py)
>>> self.job = ReID(models=[model], asynchronous=False)

Notes
-----
Of the parameters described above, only 'models' has to be defined;
for the others, the default values will work in most cases.
"""

def __init__(self, consumer_topics = [], producer_topics=[], plugins: List[PluggableNetworkService] = [], models: List[PluggableModel] = [], timeout = 10, asynchronous = True):
@@ -52,4 +88,4 @@ class ReID(BaseService, FileOperations):
def get_target_features(self, ldata):
feature_extraction_plugin = self.get_plugin(PLUGIN.FEATURE_EXTRACTION_I)
features = feature_extraction_plugin.plugin_api.get_target_features(ldata)
-return features
+return features
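
A minimal sketch of the cloud-side job, assuming the ReIDWorker class referenced in the docstring example; note that consumer_topics and producer_topics stay empty and asynchronous defaults to True here:

# Hypothetical sketch, not part of this commit.
from sedna.core.multi_edge_inference.components.reid import ReID

model = ReIDWorker()  # a wrapper for the ReID functions, not a true AI model
job = ReID(models=[model])  # topics left empty, as the docstring advises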

lib/sedna/core/multi_edge_tracking/data_classes.py → lib/sedna/core/multi_edge_inference/data_classes.py View File

@@ -25,6 +25,10 @@ class OP_MODE(Enum):

# Class defining the output of a ReID service.
class DetTrackResult:
"""
Base data object exchanged by the MultiEdgeInference components.
"""

def __init__(self, frame_index : int = 0, bbox : List = None, scene = None, confidence : List = None, detection_time : List = None, camera : int = 0, bbox_coord : List = [], tracking_ids : List = [], features : List = [], is_target=False, ID : List = []):
self.userID = "DEFAULT" # Name of the enduser using the application, used to bound the user to the results
self.frame_index = frame_index # Video frame index number
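
For reference, a sketch of constructing the data object directly; the field meanings below are inferred from the parameter names and inline comments, so treat them as assumptions:

# Hypothetical sketch, not part of this commit.
from sedna.core.multi_edge_inference.data_classes import DetTrackResult

det = DetTrackResult(
    frame_index=42,    # video frame index number
    camera=0,          # ID of the source camera (assumed meaning)
    bbox_coord=[],     # bounding box coordinates (assumed meaning)
    tracking_ids=[],   # per-object tracking IDs (assumed meaning)
    features=[],       # ReID feature vectors (assumed meaning)
)
det.userID = "alice"   # binds the end user to the results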

lib/sedna/core/multi_edge_tracking/plugins/__init__.py → lib/sedna/core/multi_edge_inference/plugins/__init__.py View File

@@ -30,7 +30,7 @@ from sedna.common.config import BaseConfig
from sedna.common.file_ops import FileOps
from sedna.common.log import LOGGER

-from sedna.core.multi_edge_tracking.utils import get_parameters
+from sedna.core.multi_edge_inference.utils import get_parameters


# Class defining the possible plugin services.
@@ -45,6 +45,9 @@ class PLUGIN(Enum):
VIDEO_ANALYTICS_I = "VideoAnalytics_I"

class PluggableNetworkService(ABC):
"""
Abstract class to wrap a REST service.
"""
def __init__(self, ip, port, plugin_api : object = None):
self.ip = ip
self.port = port
@@ -66,23 +69,10 @@ class PluggableNetworkService(ABC):
if callable(start):
threading.Thread(target=self.plugin_api.start, daemon=True).start()
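
A sketch of what a concrete subclass looks like, grounded only in the constructor shown above; MyRestAPI is a hypothetical stand-in for any object exposing a callable start():

# Hypothetical sketch, not part of this commit.
from sedna.core.multi_edge_inference.plugins import PluggableNetworkService

class MyService(PluggableNetworkService):
    def __init__(self, ip="127.0.0.1", port="9000"):
        # If plugin_api exposes a callable start(), the base class
        # launches it in a daemon thread (see the snippet above).
        super().__init__(ip, port, plugin_api=MyRestAPI(ip, port))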

-class PluggableDatasource(ABC):
-def __init__(self, **kwargs) -> None:
-super().__init__()
-
-self.kind = PLUGIN(self.__class__.__name__).name
-
-LOGGER.info(f"Created PluggableDatasource of kind {self.kind}")
-
-@abstractmethod
-def write(self, data, **kwargs):
-return
-
-@abstractmethod
-def read(self, **kwargs):
-return

class PluggableModel(ABC):
"""
Abstract class to wrap an AI model.
"""
def __init__(self) -> None:
self.config = BaseConfig()
self.model_backend = self._set_backend()
@@ -126,7 +116,7 @@ class PluggableModel(ABC):
raise NotImplementedError

def inference(self, data=None, post_process=None, **kwargs):
"""Calls the model 'predict' function"""
res = self.model_backend.predict(data, **kwargs)
callback_func = None
if callable(post_process):
@@ -147,7 +137,7 @@ class PluggableModel(ABC):
return callback_func(res) if callback_func else res

def _set_backend(self):
"""Create Trainer class"""
"""Configure AI backend parameters based on model type."""
use_cuda = False
backend_type = os.getenv(
'BACKEND_TYPE', self.config.get("backend_type", "UNKNOWN")
@@ -181,4 +171,4 @@ class PluggableModel(ABC):
model_save_path=base_model_save,
model_name=model_save_name,
model_save_url=model_save_url
-)
+)
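
The inference() hook above accepts an optional post_process callable that is applied to the backend's predictions. A sketch, where MyModel is a hypothetical stand-in for any concrete PluggableModel subclass:

# Hypothetical sketch, not part of this commit.
def drop_empty(results):
    # Filter out empty prediction lists before they reach the caller.
    return [r for r in results if r]

model = MyModel()  # assumed concrete PluggableModel subclass
out = model.inference(frame, post_process=drop_empty)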

lib/sedna/core/multi_edge_tracking/plugins/registered.py → lib/sedna/core/multi_edge_inference/plugins/registered.py View File

@@ -13,8 +13,8 @@
# limitations under the License.

from sedna.common.utils import get_host_ip
-from sedna.core.multi_edge_tracking.plugins import PluggableNetworkService
-from sedna.core.multi_edge_tracking.utils import get_parameters
+from sedna.core.multi_edge_inference.plugins import PluggableNetworkService
+from sedna.core.multi_edge_inference.utils import get_parameters
from sedna.service.multi_edge_tracking.interface import *
from sedna.service.multi_edge_tracking.server import *

@@ -78,4 +78,4 @@ class VideoAnalytics_I(PluggableNetworkService):
ip,
port,
Detection("video_analytics", ip=ip, port=int(port))
)
)

lib/sedna/core/multi_edge_tracking/utils.py → lib/sedna/core/multi_edge_inference/utils.py View File


+ 0
- 47
lib/sedna/core/multi_edge_tracking/components/feature_extraction.py View File

@@ -1,47 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List
from sedna.core.multi_edge_tracking.components import BaseService
from sedna.core.multi_edge_tracking.plugins import PLUGIN, PluggableModel, PluggableNetworkService
from sedna.core.multi_edge_tracking.plugins.registered import Feature_Extraction, VideoAnalytics_I

class FEService(BaseService):
"""
Feature Extraction service.
"""

def __init__(self, consumer_topics = ["object_detection"], producer_topics=["enriched_object"], plugins: List[PluggableNetworkService] = [], models: List[PluggableModel] = [], timeout = 10, asynchronous = False):
merged_plugins = [VideoAnalytics_I(), Feature_Extraction(wrapper=self)] + plugins
super().__init__(consumer_topics, producer_topics, merged_plugins, models, timeout, asynchronous)

def process_data(self, ai, data, **kwargs):
for ai in self.models:
result = ai.inference(data)

if result != []:
if self.kafka_enabled:
for d in result:
self.producer.write_result(d)
else:
plg = self.get_plugin(PLUGIN.VIDEO_ANALYTICS_I)
plg.plugin_api.transmit(result, **kwargs)

def update_operational_mode(self, status):
pass

def get_target_features(self, ldata):
# TODO: Fix this workaround, we need a function to select a model based on its name
fe_ai = self.models[0]
return fe_ai.get_target_features(ldata)

+ 1
- 1
lib/sedna/datasources/kafka/kafka_manager.py View File

@@ -43,4 +43,4 @@ class KafkaConsumerThread(Thread):
while not self.consumer.disconnected:
data = self.consumer.consume_messages_poll()
if data:
-self.callback(data)
+self.callback(data)
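
The loop above keeps polling until the consumer is flagged as disconnected, handing each non-empty batch to a callback. A sketch of the pairing; the constructor arguments are assumed, since only the run loop appears in this diff:

# Hypothetical sketch, not part of this commit.
def on_batch(batch):
    print(f"received {len(batch)} messages")

# Assumed constructor signature; only the polling loop is shown above.
thread = KafkaConsumerThread(callback=on_batch)
thread.start()  # exits once consumer.disconnected becomes True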

+ 1
- 1
lib/sedna/datasources/kafka/producer.py View File

@@ -66,4 +66,4 @@ class Producer(Client):

def close(self):
LOGGER.debug("Shutting down producer")
-self.producer.close()
+self.producer.close()

lib/sedna/service/multi_edge_tracking/__init__.py → lib/sedna/service/multi_edge_inference/__init__.py View File


lib/sedna/service/multi_edge_tracking/interface/__init__.py → lib/sedna/service/multi_edge_inference/interface/__init__.py View File


lib/sedna/service/multi_edge_tracking/interface/detection_endpoint.py → lib/sedna/service/multi_edge_inference/interface/detection_endpoint.py View File

@@ -16,7 +16,7 @@ import pickle
from sedna.service.client import http_request

class Detection:
"""Endpoint to trigger detection"""
"""Endpoint to trigger the Object Tracking component"""

def __init__(self, service_name, version="",
ip="127.0.0.1", port="8080", protocol="http"):

lib/sedna/service/multi_edge_tracking/interface/fe_endpoint.py → lib/sedna/service/multi_edge_inference/interface/fe_endpoint.py View File


lib/sedna/service/multi_edge_tracking/interface/reid_endpoint.py → lib/sedna/service/multi_edge_inference/interface/reid_endpoint.py View File

@@ -13,7 +13,7 @@
# limitations under the License.

import pickle
-from sedna.core.multi_edge_tracking.data_classes import DetTrackResult
+from sedna.core.multi_edge_inference.data_classes import DetTrackResult
from sedna.service.client import http_request

class ReID_Endpoint:
@@ -30,4 +30,4 @@ class ReID_Endpoint:
def transmit(self, x : DetTrackResult, **kwargs):
"""Transfer feature vector to ReID worker"""
_url = f"{self.endpoint}/reid"
-return http_request(url=_url, method="POST", data=pickle.dumps(x))
+return http_request(url=_url, method="POST", data=pickle.dumps(x))
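
transmit() pickles the DetTrackResult and POSTs it to the /reid route. A sketch of a client call; the constructor arguments are assumed to mirror the Detection endpoint shown earlier:

# Hypothetical sketch, not part of this commit.
from sedna.core.multi_edge_inference.data_classes import DetTrackResult
from sedna.service.multi_edge_inference.interface.reid_endpoint import ReID_Endpoint

endpoint = ReID_Endpoint("reid", ip="127.0.0.1", port="8080")  # assumed args
endpoint.transmit(DetTrackResult(frame_index=0))  # POSTs the pickled object to {endpoint}/reid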

lib/sedna/service/multi_edge_tracking/server/__init__.py → lib/sedna/service/multi_edge_inference/server/__init__.py View File


lib/sedna/service/multi_edge_tracking/server/detection.py → lib/sedna/service/multi_edge_inference/server/detection.py View File

@@ -24,7 +24,7 @@ __all__ = ('DetectionServer', )

class DetectionServer(BaseServer): # pylint: disable=too-many-arguments
"""
-rest api server for detection
+REST API server for the object detection component
"""

def __init__(

lib/sedna/service/multi_edge_tracking/server/feature_extraction.py → lib/sedna/service/multi_edge_inference/server/feature_extraction.py View File


lib/sedna/service/multi_edge_tracking/server/reid.py → lib/sedna/service/multi_edge_inference/server/reid.py View File

@@ -24,7 +24,7 @@ __all__ = ('ReIDServer', )

class ReIDServer(BaseServer): # pylint: disable=too-many-arguments
"""
-rest api server for reid
+REST API server for ReID
"""

def __init__(
@@ -73,4 +73,4 @@ class ReIDServer(BaseServer): # pylint: disable=too-many-arguments
s = await request.body()
self.model.inference(pickle.loads(s), post_process=None)

-return 200
+return 200

+ 2
- 20
pkg/globalmanager/controllers/featureextraction/downstream.go View File

@@ -17,32 +17,14 @@ limitations under the License.
package featureextraction

import (
"fmt"

"k8s.io/apimachinery/pkg/watch"

sednav1 "github.com/kubeedge/sedna/pkg/apis/sedna/v1alpha1"
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
)

+// NOTE: For this job we don't need synchronization
func (c *Controller) syncToEdge(eventType watch.EventType, obj interface{}) error {
-service, ok := obj.(*sednav1.FeatureExtractionService)
-if !ok {
-return nil
-}
-
-// Since Kind may be empty,
-// we need to fix the kind here if missing.
-// more details at https://github.com/kubernetes/kubernetes/issues/3030
-service.Kind = KindName
-
-// Here only propagate to the nodes with non empty name
-nodeName := service.Spec.Template.Spec.NodeName
-if len(nodeName) == 0 {
-return fmt.Errorf("empty node name")
-}
-
-return c.sendToEdgeFunc(nodeName, eventType, service)
+return nil
}

func (c *Controller) SetDownstreamSendFunc(f runtime.DownstreamSendFunc) error {


+ 5
- 9
pkg/globalmanager/controllers/featureextraction/featureextractionservice.go View File

@@ -49,17 +49,15 @@ import (
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
)

-const (
-FEWorker = "fe"
-FEPort   = 6000
-)
-
const (
// Name is this controller name
Name = "FeatureExtraction"

// KindName is the kind name of CR this controller controls
KindName = "FeatureExtractionService"
+// FEWorker is the name given to the worker pod
+FEWorker = "fe"
+// FEPort is the port where the service will be exposed
+FEPort = 6000
)

// FeatureExtractionServicerKind contains the schema.GroupVersionKind for this controller type.
@@ -184,7 +182,7 @@ func (c *Controller) updateDeployment(old, cur interface{}) {
c.addDeployment(curD)
}

-// obj could be an *sednav1.ObjectSearchService, or a DeletionFinalStateUnknown marker item,
+// obj could be an *sednav1.FeatureExtractionService, or a DeletionFinalStateUnknown marker item,
// immediate tells the controller to update the status right away, and should
// happen ONLY when there was a successful pod run.
func (c *Controller) enqueueController(obj interface{}, immediate bool) {
@@ -387,8 +385,6 @@ func (c *Controller) sync(key string) (bool, error) {

if failedPods > 0 || failedDeployment > 0 {
serviceFailed = true
-// TODO: Split code to handle deployment failure separately
-// TODO: get the failed worker, and knows that which worker fails, edge inference worker or cloud inference worker
reason = "workerFailed"
message = "the worker of FeatureExtractionService failed"
newCondtionType = sednav1.FeatureExtractionServiceCondFailed


+ 1
- 1
pkg/globalmanager/controllers/featureextraction/upstream.go View File

@@ -21,11 +21,11 @@ import (
"encoding/json"
"fmt"

-metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"

sednav1 "github.com/kubeedge/sedna/pkg/apis/sedna/v1alpha1"
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
+metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// updateHandler handles the updates from LC(running at edge) to update the


+ 2
- 20
pkg/globalmanager/controllers/reid/downstream.go View File

@@ -17,32 +17,14 @@ limitations under the License.
package reid

import (
"fmt"

"k8s.io/apimachinery/pkg/watch"

sednav1 "github.com/kubeedge/sedna/pkg/apis/sedna/v1alpha1"
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
)

+// NOTE: For this job we don't need synchronization
func (c *Controller) syncToEdge(eventType watch.EventType, obj interface{}) error {
-job, ok := obj.(*sednav1.ReidJob)
-if !ok {
-return nil
-}
-
-// Since Kind may be empty,
-// we need to fix the kind here if missing.
-// more details at https://github.com/kubernetes/kubernetes/issues/3030
-job.Kind = KindName
-
-nodeName := job.Spec.Template.Spec.NodeName
-if len(nodeName) == 0 {
-return fmt.Errorf("empty node name")
-}
-
-return c.sendToEdgeFunc(nodeName, eventType, job)
-
+return nil
}

func (c *Controller) SetDownstreamSendFunc(f runtime.DownstreamSendFunc) error {


+ 18
- 18
pkg/globalmanager/controllers/reid/reidjob.go View File

@@ -492,7 +492,7 @@ func New(cc *runtime.ControllerContext) (runtime.FeatureControllerI, error) {
eventBroadcaster := record.NewBroadcaster()
eventBroadcaster.StartRecordingToSink(&v1core.EventSinkImpl{Interface: cc.KubeClient.CoreV1().Events("")})

-fc := &Controller{
+rc := &Controller{
kubeClient: cc.KubeClient,
client: cc.SednaClient.SednaV1alpha1(),

@@ -503,38 +503,38 @@

jobInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) {
-fc.enqueueController(obj, true)
+rc.enqueueController(obj, true)

-// when a video analytics job is added,
+// when a reid job is added,
// send it to edge's LC.
-fc.syncToEdge(watch.Added, obj)
+rc.syncToEdge(watch.Added, obj)
},
UpdateFunc: func(old, cur interface{}) {
-fc.enqueueController(cur, true)
+rc.enqueueController(cur, true)

-// when a video analytics job is updated,
+// when a reid job is updated,
// send it to edge's LC as Added event.
-fc.syncToEdge(watch.Added, cur)
+rc.syncToEdge(watch.Added, cur)
},
DeleteFunc: func(obj interface{}) {
-fc.enqueueController(obj, true)
+rc.enqueueController(obj, true)

-// when a video analytics job is deleted,
+// when a reid job is deleted,
// send it to edge's LC.
-fc.syncToEdge(watch.Deleted, obj)
+rc.syncToEdge(watch.Deleted, obj)
},
})

-fc.jobLister = jobInformer.Lister()
-fc.jobStoreSynced = jobInformer.Informer().HasSynced
+rc.jobLister = jobInformer.Lister()
+rc.jobStoreSynced = jobInformer.Informer().HasSynced

podInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
-AddFunc: fc.addPod,
-UpdateFunc: fc.updatePod,
-DeleteFunc: fc.deletePod,
+AddFunc: rc.addPod,
+UpdateFunc: rc.updatePod,
+DeleteFunc: rc.deletePod,
})
-fc.podStore = podInformer.Lister()
-fc.podStoreSynced = podInformer.Informer().HasSynced
+rc.podStore = podInformer.Lister()
+rc.podStoreSynced = podInformer.Informer().HasSynced

-return fc, nil
+return rc, nil
}

+ 2
- 15
pkg/globalmanager/controllers/reid/upstream.go View File

@@ -21,10 +21,11 @@ import (
"encoding/json"
"fmt"

"k8s.io/klog/v2"

sednav1 "github.com/kubeedge/sedna/pkg/apis/sedna/v1alpha1"
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)

const upstreamStatusUpdateRetries = 3
@@ -47,19 +48,6 @@ func newUnmarshalError(namespace, name, operation string, content []byte) error
return fmt.Errorf("Unable to unmarshal content for (%s/%s) operation: '%s', content: '%+v'", namespace, name, operation, string(content))
}

-// retryUpdateStatus simply retries to call the status update func
-func retryUpdateStatus(name, namespace string, updateStatusFunc func() error) error {
-var err error
-for retry := 0; retry <= upstreamStatusUpdateRetries; retry++ {
-err = updateStatusFunc()
-if err == nil {
-return nil
-}
-klog.Warningf("Error to update %s/%s status, retried %d times: %+v", namespace, name, retry, err)
-}
-return err
-}

// updateFromEdge updates the reid job's status
func (c *Controller) updateFromEdge(name, namespace, operation string, content []byte) (err error) {
// Output defines owner output information
@@ -68,7 +56,6 @@ func (c *Controller) updateFromEdge(name, namespace, operation string, content [
}

var status struct {
-// Phase always should be "inference"
Phase string `json:"phase"`
Status string `json:"status"`
Output *Output `json:"output"`


+ 2
- 20
pkg/globalmanager/controllers/videoanalytics/downstream.go View File

@@ -17,32 +17,14 @@ limitations under the License.
package videoanalytics

import (
"fmt"

"k8s.io/apimachinery/pkg/watch"

sednav1 "github.com/kubeedge/sedna/pkg/apis/sedna/v1alpha1"
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
)

+// NOTE: For this job we don't need synchronization with the edge
func (c *Controller) syncToEdge(eventType watch.EventType, obj interface{}) error {
-job, ok := obj.(*sednav1.VideoAnalyticsJob)
-if !ok {
-return nil
-}
-
-// Since Kind may be empty,
-// we need to fix the kind here if missing.
-// more details at https://github.com/kubernetes/kubernetes/issues/3030
-job.Kind = KindName
-
-nodeName := job.Spec.Template.Spec.NodeName
-if len(nodeName) == 0 {
-return fmt.Errorf("empty node name")
-}
-
-return c.sendToEdgeFunc(nodeName, eventType, job)
-
+return nil
}

func (c *Controller) SetDownstreamSendFunc(f runtime.DownstreamSendFunc) error {


+ 2
- 15
pkg/globalmanager/controllers/videoanalytics/upstream.go View File

@@ -21,10 +21,11 @@ import (
"encoding/json"
"fmt"

"k8s.io/klog/v2"

sednav1 "github.com/kubeedge/sedna/pkg/apis/sedna/v1alpha1"
"github.com/kubeedge/sedna/pkg/globalmanager/runtime"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/klog/v2"
)

const upstreamStatusUpdateRetries = 3
@@ -71,19 +72,6 @@ func newUnmarshalError(namespace, name, operation string, content []byte) error
return fmt.Errorf("Unable to unmarshal content for (%s/%s) operation: '%s', content: '%+v'", namespace, name, operation, string(content))
}

-// retryUpdateStatus simply retries to call the status update func
-func retryUpdateStatus(name, namespace string, updateStatusFunc func() error) error {
-var err error
-for retry := 0; retry <= upstreamStatusUpdateRetries; retry++ {
-err = updateStatusFunc()
-if err == nil {
-return nil
-}
-klog.Warningf("Error to update %s/%s status, retried %d times: %+v", namespace, name, retry, err)
-}
-return err
-}

// updateFromEdge updates the videoanalytics job's status
func (c *Controller) updateFromEdge(name, namespace, operation string, content []byte) (err error) {
// Output defines owner output information
@@ -92,7 +80,6 @@ func (c *Controller) updateFromEdge(name, namespace, operation string, content [
}

var status struct {
-// Phase always should be "inference"
Phase string `json:"phase"`
Status string `json:"status"`
Output *Output `json:"output"`

