
Conduct code check and improve docs of unstructured lifelong learning

Signed-off-by: SiqiLuo <1587295470@qq.com>
SiqiLuo · 2 years ago · commit c5cf783d96 · tags/v0.6.0
100 changed files with 934 additions and 3502 deletions
  1. +3 -3  build/kb/Dockerfile
  2. +1 -1  examples/lifelong-learning-atcii-classifier.Dockerfile
  3. +1 -6  examples/lifelong-learning-cityscapes-segmentation.Dockerfile
  4. +266 -0  examples/lifelong_learning/cityscapes/README.md
  5. +54 -0  examples/lifelong_learning/cityscapes/RFNet/accuracy.py
  6. +0 -0  examples/lifelong_learning/cityscapes/RFNet/dataloaders/__init__.py
  7. +20 -15  examples/lifelong_learning/cityscapes/RFNet/dataloaders/custom_transforms.py
  8. +18 -12  examples/lifelong_learning/cityscapes/RFNet/dataloaders/custom_transforms_rgb.py
  9. +0 -0  examples/lifelong_learning/cityscapes/RFNet/dataloaders/datasets/__init__.py
  10. +116 -0  examples/lifelong_learning/cityscapes/RFNet/dataloaders/datasets/cityscapes.py
  11. +0 -0  examples/lifelong_learning/cityscapes/RFNet/dataloaders/utils.py
  12. +1 -3  examples/lifelong_learning/cityscapes/RFNet/estimators/eval.py
  13. +2 -0  examples/lifelong_learning/cityscapes/RFNet/estimators/train.py
  14. +17 -14  examples/lifelong_learning/cityscapes/RFNet/evaluate.py
  15. BIN  examples/lifelong_learning/cityscapes/RFNet/images/2086.png
  16. BIN  examples/lifelong_learning/cityscapes/RFNet/images/2086_color.png
  17. BIN  examples/lifelong_learning/cityscapes/RFNet/images/2086_merge.png
  18. +27 -38  examples/lifelong_learning/cityscapes/RFNet/interface.py
  19. +3 -12  examples/lifelong_learning/cityscapes/RFNet/models/replicate.py
  20. +0 -0  examples/lifelong_learning/cityscapes/RFNet/models/resnet/__init__.py
  21. +0 -0  examples/lifelong_learning/cityscapes/RFNet/models/resnet/resnet_single_scale_single_attention.py
  22. +5 -4  examples/lifelong_learning/cityscapes/RFNet/models/rfnet.py
  23. +144 -0  examples/lifelong_learning/cityscapes/RFNet/models/util.py
  24. +105 -0  examples/lifelong_learning/cityscapes/RFNet/predict.py
  25. +23 -20  examples/lifelong_learning/cityscapes/RFNet/train.py
  26. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/__init__.py
  27. +1 -1  examples/lifelong_learning/cityscapes/RFNet/utils/args.py
  28. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/calculate_weights.py
  29. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/iouEval.py
  30. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/loss.py
  31. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/lr_scheduler.py
  32. +0 -5  examples/lifelong_learning/cityscapes/RFNet/utils/metrics.py
  33. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/ml_regression.py
  34. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/nn_regression.py
  35. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/saver.py
  36. +0 -0  examples/lifelong_learning/cityscapes/RFNet/utils/summaries.py
  37. +8 -0  examples/lifelong_learning/cityscapes/yaml/dataset.yaml
  38. +119 -0  examples/lifelong_learning/cityscapes/yaml/robot-dog-delivery.yaml
  39. +0 -201  examples/lifelong_learning/robot_dog_delivery/README.md
  40. +0 -71  examples/lifelong_learning/robot_dog_delivery/RFNet/accuracy.py
  41. +0 -148  examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/datasets/cityscapes.py
  42. +0 -98  examples/lifelong_learning/robot_dog_delivery/RFNet/models/util.py
  43. +0 -53  examples/lifelong_learning/robot_dog_delivery/RFNet/predict.py
  44. +0 -168  examples/lifelong_learning/robot_dog_delivery/RFNet/ramp_postprocess.py
  45. +0 -335  examples/lifelong_learning/robot_dog_delivery/RFNet/robo_infer_service.py
  46. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/battery/simplebattery.yaml
  47. +0 -42  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/camera/realsense435i.yaml
  48. +0 -28  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/common/movebase.yaml
  49. +0 -13  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/control/x20control.yaml
  50. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/imu/simpleimu.yaml
  51. +0 -17  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/odom/simpleodom.yaml
  52. +0 -80  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/robots/ysc_x20.yaml
  53. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/system.custom.yaml
  54. +0 -2  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Battery.msg
  55. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdGo.msg
  56. +0 -3  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdInit.msg
  57. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdMode.msg
  58. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdPower.msg
  59. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdResp.msg
  60. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdSet.msg
  61. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdSway.msg
  62. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdTurn.msg
  63. +0 -2  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdVideo.msg
  64. +0 -2  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Driver.msg
  65. +0 -2  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Gait.msg
  66. +0 -2  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Motor.msg
  67. +0 -2  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotLog.msg
  68. +0 -7  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotMovement.msg
  69. +0 -3  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotNav.msg
  70. +0 -4  examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotStatus.msg
  71. +0 -71  examples/lifelong_learning/robot_dog_delivery/robo_detection/package.xml
  72. +0 -13  examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/__init__.py
  73. +0 -136  examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/integration_interface.py
  74. +0 -132  examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/integration_main.py
  75. +0 -97  examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/interface.py
  76. +0 -46  examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/main.py
  77. +0 -8  examples/lifelong_learning/robot_dog_delivery/robo_detection/requirements-sdk.txt
  78. +0 -6  examples/lifelong_learning/robot_dog_delivery/robo_detection/requirements.txt
  79. +0 -1  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/VERSION
  80. +0 -0  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/__init__.py
  81. +0 -21  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/__version__.py
  82. +0 -0  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/__init__.py
  83. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/image/__init__.py
  84. +0 -40  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/image/base.py
  85. +0 -44  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/image/image_selection.py
  86. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/localize/__init__.py
  87. +0 -38  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/localize/base.py
  88. +0 -93  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/localize/odom.py
  89. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/navigation/__init__.py
  90. +0 -64  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/navigation/base.py
  91. +0 -281  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/navigation/move_base.py
  92. +0 -184  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/Astar.py
  93. +0 -16  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/__init__.py
  94. +0 -175  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/base.py
  95. +0 -187  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/dijkstra.py
  96. +0 -0  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/__init__.py
  97. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/data_collection/__init__.py
  98. +0 -66  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/data_collection/base.py
  99. +0 -253  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/data_collection/ros.py
  100. +0 -15  examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/message_channel/__init__.py

+3 -3  build/kb/Dockerfile

@@ -12,15 +12,15 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM python:3.6-slim
FROM python:3.9-slim

RUN pip install colorlog~=4.7.2
RUN pip install PyYAML~=5.4.1
RUN pip install fastapi~=0.63.0
RUN pip install starlette~=0.13.6
RUN pip install pydantic~=1.8.1
RUN pip install joblib~=1.0.1
RUN pip install pandas~=1.1.5
RUN pip install joblib~=1.2.0
RUN pip install pandas
RUN pip install uvicorn~=0.14.0
RUN pip install python-multipart~=0.0.5
RUN pip install SQLAlchemy~=1.4.7


+1 -1  examples/lifelong-learning-atcii-classifier.Dockerfile

@@ -2,9 +2,9 @@ FROM python:3.6-slim

COPY ./lib/requirements.txt /home
# install requirements of sedna lib
RUN pip install --upgrade pip
RUN pip install -r /home/requirements.txt
RUN pip install joblib~=1.0.1
RUN pip install pandas~=1.1.5
RUN pip install scikit-learn~=0.24.1
RUN pip install xgboost~=1.3.3



examples/lifelong-learning-robo-rfnet.Dockerfile → examples/lifelong-learning-cityscapes-segmentation.Dockerfile

@@ -10,16 +10,11 @@ COPY ./lib/requirements.dev.txt /home
# install requirements of sedna lib
RUN pip install -r /home/requirements.txt
RUN pip install -r /home/requirements.dev.txt
RUN pip install joblib~=1.2.0
RUN pip install pandas
RUN pip install scikit-learn~=0.23.2
RUN pip install torchvision~=0.13.0
RUN pip install Pillow
RUN pip install tqdm
RUN pip install minio
RUN pip install protobuf~=3.20.1
RUN pip install matplotlib
RUN pip install opencv-python
RUN pip install python-multipart
RUN pip install tensorboard
RUN pip install watchdog
@@ -29,7 +24,7 @@ ENV PYTHONPATH "/home/lib"
WORKDIR /home/work
COPY ./lib /home/lib

COPY ./examples/lifelong_learning/robot_dog_delivery /home/work/
COPY ./examples/lifelong_learning/cityscapes /home/work/
WORKDIR /home/work/RFNet

ENTRYPOINT ["python"]

+266 -0  examples/lifelong_learning/cityscapes/README.md

@@ -0,0 +1,266 @@
# Using Lifelong Learning in Campus Robot Delivery Scenario

## Introduction
This example shows how to use Sedna lifelong learning to implement a robot delivery task on a campus. Using the open-source project [RFNet](https://github.com/AHupuJR/RFNet) as the base model, the example applies Sedna lifelong learning to achieve intelligent perception of the environment.

The demo mainly shows:
1. The lifelong learning unseen task recognition algorithm (prediction) can identify and collect unseen task samples.
2. The lifelong learning unseen task recognition algorithm (detection) can trigger alarms to remind the robot admin that an emergency response is required.
3. The lifelong learning unseen task training algorithm improves inference accuracy on known categories and the ability to identify new categories.

### Install Sedna
Follow the [Sedna installation document](/docs/setup/install.md) to install Sedna.

### Configurations

#### Prepare Images
This example uses the following images:
- Training worker: docker.io/luosiqi/sedna-robo:v0.1.2
- Evaluation worker: docker.io/luosiqi/sedna-robo:v0.1.2
- Inference worker: docker.io/luosiqi/sedna-robo:v0.1.2

These images are generated by the script [build_image.sh](/examples/build_image.sh).

Users can also build customized images for different workers and configure them in the YAML files presented in the following steps.
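
As a sketch, a customized worker image could be built from the segmentation Dockerfile included in this commit; the target tag below is only a placeholder, and the command assumes it is run from the root of the Sedna repository.
```
docker build \
  -f examples/lifelong-learning-cityscapes-segmentation.Dockerfile \
  -t <your-registry>/sedna-robo:custom .
```
The resulting image name would then replace docker.io/luosiqi/sedna-robo:v0.1.2 in the worker specs below.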

#### Configure User Nodes
```
WORKER_NODE=sedna-mini-control-plane

DATA_NODE=$WORKER_NODE
TRAIN_NODE=$WORKER_NODE
EVAL_NODE=$WORKER_NODE
INFER_NODE=$WORKER_NODE
```

In particular, the data, train, eval, and infer nodes can be set by users to the actual nodes where each worker should run. Here, for simplicity, we use the same node for all of them.
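
If you are not sure which node names are available, you can list them first (assuming ```kubectl``` is already configured for your cluster):
```
kubectl get nodes
```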

#### Prepare Dataset

Step 1: Users can use semantic segmentation datasets from [CITYSCAPES](https://www.cityscapes-dataset.com/). We also provide a re-organized CITYSCAPES subset, [segmentation_data.zip](https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/robo_dog_delivery/segmentation_data.zip), as an example dataset for training and evaluation.

Download and unzip segmentation_data.zip, then put it into the /data directory of ```$DATA_NODE```. Alternatively, you can use ```docker exec``` to get into ```$DATA_NODE``` and execute the following commands.
```
mkdir /data
cd /data
wget https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/robo_dog_delivery/segmentation_data.zip
unzip segmentation_data.zip
```

Step 2: Download the [test data](https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/robo_dog_delivery/test_data.zip) used for demonstration at the inference stage, unzip test_data.zip, and put it into the /data directory of ```$INFER_NODE```. Alternatively, you can use ```docker exec``` to get into ```$INFER_NODE``` and execute the following commands.
```
mkdir /data
cd /data
wget https://kubeedge.obs.cn-north-1.myhuaweicloud.com/examples/robo_dog_delivery/test_data.zip
unzip test_data.zip
```

After finishing the above preparations, execute the following commands to set the configuration.
```
local_prefix=/data
cloud_image=docker.io/luosiqi/sedna-robo:v0.1.2
edge_image=docker.io/luosiqi/sedna-robo:v0.1.2
data_url=$local_prefix/segmentation_data/data.txt
OUTPUT=$local_prefix/lifelonglearningjob/output
job_name=robo-demo
```
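
Optionally, before creating the job, you can check that the dataset index is actually in place on the data node; this is only a sanity check and assumes the node can be entered with ```docker exec``` as described above.
```
docker exec -it $DATA_NODE ls /data/segmentation_data
# expect data.txt (the index the Dataset resource points to) plus the image folders
```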

### Create Lifelong Learning Job

#### Create Initial Dataset Resource
```
kubectl create -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: Dataset
metadata:
name: lifelong-robo-dataset
spec:
url: "$data_url"
format: "txt"
nodeName: "$DATA_NODE"
EOF
```

#### Create Robot Dog Delivery Lifelong Learning Job
```
kubectl create -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: LifelongLearningJob
metadata:
name: $job_name
spec:
dataset:
name: "lifelong-robo-dataset"
trainProb: 0.8
trainSpec:
template:
spec:
nodeName: $TRAIN_NODE
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: $cloud_image
name: train-worker
imagePullPolicy: IfNotPresent
args: ["train.py"]
env:
- name: "num_class"
value: "24"
- name: "epoches"
value: "1"
- name: "attribute"
value: "real, sim"
- name: "city"
value: "bremen"
- name: "BACKEND_TYPE"
value: "PYTORCH"
resources:
limits:
cpu: 6
memory: 12Gi
requests:
cpu: 4
memory: 12Gi
volumeMounts:
- mountPath: /dev/shm
name: cache-volume
volumes:
- emptyDir:
medium: Memory
sizeLimit: 256Mi
name: cache-volume
trigger:
checkPeriodSeconds: 30
timer:
start: 00:00
end: 24:00
condition:
operator: ">"
threshold: 100
metric: num_of_samples
evalSpec:
template:
spec:
nodeName: $EVAL_NODE
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: $cloud_image
name: eval-worker
imagePullPolicy: IfNotPresent
args: ["evaluate.py"]
env:
- name: "operator"
value: "<"
- name: "model_threshold"
value: "0"
- name: "num_class"
value: "24"
- name: "BACKEND_TYPE"
value: "PYTORCH"
resources:
limits:
cpu: 6
memory: 12Gi
requests:
cpu: 4
memory: 12Gi
deploySpec:
template:
spec:
nodeName: $INFER_NODE
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
containers:
- image: $edge_image
name: infer-worker
imagePullPolicy: IfNotPresent
args: ["predict.py"]
env:
- name: "test_data"
value: "/data/test_data"
- name: "num_class"
value: "24"
- name: "unseen_save_url"
value: "/data/unseen_samples"
- name: "INFERENCE_RESULT_DIR"
value: "/data/infer_results"
- name: "BACKEND_TYPE"
value: "PYTORCH"
volumeMounts:
- name: unseenurl
mountPath: /data/unseen_samples
- name: inferdata
mountPath: /data/infer_results
- name: testdata
mountPath: /data/test_data
resources:
limits:
cpu: 6
memory: 12Gi
requests:
cpu: 4
memory: 12Gi
volumes:
- name: unseenurl
hostPath:
path: /data/unseen_samples
type: DirectoryOrCreate
- name: inferdata
hostPath:
path: /data/infer_results
type: DirectoryOrCreate
- name: testdata
hostPath:
path: /data/test_data
type: DirectoryOrCreate
outputDir: $OUTPUT/$job_name
EOF
```

### Check Lifelong Learning Status

```
kubectl get lifelonglearningjob
```
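
For more detail on each stage, standard kubectl commands can be used as well; the examples below assume the job was created in the default namespace.
```
kubectl describe lifelonglearningjob $job_name
kubectl get pods    # train, eval and infer workers run as pods
```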

### Effect Display

#### Evaluation results after two rounds of lifelong learning

<table class="tg">
<thead>
<tr>
<th class="tg-lboi" rowspan="2">Rounds</th>
<th class="tg-c3ow" colspan="3">Metrics</th>
</tr>
<tr>
<th class="tg-0pky">Class Pixel Accuracy (CPA)</th>
<th class="tg-0pky">Mean Intersection over Union (mIoU)</th>
<th class="tg-0pky">Frequency Weighted Intersection over Union (FWIoU)</th>
</tr>
</thead>
<tbody>
<tr>
<td class="tg-0pky">Round 1</td>
<td class="tg-0pky">0.385</td>
<td class="tg-0pky">0.319</td>
<td class="tg-0pky">0.648</td>
</tr>
<tr>
<td class="tg-0pky">Round 2</td>
<td class="tg-0pky">0.402</td>
<td class="tg-0pky">0.339</td>
<td class="tg-0pky">0.659</td>
</tr>
</tbody>
</table>

#### Segmentation inference display
![Inference sample](./RFNet/images/2086.png) | ![Segmentation display](./RFNet/images/2086_color.png) | ![Merge](./RFNet/images/2086_merge.png)
---|---|---







+54 -0  examples/lifelong_learning/cityscapes/RFNet/accuracy.py

@@ -0,0 +1,54 @@
# Copyright 2023 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from tqdm import tqdm
from sedna.common.class_factory import ClassType, ClassFactory
from sedna.common.log import LOGGER
from utils.args import EvaluationArguments
from utils.metrics import Evaluator
from dataloaders import make_data_loader
__all__ = ('accuracy', )
@ClassFactory.register(ClassType.GENERAL)
def accuracy(y_true, y_pred, **kwargs):
args = EvaluationArguments()
_, _, test_loader = make_data_loader(args, test_data=y_true)
evaluator = Evaluator(args.num_class)
tbar = tqdm(test_loader, desc='\r')
for i, (sample, _) in enumerate(tbar):
if args.depth:
image, depth, target = sample['image'], sample['depth'], sample['label']
else:
image, target = sample['image'], sample['label']
if args.cuda:
image, target = image.cuda(), target.cuda()
if args.depth:
depth = depth.cuda()
target[target > evaluator.num_class - 1] = 255
target = target.cpu().numpy()
# Add batch sample into evaluator
evaluator.add_batch(target, y_pred[i])
# Test during the training
CPA = evaluator.Pixel_Accuracy_Class()
mIoU = evaluator.Mean_Intersection_over_Union()
FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
LOGGER.info("CPA:{}, mIoU:{}, fwIoU: {}".format(CPA, mIoU, FWIoU))
return mIoU

examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/__init__.py → examples/lifelong_learning/cityscapes/RFNet/dataloaders/__init__.py


examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/custom_transforms.py → examples/lifelong_learning/cityscapes/RFNet/dataloaders/custom_transforms.py

@@ -1,15 +1,16 @@
import torch
import random
import numpy as np

from PIL import Image, ImageOps, ImageFilter


class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""

def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = mean
self.std = std
@@ -60,10 +61,12 @@ class ToTensor(object):
'depth': depth,
'label': mask}


class CropBlackArea(object):
"""
crop black area for depth image
"""

def __call__(self, sample):
img = sample['image']
depth = sample['depth']
@@ -78,12 +81,9 @@ class CropBlackArea(object):
depth = depth.crop((left, top, right, bottom))
mask = mask.crop((left, top, right, bottom))
# resize
img = img.resize((width,height), Image.BILINEAR)
depth = depth.resize((width,height), Image.BILINEAR)
mask = mask.resize((width,height), Image.NEAREST)
# img = img.resize((512,1024), Image.BILINEAR)
# depth = depth.resize((512,1024), Image.BILINEAR)
# mask = mask.resize((512,1024), Image.NEAREST)
img = img.resize((width, height), Image.BILINEAR)
depth = depth.resize((width, height), Image.BILINEAR)
mask = mask.resize((width, height), Image.NEAREST)

return {'image': img,
'depth': depth,
@@ -148,7 +148,8 @@ class RandomScaleCrop(object):
depth = sample['depth']
mask = sample['label']
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
short_size = random.randint(
int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = img.size
if h > w:
ow = short_size
@@ -164,8 +165,10 @@ class RandomScaleCrop(object):
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
depth = ImageOps.expand(depth, border=(0, 0, padw, padh), fill=0)  # pad the extra depth area with 0
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
depth = ImageOps.expand(depth, border=(
0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(
0, 0, padw, padh), fill=self.fill)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - self.crop_size)
@@ -209,9 +212,11 @@ class FixScaleCrop(object):
'depth': depth,
'label': mask}


class FixedResize(object):
def __init__(self, size):
self.size = (size, size) # size: (h, w)
# size: (h, w)
self.size = (size, size)

def __call__(self, sample):
img = sample['image']
@@ -228,13 +233,13 @@ class FixedResize(object):
'depth': depth,
'label': mask}


class Relabel(object):
def __init__(self, olabel, nlabel): # change trainid label from olabel to nlabel
def __init__(self, olabel, nlabel):
# change trainid label from olabel to nlabel
self.olabel = olabel
self.nlabel = nlabel

def __call__(self, tensor):
# assert (isinstance(tensor, torch.LongTensor) or isinstance(tensor,
# torch.ByteTensor)), 'tensor needs to be LongTensor'
tensor[tensor == self.olabel] = self.nlabel
return tensor
return tensor

examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/custom_transforms_rgb.py → examples/lifelong_learning/cityscapes/RFNet/dataloaders/custom_transforms_rgb.py

@@ -4,12 +4,14 @@ import numpy as np

from PIL import Image, ImageOps, ImageFilter


class Normalize(object):
"""Normalize a tensor image with mean and standard deviation.
Args:
mean (tuple): means for each channel.
std (tuple): standard deviations for each channel.
"""

def __init__(self, mean=(0., 0., 0.), std=(1., 1., 1.)):
self.mean = mean
self.std = std
@@ -60,10 +62,12 @@ class ToTensor(object):
return {'image': img,
'label': mask}


class CropBlackArea(object):
"""
crop black area for depth image
"""

def __call__(self, sample):
img = sample['image']
mask = sample['label']
@@ -76,15 +80,13 @@ class CropBlackArea(object):
img = img.crop((left, top, right, bottom))
mask = mask.crop((left, top, right, bottom))
# resize
img = img.resize((width,height), Image.BILINEAR)
mask = mask.resize((width,height), Image.NEAREST)
# img = img.resize((512,1024), Image.BILINEAR)
# mask = mask.resize((512,1024), Image.NEAREST)
print(img.size)
img = img.resize((width, height), Image.BILINEAR)
mask = mask.resize((width, height), Image.NEAREST)

return {'image': img,
'label': mask}


class ToTensor_test(object):
"""Convert Image object in sample to Tensors."""

@@ -149,7 +151,8 @@ class RandomScaleCrop(object):
img = sample['image']
mask = sample['label']
# random scale (short edge)
short_size = random.randint(int(self.base_size * 0.5), int(self.base_size * 2.0))
short_size = random.randint(
int(self.base_size * 0.5), int(self.base_size * 2.0))
w, h = img.size
if h > w:
ow = short_size
@@ -164,7 +167,8 @@ class RandomScaleCrop(object):
padh = self.crop_size - oh if oh < self.crop_size else 0
padw = self.crop_size - ow if ow < self.crop_size else 0
img = ImageOps.expand(img, border=(0, 0, padw, padh), fill=0)
mask = ImageOps.expand(mask, border=(0, 0, padw, padh), fill=self.fill)
mask = ImageOps.expand(mask, border=(
0, 0, padw, padh), fill=self.fill)
# random crop crop_size
w, h = img.size
x1 = random.randint(0, w - self.crop_size)
@@ -202,9 +206,11 @@ class FixScaleCrop(object):
return {'image': img,
'label': mask}


class FixedResize(object):
def __init__(self, size):
self.size = (size, size) # size: (h, w)
# size: (h, w)
self.size = (size, size)

def __call__(self, sample):
img = sample['image']
@@ -218,13 +224,13 @@ class FixedResize(object):
return {'image': img,
'label': mask}


class Relabel(object):
def __init__(self, olabel, nlabel): # change trainid label from olabel to nlabel
def __init__(self, olabel, nlabel):
# change trainid label from olabel to nlabel
self.olabel = olabel
self.nlabel = nlabel

def __call__(self, tensor):
# assert (isinstance(tensor, torch.LongTensor) or isinstance(tensor,
# torch.ByteTensor)), 'tensor needs to be LongTensor'
tensor[tensor == self.olabel] = self.nlabel
return tensor
return tensor

examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/datasets/__init__.py → examples/lifelong_learning/cityscapes/RFNet/dataloaders/datasets/__init__.py


+116 -0  examples/lifelong_learning/cityscapes/RFNet/dataloaders/datasets/cityscapes.py

@@ -0,0 +1,116 @@
import os

from PIL import Image
from torch.utils import data
from torchvision import transforms

from dataloaders import custom_transforms as tr


class CityscapesSegmentation(data.Dataset):

def __init__(self, args, data=None, split="train"):

self.split = split
self.args = args
self.images = {}
self.disparities = {}
self.labels = {}

self.images[split] = [img[0]
for img in data.x] if hasattr(data, "x") else data

if hasattr(data, "x") and len(data.x[0]) == 1:
self.disparities[split] = self.images[split]
elif hasattr(data, "x") and len(data.x[0]) == 2:
self.disparities[split] = [img[1] for img in data.x]
else:
self.disparities[split] = data

self.labels[split] = data.y if hasattr(data, "y") else data

self.ignore_index = 255

if len(self.images[split]) == 0:
raise Exception("No RGB images for split=[%s] found in %s" % (
split, self.images_base))
if len(self.disparities[split]) == 0:
raise Exception("No depth images for split=[%s] found in %s" % (
split, self.disparities_base))

print("Found %d %s RGB images" % (len(self.images[split]), split))
print("Found %d %s disparity images" %
(len(self.disparities[split]), split))

def __len__(self):
return len(self.images[self.split])

def __getitem__(self, index):
img_path = self.images[self.split][index].rstrip()
disp_path = self.disparities[self.split][index].rstrip()
try:
lbl_path = self.labels[self.split][index].rstrip()
_img = Image.open(img_path).convert('RGB').resize(
self.args.image_size, Image.BILINEAR)
_depth = Image.open(disp_path).resize(
self.args.image_size, Image.BILINEAR)
_target = Image.open(lbl_path).resize(
self.args.image_size, Image.BILINEAR)

sample = {'image': _img, 'depth': _depth, 'label': _target}
except:
_img = Image.open(img_path).convert('RGB').resize(
self.args.image_size, Image.BILINEAR)
_depth = Image.open(disp_path).resize(
self.args.image_size, Image.BILINEAR)
sample = {'image': _img, 'depth': _depth, 'label': _img}

if self.split == 'train':
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample), img_path
elif self.split == 'test':
return self.transform_ts(sample), img_path
elif self.split == 'custom_resize':
return self.transform_ts(sample), img_path

def recursive_glob(self, rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
:param rootdir is the root directory
:param suffix is the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]

def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.CropBlackArea(),
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size,
crop_size=self.args.crop_size, fill=255),
tr.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

return composed_transforms(sample)

def transform_val(self, sample):

composed_transforms = transforms.Compose([
tr.CropBlackArea(),
tr.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

return composed_transforms(sample)

def transform_ts(self, sample):

composed_transforms = transforms.Compose([
tr.Normalize(mean=(0.485, 0.456, 0.406),
std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

return composed_transforms(sample)

examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/utils.py → examples/lifelong_learning/cityscapes/RFNet/dataloaders/utils.py


examples/lifelong_learning/robot_dog_delivery/RFNet/estimators/eval.py → examples/lifelong_learning/cityscapes/RFNet/estimators/eval.py

@@ -102,7 +102,7 @@ class Validator(object):
self.args.merge_label_save_path, img_name)
os.makedirs(os.path.dirname(merge_label_name), exist_ok=True)
pre_color_image = ToPILImage()(
pre_colors[i]) # pre_colors.dtype = float64
pre_colors[i])
image_merge(image[i], pre_color_image, merge_label_name)
if not self.args.save_predicted_image:
@@ -113,8 +113,6 @@ class Validator(object):
os.makedirs(os.path.dirname(color_label_name), exist_ok=True)
os.makedirs(os.path.dirname(label_name), exist_ok=True)
# color = paint_trapezoid(np.array(pre_color_image))
# cv2.imwrite(color_label_name, color)
pre_color_image.save(color_label_name)
pre_label_image = ToPILImage()(pre_labels[i])

examples/lifelong_learning/robot_dog_delivery/RFNet/estimators/train.py → examples/lifelong_learning/cityscapes/RFNet/estimators/train.py

@@ -218,3 +218,5 @@ class Trainer(object):
'optimizer': self.optimizer.state_dict(),
'best_pred': self.best_pred,
}, is_best)
return new_pred

examples/lifelong_learning/robot_dog_delivery/RFNet/evaluate.py → examples/lifelong_learning/cityscapes/RFNet/evaluate.py

@@ -1,22 +1,25 @@
import os
os.environ["TEST_DATASET_URL"] = "/home/lsq/RFNet/data_index/test.txt"
os.environ["MODEL_URLS"] = "s3://kubeedge/sedna-robo/kb/index.pkl"
os.environ["OUTPUT_URL"] = "s3://kubeedge/sedna-robo/kb_next/"
os.environ["KB_SERVER"] = "http://0.0.0.0:9020"
os.environ["operator"] = "<"
os.environ["model_threshold"] = "0.01"
# Copyright 2023 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
os.environ["S3_ENDPOINT_URL"] = "https://obs.cn-north-1.myhuaweicloud.com"
os.environ["SECRET_ACCESS_KEY"] = "OYPxi4uD9k5E90z0Od3Ug99symbJZ0AfyB4oveQc"
os.environ["ACCESS_KEY_ID"] = "EMPTKHQUGPO2CDUFD2YR"
import os
from sedna.core.lifelong_learning import LifelongLearning
from sedna.datasources import TxtDataParse
from sedna.common.config import Context
from accuracy import accuracy
from basemodel import Model
from interface import Estimator
def _load_txt_dataset(dataset_url):
@@ -31,13 +34,13 @@ def _load_txt_dataset(dataset_url):
def eval():
estimator = Model(num_class=31)
estimator = Estimator(num_class=Context.get_parameters("num_class", 24))
eval_dataset_url = Context.get_parameters("test_dataset_url")
eval_data = TxtDataParse(data_type="eval", func=_load_txt_dataset)
eval_data.parse(eval_dataset_url, use_raw=False)
task_allocation = {
"method": "TaskAllocationSimple"
"method": "TaskAllocationByOrigin"
}
ll_job = LifelongLearning(estimator,

BIN  examples/lifelong_learning/cityscapes/RFNet/images/2086.png

Width: 2048  |  Height: 1024  |  Size: 2.8 MB

BIN  examples/lifelong_learning/cityscapes/RFNet/images/2086_color.png

Width: 2048  |  Height: 1024  |  Size: 29 kB

BIN  examples/lifelong_learning/cityscapes/RFNet/images/2086_merge.png

Width: 2048  |  Height: 1024  |  Size: 1.7 MB

examples/lifelong_learning/robot_dog_delivery/RFNet/basemodel.py → examples/lifelong_learning/cityscapes/RFNet/interface.py

@@ -1,3 +1,17 @@
# Copyright 2023 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
@@ -8,7 +22,6 @@ from torchvision import transforms
from torch.utils.data import DataLoader
from torchvision import transforms
from sedna.common.config import Context
from sedna.datasources import TxtDataParse
from sedna.common.file_ops import FileOps
from sedna.common.log import LOGGER
from sedna.common.config import BaseConfig
@@ -17,9 +30,7 @@ from dataloaders import custom_transforms as tr
from utils.args import TrainingArguments, EvaluationArguments
from estimators.train import Trainer
from estimators.eval import Validator, load_my_state_dict
from accuracy import robo_accuracy
os.environ["BACKEND_TYPE"] = ''
from accuracy import accuracy
def preprocess_url(image_urls):
@@ -67,18 +78,7 @@ def preprocess_frames(frames):
return trainsformed_frames
def _load_txt_dataset(dataset_url):
# use original dataset url
original_dataset_url = Context.get_parameters('original_dataset_url', "")
dataset_urls = dataset_url.split()
dataset_urls = [
os.path.join(
os.path.dirname(original_dataset_url),
dataset_url) for dataset_url in dataset_urls]
return dataset_urls[:-1], dataset_urls[-1]
class Model:
class Estimator:
def __init__(self, **kwargs):
self.train_args = TrainingArguments(**kwargs)
self.val_args = EvaluationArguments(**kwargs)
@@ -101,7 +101,8 @@ class Model:
self.validator = Validator(self.val_args)
def train(self, train_data, valid_data=None, **kwargs):
self.trainer = Trainer(self.train_args, train_data=train_data)
self.trainer = Trainer(
self.train_args, train_data=train_data, valid_data=valid_data)
LOGGER.info("Total epoches: {}".format(self.trainer.args.epochs))
for epoch in range(
self.trainer.args.start_epoch,
@@ -114,7 +115,8 @@ class Model:
(epoch % self.trainer.args.eval_interval ==
(self.trainer.args.eval_interval - 1) or
epoch == self.trainer.args.epochs - 1):
# save checkpoint when it meets eval_interval or the training finishes
# save checkpoint when it meets eval_interval
# or the training finishes
is_best = False
train_model_url = self.trainer.saver.save_checkpoint({
'epoch': epoch + 1,
@@ -124,12 +126,12 @@ class Model:
}, is_best)
self.trainer.writer.close()
self.train_model_url = train_model_url
return self.train_model_url
return {"mIoU": 0 if not valid_data
else self.trainer.validation(epoch)}
def predict(self, data, **kwargs):
prediction = kwargs.get('prediction')
if isinstance(data[0], dict):
data = preprocess_frames(data)
@@ -142,20 +144,18 @@ class Model:
shuffle=False,
pin_memory=False)
if not prediction:
return self.validator.validate()
else:
return prediction
return self.validator.validate()
def evaluate(self, data, **kwargs):
predictions = self.predict(data.x)
return robo_accuracy(data.y, predictions)
return accuracy(data.y, predictions)
def load(self, model_url, **kwargs):
if model_url:
self.validator.new_state_dict = torch.load(model_url)
self.validator.model = load_my_state_dict(
self.validator.model, self.validator.new_state_dict['state_dict'])
self.validator.model,
self.validator.new_state_dict['state_dict'])
self.train_args.resume = model_url
else:
@@ -167,14 +167,3 @@ class Model:
return self.train_model_url
return FileOps.upload(self.train_model_url, model_path)
if __name__ == '__main__':
model = Model(num_class=31)
txt = "/home/lsq/RFNet/data_index/train.txt"
model.load("/home/lsq/RFNet/models/best_all_epoch_142_mean-iu_0.94952.pth")
data = TxtDataParse(data_type="eval", func=_load_txt_dataset)
data.parse(txt, use_raw=False)
model.evaluate(data)
# model.save("./models/e1_2f.pth")

examples/lifelong_learning/robot_dog_delivery/RFNet/models/replicate.py → examples/lifelong_learning/cityscapes/RFNet/models/replicate.py

@@ -1,13 +1,3 @@
# -*- coding: utf-8 -*-
# File : replicate.py
# Author : Jiayuan Mao
# Email : maojiayuan@gmail.com
# Date : 27/01/2018
#
# This file is part of Synchronized-BatchNorm-PyTorch.
# https://github.com/vacancy/Synchronized-BatchNorm-PyTorch
# Distributed under MIT License.

import functools

from torch.nn.parallel.data_parallel import DataParallel
@@ -57,7 +47,8 @@ class DataParallelWithCallback(DataParallel):
"""

def replicate(self, module, device_ids):
modules = super(DataParallelWithCallback, self).replicate(module, device_ids)
modules = super(DataParallelWithCallback,
self).replicate(module, device_ids)
execute_replication_callbacks(modules)
return modules

@@ -85,4 +76,4 @@ def patch_replication_callback(data_parallel):
execute_replication_callbacks(modules)
return modules

data_parallel.replicate = new_replicate
data_parallel.replicate = new_replicate

examples/lifelong_learning/robot_dog_delivery/RFNet/models/resnet/__init__.py → examples/lifelong_learning/cityscapes/RFNet/models/resnet/__init__.py


examples/lifelong_learning/robot_dog_delivery/RFNet/models/resnet/resnet_single_scale_single_attention.py → examples/lifelong_learning/cityscapes/RFNet/models/resnet/resnet_single_scale_single_attention.py


examples/lifelong_learning/robot_dog_delivery/RFNet/models/rfnet.py → examples/lifelong_learning/cityscapes/RFNet/models/rfnet.py

@@ -1,16 +1,18 @@
import torch.nn as nn
from itertools import chain  # chain multiple iterables together
from itertools import chain

from .util import _BNReluConv, upsample


class RFNet(nn.Module):
def __init__(self, backbone, num_classes, use_bn=True):
super(RFNet, self).__init__()
self.backbone = backbone
self.num_classes = num_classes
self.logits = _BNReluConv(self.backbone.num_features, self.num_classes, batch_norm=use_bn)
self.logits = _BNReluConv(
self.backbone.num_features, self.num_classes, batch_norm=use_bn)

def forward(self, rgb_inputs, depth_inputs = None):
def forward(self, rgb_inputs, depth_inputs=None):
x, additional = self.backbone(rgb_inputs, depth_inputs)
logits = self.logits.forward(x)
logits_upsample = upsample(logits, rgb_inputs.shape[2:])
@@ -21,4 +23,3 @@ class RFNet(nn.Module):

def fine_tune_params(self):
return self.backbone.fine_tune_params()


+144 -0  examples/lifelong_learning/cityscapes/RFNet/models/util.py

@@ -0,0 +1,144 @@
import torch
import torch.nn as nn
import torch.nn.functional as F


def upsample(x, size): return F.interpolate(
x, size, mode='bilinear', align_corners=False)


batchnorm_momentum = 0.01 / 2


def get_n_params(parameters):
pp = 0
for p in parameters:
nn = 1
for s in list(p.size()):
nn = nn * s
pp += nn
return pp


class _BNReluConv(nn.Sequential):
def __init__(self,
num_maps_in,
num_maps_out,
k=3,
batch_norm=True,
bn_momentum=0.1,
bias=False,
dilation=1):
super(_BNReluConv, self).__init__()
if batch_norm:
self.add_module('norm', nn.BatchNorm2d(
num_maps_in, momentum=bn_momentum))
self.add_module('relu', nn.ReLU(inplace=batch_norm is True))
padding = k // 2
self.add_module('conv',
nn.Conv2d(num_maps_in,
num_maps_out,
kernel_size=k,
padding=padding,
bias=bias,
dilation=dilation))


class _Upsample(nn.Module):
def __init__(self,
num_maps_in,
skip_maps_in,
num_maps_out,
use_bn=True,
k=3):
super(_Upsample, self).__init__()
self.bottleneck = _BNReluConv(
skip_maps_in, num_maps_in, k=1, batch_norm=use_bn)
self.blend_conv = _BNReluConv(
num_maps_in, num_maps_out, k=k, batch_norm=use_bn)

def forward(self, x, skip):
skip = self.bottleneck.forward(skip)
skip_size = skip.size()[2:4]
x = upsample(x, skip_size)
x = x + skip
x = self.blend_conv.forward(x)
return x


class SpatialPyramidPooling(nn.Module):
def __init__(self,
num_maps_in,
num_levels,
bt_size=512,
level_size=128,
out_size=128,
grids=(6, 3, 2, 1),
square_grid=False,
bn_momentum=0.1,
use_bn=True):
super(SpatialPyramidPooling, self).__init__()
self.grids = grids
self.square_grid = square_grid
self.spp = nn.Sequential()
self.spp.add_module('spp_bn',
_BNReluConv(num_maps_in,
bt_size,
k=1,
bn_momentum=bn_momentum,
batch_norm=use_bn))
num_features = bt_size
final_size = num_features
for i in range(num_levels):
final_size += level_size
self.spp.add_module('spp' + str(i),
_BNReluConv(num_features,
level_size,
k=1,
bn_momentum=bn_momentum,
batch_norm=use_bn))
self.spp.add_module('spp_fuse',
_BNReluConv(final_size,
out_size,
k=1,
bn_momentum=bn_momentum,
batch_norm=use_bn))

def forward(self, x):
levels = []
target_size = x.size()[2:4]

ar = target_size[1] / target_size[0]

x = self.spp[0].forward(x)
levels.append(x)
num = len(self.spp) - 1

for i in range(1, num):
if not self.square_grid:
grid_size = (self.grids[i - 1],
max(1, round(ar * self.grids[i - 1])))
x_pooled = F.adaptive_avg_pool2d(x, grid_size)
else:
x_pooled = F.adaptive_avg_pool2d(x, self.grids[i - 1])
level = self.spp[i].forward(x_pooled)

level = upsample(level, target_size)
levels.append(level)
x = torch.cat(levels, 1)
x = self.spp[-1].forward(x)
return x


class _UpsampleBlend(nn.Module):
def __init__(self, num_features, use_bn=True):
super(_UpsampleBlend, self).__init__()
self.blend_conv = _BNReluConv(
num_features, num_features, k=3, batch_norm=use_bn)

def forward(self, x, skip):
skip_size = skip.size()[2:4]
x = upsample(x, skip_size)
x = x + skip
x = self.blend_conv.forward(x)
return x

+105 -0  examples/lifelong_learning/cityscapes/RFNet/predict.py

@@ -0,0 +1,105 @@
# Copyright 2023 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from PIL import Image
from sedna.datasources import BaseDataSource
from sedna.common.config import Context
from sedna.common.log import LOGGER
from sedna.common.file_ops import FileOps
from sedna.core.lifelong_learning import LifelongLearning
from interface import Estimator
def unseen_sample_postprocess(sample, save_url):
if isinstance(sample, dict):
img = sample.get("image")
image_name = "{}.png".format(str(time.time()))
image_url = FileOps.join_path(save_url, image_name)
img.save(image_url)
else:
image_name = os.path.basename(sample[0])
image_url = FileOps.join_path(save_url, image_name)
FileOps.upload(sample[0], image_url, clean=False)
def preprocess(samples):
data = BaseDataSource(data_type="test")
data.x = [samples]
return data
def init_ll_job():
estimator = Estimator(num_class=Context.get_parameters("num_class", 24),
save_predicted_image=True,
merge=True)
task_allocation = {
"method": "TaskAllocationStream"
}
unseen_task_allocation = {
"method": "UnseenTaskAllocationDefault"
}
ll_job = LifelongLearning(
estimator,
unseen_estimator=unseen_task_processing,
task_definition=None,
task_relationship_discovery=None,
task_allocation=task_allocation,
task_remodeling=None,
inference_integrate=None,
task_update_decision=None,
unseen_task_allocation=unseen_task_allocation,
unseen_sample_recognition=None,
unseen_sample_re_recognition=None)
return ll_job
def unseen_task_processing():
return "Warning: unseen sample detected."
def predict():
ll_job = init_ll_job()
test_data_dir = Context.get_parameters("test_data")
test_data = os.listdir(test_data_dir)
test_data_num = len(test_data)
count = 0
# Simulate a permanent inference service
while True:
for i, data in enumerate(test_data):
LOGGER.info(f"Start to inference image {i + count + 1}")
test_data_url = os.path.join(test_data_dir, data)
img_rgb = Image.open(test_data_url).convert("RGB")
sample = {'image': img_rgb, "depth": img_rgb, "label": img_rgb}
predict_data = preprocess(sample)
prediction, is_unseen, _ = ll_job.inference(
predict_data,
unseen_sample_postprocess=unseen_sample_postprocess)
LOGGER.info(f"Image {i + count + 1} is unseen task: {is_unseen}")
LOGGER.info(
f"Image {i + count + 1} prediction result: {prediction}")
time.sleep(1.0)
count += test_data_num
if __name__ == '__main__':
predict()

examples/lifelong_learning/robot_dog_delivery/RFNet/train.py → examples/lifelong_learning/cityscapes/RFNet/train.py

@@ -1,10 +1,24 @@
# Copyright 2023 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from sedna.core.lifelong_learning import LifelongLearning
from sedna.common.config import Context, BaseConfig
from sedna.datasources import TxtDataParse
from basemodel import Model
from interface import Estimator
def _load_txt_dataset(dataset_url):
@@ -20,11 +34,15 @@ def _load_txt_dataset(dataset_url):
def train(estimator, train_data):
task_definition = {
"method": "TaskDefinitionSimple"
"method": "TaskDefinitionByOrigin",
"param": {
"attribute": Context.get_parameters("attribute"),
"city": Context.get_parameters("city")
}
}
task_allocation = {
"method": "TaskAllocationSimple"
"method": "TaskAllocationByOrigin"
}
ll_job = LifelongLearning(estimator,
@@ -42,24 +60,9 @@ def train(estimator, train_data):
ll_job.train(train_data)
def update(estimator, train_data):
ll_job = LifelongLearning(estimator,
task_definition=None,
task_relationship_discovery=None,
task_allocation=None,
task_remodeling=None,
inference_integrate=None,
task_update_decision=None,
unseen_task_allocation=None,
unseen_sample_recognition=None,
unseen_sample_re_recognition=None
)
ll_job.update(train_data)
def run():
estimator = Model(num_class=31, epochs=1)
estimator = Estimator(num_class=int(Context.get_parameters("num_class", 24)),
epochs=int(Context.get_parameters("epoches", 1)))
train_dataset_url = BaseConfig.train_dataset_url
train_data = TxtDataParse(data_type="train", func=_load_txt_dataset)
train_data.parse(train_dataset_url, use_raw=False)

examples/lifelong_learning/robot_dog_delivery/RFNet/utils/__init__.py → examples/lifelong_learning/cityscapes/RFNet/utils/__init__.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/args.py → examples/lifelong_learning/cityscapes/RFNet/utils/args.py

@@ -19,7 +19,7 @@ class Arguments:
self.image_size = kwargs.get(
"image_size", (2048, 1024)) # output image shape
# input batch size for training
self.batch_size = kwargs.get("batch_size")
self.batch_size = int(kwargs.get("batch_size", 4))
self.val_batch_size = int(kwargs.get(
"val_batch_size", 1)) # input batch size for validation
self.test_batch_size = int(kwargs.get(

examples/lifelong_learning/robot_dog_delivery/RFNet/utils/calculate_weights.py → examples/lifelong_learning/cityscapes/RFNet/utils/calculate_weights.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/iouEval.py → examples/lifelong_learning/cityscapes/RFNet/utils/iouEval.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/loss.py → examples/lifelong_learning/cityscapes/RFNet/utils/loss.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/lr_scheduler.py → examples/lifelong_learning/cityscapes/RFNet/utils/lr_scheduler.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/metrics.py → examples/lifelong_learning/cityscapes/RFNet/utils/metrics.py

@@ -110,11 +110,6 @@ class Evaluator(object):
np.diag(self.confusion_matrix))

FWIoU = (freq[freq > 0] * iu[freq > 0]).sum()
CFWIoU = freq[freq > 0] * iu[freq > 0]
print('-----------FWIoU of each classes-----------')
print("road : %.6f" % (CFWIoU[0] * 100.0), "%\t")
print("sidewalk : %.6f" % (CFWIoU[1] * 100.0), "%\t")
return FWIoU

def Frequency_Weighted_Intersection_over_Union_Curb(self):

examples/lifelong_learning/robot_dog_delivery/RFNet/utils/ml_regression.py → examples/lifelong_learning/cityscapes/RFNet/utils/ml_regression.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/nn_regression.py → examples/lifelong_learning/cityscapes/RFNet/utils/nn_regression.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/saver.py → examples/lifelong_learning/cityscapes/RFNet/utils/saver.py


examples/lifelong_learning/robot_dog_delivery/RFNet/utils/summaries.py → examples/lifelong_learning/cityscapes/RFNet/utils/summaries.py


+8 -0  examples/lifelong_learning/cityscapes/yaml/dataset.yaml

@@ -0,0 +1,8 @@
apiVersion: sedna.io/v1alpha1
kind: Dataset
metadata:
name: lifelong-robo-dataset
spec:
url: "$data_url"
format: "txt"
nodeName: "$DATA_NODE"

+119 -0  examples/lifelong_learning/cityscapes/yaml/robot-dog-delivery.yaml

@@ -0,0 +1,119 @@
apiVersion: sedna.io/v1alpha1
kind: LifelongLearningJob
metadata:
name: $job_name
spec:
dataset:
name: "lifelong-robo-dataset"
trainProb: 0.8
trainSpec:
template:
spec:
nodeName: $TRAIN_NODE
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: $cloud_image
name: train-worker
imagePullPolicy: IfNotPresent
args: ["train.py"]
env:
- name: "num_class"
value: "24"
- name: "epoches"
value: "1"
resources:
limits:
cpu: 6
memory: 12Gi
requests:
cpu: 4
memory: 10Gi
volumeMounts:
- mountPath: /dev/shm
name: cache-volume
volumes:
- emptyDir:
medium: Memory
sizeLimit: 256Mi
name: cache-volume
trigger:
checkPeriodSeconds: 30
timer:
start: 00:00
end: 24:00
condition:
operator: ">"
threshold: 100
metric: num_of_samples
evalSpec:
template:
spec:
nodeName: $EVAL_NODE
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: $cloud_image
name: eval-worker
imagePullPolicy: IfNotPresent
args: ["evaluate.py"]
env:
- name: "operator"
value: "<"
- name: "model_threshold"
value: "0"
- name: "num_class"
value: "24"
resources:
limits:
cpu: 6
memory: 6Gi
requests:
cpu: 4
memory: 5Gi
deploySpec:
template:
spec:
nodeName: $INFER_NODE
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
containers:
- image: $edge_image
name: infer-worker
imagePullPolicy: IfNotPresent
args: ["predict.py"]
env:
- name: "test_data"
value: "/data/test_data"
- name: "num_class"
value: "24"
- name: "unseen_save_url"
value: "/data/unseen_samples"
- name: "INFERENCE_RESULT_DIR"
value: "/data/infer_results"
volumeMounts:
- name: unseenurl
mountPath: /data/unseen_samples
- name: inferdata
mountPath: /data/infer_results
- name: testdata
mountPath: /data/test_data
resources:
limits:
cpu: 6
memory: 6Gi
requests:
cpu: 4
memory: 3Gi
volumes:
- name: unseenurl
hostPath:
path: /data/unseen_samples
type: DirectoryOrCreate
- name: inferdata
hostPath:
path: /data/infer_results
type: DirectoryOrCreate
- name: testdata
hostPath:
path: /data/test_data
type: DirectoryOrCreate
outputDir: $OUTPUT/$job_name

+0 -201  examples/lifelong_learning/robot_dog_delivery/README.md

@@ -1,201 +0,0 @@
# Using Lifelong Learning in Campus Robot Delivery Scenario
## Introduction
This example introduces how to use Sedna lifelong learning to implement the lifelong learning delivery task of the robot in the campus.
This demo mainly shows:
1. Lifelong learning unseen task recognition algorithm (prediction) can identify and collect unseen task samples.
2. Lifelong learning unseen task recognition algorithm (detection) can trigger alarms to remind robot admin that emergency response is required.
3. Lifelong learning unseen task training algorithm improves the inference accuracy of known categories and the ability to identify new categories.
## System Architecture
### Install Sedna
Follow the [Sedna installation document](/docs/setup/install.md) to install Sedna.
### Prepare Dataset
Step1: download [data.txt](https://kubeedge.obs.cn-north-1.myhuaweicloud.com:443/sedna-robo/data.txt?AWSAccessKeyId=EMPTKHQUGPO2CDUFD2YR&Expires=1697698966&Signature=s42sKZVewIP/kgLc4dEzjNCRXfk%3D)
and upload it to a specfied s3 directory. In this example, the directory is s3://kubeedge/sedna-robo/sedna_data.
Step2: download and decompress [sedna_data.zip](https://kubeedge.obs.cn-north-1.myhuaweicloud.com:443/sedna-robo/sedna_data.zip?AWSAccessKeyId=EMPTKHQUGPO2CDUFD2YR&Expires=1697700463&Signature=9OlCC8qBcQr8LfX1ptbsr7nhU5s%3D),
then upload the training images in it to the s3 directory where data.txt is stored.
### Prepare Images
This example uses these images:
1. training worker: swr.cn-south-1.myhuaweicloud.com/sedna/sedna-robo:v0.1.1
2. evaluate worker: swr.cn-south-1.myhuaweicloud.com/sedna/sedna-robo:v0.1.1
3. inference worker: swr.cn-south-1.myhuaweicloud.com/sedna/sedna-robo-infer:v0.1.1
These images are generated by the script [build_images.sh](/examples/build_image.sh).
### Create Lifelong Learning Job
#### Preparations
Before this, user must provide an s3 directory for storage which is denoted as "s3_prefix" in this example, by setting environment variable s3_prefix.
```
s3_prefix=$s3_prefix
cloud_image=swr.cn-south-1.myhuaweicloud.com/sedna/sedna-robo:v0.1.1
edge_image=swr.cn-south-1.myhuaweicloud.com/sedna/sedna-robo-infer:v0.1.1
data_url=$s3_prefix/sedna_data/data.txt
DATA_NODE=sedna-mini-control-plane
TRAIN_NODE=sedna-mini-control-plane
EVAL_NODE=sedna-mini-control-plane
INFER_NODE=sedna-mini-control-plane
OUTPUT=$s3_prefix/lifelonglearningjob/output
job_name=robo-demo
```
#### Create s3 storage resources
Before this, users must generate the S3_ENDPOINT, ACCESS_KEY_ID and SECRET_ACCESS_KEY of their own s3 accounts and set environment
variables S3_ENDPOINT, ACCESS_KEY_ID and SECRET_ACCESS_KEY.
```
action=${1:-create}
kubectl $action -f - <<EOF
apiVersion: v1
stringData:
ACCESS_KEY_ID: $ACCESS_KEY_ID
SECRET_ACCESS_KEY: $SECRET_ACCESS_KEY
kind: Secret
metadata:
name: my
annotations:
s3-endpoint: $S3_ENDPOINT
type: Opaque
EOF
```
#### Create Initial Dataset Resource
```
kubectl $action -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: Dataset
metadata:
name: lifelong-dataset-robo
spec:
url: "$data_url"
format: "txt"
nodeName: "$DATA_NODE"
credentialName: my
EOF
```
#### Create robot-dog-delivery Lifelong Learning Job
```
action=${1:-create}
kubectl $action -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: LifelongLearningJob
metadata:
name: $job_name
spec:
dataset:
name: "lifelong-dataset-robo"
trainProb: 0.8
trainSpec:
template:
spec:
nodeName: $TRAIN_NODE
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: $cloud_image
name: train-worker
imagePullPolicy: IfNotPresent
args: ["sedna_train.py"]
env:
- name: "train_ratio"
value: "0.9"
- name: "BACKEND_TYPE"
value: "PYTORCH"
resources:
limits:
cpu: 6
memory: 6Gi
requests:
cpu: 3
memory: 3Gi
volumeMounts:
- mountPath: /dev/shm
name: cache-volume
volumes:
- emptyDir:
medium: Memory
sizeLimit: 128Mi
name: cache-volume
trigger:
checkPeriodSeconds: 30
timer:
start: 00:00
end: 24:00
condition:
operator: ">"
threshold: 100
metric: num_of_samples
evalSpec:
template:
spec:
nodeName: $EVAL_NODE
dnsPolicy: ClusterFirstWithHostNet
containers:
- image: $cloud_image
name: eval-worker
imagePullPolicy: IfNotPresent
args: ["sedna_evaluate.py"]
env:
- name: "operator"
value: "<"
- name: "model_threshold" # Threshold for filtering deploy models
value: "0"
- name: "BACKEND_TYPE"
value: "PYTORCH"
resources:
limits:
cpu: 6
memory: 6Gi
requests:
cpu: 3
memory: 3Gi
deploySpec:
template:
spec:
nodeName: $INFER_NODE
dnsPolicy: ClusterFirstWithHostNet
hostNetwork: true
containers:
- image: $edge_image
name: infer-worker
imagePullPolicy: IfNotPresent
env:
- name: "BIG_MODEL_IP"
value: "http://94.74.91.114"
- name: "BIG_MODEL_PORT"
value: "30001"
- name: "RUN_FILE"
value: "integration_main.py"
resources: # user defined resources
limits:
cpu: 6
memory: 6Gi
requests:
cpu: 3
memory: 3Gi
credentialName: my
outputDir: $OUTPUT/$job_name
EOF
```
### Check Lifelong Learning Status
```
kubectl get lifelonglearningjob
```
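The same status can also be read programmatically. The sketch below uses the official kubernetes Python client and assumes the CRD plural is `lifelonglearningjobs` (group and version are taken from the manifest above); adapt it to your own cluster access.
```
from kubernetes import client, config

# Minimal status-polling sketch; assumes kubeconfig access to the cluster.
config.load_kube_config()  # use load_incluster_config() when running in-cluster
api = client.CustomObjectsApi()

jobs = api.list_cluster_custom_object(
    group="sedna.io", version="v1alpha1", plural="lifelonglearningjobs")
for job in jobs.get("items", []):
    name = job["metadata"]["name"]
    conditions = job.get("status", {}).get("conditions", [])
    print(name, conditions[-1] if conditions else "no status reported yet")
```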
### Effect Display

+ 0
- 71
examples/lifelong_learning/robot_dog_delivery/RFNet/accuracy.py View File

@@ -1,71 +0,0 @@
from tqdm import tqdm
from sedna.common.class_factory import ClassType, ClassFactory
from utils.args import EvaluationArguments
from utils.metrics import Evaluator
from dataloaders import make_data_loader
__all__ = ('accuracy', 'robo_accuracy')
@ClassFactory.register(ClassType.GENERAL)
def accuracy(y_true, y_pred, **kwargs):
args = EvaluationArguments()
_, _, test_loader = make_data_loader(args, test_data=y_true)
evaluator = Evaluator(args.num_class)
tbar = tqdm(test_loader, desc='\r')
for i, (sample, img_path) in enumerate(tbar):
if args.depth:
image, depth, target = sample['image'], sample['depth'], sample['label']
else:
image, target = sample['image'], sample['label']
if args.cuda:
image, target = image.cuda(), target.cuda()
if args.depth:
depth = depth.cuda()
target[target > evaluator.num_class - 1] = 255
target = target.cpu().numpy()
# Add batch sample into evaluator
evaluator.add_batch(target, y_pred[i])
# Test during the training
CPA = evaluator.Pixel_Accuracy_Class()
mIoU = evaluator.Mean_Intersection_over_Union()
FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
print("CPA:{}, mIoU:{}, fwIoU: {}".format(CPA, mIoU, FWIoU))
return CPA
@ClassFactory.register(ClassType.GENERAL)
def robo_accuracy(y_true, y_pred, **kwargs):
y_pred = y_pred[0]
args = EvaluationArguments()
_, _, test_loader = make_data_loader(args, test_data=y_true)
evaluator = Evaluator(args.num_class)
tbar = tqdm(test_loader, desc='\r')
for i, (sample, img_path) in enumerate(tbar):
if args.depth:
image, depth, target = sample['image'], sample['depth'], sample['label']
else:
image, target = sample['image'], sample['label']
if args.cuda:
image, target = image.cuda(), target.cuda()
if args.depth:
depth = depth.cuda()
target[target > evaluator.num_class - 1] = 255
target = target.cpu().numpy()
# Add batch sample into evaluator
evaluator.add_batch(target, y_pred[i])
# Test during the training
CPA = evaluator.Pixel_Accuracy_Class()
mIoU = evaluator.Mean_Intersection_over_Union()
FWIoU = evaluator.Frequency_Weighted_Intersection_over_Union()
print("CPA:{}, mIoU:{}, fwIoU: {}".format(CPA, mIoU, FWIoU))
return CPA

+ 0
- 148
examples/lifelong_learning/robot_dog_delivery/RFNet/dataloaders/datasets/cityscapes.py View File

@@ -1,148 +0,0 @@
import os
import numpy as np
from PIL import Image
from torch.utils import data
from torchvision import transforms
from dataloaders import custom_transforms as tr

class CityscapesSegmentation(data.Dataset):

def __init__(self, args, data=None, split="train"):

self.split = split
self.args = args
self.images = {}
self.disparities = {}
self.labels = {}

self.images[split] = [img[0] for img in data.x] if hasattr(data, "x") else data

if hasattr(data, "x") and len(data.x[0]) == 1:
self.disparities[split] = self.images[split]
elif hasattr(data, "x") and len(data.x[0]) == 2:
self.disparities[split] = [img[1] for img in data.x]
else:
self.disparities[split] = data

self.labels[split] = data.y if hasattr(data, "y") else data

self.ignore_index = 255

if len(self.images[split]) == 0:
raise Exception("No RGB images for split=[%s] found in %s" % (split, self.images_base))
if len(self.disparities[split]) == 0:
raise Exception("No depth images for split=[%s] found in %s" % (split, self.disparities_base))

print("Found %d %s RGB images" % (len(self.images[split]), split))
print("Found %d %s disparity images" % (len(self.disparities[split]), split))


def __len__(self):
return len(self.images[self.split])

def __getitem__(self, index):
img_path = self.images[self.split][index].rstrip()
disp_path = self.disparities[self.split][index].rstrip()
try:
lbl_path = self.labels[self.split][index].rstrip()
_img = Image.open(img_path).convert('RGB').resize(self.args.image_size, Image.BILINEAR)
_depth = Image.open(disp_path).resize(self.args.image_size, Image.BILINEAR)
_target = Image.open(lbl_path).resize(self.args.image_size, Image.BILINEAR)
# _img = Image.open(img_path).convert('RGB')
# _depth = Image.open(disp_path)
# _target = Image.open(lbl_path)
sample = {'image': _img,'depth':_depth, 'label': _target}
except:
_img = Image.open(img_path).convert('RGB').resize(self.args.image_size, Image.BILINEAR)
_depth = Image.open(disp_path).resize(self.args.image_size, Image.BILINEAR)
# _img = Image.open(img_path).convert('RGB')
# _depth = Image.open(disp_path)
sample = {'image': _img,'depth':_depth, 'label': _img}

if self.split == 'train':
return self.transform_tr(sample)
elif self.split == 'val':
return self.transform_val(sample), img_path
elif self.split == 'test':
return self.transform_ts(sample), img_path
elif self.split == 'custom_resize':
return self.transform_ts(sample), img_path


def recursive_glob(self, rootdir='.', suffix=''):
"""Performs recursive glob with given suffix and rootdir
:param rootdir is the root directory
:param suffix is the suffix to be searched
"""
return [os.path.join(looproot, filename)
for looproot, _, filenames in os.walk(rootdir)
for filename in filenames if filename.endswith(suffix)]

def transform_tr(self, sample):
composed_transforms = transforms.Compose([
tr.CropBlackArea(),
tr.RandomHorizontalFlip(),
tr.RandomScaleCrop(base_size=self.args.base_size, crop_size=self.args.crop_size, fill=255),
# tr.RandomGaussianBlur(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

return composed_transforms(sample)

def transform_val(self, sample):

composed_transforms = transforms.Compose([
tr.CropBlackArea(),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

return composed_transforms(sample)

def transform_ts(self, sample):

composed_transforms = transforms.Compose([
#tr.CropBlackArea(),
#tr.FixedResize(size=self.args.crop_size),
tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),
tr.ToTensor()])

return composed_transforms(sample)

if __name__ == '__main__':
from dataloaders.utils import decode_segmap
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import argparse

parser = argparse.ArgumentParser()
args = parser.parse_args()
args.base_size = 513
args.crop_size = 513

cityscapes_train = CityscapesSegmentation(args, split='train')

dataloader = DataLoader(cityscapes_train, batch_size=2, shuffle=True, num_workers=2)

for ii, sample in enumerate(dataloader):
for jj in range(sample["image"].size()[0]):
img = sample['image'].numpy()
gt = sample['label'].numpy()
tmp = np.array(gt[jj]).astype(np.uint8)
segmap = decode_segmap(tmp, dataset='cityscapes')
img_tmp = np.transpose(img[jj], axes=[1, 2, 0])
img_tmp *= (0.229, 0.224, 0.225)
img_tmp += (0.485, 0.456, 0.406)
img_tmp *= 255.0
img_tmp = img_tmp.astype(np.uint8)
plt.figure()
plt.title('display')
plt.subplot(211)
plt.imshow(img_tmp)
plt.subplot(212)
plt.imshow(segmap)

if ii == 1:
break

plt.show(block=True)


+ 0
- 98
examples/lifelong_learning/robot_dog_delivery/RFNet/models/util.py View File

@@ -1,98 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

upsample = lambda x, size: F.interpolate(x, size, mode='bilinear', align_corners=False)
batchnorm_momentum = 0.01 / 2


def get_n_params(parameters):
pp = 0
for p in parameters:
nn = 1
for s in list(p.size()):
nn = nn * s
pp += nn
return pp


class _BNReluConv(nn.Sequential):
def __init__(self, num_maps_in, num_maps_out, k=3, batch_norm=True, bn_momentum=0.1, bias=False, dilation=1):
super(_BNReluConv, self).__init__()
if batch_norm:
self.add_module('norm', nn.BatchNorm2d(num_maps_in, momentum=bn_momentum))
self.add_module('relu', nn.ReLU(inplace=batch_norm is True))
padding = k // 2 # same conv
self.add_module('conv', nn.Conv2d(num_maps_in, num_maps_out,
kernel_size=k, padding=padding, bias=bias, dilation=dilation))


class _Upsample(nn.Module):
def __init__(self, num_maps_in, skip_maps_in, num_maps_out, use_bn=True, k=3):
super(_Upsample, self).__init__()
self.bottleneck = _BNReluConv(skip_maps_in, num_maps_in, k=1, batch_norm=use_bn)
self.blend_conv = _BNReluConv(num_maps_in, num_maps_out, k=k, batch_norm=use_bn)

def forward(self, x, skip):
skip = self.bottleneck.forward(skip)
skip_size = skip.size()[2:4]
x = upsample(x, skip_size)
x = x + skip
x = self.blend_conv.forward(x)
return x


class SpatialPyramidPooling(nn.Module):
def __init__(self, num_maps_in, num_levels, bt_size=512, level_size=128, out_size=128,
grids=(6, 3, 2, 1), square_grid=False, bn_momentum=0.1, use_bn=True):
super(SpatialPyramidPooling, self).__init__()
self.grids = grids
self.square_grid = square_grid
self.spp = nn.Sequential()
self.spp.add_module('spp_bn',
_BNReluConv(num_maps_in, bt_size, k=1, bn_momentum=bn_momentum, batch_norm=use_bn))
num_features = bt_size
final_size = num_features
for i in range(num_levels):
final_size += level_size
self.spp.add_module('spp' + str(i),
_BNReluConv(num_features, level_size, k=1, bn_momentum=bn_momentum, batch_norm=use_bn))
self.spp.add_module('spp_fuse',
_BNReluConv(final_size, out_size, k=1, bn_momentum=bn_momentum, batch_norm=use_bn))

def forward(self, x):
levels = []
target_size = x.size()[2:4]

ar = target_size[1] / target_size[0]

x = self.spp[0].forward(x)
levels.append(x)
num = len(self.spp) - 1

for i in range(1, num):
if not self.square_grid:
grid_size = (self.grids[i - 1], max(1, round(ar * self.grids[i - 1])))
x_pooled = F.adaptive_avg_pool2d(x, grid_size)
else:
x_pooled = F.adaptive_avg_pool2d(x, self.grids[i - 1])
level = self.spp[i].forward(x_pooled)

level = upsample(level, target_size)
levels.append(level)
x = torch.cat(levels, 1)
x = self.spp[-1].forward(x)
return x


class _UpsampleBlend(nn.Module):
def __init__(self, num_features, use_bn=True):
super(_UpsampleBlend, self).__init__()
self.blend_conv = _BNReluConv(num_features, num_features, k=3, batch_norm=use_bn)

def forward(self, x, skip):
skip_size = skip.size()[2:4]
x = upsample(x, skip_size)
x = x + skip
x = self.blend_conv.forward(x)
return x

+ 0
- 53
examples/lifelong_learning/robot_dog_delivery/RFNet/predict.py View File

@@ -1,53 +0,0 @@
import time
from sedna.datasources import BaseDataSource
from sedna.core.lifelong_learning import LifelongLearning
from basemodel import Model
def preprocess(samples):
data = BaseDataSource(data_type="test")
data.x = [samples]
return data
def postprocess(samples):
image_names, imgs = [], []
for sample in samples:
img = sample.get("image")
image_names.append("{}.png".format(str(time.time())))
imgs.append(img)
return image_names, imgs
def init_ll_job():
estimator = Model(num_class=31,
save_predicted_image=True,
merge=True)
task_allocation = {
"method": "TaskAllocationDefault"
}
unseen_task_allocation = {
"method": "UnseenTaskAllocationDefault"
}
ll_job = LifelongLearning(
estimator,
unseen_estimator=unseen_task_processing,
task_definition=None,
task_relationship_discovery=None,
task_allocation=task_allocation,
task_remodeling=None,
inference_integrate=None,
task_update_decision=None,
unseen_task_allocation=unseen_task_allocation,
unseen_sample_recognition=None,
unseen_sample_re_recognition=None)
return ll_job
def unseen_task_processing():
return "Warning: unseen sample detected."

+ 0
- 168
examples/lifelong_learning/robot_dog_delivery/RFNet/ramp_postprocess.py View File

@@ -1,168 +0,0 @@
import cv2
import numpy as np
def parse_result(label, count, ratio):
count_d = dict(zip(label, count))
ramp_count = count_d.get(21, 0)
if ramp_count / np.sum(count) > ratio:
return True
else:
return False
def get_ramp(results, img_rgb):
results = np.array(results[0])
input_height, input_width = results.shape
# big trapezoid
big_closest = np.array([
[0, int(input_height)],
[int(input_width),
int(input_height)],
[int(0.882 * input_width + .5),
int(.8 * input_height + .5)],
[int(0.118 * input_width + .5),
int(.8 * input_height + .5)]
])
big_future = np.array([
[int(0.118 * input_width + .5),
int(.8 * input_height + .5)],
[int(0.882 * input_width + .5),
int(.8 * input_height + .5)],
[int(.765 * input_width + .5),
int(.66 * input_height + .5)],
[int(.235 * input_width + .5),
int(.66 * input_height + .5)]
])
# small trapezoid
small_closest = np.array([
[488, int(input_height)],
[1560, int(input_height)],
[1391, int(.8 * input_height + .5)],
[621, int(.8 * input_height + .5)]
])
small_future = np.array([
[741, int(.66 * input_height + .5)],
[1275, int(.66 * input_height + .5)],
[1391, int(.8 * input_height + .5)],
[621, int(.8 * input_height + .5)]
])
upper_left = np.array([
[1567, 676],
[1275, 676],
[1391, 819],
[1806, 819]
])
bottom_left = np.array([
[1806, 819],
[1391, 819],
[1560, 1024],
[2048, 1024]
])
upper_right = np.array([
[741, 676],
[481, 676],
[242, 819],
[621, 819]
])
bottom_right = np.array([
[621, 819],
[242, 819],
[0, 1024],
[488, 1024]
])
# _draw_closest_and_future(
# (big_closest, big_future),
# (small_closest, small_future),
# img_rgb)
ramp_location = locate_ramp(small_closest, small_future,
upper_left, bottom_left,
upper_right, bottom_right,
results)
if not ramp_location:
ramp_location = "no_ramp"
return ramp_location
def locate_ramp(small_closest, small_future,
upper_left, bottom_left,
upper_right, bottom_right,
results):
if has_ramp(results, (small_closest, small_future), 0.9, 0.7):
return "small_trapezoid"
right_location = has_ramp(results, (bottom_right, upper_right), 0.4, 0.2)
if right_location:
return f"{right_location}_left"
left_location = has_ramp(results, (bottom_left, upper_left), 0.4, 0.2)
if left_location:
return f"{left_location}_right"
return False
def has_ramp(results, areas, partial_ratio, all_ratio):
bottom, upper = areas
input_height, input_width = results.shape
mask = np.zeros((input_height, input_width), dtype=np.uint8)
mask = cv2.fillPoly(mask, [bottom], 1)
label, count = np.unique(results[mask == 1], return_counts=True)
has_ramp_bottom = parse_result(label, count, partial_ratio)
mask = np.zeros((input_height, input_width), dtype=np.uint8)
mask = cv2.fillPoly(mask, [upper], 1)
label, count = np.unique(results[mask == 1], return_counts=True)
has_ramp_upper = parse_result(label, count, partial_ratio)
if has_ramp_bottom:
return "bottom"
if has_ramp_upper:
return "upper"
mask = np.zeros((input_height, input_width), dtype=np.uint8)
mask = cv2.fillPoly(mask, [bottom], 1)
mask = cv2.fillPoly(mask, [upper], 1)
label, count = np.unique(results[mask == 1], return_counts=True)
has_ramp = parse_result(label, count, all_ratio)
if has_ramp:
return "center"
else:
return False
def _draw_closest_and_future(big, small, img_rgb):
big_closest, big_future = big
small_closest, small_future = small
img_array = np.array(img_rgb)
big_closest_color = [0, 50, 50]
big_future_color = [0, 69, 0]
small_closest_color = [0, 100, 100]
small_future_color = [69, 69, 69]
height, weight, channel = img_array.shape
img = np.zeros((height, weight, channel), dtype=np.uint8)
img = cv2.fillPoly(img, [big_closest], big_closest_color)
img = cv2.fillPoly(img, [big_future], big_future_color)
img = cv2.fillPoly(img, [small_closest], small_closest_color)
img = cv2.fillPoly(img, [small_future], small_future_color)
img_array = 0.3 * img + img_array
cv2.imwrite("test.png", img_array)

+ 0
- 335
examples/lifelong_learning/robot_dog_delivery/RFNet/robo_infer_service.py View File

@@ -1,335 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import time
from io import BytesIO
from typing import Optional, Any
import cv2
import numpy as np
from PIL import Image
import uvicorn
from pydantic import BaseModel
from fastapi import FastAPI, UploadFile, File
from fastapi.routing import APIRoute
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import HTMLResponse
from sedna.common.utils import get_host_ip
from sedna.common.config import BaseConfig
from sedna.common.config import Context
from predict import init_ll_job, preprocess
from ramp_postprocess import get_ramp
class ImagePayload(BaseModel):
image: UploadFile = File(...)
depth: Optional[UploadFile] = None
class ResultModel(BaseModel):
type: int = 0
box: Any = None
curr: str = None
future: str = None
img: str = None
ramp: str = None
class ResultResponse(BaseModel):
msg: str = ""
result: Optional[ResultModel] = None
code: int
class BaseServer:
# pylint: disable=too-many-instance-attributes,too-many-arguments
DEBUG = True
WAIT_TIME = 15
def __init__(
self,
servername: str,
host: str,
http_port: int = 8080,
grpc_port: int = 8081,
workers: int = 1,
ws_size: int = 16 * 1024 * 1024,
ssl_key=None,
ssl_cert=None,
timeout=300):
self.server_name = servername
self.app = None
self.host = host or '0.0.0.0'
self.http_port = http_port or 80
self.grpc_port = grpc_port
self.workers = workers
self.keyfile = ssl_key
self.certfile = ssl_cert
self.ws_size = int(ws_size)
self.timeout = int(timeout)
protocol = "https" if self.certfile else "http"
self.url = f"{protocol}://{self.host}:{self.http_port}"
def run(self, app, **kwargs):
if hasattr(app, "add_middleware"):
app.add_middleware(
CORSMiddleware, allow_origins=["*"], allow_credentials=True,
allow_methods=["*"], allow_headers=["*"],
)
uvicorn.run(
app,
host=self.host,
port=self.http_port,
ssl_keyfile=self.keyfile,
ssl_certfile=self.certfile,
workers=self.workers,
timeout_keep_alive=self.timeout,
**kwargs)
def get_all_urls(self):
url_list = [{"path": route.path, "name": route.name}
for route in getattr(self.app, 'routes', [])]
return url_list
class InferenceServer(BaseServer): # pylint: disable=too-many-arguments
"""
rest api server for inference
"""
def __init__(
self,
servername,
host: str,
http_port: int = 30001,
max_buffer_size: int = 104857600,
workers: int = 1):
super(
InferenceServer,
self).__init__(
servername=servername,
host=host,
http_port=http_port,
workers=workers)
self.ll_job = init_ll_job()
self.inference_image_dir = os.environ.get("IMAGE_TOPIC_URL", os.path.join(
BaseConfig.data_path_prefix, "inference_images"))
os.makedirs(self.inference_image_dir, exist_ok=True)
self.max_buffer_size = max_buffer_size
self.app = FastAPI(
routes=[
APIRoute(
f"/{servername}",
self.model_info,
methods=["GET"],
),
APIRoute(
f"/{servername}/predict",
self.predict,
methods=["POST"],
response_model=ResultResponse
),
],
log_level="trace",
timeout=600,
)
self.index_frame = 0
def start(self):
return self.run(self.app)
@staticmethod
def model_info():
return HTMLResponse(
"""<h1>Welcome to the RestNet API!</h1>
<p>To use this service, send a POST HTTP request to {this-url}/predict</p>
<p>The JSON payload has the following format: {"image": "BASE64_STRING_OF_IMAGE",
"depth": "BASE64_STRING_OF_DEPTH"}</p>
""")
async def predict(self, image: UploadFile = File(...), depth: Optional[UploadFile] = None) -> ResultResponse:
contents = await image.read()
start_time = time.time()
self.image = Image.open(BytesIO(contents)).convert('RGB')
self.image.save(os.path.join(
self.inference_image_dir, f"{str(time.time())}.png"))
self.index_frame = self.index_frame + 1
img_rgb = Image.fromarray(np.array(self.image))
if depth:
depth_contents = await depth.read()
depth = Image.open(BytesIO(depth_contents)).convert('RGB')
img_dep = cv2.resize(np.array(depth), (2048, 1024),
interpolation=cv2.INTER_CUBIC)
img_dep = Image.fromarray(img_dep)
else:
img_dep = img_rgb
sample = {'image': img_rgb, "depth": img_dep, "label": img_rgb}
predict_data = preprocess(sample)
end_time1 = time.time()
print("preprocess time:", end_time1 - start_time)
end_time2 = time.time()
prediction, is_unseen_task, _ = self.ll_job.inference(predict_data)
end_time3 = time.time()
print("inference time: ", end_time3 - end_time2)
if is_unseen_task:
return {
"msg": "",
"result": {
"type": 1,
"box": '',
"img": '',
"curr": '',
"future": '',
"ramp": ''
},
"code": 0
}
# curb_results, ramp_results = results
img_rgb = cv2.resize(np.array(self.image),
(2048, 1024), interpolation=cv2.INTER_CUBIC)
img_rgb = Image.fromarray(np.array(img_rgb))
results = post_process(prediction)
curr, future = get_curb(results["result"]["box"], img_rgb)
results["result"]["curr"] = curr
results["result"]["future"] = future
results["result"]["box"] = None
if Context.get_parameters("robo_skill") == "ramp_detection":
results["result"]["ramp"] = get_ramp(
prediction[0].tolist(), img_rgb)
else:
results["result"]["ramp"] = "no_ramp"
end_time4 = time.time()
print("total time:", end_time4 - start_time)
return results
def parse_result(label, count):
label_map = ['road', 'sidewalk']
count_d = dict(zip(label, count))
curb_count = count_d.get(19, 0)
if curb_count / np.sum(count) > 0.2: # > 0.3
return "curb"
r = sorted(label, key=count_d.get, reverse=True)[0]
try:
c = label_map[r]
except:
c = "other"
return c
def get_curb(results, img_rgb):
results = np.array(results[0])
input_height, input_width = results.shape
# small trapezoid
closest = np.array([
[0, int(input_height)],
[int(input_width),
int(input_height)],
[int(0.882 * input_width + .5),
int(.8 * input_height + .5)],
[int(0.118 * input_width + .5),
int(.8 * input_height + .5)]
])
future = np.array([
[int(0.118 * input_width + .5),
int(.8 * input_height + .5)],
[int(0.882 * input_width + .5),
int(.8 * input_height + .5)],
[int(.765 * input_width + .5),
int(.66 * input_height + .5)],
[int(.235 * input_width + .5),
int(.66 * input_height + .5)]
])
# big trapezoid
# closest = np.array([
# [0, int(input_height)],
# [int(input_width),
# int(input_height)],
# [int(0.882 * input_width + .5),
# int(.7 * input_height + .5)],
# [int(0.118 * input_width + .5),
# int(.7 * input_height + .5)]
# ])
#
# future = np.array([
# [int(0.118 * input_width + .5),
# int(.7 * input_height + .5)],
# [int(0.882 * input_width + .5),
# int(.7 * input_height + .5)],
# [int(.765 * input_width + .5),
# int(.5 * input_height + .5)],
# [int(.235 * input_width + .5),
# int(.5 * input_height + .5)]
# ])
# _draw_closest_and_future(closest, future, img_rgb)
mask = np.zeros((input_height, input_width), dtype=np.uint8)
mask = cv2.fillPoly(mask, [closest], 1)
mask = cv2.fillPoly(mask, [future], 2)
d1, c1 = np.unique(results[mask == 1], return_counts=True)
d2, c2 = np.unique(results[mask == 2], return_counts=True)
c = parse_result(d1, c1)
f = parse_result(d2, c2)
return c, f
def _draw_closest_and_future(closest, future, img_rgb):
img_array = np.array(img_rgb)
closest_color = [0, 191, 255]
future_color = [255, 69, 0]
img_array = cv2.fillPoly(img_array, [closest], closest_color)
img_array = cv2.fillPoly(img_array, [future], future_color)
new_img = Image.fromarray(img_array)
new_img.save("test.png")
def post_process(res):
res = res[0].tolist()
type = 0
mesg = {
"msg": "",
"result": {
"type": type,
"box": res
},
"code": 0
}
return mesg
if __name__ == '__main__':
web_app = InferenceServer("lifelong-learning-robo", host=get_host_ip())
web_app.start()

+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/battery/simplebattery.yaml View File

@@ -1,15 +0,0 @@
name: "battery"
manufacturer: ""
series: ""
description: ""
driver:
version: "ros1"
type: "ros"
name: "ros_battery_driver"
data:
support: true
target: "/battery_level"
actual_hz: 1
origin_hz: 10
requirement: # Used to generate roslaunch files.
- deep_msgs

+ 0
- 42
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/camera/realsense435i.yaml View File

@@ -1,42 +0,0 @@
name: "d435i"
manufacturer: "intel"
series: "stereo depth"
description: ""
driver:
version: "ros1"
type: "ros"
name: "ros_rgbd_camera_driver"
rgb:
support: true
encoding: "bgr8"
target: "/camera/color/image_raw" # Topic in ros or function in class
width: 320
height: 240
actual_hz: 30
origin_hz: 30
pan: # pan value allowed for the camera platform
min: -2.7
max: 2.6
tilt:
min: -1.4
max: 1.75
depth:
support: true
encoding: "passthrough"
map_factor: 0 # Factor to scale depth image by to convert it into meters
target: "/camera/depth/image_rect_raw"
aligned_depth_to_color: "/camera/aligned_depth_to_color/image_raw"
width: 320
height: 240
actual_hz: 30
origin_hz: 30
info:
target: "/camera/color/camera_info"
actual_hz: 30
origin_hz: 30
pcd:
support: false
requirement: # Used to generate roslaunch files.
- cv_bridge
- std_msgs
- sensor_msgs

+ 0
- 28
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/common/movebase.yaml View File

@@ -1,28 +0,0 @@
name: "movebase"
description: ""
driver:
version: "ros1"
type: "ros"
name: "MoveBase"
target:
goal: "/move_base_simple/goal" # goal to be pusblished
status: "/move_base/status" # topic used to get status of movebase
cancel: "/move_base/cancel" # topic used to cancel the goal sent to movebase
action: "/move_base" # Ros action topic for movebase
move_vel: "/cmd_vel" # topic used to set velocity
laserscan: "/scan"
mapframe: "map" # world frame name
localizer:
algorithm: "odom"
parameters:
- key: "mapframe"
value: "map"
limited:
min_distance: 0.1
exec_time: 0
requirement: # Used to generate roslaunch files.
- actionlib
- actionlib_msgs
- move_base_msgs
- geometry_msgs
- tf

+ 0
- 13
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/control/x20control.yaml View File

@@ -1,13 +0,0 @@
name: "ysc_control"
description: ""
driver:
version: "0.0.1"
type: "ros"
name: "ysc_control"
parameter:
local_port: 20002
ctrl_ip: '192.168.1.120'
ctrl_port: 43893
gait_topic: "/robot_gait_state"
requirement: # Used to generate roslaunch files.
- deep_msgs

+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/imu/simpleimu.yaml View File

@@ -1,15 +0,0 @@
name: "imu"
manufacturer: ""
series: ""
description: ""
driver:
version: "ros1"
type: "ros"
name: "ros_imu_driver"
data:
support: true
target: "/imu"
actual_hz: 10
origin_hz: 200
requirement: # Used to generate roslaunch files.
- sensor_msgs

+ 0
- 17
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/odom/simpleodom.yaml View File

@@ -1,17 +0,0 @@
name: "odom"
manufacturer: ""
series: ""
description: ""
driver:
version: "ros1"
type: "ros"
name: "ros_common_driver"
data:
support: true
target: "/odom"
pose: "initialpose"
mapframe: "map" # world frame name
actual_hz: 10
origin_hz: 200
requirement: # Used to generate roslaunch files.
- nav_msgs

+ 0
- 80
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/robots/ysc_x20.yaml View File

@@ -1,80 +0,0 @@
name: "绝影x20"
manufacturer: "ysc"
series: "x20"
description: "quadruped robot dog"
environment:
backend: "ros1" # ros / ros2 / harmony
requirement:
- rospy
- rostopic
- roslib
sensors:
camera:
- name: "camera_front_up"
config: "realsense435i"
rgb:
target: "/camera_front_up/color/image_raw" # Topic in ros or function in class
actual_hz: 10
origin_hz: 30
depth:
target: "/camera_front_up/depth/image_rect_raw"
aligned_depth_to_color: "/camera_front_up/aligned_depth_to_color/image_raw"
info:
target: "/camera_front_up/color/camera_info"
- name: "camera_front_down"
config: "realsense435i"
rgb:
target: "/camera_front_down/color/image_raw"
actual_hz: 10
origin_hz: 30
depth:
target: "/camera_front_down/depth/image_rect_raw"
aligned_depth_to_color: "/camera_front_down/aligned_depth_to_color/image_raw"
info:
target: "/camera_front_up/color/camera_info"
imu:
- name: "simple_imu"
config: "simpleimu"
data:
target: "/imu"
actual_hz: 10
origin_hz: 199
battery:
- name: "battery"
config: "simplebattery"
data:
target: "/battery_level"
actual_hz: 1
origin_hz: 10
odom:
- name: "odom"
config: "simpleodom"
data:
target: "/odom"
actual_hz: 10
origin_hz: 200
navigation:
name: "base_planner"
config: "movebase"
target:
goal: "/move_base_simple/goal" # get goal
status: "/move_base/status" # execution status is available
cancel: "/move_base/cancel" # cancel the goal sent to movebase
action: "/move_base" # base action command
planner: "/move_base/GlobalPlanner/make_plan"
move: "/cmd_vel"
laserscan: "/scan"
mapframe: "map" # world frame name
localizer:
algorithm: "odom"
parameter:
- key: "mapframe"
value: "map"
- key: "topic"
value: "/odom"
- key: "pose_pub"
value: "/initialpose"
control:
- legged:
name: "ysc_control" # control method supported by vendor
config: "x20control"

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/configs/system.custom.yaml View File

@@ -1 +0,0 @@
task_id: "xdkwx"

+ 0
- 2
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Battery.msg View File

@@ -1,2 +0,0 @@
Header header
float32 data

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdGo.msg View File

@@ -1 +0,0 @@
bool cmd

+ 0
- 3
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdInit.msg View File

@@ -1,3 +0,0 @@
float64 x
float64 y
float64 yaw

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdMode.msg View File

@@ -1 +0,0 @@
bool cmd

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdPower.msg View File

@@ -1 +0,0 @@
bool cmd

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdResp.msg View File

@@ -1 +0,0 @@
string msg

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdSet.msg View File

@@ -1 +0,0 @@
bool cmd

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdSway.msg View File

@@ -1 +0,0 @@
bool cmd

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdTurn.msg View File

@@ -1 +0,0 @@
bool cmd

+ 0
- 2
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/CmdVideo.msg View File

@@ -1,2 +0,0 @@
uint8 camID
bool cmd

+ 0
- 2
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Driver.msg View File

@@ -1,2 +0,0 @@
Header header
float32 data

+ 0
- 2
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Gait.msg View File

@@ -1,2 +0,0 @@
Header header
int32 data

+ 0
- 2
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/Motor.msg View File

@@ -1,2 +0,0 @@
Header header
float32 data

+ 0
- 2
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotLog.msg View File

@@ -1,2 +0,0 @@
uint8 level
string msg

+ 0
- 7
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotMovement.msg View File

@@ -1,7 +0,0 @@
float32 mileage
float32 speed
float32 acceleration
float32 direction
float32 roll
float32 pitch
float32 yaw

+ 0
- 3
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotNav.msg View File

@@ -1,3 +0,0 @@
float64 x
float64 y
float64 yaw

+ 0
- 4
examples/lifelong_learning/robot_dog_delivery/robo_detection/deep_msgs/msg/RobotStatus.msg View File

@@ -1,4 +0,0 @@
uint16 energy
uint16 runtime
string movement
string perception

+ 0
- 71
examples/lifelong_learning/robot_dog_delivery/robo_detection/package.xml View File

@@ -1,71 +0,0 @@
<?xml version="1.0"?>
<package format="2">
<name>deep_msgs</name>
<version>0.0.0</version>
<description>The deep_msgs package</description>

<!-- One maintainer tag required, multiple allowed, one person per tag -->
<!-- Example: -->
<!-- <maintainer email="jane.doe@example.com">Jane Doe</maintainer> -->
<maintainer email="luoteng-emoji@todo.todo">luoteng-emoji</maintainer>


<!-- One license tag required, multiple allowed, one license per tag -->
<!-- Commonly used license strings: -->
<!-- BSD, MIT, Boost Software License, GPLv2, GPLv3, LGPLv2.1, LGPLv3 -->
<license>TODO</license>


<!-- Url tags are optional, but multiple are allowed, one per tag -->
<!-- Optional attribute type can be: website, bugtracker, or repository -->
<!-- Example: -->
<!-- <url type="website">http://wiki.ros.org/deep_msgs</url> -->


<!-- Author tags are optional, multiple are allowed, one per tag -->
<!-- Authors do not have to be maintainers, but could be -->
<!-- Example: -->
<!-- <author email="jane.doe@example.com">Jane Doe</author> -->


<!-- The *depend tags are used to specify dependencies -->
<!-- Dependencies can be catkin packages or system dependencies -->
<!-- Examples: -->
<!-- Use depend as a shortcut for packages that are both build and exec dependencies -->
<!-- <depend>roscpp</depend> -->
<!-- Note that this is equivalent to the following: -->
<!-- <build_depend>roscpp</build_depend> -->
<!-- <exec_depend>roscpp</exec_depend> -->
<!-- Use build_depend for packages you need at compile time: -->
<!-- <build_depend>message_generation</build_depend> -->
<!-- Use build_export_depend for packages you need in order to build against this package: -->
<!-- <build_export_depend>message_generation</build_export_depend> -->
<!-- Use buildtool_depend for build tool packages: -->
<!-- <buildtool_depend>catkin</buildtool_depend> -->
<!-- Use exec_depend for packages you need at runtime: -->
<!-- <exec_depend>message_runtime</exec_depend> -->
<!-- Use test_depend for packages you need only for testing: -->
<!-- <test_depend>gtest</test_depend> -->
<!-- Use doc_depend for packages you need only for building documentation: -->
<!-- <doc_depend>doxygen</doc_depend> -->
<buildtool_depend>catkin</buildtool_depend>
<build_depend>roscpp</build_depend>
<build_depend>rospy</build_depend>
<build_depend>std_msgs</build_depend>
<build_export_depend>roscpp</build_export_depend>
<build_export_depend>rospy</build_export_depend>
<build_export_depend>std_msgs</build_export_depend>
<exec_depend>roscpp</exec_depend>
<exec_depend>rospy</exec_depend>
<exec_depend>std_msgs</exec_depend>


<!-- The export tag contains other, unspecified, tags -->
<export>
<!-- Other tools can request additional information be placed here -->

</export>
<build_depend>message_generation</build_depend>
<build_export_depend>message_generation</build_export_depend>
<exec_depend>message_runtime</exec_depend>
</package>

+ 0
- 13
examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/__init__.py View File

@@ -1,13 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

+ 0
- 136
examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/integration_interface.py View File

@@ -1,136 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import cv2
import time
import requests
import tenacity
from tenacity import retry
import numpy as np
from robosdk.utils.logger import logging
LOGGER = logging.bind(instance="lifelong-learning-robo")
@retry(stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_if_result(lambda x: x is None),
wait=tenacity.wait_fixed(1))
def http_request(url, method=None, timeout=None, binary=True, **kwargs):
_maxTimeout = timeout if timeout else 10
_method = "GET" if not method else method
try:
response = requests.request(method=_method, url=url, **kwargs)
if response.status_code == 200:
return (response.json() if binary else
response.content.decode("utf-8"))
elif 200 < response.status_code < 400:
LOGGER.info(f"Redirect_URL: {response.url}")
LOGGER.warning(
'Get invalid status code %s while request %s',
response.status_code,
url)
except (ConnectionRefusedError, requests.exceptions.ConnectionError):
LOGGER.warning(f'Connection refused while request {url}')
except requests.exceptions.HTTPError as err:
LOGGER.warning(f"Http Error while request {url} : f{err}")
except requests.exceptions.Timeout as err:
LOGGER.warning(f"Timeout Error while request {url} : f{err}")
except requests.exceptions.RequestException as err:
LOGGER.warning(f"Error occurred while request {url} : f{err}")
class Estimator:
def __init__(self, service_name="lifelong-learning-robo",
input_size=(240, 424)):
self.input_height, self.input_width = input_size
self.remote_ip = os.getenv("BIG_MODEL_IP", "http://localhost")
self.port = int(os.getenv("BIG_MODEL_PORT", "8080"))
self.endpoint = f"{self.remote_ip}:{self.port}/{service_name}/predict"
self.curr_gait = ""
self.fps = 30
self.inferDangerCount = 0
self.hold_time = 3
self.freq = cv2.getTickFrequency()
self.last_change = int(time.time())
self._poly = np.array([
[0, int(self.input_height)],
[int(self.input_width),
int(self.input_height)],
[int(0.764 * self.input_width + .5),
int(0.865 * self.input_height + .5)],
[int(0.236 * self.input_width + .5),
int(0.865 * self.input_height + .5)]
], dtype=np.int32)
def predict(self, rgb, depth):
t1 = cv2.getTickCount()
image = cv2.imencode('.jpg', rgb)[1].tobytes()
depth = cv2.imencode('.jpg', depth)[1].tobytes()
orig_h, orig_w, _ = rgb.shape
result = http_request(
self.endpoint, method="POST", files={
"image": ('rgb.jpg', image, "image/jpeg"),
"depth": None
}
)
t2 = cv2.getTickCount()
time1 = (t2 - t1) / self.freq
self.fps = round(1 / time1, 2)
if not isinstance(result, dict):
return
msg = result.get("msg", "")
r = result.get("result", {})
code = int(result.get("code", 1)) if str(
result.get("code", 1)).isdigit() else 1
if len(msg) and code != 0:
LOGGER.warning(msg)
return
_type = int(r.get("type", 1))
if _type == 1:
self.curr_gait = "stop"
LOGGER.warning("unknown result")
step = 1.0 / self.fps
c = r.get("curr", "unknown")
f = r.get("future", "unknown")
if c == "curb" or f == "curb" or c != f:
self.inferDangerCount = min(self.inferDangerCount + step, 10)
else:
self.inferDangerCount = max(self.inferDangerCount - 2 * step, 0)
if self.inferDangerCount > 1:
self.curr_gait = "up-stair"
# self.last_change = int(time.time())
elif self.inferDangerCount == 0:
# now = int(time.time())
# if now - self.last_change > self.hold_time:
self.curr_gait = "trot"
location = result.get("result").get("ramp")
return location
if __name__ == '__main__':
os.environ["BIG_MODEL_IP"] = "http://100.94.29.220"
os.environ["BIG_MODEL_PORT"] = "30001"
f1 = "./E1_door.1716.rgb.png"
f2 = "./E1_door.1716.depth.png"
_frame = cv2.imread(f1)
_depth = cv2.imread(f2, -1) / 1000
d = Estimator()
cv2.imwrite("./frame_1716.out.png", d.predict(_frame, _depth))

+ 0
- 132
examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/integration_main.py View File

@@ -1,132 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
import threading
import numpy as np
import multiprocessing
from cv_bridge import CvBridge
from robosdk.core import Robot
from robosdk.utils.context import Context
from robosdk.utils.constant import GaitType
from robosdk.msgs.sender.ros import RosMessagePublish
from ramp_detection.integration_interface import Estimator
class Detection:
def __init__(self):
self.robot = Robot(name="x20", config="ysc_x20")
self.segment = Estimator()
self.robot.connect()
self.publish = RosMessagePublish()
_topic = Context.get("curb_detection", "/robovideo")
self.publish.register("curb_detection", topic=_topic,
converter=CvBridge().cv2_to_imgmsg,
convert_param={"encoding": "bgr8"})
# self.robot.navigation.speed(0.4)
self.stair_start_time = None
self.stair_hold_time = 7
self.complete_change_upstair_time = None
self.unseen_sample_threshold = 3
self.unseen_sample_num = 0
def run(self):
if not getattr(self.robot, "camera", ""):
return
while 1:
img, dep = self.robot.camera.get_rgb_depth()
if img is None:
continue
result = self.segment.predict(img, depth=dep)
if not result:
self.robot.logger.info("Unseen sample detected.")
# self.unseen_sample_num += 1
# if self.unseen_sample_num >= self.unseen_sample_threshold:
# self.robot.logger.info("Stop in front of unseen sample!")
# for _ in range(3):
# self.robot.navigation.stop()
# self.unseen_sample_num = 0
continue
# else:
# self.unseen_sample_num = 0
if result == "no_ramp":
self.process_curb(result, img)
else:
self.process_ramp(result)
def process_curb(self, result, img):
current_time = time.time()
if self.stair_start_time and current_time - self.stair_start_time < self.stair_hold_time:
return
elif self.stair_start_time and current_time - self.stair_start_time >= self.stair_hold_time:
self.stair_start_time = None
if self.segment.curr_gait == "up-stair":
self.robot.logger.info("match curb")
gait = threading.Thread(name="", target=self.change_to_upstair)
stop = threading.Thread(name="", target=self.stop)
gait.start()
stop.start()
self.stair_start_time = time.time()
elif self.segment.curr_gait == "trot":
self.robot.logger.info("unmatch curb")
self.robot.control.change_gait(GaitType.TROT)
def process_ramp(self, location):
if location == "small_trapezoid":
self.robot.logger.info(f"Ramp location: {location}. Keep moving.")
self.robot.navigation.go_forward()
return
self.robot.logger.info(f"Ramp detected: {location}.")
if location == "upper_left" or location == "center_left":
self.robot.logger.info("Move to the left!")
self.robot.navigation.turn_left()
elif location == "bottom_left":
self.robot.logger.info("Backward and move to the left!")
self.robot.navigation.go_backward()
self.robot.navigation.turn_left()
elif location == "upper_right" or location == "center_right":
self.robot.logger.info("Move to the right!")
self.robot.navigation.turn_right()
elif location == "bottom_right":
self.robot.logger.info("Backward and move to the right!")
self.robot.navigation.go_backward()
self.robot.navigation.turn_right()
self.robot.navigation.go_forward()
def change_to_upstair(self):
self.robot.control.change_gait(GaitType.UPSTAIR)
self.complete_change_upstair_time = time.time()
# print("complete_time:", self.complete_change_upstair_time)
def stop(self):
print("Stop in front of curb!")
while 1:
self.robot.navigation.stop()
if self.complete_change_upstair_time is not None:
break
self.complete_change_upstair_time = None
if __name__ == '__main__':
project = Detection()
project.run()

+ 0
- 97
examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/interface.py View File

@@ -1,97 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import os
import cv2
import time
import requests
import tenacity
from tenacity import retry
import numpy as np
from robosdk.utils.logger import logging

LOGGER = logging.bind(instance="lifelong-learning-robo")


@retry(stop=tenacity.stop_after_attempt(5),
retry=tenacity.retry_if_result(lambda x: x is None),
wait=tenacity.wait_fixed(1))
def http_request(url, method=None, timeout=None, binary=True, **kwargs):
_maxTimeout = timeout if timeout else 10
_method = "GET" if not method else method
try:
response = requests.request(method=_method, url=url, **kwargs)
if response.status_code == 200:
return (response.json() if binary else
response.content.decode("utf-8"))
elif 200 < response.status_code < 400:
LOGGER.info(f"Redirect_URL: {response.url}")
LOGGER.warning(
'Get invalid status code %s while request %s',
response.status_code,
url)
except (ConnectionRefusedError, requests.exceptions.ConnectionError):
LOGGER.warning(f'Connection refused while request {url}')
except requests.exceptions.HTTPError as err:
LOGGER.warning(f"Http Error while request {url} : f{err}")
except requests.exceptions.Timeout as err:
LOGGER.warning(f"Timeout Error while request {url} : f{err}")
except requests.exceptions.RequestException as err:
LOGGER.warning(f"Error occurred while request {url} : f{err}")


class Estimator:
def __init__(self, service_name="lifelong-learning-robo",
input_size=(240, 424)):
self.input_height, self.input_width = input_size
self.remote_ip = os.getenv("BIG_MODEL_IP", "http://localhost")
self.port = int(os.getenv("BIG_MODEL_PORT", "8080"))
self.endpoint = f"{self.remote_ip}:{self.port}/{service_name}/predict"
self.curr_gait = ""
self.fps = 30
self.inferDangerCount = 0
self.hold_time = 3
self.freq = cv2.getTickFrequency()
self.last_change = int(time.time())
self._poly = np.array([
[0, int(self.input_height)],
[int(self.input_width),
int(self.input_height)],
[int(0.764 * self.input_width + .5),
int(0.865 * self.input_height + .5)],
[int(0.236 * self.input_width + .5),
int(0.865 * self.input_height + .5)]
], dtype=np.int32)

def predict(self, rgb, depth):
image = cv2.imencode('.jpg', rgb)[1].tobytes()
orig_h, orig_w, _ = rgb.shape
result = http_request(
self.endpoint, method="POST", files={
"image": ('rgb.jpg', image, "image/jpeg"),
# "depth": ('dep.jpg', depth, "image/jpeg"),
}
)

return result

if __name__ == '__main__':
os.environ["BIG_MODEL_IP"] = "http://100.94.29.220"
os.environ["BIG_MODEL_PORT"] = "30001"
f1 = "./E1_door.1716.rgb.png"
f2 = "./E1_door.1716.depth.png"
_frame = cv2.imread(f1)
_depth = cv2.imread(f2, -1) / 1000
d = Estimator()
cv2.imwrite("./frame_1716.out.png", d.predict(_frame, _depth))

+ 0
- 46
examples/lifelong_learning/robot_dog_delivery/robo_detection/ramp_detection/main.py View File

@@ -1,46 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from cv_bridge import CvBridge
from robosdk.core import Robot
from robosdk.utils.context import Context
from robosdk.msgs.sender.ros import RosMessagePublish

from ramp_detection.interface import Estimator


class Detection:
def __init__(self):
self.robot = Robot(name="x20", config="ysc_x20")
self.segment = Estimator()
self.robot.connect()
self.publish = RosMessagePublish()
_topic = Context.get("curb_detection", "/robovideo")
self.publish.register("curb_detection", topic=_topic,
converter=CvBridge().cv2_to_imgmsg,
convert_param={"encoding": "bgr8"})

def run(self):
if not getattr(self.robot, "camera", ""):
return
while 1:
img, dep = self.robot.camera.get_rgb_depth()
if img is None:
continue
result = self.segment.predict(img, depth=dep)
print(result)

if __name__ == '__main__':
project = Detection()
project.run()

+ 0
- 8
examples/lifelong_learning/robot_dog_delivery/robo_detection/requirements-sdk.txt View File

@@ -1,8 +0,0 @@
loguru~=0.6.0 # MIT
minio~=7.0.3 # Apache-2.0
numpy>=1.13.3 # BSD
requests~=2.27.1 # MIT
Pillow~=9.0.1
pydantic~=1.9.0 # MIT
websockets~=9.1 # BSD
tenacity~=8.0.1 # Apache-2.0

+ 0
- 6
examples/lifelong_learning/robot_dog_delivery/robo_detection/requirements.txt View File

@@ -1,6 +0,0 @@
pytest~=4.6.9
setuptools~=58.3.0
numpy~=1.21.5
Pillow~=9.0.1
requests==2.24.0
tenacity

+ 0
- 1
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/VERSION View File

@@ -1 +0,0 @@
0.0.1

+ 0
- 0
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/__init__.py View File


+ 0
- 21
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/__version__.py View File

@@ -1,21 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os


_VERSION = os.path.join(os.path.dirname(__file__), "VERSION")

with open(_VERSION, "r", encoding="utf-8") as fin:
tmp = [line.strip() for line in fin if line.strip()]
__version__ = "-".join(tmp) if tmp else "dev"

+ 0
- 0
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/__init__.py View File


+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/image/__init__.py View File

@@ -1,15 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .image_selection import SimpleSelection

+ 0
- 40
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/image/base.py View File

@@ -1,40 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
from typing import List

from robosdk.utils.logger import logging
from robosdk.utils.util import Config
from robosdk.utils.util import ImageQualityEval


__all__ = ("Resolution", )


class Resolution(metaclass=abc.ABCMeta):
def __init__(self, name: str, config: Config):
self.resolution_name = name
self.config = config
if (getattr(self.config, "image", "") and
hasattr(self.config.image, "eval")):
eval_func = self.config.image.eval
else:
eval_func = "vollath"
self.eval = getattr(
ImageQualityEval, eval_func, ImageQualityEval.vollath)
self.logger = logging.bind(instance=self.resolution_name, system=True)

@abc.abstractmethod
def inference(self, imgs: List):
...

+ 0
- 44
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/image/image_selection.py View File

@@ -1,44 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from typing import List

import cv2
import numpy as np

from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory
from robosdk.utils.util import Config
from robosdk.utils.exceptions import SensorError

from .base import Resolution


__all__ = ("SimpleSelection", )


@ClassFactory.register(ClassType.IMAGE, alias="base_select")
class SimpleSelection(Resolution): # noqa

def __init__(self, name: str = "base_select", config: Config = None):
super(SimpleSelection, self).__init__(name=name, config=config)

def inference(self, imgs: List[np.ndarray]) -> np.ndarray:
if not len(imgs):
raise SensorError("No input images found")
if len(imgs) == 1:
return imgs[0]
s = sorted(imgs, key=lambda j: self.eval(
cv2.cvtColor(j, cv2.COLOR_BGR2GRAY)))
return s[-1]

+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/localize/__init__.py View File

@@ -1,15 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .odom import Odom

+ 0
- 38
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/localize/base.py View File

@@ -1,38 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import threading

from robosdk.utils.logger import logging
from robosdk.utils.schema import BasePose


__all__ = ("Localize", )


class Localize(metaclass=abc.ABCMeta):

def __init__(self, name: str = "base_localizer"):
self.localize_name = name
self.state_lock = threading.RLock()
self.curr_state = BasePose()
self.logger = logging.bind(instance=self.localize_name, system=True)

@abc.abstractmethod
def get_curr_state(self) -> BasePose:
...

@abc.abstractmethod
def set_curr_state(self, state: BasePose):
...

+ 0
- 93
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/localize/odom.py View File

@@ -1,93 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy

from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory
from robosdk.utils.schema import BasePose

from .base import Localize


@ClassFactory.register(ClassType.LOCALIZE, alias="odom")
class Odom(Localize): # noqa
def __init__(self, name: str = "Odometry",
mapframe: str = "map",
topic: str = "/odom",
pose_pub: str = "/initialpose"
):
super(Odom, self).__init__(name=name)

import rospy
from nav_msgs.msg import Odometry
from geometry_msgs.msg import PoseWithCovarianceStamped

rospy.Subscriber(
topic,
Odometry,
self.get_odom_state,
)
self.initial_pose = rospy.Publisher(
pose_pub,
PoseWithCovarianceStamped,
queue_size=1
)
self.mapframe = mapframe or "map"

def get_odom_state(self, msg):

import tf.transformations

self.state_lock.acquire()
orientation = msg.pose.pose.orientation
_, _, z = tf.transformations.euler_from_quaternion(
[orientation.x, orientation.y, orientation.z, orientation.w]
)
self.curr_state.x = msg.pose.pose.position.x
self.curr_state.y = msg.pose.pose.position.y
self.curr_state.z = z
self.state_lock.release()

def get_curr_state(self, **kwargs) -> BasePose:
state = copy.deepcopy(self.curr_state)
return state

def set_curr_state(self, state: BasePose):

import rospy
import tf.transformations
from geometry_msgs.msg import PoseWithCovarianceStamped

self.curr_state.x = state.x
self.curr_state.y = state.y
self.curr_state.z = state.z
self.curr_state.w = state.w

initial_pose = PoseWithCovarianceStamped()
initial_pose.header.seq = 1
initial_pose.header.stamp = rospy.Time.now()
initial_pose.header.frame_id = self.mapframe

q = tf.transformations.quaternion_from_euler(0, 0, state.z)
initial_pose.pose.pose.position.x = state.x
initial_pose.pose.pose.position.y = state.y
initial_pose.pose.pose.position.z = 0.0

initial_pose.pose.pose.orientation.x = q[0]
initial_pose.pose.pose.orientation.y = q[1]
initial_pose.pose.pose.orientation.z = q[2]
initial_pose.pose.pose.orientation.w = q[3]
for _ in range(10):
self.initial_pose.publish(initial_pose)
rospy.sleep(0.1)
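
get_odom_state above keeps only the planar part of the odometry: x and y from the position, plus the yaw angle extracted from the orientation quaternion via tf. A dependency-free sketch of that yaw extraction, assumed equivalent to the tf.transformations call for this planar case:

import math

def yaw_from_quaternion(x: float, y: float, z: float, w: float) -> float:
    # Rotation about the vertical axis, extracted from a unit quaternion (x, y, z, w).
    return math.atan2(2.0 * (w * z + x * y), 1.0 - 2.0 * (y * y + z * z))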

+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/navigation/__init__.py View File

@@ -1,15 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .move_base import MoveBase

+ 0
- 64
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/navigation/base.py View File

@@ -1,64 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import threading

from robosdk.utils.schema import BasePose
from robosdk.utils.schema import PathNode
from robosdk.utils.logger import logging
from robosdk.utils.util import Config
from robosdk.cloud_robotics.task_agent.base import RoboActionHandle

__all__ = ("Navigation", )


class Navigation(metaclass=abc.ABCMeta):

def __init__(self, name: str, config: Config):
self.navigation_name = name
self.config = config
self.move_base_as = None
self.goal_lock = threading.RLock()
self.logger = logging.bind(instance=self.navigation_name, system=True)

@abc.abstractmethod
def execute_track(self, plan: PathNode):
...

@abc.abstractmethod
def goto(self, goal: BasePose):
...

@abc.abstractmethod
def goto_absolute(self, goal: BasePose):
...

def stop(self):
self.set_vel(forward=0.0, turn=0.0, execution=3)

@abc.abstractmethod
def set_vel(self,
forward: float = 0,
turn: float = 0,
execution: int = 1):
"""
set velocity to robot
:param forward: linear velocity in m/s
:param turn: rotational velocity in m/s
:param execution: execution time in seconds
"""
...

def set_action_server(self, action: RoboActionHandle):
self.move_base_as = action

+ 0
- 281
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/navigation/move_base.py View File

@@ -1,281 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from importlib import import_module

import numpy as np

from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory
from robosdk.utils.util import Config
from robosdk.utils.schema import BasePose
from robosdk.utils.schema import PathNode
from robosdk.utils.constant import ActionStatus
from robosdk.cloud_robotics.task_agent.base import RoboActionHandle

from .base import Navigation

__all__ = ("MoveBase",)


@ClassFactory.register(ClassType.NAVIGATION)
class MoveBase(Navigation): # noqa

def __init__(self,
name: str = "MoveBase",
config: Config = None
):
super(MoveBase, self).__init__(name=name, config=config)

import rospy
import actionlib
from geometry_msgs.msg import Twist
from sensor_msgs.msg import LaserScan
from move_base_msgs.msg import MoveBaseAction
from actionlib_msgs.msg import GoalStatusArray, GoalID

self.move_base_ac = actionlib.SimpleActionClient(
self.config.target.action, MoveBaseAction
)
rospy.Subscriber(
self.config.target.status,
GoalStatusArray,
self._move_base_status_callback,
)
self.scan_msg = LaserScan()
rospy.Subscriber(
self.config.target.laserscan, LaserScan,
self._laser_scan_callback)
self.move_base_cancel = rospy.Publisher(
self.config.target.cancel, GoalID, queue_size=1
)
self.curr_vel = BasePose()
self.vel_pub = rospy.Publisher(
self.config.target.move_vel, Twist, queue_size=1
)
self.curr_goal = BasePose()
self.execution_status = None  # updated by _move_base_status_callback
rospy.Subscriber(
self.config.target.move_vel, Twist,
self._move_vel_callback)
self.localizer = self.get_localize_algorithm_from_config()

def _move_vel_callback(self, data):
if data is None:
return
self.curr_vel = BasePose(
x=data.linear.x,
y=data.linear.y,
)

def get_localize_algorithm_from_config(self, **param):
localizer = None
_param = dict()
if self.config.localizer and self.config.localizer.algorithm:
try:
_ = import_module(f"robosdk.algorithms.localize")
localizer = ClassFactory.get_cls(
ClassType.LOCALIZE, self.config.localizer.algorithm)
except Exception as e:
self.logger.error(
f"Fail to initial localizer algorithm "
f"{self.config.localizer.algorithm} : {e}")
if not localizer:
return
if (self.config.localizer and
isinstance(self.config.localizer.parameter, list)):
_param = {
p["key"]: p.get("value", "")
for p in self.config.localizer.parameter
if isinstance(p, dict) and "key" in p
}
_param.update(**param)
return localizer(**_param)

def _transform_goal(self, goal: BasePose):
import tf.transformations
from geometry_msgs.msg import PoseStamped
from geometry_msgs.msg import Pose
from geometry_msgs.msg import Point
from geometry_msgs.msg import Quaternion
from move_base_msgs.msg import MoveBaseGoal

self.goal_lock.acquire()
q = tf.transformations.quaternion_from_euler(0, 0, goal.z)
pose_stamped = PoseStamped(
pose=Pose(
Point(goal.x, goal.y, 0.000),
Quaternion(q[0], q[1], q[2], q[3])
)
)
pose_stamped.header.frame_id = self.config.target.mapframe
target = MoveBaseGoal()
target.target_pose = pose_stamped
self.curr_goal = goal.copy()
self.goal_lock.release()
return target

def cancel_goal(self):
from actionlib_msgs.msg import GoalID

self.logger.warning("Goal Cancel")
try:
self.move_base_cancel.publish(GoalID())
except Exception as err:
self.logger.debug(f"Cancel goal failure: {err}")
self.move_base_ac.cancel_all_goals()
self.stop()

def _move_base_status_callback(self, msg):
if msg.status_list:
self.execution_status = getattr(msg.status_list[-1], "status")

def _laser_scan_callback(self, msg):
self.scan_msg = msg

def execute_track(self, plan: PathNode, min_gap: float = 0):
target = plan.copy()
if not min_gap:
min_gap = self.config.limited.min_distance
while 1:
curr_point = self.get_location()
if curr_point - target <= abs(min_gap):
target = plan.next
if target is None:
self.logger.info("Path planning execute complete")
break
self.goto(goal=target.position)

def get_location(self, **parameter):
if self.localizer and hasattr(self.localizer, "get_curr_state"):
return self.localizer.get_curr_state(**parameter)
return self.curr_goal.copy()

def goto(self, goal: BasePose,
start_pos: BasePose = None,
limit_time: int = 0): # noqa
if start_pos is None:
start_pos = self.get_location()
curr_goal = self._get_absolute_pose(goal, start_pos)
return self.goto_absolute(curr_goal, limit_time=limit_time)

def goto_absolute(self, goal: BasePose, limit_time: int = 0):
self.logger.info(f"Sending the goal, {str(goal)}")
target = self._transform_goal(goal)
return self._send_action_goal(target, limit_time=limit_time)

@staticmethod
def _get_absolute_pose(goal: BasePose, base: BasePose):
nx = base.x + goal.x * np.cos(base.z) - goal.y * np.sin(base.z)
ny = base.y + goal.x * np.sin(base.z) + goal.y * np.cos(base.z)
nz = base.z + goal.z
return BasePose(x=nx, y=ny, z=nz)

def _send_action_goal(self, goal, limit_time: int = 0):
import rospy

self.logger.debug("Waiting for the server")
self.move_base_ac.wait_for_server()

self.move_base_ac.send_goal(goal)

self.logger.debug("Waiting for the move_base Result")
exit_without_time = True
if not limit_time:
limit_time = int(self.config.limited.exec_time)
if limit_time > 0:
exit_without_time = self.move_base_ac.wait_for_result(
rospy.Duration(limit_time)
)
while 1:
if not exit_without_time:
self.cancel_goal()
self.logger.warning("ERROR:Timed out achieving goal")
return False
if ((self.move_base_as and self.move_base_as.should_stop) or
(self.execution_status == ActionStatus.ABORTED.value)):
self.cancel_goal()
return False
if (isinstance(self.move_base_as, RoboActionHandle)
and self.move_base_as.is_preempt):
self.cancel_goal()
return False
if self.execution_status == ActionStatus.SUCCEEDED.value:
return True
rospy.sleep(0.1)

def set_vel(self,
forward: float = 0,
turn: float = 0,
execution: int = 1):
"""
set velocity to robot
:param forward: linear velocity in m/s
:param turn: rotational velocity in m/s
:param execution: execution time in seconds
"""

import rospy
from geometry_msgs.msg import Twist

msg = Twist()
msg.linear.x = forward
msg.angular.z = turn

stop_time = rospy.get_rostime() + rospy.Duration(execution)
while rospy.get_rostime() < stop_time:
self.vel_pub.publish(msg)
rospy.sleep(.05)

def turn_right(self):
self.set_vel(1.5, -.7)

def turn_left(self):
self.set_vel(1.5, .7)

def go_forward(self):
self.set_vel(1.3, 0)

def go_backward(self):
self.set_vel(-.5, 0)

def stop(self):
self.set_vel(0, 0)

def go_forward_util(self, stop_distance: float = 0.5):
while self.get_front_distance() > stop_distance:
self.set_vel(.1, 0)
self.stop()

def speed(self, linear: float = 0., rotational: float = 0.):
from geometry_msgs.msg import Twist

msg = Twist()
msg.linear.x = linear
msg.angular.z = rotational

self.vel_pub.publish(msg)

def get_front_distance(self, degree: int = 10) -> float:
if self.scan_msg is not None:
all_data = getattr(self.scan_msg, "ranges", [])
if not len(all_data):
return 0.0
circle = int(len(all_data) / 2)
left_degree = max(circle - abs(degree), 10)
right_degree = min(circle + abs(degree), len(all_data))
ranges = list(
filter(lambda x: (x != float('inf') and x != float("-inf")),
all_data[left_degree:right_degree])
)
return float(np.mean(ranges))
return 0.0
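
A goal passed to MoveBase.goto is expressed in the robot frame; _get_absolute_pose rotates it by the robot's current heading and translates it by the robot's position to obtain a map-frame goal. A self-contained sketch of that transform (Pose2D is a hypothetical stand-in for BasePose):

import math
from dataclasses import dataclass

@dataclass
class Pose2D:
    x: float = 0.0
    y: float = 0.0
    z: float = 0.0  # heading in radians

def to_absolute(goal: Pose2D, base: Pose2D) -> Pose2D:
    # Rotate the relative goal by the robot heading, then translate by its position.
    nx = base.x + goal.x * math.cos(base.z) - goal.y * math.sin(base.z)
    ny = base.y + goal.x * math.sin(base.z) + goal.y * math.cos(base.z)
    return Pose2D(x=nx, y=ny, z=base.z + goal.z)

# A goal 1 m straight ahead of a robot facing +90 degrees ends up 1 m along +y:
# to_absolute(Pose2D(1, 0, 0), Pose2D(0, 0, math.pi / 2)) -> x ~= 0, y ~= 1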

+ 0
- 184
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/Astar.py View File

@@ -1,184 +0,0 @@
"""
A* grid planning
author: Atsushi Sakai(@Atsushi_twi)
Nikos Kanargias (nkana@tee.gr)
see https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathPlanning/AStar/a_star.py # noqa
"""
import math

from robosdk.utils.schema import BasePose
from robosdk.utils.schema import PathNode
from robosdk.utils.constant import PgmItem
from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory

from .base import PathPlanner
from .base import GridMap


__all__ = ("AStar", )


@ClassFactory.register(ClassType.PATHPLANNING)
class AStar(PathPlanner): # noqa

def __init__(self,
world_map: GridMap,
start: BasePose,
goal: BasePose):
super(AStar, self).__init__(
world_map=world_map, start=start, goal=goal)
self.max_x, self.max_y = self.world.grid.shape

class Node:
def __init__(self, x, y, cost, parent_index):
self.x = x # index of grid
self.y = y # index of grid
self.cost = cost
self.parent_index = parent_index

def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(
self.cost) + "," + str(self.parent_index)

def planning(self, step=0) -> PathNode:
sx, sy = self.s_start.x, self.s_start.y
gx, gy, angle = self.s_goal.x, self.s_goal.y, self.s_goal.z
start_node = self.Node(self.calc_xy_index(sx),
self.calc_xy_index(sy), 0.0, -1)
goal_node = self.Node(self.calc_xy_index(gx),
self.calc_xy_index(gy), 0.0, -1)

open_set, closed_set = dict(), dict()
open_set[self.calc_grid_index(start_node)] = start_node

while 1:
if len(open_set) == 0:
break

c_id = min(
open_set,
key=lambda o: (open_set[o].cost +
self.calc_heuristic(goal_node, open_set[o])))
current = open_set[c_id]

if current.x == goal_node.x and current.y == goal_node.y:
goal_node.parent_index = current.parent_index
goal_node.cost = current.cost
break

# Remove the item from the open set
del open_set[c_id]

# Add it to the closed set
closed_set[c_id] = current

# expand_grid search grid based on motion model
for i, _ in enumerate(self.motion):
node = self.Node(current.x + self.motion[i][0],
current.y + self.motion[i][1],
current.cost + self.motion[i][2], c_id)
n_id = self.calc_grid_index(node)
if (not self.verify_node(node)) or (n_id in closed_set):
continue

if n_id not in open_set:
open_set[n_id] = node # discovered a new node
else:
if open_set[n_id].cost > node.cost:
# This path is the best until now. record it
open_set[n_id] = node

return self.calc_final_path(
goal_node, closed_set, angle=angle, step=step)

def calc_final_path(self, goal_node, closed_set, angle=0.2, step=0):
# generate final course

rx, ry = [self.calc_grid_position(goal_node.x)], [
self.calc_grid_position(goal_node.y)]
parent_index = goal_node.parent_index
while parent_index != -1:
n = closed_set[parent_index]
rx.append(self.calc_grid_position(n.x))
ry.append(self.calc_grid_position(n.y))
parent_index = n.parent_index
prev = None
first_node = None
if len(rx) < 4 or step == 1:
sequence = zip(reversed(rx), reversed(ry))
elif step > 1:
a1, b1 = rx[::-step], ry[::-step]
if a1[-1] != rx[0]:
a1.append(rx[0])
b1.append(ry[0])
sequence = zip(a1, b1)
else: # auto gen waypoint
sequence = []
prev = None
for inx in range(len(rx) - 1, -1, -1):
if (len(sequence) == 0) or (inx == 0):
prev = (rx[inx], ry[inx])
sequence.append(prev)
continue
_x, _y = rx[inx], ry[inx]
_x_n, _y_n = rx[inx - 1], ry[inx - 1]
if ((_x_n - _x) != (_x - prev[0])) or (
(_y_n - _y) != (_y - prev[1])
):
sequence.append((_x, _y))
prev = (_x, _y)
for seq, node in enumerate(sequence):
position = self.world.pixel2world(x=node[0], y=node[1], alt=angle)
curr = PathNode(
seq=seq, point=node, position=position, prev=prev
)
if isinstance(prev, PathNode):
prev.next = curr
else:
first_node = curr
prev = curr
return first_node

@staticmethod
def calc_heuristic(n1, n2):
w = 1.0 # weight of heuristic
d = w * math.hypot(n1.x - n2.x, n1.y - n2.y)
return d

@staticmethod
def calc_grid_position(index, min_position=0):
"""
calc grid position
:param index:
:param min_position:
:return:
"""
pos = index + min_position
return pos

@staticmethod
def calc_xy_index(position, min_pos=0):
return position - min_pos

def calc_grid_index(self, node):
return node.y * self.max_x + node.x

def verify_node(self, node):
px = self.calc_grid_position(node.x)
py = self.calc_grid_position(node.y)

if px <= 0:
return False
elif py <= 0:
return False
elif px >= self.max_x:
return False
elif py >= self.max_y:
return False

# collision check
if self.world.grid[node.x][node.y] == PgmItem.OBSTACLE.value:
return False

return True
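
The planner produces a singly linked list of PathNode waypoints whose head is returned by planning(), and MoveBase.execute_track then follows it node by node. A hedged usage sketch, assuming a map exported by the SLAM stack as map.yaml/map.pgm and that PathNode.next is None for the final waypoint:

from robosdk.utils.schema import BasePose
from robosdk.algorithms.path_planning import AStar
from robosdk.algorithms.path_planning.base import GridMap

world = GridMap()
world.read_from_pgm("map.yaml")              # placeholder map file
world.calc_obstacle_map(robot_radius=0.2)

planner = AStar(world_map=world,
                start=BasePose(x=0.0, y=0.0, z=0.0),
                goal=BasePose(x=3.5, y=1.2, z=0.0))
node = planner.planning(step=0)              # head of the waypoint list
while node is not None:
    print(node.seq, node.position)           # world-frame pose of each waypoint
    node = node.next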

+ 0
- 16
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/__init__.py View File

@@ -1,16 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .Astar import AStar
from .dijkstra import Dijkstra

+ 0
- 175
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/base.py View File

@@ -1,175 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import os
import math
from typing import List

import yaml
import numpy as np
from PIL import Image

from robosdk.utils.schema import BasePose
from robosdk.utils.schema import PgmMap
from robosdk.utils.schema import PathNode
from robosdk.utils.constant import PgmItem


__all__ = ("GridMap", "PathPlanner")


class MapBase(metaclass=abc.ABCMeta):
"""
Map base class
"""

def __init__(self):
self.info = None
self.grid = None
self.obstacles = None

@abc.abstractmethod
def parse_panoptic(self, **kwargs):
...

@abc.abstractmethod
def add_obstacle(self, **kwargs):
...

@abc.abstractmethod
def pixel2world(self, **kwargs) -> BasePose:
...

@abc.abstractmethod
def world2pixel(self, **kwargs) -> BasePose:
...


class GridMap(MapBase): # noqa
"""
2D grid map
"""

def __init__(self):
super(GridMap, self).__init__()
self.width = 0
self.height = 0
self.width_m = 0
self.height_m = 0
self.obstacles = []
self.padding_map = None

def read_from_pgm(self, config: str, pgm: str = None):
with open(config) as f:
data = yaml.load(f, Loader=yaml.FullLoader)
image = pgm if pgm else data['image']
if not os.path.isfile(image):
image = os.path.join(os.path.dirname(config),
os.path.basename(image))
if not os.path.isfile(image):
prefix, _ = os.path.splitext(config)
image = f"{prefix}.pgm"
if not os.path.isfile(image):
raise FileExistsError(f"Read PGM from {config} Error ...")
self.info = PgmMap(
image=image,
resolution=round(float(data['resolution']), 4),
origin=list(map(float, data['origin'])),
reverse=int(data['negate']),
occupied_thresh=data['occupied_thresh'],
free_thresh=data['free_thresh']
)
fh = Image.open(image)
self.height, self.width = fh.size
self.width_m = self.width * self.info.resolution
self.height_m = self.height * self.info.resolution
data = np.array(fh)
occ = data / 255. if self.info.reverse else (255. - data) / 255.

self.grid = np.zeros((self.width, self.height)) + PgmItem.UNKNOWN.value
self.grid[occ > self.info.occupied_thresh] = PgmItem.OBSTACLE.value
self.grid[occ < self.info.free_thresh] = PgmItem.FREE.value
self.obstacles = list(zip(*np.where(occ > self.info.occupied_thresh)))

def calc_obstacle_map(self, robot_radius: float = 0.01):
if not len(self.obstacles):
return
obstacles = np.array(self.obstacles)
row = obstacles[:, 0]
col = obstacles[:, 1]
x_min, x_max = min(row), max(row)
y_min, y_max = min(col), max(col)
self.grid = self.grid[x_min:x_max, y_min:y_max]
self.padding_map = np.array([x_min, y_min, 0])
self.obstacles = obstacles - [x_min, y_min]
self.height, self.width = self.grid.shape[:2]
self.width_m = self.width * self.info.resolution
self.height_m = self.height * self.info.resolution

if self.info.resolution < robot_radius:
# todo: Adjust obstacles to robot size
robot = int(robot_radius / self.info.resolution + 0.5)
for ox, oy in self.obstacles:
self.add_obstacle(
ox - robot, oy - robot,
ox + robot, oy + robot
)

def pixel2world(self,
x: float = 0.,
y: float = 0.,
alt: float = 0.) -> BasePose:
data = np.array([y, x, alt])
if self.padding_map is not None:
data += self.padding_map
x, y, z = list(
np.array(self.info.origin) + data * self.info.resolution
)
return BasePose(x=x, y=y, z=z)

def world2pixel(self, x, y, z=0.0) -> BasePose:
p1 = (np.array([y, x]) - self.info.origin[:2]) / self.info.resolution
if self.padding_map is not None:
p1 -= self.padding_map[:2]
px, py = int(p1[0] + 0.5), int(p1[1] + 0.5)
return BasePose(x=px, y=py, z=z)

def add_obstacle(self, x1, y1, x2, y2):
pass

def parse_panoptic(self, panoptic):
pass


class PathPlanner(metaclass=abc.ABCMeta):

def __init__(self, world_map: MapBase, start: BasePose, goal: BasePose):
self.world = world_map
self.motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]
self.s_start = self.world.world2pixel(x=start.x, y=start.y, z=start.z)
self.s_goal = self.world.world2pixel(x=goal.x, y=goal.y, z=goal.z)

@abc.abstractmethod
def planning(self, **kwargs) -> PathNode:
pass

def set_motion(self, motions: List):
self.motion = motions.copy()
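
pixel2world and world2pixel above are affine maps between grid indices and map-frame metres: world = origin + index * resolution, plus any cropping offset recorded in padding_map. A worked example under assumed map metadata:

# origin = [-10.0, -10.0, 0.0], resolution = 0.05 m/px, padding_map is None
# pixel (x=40, y=200) -> world x = origin[0] + 200 * 0.05 = 0.0   (pixel y pairs with world x)
#                        world y = origin[1] +  40 * 0.05 = -8.0  (pixel x pairs with world y)
# world2pixel inverts this, so world (x=0.0, y=-8.0) maps back to pixel (x=40, y=200).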

+ 0
- 187
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/algorithms/path_planning/dijkstra.py View File

@@ -1,187 +0,0 @@
"""

Grid based Dijkstra planning

author: Atsushi Sakai(@Atsushi_twi)
see https://github.com/AtsushiSakai/PythonRobotics/blob/master/PathPlanning/Dijkstra/dijkstra.py # noqa
"""

import math

from robosdk.utils.schema import BasePose
from robosdk.utils.schema import PathNode
from robosdk.utils.constant import PgmItem
from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory

from .base import PathPlanner
from .base import GridMap


__all__ = ("Dijkstra", )


@ClassFactory.register(ClassType.PATHPLANNING)
class Dijkstra(PathPlanner): # noqa

def __init__(self,
world_map: GridMap,
start: BasePose,
goal: BasePose):
super(Dijkstra, self).__init__(
world_map=world_map, start=start, goal=goal
)
self.max_x, self.max_y = self.world.grid.shape

class Node:
def __init__(self, x, y, cost, parent_index):
self.x = x # index of grid
self.y = y # index of grid
self.cost = cost
self.parent_index = parent_index # index of previous Node

def __str__(self):
return str(self.x) + "," + str(self.y) + "," + str(
self.cost) + "," + str(self.parent_index)

def planning(self, step=0) -> PathNode:
sx, sy = self.s_start.x, self.s_start.y
gx, gy, angle = self.s_goal.x, self.s_goal.y, self.s_goal.z
start_node = self.Node(self.calc_xy_index(sx),
self.calc_xy_index(sy), 0.0, -1)
goal_node = self.Node(self.calc_xy_index(gx),
self.calc_xy_index(gy), 0.0, -1)

open_set, closed_set = dict(), dict()
open_set[self.calc_grid_index(start_node)] = start_node

while 1:
if len(open_set) == 0:
break

c_id = min(open_set, key=lambda o: open_set[o].cost)
current = open_set[c_id]

if current.x == goal_node.x and current.y == goal_node.y:
goal_node.parent_index = current.parent_index
goal_node.cost = current.cost
break
del open_set[c_id]
closed_set[c_id] = current

# expand search grid based on motion model
for move_x, move_y, move_cost in self.motion:
node = self.Node(current.x + move_x, current.y + move_y,
current.cost + move_cost, c_id)
n_id = self.calc_grid_index(node)

if not self.verify_node(node) or (n_id in closed_set):
continue

if n_id not in open_set:
open_set[n_id] = node # Discover a new node
else:
if open_set[n_id].cost >= node.cost:
# This path is the best until now. record it!
open_set[n_id] = node

return self.calc_final_path(
goal_node, closed_set, angle=angle, step=step)

def calc_final_path(self, goal_node, closed_set, angle=0.2, step=0):
# generate final course
rx, ry = [self.calc_grid_position(goal_node.x)], [
self.calc_grid_position(goal_node.y)]
parent_index = goal_node.parent_index
while parent_index != -1:
n = closed_set[parent_index]
rx.append(self.calc_grid_position(n.x))
ry.append(self.calc_grid_position(n.y))
parent_index = n.parent_index
prev = None
first_node = None
if len(rx) < 4 or step == 1:
sequence = zip(reversed(rx), reversed(ry))
elif step > 1:
a1, b1 = rx[::-step], ry[::-step]
if a1[-1] != rx[0]:
a1.append(rx[0])
b1.append(ry[0])
sequence = zip(a1, b1)
else: # auto gen waypoint
sequence = []
prev = None
for inx in range(len(rx) - 1, -1, -1):
if (len(sequence) == 0) or (inx == 0):
prev = (rx[inx], ry[inx])
sequence.append(prev)
continue
_x, _y = rx[inx], ry[inx]
_x_n, _y_n = rx[inx - 1], ry[inx - 1]
if ((_x_n - _x) != (_x - prev[0])) or (
(_y_n - _y) != (_y - prev[1])
):
sequence.append((_x, _y))
prev = (_x, _y)
for seq, node in enumerate(sequence):
position = self.world.pixel2world(x=node[0], y=node[1], alt=angle)
curr = PathNode(
seq=seq, point=node, position=position, prev=prev
)
if isinstance(prev, PathNode):
prev.next = curr
else:
first_node = curr
prev = curr
return first_node

@staticmethod
def calc_grid_position(index, min_position=0):
"""
calc grid position
:param index:
:param min_position:
:return:
"""
pos = index + min_position
return pos

@staticmethod
def calc_xy_index(position, min_pos=0):
return position - min_pos

def calc_grid_index(self, node):
return node.y * self.max_x + node.x

def verify_node(self, node):
px = self.calc_grid_position(node.x)
py = self.calc_grid_position(node.y)

if px <= 0:
return False
elif py <= 0:
return False
elif px >= self.max_x:
return False
elif py >= self.max_y:
return False

# collision check
if self.world.grid[node.x][node.y] == PgmItem.OBSTACLE.value:
return False

return True

@staticmethod
def get_motion_model():
# dx, dy, cost
motion = [[1, 0, 1],
[0, 1, 1],
[-1, 0, 1],
[0, -1, 1],
[-1, -1, math.sqrt(2)],
[-1, 1, math.sqrt(2)],
[1, -1, math.sqrt(2)],
[1, 1, math.sqrt(2)]]

return motion

+ 0
- 0
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/__init__.py View File


+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/data_collection/__init__.py View File

@@ -1,15 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .ros import RosTopicCollector

+ 0
- 66
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/data_collection/base.py View File

@@ -1,66 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import abc
import threading

from robosdk.utils.util import Config
from robosdk.utils.logger import logging
from robosdk.cloud_robotics.message_channel import WSMessageChannel
from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory


class DataCollectorBase(metaclass=abc.ABCMeta):
def __init__(self, *args, **kwargs):
self.data_sub_lock = threading.RLock()
self.curr_frame_id = 0
self.curr_frame = None

@abc.abstractmethod
def start(self, *args, **kwargs):
...

@abc.abstractmethod
def close(self, *args, **kwargs):
...

@abc.abstractmethod
def parse(self, *args, **kwargs):
...


class DataCollector(metaclass=abc.ABCMeta): # noqa
def __init__(self, config: Config = None):
self.config = config
self.logger = logging.bind(instance="dataCollector", system=True)
self.data_sub_lock = threading.RLock()
try:
self.message_channel = ClassFactory.get_cls(
ClassType.GENERAL, self.config.Interaction)()
except (ValueError, AttributeError) as e:
self.logger.warning(f"fail to locate message channel, {e}, "
f"use `WSMessageChannel` as default")
self.message_channel = WSMessageChannel()

@abc.abstractmethod
def connect(self, *args, **kwargs):
...

@abc.abstractmethod
def close(self, *args, **kwargs):
...

def start(self, *args, **kwargs):
self.message_channel.setDaemon(True)
self.message_channel.start()

+ 0
- 253
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/data_collection/ros.py View File

@@ -1,253 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import json
import numpy as np

from robosdk.utils.class_factory import ClassType
from robosdk.utils.class_factory import ClassFactory
from robosdk.utils.exceptions import SensorError
from .base import DataCollectorBase
from .base import DataCollector


__all__ = ("RosDataCollector", "RosImages", "RosTopicCollector")


@ClassFactory.register(ClassType.GENERAL, alias="rospy/AnyMsg")
class RosDataCollector(DataCollectorBase): # noqa
ros_time_types = ['time', 'duration']
ros_primitive_types = ['bool', 'byte', 'char', 'int8', 'uint8', 'int16',
'uint16', 'int32', 'uint32', 'int64', 'uint64',
'float32', 'float64', 'string']
ros_header_types = ['Header', 'std_msgs/Header', 'roslib/Header']

def __init__(self, name: str, msg_type: str, publisher: str = ""):
super(RosDataCollector, self).__init__()

import message_filters
import roslib.message

self.message_class = roslib.message.get_message_class(msg_type)
if not self.message_class:
raise SensorError(f"message class - {name} could not be loaded.")
self.topic = name
self.publisher = publisher
self.sub = message_filters.Subscriber(self.topic, self.message_class)

def start(self):
self.sub.registerCallback(self.parse)

def close(self):
self.sub.sub.unregister()

def __del__(self):
self.close()

def parse(self, data):
if data is None:
return
self.data_sub_lock.acquire()
self.curr_frame_id += 1
self.curr_frame = data
json_data = self.convert_ros_message_to_dictionary(message=data)
self.data_sub_lock.release()
return json_data

@staticmethod
def _get_message_fields(message):
return zip(message.__slots__, message._slot_types)

@staticmethod
def _convert_from_ros_binary(field_value):
field_value = base64.b64encode(field_value).decode('utf-8')
return field_value

@staticmethod
def _convert_from_ros_time(field_value):
field_value = {
'secs': field_value.secs,
'nsecs': field_value.nsecs
}
return field_value

def _convert_from_ros_array(self, field_type, field_value,
binary_array_as_bytes=True):
# use index to raise ValueError if '[' not present
list_type = field_type[:field_type.index('[')]
return [self._convert_from_ros_type(
list_type, value, binary_array_as_bytes) for value in field_value]

@staticmethod
def _is_ros_binary_type(field_type):
""" Checks if the field is a binary array one, fixed size or not"""
return field_type.startswith('uint8[') or field_type.startswith('char[')

@staticmethod
def _is_field_type_an_array(field_type):
return field_type.find('[') >= 0

def _is_field_type_a_primitive_array(self, field_type):
bracket_index = field_type.find('[')
if bracket_index < 0:
return False
else:
list_type = field_type[:bracket_index]
return list_type in self.ros_primitive_types

def _convert_from_ros_type(self, field_type, field_value,
binary_array_as_bytes=True):
if field_type in self.ros_primitive_types:
field_value = str(field_value)
elif field_type in self.ros_time_types:
field_value = self._convert_from_ros_time(field_value)
elif self._is_ros_binary_type(field_type):
if binary_array_as_bytes:
field_value = self._convert_from_ros_binary(field_value)
elif type(field_value) == str:
field_value = [ord(v) for v in field_value]
else:
field_value = list(field_value)
elif self._is_field_type_a_primitive_array(field_type):
field_value = list(field_value)
elif self._is_field_type_an_array(field_type):
field_value = self._convert_from_ros_array(
field_type, field_value, binary_array_as_bytes)
else:
field_value = self.convert_ros_message_to_dictionary(
field_value, binary_array_as_bytes)
return field_value

def convert_ros_message_to_dictionary(self, message,
binary_array_as_bytes=False):
"""
Takes in a ROS message and returns a Python dictionary.
"""

dictionary = {}
message_fields = self._get_message_fields(message)
for field_name, field_type in message_fields:
field_value = getattr(message, field_name)
dictionary[field_name] = self._convert_from_ros_type(
field_type, field_value, binary_array_as_bytes)
return dictionary


@ClassFactory.register(ClassType.GENERAL, alias="sensor_msgs/Image")
class RosImages(RosDataCollector):
def __init__(self, name: str, publisher: str = ""):
super(RosImages, self).__init__(name=name, publisher=publisher,
msg_type="sensor_msgs/Image")
from cv_bridge import CvBridge
self.cv_bridge = CvBridge()

def parse(self, data):
if data is None:
return
self.data_sub_lock.acquire()
self.curr_frame_id += 1
self.curr_frame = self.cv_bridge.imgmsg_to_cv2(data, data.encoding)
json_data = dict(
header={
'seq': data.header.seq,
'stamp': {
'secs': data.header.stamp.secs,
'nsecs': data.header.stamp.nsecs
},
'frame_id': data.header.frame_id
},
data=np.array(self.curr_frame).tolist(),
height=data.height,
width=data.width,
encoding=data.encoding,
is_bigendian=data.is_bigendian,
step=data.step
)
self.data_sub_lock.release()
return json_data


@ClassFactory.register(ClassType.GENERAL)
class RosTopicCollector(DataCollector): # noqa

def __init__(self):
super(RosTopicCollector, self).__init__()

import rostopic

pubs, _ = rostopic.get_topic_list()
self.logger.info(f"{len(pubs)} topics has found.")

self.all_topics = []

filter_topics = set()
filter_topic_types = set()
keep_topics = set()
keep_topic_types = set()
if self.config.IgnoreData:
filter_topics = self.config.IgnoreData.get(
"topic_name", "").split(",")
filter_topic_types = self.config.IgnoreData.get(
"topic_type", "").split(",")
if self.config.ForcusData:
keep_topics = self.config.ForcusData.get(
"topic_name", "").split(",")
keep_topic_types = self.config.ForcusData.get(
"topic_type", "").split(",")
for name, _type, publisher in pubs:
if name in filter_topics or _type in filter_topic_types:
self.logger.info(f"Skip sensor data of {name} - {_type} ...")
continue
if len(keep_topics) and name not in keep_topics:
self.logger.info(f"Skip sensor data of {name} - {_type} ...")
continue
if len(keep_topic_types) and _type not in keep_topic_types:
self.logger.info(f"Skip sensor data of {name} - {_type} ...")
continue
try:
sub_cls = ClassFactory.get_cls(ClassType.GENERAL, _type)
except ValueError:
sub_cls = ClassFactory.get_cls(ClassType.GENERAL,
"rospy/AnyMsg")
try:
sub = sub_cls(name=name, publisher=publisher)
except (ValueError, SensorError):
self.logger.warning(f"Sensor data of {name} "
f"were unable to record.")
continue
self.all_topics.append(sub)

def connect(self):
import message_filters

subs = [i.sub for i in self.all_topics]
if not len(subs):
raise SensorError("No data available.")
sync = message_filters.ApproximateTimeSynchronizer(
subs, queue_size=10, slop=0.2
)
sync.registerCallback(self.convert)
self.logger.info(f"DataCollector connect successfully")

def stop(self):
self.logger.warning("trying to unsubscribe")
for sub in self.all_topics:
sub.close()

def convert(self, *all_data):
data_collect = {}
for inx, msg_data in enumerate(all_data):
topic = self.all_topics[inx]
data = topic.parse(msg_data)
data_collect[topic.topic] = data  # collectors expose the topic name as .topic
self.message_channel.add_data(json.dumps(data_collect))
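
RosDataCollector serialises arbitrary ROS messages by walking __slots__/_slot_types recursively: primitives are stringified, times become {secs, nsecs} dicts, byte arrays become base64 strings or plain lists depending on the flag, and nested messages recurse. A toy illustration with a fake message class (no ROS required; FakePoint is hypothetical):

class FakePoint:
    # Mimics the interface of a generated ROS message class.
    __slots__ = ["x", "y", "z"]
    _slot_types = ["float64", "float64", "float64"]
    def __init__(self, x, y, z):
        self.x, self.y, self.z = x, y, z

# Given a constructed collector, the conversion would yield string-valued primitives:
# collector.convert_ros_message_to_dictionary(FakePoint(1.0, 2.0, 3.0))
#   -> {"x": "1.0", "y": "2.0", "z": "3.0"}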

+ 0
- 15
examples/lifelong_learning/robot_dog_delivery/robo_detection/robosdk/cloud_robotics/message_channel/__init__.py View File

@@ -1,15 +0,0 @@
# Copyright 2021 The KubeEdge Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from .message_channel import WSMessageChannel

Some files were not shown because too many files changed in this diff
