This example is based on the example "Using Joint Inference Service in a Helmet Detection Scenario".
Assume you have created a KubeEdge cluster that has one cloud node (e.g., cloud-node)
and one edge node (e.g., edge-node).
# Create the S3 credential Secret referenced by the Model resources below
# (via `credentialName: mysecret`).
kubectl create -f - <<EOF
apiVersion: v1
kind: Secret
metadata:
  name: mysecret
  annotations:
    s3-endpoint: s3.amazonaws.com # replace with your s3 endpoint e.g minio-service.kubeflow:9000
    s3-usehttps: "1" # by default 1, if testing with minio you can set to 0
stringData: # use `stringData` for raw credential string or `data` for base64 encoded string
  ACCESS_KEY_ID: XXXX
  SECRET_ACCESS_KEY: XXXXXXXX
EOF
# Register the cloud-side (big) model; the artifact is fetched from S3 using
# the credentials stored in the `mysecret` Secret.
kubectl create -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: Model
metadata:
  name: big-model
spec:
  url: "s3://kubeedge/model/big-model/yolov3_darknet.pb"
  format: "pb"
  credentialName: mysecret
EOF
# Register the edge-side (little) model. Note: the original used an undefined
# `$action` shell variable here; `create` matches the sibling commands.
kubectl create -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: Model
metadata:
  name: little-model
spec:
  url: "s3://kubeedge/model/little-model/yolov3_resnet18.pb"
  format: "pb"
  credentialName: mysecret
EOF
This example uses these images:

- kubeedge/sedna-example-joint-inference-helmet-detection-little:v0.3.0
- kubeedge/sedna-example-joint-inference-helmet-detection-big:v0.3.0

These images are generated by the script build_images.sh.
# Pre-create the host directory mounted by the edge worker's hostPath volume.
mkdir -p /joint_inference/output

# Worker images used by the JointInferenceService manifest below.
LITTLE_MODEL_IMAGE="kubeedge/sedna-example-joint-inference-helmet-detection-little:v0.3.0"
BIG_MODEL_IMAGE="kubeedge/sedna-example-joint-inference-helmet-detection-big:v0.3.0"
# Create the JointInferenceService: the little model runs on the edge node and
# escalates hard examples (per the IBT mining thresholds) to the big model on
# the cloud node. Fix: hostPath `type` must be `DirectoryOrCreate`
# (`DirectoryorCreate` is not a valid Kubernetes hostPath type).
kubectl create -f - <<EOF
apiVersion: sedna.io/v1alpha1
kind: JointInferenceService
metadata:
  name: helmet-detection-inference-example
  namespace: default
spec:
  edgeWorker:
    model:
      name: "helmet-detection-inference-little-model"
    hardExampleMining:
      name: "IBT"
      parameters:
        - key: "threshold_img"
          value: "0.9"
        - key: "threshold_box"
          value: "0.9"
    template:
      spec:
        nodeName: edge-node
        containers:
          - image: $LITTLE_MODEL_IMAGE
            imagePullPolicy: IfNotPresent
            name: little-model
            env: # user defined environments
              - name: "input_shape"
                value: "416,736"
              - name: "video_url"
                value: "rtsp://localhost/video"
              - name: "all_examples_inference_output"
                value: "/data/output"
              - name: "hard_example_cloud_inference_output"
                value: "/data/hard_example_cloud_inference_output"
              - name: "hard_example_edge_inference_output"
                value: "/data/hard_example_edge_inference_output"
            resources: # user defined resources
              requests:
                memory: 64M
                cpu: 100m
              limits:
                memory: 2Gi
            volumeMounts:
              - name: outputdir
                mountPath: /data/
        volumes: # user defined volumes
          - name: outputdir
            hostPath:
              # user must create the directory in host
              path: /joint_inference/output
              type: DirectoryOrCreate
  cloudWorker:
    model:
      name: "helmet-detection-inference-big-model"
    template:
      spec:
        nodeName: cloud-node
        containers:
          - image: $BIG_MODEL_IMAGE
            name: big-model
            imagePullPolicy: IfNotPresent
            env: # user defined environments
              - name: "input_shape"
                value: "544,544"
            resources: # user defined resources
              requests:
                memory: 2Gi
EOF
For inference-output details, refer to the linked output description in the original example documentation (link target lost in extraction — restore it).