
update data process

tags/v0.3.0
Zhejiang Lab committed 3 years ago
commit d223f37acd
53 changed files with 47 additions and 2494 deletions
  1. dubhe_data_process/algorithm-annotation.py (+0 -60)
  2. dubhe_data_process/algorithm-imagenet.py (+0 -67)
  3. dubhe_data_process/algorithm-imgprocess.py (+0 -54)
  4. dubhe_data_process/algorithm-ofrecord.py (+0 -77)
  5. dubhe_data_process/algorithm-track.py (+0 -60)
  6. dubhe_data_process/algorithm-videosample.py (+0 -62)
  7. dubhe_data_process/annotation.py (+0 -45)
  8. dubhe_data_process/common/RedisUtil.py (+1 -1)
  9. dubhe_data_process/common/augment_utils/ACE.py (+1 -1)
  10. dubhe_data_process/common/augment_utils/dehaze.py (+1 -1)
  11. dubhe_data_process/common/augment_utils/hist_equalize.py (+1 -1)
  12. dubhe_data_process/common/config.py (+12 -1)
  13. dubhe_data_process/common/log_config.py (+1 -1)
  14. dubhe_data_process/common/of_cnn_resnet.py (+2 -2)
  15. dubhe_data_process/common/predict_with_print_box.py (+1 -1)
  16. dubhe_data_process/common/select_gpu.py (+12 -6)
  17. dubhe_data_process/common/yolo_net.py (+1 -1)
  18. dubhe_data_process/data/coco.names (+0 -80)
  19. dubhe_data_process/imagenet.py (+0 -93)
  20. dubhe_data_process/imgprocess.py (+0 -189)
  21. dubhe_data_process/log/dev/.gitkeep (+0 -0)
  22. dubhe_data_process/luascript/delaytaskscript.py (+1 -1)
  23. dubhe_data_process/luascript/failedtaskscript.py (+1 -1)
  24. dubhe_data_process/luascript/finishtaskscript.py (+1 -1)
  25. dubhe_data_process/luascript/gettaskscript.py (+1 -1)
  26. dubhe_data_process/luascript/starttaskscript.py (+1 -1)
  27. dubhe_data_process/of_model/config.py (+1 -1)
  28. dubhe_data_process/of_model/imagenet1000_clsidx_to_labels.py (+1 -1)
  29. dubhe_data_process/of_model/of_develop_2_of_python.py (+1 -1)
  30. dubhe_data_process/of_model/resnet_model.py (+1 -1)
  31. dubhe_data_process/ofrecord.py (+0 -181)
  32. dubhe_data_process/oneflow-1/README.md (+0 -2)
  33. dubhe_data_process/oneflow-1/__init__.py (+0 -0)
  34. dubhe_data_process/oneflow-1/gen_ofrecord.py (+0 -167)
  35. dubhe_data_process/oneflow-1/imagenet_server.py (+0 -161)
  36. dubhe_data_process/oneflow-1/img_process_server.py (+0 -191)
  37. dubhe_data_process/oneflow-1/ofrecord_server.py (+0 -180)
  38. dubhe_data_process/oneflow-1/run_label_server.py (+0 -156)
  39. dubhe_data_process/oneflow-1/upload_config.py (+0 -46)
  40. dubhe_data_process/predict_with_print_box.py (+0 -277)
  41. dubhe_data_process/taskexecutor.py (+0 -100)
  42. dubhe_data_process/track.py (+0 -93)
  43. dubhe_data_process/track_only/feature/feature_extractor_batch.py (+1 -1)
  44. dubhe_data_process/track_only/hog_track.py (+1 -1)
  45. dubhe_data_process/track_only/mot_track_kc.py (+0 -4)
  46. dubhe_data_process/track_only/post_process.py (+1 -1)
  47. dubhe_data_process/track_only/sort/nn_matching.py (+1 -4)
  48. dubhe_data_process/track_only/sort/preprocessing.py (+0 -4)
  49. dubhe_data_process/track_only/sort/track.py (+0 -4)
  50. dubhe_data_process/track_only/sort/tracker.py (+0 -4)
  51. dubhe_data_process/track_only/track_server.py (+1 -1)
  52. dubhe_data_process/track_only/util.py (+0 -4)
  53. dubhe_data_process/videosample.py (+0 -100)

dubhe_data_process/algorithm-annotation.py (+0 -60)

@@ -1,60 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# coding:utf-8
import threading

import taskexecutor
import time
import common.RedisUtil as f
import common.config as config
import annotation as annotation
import luascript.starttaskscript as start_script
import logging
import common.select_gpu as gpu

logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG)

if __name__ == '__main__':
    """Automatic annotation algorithm entry."""
    gpu.select_gpu()
    jsonData = config.loadJsonData(config.configPath)
    redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
    logging.info('init redis client %s', redisClient)
    t = threading.Thread(target=taskexecutor.delayKeyThread, args=(redisClient,))
    t.setDaemon(True)
    t.start()
    annotation._init()
    while 1:
        try:
            if config.loadJsonData(config.sign) == 0:
                logging.info('not to execute new task')
                time.sleep(1)
            else:
                logging.info('get one task')
                element = redisClient.eval(start_script.startTaskLua, 1, config.queue,
                                           config.annotationStartQueue, int(time.time()))
                if len(element) > 0:
                    taskexecutor.annotationExecutor(redisClient, element[0])
                else:
                    logging.info('task queue is empty.')
                    time.sleep(1)
        except Exception as e:
            logging.error('except:', e)
            time.sleep(1)

dubhe_data_process/algorithm-imagenet.py (+0 -67)

@@ -1,67 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# coding:utf-8

import json
import threading
import time
import imagenet as imagenet
import common.RedisUtil as f
import common.config as config
import luascript.starttaskscript as start_script
import logging
import common.select_gpu as gpu

logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG)

if __name__ == '__main__':
    """Imagenet algorithm entry."""
    gpu.select_gpu()
    jsonData = config.loadJsonData(config.configPath)
    redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
    logging.info('init redis client %s', redisClient)
    t = threading.Thread(target=imagenet.delayKeyThread, args=(redisClient,))
    t.setDaemon(True)
    t.start()
    imagenet._init()
    while 1:
        try:
            if config.loadJsonData(config.sign) == 0:
                logging.info('not to execute new task')
                time.sleep(1)
            else:
                logging.info('get one task')
                element = redisClient.eval(start_script.startTaskLua, 1, config.imagenetTaskQueue,
                                           config.imagenetStartQueue, int(time.time()))
                if len(element) > 0:
                    key = element[0].decode()
                    jsonStr = f.getByKey(redisClient, key.replace('"', ''))
                    result = imagenet.process(jsonStr, element[0])
                    logging.info("result:", json.dumps(result))

                    logging.info('save result to redis')
                    f.pushToQueue(redisClient, config.imagenetFinishQueue, json.dumps(result))
                    redisClient.zrem(config.imagenetStartQueue, element[0])
                else:
                    logging.info('task queue is empty.')
                    time.sleep(2)
        except Exception as e:
            logging.error('except:', e)
            time.sleep(1)

dubhe_data_process/algorithm-imgprocess.py (+0 -54)

@@ -1,54 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
import threading
import time

import common.RedisUtil as f
import luascript.starttaskscript as start_script
import common.config as config
import logging
import imgprocess

logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG)

if __name__ == '__main__':
    """Enhancement algorithm entry."""
    jsonData = config.loadJsonData(config.configPath)
    redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
    logging.info('init redis client %s', redisClient)
    t = threading.Thread(target=imgprocess.delayKeyThread, args=(redisClient,))
    t.setDaemon(True)
    t.start()
    while 1:
        try:
            if config.loadJsonData(config.sign) == 0:
                logging.info('not to execute new task')
                time.sleep(5)
            else:
                enhanceTaskId = redisClient.eval(start_script.startTaskLua, 1, config.imgProcessTaskQueue,
                                                 config.imgProcessStartQueue, int(time.time()))
                if len(enhanceTaskId) > 0:
                    imgprocess.start_enhance_task(enhanceTaskId, redisClient)
                else:
                    logging.info('task queue is empty.')
                    time.sleep(5)
        except Exception as e:
            logging.error('except:', e)
            time.sleep(1)

dubhe_data_process/algorithm-ofrecord.py (+0 -77)

@@ -1,77 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# coding:utf-8

import os
import json
import threading
import time
import common.RedisUtil as f
import common.config as config
import luascript.starttaskscript as start_script
import logging
import traceback
import ofrecord
logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',level=logging.DEBUG)

basePath = '/nfs/'
descPath = 'ofrecord/train'

if __name__ == '__main__':
    """Ofrecord algorithm entry."""
    jsonData = config.loadJsonData(config.configPath)
    redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
    logging.info('init redis client %s', redisClient)
    t = threading.Thread(target=ofrecord.delayKeyThread, args=(redisClient,))
    t.setDaemon(True)
    t.start()
    while 1:
        try:
            if config.loadJsonData(config.sign) == 0:
                logging.info('not to execute new task')
                time.sleep(1)
            else:
                element = redisClient.eval(start_script.startTaskLua, 1, config.ofrecordTaskQueue,
                                           config.ofrecordStartQueue, int(time.time()))
                if len(element) > 0:
                    key = element[0].decode()
                    detail = f.getByKey(redisClient, key.replace('"', ''))
                    jsonStr = json.loads(detail.decode())
                    label_map = {}
                    index = 0
                    for item in jsonStr["datasetLabels"].keys():
                        if index >= 0 and item != '@type':
                            label_map[item] = jsonStr["datasetLabels"][item]
                        index += 1
                    ofrecord.execute(os.path.join(basePath, jsonStr["datasetPath"]),
                                     os.path.join(basePath, jsonStr["datasetPath"], descPath),
                                     label_map,
                                     jsonStr["files"],
                                     jsonStr["partNum"],
                                     element[0])
                    logging.info('save result to redis')
                    f.pushToQueue(redisClient, config.ofrecordFinishQueue, key)
                    redisClient.zrem(config.ofrecordStartQueue, element[0])
                else:
                    logging.info('task queue is empty.')
                    time.sleep(2)
        except Exception as e:
            logging.error('except:', e)
            redisClient.zrem(config.ofrecordStartQueue, element[0])
            time.sleep(1)

dubhe_data_process/algorithm-track.py (+0 -60)

@@ -1,60 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# coding:utf-8
import threading
import time
import common.RedisUtil as f
import common.config as config
import luascript.starttaskscript as start_script
import logging
import track

logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG)

if __name__ == '__main__':
    """Track algorithm entry."""
    jsonData = config.loadJsonData(config.configPath)
    redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
    logging.info('init redis client %s', redisClient)
    t = threading.Thread(target=track.delayKeyThread, args=(redisClient,))
    t.setDaemon(True)
    t.start()
    while 1:
        try:
            if config.loadJsonData(config.sign) == 0:
                logging.info('not to execute new task')
                time.sleep(1)
            else:
                logging.info('get one task')
                element = redisClient.eval(start_script.startTaskLua, 1, config.trackTaskQueue,
                                           config.trackStartQueue, int(time.time()))
                if len(element) > 0:
                    key = element[0].decode()
                    jsonStr = f.getByKey(redisClient, key.replace('"', ''))
                    if track.trackProcess(jsonStr, element[0]):
                        f.pushToQueue(redisClient, config.trackFinishQueue, key)
                        redisClient.zrem(config.trackStartQueue, element[0])
                        logging.info('success')
                else:
                    logging.info('task queue is empty.')
                    time.sleep(1)
        except Exception as e:
            logging.error('except:', e)
            time.sleep(1)

dubhe_data_process/algorithm-videosample.py (+0 -62)

@@ -1,62 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
import json
import threading
from datetime import datetime
import time

import common.RedisUtil as f
import luascript.starttaskscript as start_script
import common.config as config
import logging
import videosample

logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
                    level=logging.DEBUG)

if __name__ == '__main__':
    """VideoSample algorithm entry."""
    jsonData = config.loadJsonData(config.configPath)
    redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
    logging.info('init redis client %s', redisClient)
    t = threading.Thread(target=videosample.delayKeyThread, args=(redisClient,))
    t.setDaemon(True)
    t.start()
    while 1:
        try:
            if config.loadJsonData(config.sign) == 0:
                logging.info('not to execute new task')
                time.sleep(5)
            else:
                logging.info("read redis:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S"))
                sampleTask = redisClient.eval(start_script.startTaskLua, 1, config.videoPendingQueue,
                                              config.videoStartQueue, int(time.time()))
                logging.info(int(time.time()))
                if len(sampleTask) > 0:
                    datasetId = json.loads(sampleTask[0])['datasetIdKey']
                    taskParameters = json.loads(redisClient.get("videoSample:" + str(datasetId)))
                    path = taskParameters['path']
                    frameList = taskParameters['frames']
                    videosample.sampleProcess(datasetId, path, frameList, redisClient)
                else:
                    logging.info('task queue is empty.')
                    time.sleep(5)
        except Exception as e:
            logging.error('except:', e)
            time.sleep(1)

dubhe_data_process/annotation.py (+0 -45)

@@ -1,45 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# coding:utf-8
import time
import sys
sys.path.append(r"./common")
import predict_with_print_box as yolo_demo
from common.log_config import setup_log


label_log = setup_log('dev', 'label.log')


def _init():
    print('init yolo_obj')
    global yolo_obj
    yolo_obj = yolo_demo.YoloInference(label_log)


def _annotation(type_, image_path_list, id_list, label_list, coco_flag=0):
    """Perform automatic annotation task."""
    image_num = len(image_path_list)
    if image_num < 16:
        for i in range(16 - image_num):
            image_path_list.append(image_path_list[0])
            id_list.append(id_list[0])
        image_num = len(image_path_list)
    annotations = yolo_obj.yolo_inference(type_, id_list, image_path_list, label_list, coco_flag)
    return annotations[0:image_num]
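
The deleted _annotation pads short batches by repeating the first image until the list holds 16 entries, apparently because the YOLO inference path expects a fixed batch size. A minimal sketch of the padding step, with a hypothetical fake_infer standing in for yolo_obj.yolo_inference:

# Pad-to-batch sketch; fake_infer is a hypothetical stand-in for the real
# yolo_obj.yolo_inference call.
BATCH = 16


def pad_batch(image_path_list, id_list):
    image_num = len(image_path_list)
    if image_num < BATCH:
        # Repeat the first element so the batch reaches the expected size.
        image_path_list += [image_path_list[0]] * (BATCH - image_num)
        id_list += [id_list[0]] * (BATCH - image_num)
    return image_path_list, id_list


paths, ids = pad_batch(['a.jpg', 'b.jpg'], [1, 2])
fake_infer = lambda ps, ids_: [{'id': i} for i in ids_]
print(len(fake_infer(paths, ids)))  # 16: the model always sees a full batch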

dubhe_data_process/common/RedisUtil.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/common/augment_utils/ACE.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/common/augment_utils/dehaze.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/common/augment_utils/hist_equalize.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/common/config.py (+12 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -25,6 +25,11 @@ port = 6379
 db = 0
 password = ''
 
+# text_classification
+textClassificationQueue = 'text_classification_task_queue'
+textClassificationStartQueue = 'text_classification_processing_queue'
+textClassificationFinishQueue = 'text_classification_finished_queue'
+
 # annotation
 queue = 'annotation_task_queue'
 annotationStartQueue = 'annotation_processing_queue'
@@ -44,6 +49,7 @@ ofrecordFinishQueue = 'ofrecord_finished_queue'
 trackTaskQueue = 'track_task_queue'
 trackStartQueue = 'track_processing_queue'
 trackFinishQueue = 'track_finished_queue'
+trackFailedQueue = 'track_failed_queue'
 
 # videosample
 videoPendingQueue = "videoSample_unprocessed"
@@ -51,6 +57,11 @@ videoStartQueue = "videoSample_processing"
 videoFinishQueue = "videoSample_finished"
 videoFailedQueue = "videoSample_failed"
 
+# lungsegmentation
+dcmTaskQueue = "dcm_task_queue"
+dcmStartQueue = "dcm_processing_queue"
+dcmFinishQueue = "dcm_finished_queue"
+
 # imgprocess
 imgProcessTaskQueue = 'imgProcess_unprocessed'
 imgProcessFinishQueue = 'imgProcess_finished'
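
The three new text_classification keys follow the same task/processing/finished queue triple used by every other algorithm in this file. A hedged sketch, not part of this commit, of how a worker would consume them, mirroring the polling loop of the entry scripts deleted above:

# Hypothetical text_classification worker; it mirrors the deleted
# algorithm-*.py entry scripts. startTaskLua atomically moves a task key from
# the task queue into the processing zset, scored by the current timestamp.
import time

import common.RedisUtil as f
import common.config as config
import luascript.starttaskscript as start_script

jsonData = config.loadJsonData(config.configPath)
redisClient = f.getRedisConnection(jsonData["ip"], jsonData["port"], jsonData["database"], jsonData["password"])
while 1:
    element = redisClient.eval(start_script.startTaskLua, 1, config.textClassificationQueue,
                               config.textClassificationStartQueue, int(time.time()))
    if len(element) > 0:
        # classify, then push the result to textClassificationFinishQueue
        # and zrem the task from textClassificationStartQueue
        pass
    else:
        time.sleep(1)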


dubhe_data_process/common/log_config.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/common/of_cnn_resnet.py (+2 -2)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -34,7 +34,7 @@ sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())
 
 def init_resnet():
     """Initialize ResNet with pretrained weights"""
-    model_load_dir = 'of_model/resnet_v15_of_best_model_val_top1_773/'
+    model_load_dir = '../of_model/resnet_v15_of_best_model_val_top1_773/'
     assert os.path.isdir(model_load_dir)
     check_point = flow.train.CheckPoint()
     check_point.load(model_load_dir)


dubhe_data_process/common/predict_with_print_box.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/common/select_gpu.py (+12 -6)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
@@ -17,6 +17,7 @@
 */
 """
 import os
+import random
 import sys
 import pynvml
 import logging
@@ -27,6 +28,7 @@ pynvml.nvmlInit()
 
 def select_gpu():
     deviceCount = pynvml.nvmlDeviceGetCount()
+    gpu_usable = []
     for i in range(deviceCount):
         logging.info('-------------get GPU information--------------')
         handle = pynvml.nvmlDeviceGetHandleByIndex(i)
@@ -34,8 +36,12 @@ def select_gpu():
         gpu_info = pynvml.nvmlDeviceGetMemoryInfo(handle)
         logging.info('free:%s MB', gpu_info.free / (1000 * 1000))
         if gpu_info.free / (1000 * 1000) > 3072:
-            os.environ["CUDA_VISIBLE_DEVICES"] = str(i)
-            logging.info('use GPU:%s %s', i, pynvml.nvmlDeviceGetName(handle))
-            return
-    logging.info('No GPU is currently available')
-    sys.exit()
+            gpu_usable.append(i)
+    gpu_usable_num = len(gpu_usable)
+    if gpu_usable_num == 0:
+        logging.info('No GPU is currently available')
+        sys.exit()
+    random_gpu = random.randint(0, gpu_usable_num - 1)
+    os.environ["CUDA_VISIBLE_DEVICES"] = str(gpu_usable[random_gpu])
+    logging.info('use GPU:%s %s', gpu_usable[random_gpu], pynvml.nvmlDeviceGetName(handle))
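
The rewritten select_gpu no longer grabs the first device with more than 3072 MB free; it collects every such device and picks one at random, which spreads concurrent workers across GPUs instead of always claiming GPU 0. A minimal sketch of that policy, with the pynvml queries replaced by a hypothetical free_mb list:

# Hedged sketch of the selection policy above; free_mb stands in for the
# per-device pynvml.nvmlDeviceGetMemoryInfo(...).free readings.
import random
import sys


def pick_gpu(free_mb, threshold_mb=3072):
    usable = [i for i, free in enumerate(free_mb) if free > threshold_mb]
    if not usable:
        print('No GPU is currently available')
        sys.exit()
    return random.choice(usable)  # equivalent to the randint indexing above


print(pick_gpu([1024, 8192, 4096]))  # prints 1 or 2, never 0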


dubhe_data_process/common/yolo_net.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/data/coco.names (+0 -80)

@@ -1,80 +0,0 @@
person
bicycle
car
motorbike
aeroplane
bus
train
truck
boat
traffic light
fire hydrant
stop sign
parking meter
bench
bird
cat
dog
horse
sheep
cow
elephant
bear
zebra
giraffe
backpack
umbrella
handbag
tie
suitcase
frisbee
skis
snowboard
sports ball
kite
baseball bat
baseball glove
skateboard
surfboard
tennis racket
bottle
wine glass
cup
fork
knife
spoon
bowl
banana
apple
sandwich
orange
broccoli
carrot
hot dog
pizza
donut
cake
chair
sofa
pottedplant
bed
diningtable
toilet
tvmonitor
laptop
mouse
remote
keyboard
cell phone
microwave
oven
toaster
sink
refrigerator
book
clock
vase
scissors
teddy bear
hair drier
toothbrush

dubhe_data_process/imagenet.py (+0 -93)

@@ -1,93 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sched
import sys

sys.path.append(r"./common")
import logging
import time
import json
import common.of_cnn_resnet as of_cnn_resnet
import numpy as np
import luascript.delaytaskscript as delay_script
import common.config as config
from datetime import datetime

schedule = sched.scheduler(time.time, time.sleep)

base_path = "/nfs/"
delayId = ""


def _init():
    of_cnn_resnet.init_resnet()
    logging.info('env init finished')


def process(task_dict, key):
    """Imagenet task method.
    Args:
        task_dict: imagenet task details.
        key: imagenet task key.
    """
    global delayId
    delayId = "\"" + eval(str(key, encoding="utf-8")) + "\""
    task_dict = json.loads(task_dict)
    id_list = []
    image_path_list = []
    for file in task_dict["files"]:
        id_list.append(file["id"])
        image_path_list.append(base_path + file["url"])
    label_list = task_dict["labels"]
    image_num = len(image_path_list)
    annotations = []
    for inds in range(len(image_path_list)):
        temp = {}
        temp['id'] = id_list[inds]
        score, ca_id = of_cnn_resnet.resnet_inf(image_path_list[inds])
        temp['annotation'] = [{'category_id': int(ca_id), 'score': np.float(score)}]
        temp['annotation'] = json.dumps(temp['annotation'])
        annotations.append(temp)
    result = {"annotations": annotations, "task": key.decode()}
    return result


def delaySchduled(inc, redisClient):
    """Delay task method.
    Args:
        inc: scheduled task time.
        redisClient: redis client.
    """
    try:
        print("delay:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S"))
        redisClient.eval(delay_script.delayTaskLua, 1, config.imagenetStartQueue, delayId, int(time.time()))
        schedule.enter(inc, 0, delaySchduled, (inc, redisClient))
    except Exception as e:
        print("delay error" + e)


def delayKeyThread(redisClient):
    """Delay task thread.
    Args:
        redisClient: redis client.
    """
    schedule.enter(0, 0, delaySchduled, (5, redisClient))
    schedule.run()
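
delaySchduled and delayKeyThread form a heartbeat: the scheduler re-arms itself every 5 seconds and re-evaluates delayTaskLua, presumably refreshing the task's timestamp score in the processing zset so the worker is not treated as stalled. A minimal, self-contained sketch of this self-rearming sched pattern, with the Redis call replaced by a print and bounded to three beats so it terminates:

import sched
import time

schedule = sched.scheduler(time.time, time.sleep)


def heartbeat(inc, remaining):
    # In the real code this eval()s delayTaskLua to bump the task's score.
    print('refresh task score at', int(time.time()))
    if remaining > 1:
        schedule.enter(inc, 0, heartbeat, (inc, remaining - 1))  # re-arm


schedule.enter(0, 0, heartbeat, (5, 3))  # fire now, then every 5 s (3 beats here)
schedule.run()  # blocks until no events remain; the real code runs this in a daemon thread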

dubhe_data_process/imgprocess.py (+0 -189)

@@ -1,189 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
from datetime import datetime
import sched
import os
import cv2
import numpy as np
import logging
import time
import json
import argparse
import sys
import codecs
import shutil

import luascript.delaytaskscript as delay_script
import common.config as config

from common.augment_utils.ACE import ACE_color
from common.augment_utils.dehaze import deHaze, addHaze
from common.augment_utils.hist_equalize import adaptive_hist_equalize
from common.log_config import setup_log

schedule = sched.scheduler(time.time, time.sleep)

delayId = ""
finish_key = {}
re_task_id = {}
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

# task url suffix
img_pro_url = 'api/data/datasets/'

# arguments
parser = argparse.ArgumentParser(description="config for image augmentation server")
parser.add_argument("-m", "--mode", type=str, default="test", required=False)
args = parser.parse_args()

# url concat(ip + port + suffix)
url_json = './common/config/url.json'
with open(url_json) as f:
    url_dict = json.loads(f.read())
img_pro_url = url_dict[args.mode] + img_pro_url

# create task queue
base_path = "/nfs/"

# create log path and file
des_folder = os.path.join('./log', args.mode)
if not os.path.exists(des_folder):
    os.makedirs(des_folder)

logging = setup_log(args.mode, 'enhance-' + args.mode + '.log')
enhanceTaskId = ""


def start_enhance_task(enhanceTaskId, redisClient):
    """Enhance task method.
    Args:
        enhanceTaskId: enhance task id.
        redisClient: redis client.
    """
    global delayId
    detailKey = 'imgProcess:' + eval(str(enhanceTaskId[0], encoding="utf-8"))
    delayId = "\"" + eval(str(enhanceTaskId[0], encoding="utf-8")) + "\""
    print(detailKey)
    taskParameters = json.loads(redisClient.get(detailKey).decode())
    dataset_id = taskParameters['id']
    img_save_path = taskParameters['enhanceFilePath']
    ann_save_path = taskParameters["enhanceAnnotationPath"]
    file_list = taskParameters['fileDtos']
    nums_, img_path_list, ann_path_list = img_ann_list_gen(file_list)
    process_type = taskParameters['type']
    re_task_id = eval(str(enhanceTaskId[0], encoding="utf-8"))
    img_process_config = [dataset_id, img_save_path,
                          ann_save_path, img_path_list,
                          ann_path_list, process_type, re_task_id]
    image_enhance_process(img_process_config, redisClient)
    logging.info(str(nums_) + ' images for augment')


def img_ann_list_gen(file_list):
    """Analyze the json request and convert to list"""
    nums_ = len(file_list)
    img_list = []
    ann_list = []
    for i in range(nums_):
        img_list.append(file_list[i]['filePath'])
        ann_list.append(file_list[i]['annotationPath'])
    return nums_, img_list, ann_list


def image_enhance_process(img_task, redisClient):
    """The implementation of image augmentation thread"""
    global img_pro_url
    global finish_key
    global re_task_id
    logging.info('img_process server start'.center(66, '-'))
    logging.info(img_pro_url)
    try:
        dataset_id = img_task[0]
        img_save_path = img_task[1]
        ann_save_path = img_task[2]
        img_list = img_task[3]
        ann_list = img_task[4]
        method = img_task[5]
        re_task_id = img_task[6]
        suffix = '_enchanced_' + re_task_id
        logging.info("dataset_id " + str(dataset_id))

        finish_key = {"processKey": re_task_id}
        finish_data = {"id": re_task_id,
                       "suffix": suffix}
        for j in range(len(ann_list)):
            img_path = img_list[j]
            ann_path = ann_list[j]
            img_process(suffix, img_path, ann_path,
                        img_save_path, ann_save_path, method)

        redisClient.lpush(config.imgProcessFinishQueue, json.dumps(finish_key, separators=(',', ':')))
        redisClient.set("imgProcess:finished:" + re_task_id, json.dumps(finish_data))
        redisClient.zrem(config.imgProcessStartQueue, "\"" + re_task_id + "\"")
        logging.info('suffix:' + suffix)
        logging.info("End img_process of dataset:" + str(dataset_id))

    except Exception as e:
        redisClient.lpush(config.imgProcessFailedQueue, json.dumps(finish_key, separators=(',', ':')))
        redisClient.zrem(config.imgProcessStartQueue, "\"" + re_task_id + "\"")
        logging.info(img_pro_url)
        logging.error("Error imgProcess")
        logging.error(e)
        time.sleep(0.01)


def img_process(suffix, img_path, ann_path, img_save_path, ann_save_path, method_ind):
    """Process images and save in specified path"""
    inds2method = {1: deHaze, 2: addHaze, 3: ACE_color, 4: adaptive_hist_equalize}
    method = inds2method[method_ind]
    img_raw = cv2.imdecode(np.fromfile(img_path.encode('utf-8'), dtype=np.uint8), 1)
    img_suffix = os.path.splitext(img_path)[-1]
    ann_name = ann_path.replace(ann_save_path, '')
    if method_ind <= 3:
        processed_img = method(img_raw / 255.0) * 255
    else:
        processed_img = method(img_raw)
    cv2.imwrite(img_save_path + ann_name + suffix + img_suffix,
                processed_img.astype(np.uint8))
    shutil.copyfile(ann_path.encode('utf-8'), (ann_path + suffix).encode('utf-8'))


def delaySchduled(inc, redisClient):
    """Delay task method.
    Args:
        inc: scheduled task time.
        redisClient: redis client.
    """
    try:
        logging.info("delay:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S") + ":" + delayId)
        redisClient.eval(delay_script.delayTaskLua, 1, config.imgProcessStartQueue, delayId, int(time.time()))
        schedule.enter(inc, 0, delaySchduled, (inc, redisClient))
    except Exception as e:
        print("delay error" + e)


def delayKeyThread(redisClient):
    """Delay task thread.
    Args:
        redisClient: redis client.
    """
    schedule.enter(0, 0, delaySchduled, (5, redisClient))
    schedule.run()

dubhe_data_process/log/dev/.gitkeep (+0 -0)


dubhe_data_process/luascript/delaytaskscript.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/luascript/failedtaskscript.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/luascript/finishtaskscript.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/luascript/gettaskscript.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/luascript/starttaskscript.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/of_model/config.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/of_model/imagenet1000_clsidx_to_labels.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/of_model/of_develop_2_of_python.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/of_model/resnet_model.py (+1 -1)

@@ -1,6 +1,6 @@
 """
 /**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.


dubhe_data_process/ofrecord.py (+0 -181)

@@ -1,181 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# -*- coding: utf-8 -*-

import logging
import json
import os
import struct
import cv2
import sched
import numpy as np
import oneflow.core.record.record_pb2 as of_record
import luascript.delaytaskscript as delay_script
import time
import common.config as config
from datetime import datetime

schedule = sched.scheduler(time.time, time.sleep)

delayId = ""

class ImageCoder(object):
    """Helper class that provides image coding utilities."""

    def __init__(self, size=None):
        self.size = size

    def _resize(self, image_data):
        if self.size is not None and image_data.shape[:2] != self.size:
            return cv2.resize(image_data, self.size)
        return image_data

    def image_to_jpeg(self, image_data):
        image_data = cv2.imdecode(np.fromstring(image_data, np.uint8), 1)
        image_data = self._resize(image_data)
        return cv2.imencode(".jpg", image_data)[1].tostring(
        ), image_data.shape[0], image_data.shape[1]


def _process_image(filename, coder):
    """Process a single image file.
    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide image coding utils.
    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with open(filename, 'rb') as f:
        image_data = f.read()
    image_data, height, width = coder.image_to_jpeg(image_data)

    return image_data, height, width


def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))


def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


def extract_img_label(names, path):
    """Extract the images and labels into np array [index].
    Args:
        f: A file object that contain images and annotations.
    Returns:
        data: A 4D uint8 np array [index, h, w, depth].
        labels: a 1D uint8 np array.
        num_img: the number of images.
    """
    train_img = os.path.join(path, 'origin/')
    train_label = os.path.join(path, 'annotation/')
    num_imgs = len(names)
    data = []
    labels = []
    print('^^^^^^^^^^ start img_set for sycle')
    for i in names:
        name = os.path.splitext(i)[0]
        print(name)
        coder = ImageCoder((224, 224))
        image_buffer, height, width = _process_image(
            os.path.join(train_img, i), coder)

        data += [image_buffer]

        if os.path.exists(os.path.join(train_label, name)):

            with open(os.path.join(train_label, name), "r", encoding='utf-8') as jsonFile:
                la = json.load(jsonFile)
                if la:
                    labels += [la[0]['category_id']]
                else:
                    data.pop()
                    num_imgs -= 1
        else:
            print('File is not found')
    print('^^^^^^^^^ img_set for end')
    data = np.array(data)
    labels = np.array(labels)
    print(data.shape, labels.shape)
    return num_imgs, data, labels


def execute(src_path, desc, label_map, files, part_id, key):
    """Execute ofrecord task method."""
    global delayId
    delayId = delayId = "\"" + eval(str(key, encoding="utf-8")) + "\""
    logging.info(part_id)
    num_imgs, images, labels = extract_img_label(files, src_path)
    keys = sorted(list(map(int, label_map.keys())))
    for i in range(len(keys)):
        label_map[str(keys[i])] = i
    if not num_imgs:
        return False, 0, 0
    try:
        os.makedirs(desc)
    except Exception as e:
        print('{} exists.'.format(desc))
    for i in range(num_imgs):
        filename = 'part-{}'.format(part_id)
        filename = os.path.join(desc, filename)
        f = open(filename, 'wb')
        print(filename)
        img = images[i]
        label = label_map[str(labels[i])]
        sample = of_record.OFRecord(feature={
            'class/label': of_record.Feature(int32_list=of_record.Int32List(value=[label])),
            'encoded': _bytes_feature(img)
        })
        size = sample.ByteSize()
        f.write(struct.pack("q", size))
        f.write(sample.SerializeToString())
    if f:
        f.close()


def delaySchduled(inc, redisClient):
    """Delay task method.
    Args:
        inc: scheduled task time.
        redisClient: redis client.
    """
    try:
        print("delay:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S"))
        redisClient.eval(delay_script.delayTaskLua, 1, config.ofrecordStartQueue, delayId, int(time.time()))
        schedule.enter(inc, 0, delaySchduled, (inc, redisClient))
    except Exception as e:
        print("delay error" + e)


def delayKeyThread(redisClient):
    """Delay task thread.
    Args:
        redisClient: redis client.
    """
    schedule.enter(0, 0, delaySchduled, (5, redisClient))
    schedule.run()
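
execute() frames each record as an 8-byte native-endian length (struct.pack("q", size)) followed by the serialized OFRecord protobuf. A hedged sketch of a reader for that framing, assuming the same oneflow record_pb2 module is importable; 'part-0' is a hypothetical output file name:

import struct

import oneflow.core.record.record_pb2 as of_record


def read_ofrecord(filename):
    """Yield OFRecord messages from a part file written with the framing above."""
    with open(filename, 'rb') as f:
        while True:
            header = f.read(8)
            if len(header) < 8:
                break  # end of file
            (size,) = struct.unpack("q", header)  # matches struct.pack("q", size)
            sample = of_record.OFRecord()
            sample.ParseFromString(f.read(size))
            yield sample


for sample in read_ofrecord('part-0'):
    print(sample.feature['class/label'].int32_list.value[0])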

dubhe_data_process/oneflow-1/README.md (+0 -2)

@@ -1,2 +0,0 @@
# oneflow
application by oneflow

dubhe_data_process/oneflow-1/__init__.py (+0 -0)


dubhe_data_process/oneflow-1/gen_ofrecord.py (+0 -167)

@@ -1,167 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""

# -*- coding: utf-8 -*-
import json
import math
import os
import struct
import cv2
import numpy as np
import oneflow.core.record.record_pb2 as of_record


class ImageCoder(object):
    """Helper class that provides image coding utilities."""

    def __init__(self, size=None):
        self.size = size

    def _resize(self, image_data):
        if self.size is not None and image_data.shape[:2] != self.size:
            return cv2.resize(image_data, self.size)
        return image_data

    def image_to_jpeg(self, image_data):
        image_data = cv2.imdecode(np.fromstring(image_data, np.uint8), 1)
        image_data = self._resize(image_data)
        return cv2.imencode(".jpg", image_data)[1].tostring(
        ), image_data.shape[0], image_data.shape[1]


def _process_image(filename, coder):
    """Process a single image file.
    Args:
        filename: string, path to an image file e.g., '/path/to/example.JPG'.
        coder: instance of ImageCoder to provide image coding utils.
    Returns:
        image_buffer: string, JPEG encoding of RGB image.
        height: integer, image height in pixels.
        width: integer, image width in pixels.
    """
    # Read the image file.
    with open(filename, 'rb') as f:
        image_data = f.read()
    image_data, height, width = coder.image_to_jpeg(image_data)

    return image_data, height, width


def _bytes_feature(value):
    """Wrapper for inserting bytes features into Example proto."""
    return of_record.Feature(bytes_list=of_record.BytesList(value=[value]))


def dense_to_one_hot(labels_dense, num_classes):
    """Convert class labels from scalars to one-hot vectors."""
    num_labels = labels_dense.shape[0]
    index_offset = np.arange(num_labels) * num_classes
    labels_one_hot = np.zeros((num_labels, num_classes))
    labels_one_hot.flat[index_offset + labels_dense.ravel()] = 1
    return labels_one_hot


def extract_img_label(f):
    """Extract the images and labels into np array [index].
    Args:
        f: A file object that contain images and annotations.
    Returns:
        data: A 4D uint8 np array [index, h, w, depth].
        labels: a 1D uint8 np array.
        num_img: the number of images.
    """
    train_img = os.path.join(f, 'origin/')
    train_label = os.path.join(f, 'annotation/')
    img_set = os.listdir(train_img)
    num_imgs = len(img_set)
    data = []
    labels = []
    print('^^^^^^^^^^ start img_set for sycle')
    for i in img_set:
        name = os.path.splitext(i)[0]
        coder = ImageCoder((224, 224))
        image_buffer, height, width = _process_image(
            os.path.join(train_img, i), coder)

        data += [image_buffer]

        if os.path.exists(os.path.join(train_label, name)):

            with open(os.path.join(train_label, name), "r", encoding='utf-8') as jsonFile:
                la = json.load(jsonFile)
                if la:
                    labels += [la[0]['category_id']]
                else:
                    data.pop()
                    num_imgs -= 1
        else:
            print('File is not found')
    print('^^^^^^^^^ img_set for end')
    data = np.array(data)
    labels = np.array(labels)
    print(data.shape, labels.shape)
    return num_imgs, data, labels


def read_data_sets(src, desc, label_map,
                   part_num=8):
    """
    Args:
        src: The path where image and annotations saved.
        desc: The path where OfRecord will be writen in.
        part_num: The OfRecord will be writen in part_num parts.
        label_map: id and its corresponding label
    Returns:
        Whether there is image for converting to ofRecord
        num_images: The number of images.
        part_num: The OfRecord will be writen in part_num parts.
    """
    print('************** start read_data_sets func **********************')
    num_images, images, labels = extract_img_label(src)
    print('************** read_data_sets end **********************')
    keys = sorted(list(map(int, label_map.keys())))
    for i in range(len(keys)):
        label_map[str(keys[i])] = i
    if not num_images:
        return False, 0, 0
    os.makedirs(desc)
    part_size = num_images / int(part_num)
    part_id = -1
    print('************** start for range num_images')
    for i in range(num_images):
        p = math.floor(i / part_size)
        if p != part_id and p < part_num:
            part_id = p
            filename = 'part-{}'.format(part_id)
            filename = os.path.join(desc, filename)
            f = open(filename, 'wb')
            print(filename)
        img = images[i]
        label = label_map[str(labels[i])]
        sample = of_record.OFRecord(feature={
            'class/label': of_record.Feature(int32_list=of_record.Int32List(value=[label])),
            'encoded': _bytes_feature(img)
        })
        size = sample.ByteSize()
        f.write(struct.pack("q", size))
        f.write(sample.SerializeToString())
    print('********************* end for range')
    if f:
        f.close()
    return True, num_images, part_num
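
A quick worked example of the part-splitting arithmetic in read_data_sets: with num_images=100 and part_num=8, part_size is 12.5 and a new part file opens whenever floor(i / part_size) changes, yielding eight parts of 13 or 12 records:

import math
from collections import Counter

num_images, part_num = 100, 8           # hypothetical inputs
part_size = num_images / int(part_num)  # 12.5, as in read_data_sets
counts = Counter(math.floor(i / part_size) for i in range(num_images))
print(dict(counts))  # {0: 13, 1: 12, 2: 13, 3: 12, 4: 13, 5: 12, 6: 13, 7: 12}
# floor(99 / 12.5) == 7, so the p < part_num guard never triggers here.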

dubhe_data_process/oneflow-1/imagenet_server.py (+0 -161)

@@ -1,161 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""

#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import web
import os
import string
import _thread
import logging
import urllib
from queue import Queue
import time
import random
import json
import argparse
import sys
import codecs
import of_cnn_resnet
import numpy as np
from log_config import setup_log
from upload_config import Upload_cfg, MyApplication
urls = ('/auto_annotate', 'Upload')
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())


label_url = "api/data/datasets/files/annotations/auto/"
parser = argparse.ArgumentParser(description="config for imagenet label server")
parser.add_argument("-p", "--port", type=int, required=True)
parser.add_argument("-m", "--mode", type=str, default="test", required=False)
args = parser.parse_args()
url_json = './config/url.json'
with open(url_json) as f:
    url_dict = json.loads(f.read())
label_url = url_dict[args.mode] + label_url
port = args.port
taskQueue = Queue()
taskInImages = {}
base_path = "/nfs/"


des_folder = os.path.join('./log', args.mode)
if not os.path.exists(des_folder):
    os.makedirs(des_folder)

logging = setup_log(args.mode, 'imagenet-' + args.mode + '.log')


#############################label_server#####################################
def get_code():
    return ''.join(random.sample(string.ascii_letters + string.digits, 8))


def get_32code():
    return ''.join(random.sample(string.ascii_letters + string.digits, 32))


class Upload(Upload_cfg):
    """Receive and analyze the post request"""

    def POST(self):
        try:
            super().POST()
            x = web.data()
            x = json.loads(x.decode())
            type_ = x['annotateType']
            if_imagenet = x['labelType']
            task_id = get_code()
            task_images = {}
            task_images[task_id] = {
                "input": {
                    'type': type_, 'data': x}, "output": {
                    "annotations": []}, 'if_imagenet': if_imagenet}
            logging.info(task_id)
            web.t_queue.put(task_images)
            return {"code": 200, "msg": "", "data": task_id}
        except Exception as e:
            logging.error("Error post")
            logging.error(e)
            return 'post error'


def imagenetProcess():
    """The implementation of imageNet auto labeling thread"""
    global taskQueue
    global label_url
    logging.info('ImageNet auto labeling server start'.center(66, '-'))
    logging.info(label_url)
    while True:
        try:
            task_dict = taskQueue.get()
            for task_id in task_dict:
                id_list = []
                image_path_list = []
                type_ = task_dict[task_id]["input"]['type']
                if_imagenet = task_dict[task_id]['if_imagenet']
                for file in task_dict[task_id]["input"]['data']["files"]:
                    id_list.append(file["id"])
                    image_path_list.append(base_path + file["url"])
                label_list = task_dict[task_id]["input"]['data']["labels"]
                image_num = len(image_path_list)
                logging.info(image_num)
                logging.info(image_path_list)
                annotations = []
                if if_imagenet == 2:
                    for inds in range(len(image_path_list)):
                        temp = {}
                        temp['id'] = id_list[inds]
                        score, ca_id = of_cnn_resnet.resnet_inf(
                            image_path_list[inds])
                        temp['annotation'] = [
                            {'category_id': int(ca_id), 'score': np.float(score)}]
                        temp['annotation'] = json.dumps(temp['annotation'])
                        annotations.append(temp)
                result = {"annotations": annotations}
                logging.info(result)
                send_data = json.dumps(result).encode()
                task_url = label_url + task_id
                headers = {'Content-Type': 'application/json'}
                req = urllib.request.Request(task_url, headers=headers)
                response = urllib.request.urlopen(
                    req, data=send_data, timeout=5)
                logging.info(task_url)
                logging.info(response.read())
                logging.info("End imagenet")

        except Exception as e:
            logging.error("Error imagenet_Process")
            logging.error(e)
            logging.info(label_url)
            time.sleep(0.01)


def imagenet_thread(no, interval):
    """Running the imageNet auto labeling thread"""
    imagenetProcess()


if __name__ == "__main__":
    of_cnn_resnet.init_resnet()
    _thread.start_new_thread(imagenet_thread, (5, 5))
    app = MyApplication(urls, globals())
    web.t_queue = taskQueue
    web.taskInImages = taskInImages
    app.run(port=port)

dubhe_data_process/oneflow-1/img_process_server.py (+0 -191)

@@ -1,191 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import web
import os
import string
import cv2
import numpy as np
import _thread
import logging
import urllib
from queue import Queue
import time
import random
import json
import argparse
import sys
import codecs
import shutil
from augment_utils.ACE import ACE_color
from augment_utils.dehaze import deHaze, addHaze
from augment_utils.hist_equalize import adaptive_hist_equalize
from log_config import setup_log
from upload_config import Upload_cfg, MyApplication

urls = ('/img_process', 'Image_augmentation')
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

# task url suffix
img_pro_url = 'api/data/datasets/'

# arguments
parser = argparse.ArgumentParser(description="config for image augmentation server")
parser.add_argument("-p", "--port", type=int, required=True)
parser.add_argument("-m", "--mode", type=str, default="test", required=False)
args = parser.parse_args()

# url concat(ip + port + suffix)
url_json = './config/url.json'
with open(url_json) as f:
url_dict = json.loads(f.read())
img_pro_url = url_dict[args.mode] + img_pro_url
port = args.port

# create the task queue
imageProcessQuene = Queue()
base_path = "/nfs/"

# create log path and file
des_folder = os.path.join('./log', args.mode)
if not os.path.exists(des_folder):
os.makedirs(des_folder)

logging = setup_log(args.mode, 'enhance-' + args.mode + '.log')


class Image_augmentation(Upload_cfg):
"""Recieve and analyze the post request"""

def POST(self):
try:
super().POST()
x = web.data()
x = json.loads(x.decode())
dataset_id = x['id']
img_save_path = x['enhanceFilePath']
ann_save_path = x["enhanceAnnotationPath"]
file_list = x['fileDtos']
nums_, img_path_list, ann_path_list = img_ann_list_gen(file_list)
process_type = x['type']
re_task_id = ''.join(random.sample(string.ascii_letters + string.digits, 8))
img_process_config = [dataset_id, img_save_path,
ann_save_path, img_path_list,
ann_path_list, process_type, re_task_id]
web.t_queue2.put(img_process_config)
logging.info(str(nums_) + ' images for augment')
return {"code": 200, "msg": "", "data": re_task_id}
except Exception as e:
print(e)
print("Error Post")
logging.error("Error post")
logging.error(e)
return 'post error'


def image_process_thread():
"""The implementation of image augmentation thread"""
global img_pro_url
global imageProcessQuene

logging.info('img_process server start'.center(66, '-'))
logging.info(img_pro_url)
task_cond = []
while True:
try:
img_task = imageProcessQuene.get()
if img_task and img_task[0] not in task_cond:
index = len(task_cond)
task_cond.append(img_task[0])
dataset_id = img_task[0]
img_save_path = img_task[1]
ann_save_path = img_task[2]
img_list = img_task[3]
ann_list = img_task[4]
method = img_task[5]
re_task_id = img_task[6]
                suffix = '_enhanced_' + re_task_id
logging.info("dataset_id " + str(dataset_id))
for j in range(len(ann_list)):
img_path = img_list[j]
ann_path = ann_list[j]
img_process(suffix, img_path, ann_path,
img_save_path, ann_save_path, method)

task_url = img_pro_url + 'enhance/finish'
send_data = {"id": re_task_id,
"suffix": suffix}

headers = {'Content-Type': 'application/json'}
req = urllib.request.Request(task_url,
data=json.dumps(send_data).encode(),
headers=headers)
response = urllib.request.urlopen(req, timeout=5)
logging.info('suffix:' + suffix)
logging.info(task_url)
logging.info(response.read())
logging.info("End img_process of dataset:" + str(dataset_id))
task_cond.pop(index)
else:
continue
except Exception as e:
logging.info(img_pro_url)
logging.error("Error imgProcess")
logging.error(e)
time.sleep(0.01)


def img_ann_list_gen(file_list):
"""Analyze the json request and convert to list"""
nums_ = len(file_list)
img_list = []
ann_list = []
for i in range(nums_):
img_list.append(file_list[i]['filePath'])
ann_list.append(file_list[i]['annotationPath'])
return nums_, img_list, ann_list


def img_process(suffix, img_path, ann_path, img_save_path, ann_save_path, method_ind):
"""Process images and save in specified path"""
inds2method = {1: deHaze, 2: addHaze, 3: ACE_color, 4: adaptive_hist_equalize}
method = inds2method[method_ind]
img_raw = cv2.imdecode(np.fromfile(img_path.encode('utf-8'), dtype=np.uint8), 1)
img_suffix = os.path.splitext(img_path)[-1]
ann_name = ann_path.replace(ann_save_path, '')
if method_ind <= 3:
processed_img = method(img_raw / 255.0) * 255
else:
processed_img = method(img_raw)
cv2.imwrite(img_save_path + ann_name + suffix + img_suffix,
processed_img.astype(np.uint8))
shutil.copyfile(ann_path.encode('utf-8'), (ann_path + suffix).encode('utf-8'))


def img_process_thread(no, interval):
"""Running the image augmentation thread"""
image_process_thread()


if __name__ == "__main__":
_thread.start_new_thread(img_process_thread, (5, 5))
app = MyApplication(urls, globals())
web.t_queue2 = imageProcessQuene
app.run(port=port)
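
A minimal client sketch for the /img_process endpoint above, assuming the server was started locally with -p 8080; the dataset ID and paths are hypothetical, but the field names match what the POST handler reads (type selects 1=deHaze, 2=addHaze, 3=ACE_color, 4=adaptive_hist_equalize):

import json
import urllib.request

payload = {
    "id": 42,                                     # dataset_id
    "enhanceFilePath": "/nfs/dataset/42/enhance/",
    "enhanceAnnotationPath": "/nfs/dataset/42/annotation/",
    "type": 1,                                    # augmentation method index
    "fileDtos": [{"filePath": "/nfs/dataset/42/origin/0001.jpg",
                  "annotationPath": "/nfs/dataset/42/annotation/0001"}],
}
req = urllib.request.Request("http://127.0.0.1:8080/img_process",
                             data=json.dumps(payload).encode(),
                             headers={"Content-Type": "application/json"})
print(urllib.request.urlopen(req, timeout=5).read())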

+ 0
- 180
dubhe_data_process/oneflow-1/ofrecord_server.py View File

@@ -1,180 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""

# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import _thread
import argparse
import codecs
import json
import os
import shutil
import sys
import time
import urllib
from queue import Queue
import web
from upload_config import Upload_cfg, MyApplication
import gen_ofrecord as ofrecord
from log_config import setup_log

urls = ('/gen_ofrecord', 'Ofrecord')
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

parser = argparse.ArgumentParser(description="config for label server")
parser.add_argument("-p", "--port", type=int, required=True)
parser.add_argument("-m", "--mode", type=str, default="test", required=False)
args = parser.parse_args()

base_path = "/nfs/"
record_url = 'api/data/datasets/versions/'
url_json = './config/url.json'

with open(url_json) as f:
url_dict = json.loads(f.read())
record_url = url_dict[args.mode] + record_url
port = args.port
of_que = Queue()
of_cond = []

des_folder = os.path.join('./log', args.mode)
if not os.path.exists(des_folder):
os.makedirs(des_folder)

of_log = setup_log(args.mode, 'ofrecord-' + args.mode + '.log')


class Ofrecord(Upload_cfg):
"""Recieve and analyze the post request"""
def POST(self):
try:
super().POST()
x = web.data()
x = json.loads(x.decode())
print(x)
dataset_version_id = x['id']
label_map = x['datasetLabels']
if dataset_version_id not in web.of_cond:
web.of_cond.append(dataset_version_id)
src_path = base_path + x['datasetPath']
save_path = base_path + x['datasetPath'] + '/ofrecord'
# transform the windows path to linux path
src_path = '/'.join(src_path.split('\\'))
save_path = '/'.join(save_path.split('\\'))
                of_config = [dataset_version_id, src_path, save_path, label_map]
of_log.info('Recv of_config:%s' % of_config)
web.t_queue1.put(of_config)
else:
pass
return {"code": 200, "msg": "", "data": dataset_version_id}
except Exception as e:
of_log.error("Error post")
of_log.error(e)
return 'post error'


def gen_ofrecord_thread():
"""The implementation of ofRecord generating thread"""
global record_url
global of_que
of_log.info('ofrecord server start'.center(66, '-'))
of_log.info(record_url)
while True:
try:
of_task = of_que.get()
debug_msg = '-------- OfRecord gen start: %s --------' % of_task[0] if of_task else ''
of_log.info(debug_msg)
if not of_task:
continue
dataset_version_id = of_task[0]
src_path = of_task[1]
save_path = of_task[2]
label_map = of_task[3]
of_log.info('[%s] not in of_cond' % dataset_version_id)
if os.path.exists(save_path):
shutil.rmtree(save_path)
os.makedirs(save_path)
task_url = record_url + str(dataset_version_id) + '/convert/finish'
of_log.info('key: label, type: int32')
of_log.info('key: img_raw, type: bytes')
desc = os.path.join(save_path, 'train')
of_log.info('desc: %s' % desc)
try:
con, num_images, num_part = ofrecord.read_data_sets(
                        src_path, desc, label_map)
except Exception as e:
error_msg = 'Error happened in ofrecord.read_data_sets'
of_log.error(error_msg)
if of_task[0] in web.of_cond:
web.of_cond.remove(of_task[0])
# send messages to DataManage
url_dbg = 'Request to [%s]' % task_url
of_log.info(url_dbg)
headers = {'Content-Type': 'application/json'}
req_body = bytes(json.dumps({'msg': str(e)}), 'utf8')

req = urllib.request.Request(
task_url, data=req_body, headers=headers)
response = urllib.request.urlopen(req, timeout=5)
debug_msg = "response.read(): %s; ret_code: %s" % (
response.read(), response.getcode())
of_log.info(debug_msg)
raise e
if not con:
error_msg = 'No annotated images, No ofrecord will be created'
of_log.warning(error_msg)
of_log.info(
'train: {} images in {} part files.\n'.format(
num_images, num_part))
of_log.info('generate ofrecord file done')

url_dbg = 'Request to [%s]' % task_url
of_log.info(url_dbg)
headers = {'Content-Type': 'application/json'}
req_body = {'msg': 'ok'}
req = urllib.request.Request(
task_url, data=json.dumps(req_body).encode(), headers=headers)
response = urllib.request.urlopen(req, timeout=5)
debug_msg = "response.read(): %s; ret_code: %s" % (
response.read(), response.getcode())
of_log.info(debug_msg)
web.of_cond.remove(of_task[0])


except Exception as e:
of_log.error("Error ofProcess")
of_log.error(e)
of_log.info(record_url)
debug_msg = '-------- OfRecord gen end --------'
of_log.info(debug_msg)

time.sleep(0.01)


def of_thread(no, interval):
"""Running the ofRecord generating thread"""
gen_ofrecord_thread()


if __name__ == "__main__":
_thread.start_new_thread(of_thread, (5, 5))
app = MyApplication(urls, globals())
web.of_cond = of_cond
web.t_queue1 = of_que
app.run(port=port)
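
A minimal client sketch for /gen_ofrecord, again assuming a local server on port 8080; the ID and path are hypothetical, and the exact shape of datasetLabels is an assumption (the handler only forwards it to ofrecord.read_data_sets):

import json
import urllib.request

payload = {
    "id": 7,                                   # dataset_version_id
    "datasetPath": "dataset/7/versionV1",      # joined onto base_path ("/nfs/")
    "datasetLabels": {"1": "cat", "2": "dog"}  # assumed id -> name mapping
}
req = urllib.request.Request("http://127.0.0.1:8080/gen_ofrecord",
                             data=json.dumps(payload).encode(),
                             headers={"Content-Type": "application/json"})
print(urllib.request.urlopen(req, timeout=5).read())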

+ 0
- 156
dubhe_data_process/oneflow-1/run_label_server.py View File

@@ -1,156 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# !/usr/bin/env python3
# -*- coding: utf-8 -*-
import _thread
import argparse
import codecs
import json
import os
import random
import string
import sys
import time
import urllib
from queue import Queue

import predict_with_print_box as yolo_demo
import web
from upload_config import Upload_cfg, MyApplication
from log_config import setup_log

'''Config urls and chinese coding'''
urls = ('/auto_annotate', 'Upload')
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

'''Set port and mode'''
parser = argparse.ArgumentParser(description="config for label server")
parser.add_argument("-p", "--port", type=int, required=True)
parser.add_argument("-m", "--mode", type=str, default="test", required=False)
args = parser.parse_args()

'''Set path'''
base_path = "/nfs/"
label_url = "api/data/datasets/files/annotations/auto/"
url_json = './config/url.json'

'''Init task queue and log'''
with open(url_json) as f:
url_dict = json.loads(f.read())
label_url = url_dict[args.mode] + label_url
port = args.port
taskQueue = Queue()
taskInImages = {}
des_folder = os.path.join('./log', args.mode)
if not os.path.exists(des_folder):
os.makedirs(des_folder)
label_log = setup_log(args.mode, 'label-' + args.mode + '.log')


def get_code():
"""Generate task_id"""
return ''.join(random.sample(string.ascii_letters + string.digits, 8))


class Upload(Upload_cfg):
"""Recieve and analyze the post request"""

def POST(self):
try:
super().POST()
x = web.data()
x = json.loads(x.decode())
type_ = x['annotateType']
task_id = get_code()
task_images = {}
task_images[task_id] = {"input": {'type': type_, 'data': x}, "output": {"annotations": []}}
print("Random_code:", task_id)
label_log.info(task_id)
label_log.info('web.t_queue length:%s' % web.t_queue.qsize())
label_log.info('Recv task_images:%s' % task_images)
web.t_queue.put(task_images)
return {"code": 200, "msg": "", "data": task_id}
except Exception as e:
label_log.error("Error post")
label_log.error(e)
return 'post error'


def bgProcess():
"""The implementation of automatic_label generating thread"""
global taskQueue
global label_url
label_log.info('auto label server start'.center(66, '-'))
label_log.info(label_url)
while True:
try:
task_dict = taskQueue.get()
for task_id in task_dict:
id_list = []
image_path_list = []
type_ = task_dict[task_id]["input"]['type']
for file in task_dict[task_id]["input"]['data']["files"]:
id_list.append(file["id"])
image_path_list.append(base_path + file["url"])
label_list = task_dict[task_id]["input"]['data']["labels"]
coco_flag = 0
if "labelType" in task_dict[task_id]["input"]['data']:
label_type = task_dict[task_id]["input"]['data']["labelType"]
if label_type == 3:
coco_flag = 80
label_log.info(coco_flag)
image_num = len(image_path_list)
if image_num < 16:
for i in range(16 - image_num):
image_path_list.append(image_path_list[0])
id_list.append(id_list[0])
label_log.info(image_num)
label_log.info(image_path_list)
annotations = yolo_obj.yolo_inference(type_, id_list, image_path_list, label_list, coco_flag)
annotations = annotations[0:image_num]
result = {"annotations": annotations}
label_log.info('Inference complete %s' % task_id)
send_data = json.dumps(result).encode()
task_url = label_url + task_id
headers = {'Content-Type': 'application/json'}
req = urllib.request.Request(task_url, headers=headers)
response = urllib.request.urlopen(req, data=send_data, timeout=2)
label_log.info(task_url)
label_log.info(response.read())
label_log.info("End automatic label")

except Exception as e:
label_log.error("Error bgProcess")
label_log.error(e)
label_log.info(label_url)
time.sleep(0.01)


def bg_thread(no, interval):
"""Running the automatic_label generating thread"""
bgProcess()


if __name__ == "__main__":
yolo_obj = yolo_demo.YoloInference(label_log)
_thread.start_new_thread(bg_thread, (5, 5))
app = MyApplication(urls, globals())
web.t_queue = taskQueue
web.taskInImages = taskInImages
app.run(port=port)
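
A minimal client sketch for /auto_annotate, assuming a local server on port 8080; IDs, URLs, and label values are hypothetical, but the keys match what Upload.POST and bgProcess read (labelType == 3 switches coco_flag to 80):

import json
import urllib.request

payload = {
    "annotateType": 1,
    "labels": [1, 2, 3],           # category ids kept in the result
    "labelType": 3,                # optional; 3 selects the COCO offset
    "files": [{"id": 1001, "url": "dataset/1/origin/0001.jpg"}],
}
req = urllib.request.Request("http://127.0.0.1:8080/auto_annotate",
                             data=json.dumps(payload).encode(),
                             headers={"Content-Type": "application/json"})
print(urllib.request.urlopen(req, timeout=5).read())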

+ 0
- 46
dubhe_data_process/oneflow-1/upload_config.py View File

@@ -1,46 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""

import web


class Upload_cfg:
"""Recieve and analyze the post request"""

def GET(self):
web.header("Access-Control-Allow-Origin", "*")
web.header("Access-Control-Allow-Credentials", "true")
web.header('Access-Control-Allow-Headers',
'Content-Type, Access-Control-Allow-Origin, Access-Control-Allow-Headers, X-Requested-By, Access-Control-Allow-Methods')
web.header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')
return """<html><head></head><body>please send data in post
</body></html>"""

def POST(self):
web.header("Access-Control-Allow-Origin", "*")
web.header("Access-Control-Allow-Credentials", "true")
web.header('Access-Control-Allow-Headers',
'Content-Type, Access-Control-Allow-Origin, Access-Control-Allow-Headers, X-Requested-By, Access-Control-Allow-Methods')
web.header('Access-Control-Allow-Methods', 'POST, GET, PUT, DELETE')


class MyApplication(web.application):
def run(self, port, *middleware):
func = self.wsgifunc(*middleware)
return web.httpserver.runsimple(func, ('0.0.0.0', port))

+ 0
- 277
dubhe_data_process/predict_with_print_box.py View File

@@ -1,277 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
import json
import time

import cv2
import numpy as np
import oneflow_yolov3
from yolo_net import YoloPredictNet

import oneflow as flow


'''Init oneflow config'''
model_load_dir = "of_model/yolov3_model_python/"
label_to_name_file = "data/coco.names"
use_tensorrt = 0
gpu_num_per_node = 1
batch_size = 16
image_height = 608
image_width = 608
flow.config.load_library(oneflow_yolov3.lib_path())
func_config = flow.FunctionConfig()
func_config.default_distribute_strategy(flow.distribute.consistent_strategy())
func_config.default_data_type(flow.float)
if use_tensorrt != 0:
func_config.use_tensorrt(True)
label_2_name = []
with open(label_to_name_file, 'r') as f:
label_2_name = f.readlines()
nms = True
print("nms:", nms)
input_blob_def_dict = {
"images": flow.FixedTensorDef((batch_size, 3, image_height, image_width), dtype=flow.float),
"origin_image_info": flow.FixedTensorDef((batch_size, 2), dtype=flow.int32),
}


def xywh_2_x1y1x2y2(x, y, w, h, origin_image):
"""The format of box transform"""
x1 = (x - w / 2.) * origin_image[1]
x2 = (x + w / 2.) * origin_image[1]
y1 = (y - h / 2.) * origin_image[0]
y2 = (y + h / 2.) * origin_image[0]
return x1, y1, x2, y2


def batch_boxes(positions, probs, origin_image_info):
"""The images postprocessing"""
batch_size = positions.shape[0]
batch_list = []
    if nms:
for k in range(batch_size):
box_list = []
for i in range(1, 81):
for j in range(positions.shape[2]):
if positions[k][i][j][2] != 0 and positions[k][i][j][3] != 0 and probs[k][i][j] != 0:
x1, y1, x2, y2 = xywh_2_x1y1x2y2(positions[k][i][j][0], positions[k][i][j][1],
positions[k][i][j][2], positions[k][i][j][3],
origin_image_info[k])
bbox = [i - 1, x1, y1, x2, y2, probs[k][i][j]]
box_list.append(bbox)
batch_list.append(np.asarray(box_list))
else:
for k in range(batch_size):
box_list = []
for j in range(positions.shape[1]):
for i in range(1, 81):
if positions[k][j][2] != 0 and positions[k][j][3] != 0 and probs[k][j][i] != 0:
x1, y1, x2, y2 = xywh_2_x1y1x2y2(positions[k][j][0], positions[k][j][1], positions[k][j][2],
positions[k][j][3], origin_image_info[k])
bbox = [i - 1, x1, y1, x2, y2, probs[k][j][i]]
box_list.append(bbox)
batch_list.append(np.asarray(box_list))
return batch_list


@flow.function(func_config)
def yolo_user_op_eval_job(images=input_blob_def_dict["images"],
origin_image_info=input_blob_def_dict["origin_image_info"]):
"""The model inference"""
yolo_pos_result, yolo_prob_result = YoloPredictNet(images, origin_image_info, trainable=False)
yolo_pos_result = flow.identity(yolo_pos_result, name="yolo_pos_result_end")
yolo_prob_result = flow.identity(yolo_prob_result, name="yolo_prob_result_end")
return yolo_pos_result, yolo_prob_result, origin_image_info


def yolo_show(image_path_list, batch_list):
"""Debug the result of Yolov3"""
font = cv2.FONT_HERSHEY_SIMPLEX
for img_path, batch in zip(image_path_list, batch_list):
result_list = batch.tolist()
img = cv2.imread(img_path)
for result in result_list:
cls = int(result[0])
bbox = result[1:-1]
score = result[-1]
print('img_file:', img_path)
print('cls:', cls)
print('bbox:', bbox)
            c = ((int(bbox[0]) + int(bbox[2])) / 2, (int(bbox[1]) + int(bbox[3])) / 2)
cv2.rectangle(img, (int(bbox[0]), int(bbox[1])), (int(bbox[2]), int(bbox[3])), (0, 255, 255), 1)
cv2.putText(img, str(cls), (int(c[0]), int(c[1])), font, 1, (0, 0, 255), 1)
result_name = img_path.split('/')[-1]
cv2.imwrite("data/results/" + result_name, img)


def resize_image(img, origin_h, origin_w, image_height, image_width):
"""The resize of image preprocessing"""
w = image_width
h = image_height
resized = np.zeros((3, image_height, image_width), dtype=np.float32)
part = np.zeros((3, origin_h, image_width), dtype=np.float32)
    w_scale = float(origin_w - 1) / (w - 1)
    h_scale = float(origin_h - 1) / (h - 1)

for c in range(w):
if c == w - 1 or origin_w == 1:
val = img[:, :, origin_w - 1]
else:
sx = c * w_scale
ix = int(sx)
dx = sx - ix
val = (1 - dx) * img[:, :, ix] + dx * img[:, :, ix + 1]
part[:, :, c] = val
for r in range(h):
sy = r * h_scale
iy = int(sy)
dy = sy - iy
val = (1 - dy) * part[:, iy, :]
resized[:, r, :] = val
if r == h - 1 or origin_h == 1:
continue
resized[:, r, :] = resized[:, r, :] + dy * part[:, iy + 1, :]
return resized


def batch_image_preprocess_v2(img_path_list, image_height, image_width):
"""The images preprocessing"""
result_list = []
origin_info_list = []
for img_path in img_path_list:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = img.transpose(2, 0, 1).astype(np.float32) # hwc->chw
img = img / 255 # /255
img[[0, 1, 2], :, :] = img[[2, 1, 0], :, :] # bgr2rgb

w = image_width
h = image_height
origin_h = img.shape[1]
origin_w = img.shape[2]
new_w = origin_w
new_h = origin_h
if w / origin_w < h / origin_h:
new_w = w
new_h = origin_h * w // origin_w
else:
new_h = h
new_w = origin_w * h // origin_h
resize_img = resize_image(img, origin_h, origin_w, new_h, new_w)

dw = (w - new_w) // 2
dh = (h - new_h) // 2

padh_before = int(dh)
padh_after = int(h - new_h - padh_before)
padw_before = int(dw)
padw_after = int(w - new_w - padw_before)
result = np.pad(resize_img, pad_width=((0, 0), (padh_before, padh_after), (padw_before, padw_after)),
mode='constant', constant_values=0.5)
origin_image_info = [origin_h, origin_w]
result_list.append(result)
origin_info_list.append(origin_image_info)
results = np.asarray(result_list).astype(np.float32)
origin_image_infos = np.asarray(origin_info_list).astype(np.int32)
return results, origin_image_infos


def coco_format(type_, id_list, file_list, result_list, label_list, coco_flag=0):
"""Transform the annotations to coco format"""
annotations = []
for i, result in enumerate(result_list):
temp = {}
id_name = id_list[i]
file_path = file_list[i]
temp['id'] = id_name
temp['annotation'] = []
im = cv2.imread(file_path)
height, width, _ = im.shape
if result.shape[0] == 0:
temp['annotation'] = json.dumps(temp['annotation'])
annotations.append(temp)
continue
else:
for j in range(result.shape[0]):
cls_id = int(result[j][0]) + 1 + coco_flag
x1 = result[j][1]
x2 = result[j][3]
y1 = result[j][2]
y2 = result[j][4]
score = result[j][5]
width = max(0, x2 - x1)
height = max(0, y2 - y1)
if cls_id in label_list:
temp['annotation'].append({
'area': width * height,
'bbox': [x1, y1, width, height],
'category_id': cls_id,
'iscrowd': 0,
'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],
'score': score
})
if type_ == 2 and len(temp['annotation']) > 0:
temp['annotation'] = [temp['annotation'][0]]
temp['annotation'][0].pop('area')
temp['annotation'][0].pop('bbox')
temp['annotation'][0].pop('iscrowd')
temp['annotation'][0].pop('segmentation')
temp['annotation'] = json.dumps(temp['annotation'])
annotations.append(temp)
return annotations


class YoloInference(object):
"""Yolov3 detection inference"""

def __init__(self, label_log):
self.label_log = label_log
flow.config.gpu_device_num(gpu_num_per_node)
flow.env.ctrl_port(9789)

check_point = flow.train.CheckPoint()
if not model_load_dir:
check_point.init()
else:
check_point.load(model_load_dir)
print("Load check_point success")
self.label_log.info("Load check_point success")

def yolo_inference(self, type_, id_list, image_path_list, label_list, coco_flag=0):
annotations = []
try:
if len(image_path_list) == 16:
t0 = time.time()
images, origin_image_info = batch_image_preprocess_v2(image_path_list, image_height, image_width)
yolo_pos, yolo_prob, origin_image_info = yolo_user_op_eval_job(images, origin_image_info).get()
batch_list = batch_boxes(yolo_pos, yolo_prob, origin_image_info)
annotations = coco_format(type_, id_list, image_path_list, batch_list, label_list, coco_flag)
t1 = time.time()
print('t1-t0:', t1 - t0)
        except Exception:
print("Forward Error")
self.label_log.error("Forward Error")
for i, image_path in enumerate(image_path_list):
temp = {}
id_name = id_list[i]
temp['id'] = id_name
temp['annotation'] = []
temp['annotation'] = json.dumps(temp['annotation'])
annotations.append(temp)
return annotations
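
As a standalone check of the box transform above: the network emits normalized center/size boxes and xywh_2_x1y1x2y2 scales them to pixel corners against origin_image = [height, width]. A self-contained restatement with illustrative numbers:

def xywh_2_x1y1x2y2(x, y, w, h, origin_image):
    # origin_image = [height, width]; x, y, w, h are normalized to [0, 1].
    x1 = (x - w / 2.) * origin_image[1]
    x2 = (x + w / 2.) * origin_image[1]
    y1 = (y - h / 2.) * origin_image[0]
    y2 = (y + h / 2.) * origin_image[0]
    return x1, y1, x2, y2

# A centered box covering half of a 480x640 (h, w) image:
assert xywh_2_x1y1x2y2(0.5, 0.5, 0.5, 0.5, [480, 640]) == (160.0, 120.0, 480.0, 360.0)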

+ 0
- 100
dubhe_data_process/taskexecutor.py View File

@@ -1,100 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# coding:utf-8

import codecs
import sched
import sys
import json
import logging
import time
import common.RedisUtil as f
import common.config as config
import annotation as annotation
from datetime import datetime
import luascript.delaytaskscript as delay_script

logging.basicConfig(format='%(asctime)s - %(pathname)s[line:%(lineno)d] - %(levelname)s: %(message)s',
level=logging.DEBUG)

schedule = sched.scheduler(time.time, time.sleep)
sys.stdout = codecs.getwriter("utf-8")(sys.stdout.detach())

delayId = ""


def annotationExecutor(redisClient, key):
"""Annotation task method.
Args:
redisClient: redis client.
key: annotation task key.
"""
global delayId
print('-------------process one-----------------')
try:
delayId = "\"" + eval(str(key, encoding="utf-8")) + "\""
logging.info('get element is {0}'.format(key))
key = key.decode()
        jsonStr = f.getByKey(redisClient, key.replace('"', ''))
        print(jsonStr)
        jsonObject = json.loads(jsonStr.decode('utf-8'))
image_path_list = []
id_list = []
label_list = []
label_list = jsonObject['labels']
for fileObject in jsonObject['files']:
image_path_list.append('/nfs/' + fileObject['url'])
id_list.append(fileObject['id'])
print(image_path_list)
print(id_list)
print(label_list)
coco_flag = 0
if "labelType" in jsonObject:
label_type = jsonObject['labelType']
if label_type == 3:
coco_flag = 80
        annotations = annotation._annotation(0, image_path_list, id_list, label_list, coco_flag)
result = {"task": key, "annotations": annotations}
f.pushToQueue(redisClient, config.annotationFinishQueue, json.dumps(result))
redisClient.zrem(config.annotationStartQueue, key)
except Exception as e:
print(e)


def delaySchduled(inc, redisClient):
"""Delay task method.
Args:
inc: scheduled task time.
redisClient: redis client.
"""
try:
print("delay:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S"))
redisClient.eval(delay_script.delayTaskLua, 1, config.annotationStartQueue, delayId, int(time.time()))
schedule.enter(inc, 0, delaySchduled, (inc, redisClient))
except Exception as e:
print("delay error" + e)


def delayKeyThread(redisClient):
"""Delay task thread.
Args:
redisClient: redis client.
"""
schedule.enter(0, 0, delaySchduled, (5, redisClient))
schedule.run()
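
For reference, a hypothetical task body with the fields annotationExecutor reads from Redis; the IDs and URLs are illustrative, and real tasks are presumably enqueued by the upstream data-management service:

import json

task = {
    "labels": [1, 2, 3],     # category ids to keep
    "labelType": 3,          # optional; 3 selects the 80-class COCO offset
    "files": [
        {"id": 1001, "url": "dataset/1/origin/0001.jpg"},
        {"id": 1002, "url": "dataset/1/origin/0002.jpg"},
    ],
}
print(json.dumps(task))      # the shape getByKey would return, as a string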

+ 0
- 93
dubhe_data_process/track.py View File

@@ -1,93 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
# -*- coding: utf-8 -*-
import sched

import common.config as config
import luascript.delaytaskscript as delay_script
from track_only.hog_track import *

schedule = sched.scheduler(time.time, time.sleep)

delayId = ""


def trackProcess(task, key):
"""Track task method.
Args:
task: dataset id.
key: video file path.
Returns:
True: track success
False: track failed
"""
global delayId
delayId = "\"" + eval(str(key, encoding="utf-8")) + "\""
task = json.loads(task.decode('utf-8'))
image_list = []
label_list = []
images_data = task['images']
path = task['path']

for file in images_data:
filePath = path + "/origin/" + file
annotationPath = path + "/annotation/" + file.split('.')[0]
if not os.path.exists(filePath):
continue
if not os.path.exists(annotationPath):
continue
image_list.append(filePath)
label_list.append(annotationPath)
image_num = len(label_list)
track_det = Detector(
'xxx.avi',
min_confidence=0.35,
max_cosine_distance=0.2,
max_iou_distance=0.7,
max_age=30,
out_dir='results/')
track_det.write_img = False
RET = track_det.run_track(image_list, label_list)
if RET == 'OK':
return True
else:
return False


def delaySchduled(inc, redisClient):
"""Delay task method.
Args:
inc: scheduled task time.
redisClient: redis client.
"""
try:
print("delay:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S"))
redisClient.eval(delay_script.delayTaskLua, 1, config.trackStartQueue, delayId, int(time.time()))
schedule.enter(inc, 0, delaySchduled, (inc, redisClient))
except Exception as e:
print("delay error" + e)


def delayKeyThread(redisClient):
"""Delay task thread.
Args:
redisClient: redis client.
"""
schedule.enter(0, 0, delaySchduled, (5, redisClient))
schedule.run()
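
A small sketch of how trackProcess derives per-frame image and annotation paths from a task body; the dataset path and file names are hypothetical:

task = {"path": "/nfs/dataset/25/versionV1",
        "images": ["0001.jpg", "0002.jpg"]}

for name in task["images"]:
    filePath = task["path"] + "/origin/" + name
    annotationPath = task["path"] + "/annotation/" + name.split('.')[0]
    print(filePath, "->", annotationPath)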

+ 1
- 1
dubhe_data_process/track_only/feature/feature_extractor_batch.py View File

@@ -1,6 +1,6 @@
""" """
/** /**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.


+ 1
- 1
dubhe_data_process/track_only/hog_track.py View File

@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
"""
/**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 0
- 4
dubhe_data_process/track_only/mot_track_kc.py View File

@@ -1,18 +1,14 @@
""" """
MIT License MIT License

Copyright (c) 2020 Ziqiang Copyright (c) 2020 Ziqiang

Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE


+ 1
- 1
dubhe_data_process/track_only/post_process.py View File

@@ -1,6 +1,6 @@
""" """
/** /**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
* *
* Licensed under the Apache License, Version 2.0 (the "License"); * Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License. * you may not use this file except in compliance with the License.


+ 1
- 4
dubhe_data_process/track_only/sort/nn_matching.py View File

@@ -1,18 +1,14 @@
""" """
MIT License MIT License

Copyright (c) 2020 Ziqiang Copyright (c) 2020 Ziqiang

Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
@@ -20,6 +16,7 @@ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE. SOFTWARE.
*/
""" """
import numpy as np import numpy as np




+ 0
- 4
dubhe_data_process/track_only/sort/preprocessing.py View File

@@ -1,18 +1,14 @@
""" """
MIT License MIT License

Copyright (c) 2020 Ziqiang Copyright (c) 2020 Ziqiang

Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE


+ 0
- 4
dubhe_data_process/track_only/sort/track.py View File

@@ -1,18 +1,14 @@
""" """
MIT License MIT License

Copyright (c) 2020 Ziqiang Copyright (c) 2020 Ziqiang

Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE


+ 0
- 4
dubhe_data_process/track_only/sort/tracker.py View File

@@ -1,18 +1,14 @@
""" """
MIT License MIT License

Copyright (c) 2020 Ziqiang Copyright (c) 2020 Ziqiang

Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE


+ 1
- 1
dubhe_data_process/track_only/track_server.py View File

@@ -2,7 +2,7 @@
# -*- coding: utf-8 -*-
"""
/**
-* Copyright 2020 Zhejiang Lab. All Rights Reserved.
+* Copyright 2020 Tianshu AI Platform. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.


+ 0
- 4
dubhe_data_process/track_only/util.py View File

@@ -1,18 +1,14 @@
""" """
MIT License MIT License

Copyright (c) 2020 Ziqiang Copyright (c) 2020 Ziqiang

Permission is hereby granted, free of charge, to any person obtaining a copy Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions: furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software. copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE


+ 0
- 100
dubhe_data_process/videosample.py View File

@@ -1,100 +0,0 @@
"""
/**
* Copyright 2020 Zhejiang Lab. All Rights Reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* =============================================================
*/
"""
import json
import os
import sched
import time
from datetime import datetime

import luascript.finishtaskscript as finish_script
import luascript.failedtaskscript as failed_script
import luascript.delaytaskscript as delay_script
import common.config as config

import cv2

schedule = sched.scheduler(time.time, time.sleep)

datasetIdKey = ""


def sampleProcess(datasetId, path, frameList, redisClient):
"""Video sampling method.
Args:
datasetId: dataset id.
path: video file path.
frameList: picture frame number list.
redisClient: redis client.
"""
global datasetIdKey
datasetIdJson = {'datasetIdKey': datasetId}
datasetIdKey = json.dumps(datasetIdJson, separators=(',', ':'))
try:
videoName = path.split('/')[-1]
save_path = path.split(videoName)[0].replace("video", "origin")
is_exists = os.path.exists(save_path)
if not is_exists:
os.makedirs(save_path)
print('path of %s is build' % save_path)
else:
print('path of %s already exist and start' % save_path)
cap = cv2.VideoCapture(path)
for i in frameList:
cap.set(cv2.CAP_PROP_POS_FRAMES, i)
success, video_capture = cap.read()
            # Save the sampled frame as an image
if success is True and video_capture is not None:
save_name = save_path + videoName.split('.')[0] + '_' + str(i) + '.jpg'
cv2.imwrite(save_name, video_capture)
redisClient.lpush("videoSample_pictures:" + datasetId,
'{' + '\"pictureName\":' + "\"" + save_name + "\"" + '}')
print('image of %s is saved' % save_name)
print('video is all read')
redisClient.eval(finish_script.finishTaskLua, 3, config.videoStartQueue, config.videoFinishQueue,
"videoSample:" + str(datasetId),
datasetIdKey, str(datasetIdKey))
except Exception as e:
print(e)
redisClient.eval(failed_script.failedTaskLua, 4, config.videoStartQueue, config.videoFailedQueue,
"videoSample_pictures:" + datasetId,
"videoSample:" + str(datasetId),
datasetIdKey, str(datasetIdKey))


def delaySchduled(inc, redisClient):
"""Delay task method.
Args:
inc: scheduled task time.
redisClient: redis client.
"""
try:
print("delay:" + datetime.now().strftime("B%Y-%m-%d %H:%M:%S"))
redisClient.eval(delay_script.delayTaskLua, 1, config.videoStartQueue, datasetIdKey, int(time.time()))
schedule.enter(inc, 0, delaySchduled, (inc, redisClient))
except Exception as e:
print("delay error" + e)


def delayKeyThread(redisClient):
"""Delay task thread.
Args:
redisClient: redis client.
"""
schedule.enter(0, 0, delaySchduled, (5, redisClient))
schedule.run()
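
A minimal, Redis-free sketch of the sampling loop in sampleProcess, using only the OpenCV calls seen above; the video path and frame numbers are hypothetical:

import cv2

path = "/nfs/dataset/25/video/sample.mp4"
frameList = [0, 30, 60]
cap = cv2.VideoCapture(path)
for i in frameList:
    cap.set(cv2.CAP_PROP_POS_FRAMES, i)   # seek to frame i
    success, frame = cap.read()
    if success and frame is not None:
        cv2.imwrite("sample_%d.jpg" % i, frame)
cap.release()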
