Browse Source

update pipeline according to online demo requirements

按在线demo前端的要求,将输出改成单独一个numpy格式的图片 (Per the online demo front-end's requirements, change the output to a single numpy-format image.)
        Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10912907
master^2
qianmu.ywh yingda.chen 2 years ago
parent
commit
cc27e3a25e
2 changed files with 5 additions and 2 deletions
  1. +4
    -1
      modelscope/pipelines/cv/image_depth_estimation_pipeline.py
  2. +1
    -1
      tests/pipelines/test_image_depth_estimation.py

+ 4
- 1
modelscope/pipelines/cv/image_depth_estimation_pipeline.py View File

@@ -47,6 +47,9 @@ class ImageDepthEstimationPipeline(Pipeline):


def postprocess(self, inputs: Dict[str, Any]) -> Dict[str, Any]:
    """Convert raw model outputs into demo-friendly results.

    Args:
        inputs: Raw forward-pass outputs, handed to the underlying
            model's own ``postprocess`` for decoding.

    Returns:
        A dict mapping ``OutputKeys.DEPTHS`` to the depth map. When the
        model yields a ``torch.Tensor``, it is detached, moved to CPU,
        squeezed and converted to a numpy array (the online demo
        front-end expects a single numpy-format image); any other value
        is passed through unchanged.
    """
    results = self.model.postprocess(inputs)
    depth_map = results[OutputKeys.DEPTHS]
    # The demo front-end consumes a plain numpy image, not a tensor.
    if isinstance(depth_map, torch.Tensor):
        depth_map = depth_map.detach().cpu().squeeze().numpy()
    return {OutputKeys.DEPTHS: depth_map}

+ 1
- 1
tests/pipelines/test_image_depth_estimation.py View File

@@ -25,7 +25,7 @@ class ImageDepthEstimationTest(unittest.TestCase, DemoCompatibilityCheck):
    estimator = pipeline(Tasks.image_depth_estimation, model=self.model_id)
    result = estimator(input_location)
    depths = result[OutputKeys.DEPTHS]
-   depth_viz = depth_to_color(depths[0].squeeze().cpu().numpy())
+   depth_viz = depth_to_color(depths)
    cv2.imwrite('result.jpg', depth_viz)


    print('test_image_depth_estimation DONE')


Loading…
Cancel
Save