Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10894248 (master^2)
@@ -25,7 +25,14 @@ def seg_resize(input,
                     'the output would more aligned if '
                     f'input size {(input_h, input_w)} is `x+1` and '
                     f'out size {(output_h, output_w)} is `nx+1`')
-    return F.interpolate(input, size, scale_factor, mode, align_corners)
+    try:
+        return F.interpolate(input, size, scale_factor, mode, align_corners)
+    except ValueError:
+        if isinstance(size, tuple):
+            if len(size) == 3:
+                size = size[:2]
+        return F.interpolate(input, size, scale_factor, mode, align_corners)


 def add_prefix(inputs, prefix):
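Reviewer note (not part of the diff): a minimal sketch of the failure the new try/except guards against, assuming the caller passes a 4D NCHW tensor together with a 3-element `size` (e.g. an (H, W, C) image shape leaking in). `F.interpolate` raises `ValueError` when `len(size)` does not match the number of spatial dims, and retrying with `size[:2]` succeeds; the input tensor and size values below are made up for illustration.

# Minimal repro of the ValueError branch above (illustrative only).
import torch
import torch.nn.functional as F

x = torch.randn(1, 3, 32, 32)   # NCHW input -> two spatial dims
size = (64, 64, 3)              # hypothetical 3-element size, e.g. an image shape

try:
    out = F.interpolate(x, size, None, 'bilinear', False)
except ValueError:
    # F.interpolate requires len(size) == number of spatial dims (2 here),
    # so drop the trailing element and retry, exactly as the patch does.
    out = F.interpolate(x, size[:2], None, 'bilinear', False)

print(out.shape)  # torch.Size([1, 3, 64, 64])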
@@ -38,8 +38,9 @@ class ImageSemanticSegmentationTest(unittest.TestCase, DemoCompatibilityCheck):
     @unittest.skipUnless(test_level() >= 0, 'skip test in current test level')
     def test_image_semantic_segmentation_vitadapter(self):
+        model_id = 'damo/cv_vitadapter_semantic-segmentation_cocostuff164k'
         input_location = 'data/test/images/image_semantic_segmentation.jpg'
-        segmenter = pipeline(Tasks.image_segmentation, model=self.model_id)
+        segmenter = pipeline(Tasks.image_segmentation, model=model_id)
         result = segmenter(input_location)
         draw_img = semantic_seg_masks_to_image(result[OutputKeys.MASKS])
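Reviewer note (not part of the diff): a standalone sketch of what the updated test exercises end to end. The pipeline call, `Tasks.image_segmentation`, and `OutputKeys.MASKS` come from the hunk above; the exact import path for `semantic_seg_masks_to_image` and the OpenCV save step are assumptions for illustration.

# Standalone sketch of the test flow above; import paths and the save step are assumptions.
import cv2
from modelscope.outputs import OutputKeys
from modelscope.pipelines import pipeline
from modelscope.utils.constant import Tasks
from modelscope.utils.cv.image_utils import semantic_seg_masks_to_image  # assumed path

model_id = 'damo/cv_vitadapter_semantic-segmentation_cocostuff164k'
segmenter = pipeline(Tasks.image_segmentation, model=model_id)

result = segmenter('data/test/images/image_semantic_segmentation.jpg')
draw_img = semantic_seg_masks_to_image(result[OutputKeys.MASKS])  # overlay masks on the image
cv2.imwrite('result.jpg', draw_img)  # assumed: draw_img is an HxWx3 ndarray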