From 0e52a20d2889bca5c0f8165d3013bd46de4afccc Mon Sep 17 00:00:00 2001 From: "chaojie.mcj" Date: Wed, 28 Sep 2022 14:30:37 +0800 Subject: [PATCH] [to #42322933] update license MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The following algorithms had their license headers updated: modelscope.models.cv.cmdssl_video_embedding modelscope.models.cv.action_recognition modelscope.models.cv.animal_recognition modelscope.models.multi_modal.multi_stage_diffusion modelscope.models.multi_modal.gemm modelscope.pipelines.cv.live_category_pipeline modelscope.pipelines.cv.video_category_pipeline modelscope.models.cv.image_to_image_translation modelscope.models.cv.image_to_image_generation modelscope.models.cv.video_inpainting modelscope.models.multi_modal.diffusion modelscope.models.multi_modal.team modelscope.models.cv.shop_segmentation modelscope.models.cv.text_driven_segmentation modelscope.models.cv.face_emotion modelscope.models.cv.hand_static Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10268474 --- .../models/cv/action_recognition/models.py | 3 +++ modelscope/models/cv/action_recognition/s3dg.py | 3 +++ .../cv/action_recognition/tada_convnext.py | 4 ++++ .../models/cv/animal_recognition/resnet.py | 3 +++ .../models/cv/animal_recognition/splat.py | 3 +++ .../cv/cmdssl_video_embedding/__init__.py | 3 ++- .../models/cv/cmdssl_video_embedding/c3d.py | 8 ++++++++ .../cv/cmdssl_video_embedding/resnet2p1d.py | 8 ++++++++ .../cv/cmdssl_video_embedding/resnet3d.py | 8 ++++++++ .../models/cv/shop_segmentation/common.py | 14 ++++++-------- .../models/cv/shop_segmentation/head_fpn.py | 14 ++++++-------- .../models/cv/shop_segmentation/models.py | 14 ++++++-------- .../models/cv/shop_segmentation/neck_fpn.py | 14 ++++++-------- .../cv/shop_segmentation/shop_seg_base.py | 14 ++++++-------- .../cv/shop_segmentation/shop_seg_model.py | 2 ++ modelscope/models/cv/shop_segmentation/utils.py | 7 +++---- .../cv/text_driven_segmentation/__init__.py | 1 + .../models/cv/text_driven_segmentation/clip.py | 7 +++---- .../cv/text_driven_segmentation/lseg_base.py | 6 ++---- .../cv/text_driven_segmentation/lseg_blocks.py | 6 ++---- .../cv/text_driven_segmentation/lseg_model.py | 2 ++ .../cv/text_driven_segmentation/lseg_net.py | 6 ++---- .../cv/text_driven_segmentation/lseg_vit.py | 6 ++---- .../models/cv/text_driven_segmentation/model.py | 6 ++---- .../simple_tokenizer.py | 7 +++---- .../models/multi_modal/diffusion/diffusion.py | 3 +++ .../models/multi_modal/diffusion/model.py | 1 + .../multi_modal/diffusion/unet_generator.py | 3 +++ .../diffusion/unet_upsampler_1024.py | 3 +++ .../multi_modal/diffusion/unet_upsampler_256.py | 3 +++ modelscope/models/multi_modal/gemm/gemm_base.py | 17 +++++++++++------ .../models/multi_modal/gemm/gemm_model.py | 2 ++ modelscope/models/multi_modal/gemm/tokenizer.py | 12 ++++++++---- modelscope/models/multi_modal/mmr/__init__.py | 2 ++ .../mmr/dataloaders/rawvideo_util.py | 3 +++ .../models/multi_modal/mmr/models/__init__.py | 2 ++ .../mmr/models/clip_for_mm_video_embedding.py | 3 +++ .../mmr/models/dynamic_inverted_softmax.py | 3 +++ .../models/multi_modal/mmr/models/modeling.py | 2 ++ .../multi_modal/mmr/models/module_clip.py | 3 ++- .../multi_modal/mmr/models/module_cross.py | 3 +++ .../multi_modal/mmr/models/tokenization_clip.py | 3 +++ .../multi_modal/multi_stage_diffusion/clip.py | 3 ++- .../multi_stage_diffusion/decoder.py | 2 +- .../multi_stage_diffusion/gaussian_diffusion.py | 5 +++--
.../multi_modal/multi_stage_diffusion/model.py | 2 +- .../multi_modal/multi_stage_diffusion/prior.py | 2 +- .../multi_stage_diffusion/tokenizer.py | 3 ++- .../multi_stage_diffusion/upsampler.py | 2 +- .../multi_modal/multi_stage_diffusion/xglm.py | 5 +++-- .../models/multi_modal/team/team_model.py | 1 + modelscope/models/multi_modal/team/utils.py | 11 +++++++---- .../pipelines/cv/animal_recognition_pipeline.py | 1 + .../cv/cmdssl_video_embedding_pipeline.py | 2 ++ .../cv/general_recognition_pipeline.py | 1 + .../pipelines/cv/live_category_pipeline.py | 2 +- .../pipelines/cv/shop_segmentation_pipleline.py | 1 + .../cv/text_driven_segmentation_pipleline.py | 1 + .../pipelines/cv/video_category_pipeline.py | 2 +- ...generative_multi_modal_embedding_pipeline.py | 2 +- .../team_multi_modal_similarity_pipeline.py | 3 +-- tests/pipelines/test_cmdssl_video_embedding.py | 2 +- .../test_generative_multi_modal_embedding.py | 2 +- tests/pipelines/test_multi_modal_similarity.py | 2 +- 64 files changed, 188 insertions(+), 106 deletions(-) diff --git a/modelscope/models/cv/action_recognition/models.py b/modelscope/models/cv/action_recognition/models.py index a5964e21..f16805fb 100644 --- a/modelscope/models/cv/action_recognition/models.py +++ b/modelscope/models/cv/action_recognition/models.py @@ -1,3 +1,6 @@ +# The implementation is also open-sourced by the authors, +# and available at https://github.com/alibaba-mmai-research/TAdaConv +# Copyright 2021-2022 The Alibaba FVI Team Authors. All rights reserved. import torch.nn as nn from .s3dg import Inception3D diff --git a/modelscope/models/cv/action_recognition/s3dg.py b/modelscope/models/cv/action_recognition/s3dg.py index f258df16..46e76892 100644 --- a/modelscope/models/cv/action_recognition/s3dg.py +++ b/modelscope/models/cv/action_recognition/s3dg.py @@ -1,3 +1,6 @@ +# The implementation is adopted from https://github.com/TengdaHan/CoCLR, +# made publicly available under the Apache License, Version 2.0 at https://github.com/TengdaHan/CoCLR +# Copyright 2021-2022 The Alibaba FVI Team Authors. All rights reserved. import torch import torch.nn as nn diff --git a/modelscope/models/cv/action_recognition/tada_convnext.py b/modelscope/models/cv/action_recognition/tada_convnext.py index 379b5271..b1de7af8 100644 --- a/modelscope/models/cv/action_recognition/tada_convnext.py +++ b/modelscope/models/cv/action_recognition/tada_convnext.py @@ -1,3 +1,7 @@ +# The implementation is adopted from https://github.com/facebookresearch/ConvNeXt, +# made publicly available under the MIT License at https://github.com/facebookresearch/ConvNeXt +# Copyright 2021-2022 The Alibaba FVI Team Authors. All rights reserved. 
+ import math import torch diff --git a/modelscope/models/cv/animal_recognition/resnet.py b/modelscope/models/cv/animal_recognition/resnet.py index 73953de4..d7c03c29 100644 --- a/modelscope/models/cv/animal_recognition/resnet.py +++ b/modelscope/models/cv/animal_recognition/resnet.py @@ -1,3 +1,6 @@ +# The implementation is adopted from Split-Attention Network, A New ResNet Variant, +# made publicly available under the Apache License 2.0 +# at https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/models/resnet.py import math import torch diff --git a/modelscope/models/cv/animal_recognition/splat.py b/modelscope/models/cv/animal_recognition/splat.py index 0aab555e..a10d0abe 100644 --- a/modelscope/models/cv/animal_recognition/splat.py +++ b/modelscope/models/cv/animal_recognition/splat.py @@ -1,3 +1,6 @@ +# The implementation is adopted from Split-Attention Network, A New ResNet Variant, +# made publicly available under the Apache License 2.0 +# at https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/models/splat.py """Split-Attention""" import torch diff --git a/modelscope/models/cv/cmdssl_video_embedding/__init__.py b/modelscope/models/cv/cmdssl_video_embedding/__init__.py index e7e156a5..5bc67b63 100644 --- a/modelscope/models/cv/cmdssl_video_embedding/__init__.py +++ b/modelscope/models/cv/cmdssl_video_embedding/__init__.py @@ -1,4 +1,5 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. + from typing import TYPE_CHECKING from modelscope.utils.import_utils import LazyImportModule diff --git a/modelscope/models/cv/cmdssl_video_embedding/c3d.py b/modelscope/models/cv/cmdssl_video_embedding/c3d.py index 62f0e0b9..53dd05a1 100644 --- a/modelscope/models/cv/cmdssl_video_embedding/c3d.py +++ b/modelscope/models/cv/cmdssl_video_embedding/c3d.py @@ -1,3 +1,11 @@ +# Copyright 2022 Davide Abati. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. + +# The implementation here is modified based on c3d-pytorch, +# originally MIT License, Copyright (c) 2022 Davide Abati, +# and publicly available at https://github.com/DavideA/c3d-pytorch +""" C3D Model Architecture.""" + import torch import torch.nn as nn diff --git a/modelscope/models/cv/cmdssl_video_embedding/resnet2p1d.py b/modelscope/models/cv/cmdssl_video_embedding/resnet2p1d.py index 3b03cc74..b49069d1 100644 --- a/modelscope/models/cv/cmdssl_video_embedding/resnet2p1d.py +++ b/modelscope/models/cv/cmdssl_video_embedding/resnet2p1d.py @@ -1,3 +1,11 @@ +# Copyright (c) 2022 Kensho Hara. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. + +# The implementation here is modified based on 3D-ResNets-PyTorch, +# originally MIT License, Copyright (c) 2022 Kensho Hara, +# and publicly available at https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/models/resnet2p1d.py +""" ResNet2plus1d Model Architecture.""" + import torch import torch.nn as nn diff --git a/modelscope/models/cv/cmdssl_video_embedding/resnet3d.py b/modelscope/models/cv/cmdssl_video_embedding/resnet3d.py index 24d50a8e..dddba06f 100644 --- a/modelscope/models/cv/cmdssl_video_embedding/resnet3d.py +++ b/modelscope/models/cv/cmdssl_video_embedding/resnet3d.py @@ -1,3 +1,11 @@ +# Copyright (c) 2022 Kensho Hara. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. 
+ +# The implementation here is modified based on 3D-ResNets-PyTorch, +# originally MIT License, Copyright (c) 2022 Kensho Hara, +# and publicly available at https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/models/resnet.py +""" ResNet3D Model Architecture.""" + import torch import torch.nn as nn diff --git a/modelscope/models/cv/shop_segmentation/common.py b/modelscope/models/cv/shop_segmentation/common.py index 00ba9996..8cb940a5 100644 --- a/modelscope/models/cv/shop_segmentation/common.py +++ b/modelscope/models/cv/shop_segmentation/common.py @@ -1,11 +1,9 @@ -""" -Base modules are adapted from https://github.com/open-mmlab/mmcv/, -originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, -https://github.com/open-mmlab/mmsegmentation/, -originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, -and adapted from https://github.com/raoyongming/DenseCLIP/, -originally MIT License, Copyright (c) 2022 Rao, Yongming. -""" +# Base modules are adapted from https://github.com/open-mmlab/mmcv/, +# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, +# https://github.com/open-mmlab/mmsegmentation/, +# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, +# and adapted from https://github.com/raoyongming/DenseCLIP/, +# originally MIT License, Copyright (c) 2022 Rao, Yongming. import warnings diff --git a/modelscope/models/cv/shop_segmentation/head_fpn.py b/modelscope/models/cv/shop_segmentation/head_fpn.py index b3faa9b8..cad389c7 100644 --- a/modelscope/models/cv/shop_segmentation/head_fpn.py +++ b/modelscope/models/cv/shop_segmentation/head_fpn.py @@ -1,11 +1,9 @@ -""" FPNHead -Base modules are adapted from https://github.com/open-mmlab/mmcv/, -originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, -https://github.com/open-mmlab/mmsegmentation/, -originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, -and adapted from https://github.com/raoyongming/DenseCLIP/, -originally MIT License, Copyright (c) 2022 Rao, Yongming. -""" +# Base modules are adapted from https://github.com/open-mmlab/mmcv/, +# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, +# https://github.com/open-mmlab/mmsegmentation/, +# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, +# and adapted from https://github.com/raoyongming/DenseCLIP/, +# originally MIT License, Copyright (c) 2022 Rao, Yongming. import numpy as np import torch diff --git a/modelscope/models/cv/shop_segmentation/models.py b/modelscope/models/cv/shop_segmentation/models.py index 171aafbd..3880d074 100644 --- a/modelscope/models/cv/shop_segmentation/models.py +++ b/modelscope/models/cv/shop_segmentation/models.py @@ -1,11 +1,9 @@ -""" -Base modules are adapted from https://github.com/open-mmlab/mmcv/, -originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, -https://github.com/open-mmlab/mmsegmentation/, -originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, -and adapted from https://github.com/raoyongming/DenseCLIP/, -originally MIT License, Copyright (c) 2022 Rao, Yongming. -""" +# Base modules are adapted from https://github.com/open-mmlab/mmcv/, +# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, +# https://github.com/open-mmlab/mmsegmentation/, +# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, +# and adapted from https://github.com/raoyongming/DenseCLIP/, +# originally MIT License, Copyright (c) 2022 Rao, Yongming. 
import math from collections import OrderedDict diff --git a/modelscope/models/cv/shop_segmentation/neck_fpn.py b/modelscope/models/cv/shop_segmentation/neck_fpn.py index 108cb043..aa4d7159 100644 --- a/modelscope/models/cv/shop_segmentation/neck_fpn.py +++ b/modelscope/models/cv/shop_segmentation/neck_fpn.py @@ -1,11 +1,9 @@ -""" FPNneck -Base modules are adapted from https://github.com/open-mmlab/mmcv/, -originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, -https://github.com/open-mmlab/mmsegmentation/, -originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, -and adapted from https://github.com/raoyongming/DenseCLIP/, -originally MIT License, Copyright (c) 2022 Rao, Yongming. -""" +# Base modules are adapted from https://github.com/open-mmlab/mmcv/, +# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, +# https://github.com/open-mmlab/mmsegmentation/, +# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, +# and adapted from https://github.com/raoyongming/DenseCLIP/, +# originally MIT License, Copyright (c) 2022 Rao, Yongming. import torch.nn as nn import torch.nn.functional as F diff --git a/modelscope/models/cv/shop_segmentation/shop_seg_base.py b/modelscope/models/cv/shop_segmentation/shop_seg_base.py index e3ae0d54..34686370 100644 --- a/modelscope/models/cv/shop_segmentation/shop_seg_base.py +++ b/modelscope/models/cv/shop_segmentation/shop_seg_base.py @@ -1,11 +1,9 @@ -""" -Base modules are adapted from https://github.com/open-mmlab/mmcv/, -originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, -https://github.com/open-mmlab/mmsegmentation/, -originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, -and adapted from https://github.com/raoyongming/DenseCLIP/, -originally MIT License, Copyright (c) 2022 Rao, Yongming. -""" +# Base modules are adapted from https://github.com/open-mmlab/mmcv/, +# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab, +# https://github.com/open-mmlab/mmsegmentation/, +# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab, +# and adapted from https://github.com/raoyongming/DenseCLIP/, +# originally MIT License, Copyright (c) 2022 Rao, Yongming. import torch import torch.nn as nn diff --git a/modelscope/models/cv/shop_segmentation/shop_seg_model.py b/modelscope/models/cv/shop_segmentation/shop_seg_model.py index 0aeeb1de..ac0d67fa 100644 --- a/modelscope/models/cv/shop_segmentation/shop_seg_model.py +++ b/modelscope/models/cv/shop_segmentation/shop_seg_model.py @@ -1,3 +1,5 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + import os.path as osp from typing import Any, Dict diff --git a/modelscope/models/cv/shop_segmentation/utils.py b/modelscope/models/cv/shop_segmentation/utils.py index c41f8a65..4035b0ef 100644 --- a/modelscope/models/cv/shop_segmentation/utils.py +++ b/modelscope/models/cv/shop_segmentation/utils.py @@ -1,7 +1,6 @@ -""" CLIP Tokenizer -Adapted from https://github.com/openai/CLIP. -Originally MIT License, Copyright (c) 2021 OpenAI. -""" +# CLIP Tokenizer +# Adapted from https://github.com/openai/CLIP. +# Originally MIT License, Copyright (c) 2021 OpenAI. import gzip import html diff --git a/modelscope/models/cv/text_driven_segmentation/__init__.py b/modelscope/models/cv/text_driven_segmentation/__init__.py index 46daad78..aefaa698 100644 --- a/modelscope/models/cv/text_driven_segmentation/__init__.py +++ b/modelscope/models/cv/text_driven_segmentation/__init__.py @@ -1 +1,2 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. 
from .lseg_base import TextDrivenSegmentation diff --git a/modelscope/models/cv/text_driven_segmentation/clip.py b/modelscope/models/cv/text_driven_segmentation/clip.py index 440cccea..1cec5f39 100644 --- a/modelscope/models/cv/text_driven_segmentation/clip.py +++ b/modelscope/models/cv/text_driven_segmentation/clip.py @@ -1,7 +1,6 @@ -""" CLIP -Adapted from https://github.com/openai/CLIP. -Originally MIT License, Copyright (c) 2021 OpenAI. -""" +# CLIP +# Adapted from https://github.com/openai/CLIP. +# Originally MIT License, Copyright (c) 2021 OpenAI. import hashlib import os diff --git a/modelscope/models/cv/text_driven_segmentation/lseg_base.py b/modelscope/models/cv/text_driven_segmentation/lseg_base.py index 20915396..c79861a7 100644 --- a/modelscope/models/cv/text_driven_segmentation/lseg_base.py +++ b/modelscope/models/cv/text_driven_segmentation/lseg_base.py @@ -1,7 +1,5 @@ -""" -Adapted from https://github.com/isl-org/lang-seg. -Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. -""" +# Adapted from https://github.com/isl-org/lang-seg. +# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. import torch import torch.nn as nn diff --git a/modelscope/models/cv/text_driven_segmentation/lseg_blocks.py b/modelscope/models/cv/text_driven_segmentation/lseg_blocks.py index cb550ab7..56d4a65d 100644 --- a/modelscope/models/cv/text_driven_segmentation/lseg_blocks.py +++ b/modelscope/models/cv/text_driven_segmentation/lseg_blocks.py @@ -1,7 +1,5 @@ -""" -Adapted from https://github.com/isl-org/lang-seg. -Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. -""" +# Adapted from https://github.com/isl-org/lang-seg. +# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. import torch import torch.nn as nn diff --git a/modelscope/models/cv/text_driven_segmentation/lseg_model.py b/modelscope/models/cv/text_driven_segmentation/lseg_model.py index 1d7ebdd1..9a5754c6 100644 --- a/modelscope/models/cv/text_driven_segmentation/lseg_model.py +++ b/modelscope/models/cv/text_driven_segmentation/lseg_model.py @@ -1,3 +1,5 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. + import os.path as osp from typing import Any, Dict diff --git a/modelscope/models/cv/text_driven_segmentation/lseg_net.py b/modelscope/models/cv/text_driven_segmentation/lseg_net.py index 1a558c5c..541a4a38 100644 --- a/modelscope/models/cv/text_driven_segmentation/lseg_net.py +++ b/modelscope/models/cv/text_driven_segmentation/lseg_net.py @@ -1,7 +1,5 @@ -""" -Adapted from https://github.com/isl-org/lang-seg. -Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. -""" +# Adapted from https://github.com/isl-org/lang-seg. +# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. import numpy as np import torch diff --git a/modelscope/models/cv/text_driven_segmentation/lseg_vit.py b/modelscope/models/cv/text_driven_segmentation/lseg_vit.py index be2813c2..5298832f 100644 --- a/modelscope/models/cv/text_driven_segmentation/lseg_vit.py +++ b/modelscope/models/cv/text_driven_segmentation/lseg_vit.py @@ -1,7 +1,5 @@ -""" -Adapted from https://github.com/isl-org/lang-seg. -Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. -""" +# Adapted from https://github.com/isl-org/lang-seg. +# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. 
import math import types diff --git a/modelscope/models/cv/text_driven_segmentation/model.py b/modelscope/models/cv/text_driven_segmentation/model.py index ece10bab..f98d480d 100644 --- a/modelscope/models/cv/text_driven_segmentation/model.py +++ b/modelscope/models/cv/text_driven_segmentation/model.py @@ -1,7 +1,5 @@ -""" -Adapted from https://github.com/isl-org/lang-seg. -Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. -""" +# Adapted from https://github.com/isl-org/lang-seg. +# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org. from collections import OrderedDict from typing import Tuple, Union diff --git a/modelscope/models/cv/text_driven_segmentation/simple_tokenizer.py b/modelscope/models/cv/text_driven_segmentation/simple_tokenizer.py index 250d680f..361d67c6 100644 --- a/modelscope/models/cv/text_driven_segmentation/simple_tokenizer.py +++ b/modelscope/models/cv/text_driven_segmentation/simple_tokenizer.py @@ -1,7 +1,6 @@ -""" CLIP -Adapted from https://github.com/openai/CLIP. -Originally MIT License, Copyright (c) 2021 OpenAI. -""" +# CLIP +# Adapted from https://github.com/openai/CLIP. +# Originally MIT License, Copyright (c) 2021 OpenAI. import gzip import html diff --git a/modelscope/models/multi_modal/diffusion/diffusion.py b/modelscope/models/multi_modal/diffusion/diffusion.py index d71fe0ae..bfe7baf7 100644 --- a/modelscope/models/multi_modal/diffusion/diffusion.py +++ b/modelscope/models/multi_modal/diffusion/diffusion.py @@ -1,3 +1,6 @@ +# Part of the implementation is borrowed and modified from latent-diffusion, +# publicly available at https://github.com/CompVis/latent-diffusion. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math import torch diff --git a/modelscope/models/multi_modal/diffusion/model.py b/modelscope/models/multi_modal/diffusion/model.py index 8617b8dd..4229391f 100644 --- a/modelscope/models/multi_modal/diffusion/model.py +++ b/modelscope/models/multi_modal/diffusion/model.py @@ -1,3 +1,4 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import os.path as osp from typing import Any, Dict diff --git a/modelscope/models/multi_modal/diffusion/unet_generator.py b/modelscope/models/multi_modal/diffusion/unet_generator.py index 9b507223..539d3996 100644 --- a/modelscope/models/multi_modal/diffusion/unet_generator.py +++ b/modelscope/models/multi_modal/diffusion/unet_generator.py @@ -1,3 +1,6 @@ +# Part of the implementation is borrowed and modified from latent-diffusion, +# publicly available at https://github.com/CompVis/latent-diffusion. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math import torch diff --git a/modelscope/models/multi_modal/diffusion/unet_upsampler_1024.py b/modelscope/models/multi_modal/diffusion/unet_upsampler_1024.py index 1c66b2fe..38cff6a2 100644 --- a/modelscope/models/multi_modal/diffusion/unet_upsampler_1024.py +++ b/modelscope/models/multi_modal/diffusion/unet_upsampler_1024.py @@ -1,3 +1,6 @@ +# Part of the implementation is borrowed and modified from latent-diffusion, +# publicly available at https://github.com/CompVis/latent-diffusion. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. 
import math import torch diff --git a/modelscope/models/multi_modal/diffusion/unet_upsampler_256.py b/modelscope/models/multi_modal/diffusion/unet_upsampler_256.py index 0da8b805..ca5cd7d6 100644 --- a/modelscope/models/multi_modal/diffusion/unet_upsampler_256.py +++ b/modelscope/models/multi_modal/diffusion/unet_upsampler_256.py @@ -1,3 +1,6 @@ +# Part of the implementation is borrowed and modified from latent-diffusion, +# publicly available at https://github.com/CompVis/latent-diffusion. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math from functools import partial diff --git a/modelscope/models/multi_modal/gemm/gemm_base.py b/modelscope/models/multi_modal/gemm/gemm_base.py index db928212..09ef2480 100644 --- a/modelscope/models/multi_modal/gemm/gemm_base.py +++ b/modelscope/models/multi_modal/gemm/gemm_base.py @@ -1,9 +1,14 @@ -""" Generative Multimodal Model -Base modules are adapted from https://github.com/openai/CLIP/, -originally MIT License, Copyright (c) 2021 OpenAI, -and adapted from https://github.com/lucidrains/CoCa-pytorch/, -originally MIT License, Copyright (c) 2022 Phil Wang. -""" +# Copyright 2021 The OpenAI Team Authors. +# Copyright 2022 Phil Wang. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. +# +# The implementation here is modified based on OpenAI CLIP, +# originally MIT License, Copyright (c) 2021 OpenAI, +# and publicly available at https://github.com/openai/CLIP/. +# The implementation here is modified based on CoCa-pytorch, +# originally MIT License, Copyright (c) 2022 Phil Wang, +# and publicly available at https://github.com/lucidrains/CoCa-pytorch/. +""" Generative Multimodal Model Architecture.""" import os from collections import OrderedDict diff --git a/modelscope/models/multi_modal/gemm/gemm_model.py b/modelscope/models/multi_modal/gemm/gemm_model.py index 356dc8d3..55b211c0 100644 --- a/modelscope/models/multi_modal/gemm/gemm_model.py +++ b/modelscope/models/multi_modal/gemm/gemm_model.py @@ -1,3 +1,5 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. +""" Generative Multimodal Model Wrapper.""" import os.path as osp from typing import Any, Dict diff --git a/modelscope/models/multi_modal/gemm/tokenizer.py b/modelscope/models/multi_modal/gemm/tokenizer.py index af962ceb..8b7cc094 100644 --- a/modelscope/models/multi_modal/gemm/tokenizer.py +++ b/modelscope/models/multi_modal/gemm/tokenizer.py @@ -1,7 +1,11 @@ -""" CLIP Tokenizer -Adapted from https://github.com/openai/CLIP. -Originally MIT License, Copyright (c) 2021 OpenAI. -""" +# Copyright 2021 The OpenAI Team Authors. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. +# +# The implementation here is modified based on OpenAI CLIP, +# originally MIT License, Copyright (c) 2021 OpenAI, +# and publicly available at https://github.com/openai/CLIP/. +""" CLIP Tokenizer.""" + import gzip import html import os diff --git a/modelscope/models/multi_modal/mmr/__init__.py b/modelscope/models/multi_modal/mmr/__init__.py index c5fb7419..9dac8409 100644 --- a/modelscope/models/multi_modal/mmr/__init__.py +++ b/modelscope/models/multi_modal/mmr/__init__.py @@ -1 +1,3 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. 
+ from .models import VideoCLIPForMultiModalEmbedding diff --git a/modelscope/models/multi_modal/mmr/dataloaders/rawvideo_util.py b/modelscope/models/multi_modal/mmr/dataloaders/rawvideo_util.py index eab1189f..c7ac3f94 100644 --- a/modelscope/models/multi_modal/mmr/dataloaders/rawvideo_util.py +++ b/modelscope/models/multi_modal/mmr/dataloaders/rawvideo_util.py @@ -1,3 +1,6 @@ +# The implementation is adopted from CLIP4Clip by Huaishao Luo, +# made publicly available under the MIT License at https://github.com/ArrowLuo/CLIP4Clip + import cv2 import numpy as np import torch as th diff --git a/modelscope/models/multi_modal/mmr/models/__init__.py b/modelscope/models/multi_modal/mmr/models/__init__.py index 6cd06bcd..da832719 100644 --- a/modelscope/models/multi_modal/mmr/models/__init__.py +++ b/modelscope/models/multi_modal/mmr/models/__init__.py @@ -1 +1,3 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. + from .clip_for_mm_video_embedding import VideoCLIPForMultiModalEmbedding diff --git a/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py b/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py index 8d13e745..5e8e2e7a 100644 --- a/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py +++ b/modelscope/models/multi_modal/mmr/models/clip_for_mm_video_embedding.py @@ -1,3 +1,6 @@ +# The implementation is adopted from the CLIP4Clip implementation, +# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip + import random from os.path import exists from typing import Any, Dict diff --git a/modelscope/models/multi_modal/mmr/models/dynamic_inverted_softmax.py b/modelscope/models/multi_modal/mmr/models/dynamic_inverted_softmax.py index 572f44bc..253a847c 100644 --- a/modelscope/models/multi_modal/mmr/models/dynamic_inverted_softmax.py +++ b/modelscope/models/multi_modal/mmr/models/dynamic_inverted_softmax.py @@ -1,3 +1,6 @@ +# The implementation is adopted from the CLIP4Clip implementation, +# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip + import numpy as np diff --git a/modelscope/models/multi_modal/mmr/models/modeling.py b/modelscope/models/multi_modal/mmr/models/modeling.py index 21cc4c80..dc6510bf 100644 --- a/modelscope/models/multi_modal/mmr/models/modeling.py +++ b/modelscope/models/multi_modal/mmr/models/modeling.py @@ -1,3 +1,5 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. + import os import platform from collections import OrderedDict diff --git a/modelscope/models/multi_modal/mmr/models/module_clip.py b/modelscope/models/multi_modal/mmr/models/module_clip.py index 36e56196..53501720 100644 --- a/modelscope/models/multi_modal/mmr/models/module_clip.py +++ b/modelscope/models/multi_modal/mmr/models/module_clip.py @@ -1,4 +1,5 @@ -# Part of the implementation is borrowed and modified from The OpenAI CLIP project. 
+# The implementation is adopted from the CLIP4Clip implementation, +# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip import hashlib import os diff --git a/modelscope/models/multi_modal/mmr/models/module_cross.py b/modelscope/models/multi_modal/mmr/models/module_cross.py index 05edb853..b958d5bc 100644 --- a/modelscope/models/multi_modal/mmr/models/module_cross.py +++ b/modelscope/models/multi_modal/mmr/models/module_cross.py @@ -1,3 +1,6 @@ +# The implementation is adopted from the CLIP4Clip implementation, +# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip + from __future__ import absolute_import, division, print_function import logging from collections import OrderedDict diff --git a/modelscope/models/multi_modal/mmr/models/tokenization_clip.py b/modelscope/models/multi_modal/mmr/models/tokenization_clip.py index ee60f857..4e2c9b15 100644 --- a/modelscope/models/multi_modal/mmr/models/tokenization_clip.py +++ b/modelscope/models/multi_modal/mmr/models/tokenization_clip.py @@ -1,3 +1,6 @@ +# The implementation is adopted from the CLIP4Clip implementation, +# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip + import gzip import html import os diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/clip.py b/modelscope/models/multi_modal/multi_stage_diffusion/clip.py index 54e971f7..98727066 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/clip.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/clip.py @@ -1,4 +1,5 @@ -# The implementation here is modified based on OpenAI CLIP, publicly available at https://github.com/openai/CLIP. +# Part of the implementation is borrowed and modified from CLIP, publicly available at https://github.com/openai/CLIP. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/decoder.py b/modelscope/models/multi_modal/multi_stage_diffusion/decoder.py index 17daedaf..eb52a48b 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/decoder.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/decoder.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/gaussian_diffusion.py b/modelscope/models/multi_modal/multi_stage_diffusion/gaussian_diffusion.py index a4fc52e0..9677d7c4 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/gaussian_diffusion.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/gaussian_diffusion.py @@ -1,5 +1,6 @@ -# The implementation here is modified based on latent diffusion, publicly available -# at https://github.com/CompVis/latent-diffusion. +# Part of the implementation is borrowed and modified from latent-diffusion, +# publicly available at https://github.com/CompVis/latent-diffusion. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/model.py b/modelscope/models/multi_modal/multi_stage_diffusion/model.py index c2d83b34..59bd837d 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/model.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/model.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. 
and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math import os.path as osp diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/prior.py b/modelscope/models/multi_modal/multi_stage_diffusion/prior.py index 380fa467..9f4ef2d5 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/prior.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/prior.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/tokenizer.py b/modelscope/models/multi_modal/multi_stage_diffusion/tokenizer.py index 6fd9bebe..59d6b304 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/tokenizer.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/tokenizer.py @@ -1,4 +1,5 @@ -# The implementation here is modified based on OpenAI CLIP, publicly available at https://github.com/openai/CLIP. +# Part of the implementation is borrowed and modified from CLIP, publicly available at https://github.com/openai/CLIP. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import gzip import html diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/upsampler.py b/modelscope/models/multi_modal/multi_stage_diffusion/upsampler.py index 4e99a514..a292edae 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/upsampler.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/upsampler.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math diff --git a/modelscope/models/multi_modal/multi_stage_diffusion/xglm.py b/modelscope/models/multi_modal/multi_stage_diffusion/xglm.py index 8a0b3ff1..133da50b 100644 --- a/modelscope/models/multi_modal/multi_stage_diffusion/xglm.py +++ b/modelscope/models/multi_modal/multi_stage_diffusion/xglm.py @@ -1,5 +1,6 @@ -# The implementation here is modified based on HuggingFace XGLM, publicly available -# at https://github.com/huggingface/transformers. +# Part of the implementation is borrowed and modified from HuggingFace XGLM, +# publicly available at https://github.com/huggingface/transformers. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import math diff --git a/modelscope/models/multi_modal/team/team_model.py b/modelscope/models/multi_modal/team/team_model.py index 4aa77e17..8c0e288a 100644 --- a/modelscope/models/multi_modal/team/team_model.py +++ b/modelscope/models/multi_modal/team/team_model.py @@ -1,3 +1,4 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. from typing import Any, Dict import cv2 diff --git a/modelscope/models/multi_modal/team/utils.py b/modelscope/models/multi_modal/team/utils.py index 3b3e394e..73919179 100644 --- a/modelscope/models/multi_modal/team/utils.py +++ b/modelscope/models/multi_modal/team/utils.py @@ -1,7 +1,10 @@ -""" Generative Multimodal Model -Base Transformer code is adapted from https://github.com/openai/CLIP/, -originally MIT License, Copyright (c) 2021 OpenAI, -""" +# Copyright 2021 The OpenAI Team Authors. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. 
+# +# The implementation here is modified based on OpenAI CLIP, +# originally MIT License, Copyright (c) 2021 OpenAI, +# and publicly available at https://github.com/openai/CLIP/. + from collections import OrderedDict from typing import Tuple, Union diff --git a/modelscope/pipelines/cv/animal_recognition_pipeline.py b/modelscope/pipelines/cv/animal_recognition_pipeline.py index 18cba92c..fad14680 100644 --- a/modelscope/pipelines/cv/animal_recognition_pipeline.py +++ b/modelscope/pipelines/cv/animal_recognition_pipeline.py @@ -1,3 +1,4 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import os.path as osp from typing import Any, Dict diff --git a/modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py b/modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py index 9f4e2d93..deb17561 100644 --- a/modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py +++ b/modelscope/pipelines/cv/cmdssl_video_embedding_pipeline.py @@ -1,3 +1,5 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. + import os.path as osp from typing import Any, Dict diff --git a/modelscope/pipelines/cv/general_recognition_pipeline.py b/modelscope/pipelines/cv/general_recognition_pipeline.py index 9ba5117b..07222086 100644 --- a/modelscope/pipelines/cv/general_recognition_pipeline.py +++ b/modelscope/pipelines/cv/general_recognition_pipeline.py @@ -1,3 +1,4 @@ +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import os.path as osp from typing import Any, Dict diff --git a/modelscope/pipelines/cv/live_category_pipeline.py b/modelscope/pipelines/cv/live_category_pipeline.py index c16ba6ba..715998cc 100644 --- a/modelscope/pipelines/cv/live_category_pipeline.py +++ b/modelscope/pipelines/cv/live_category_pipeline.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import os.path as osp from typing import Any, Dict diff --git a/modelscope/pipelines/cv/shop_segmentation_pipleline.py b/modelscope/pipelines/cv/shop_segmentation_pipleline.py index b7fd90b4..d08058c3 100644 --- a/modelscope/pipelines/cv/shop_segmentation_pipleline.py +++ b/modelscope/pipelines/cv/shop_segmentation_pipleline.py @@ -1,3 +1,4 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. from typing import Any, Dict from modelscope.metainfo import Pipelines diff --git a/modelscope/pipelines/cv/text_driven_segmentation_pipleline.py b/modelscope/pipelines/cv/text_driven_segmentation_pipleline.py index 0985b835..c7f9d4c2 100644 --- a/modelscope/pipelines/cv/text_driven_segmentation_pipleline.py +++ b/modelscope/pipelines/cv/text_driven_segmentation_pipleline.py @@ -1,3 +1,4 @@ +# Copyright (c) Alibaba, Inc. and its affiliates. from typing import Any, Dict from modelscope.metainfo import Pipelines diff --git a/modelscope/pipelines/cv/video_category_pipeline.py b/modelscope/pipelines/cv/video_category_pipeline.py index 196d3115..e4c73649 100644 --- a/modelscope/pipelines/cv/video_category_pipeline.py +++ b/modelscope/pipelines/cv/video_category_pipeline.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. 
import os.path as osp from typing import Any, Dict diff --git a/modelscope/pipelines/multi_modal/generative_multi_modal_embedding_pipeline.py b/modelscope/pipelines/multi_modal/generative_multi_modal_embedding_pipeline.py index d3b9fef3..13032314 100644 --- a/modelscope/pipelines/multi_modal/generative_multi_modal_embedding_pipeline.py +++ b/modelscope/pipelines/multi_modal/generative_multi_modal_embedding_pipeline.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. from typing import Any, Dict diff --git a/modelscope/pipelines/multi_modal/team_multi_modal_similarity_pipeline.py b/modelscope/pipelines/multi_modal/team_multi_modal_similarity_pipeline.py index fc123e2f..cafd6555 100644 --- a/modelscope/pipelines/multi_modal/team_multi_modal_similarity_pipeline.py +++ b/modelscope/pipelines/multi_modal/team_multi_modal_similarity_pipeline.py @@ -1,5 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. - +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. from typing import Any, Dict from modelscope.metainfo import Pipelines diff --git a/tests/pipelines/test_cmdssl_video_embedding.py b/tests/pipelines/test_cmdssl_video_embedding.py index 68eae385..5807c075 100644 --- a/tests/pipelines/test_cmdssl_video_embedding.py +++ b/tests/pipelines/test_cmdssl_video_embedding.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. # !/usr/bin/env python import unittest diff --git a/tests/pipelines/test_generative_multi_modal_embedding.py b/tests/pipelines/test_generative_multi_modal_embedding.py index 9232ebd4..7061d736 100644 --- a/tests/pipelines/test_generative_multi_modal_embedding.py +++ b/tests/pipelines/test_generative_multi_modal_embedding.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import unittest diff --git a/tests/pipelines/test_multi_modal_similarity.py b/tests/pipelines/test_multi_modal_similarity.py index 192602b4..a54fbcf0 100644 --- a/tests/pipelines/test_multi_modal_similarity.py +++ b/tests/pipelines/test_multi_modal_similarity.py @@ -1,4 +1,4 @@ -# Copyright (c) Alibaba, Inc. and its affiliates. +# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved. import unittest