The following modules had their file headers changed:

- modelscope.models.cv.cmdssl_video_embedding
- modelscope.models.cv.action_recognition
- modelscope.models.cv.animal_recognition
- modelscope.models.multi_modal.multi_stage_diffusion
- modelscope.models.multi_modal.gemm
- modelscope.pipelines.cv.live_category_pipeline
- modelscope.pipelines.cv.video_category_pipeline
- modelscope.models.cv.image_to_image_translation
- modelscope.models.cv.image_to_image_generation
- modelscope.models.cv.video_inpainting
- modelscope.models.multi_modal.diffusion
- modelscope.models.multi_modal.team
- modelscope.models.cv.shop_segmentation
- modelscope.models.cv.text_driven_segmentation
- modelscope.models.cv.face_emotion
- modelscope.models.cv.hand_static

Link: https://code.alibaba-inc.com/Ali-MaaS/MaaS-lib/codereview/10268474 (master)
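Every hunk below makes the same kind of change: the file either gains an attribution comment naming the upstream project, its license, and its URL, plus an Alibaba copyright line, or has an existing docstring-style header converted to `#` comments. As a rough orientation before reading the diffs, this is a minimal sketch of the target header pattern; the angle-bracket placeholders are not from the diff, and the exact wording varies from file to file:

# The implementation here is modified based on <upstream project>,   # placeholder: upstream name varies per file
# originally <upstream license>, Copyright (c) <year> <upstream authors>,
# and publicly available at <upstream URL>.
# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.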
@@ -1,3 +1,6 @@
+# The implementation is also open-sourced by the authors,
+# and available at https://github.com/alibaba-mmai-research/TAdaConv
+# Copyright 2021-2022 The Alibaba FVI Team Authors. All rights reserved.
 import torch.nn as nn
 from .s3dg import Inception3D
@@ -1,3 +1,6 @@
+# The implementation is adopted from https://github.com/TengdaHan/CoCLR,
+# made publicly available under the Apache License, Version 2.0 at https://github.com/TengdaHan/CoCLR
+# Copyright 2021-2022 The Alibaba FVI Team Authors. All rights reserved.
 import torch
 import torch.nn as nn
@@ -1,3 +1,7 @@
+# The implementation is adopted from https://github.com/facebookresearch/ConvNeXt,
+# made publicly available under the MIT License at https://github.com/facebookresearch/ConvNeXt
+# Copyright 2021-2022 The Alibaba FVI Team Authors. All rights reserved.
 import math
 import torch
@@ -1,3 +1,6 @@
+# The implementation is adopted from Split-Attention Network, A New ResNet Variant,
+# made publicly available under the Apache License 2.0
+# at https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/models/resnet.py
 import math
 import torch
@@ -1,3 +1,6 @@
+# The implementation is adopted from Split-Attention Network, A New ResNet Variant,
+# made publicly available under the Apache License 2.0
+# at https://github.com/zhanghang1989/ResNeSt/blob/master/resnest/torch/models/splat.py
 """Split-Attention"""
 import torch
@@ -1,4 +1,5 @@
 # Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 from typing import TYPE_CHECKING
 from modelscope.utils.import_utils import LazyImportModule
@@ -1,3 +1,11 @@
+# Copyright 2022 Davide Abati.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+# The implementation here is modified based on c3d-pytorch,
+# originally MIT License, Copyright (c) 2022 Davide Abati,
+# and publicly available at https://github.com/DavideA/c3d-pytorch
+""" C3D Model Architecture."""
 import torch
 import torch.nn as nn
@@ -1,3 +1,11 @@
+# Copyright (c) 2022 Kensho Hara.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+# The implementation here is modified based on 3D-ResNets-PyTorch,
+# originally MIT License, Copyright (c) 2022 Kensho Hara,
+# and publicly available at https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/models/resnet2p1d.py
+""" ResNet2plus1d Model Architecture."""
 import torch
 import torch.nn as nn
@@ -1,3 +1,11 @@
+# Copyright (c) 2022 Kensho Hara.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+# The implementation here is modified based on 3D-ResNets-PyTorch,
+# originally MIT License, Copyright (c) 2022 Kensho Hara,
+# and publicly available at https://github.com/kenshohara/3D-ResNets-PyTorch/blob/master/models/resnet.py
+""" ResNet3D Model Architecture."""
 import torch
 import torch.nn as nn
@@ -1,11 +1,9 @@
-"""
-Base modules are adapted from https://github.com/open-mmlab/mmcv/,
-originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
-https://github.com/open-mmlab/mmsegmentation/,
-originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
-and adapted from https://github.com/raoyongming/DenseCLIP/,
-originally MIT License, Copyright (c) 2022 Rao, Yongming.
-"""
+# Base modules are adapted from https://github.com/open-mmlab/mmcv/,
+# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
+# https://github.com/open-mmlab/mmsegmentation/,
+# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
+# and adapted from https://github.com/raoyongming/DenseCLIP/,
+# originally MIT License, Copyright (c) 2022 Rao, Yongming.
 import warnings
@@ -1,11 +1,9 @@
-""" FPNHead
-Base modules are adapted from https://github.com/open-mmlab/mmcv/,
-originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
-https://github.com/open-mmlab/mmsegmentation/,
-originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
-and adapted from https://github.com/raoyongming/DenseCLIP/,
-originally MIT License, Copyright (c) 2022 Rao, Yongming.
-"""
+# Base modules are adapted from https://github.com/open-mmlab/mmcv/,
+# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
+# https://github.com/open-mmlab/mmsegmentation/,
+# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
+# and adapted from https://github.com/raoyongming/DenseCLIP/,
+# originally MIT License, Copyright (c) 2022 Rao, Yongming.
 import numpy as np
 import torch
@@ -1,11 +1,9 @@
-"""
-Base modules are adapted from https://github.com/open-mmlab/mmcv/,
-originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
-https://github.com/open-mmlab/mmsegmentation/,
-originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
-and adapted from https://github.com/raoyongming/DenseCLIP/,
-originally MIT License, Copyright (c) 2022 Rao, Yongming.
-"""
+# Base modules are adapted from https://github.com/open-mmlab/mmcv/,
+# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
+# https://github.com/open-mmlab/mmsegmentation/,
+# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
+# and adapted from https://github.com/raoyongming/DenseCLIP/,
+# originally MIT License, Copyright (c) 2022 Rao, Yongming.
 import math
 from collections import OrderedDict
@@ -1,11 +1,9 @@
-""" FPNneck
-Base modules are adapted from https://github.com/open-mmlab/mmcv/,
-originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
-https://github.com/open-mmlab/mmsegmentation/,
-originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
-and adapted from https://github.com/raoyongming/DenseCLIP/,
-originally MIT License, Copyright (c) 2022 Rao, Yongming.
-"""
+# Base modules are adapted from https://github.com/open-mmlab/mmcv/,
+# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
+# https://github.com/open-mmlab/mmsegmentation/,
+# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
+# and adapted from https://github.com/raoyongming/DenseCLIP/,
+# originally MIT License, Copyright (c) 2022 Rao, Yongming.
 import torch.nn as nn
 import torch.nn.functional as F
@@ -1,11 +1,9 @@
-"""
-Base modules are adapted from https://github.com/open-mmlab/mmcv/,
-originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
-https://github.com/open-mmlab/mmsegmentation/,
-originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
-and adapted from https://github.com/raoyongming/DenseCLIP/,
-originally MIT License, Copyright (c) 2022 Rao, Yongming.
-"""
+# Base modules are adapted from https://github.com/open-mmlab/mmcv/,
+# originally Apache 2.0 License, Copyright (c) 2018-2022 OpenMMLab,
+# https://github.com/open-mmlab/mmsegmentation/,
+# originally Apache 2.0 License, Copyright (c) 2020-2021 OpenMMLab,
+# and adapted from https://github.com/raoyongming/DenseCLIP/,
+# originally MIT License, Copyright (c) 2022 Rao, Yongming.
 import torch
 import torch.nn as nn
@@ -1,3 +1,5 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
 import os.path as osp
 from typing import Any, Dict
@@ -1,7 +1,6 @@
-""" CLIP Tokenizer
-Adapted from https://github.com/openai/CLIP.
-Originally MIT License, Copyright (c) 2021 OpenAI.
-"""
+# CLIP Tokenizer
+# Adapted from https://github.com/openai/CLIP.
+# Originally MIT License, Copyright (c) 2021 OpenAI.
 import gzip
 import html
@@ -1 +1,2 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
 from .lseg_base import TextDrivenSegmentation
@@ -1,7 +1,6 @@
-""" CLIP
-Adapted from https://github.com/openai/CLIP.
-Originally MIT License, Copyright (c) 2021 OpenAI.
-"""
+# CLIP
+# Adapted from https://github.com/openai/CLIP.
+# Originally MIT License, Copyright (c) 2021 OpenAI.
 import hashlib
 import os
@@ -1,7 +1,5 @@
-"""
-Adapted from https://github.com/isl-org/lang-seg.
-Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
-"""
+# Adapted from https://github.com/isl-org/lang-seg.
+# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
 import torch
 import torch.nn as nn
@@ -1,7 +1,5 @@
-"""
-Adapted from https://github.com/isl-org/lang-seg.
-Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
-"""
+# Adapted from https://github.com/isl-org/lang-seg.
+# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
 import torch
 import torch.nn as nn
@@ -1,3 +1,5 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
 import os.path as osp
 from typing import Any, Dict
@@ -1,7 +1,5 @@
-"""
-Adapted from https://github.com/isl-org/lang-seg.
-Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
-"""
+# Adapted from https://github.com/isl-org/lang-seg.
+# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
 import numpy as np
 import torch
@@ -1,7 +1,5 @@
-"""
-Adapted from https://github.com/isl-org/lang-seg.
-Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
-"""
+# Adapted from https://github.com/isl-org/lang-seg.
+# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
 import math
 import types
@@ -1,7 +1,5 @@
-"""
-Adapted from https://github.com/isl-org/lang-seg.
-Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
-"""
+# Adapted from https://github.com/isl-org/lang-seg.
+# Originally MIT License, Copyright (c) 2021 Intelligent Systems Lab Org.
 from collections import OrderedDict
 from typing import Tuple, Union
@@ -1,7 +1,6 @@
-""" CLIP
-Adapted from https://github.com/openai/CLIP.
-Originally MIT License, Copyright (c) 2021 OpenAI.
-"""
+# CLIP
+# Adapted from https://github.com/openai/CLIP.
+# Originally MIT License, Copyright (c) 2021 OpenAI.
 import gzip
 import html
@@ -1,3 +1,6 @@
+# Part of the implementation is borrowed and modified from latent-diffusion,
+# publicly available at https://github.com/CompVis/latent-diffusion.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
 import torch
@@ -1,3 +1,4 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os.path as osp
 from typing import Any, Dict
@@ -1,3 +1,6 @@
+# Part of the implementation is borrowed and modified from latent-diffusion,
+# publicly available at https://github.com/CompVis/latent-diffusion.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
 import torch
@@ -1,3 +1,6 @@
+# Part of the implementation is borrowed and modified from latent-diffusion,
+# publicly available at https://github.com/CompVis/latent-diffusion.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
 import torch
@@ -1,3 +1,6 @@
+# Part of the implementation is borrowed and modified from latent-diffusion,
+# publicly available at https://github.com/CompVis/latent-diffusion.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
 from functools import partial
@@ -1,9 +1,14 @@
-""" Generative Multimodal Model
-Base modules are adapted from https://github.com/openai/CLIP/,
-originally MIT License, Copyright (c) 2021 OpenAI,
-and adapted from https://github.com/lucidrains/CoCa-pytorch/,
-originally MIT License, Copyright (c) 2022 Phil Wang.
-"""
+# Copyright 2021 The OpenAI Team Authors.
+# Copyright 2022 Phil Wang.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+#
+# The implementation here is modified based on OpenAI CLIP,
+# originally MIT License, Copyright (c) 2021 OpenAI,
+# and publicly available at https://github.com/openai/CLIP/.
+# The implementation here is modified based on CoCa-pytorch,
+# originally MIT License, Copyright (c) 2022 Phil Wang,
+# and publicly available at https://github.com/lucidrains/CoCa-pytorch/.
+""" Generative Multimodal Model Architecture."""
 import os
 from collections import OrderedDict
@@ -1,3 +1,5 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+""" Generative Multimodal Model Wrapper."""
 import os.path as osp
 from typing import Any, Dict
@@ -1,7 +1,11 @@
-""" CLIP Tokenizer
-Adapted from https://github.com/openai/CLIP.
-Originally MIT License, Copyright (c) 2021 OpenAI.
-"""
+# Copyright 2021 The OpenAI Team Authors.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+#
+# The implementation here is modified based on OpenAI CLIP,
+# originally MIT License, Copyright (c) 2021 OpenAI,
+# and publicly available at https://github.com/openai/CLIP/.
+""" CLIP Tokenizer."""
 import gzip
 import html
 import os
@@ -1 +1,3 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 from .models import VideoCLIPForMultiModalEmbedding
@@ -1,3 +1,6 @@
+# The implementation is adopted from Huaishao Luo,
+# made publicly available under the MIT License at https://github.com/ArrowLuo/CLIP4Clip
 import cv2
 import numpy as np
 import torch as th
@@ -1 +1,3 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 from .clip_for_mm_video_embedding import VideoCLIPForMultiModalEmbedding
@@ -1,3 +1,6 @@
+# The implementation is adopted from the CLIP4Clip implementation,
+# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip
 import random
 from os.path import exists
 from typing import Any, Dict
@@ -1,3 +1,6 @@
+# The implementation is adopted from the CLIP4Clip implementation,
+# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip
 import numpy as np
@@ -1,3 +1,5 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os
 import platform
 from collections import OrderedDict
@@ -1,4 +1,5 @@
-# Part of the implementation is borrowed and modified from The OpenAI CLIP project.
+# The implementation is adopted from the CLIP4Clip implementation,
+# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip
 import hashlib
 import os
@@ -1,3 +1,6 @@
+# The implementation is adopted from the CLIP4Clip implementation,
+# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip
 from __future__ import absolute_import, division, print_function
 import logging
 from collections import OrderedDict
@@ -1,3 +1,6 @@
+# The implementation is adopted from the CLIP4Clip implementation,
+# made publicly available under Apache License, Version 2.0 at https://github.com/ArrowLuo/CLIP4Clip
 import gzip
 import html
 import os
@@ -1,4 +1,5 @@
-# The implementation here is modified based on OpenAI CLIP, publicly available at https://github.com/openai/CLIP.
+# Part of the implementation is borrowed and modified from CLIP, publicly available at https://github.com/openai/CLIP.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
@@ -1,5 +1,6 @@
-# The implementation here is modified based on latent diffusion, publicly available
-# at https://github.com/CompVis/latent-diffusion.
+# Part of the implementation is borrowed and modified from latent-diffusion,
+# publicly available at https://github.com/CompVis/latent-diffusion.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
 import os.path as osp
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
@@ -1,4 +1,5 @@
-# The implementation here is modified based on OpenAI CLIP, publicly available at https://github.com/openai/CLIP.
+# Part of the implementation is borrowed and modified from CLIP, publicly available at https://github.com/openai/CLIP.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import gzip
 import html
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
@@ -1,5 +1,6 @@
-# The implementation here is modified based on HuggingFace XGLM, publicly available
-# at https://github.com/huggingface/transformers.
+# Part of the implementation is borrowed and modified from HuggingFace XGLM,
+# publicly available at https://github.com/huggingface/transformers.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import math
@@ -1,3 +1,4 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 from typing import Any, Dict
 import cv2
@@ -1,7 +1,10 @@
-""" Generative Multimodal Model
-Base Transformer code is adapted from https://github.com/openai/CLIP/,
-originally MIT License, Copyright (c) 2021 OpenAI,
-"""
+# Copyright 2021 The OpenAI Team Authors.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
+#
+# The implementation here is modified based on OpenAI CLIP,
+# originally MIT License, Copyright (c) 2021 OpenAI,
+# and publicly available at https://github.com/openai/CLIP/.
 from collections import OrderedDict
 from typing import Tuple, Union
@@ -1,3 +1,4 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os.path as osp
 from typing import Any, Dict
@@ -1,3 +1,5 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os.path as osp
 from typing import Any, Dict
@@ -1,3 +1,4 @@
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os.path as osp
 from typing import Any, Dict
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os.path as osp
 from typing import Any, Dict
@@ -1,3 +1,4 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
 from typing import Any, Dict
 from modelscope.metainfo import Pipelines
@@ -1,3 +1,4 @@
+# Copyright (c) Alibaba, Inc. and its affiliates.
 from typing import Any, Dict
 from modelscope.metainfo import Pipelines
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import os.path as osp
 from typing import Any, Dict
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 from typing import Any, Dict
@@ -1,5 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 from typing import Any, Dict
 from modelscope.metainfo import Pipelines
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 # !/usr/bin/env python
 import unittest
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import unittest
@@ -1,4 +1,4 @@
-# Copyright (c) Alibaba, Inc. and its affiliates.
+# Copyright 2021-2022 The Alibaba Fundamental Vision Team Authors. All rights reserved.
 import unittest