安装timm

!pip install timm

简单使用

# Inspect the architecture of a pretrained model.
import timm

model_name = 'mobilenetv3_large_100'
# Download ImageNet-pretrained weights on first use (cached afterwards).
m = timm.create_model(model_name, pretrained=True)
# Switch to inference mode; printing the returned module shows the full layer tree.
m.eval()
    Downloading: "https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/mobilenetv3_large_100_ra-f55367f5.pth" to /home/gem/.cache/torch/hub/checkpoints/mobilenetv3_large_100_ra-f55367f5.pth
    
    MobileNetV3(
      (conv_stem): Conv2d(3, 16, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
      (bn1): BatchNormAct2d(
        16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
        (drop): Identity()
        (act): Hardswish()
      )
      (blocks): Sequential(
        (0): Sequential(
          (0): DepthwiseSeparableConv(
            (conv_dw): Conv2d(16, 16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=16, bias=False)
            (bn1): BatchNormAct2d(
              16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (se): Identity()
            (conv_pw): Conv2d(16, 16, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn2): BatchNormAct2d(
              16, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
        )
        (1): Sequential(
          (0): InvertedResidual(
            (conv_pw): Conv2d(16, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (conv_dw): Conv2d(64, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=64, bias=False)
            (bn2): BatchNormAct2d(
              64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (se): Identity()
            (conv_pwl): Conv2d(64, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (1): InvertedResidual(
            (conv_pw): Conv2d(24, 72, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (conv_dw): Conv2d(72, 72, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=72, bias=False)
            (bn2): BatchNormAct2d(
              72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (se): Identity()
            (conv_pwl): Conv2d(72, 24, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              24, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
        )
        (2): Sequential(
          (0): InvertedResidual(
            (conv_pw): Conv2d(24, 72, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (conv_dw): Conv2d(72, 72, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), groups=72, bias=False)
            (bn2): BatchNormAct2d(
              72, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(72, 24, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(24, 72, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(72, 40, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (1): InvertedResidual(
            (conv_pw): Conv2d(40, 120, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              120, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (conv_dw): Conv2d(120, 120, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=120, bias=False)
            (bn2): BatchNormAct2d(
              120, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(120, 32, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(32, 120, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(120, 40, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (2): InvertedResidual(
            (conv_pw): Conv2d(40, 120, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              120, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (conv_dw): Conv2d(120, 120, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=120, bias=False)
            (bn2): BatchNormAct2d(
              120, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): ReLU(inplace=True)
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(120, 32, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(32, 120, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(120, 40, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              40, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
        )
        (3): Sequential(
          (0): InvertedResidual(
            (conv_pw): Conv2d(40, 240, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(240, 240, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), groups=240, bias=False)
            (bn2): BatchNormAct2d(
              240, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): Identity()
            (conv_pwl): Conv2d(240, 80, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (1): InvertedResidual(
            (conv_pw): Conv2d(80, 200, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(200, 200, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=200, bias=False)
            (bn2): BatchNormAct2d(
              200, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): Identity()
            (conv_pwl): Conv2d(200, 80, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (2): InvertedResidual(
            (conv_pw): Conv2d(80, 184, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              184, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(184, 184, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=184, bias=False)
            (bn2): BatchNormAct2d(
              184, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): Identity()
            (conv_pwl): Conv2d(184, 80, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (3): InvertedResidual(
            (conv_pw): Conv2d(80, 184, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              184, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(184, 184, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=184, bias=False)
            (bn2): BatchNormAct2d(
              184, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): Identity()
            (conv_pwl): Conv2d(184, 80, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              80, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
        )
        (4): Sequential(
          (0): InvertedResidual(
            (conv_pw): Conv2d(80, 480, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(480, 480, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=480, bias=False)
            (bn2): BatchNormAct2d(
              480, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(480, 120, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(120, 480, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(480, 112, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              112, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (1): InvertedResidual(
            (conv_pw): Conv2d(112, 672, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              672, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(672, 672, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=672, bias=False)
            (bn2): BatchNormAct2d(
              672, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(672, 168, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(168, 672, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(672, 112, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              112, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
        )
        (5): Sequential(
          (0): InvertedResidual(
            (conv_pw): Conv2d(112, 672, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              672, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(672, 672, kernel_size=(5, 5), stride=(2, 2), padding=(2, 2), groups=672, bias=False)
            (bn2): BatchNormAct2d(
              672, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(672, 168, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(168, 672, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(672, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (1): InvertedResidual(
            (conv_pw): Conv2d(160, 960, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(960, 960, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=960, bias=False)
            (bn2): BatchNormAct2d(
              960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(960, 240, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(240, 960, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(960, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
          (2): InvertedResidual(
            (conv_pw): Conv2d(160, 960, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (conv_dw): Conv2d(960, 960, kernel_size=(5, 5), stride=(1, 1), padding=(2, 2), groups=960, bias=False)
            (bn2): BatchNormAct2d(
              960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (se): SqueezeExcite(
              (conv_reduce): Conv2d(960, 240, kernel_size=(1, 1), stride=(1, 1))
              (act1): ReLU(inplace=True)
              (conv_expand): Conv2d(240, 960, kernel_size=(1, 1), stride=(1, 1))
              (gate): Hardsigmoid()
            )
            (conv_pwl): Conv2d(960, 160, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn3): BatchNormAct2d(
              160, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Identity()
            )
            (drop_path): Identity()
          )
        )
        (6): Sequential(
          (0): ConvBnAct(
            (conv): Conv2d(160, 960, kernel_size=(1, 1), stride=(1, 1), bias=False)
            (bn1): BatchNormAct2d(
              960, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True
              (drop): Identity()
              (act): Hardswish()
            )
            (drop_path): Identity()
          )
        )
      )
      (global_pool): SelectAdaptivePool2d (pool_type=avg, flatten=Identity())
      (conv_head): Conv2d(960, 1280, kernel_size=(1, 1), stride=(1, 1))
      (act2): Hardswish()
      (flatten): Flatten(start_dim=1, end_dim=-1)
      (classifier): Linear(in_features=1280, out_features=1000, bias=True)
    )
# Enumerate every architecture that ships with pretrained weights.
from pprint import pprint

model_names = timm.list_models(pretrained=True)
# pprint renders the long list one name per line for readability.
pprint(model_names)
    ['adv_inception_v3',
     'bat_resnext26ts',
     'beit_base_patch16_224',
     'beit_base_patch16_224_in22k',
     'beit_base_patch16_384',
     'beit_large_patch16_224',
     'beit_large_patch16_224_in22k',
     ...]
# Targeted model search: the argument is a glob pattern,
# so '*vit*' matches every model name containing "vit".
model_names = timm.list_models('*vit*')
pprint(model_names)
    ['convit_base',
     'convit_small',
     'convit_tiny',
     'crossvit_9_240',
     'crossvit_9_dagger_240',
     'crossvit_15_240',
     'crossvit_15_dagger_240',
     'crossvit_15_dagger_408',
     'crossvit_18_240',
     'crossvit_18_dagger_240',
     'crossvit_18_dagger_408',
     'crossvit_base_240',
     'crossvit_small_240',
     'crossvit_tiny_240',
     'gcvit_base',
     'gcvit_small',
     'gcvit_tiny',
     'gcvit_xtiny',
     'gcvit_xxtiny',
     'levit_128',
     'levit_128s',
     'levit_192',
     'levit_256',
     'levit_256d',
     'levit_384',
     'maxvit_base_224',
     'maxvit_large_224',
     'maxvit_nano_rw_256',
     'maxvit_pico_rw_256',
     'maxvit_rmlp_nano_rw_256',
     'maxvit_rmlp_pico_rw_256',
     'maxvit_rmlp_small_rw_224',
     'maxvit_rmlp_small_rw_256',
     'maxvit_rmlp_tiny_rw_256',
     'maxvit_small_224',
     'maxvit_tiny_224',
     'maxvit_tiny_pm_256',
     'maxvit_tiny_rw_224',
     'maxvit_tiny_rw_256',
     'maxvit_xlarge_224',
     'maxxvit_rmlp_nano_rw_256',
     'maxxvit_rmlp_small_rw_256',
     'maxxvit_rmlp_tiny_rw_256',
     'mobilevit_s',
     'mobilevit_xs',
     'mobilevit_xxs',
     'mobilevitv2_050',
     'mobilevitv2_075',
     'mobilevitv2_100',
     'mobilevitv2_125',
     'mobilevitv2_150',
     'mobilevitv2_150_384_in22ft1k',
     'mobilevitv2_150_in22ft1k',
     'mobilevitv2_175',
     'mobilevitv2_175_384_in22ft1k',
     'mobilevitv2_175_in22ft1k',
     'mobilevitv2_200',
     'mobilevitv2_200_384_in22ft1k',
     'mobilevitv2_200_in22ft1k',
     'mvitv2_base',
     'mvitv2_large',
     'mvitv2_small',
     'mvitv2_small_cls',
     'mvitv2_tiny',
     'semobilevit_s',
     'vit_base_patch8_224',
     'vit_base_patch8_224_dino',
     'vit_base_patch8_224_in21k',
     'vit_base_patch16_18x2_224',
     'vit_base_patch16_224',
     'vit_base_patch16_224_dino',
     'vit_base_patch16_224_in21k',
     'vit_base_patch16_224_miil',
     'vit_base_patch16_224_miil_in21k',
     'vit_base_patch16_224_sam',
     'vit_base_patch16_384',
     'vit_base_patch16_plus_240',
     'vit_base_patch16_rpn_224',
     'vit_base_patch32_224',
     'vit_base_patch32_224_clip_laion2b',
     'vit_base_patch32_224_in21k',
     'vit_base_patch32_224_sam',
     'vit_base_patch32_384',
     'vit_base_patch32_plus_256',
     'vit_base_r26_s32_224',
     'vit_base_r50_s16_224',
     'vit_base_r50_s16_224_in21k',
     'vit_base_r50_s16_384',
     'vit_base_resnet26d_224',
     'vit_base_resnet50_224_in21k',
     'vit_base_resnet50_384',
     'vit_base_resnet50d_224',
     'vit_giant_patch14_224',
     'vit_giant_patch14_224_clip_laion2b',
     'vit_gigantic_patch14_224',
     'vit_huge_patch14_224',
     'vit_huge_patch14_224_clip_laion2b',
     'vit_huge_patch14_224_in21k',
     'vit_large_patch14_224',
     'vit_large_patch14_224_clip_laion2b',
     'vit_large_patch16_224',
     'vit_large_patch16_224_in21k',
     'vit_large_patch16_384',
     'vit_large_patch32_224',
     'vit_large_patch32_224_in21k',
     'vit_large_patch32_384',
     'vit_large_r50_s32_224',
     'vit_large_r50_s32_224_in21k',
     'vit_large_r50_s32_384',
     'vit_relpos_base_patch16_224',
     'vit_relpos_base_patch16_cls_224',
     'vit_relpos_base_patch16_clsgap_224',
     'vit_relpos_base_patch16_plus_240',
     'vit_relpos_base_patch16_rpn_224',
     'vit_relpos_base_patch32_plus_rpn_256',
     'vit_relpos_medium_patch16_224',
     'vit_relpos_medium_patch16_cls_224',
     'vit_relpos_medium_patch16_rpn_224',
     'vit_relpos_small_patch16_224',
     'vit_relpos_small_patch16_rpn_224',
     'vit_small_patch8_224_dino',
     'vit_small_patch16_18x2_224',
     'vit_small_patch16_36x1_224',
     'vit_small_patch16_224',
     'vit_small_patch16_224_dino',
     'vit_small_patch16_224_in21k',
     'vit_small_patch16_384',
     'vit_small_patch32_224',
     'vit_small_patch32_224_in21k',
     'vit_small_patch32_384',
     'vit_small_r26_s32_224',
     'vit_small_r26_s32_224_in21k',
     'vit_small_r26_s32_384',
     'vit_small_resnet26d_224',
     'vit_small_resnet50d_s16_224',
     'vit_srelpos_medium_patch16_224',
     'vit_srelpos_small_patch16_224',
     'vit_tiny_patch16_224',
     'vit_tiny_patch16_224_in21k',
     'vit_tiny_patch16_384',
     'vit_tiny_r_s16_p8_224',
     'vit_tiny_r_s16_p8_224_in21k',
     'vit_tiny_r_s16_p8_384']

各种模型的精度比较见官方结果页:https://huggingface.co/docs/timm/results

参考

Logo

技术共进,成长同行——讯飞AI开发者社区

更多推荐