
ZIP: ConvNeXt V1 & V2 code

Uploader: m0_46256255 · 6.85 KB · Credits required: 1

Resource file list:

ConvNeXt V1 V2 model.zip contains about 2 files:
  1. convnext.py 11.44KB
  2. covnextv2.py 8.49KB

Resource description:

ConvNeXt V1 / V2 network model code for the papers "A ConvNet for the 2020s" and "ConvNeXt V2: Co-designing and Scaling ConvNets with Masked Autoencoders".
""" original code from facebook research: https://github.com/facebookresearch/ConvNeXt 来源: https://blog.csdn.net/jiangyangll/article/details/123931086?ops_request_misc=%257B%2522request%255Fid%2522%253A%2522172267156916800186537899%2522%252C%2522scm%2522%253A%252220140713.130102334..%2522%257D&request_id=172267156916800186537899&biz_id=0&utm_medium=distribute.pc_search_result.none-task-blog-2~all~sobaiduend~default-2-123931086-null-null.142^v100^pc_search_result_base5&utm_term=convnet%E7%BD%91%E7%BB%9C%E6%9E%B6%E6%9E%84%E5%9B%BE&spm=1018.2226.3001.4187 https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py """ import torch import torch.nn as nn import torch.nn.functional as F def drop_path(x, drop_prob: float = 0., training: bool = False): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). This is the same as the DropConnect impl I created for EfficientNet, etc networks, however, the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper... See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use 'survival rate' as the argument. """ if drop_prob == 0. or not training: return x keep_prob = 1 - drop_prob shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device) random_tensor.floor_() # binarize output = x.div(keep_prob) * random_tensor return output class DropPath(nn.Module): """Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks). """ def __init__(self, drop_prob=None): super(DropPath, self).__init__() self.drop_prob = drop_prob def forward(self, x): return drop_path(x, self.drop_prob, self.training) class LayerNorm(nn.Module): r""" LayerNorm that supports two data formats: channels_last (default) or channels_first. The ordering of the dimensions in the inputs. channels_last corresponds to inputs with shape (batch_size, height, width, channels) while channels_first corresponds to inputs with shape (batch_size, channels, height, width). """ # channels_first (batch_size, channels, height, width) pytorch官方默认使用 # channels_last (batch_size, height, width, channels) def __init__(self, normalized_shape, eps=1e-6, data_format="channels_last"): super().__init__() self.weight = nn.Parameter(torch.ones(normalized_shape), requires_grad=True) # weight bias对应γ β self.bias = nn.Parameter(torch.zeros(normalized_shape), requires_grad=True) self.eps = eps self.data_format = data_format if self.data_format not in ["channels_last", "channels_first"]: raise ValueError(f"not support data format '{self.data_format}'") self.normalized_shape = (normalized_shape,) def forward(self, x: torch.Tensor) -> torch.Tensor: if self.data_format == "channels_last": return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps) elif self.data_format == "channels_first": # [batch_size, channels, height, width] # 对channels 维度求均值 mean = x.mean(1, keepdim=True) # 方差 var = (x - mean).pow(2).mean(1, keepdim=True) # 减均值,除以标准差的操作 x = (x - mean) / torch.sqrt(var + self.eps) x = self.weight[:, None, None] * x + self.bias[:, None, None] return x # ConvNeXt Block class ConvNeXt_Block(nn.Module): r""" ConvNeXt Block. 
There are two equivalent implementations: (1) DwConv -> LayerNorm (channels_first) -> 1x1 Conv -> GELU -> 1x1 Conv; all in (N, C, H, W) (2) DwConv -> Permute to (N, H, W, C); LayerNorm (channels_last) -> Linear -> GELU -> Linear; Permute back We use (2) as we find it slightly faster in PyTorch Args: dim (int): Number of input channels. drop_rate (float): Stochastic depth rate. Default: 0.0 layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. """ def __init__(self, dim, drop_rate=0., layer_scale_init_value=1e-6): super().__init__() self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim) # depthwise conv self.norm = LayerNorm(dim, eps=1e-6, data_format="channels_last") self.pwconv1 = nn.Linear(dim, 4 * dim) # pointwise/1x1 convs, implemented with linear layers self.act = nn.GELU() self.pwconv2 = nn.Linear(4 * dim, dim) # gamma 针对layer scale的操作 self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim,)), requires_grad=True) if layer_scale_init_value > 0 else None self.drop_path = DropPath(drop_rate) if drop_rate > 0. else nn.Identity() # nn.Identity() 恒等映射 def forward(self, x: torch.Tensor) -> torch.Tensor: shortcut = x x = self.dwconv(x) x = x.permute(0, 2, 3, 1) # [N, C, H, W] -> [N, H, W, C] x = self.norm(x) x = self.pwconv1(x) x = self.act(x) x = self.pwconv2(x) if self.gamma is not None: x = self.gamma * x x = x.permute(0, 3, 1, 2) # [N, H, W, C] -> [N, C, H, W] x = shortcut + self.drop_path(x) return x class ConvNeXt(nn.Module): r""" ConvNeXt A PyTorch impl of : `A ConvNet for the 2020s` - https://arxiv.org/pdf/2201.03545.pdf Args: in_chans (int): Number of input image channels. Default: 3 num_classes (int): Number of classes for classification head. Default: 1000 depths (tuple(int)): Number of blocks at each stage. Default: [3, 3, 9, 3] dims (int): Feature dimension at each stage. Default: [96, 192, 384, 768] drop_path_rate (float): Stochastic depth rate. Default: 0. layer_scale_init_value (float): Init value for Layer Scale. Default: 1e-6. head_init_scale (float): Init scaling value for classifier weights and biases. Default: 1. """ def __init__(self, in_chans: int = 3, num_classes: int = 1000, depths: list = None, dims: list = None, drop_path_rate: float = 0., layer_scale_init_value: float = 1e-6, head_init_scale: float = 1.): super().__init__() # 最初下采样部分 self.downsample_layers = nn.ModuleList() # stem and 3 intermediate downsampling conv layers # Conv2d k4, s4 # LayerNorm stem = nn.Sequential(nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4), LayerNorm(dims[0], eps=1e-6, data_format="channels_first")) self.downsample_layers.append(stem) # 对应stage2-stage4前的3个downsample for i in range(3): downsample_layer = nn.Sequential(LayerNorm(dims[i], eps=1e-6, data_format="channels_first"), nn.Conv2d(dims[i], dims[i + 1], kernel_size=2, stride=2)) self.downsample_layers.append(downsample_layer) self.stages = nn.ModuleList() # 4 feature resolution stages, each consisting of multiple blocks # 等差数列,初始值0,到drop path rate,总共depths个数 dp_rates = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] cur = 0 # 构建每个stage中堆叠的block for i in range(4): stage = nn.Sequential( *[ConvNeXt_Block(dim=dims[i], drop_rate=dp_rates[cur + j], layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])] ) self.stages.append(stage)
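
The listing above breaks off inside ConvNeXt.__init__. The remainder of the class in the upstream repository (https://github.com/facebookresearch/ConvNeXt/blob/main/models/convnext.py) adds the final norm layer, the classification head scaled by head_init_scale, the weight initialization, and the forward pass; a sketch of that continuation is reproduced below.

        # --- sketch of the continuation of ConvNeXt.__init__, after the stage-building loop ---
        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)   # final norm layer over the last stage's channels
        self.head = nn.Linear(dims[-1], num_classes)   # classification head

        self.apply(self._init_weights)
        # scale the classifier weights and biases by head_init_scale
        self.head.weight.data.mul_(head_init_scale)
        self.head.bias.data.mul_(head_init_scale)

    def _init_weights(self, m):
        # truncated-normal init for conv/linear weights, zero bias
        if isinstance(m, (nn.Conv2d, nn.Linear)):
            nn.init.trunc_normal_(m.weight, std=0.02)
            nn.init.zeros_(m.bias)

    def forward_features(self, x: torch.Tensor) -> torch.Tensor:
        for i in range(4):
            x = self.downsample_layers[i](x)  # downsample, then the stacked blocks of stage i
            x = self.stages[i](x)
        # global average pooling: [N, C, H, W] -> [N, C], then the final LayerNorm
        return self.norm(x.mean([-2, -1]))

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        x = self.forward_features(x)
        x = self.head(x)
        return x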
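
For a quick sanity check, the model can be instantiated with the ConvNeXt-T configuration given in the docstring defaults and run on a dummy batch. The convnext_tiny helper below is our own naming for this sketch, not necessarily a factory exposed by the archived files, and it assumes the class continuation shown above.

def convnext_tiny(num_classes: int = 1000) -> ConvNeXt:
    # hypothetical factory using the ConvNeXt-T sizes from the docstring defaults
    return ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], num_classes=num_classes)


if __name__ == "__main__":
    model = convnext_tiny(num_classes=5)
    dummy = torch.randn(2, 3, 224, 224)  # batch of 2 RGB images at 224x224
    logits = model(dummy)
    print(logits.shape)                  # expected: torch.Size([2, 5])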