For the ResNet18 build, see: Building a ResNet18 Network in PyTorch and Training/Testing It on CIFAR10. For the ResNet34 build, see: Building a ResNet34 Network in PyTorch. For the ResNet50 build, see: Building a ResNet50 Network in PyTorch.
These models follow my ResNet50 build; since all variants of 50+ layers are nearly identical except for the number of stacked bottleneck units, I have not written comments here. For annotated code, refer to the comments in my ResNet50 build; for training ResNet101 and ResNet152, refer to the training section of my ResNet18 post.
For ResNet101 and ResNet152 you can still refer to the ResNet50 architecture diagram:
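For orientation, the deep variants differ only in how many bottleneck units are stacked per stage. The counts below come from the original ResNet paper and match the "first + next" loop ranges in the forward passes further down:

# Bottleneck units per stage (conv2_x..conv5_x), as in He et al.'s ResNet paper.
# e.g. ResNet101 stage 3 = 1 "first" block + 22 "next" blocks = 23.
blocks_per_stage = {
    'resnet50':  [3, 4, 6, 3],
    'resnet101': [3, 4, 23, 3],
    'resnet152': [3, 8, 36, 3],
}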
On to the code:
The ResNet101 model.py:
import torch
import torch.nn as nn
from torch.nn import functional as F
class DownSample(nn.Module):
    # 1x1 projection shortcut: matches channel count (and spatial stride) so the
    # identity branch can be added to the bottleneck output.
    def __init__(self, in_channel, out_channel, stride):
        super(DownSample, self).__init__()
        self.down = nn.Sequential(
            nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, padding=0, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU(inplace=True)  # note: the paper's projection shortcut uses only Conv+BN; the ReLU here follows the post
        )
    def forward(self, x):
        out = self.down(x)
        return out
class ResNet101(nn.Module):
    def __init__(self, classes_num):  # number of output classes
super(ResNet101, self).__init__()
self.pre = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
# --------------------------------------------------------------------
self.layer1_first = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256)
)
self.layer1_next = nn.Sequential(
nn.Conv2d(256, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256)
)
# --------------------------------------------------------------------
self.layer2_first = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512)
)
self.layer2_next = nn.Sequential(
nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512)
)
# --------------------------------------------------------------------
self.layer3_first = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 1024, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(1024)
)
self.layer3_next = nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 1024, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(1024)
)
# --------------------------------------------------------------------
self.layer4_first = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 2048, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(2048)
)
self.layer4_next = nn.Sequential(
nn.Conv2d(2048, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 2048, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(2048)
)
# --------------------------------------------------------------------
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(2048 * 1 * 1, 1000),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(1000, classes_num)
)
def forward(self, x):
out = self.pre(x)
# --------------------------------------------------------------------
        # NOTE: building DownSample inside forward() re-initializes the projection
        # shortcut on every call, so its weights are never trained; the structure
        # follows the original post, but it would normally be created in __init__.
        layer1_shortcut = DownSample(64, 256, 1)
        layer1_shortcut.to(out.device)  # device-agnostic instead of hard-coded 'cuda:0'
        layer1_identity = layer1_shortcut(out)
        out = self.layer1_first(out)
        out = F.relu(out + layer1_identity, inplace=True)  # residual addition
        for i in range(2):
            identity = out
            out = self.layer1_next(out)
            out = F.relu(out + identity, inplace=True)
# --------------------------------------------------------------------
        layer2_shortcut = DownSample(256, 512, 2)
        layer2_shortcut.to(out.device)
        layer2_identity = layer2_shortcut(out)
        out = self.layer2_first(out)
        out = F.relu(out + layer2_identity, inplace=True)
        for i in range(3):
            identity = out
            out = self.layer2_next(out)
            out = F.relu(out + identity, inplace=True)
# --------------------------------------------------------------------
        layer3_shortcut = DownSample(512, 1024, 2)
        layer3_shortcut.to(out.device)
        layer3_identity = layer3_shortcut(out)
        out = self.layer3_first(out)
        out = F.relu(out + layer3_identity, inplace=True)
        for i in range(22):
            identity = out
            out = self.layer3_next(out)
            out = F.relu(out + identity, inplace=True)
# --------------------------------------------------------------------
        layer4_shortcut = DownSample(1024, 2048, 2)
        layer4_shortcut.to(out.device)
        layer4_identity = layer4_shortcut(out)
        out = self.layer4_first(out)
        out = F.relu(out + layer4_identity, inplace=True)
        for i in range(2):
            identity = out
            out = self.layer4_next(out)
            out = F.relu(out + identity, inplace=True)
# --------------------------------------------------------------------
out = self.avg_pool(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out
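A minimal sanity check (my own usage sketch, not part of the original post), assuming the class above is importable; it pushes a dummy batch through the network and prints the output shape:

if __name__ == '__main__':
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    net = ResNet101(classes_num=10).to(device)  # e.g. 10 classes for CIFAR10
    dummy = torch.randn(2, 3, 224, 224, device=device)  # batch of two RGB 224x224 images
    with torch.no_grad():
        logits = net(dummy)
    print(logits.shape)  # expected: torch.Size([2, 10])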
The ResNet152 model.py:
import torch
import torch.nn as nn
from torch.nn import functional as F
class DownSample(nn.Module):
def __init__(self, in_channel, out_channel, stride):
super(DownSample, self).__init__()
self.down = nn.Sequential(
nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=stride, padding=0, bias=False),
nn.BatchNorm2d(out_channel),
nn.ReLU(inplace=True)
)
def forward(self, x):
out = self.down(x)
return out
class ResNet152(nn.Module):
    def __init__(self, classes_num):  # number of output classes
super(ResNet152, self).__init__()
self.pre = nn.Sequential(
nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
)
# -----------------------------------------------------------------------
self.layer1_first = nn.Sequential(
nn.Conv2d(64, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256)
)
self.layer1_next = nn.Sequential(
nn.Conv2d(256, 64, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(64),
nn.ReLU(inplace=True),
nn.Conv2d(64, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256)
)
# -----------------------------------------------------------------------
self.layer2_first = nn.Sequential(
nn.Conv2d(256, 128, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512)
)
self.layer2_next = nn.Sequential(
nn.Conv2d(512, 128, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(128),
nn.ReLU(inplace=True),
nn.Conv2d(128, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512)
)
# -----------------------------------------------------------------------
self.layer3_first = nn.Sequential(
nn.Conv2d(512, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 1024, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(1024)
)
self.layer3_next = nn.Sequential(
nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(256),
nn.ReLU(inplace=True),
nn.Conv2d(256, 1024, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(1024)
)
# -----------------------------------------------------------------------
self.layer4_first = nn.Sequential(
nn.Conv2d(1024, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, stride=2, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 2048, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(2048)
)
self.layer4_next = nn.Sequential(
nn.Conv2d(2048, 512, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=False),
nn.BatchNorm2d(512),
nn.ReLU(inplace=True),
nn.Conv2d(512, 2048, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(2048)
)
# -----------------------------------------------------------------------
self.avg_pool = nn.AdaptiveAvgPool2d((1, 1))
self.fc = nn.Sequential(
nn.Dropout(p=0.5),
nn.Linear(2048 * 1 * 1, 1000),
nn.ReLU(inplace=True),
nn.Dropout(p=0.5),
nn.Linear(1000, classes_num)
)
def forward(self, x):
out = self.pre(x)
# -----------------------------------------------------------------------
        # As in ResNet101 above: DownSample is built inside forward() to follow the
        # original post; .to(out.device) replaces the commented-out hard-coded
        # 'cuda:0' call so the shortcut lands on the same device as its input.
        layer1_shortcut = DownSample(64, 256, 1)
        layer1_shortcut.to(out.device)
        layer1_identity = layer1_shortcut(out)
        out = self.layer1_first(out)
        out = F.relu(out + layer1_identity, inplace=True)
        for i in range(2):
            identity = out
            out = self.layer1_next(out)
            out = F.relu(out + identity, inplace=True)
# -----------------------------------------------------------------------
        layer2_shortcut = DownSample(256, 512, 2)
        layer2_shortcut.to(out.device)
        layer2_identity = layer2_shortcut(out)
        out = self.layer2_first(out)
        out = F.relu(out + layer2_identity, inplace=True)
        for i in range(7):
            identity = out
            out = self.layer2_next(out)
            out = F.relu(out + identity, inplace=True)
# -----------------------------------------------------------------------
        layer3_shortcut = DownSample(512, 1024, 2)
        layer3_shortcut.to(out.device)
        layer3_identity = layer3_shortcut(out)
        out = self.layer3_first(out)
        out = F.relu(out + layer3_identity, inplace=True)
        for i in range(35):
            identity = out
            out = self.layer3_next(out)
            out = F.relu(out + identity, inplace=True)
# -----------------------------------------------------------------------
        layer4_shortcut = DownSample(1024, 2048, 2)
        layer4_shortcut.to(out.device)
        layer4_identity = layer4_shortcut(out)
        out = self.layer4_first(out)
        out = F.relu(out + layer4_identity, inplace=True)
        for i in range(2):
            identity = out
            out = self.layer4_next(out)
            out = F.relu(out + identity, inplace=True)
# -----------------------------------------------------------------------
out = self.avg_pool(out)
out = out.reshape(out.size(0), -1)
out = self.fc(out)
return out
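And a similar check for ResNet152 (again a usage sketch of mine, not from the original post). Note that because the DownSample shortcuts are created inside forward(), their weights do not appear in net.parameters() and are therefore excluded from this count:

if __name__ == '__main__':
    net = ResNet152(classes_num=10)
    n_params = sum(p.numel() for p in net.parameters() if p.requires_grad)
    print(f'trainable parameters: {n_params / 1e6:.1f}M')  # excludes the in-forward shortcuts
    out = net(torch.randn(1, 3, 224, 224))
    print(out.shape)  # expected: torch.Size([1, 10])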