How can I train FoveaBox with MobileNet? #11

Open
xscjun opened this issue Feb 11, 2020 · 2 comments

xscjun commented Feb 11, 2020

I added a mobilenet.py as GETTING_STARTED.md describes and modified the config, but I can't run the training. How can I train FoveaBox with a MobileNet backbone, please?

taokong (Owner) commented Feb 11, 2020

> I added a mobilenet.py as GETTING_STARTED.md describes and modified the config, but I can't run the training. How can I train FoveaBox with a MobileNet backbone, please?

Please provide more information.

xscjun (Author) commented Feb 12, 2020

> I added a mobilenet.py as GETTING_STARTED.md describes and modified the config, but I can't run the training. How can I train FoveaBox with a MobileNet backbone, please?
>
> Please provide more information.

1. My config: fovea_align_gn_ms_mobilenet_fpn_4gpu_2x.py
model = dict(
    type='FOVEA',
    pretrained=None,
    backbone=dict(
        type='MobileNet'
        # width_multiplier=1,
        # class_num=1
    ),
    neck=dict(
        type='FPN',
        # in_channels=[256, 512, 1024, 2048],  # mobilenetweb
        in_channels=[128, 256, 512, 1024],
        out_channels=256,
        start_level=1,
        num_outs=5,
        add_extra_convs=True),
    bbox_head=dict(
        type='FoveaHead',
        ...
    )
Nothing else was changed.

2. Backbone: MobileNet (mobilenet.py)
import torch
import torch.nn as nn
import logging
import torch.utils.checkpoint as cp

from mmcv.cnn import constant_init, kaiming_init
from mmcv.runner import load_checkpoint
from ..registry import BACKBONES

class DepthSeperabelConv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
        super().__init__()
        self.depthwise = nn.Sequential(
            nn.Conv2d(input_channels, input_channels, kernel_size,
                      groups=input_channels, **kwargs),
            nn.BatchNorm2d(input_channels),
            nn.ReLU(inplace=True)
        )
        self.pointwise = nn.Sequential(
            nn.Conv2d(input_channels, output_channels, 1),
            nn.BatchNorm2d(output_channels),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        x = self.depthwise(x)
        x = self.pointwise(x)
        return x

class BasicConv2d(nn.Module):

    def __init__(self, input_channels, output_channels, kernel_size, **kwargs):
        super().__init__()
        self.conv = nn.Conv2d(input_channels, output_channels, kernel_size, **kwargs)
        self.bn = nn.BatchNorm2d(output_channels)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        x = self.relu(x)
        return x

@BACKBONES.register_module
class MobileNet(nn.Module):

    def __init__(self, width_multiplier=1, class_num=1):
        super(MobileNet, self).__init__()
        alpha = width_multiplier
        self.stem = nn.Sequential(
            BasicConv2d(3, int(32 * alpha), 3, padding=1, bias=False),
            DepthSeperabelConv2d(int(32 * alpha), int(64 * alpha), 3, padding=1, bias=False)
        )
        # downsample
        self.conv1 = nn.Sequential(
            DepthSeperabelConv2d(int(64 * alpha), int(128 * alpha), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(int(128 * alpha), int(128 * alpha), 3, padding=1, bias=False)
        )
        # downsample
        self.conv2 = nn.Sequential(
            DepthSeperabelConv2d(int(128 * alpha), int(256 * alpha), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(int(256 * alpha), int(256 * alpha), 3, padding=1, bias=False)
        )
        # downsample
        self.conv3 = nn.Sequential(
            DepthSeperabelConv2d(int(256 * alpha), int(512 * alpha), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(int(512 * alpha), int(512 * alpha), 3, padding=1, bias=False),
            DepthSeperabelConv2d(int(512 * alpha), int(512 * alpha), 3, padding=1, bias=False),
            DepthSeperabelConv2d(int(512 * alpha), int(512 * alpha), 3, padding=1, bias=False),
            DepthSeperabelConv2d(int(512 * alpha), int(512 * alpha), 3, padding=1, bias=False),
            DepthSeperabelConv2d(int(512 * alpha), int(512 * alpha), 3, padding=1, bias=False)
        )
        # downsample
        self.conv4 = nn.Sequential(
            DepthSeperabelConv2d(int(512 * alpha), int(1024 * alpha), 3, stride=2, padding=1, bias=False),
            DepthSeperabelConv2d(int(1024 * alpha), int(1024 * alpha), 3, padding=1, bias=False)
        )
        # self.fc = nn.Linear(int(1024 * alpha), class_num)
        # self.avg = nn.AdaptiveAvgPool2d(7)

    def init_weights(self, pretrained=None):
        if isinstance(pretrained, str):
            logger = logging.getLogger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            for m in self.modules():
                if isinstance(m, nn.Conv2d):
                    kaiming_init(m)
                elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):
                    constant_init(m, 1)

    def forward(self, x):
        outs = []
        x = self.stem(x)
        x = self.conv1(x)
        outs.append(x)
        x = self.conv2(x)
        outs.append(x)
        x = self.conv3(x)
        outs.append(x)
        x = self.conv4(x)
        # x = self.avg(x)
        outs.append(x)
        # x = x.view(x.size(0), -1)
        # x = self.fc(x)
        return tuple(outs)

    def train(self, mode=True):
        super(MobileNet, self).train(mode)
        if mode:
            for m in self.modules():
                # trick: eval has an effect on BatchNorm only
                if isinstance(m, nn.BatchNorm2d):
                    m.train()

3. __init__.py:
from .mobilenet import MobileNet
__all__ = ['ResNet', 'make_res_layer', 'ResNeXt', 'SSDVGG', 'HRNet', 'MobileNet']

4. Training command:
python tools/train.py ./configs/foveabox/fovea_align_gn_ms_mobilenet_fpn_4gpu_2x.py --gpus 1 --work_dir ./checkpoints/mobilenet_Humancrowd

5. Error:
File "/home/ubuntu/..../mmdet/models/backbones/mobilenet.py", line 181, in forward
    x = self.stem(x)
File "/home/ubuntu/anaconda3/envs/torch-gpu/lib/python3.6/site-packages/torch/nn/modules/module.py", line 591, in __getattr__
    type(self).__name__, name))
AttributeError: 'MobileNet' object has no attribute 'stem'

Training runs fine with the resnet50 backbone.
Is there something wrong with my mobilenet.py under mmdet?
Could you share your mobilenet.py and config, please?
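
One way to narrow this down is to build the backbone on its own, outside of FOVEA, and check that it actually exposes stem and that its four feature maps have the channel counts the FPN config expects (in_channels=[128, 256, 512, 1024]). A minimal sketch, assuming the mobilenet.py above is saved under mmdet/models/backbones/ and exported in __init__.py as in step 3:

import torch
from mmdet.models.backbones.mobilenet import MobileNet

# Build the backbone directly; if __init__ never runs (e.g. it is mis-indented
# or not named __init__), this is where 'stem' would fail to be created.
backbone = MobileNet(width_multiplier=1)
backbone.init_weights(pretrained=None)
backbone.eval()
assert hasattr(backbone, 'stem')

with torch.no_grad():
    feats = backbone(torch.randn(1, 3, 224, 224))

# The channel dimensions printed here should match the FPN in_channels above.
for level, feat in enumerate(feats):
    print(level, tuple(feat.shape))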
