1zb/deformable-convolution-pytorch

performance issue

milani opened this issue · 4 comments

I have the following network:

import torch.nn as nn
import torch.nn.functional as F
from modules import ConvOffset2d

class Model(nn.Module):
    def __init__(self):
        super(Model, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, 3, padding=1, bias=False)
        self.batch1 = nn.BatchNorm2d(8, affine=False)
        #self.conv2 = nn.Conv2d(8, 16, 3, padding=0, bias=False)
        # offset branch: 2 * kH * kW = 18 offset channels (an x and y offset per kernel tap)
        self.conv2offset = nn.Conv2d(8, 2 * 3 * 3, 3, padding=0, bias=False)
        self.deform_conv2 = ConvOffset2d(8, 16, 3, padding=0, num_deformable_groups=1)
        self.batch2 = nn.BatchNorm2d(16, affine=False)
        self.pooling = nn.MaxPool2d(2)
        self.fc1 = nn.Linear(6 * 6 * 16, 10)
        self.activation = nn.ReLU()

    def forward(self, x):
        x = self.conv1(x)
        x = self.pooling(x)
        x = self.batch1(x)
        x = self.activation(x)
        #x = self.conv2(x)
        offset = self.conv2offset(x)
        x = self.deform_conv2(x, offset)
        x = self.pooling(x)
        x = self.batch2(x)
        x = self.activation(x)
        logits = self.fc1(x.view(-1, 6 * 6 * 16))
        probas = F.softmax(logits, dim=1)
        return logits, probas

I train it on MNIST for 2 epochs. Training takes 327 seconds (97.64% accuracy on the test set).

If I remove the deformable conv and replace it with a normal convolution (commented out in the code above), the same 2 epochs take 19 seconds (97.54% accuracy on the test set).

What do you think is the cause?

PyTorch v0.3.0
Python v3.6.1

1zb commented

In the original paper they use deformable conv in res5, where the spatial size is only 7x7. So here I think you should use it after several strided convolution layers or spatial pooling layers, so that it runs on small feature maps.
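For illustration, here is a minimal sketch of that rearrangement for the MNIST model above (the layer names and channel counts are my own choices, not from this repo): two plain conv + pool stages shrink the map to 7x7 before the deformable conv is applied.

import torch.nn as nn
from modules import ConvOffset2d

class SmallDeformTail(nn.Module):
    def __init__(self):
        super(SmallDeformTail, self).__init__()
        self.conv1 = nn.Conv2d(1, 8, 3, padding=1, bias=False)
        self.conv2 = nn.Conv2d(8, 16, 3, padding=1, bias=False)
        self.pool = nn.MaxPool2d(2)
        self.relu = nn.ReLU()
        # the deformable conv only sees a 7x7 map, so far fewer sampled locations
        self.offset = nn.Conv2d(16, 2 * 3 * 3, 3, padding=1, bias=False)
        self.deform = ConvOffset2d(16, 32, 3, padding=1, num_deformable_groups=1)
        self.fc = nn.Linear(7 * 7 * 32, 10)

    def forward(self, x):
        x = self.pool(self.relu(self.conv1(x)))        # 28x28 -> 14x14
        x = self.pool(self.relu(self.conv2(x)))        # 14x14 -> 7x7
        x = self.relu(self.deform(x, self.offset(x)))  # stays 7x7
        return self.fc(x.view(x.size(0), -1))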

milani commented

So are you suggesting that deform conv is slow by its nature?

1zb commented

Backward is very slow. Forward is okay. Table 4 from the paper gives the forward time.
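One way to confirm this locally is to time forward and forward+backward separately for a single ConvOffset2d layer. A minimal sketch, with arbitrary shapes matching the MNIST model above (the layer and its constructor arguments are taken from this thread; everything else is illustrative):

import time
import torch
from modules import ConvOffset2d

x = torch.rand(64, 8, 14, 14).cuda()
x.requires_grad = True
# offsets must match the output spatial size: 14 -> 12 with k=3, padding=0
offset = torch.rand(64, 2 * 3 * 3, 12, 12).cuda()
layer = ConvOffset2d(8, 16, 3, padding=0, num_deformable_groups=1).cuda()

torch.cuda.synchronize()
t0 = time.time()
for _ in range(100):
    out = layer(x, offset)
torch.cuda.synchronize()
print("forward x100:", time.time() - t0)

t0 = time.time()
for _ in range(100):
    out = layer(x, offset)
    out.sum().backward()
torch.cuda.synchronize()
print("forward + backward x100:", time.time() - t0)

If backward is the bottleneck, the gap between the two timings should dwarf the forward-only number.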

Hi,

I have measured the forward and backward speed of resnet101, and of a resnet101 modified by changing a conv layer to a deformable layer in each block of the last stage. The deformable layers are used similarly to the Fast RCNN example.

The benchmark code is:

from torch import nn
from modules import ConvOffset2d
import time
import torch
import math
from torchvision.models.resnet import Bottleneck
from torchvision.models import resnet101


class BottleneckDeform(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BottleneckDeform, self).__init__()
        num_deformable_groups = 4
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # change conv2 to deformable layer: offset + conv2
        self.offset = nn.Conv2d(planes,
                                num_deformable_groups * 2 * 3 * 3,
                                kernel_size=(3, 3),
                                stride=(stride, stride),
                                padding=(1, 1),
                                bias=False)
        self.conv2 = ConvOffset2d(planes,
                                  planes, (3, 3),
                                  stride=stride,
                                  padding=1,
                                  num_deformable_groups=num_deformable_groups)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        offset = self.offset(out)
        out = self.conv2(out, offset)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
    
    
class ResNetDeform(nn.Module):
    def __init__(self, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNetDeform, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(Bottleneck, 64, layers[0])
        self.layer2 = self._make_layer(Bottleneck, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(Bottleneck, 256, layers[2], stride=2)
        # change this layer to deformable case
        self.layer4 = self._make_layer(BottleneckDeform, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7, stride=1)
        self.fc = nn.Linear(512 * Bottleneck.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        x = self.avgpool(x)
        x = x.view(x.size(0), -1)
        x = self.fc(x)

        return x
    
    
def test_time(data, model):
    torch.cuda.synchronize()
    start_time = time.time()
    for i in range(100):
        loss = model(data).sum()
        loss.backward()
        if i % 10 == 0 and i != 0:
            # wait for queued kernels to finish before reading the clock
            torch.cuda.synchronize()
            print("10 iterations on GPU (forward + backward)", time.time() - start_time)
            start_time = time.time()
            
# random input scaled to roughly [-10, 10)
data = torch.rand(16, 3, 224, 224, dtype=torch.float32).cuda() * 20 - 10
data.requires_grad = True

# model = ResNetDeform([3, 4, 23, 3]).cuda()
model = resnet101().cuda()
for name, param in model.named_parameters():
    param.requires_grad = True  # already the default for parameters

test_time(data, model)

Here are the results for the standard resnet101:

10 iterations on GPU (forward + backward) 1.6788671016693115
10 iterations on GPU (forward + backward) 1.4698841571807861
10 iterations on GPU (forward + backward) 1.476891040802002
10 iterations on GPU (forward + backward) 1.4722325801849365
10 iterations on GPU (forward + backward) 1.4735956192016602
10 iterations on GPU (forward + backward) 1.4764328002929688
10 iterations on GPU (forward + backward) 1.4757957458496094
10 iterations on GPU (forward + backward) 1.4783174991607666
10 iterations on GPU (forward + backward) 1.4843554496765137

and for resnet101 with deformable layers:

10 iterations on GPU (forward + backward) 1.9449117183685303
10 iterations on GPU (forward + backward) 1.6792359352111816
10 iterations on GPU (forward + backward) 1.685164213180542
10 iterations on GPU (forward + backward) 1.6824781894683838
10 iterations on GPU (forward + backward) 1.6872413158416748
10 iterations on GPU (forward + backward) 1.6899969577789307
10 iterations on GPU (forward + backward) 1.7001721858978271
10 iterations on GPU (forward + backward) 1.675361156463623
10 iterations on GPU (forward + backward) 1.6916632652282715
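So with the deformable layers confined to the small 7x7 maps of the last stage, the overhead is only about 14% here. To see exactly which ops account for a larger gap, such as the one in the MNIST model above, one could wrap a single iteration in the autograd profiler. A generic sketch, not from this repo (table formatting varies across PyTorch versions, and newer versions also accept profiler.profile(use_cuda=True) for per-kernel CUDA times):

import torch
from torch.autograd import profiler

# model and data as defined in the benchmark above
torch.cuda.synchronize()
with profiler.profile() as prof:
    loss = model(data).sum()
    loss.backward()
torch.cuda.synchronize()
print(prof)  # per-op timing table; the deformable conv's backward ops should stand out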