Is F.grid_sample differentiable?


Conclusion: it is differentiable. In bilinear mode, grid_sample backpropagates gradients both into the sampled input and into the sampling grid, so a network that generates the grid can be trained end-to-end.
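A quick way to see this is to build an identity sampling grid, promote it to a leaf tensor, and check that backward() produces gradients for both the input and the grid. This is a minimal sketch (the shapes and the affine_grid identity grid are illustrative, not from the original post):

import torch
import torch.nn.functional as F

inp = torch.rand(1, 3, 8, 8, requires_grad=True)
# Identity sampling grid in normalized [-1, 1] coordinates, made a leaf tensor
# so its gradient can be inspected after backward().
grid = F.affine_grid(torch.eye(2, 3).unsqueeze(0), [1, 3, 8, 8],
                     align_corners=True).requires_grad_(True)

out = F.grid_sample(inp, grid, mode='bilinear', align_corners=True)
out.sum().backward()

print(inp.grad is not None)   # True: differentiable w.r.t. the sampled input
print(grid.grad is not None)  # True: differentiable w.r.t. the sampling grid

The fuller example below predicts pixel offsets with a 1x1 convolution, turns them into a sampling grid, and checks that gradients reach both the convolution weights and the input: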

import torch
import torch.nn as nn
import torch.nn.functional as F


class Sampler(nn.Module):
    """Thin wrapper around F.grid_sample so it can be used like any other module."""
    def forward(self, z, grid):
        return F.grid_sample(z, grid, mode='bilinear', align_corners=True)


# Input feature map; requires_grad=True so we can check that gradients reach it.
z = torch.rand(1, 3, 10, 10, requires_grad=True)

# 1x1 convolution that predicts a 2-channel offset field (dy, dx) from z.
k = nn.Conv2d(in_channels=3, out_channels=2, kernel_size=1, padding=0, bias=False)
sampler = Sampler()

optimizer = torch.optim.Adam(k.parameters(), lr=0.1)

# Offsets in pixel units, predicted from the input itself.
offset = k(z)

# Base pixel coordinates of the 10x10 output grid.
y, x = torch.meshgrid(torch.arange(10, dtype=torch.float32),
                      torch.arange(10, dtype=torch.float32), indexing='ij')
y = y.view(1, 1, 10, 10) + offset[:, :1, :, :]
x = x.view(1, 1, 10, 10) + offset[:, 1:, :, :]

# Normalize pixel coordinates to [-1, 1]. With align_corners=True the corner
# pixels map exactly to -1 and 1, so the scaling uses (size - 1).
H, W = z.size(2), z.size(3)
x = 2.0 * x / (W - 1) - 1.0
y = 2.0 * y / (H - 1) - 1.0

# grid_sample expects the last dimension of the grid ordered as (x, y).
grid = torch.cat([x, y], dim=1).permute(0, 2, 3, 1).contiguous()

s = sampler(z, grid)
p = s.sum()
loss = (p - 1) * (p - 1)

print(k.weight)   # conv weights before the update
print(z.grad)     # None: backward has not run yet
optimizer.zero_grad()
loss.backward()
optimizer.step()
print(k.weight)   # changed: gradients reached the conv through the grid
print(z.grad)     # populated: gradients also reached the sampled input
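Before backward() the script prints None for z.grad; afterwards z.grad is populated and k.weight changes after optimizer.step(), which shows that grid_sample backpropagates both through the sampled input and through the grid, and therefore through the convolution that produced the offsets. Note that this holds for mode='bilinear'; with mode='nearest' the output is piecewise constant in the grid, so the gradient with respect to the grid is zero.

For a stricter numerical check, torch.autograd.gradcheck can compare the analytic gradients of grid_sample against finite differences on a tiny double-precision input. This is a sketch, not part of the original post:

import torch
import torch.nn.functional as F

# gradcheck needs float64 inputs; keep the tensors tiny so the numerical
# Jacobian stays cheap to evaluate.
inp = torch.rand(1, 1, 4, 4, dtype=torch.double, requires_grad=True)
grid = (torch.rand(1, 3, 3, 2, dtype=torch.double) * 2 - 1).requires_grad_(True)

ok = torch.autograd.gradcheck(
    lambda i, g: F.grid_sample(i, g, mode='bilinear', align_corners=True),
    (inp, grid))
print(ok)  # prints True; gradcheck raises if analytic and numerical gradients disagree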


