import torch as t
from torch import nn
n, d, m = 3, 5, 7
embedding = nn.Embedding(n, d, max_norm=True)  # max_norm=True acts as max_norm=1.0; looked-up rows get renormalized
W = t.randn((m, d), requires_grad=True)
idx = t.tensor([1, 2])
a = embedding.weight.clone() @ W.t()  # weight must be cloned for this to be differentiable
b = embedding(idx) @ W.t()  # the max_norm lookup modifies embedding.weight in place
out = a.unsqueeze(0) + b.unsqueeze(1)  # broadcast (1, 3, 7) + (2, 1, 7) -> (2, 3, 7)
print(a.unsqueeze(0).shape, b.unsqueeze(1).shape)
print(a.unsqueeze(0), "\n", b.unsqueeze(1), "\n", out.shape)
print(out)
loss = out.sigmoid().prod()  # reduce to a scalar so backward() needs no grad argument
loss.backward()
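
Two comments in the snippet deserve a closer look. First, calling `embedding(idx)` with `max_norm` set renormalizes the selected rows of `embedding.weight` in place during the forward pass. Second, that in-place write is exactly why `embedding.weight.clone()` is needed on the line computing `a`: without the clone, backward() through `a` would hit a tensor that was later modified in place. Below is a minimal sketch of the renorm effect; the variable names (`emb`, `rows`, `before`) are mine, not part of the original snippet.

# Sketch: observe the in-place renormalization done by a max_norm lookup.
emb = nn.Embedding(3, 5, max_norm=1.0)  # max_norm=True behaves the same, since True == 1.0
rows = t.tensor([1, 2])
before = emb.weight.detach().clone()
with t.no_grad():
    emb(rows)  # the lookup renormalizes rows 1 and 2 of emb.weight in place
print(before.norm(dim=1))               # original row norms (typically > 1 for randn init)
print(emb.weight.detach().norm(dim=1))  # rows 1 and 2 now have norm <= 1.0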
Output:
torch.Size([1, 3, 7]) torch.Size([2, 1, 7])
tensor([[[ 1.0560, -0.3363, 1.9043, -0.8517, 0.0666, -0.1867, -0.1422],
[-1.0370, -0.8827, 0.1464, -0.4847, -0.0349, -0.2626, 0.5775],
[ 0.8159, -4.3308, 4.8820, -0.4751, -0.0614, -2.5736, -0.6585]]],
grad_fn=<UnsqueezeBackward0>)
tensor([[[-1.0370, -0.8827, 0.1464, -0.4847, -0.0349, -0.2626, 0.5775]],
[[ 0.3994, -2.1200, 2.3899, -0.2326, -0.0301, -1.2598, -0.3223]]],
grad_fn=<UnsqueezeBackward0>)
torch.Size([2, 3, 7])
tensor([[[ 0.0191, -1.2190, 2.0508, -1.3364, 0.0317, -0.4493, 0.4352],
[-2.0739, -1.7654, 0.2929, -0.9693, -0.0699, -0.5253, 1.1550],
[-0.2211, -5.2135, 5.0285, -0.9598, -0.0963, -2.8362, -0.0810]],
[[ 1.4554, -2.4563, 4.2942, -1.0843, 0.0366, -1.4465, -0.4646],
[-0.6376, -3.0027, 2.5363, -0.7172, -0.0650, -1.5225, 0.2551],
[ 1.2152, -6.4508, 7.2719, -0.7077, -0.0915, -3.8334, -0.9808]]],
grad_fn=<AddBackward0>)
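
For reference, the `torch.Size([2, 3, 7])` above is plain broadcasting: `a.unsqueeze(0)` has shape (1, 3, 7), `b.unsqueeze(1)` has shape (2, 1, 7), and each size-1 dimension expands to match the other operand. A quick standalone check (throwaway names):

x = t.zeros(1, 3, 7)
y = t.zeros(2, 1, 7)
print((x + y).shape)  # torch.Size([2, 3, 7])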