1 The view method
import torch
a = torch.Tensor([[[1, 2, 3],
                   [4, 5, 6]]])
b = torch.Tensor([1,2,3,4,5,6])
print(a.view(-1))
print(b.view(-1))
————
Result:
tensor([1., 2., 3., 4., 5., 6.])
tensor([1., 2., 3., 4., 5., 6.])
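view can also reshape into an explicit target shape, as long as the total number of elements is unchanged. A minimal sketch (not from the original code, reusing the same a):
import torch
a = torch.Tensor([[[1, 2, 3],
                   [4, 5, 6]]])
print(a.view(2, 3))   # reshape the 1x2x3 tensor into 2x3
print(a.view(3, -1))  # -1 lets PyTorch infer the remaining dimension, here 2, giving a 3x2 tensor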
2 The randint method
a = torch.randint(low=0, high=10, size=(10,1))
print(a)
————
Result:
tensor([[3],
        [9],
        [6],
        [9],
        [1],
        [0],
        [9],
        [8],
        [4],
        [2]])
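Note that high is exclusive, so the values above fall in [0, 10), and the default dtype is torch.int64. A small sketch to illustrate (names assumed, not from the post):
import torch
b = torch.randint(low=0, high=10, size=(2, 3))
print(b.dtype)  # torch.int64
print(b)        # values in [0, 10), 10 itself never appears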
3 The clamp method
a = torch.clamp(a, 3, 5)  # clamp the random tensor from section 2 into the range [3, 5]
print(a)
————
Result:
tensor([[3],
        [5],
        [5],
        [5],
        [3],
        [3],
        [5],
        [5],
        [4],
        [3]])
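clamp also accepts only a lower or only an upper bound. A minimal sketch (my own example, not from the post):
import torch
x = torch.tensor([-2., 0., 3., 7.])
print(torch.clamp(x, min=0))  # clip only from below: tensor([0., 0., 3., 7.])
print(torch.clamp(x, max=5))  # clip only from above: tensor([-2., 0., 3., 5.])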
4 requires_grad and .data
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
print(a)
out = a.tanh()
print(out)
c = out.data  # note: a tensor "detached" via .data shares the same underlying storage with the original, and the detached tensor does not require grad; when c changes, the original tensor changes too
c.zero_()  # changing c in place also changes the original out
print(c.requires_grad)
print(c)
print(out.requires_grad)
print(out)
print("----------------------------------------------")
out.sum().backward()  # backpropagate through the original out
print(a.grad)  # no error is raised, but the result is wrong: tanh's backward uses the saved output, which is now all zeros, so the gradient becomes 1 - 0**2 = 1 everywhere
————
Result:
tensor([1., 2., 3.], requires_grad=True)
tensor([0.7616, 0.9640, 0.9951], grad_fn=<TanhBackward>)
False
tensor([0., 0., 0.])
True
tensor([0., 0., 0.], grad_fn=<TanhBackward>)
----------------------------------------------
tensor([1., 1., 1.])
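For reference, the correct gradient of tanh is 1 - tanh(x)**2, which is why zeroing out through .data makes autograd return 1 for every element. A minimal sketch of the untouched case (my addition, not from the post):
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
out = a.tanh()
out.sum().backward()
print(a.grad)                          # correct gradient: 1 - tanh(a)**2
print(1 - torch.tanh(a.detach())**2)   # roughly tensor([0.4200, 0.0707, 0.0099]), matching a.grad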
5 The detach method
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
print(a)
out = a.tanh()
print(out)
# Note: a tensor "detached" via .detach() shares the same underlying storage with the original,
# and the detached tensor does not require grad; when c changes, the original tensor changes too
c = out.detach()
c.zero_()  # changing c in place also changes the original out
print(c.requires_grad)
print(c)
print(out.requires_grad)
print(out)
print("----------------------------------------------")
out.sum().backward()  # backpropagate through the original out
print(a.grad)  # this raises an error (see below): a tensor needed for gradient computation has been modified by an in-place operation
————
Result:
tensor([1., 2., 3.], requires_grad=True)
tensor([0.7616, 0.9640, 0.9951], grad_fn=<TanhBackward>)
False
tensor([0., 0., 0.])
True
tensor([0., 0., 0.], grad_fn=<TanhBackward>)
----------------------------------------------
Error (traceback excerpt):
---> out.sum().backward()
(raises a RuntimeError: a tensor needed for gradient computation has been modified by an in-place operation)
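If an independent, modifiable copy is needed, one common pattern is to detach and then clone, so the copy no longer shares storage with out and backward stays valid. A minimal sketch of that pattern (my addition, not the original post's code):
import torch
a = torch.tensor([1, 2, 3.], requires_grad=True)
out = a.tanh()
c = out.detach().clone()  # no grad tracking and no shared storage
c.zero_()                 # safe: out itself is untouched
out.sum().backward()      # no error, gradients are computed as usual
print(a.grad)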
6 matmul + CUDA
import torch
device = 'cuda:0'
a = torch.zeros(2, 3).to(device)
print(type(a))
b = torch.ones(3, 4).to(device)
print(type(b))
c = torch.matmul(a, b)
print(c)
————
Result:
<class 'torch.Tensor'>
<class 'torch.Tensor'>
tensor([[0., 0., 0., 0.],
        [0., 0., 0., 0.]], device='cuda:0')
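On a machine without a GPU the code above fails, so it is common to fall back to the CPU. A minimal sketch (my addition):
import torch
device = 'cuda:0' if torch.cuda.is_available() else 'cpu'
a = torch.zeros(2, 3, device=device)
b = torch.ones(3, 4, device=device)
print(torch.matmul(a, b).shape)  # torch.Size([2, 4]): (2x3) @ (3x4)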
7 Using the nn.Embedding module
import torch
from torch import nn
embedding1 = nn.Embedding(10, 3)  # a lookup table of 10 vectors, each of dimension 3
embedding2 = nn.Embedding(8, 4)   # defined but not used below
inputs = torch.LongTensor([[1, 2, 4, 5], [4, 3, 2, 9]])  # every index must be smaller than 10, embedding1's vocabulary size
print(inputs)
outputs = embedding1(inputs)
print(outputs)
————
Result:
tensor([[1, 2, 4, 5],
        [4, 3, 2, 9]])
tensor([[[ 2.4182e+00,  5.1324e-01, -2.3636e-01],
         [ 9.6180e-01,  1.3771e+00,  2.2036e+00],
         [ 1.7772e-02,  3.0630e-01, -7.9741e-01],
         [-9.2795e-01,  1.9076e+00,  3.5437e+00]],

        [[ 1.7772e-02,  3.0630e-01, -7.9741e-01],
         [ 1.6840e-03,  5.2214e-01,  7.9724e-01],
         [ 9.6180e-01,  1.3771e+00,  2.2036e+00],
         [ 1.7058e-01,  5.4104e-01,  1.4526e+00]]],
       grad_fn=<EmbeddingBackward>)
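The lookup is nothing more than row indexing into a learnable weight matrix of shape (num_embeddings, embedding_dim). A minimal sketch to check this (names assumed, not from the post):
import torch
from torch import nn
embedding1 = nn.Embedding(10, 3)
idx = torch.LongTensor([1])
print(embedding1.weight.shape)                               # torch.Size([10, 3])
print(torch.equal(embedding1(idx), embedding1.weight[idx]))  # True: index 1 selects row 1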
8 Computing RMSE and MAE
import math
import numpy as np
a1 = np.array([0.7, 0.8, 1.2, 1])
a2 = np.array([1, 1, 1, 1])
error = a1 - a2
square_error = [i**2 for i in error]
abs_error = [abs(i) for i in error]
# RMSE
print(math.sqrt(np.array(square_error).sum() / len(square_error)))
# MAE
print(np.array(abs_error).sum() / len(abs_error))
————
Result:
0.206155281280883  # RMSE
0.175  # MAE
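The same metrics can also be written directly with vectorized numpy calls. An equivalent sketch:
import numpy as np
a1 = np.array([0.7, 0.8, 1.2, 1])
a2 = np.array([1, 1, 1, 1])
error = a1 - a2
rmse = np.sqrt(np.mean(error ** 2))  # root mean squared error
mae = np.mean(np.abs(error))         # mean absolute error
print(rmse, mae)                     # 0.20615528128088303 0.175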