▪︎ 자동미분

# Autograd basics: build y = x**2 and z = 3*y, then backpropagate.
# NOTE: `import torch` was previously first done much later in the file
# (before the linear-regression section), so this line raised NameError
# when the file ran top-to-bottom; import it here where it is first used.
import torch

x = torch.tensor([1.], requires_grad=True)
y = x ** 2
print(y)
print(x.grad)  # None: no backward pass has run yet, so no gradient is stored
# tensor([1.], grad_fn=<PowBackward0>)
# None

z = 3 * y
print(z)
# tensor([3.], grad_fn=<MulBackward0>)

z.backward()
print(x.grad)  # dz/dx = 6x = 6 at x = 1
# tensor([6.])
# Chain rule through composed functions: c = (x**2 + 1)**2,
# so dc/dx = 2 * (x**2 + 1) * 2x = 8 at x = 1.
x = torch.tensor([1.0], requires_grad=True)
a = x.pow(2)
b = 1 + a
c = b.pow(2)

c.backward()
print(x.grad)
# tensor([8.])

# detach() returns a tensor sharing the same data but cut off from the graph.
result = x.grad.detach()
print(result)
print(result.requires_grad)  # detached tensors never track gradients
print(result.item())         # item() extracts the value as a plain Python float
# tensor([8.])
# False
# 8.0
# Partial derivatives: z = 2*x**2 + y**2, hence dz/dx = 4x and dz/dy = 2y.
x = torch.tensor([1.0], requires_grad=True)
y = torch.tensor([3.0], requires_grad=True)
z = 2 * x.pow(2) + y.pow(2)

print(z)
# tensor([11.], grad_fn=<AddBackward0>)

z.backward()
print(x.grad)  # dz/dx = 4 * 1 = 4
print(y.grad)  # dz/dy = 2 * 3 = 6
# tensor([4.])
# tensor([6.])
# torch.no_grad(): operations inside the context are excluded from the
# autograd graph, but the requires_grad flag on existing tensors is untouched.
x = torch.tensor([1.], requires_grad=True)

with torch.no_grad():
    y = x ** 2
    print(x.requires_grad)  # still True — no_grad does not mutate x itself
    print(y)                # y has no grad_fn: graph recording was disabled
    # True
    # tensor([1.])

print(x.requires_grad)
# True

# y was produced under no_grad, so it has no grad_fn and backward() raises
# RuntimeError. Previously this error was left uncaught, which aborted the
# script before the linear-regression section below ever ran; catch it so
# the demonstration still shows the error while the file keeps executing.
try:
    y.backward()
except RuntimeError as exc:
    print(f"RuntimeError: {exc}")

▪︎ 선형회귀모델

▫︎ 단일선형회귀

import torch
import torch.nn as nn
import torch.optim as optim
import matplotlib.pyplot as plt
import numpy as np