import numpy as np
import tensorflow as tf
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
TensorFlow
tf.random.set_seed(400)
x_data = np.array([[50, 47], [40, 43], [30, 15], [85, 83], [97, 85], [100, 100]], dtype=np.float32)
y_data = np.array([[0], [0], [0], [1], [1], [1]], dtype=np.float32)
w = tf.Variable(tf.random.normal([2, 1]))
b = tf.Variable(tf.random.normal([1]))
learning_rate = 0.0001
for i in range(50001):
    with tf.GradientTape() as tape:
        hypothesis = 1 / (1 + tf.exp(-(tf.matmul(x_data, w) + b)))  # sigmoid
        cost = -tf.reduce_mean(y_data * tf.math.log(hypothesis) + (1 - y_data) * tf.math.log(1 - hypothesis))  # mean binary cross-entropy
    w_grad, b_grad = tape.gradient(cost, [w, b])
    w.assign_sub(learning_rate * w_grad)
    b.assign_sub(learning_rate * b_grad)
    if i % 500 == 0:
        print(f'Step : {i}, Cost : {cost.numpy()}, Weight : {w.numpy()[0]}, Bias : {b.numpy()[0]}')
temp_x = np.array([[30, 25]], dtype=np.float32)
logistic = 1 / (1 + tf.exp(-(tf.matmul(temp_x, w) + b)))
print(f"점수가 {temp_x[0]} 일 때", tf.cast(logistic > 0.5, dtype=tf.float32).numpy()[0])
PyTorch
x_data = np.array([[50, 47], [40, 43], [30, 15], [85, 83], [97, 85], [100, 100]], dtype=np.float32)
y_data = np.array([[0], [0], [0], [1], [1], [1]], dtype=np.float32)
x_data = torch.FloatTensor(x_data)
y_data = torch.FloatTensor(y_data)
w = torch.zeros((2, 1), requires_grad=True)
b = torch.zeros(1, requires_grad=True)
learning_rate = 0.0001
optimizer = optim.SGD([w, b], lr=learning_rate)
for i in range(50001):
    hypothesis = torch.sigmoid(x_data.matmul(w) + b)
    cost = -(y_data * torch.log(hypothesis) +
             (1 - y_data) * torch.log(1 - hypothesis)).mean()
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
    if i % 500 == 0:
        print(f'Step : {i}, Cost : {cost.item()}')
temp_x = np.array([[30, 25]], dtype=np.float32)
temp_x = torch.FloatTensor(temp_x)
if torch.sigmoid(temp_x.matmul(w) + b).item() > 0.5:
    rst = 1
else:
    rst = 0
print(rst)
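Since no gradients are needed at prediction time, the same check can be wrapped in torch.no_grad() so autograd skips tracking; a minimal sketch using the w and b trained above:

# Sketch: prediction without gradient tracking
with torch.no_grad():
    prob = torch.sigmoid(temp_x.matmul(w) + b)
print(int(prob.item() > 0.5))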
PyTorch (with nn.Module)
class BinaryClassifier(nn.Module):
    def __init__(self):
        super().__init__()
        self.linear = nn.Linear(2, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        return self.sigmoid(self.linear(x))
x_data = np.array([[50, 47], [40, 43], [30, 15], [85, 83], [97, 85], [100, 100]], dtype=np.float32)
y_data = np.array([[0], [0], [0], [1], [1], [1]], dtype=np.float32)
x_data = torch.FloatTensor(x_data)
y_data = torch.FloatTensor(y_data)
model = BinaryClassifier()
learning_rate = 0.0001
optimizer = optim.SGD(model.parameters(), lr=learning_rate)
for i in range(50001):
    hypothesis = model(x_data)
    cost = F.binary_cross_entropy(hypothesis, y_data)
    optimizer.zero_grad()
    cost.backward()
    optimizer.step()
    if i % 500 == 0:
        prediction = hypothesis >= torch.FloatTensor([0.5])
        correct_prediction = prediction.float() == y_data
        accuracy = correct_prediction.sum().item() / len(correct_prediction)
        print(f'Step : {i}, Cost : {cost.item()}, Accuracy : {accuracy}')
temp_x = np.array([[30, 25]], dtype=np.float32)
temp_x = torch.FloatTensor(temp_x)
if model(temp_x).item() > 0.5:
    rst = 1
else:
    rst = 0
print(rst)
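One caveat: F.binary_cross_entropy on sigmoid outputs can hit log(0) when the sigmoid saturates. A numerically safer variant keeps the model as raw logits and uses F.binary_cross_entropy_with_logits, applying the sigmoid only at prediction time. A minimal sketch under that assumption; the bare nn.Linear model below is illustrative, not the post's original architecture.

# Sketch: raw logits + BCE-with-logits for numerical stability
logit_model = nn.Linear(2, 1)                 # outputs raw logits, no Sigmoid layer
opt = optim.SGD(logit_model.parameters(), lr=0.0001)
for step in range(50001):
    cost = F.binary_cross_entropy_with_logits(logit_model(x_data), y_data)
    opt.zero_grad()
    cost.backward()
    opt.step()
pred = (torch.sigmoid(logit_model(temp_x)) > 0.5).float()  # sigmoid only at prediction time
print(pred.item())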