# Softmax multi-class classification with gradient descent (TF2 eager mode)
import numpy as np
import tensorflow as tf

x_data = np.array([[1, 2, 1, 1],[2, 1, 3, 2],[3, 1, 3, 4],[4, 1, 5, 5],[1, 7, 5, 5],[1, 2, 5, 6],[1, 6, 6, 6],[1, 7, 7, 7]], dtype=np.float32)
y_data = np.array([[0, 0, 1],[0, 0, 1],[0, 0, 1],[0, 1, 0],[0, 1, 0],[0, 1, 0],[1, 0, 0],[1, 0, 0]], dtype=np.float32)

W = tf.Variable(tf.random.normal([4, 3]))  # weights: 4 features -> 3 classes
b = tf.Variable(tf.random.normal([3]))     # one bias per class

learning_rate = 0.001

for i in range(5001):

    with tf.GradientTape() as tape:
        # Forward pass: softmax hypothesis and cross-entropy cost
        hypothesis = tf.nn.softmax(tf.matmul(x_data, W) + b)
        cost = tf.reduce_mean(-tf.reduce_sum(y_data * tf.math.log(hypothesis), axis=1))

    # Compute gradients outside the tape context, then apply gradient descent
    W_grad, b_grad = tape.gradient(cost, [W, b])
    W.assign_sub(learning_rate * W_grad)
    b.assign_sub(learning_rate * b_grad)

    if i % 500 == 0:
        print(f'Step : {i}, Cost : {cost.numpy()}, Weight : {W.numpy()[0]}, Bias : {b.numpy()[0]}')
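
After training, the learned W and b can be used directly for prediction. Below is a minimal sketch (not part of the original code) that checks the model on the training data, using argmax to convert the softmax output and the one-hot labels into class indices:

# Sketch: evaluate the trained model (assumes W, b, x_data, y_data from above are in scope)
predicted = tf.argmax(tf.nn.softmax(tf.matmul(x_data, W) + b), axis=1)
actual = tf.argmax(y_data, axis=1)
accuracy = tf.reduce_mean(tf.cast(tf.equal(predicted, actual), tf.float32))
print(f'Predicted : {predicted.numpy()}, Actual : {actual.numpy()}, Accuracy : {accuracy.numpy()}')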