In fact, N is the batch size, so N is the value you need to modify; currently it is set to 64. That means every training batch contains 64 vectors, each of size/dimension D_in.
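
To make that concrete, here is a tiny sketch (plain numpy; the variable name demo_batch is just mine for illustration) of what one batch looks like with the default values:

import numpy as np

N, D_in = 64, 1000                     # same values as in the example below
demo_batch = np.random.randn(N, D_in)  # one training batch
print(demo_batch.shape)                # (64, 1000): 64 input vectors, each of length 1000

If you change N to, say, 32, the only thing that changes is the number of rows, i.e. how many samples are processed per gradient step.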
I checked the link you posted; you can also take a look at the second example - there is actually some explanation in the comments too :)
# -*- coding: utf-8 -*-
import numpy as np
# N is batch size; D_in is input dimension;
# H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10
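# With these values, x below will have shape (64, 1000) and y shape (64, 10),
# i.e. 64 samples per batch, each with 1000 input features and 10 targets.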
# Create random input and output data
x = np.random.randn(N, D_in)
y = np.random.randn(N, D_out)
# Randomly initialize weights
w1 = np.random.randn(D_in, H)
w2 = np.random.randn(H, D_out)
learning_rate = 1e-6
for t in range(500):
    # Forward pass: compute predicted y
    h = x.dot(w1)
    h_relu = np.maximum(h, 0)  # ReLU: clip negative activations to zero
    y_pred = h_relu.dot(w2)

    # Compute and print loss
    loss = np.square(y_pred - y).sum()
    print(t, loss)

    # Backprop to compute gradients of w1 and w2 with respect to loss
    grad_y_pred = 2.0 * (y_pred - y)
    grad_w2 = h_relu.T.dot(grad_y_pred)
    grad_h_relu = grad_y_pred.dot(w2.T)
    grad_h = grad_h_relu.copy()
    grad_h[h < 0] = 0
    grad_w1 = x.T.dot(grad_h)

    # Update weights
    w1 -= learning_rate * grad_w1
    w2 -= learning_rate * grad_w2
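
After the loop finishes, w1 and w2 hold the trained weights, so making predictions on new data is just the same forward pass again. A small follow-up sketch (x_new is a hypothetical batch of fresh inputs, not part of the original example; it reuses D_in, w1 and w2 from the code above):

# Hypothetical new batch of 5 samples with the same input dimension D_in
x_new = np.random.randn(5, D_in)

# Same forward pass as inside the training loop, reusing the trained weights
y_new_pred = np.maximum(x_new.dot(w1), 0).dot(w2)
print(y_new_pred.shape)  # (5, 10): one D_out-dimensional prediction per sample

Note that the 64 only matters during training; the matrix math works for any number of rows, so at prediction time the batch can be whatever size you like.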