author    zhang <zch921005@126.com>    2022-05-04 08:47:54 +0800
committer zhang <zch921005@126.com>    2022-05-04 08:47:54 +0800
commit    2180c68999eb8dc0c7bcec015b2703f5b8b20223 (patch)
tree      3ec71623038ff8b90a5bc4e32da14a7382d42d9d /learn_torch/classification.py
parent    70aebb73b81b50911e2107cd4519e69f09971021 (diff)
ndarray axis
Diffstat (limited to 'learn_torch/classification.py')
-rw-r--r--    learn_torch/classification.py    95
1 files changed, 95 insertions, 0 deletions
diff --git a/learn_torch/classification.py b/learn_torch/classification.py
new file mode 100644
index 0000000..555da9f
--- /dev/null
+++ b/learn_torch/classification.py
@@ -0,0 +1,95 @@
+import torch
+from torch import nn
+from torch.utils.data import DataLoader
+from torchvision import datasets
+from torchvision.transforms import ToTensor
+
+
+# Get cpu or gpu device for training.
+device = "cuda" if torch.cuda.is_available() else "cpu"
+print("Using {} device".format(device))
+
+
+def gen_data():
+    # Download training data from open datasets.
+    training_data = datasets.FashionMNIST(
+        root="data",
+        train=True,
+        download=True,
+        transform=ToTensor(),
+    )
+
+    # Download test data from open datasets.
+    test_data = datasets.FashionMNIST(
+        root="data",
+        train=False,
+        download=True,
+        transform=ToTensor(),
+    )
+    return training_data, test_data
+
+
+# Define model
+class NeuralNetwork(nn.Module):
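+    # A simple MLP classifier: 28x28 images are flattened to 784-dim vectors,
+    # then passed through two 512-unit hidden layers to `out_features` logits.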
+    def __init__(self, in_features, out_features):
+        super(NeuralNetwork, self).__init__()
+        self.flatten = nn.Flatten()
+        self.linear_relu_stack = nn.Sequential(
+            nn.Linear(in_features, 512),
+            nn.ReLU(),
+            nn.Linear(512, 512),
+            nn.ReLU(),
+            nn.Linear(512, out_features)
+        )
+
+    def forward(self, x):
+        x = self.flatten(x)
+        logits = self.linear_relu_stack(x)
+        return logits
+
+
+def test(dataloader, model, loss_fn):
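+    # Evaluate on the held-out set: eval() puts the model in inference mode and
+    # torch.no_grad() disables gradient tracking to save memory.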
+    size = len(dataloader.dataset)
+    num_batches = len(dataloader)
+    model.eval()
+    test_loss, correct = 0, 0
+    with torch.no_grad():
+        for X, y in dataloader:
+            X, y = X.to(device), y.to(device)
+            pred = model(X)
+            test_loss += loss_fn(pred, y).item()
+            correct += (pred.argmax(1) == y).type(torch.float).sum().item()
+    test_loss /= num_batches
+    correct /= size
+    print(f"Test Error: \n Accuracy: {(100*correct):>0.1f}%, Avg loss: {test_loss:>8f} \n")
+
+
+if __name__ == '__main__':
+    batch_size = 64
+    training_data, test_data = gen_data()
+    # Create data loaders.
+    train_dataloader = DataLoader(training_data, batch_size=batch_size)
+    test_dataloader = DataLoader(test_data, batch_size=batch_size)
+
+    for i, (X, y) in enumerate(test_dataloader):
+        print("{}, Shape of X [N, C, H, W]: {}, y: {}".format(i, X.shape, y.shape))
+        break
+
+    model = NeuralNetwork(28*28, 10).to(device)
+    print(model)
+    criterion = nn.CrossEntropyLoss()
+    opt = torch.optim.SGD(model.parameters(), lr=1e-3)
+
+    n_epochs = 10
+
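+    # Training loop: for each batch, reset gradients, run the forward pass,
+    # compute the cross-entropy loss, backpropagate, and update the weights.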
+    for epoch in range(n_epochs):
+        for i, (x_batch, y_batch) in enumerate(train_dataloader):
+            # Move the batch to the same device as the model.
+            x_batch, y_batch = x_batch.to(device), y_batch.to(device)
+            opt.zero_grad()
+            preds = model(x_batch)
+            loss = criterion(preds, y_batch)
+            loss.backward()
+            opt.step()
+            if (i + 1) % 100 == 0:
+                print('Epoch {} [{}/{}] train loss: {:.4f}'.format(epoch, i + 1, len(train_dataloader), loss.item()))
+
+    test(test_dataloader, model, criterion)