TensorFlow MNIST LeNet-5
import tensorflow as tf
from tensorflow.keras.utils import to_categorical
import numpy as np
from tensorflow.keras.layers import Conv2D, Dense, Flatten, Input, AveragePooling2D
from tensorflow.keras import Model

# Download the MNIST dataset using Keras
(X_train, y_train), (X_test, y_test) = tf.keras.datasets.mnist.load_data()

# scale pixel values to [0, 1]
X_train = X_train.astype('float32') / 255.0
X_test = X_test.astype('float32') / 255.0

# one-hot encode the labels
num_classes = 10
y_train = to_categorical(y_train, num_classes)
y_test = to_categorical(y_test, num_classes)

# add a channel dimension: (N, 28, 28) -> (N, 28, 28, 1)
X_train = X_train[:, :, :, np.newaxis]
X_test = X_test[:, :, :, np.newaxis]
print(X_train.shape, X_test.shape, y_train.shape, y_test.shape)

# LeNet-5 style network: two conv / average-pooling blocks followed by three dense layers
image_input = Input(shape=(28, 28, 1))
x = Conv2D(6, kernel_size=(5, 5), strides=1, activation='relu')(image_input)
x = AveragePooling2D(2)(x)
x = Conv2D(16, kernel_size=(5, 5), strides=1, activation='relu')(x)
x = AveragePooling2D(2)(x)
x = Flatten()(x)
x = Dense(units=120, activation='relu')(x)
x = Dense(units=84, activation='relu')(x)
output = Dense(10, activation='softmax')(x)

model = Model(image_input, output)
model.summary()

model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit(X_train, y_train, epochs=100, batch_size=8, validation_data=(X_test, y_test), verbose=1)
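
Once training finishes, a quick way to check the trained Keras model is model.evaluate and model.predict. A minimal sketch, assuming the X_test / y_test arrays prepared above:

# evaluate the trained model on the held-out test set
test_loss, test_acc = model.evaluate(X_test, y_test, verbose=0)
print('test loss: {:.4f}, test accuracy: {:.4f}'.format(test_loss, test_acc))

# predict class probabilities for the first few test images and take the argmax
probs = model.predict(X_test[:5])
print('predicted labels:', np.argmax(probs, axis=1))
print('true labels     :', np.argmax(y_test[:5], axis=1))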
PyTorch MNIST LeNet-5 => needs revision later ***
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import matplotlib.pyplot as plt

# check device
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# hyperparameters
random_seed = 42
lr = 0.001
batch_size = 32
n_epochs = 15
num_class = 10

torch.manual_seed(random_seed)

# DataLoader options: worker processes and pinned memory for faster host-to-GPU transfer
kwargs = {'num_workers': 2, 'pin_memory': True}

# download the MNIST dataset
mnist_train = datasets.MNIST(root='MNIST_data/',
                             train=True,
                             transform=transforms.ToTensor(),
                             download=True)
mnist_test = datasets.MNIST(root='MNIST_data/',
                            train=False,
                            transform=transforms.ToTensor(),
                            download=True)

train_loader = DataLoader(dataset=mnist_train,
                          batch_size=batch_size,
                          shuffle=True, **kwargs)
test_loader = DataLoader(dataset=mnist_test,
                         batch_size=batch_size,
                         shuffle=False, **kwargs)
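
matplotlib is imported above but never used in the original snippet; one way to put it to work is a quick sanity check of a single training batch. A minimal sketch:

# grab one batch and display the first image with its label
images, labels = next(iter(train_loader))
plt.imshow(images[0].squeeze().numpy(), cmap='gray')
plt.title('label: {}'.format(labels[0].item()))
plt.show()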
class MyModel(nn.Module):
    def __init__(self):
        super(MyModel, self).__init__()
        # LeNet-5 style feature extractor: pad the 28x28 input so conv1 keeps 28x28
        self.conv1 = nn.Conv2d(in_channels=1, out_channels=6, kernel_size=5, padding=2)
        self.conv2 = nn.Conv2d(in_channels=6, out_channels=16, kernel_size=5)
        # after conv1+pool (14x14) and conv2+pool (5x5): 16 * 5 * 5 = 400 features
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.dens1 = torch.nn.Linear(in_features=120, out_features=84)
        self.dens2 = torch.nn.Linear(in_features=84, out_features=10)

    def forward(self, x):
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        # flatten everything except the batch dimension
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.dens1(x))
        x = self.dens2(x)
        return x

    def num_flat_features(self, x):
        # x.size() returns (batch_size, 16, 5, 5); drop the batch dimension
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features
net = MyModel().to(device)
print(net)

criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=lr)

# training loop
for _epoch in range(n_epochs):
    net.train()
    for idx, (train_x, train_label) in enumerate(train_loader):
        train_x, train_label = train_x.to(device), train_label.to(device)
        optimizer.zero_grad()
        predict_y = net(train_x)
        _error = criterion(predict_y, train_label)
        if idx % 10 == 0:
            print('idx: {}, _error: {}'.format(idx, _error.item()))
        _error.backward()
        optimizer.step()

# evaluation on the test set
correct = 0
_sum = 0
net.eval()
with torch.no_grad():
    for idx, (test_x, test_label) in enumerate(test_loader):
        test_x, test_label = test_x.to(device), test_label.to(device)
        predict_y = net(test_x)
        predict_ys = predict_y.argmax(dim=-1)
        correct += (predict_ys == test_label).sum().item()
        _sum += test_label.shape[0]

print('accuracy: {:.2f}'.format(correct / _sum))
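
If the trained network is worth keeping, the usual PyTorch pattern is to save only the state_dict and reload it into a fresh MyModel instance. A minimal sketch (the file name lenet5_mnist.pt is arbitrary):

# save only the learned parameters, not the whole module object
torch.save(net.state_dict(), 'lenet5_mnist.pt')

# later: rebuild the architecture and load the weights back
restored = MyModel().to(device)
restored.load_state_dict(torch.load('lenet5_mnist.pt', map_location=device))
restored.eval()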