Skip to content
Snippets Groups Projects
Commit 2484eddb authored by ym13n22's avatar ym13n22
Browse files

command runs successfully; half of the MLP code added

parent 2ce2133a
No related branches found
No related tags found
No related merge requests found
...@@ -23,6 +23,8 @@ from torch.utils.data import DataLoader, TensorDataset, random_split ...@@ -23,6 +23,8 @@ from torch.utils.data import DataLoader, TensorDataset, random_split
import pickle import pickle
import joblib import joblib
#this is the file for seperated hardware detectors
class Window: class Window:
def __init__(self, root): def __init__(self, root):
self.root = root self.root = root
......
This diff is collapsed.
...@@ -14,6 +14,8 @@ import os ...@@ -14,6 +14,8 @@ import os
from PIL import Image, ImageTk from PIL import Image, ImageTk
from time import sleep, time from time import sleep, time
#this is the file for the combined hardware detector
class Window: class Window:
def __init__(self, root): def __init__(self, root):
self.input_port='COM9' self.input_port='COM9'
......
import torch
import torch.nn as nn
import torch.optim as optim
from sklearn.model_selection import train_test_split
# Synthetic two-cluster dataset generator.
def generate_data(num_samples):
    """Create a 2-D two-class toy dataset.

    Returns (data, labels): data has shape (2*num_samples, 2) — the first
    num_samples rows are drawn around (1, 1) with label 0.0, the remaining
    rows around (-1, -1) with label 1.0.
    """
    cluster_a = torch.randn(num_samples, 2) + torch.tensor([1, 1])
    cluster_b = torch.randn(num_samples, 2) + torch.tensor([-1, -1])
    samples = torch.cat((cluster_a, cluster_b), dim=0)
    targets = torch.cat((torch.zeros(num_samples), torch.ones(num_samples)), dim=0)
    return samples, targets
# Simple fully-connected classifier for the 2-D toy problem.
class MLP(nn.Module):
    """Three-layer perceptron: 2 -> 16 -> 16 -> 2 (two-class logits)."""

    def __init__(self):
        super().__init__()
        # Two hidden layers of width 16; the last layer emits one raw
        # logit per class (softmax is applied by CrossEntropyLoss).
        self.fc1 = nn.Linear(2, 16)
        self.fc2 = nn.Linear(16, 16)
        self.fc3 = nn.Linear(16, 2)

    def forward(self, x):
        hidden = torch.relu(self.fc1(x))
        hidden = torch.relu(self.fc2(hidden))
        return self.fc3(hidden)
# Hyperparameters
num_samples = 1000
num_epochs = 50
batch_size = 32
learning_rate = 0.01
# Data preparation: 2*num_samples synthetic points, 80/20 train/test split
data, labels = generate_data(num_samples)
train_data, test_data, train_labels, test_labels = train_test_split(data, labels, test_size=0.2)
# Dataset and data loader (shuffled mini-batches)
train_dataset = torch.utils.data.TensorDataset(train_data, train_labels)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size, shuffle=True)
# Initialize the model, loss function, and optimizer
model = MLP()
criterion = nn.CrossEntropyLoss()  # expects raw logits and int64 class targets
optimizer = optim.Adam(model.parameters(), lr=learning_rate)
# Train the model
for epoch in range(num_epochs):
    for inputs, targets in train_loader:
        optimizer.zero_grad()
        outputs = model(inputs)
        # labels come back from generate_data as floats; CrossEntropyLoss
        # requires integer class indices, hence the .long() cast
        loss = criterion(outputs, targets.long())
        loss.backward()
        optimizer.step()
    # NOTE: `loss` here is from the final mini-batch of the epoch only
    print(f'Epoch [{epoch + 1}/{num_epochs}], Loss: {loss.item():.4f}')
# Evaluate on the held-out test split
model.eval()
with torch.no_grad():
    test_outputs = model(test_data)
    # predicted class = index of the larger of the two logits
    _, predicted = torch.max(test_outputs, 1)
    accuracy = (predicted == test_labels).sum().item() / len(test_labels)
    print(f'Accuracy on test data: {accuracy * 100:.2f}%')
0% Loading or .
You are about to add 0 people to the discussion. Proceed with caution.
Please register or to comment