Báo Cáo Java 4 (Java Report 4)

The document defines classes for layers and activation functions in a neural network. It loads data, splits it into training and test sets, initializes a network with fully connected and activation layers, trains the network to minimize a loss function using backpropagation, and uses the trained network to predict outputs for new data.


from abc import ABC, abstractmethod

import numpy as np
from sklearn.model_selection import train_test_split
import pandas as pd

# Load data and split into training and test sets


df = pd.read_csv('/content/drive/MyDrive/Data_hocmay2/data_logistic.csv')
data = df.to_numpy()
X = data[:, :-1] # All columns except the last one are features
y = data[:, -1].reshape(-1, 1) # Last column is the target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=42)
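
# A quick shape check (illustrative, not in the original report). With
# test_size=0.3, roughly 30% of the rows land in the test split, and the
# network defined below assumes two feature columns:
print(X_train.shape, X_test.shape)  # e.g. (n_train, 2) and (n_test, 2)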

class Layer(ABC):
    def __init__(self):
        self.input = None
        self.output = None
        self.input_shape = None
        self.output_shape = None

    @abstractmethod
    def forward_propagation(self, input):
        raise NotImplementedError

    @abstractmethod
    def backward_propagation(self, output_error, learning_rate):
        raise NotImplementedError

class FCLayer(Layer):
    def __init__(self, input_shape, output_shape):
        super().__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        # Initialize weights and bias uniformly in [-0.5, 0.5)
        self.weights = np.random.rand(input_shape[1], output_shape[1]) - 0.5
        self.bias = np.random.rand(1, output_shape[1]) - 0.5

    def forward_propagation(self, input):
        self.input = input
        self.output = np.dot(input, self.weights) + self.bias
        return self.output

    def backward_propagation(self, output_error, learning_rate):
        # Ensure output_error is a 2D column vector
        output_error = output_error.reshape(-1, 1)
        # Gradient w.r.t. this layer's input, passed back to the previous layer
        input_error = np.dot(self.weights, output_error).reshape(self.input.shape)
        weights_error = np.dot(self.input.reshape(-1, 1), output_error.T)

        # Update parameters
        self.weights -= learning_rate * weights_error
        self.bias -= learning_rate * output_error.T  # Transpose to match the bias shape
        return input_error
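
# As a sanity check (not part of the original report), the analytic weight
# gradient can be compared against a central finite difference on a tiny
# layer; fc, x, and eps below are illustrative names. For dL/d(output) = 1,
# backward's weights_error reduces to the input column.
fc = FCLayer((1, 2), (1, 1))
x = np.array([0.3, -0.7])
fc.forward_propagation(x)
analytic = fc.input.reshape(-1, 1)  # weights_error when output_error = [[1.]]
numeric = np.zeros_like(fc.weights)
eps = 1e-6
for i in range(fc.weights.shape[0]):
    fc.weights[i, 0] += eps
    up = fc.forward_propagation(x).sum()
    fc.weights[i, 0] -= 2 * eps
    down = fc.forward_propagation(x).sum()
    fc.weights[i, 0] += eps  # restore the original weight
    numeric[i, 0] = (up - down) / (2 * eps)
print(np.allclose(analytic, numeric))  # expected: True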

class ActivationLayer(Layer):
    def __init__(self, input_shape, output_shape, activation, activation_prime):
        super().__init__()
        self.input_shape = input_shape
        self.output_shape = output_shape
        self.activation = activation
        self.activation_prime = activation_prime

    def forward_propagation(self, input):
        self.input = input
        self.output = self.activation(input)
        return self.output

    def backward_propagation(self, output_error, learning_rate):
        # Reshape the local derivative to match output_error for safe
        # element-wise multiplication
        return self.activation_prime(self.input).reshape(output_error.shape) * output_error

# Define the activation functions and their derivatives


def relu(z):
    return np.maximum(0, z)

def relu_prime(z):
    z = np.array(z)
    return np.where(z > 0, 1, 0)
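
# A quick illustration of the pair on a sample vector (values chosen arbitrarily):
z = np.array([-2.0, 0.0, 3.5])
print(relu(z))        # [0.  0.  3.5]
print(relu_prime(z))  # [0 0 1]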

# Define the loss function and its derivative


def loss(y_true, y_pred):
    return 0.5 * np.mean((y_pred - y_true) ** 2)

def loss_prime(y_true, y_pred):
    return y_pred - y_true
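
# A small worked example (illustrative values):
y_true = np.array([[1.0]])
y_pred = np.array([[0.8]])
print(loss(y_true, y_pred))        # 0.5 * 0.2**2 = 0.02
print(loss_prime(y_true, y_pred))  # [[-0.2]]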

class Network:
    def __init__(self):
        self.layers = []
        self.loss = None
        self.loss_prime = None

    def add(self, layer):
        self.layers.append(layer)

    def setup_loss(self, loss, loss_prime):
        self.loss = loss
        self.loss_prime = loss_prime

    def predict(self, input_data):
        # Run each sample through every layer in order
        results = []
        for i in range(len(input_data)):
            output = input_data[i]
            for layer in self.layers:
                output = layer.forward_propagation(output)
            results.append(output)
        return results

    def fit(self, x_train, y_train, learning_rate, epochs):
        # Stochastic gradient descent: parameters are updated after every sample
        for epoch in range(epochs):
            err = 0
            for x, y in zip(x_train, y_train):
                # Forward propagation
                output = x
                for layer in self.layers:
                    output = layer.forward_propagation(output)
                # Accumulate error for display
                err += self.loss(y, output)

                # Backward propagation
                error = self.loss_prime(y, output)
                for layer in reversed(self.layers):
                    error = layer.backward_propagation(error, learning_rate)

            err /= len(x_train)
            print(f'Epoch {epoch+1}/{epochs} error: {err}')

# Initialize and add layers to the network


net = Network()
net.add(FCLayer((1, 2), (1, 3)))
net.add(ActivationLayer((1, 3), (1, 3), relu, relu_prime))
net.add(FCLayer((1, 3), (1, 1)))
net.add(ActivationLayer((1, 1), (1, 1), relu, relu_prime))
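
# Each (rows, columns) pair describes one sample flowing through the net: two
# input features widen to a hidden size of 3, then collapse to a single output.
# The wiring can be confirmed with a quick loop (illustrative, not in the original):
for layer in net.layers:
    print(type(layer).__name__, layer.input_shape, '->', layer.output_shape)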

# Set up the loss function


net.setup_loss(loss, loss_prime)

# Train the network


net.fit(X_train, y_train, learning_rate=0.01, epochs=100)

# Predict using the network


out = net.predict([[4.8, 9.6]])
print(out)
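
# The report ends with a single prediction; a natural follow-up (not in the
# original) is to score the held-out split with the same loss:
preds = net.predict(X_test)
test_err = np.mean([loss(yt, yp) for yt, yp in zip(y_test, preds)])
print(f'Test MSE: {test_err}')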
