Program :
import numpy as np

X = np.array([[0, 1], [1, 0], [1, 1], [0, 0]])
y = np.array([[1], [1], [0], [0]])

def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def sigmoid_deriv(z):
    return sigmoid(z) * (1 - sigmoid(z))

np.random.seed(1)
W1, B1 = np.random.randn(2, 2), np.random.randn(2, 1)
W2, B2 = np.random.randn(1, 2), np.random.randn(1, 1)

# Train with per-sample (stochastic) gradient descent
for _ in range(5000):
    for i in range(4):
        a0 = X[i].reshape(-1, 1)
        z1 = W1 @ a0 + B1
        a1 = sigmoid(z1)
        z2 = W2 @ a1 + B2
        a2 = sigmoid(z2)
        dz2 = a2 - y[i]
        dz1 = (W2.T @ dz2) * sigmoid_deriv(z1)
        W2 -= 0.01 * dz2 @ a1.T
        B2 -= 0.01 * dz2
        W1 -= 0.01 * dz1 @ a0.T
        B1 -= 0.01 * dz1

# Predictions
for x in X:
    out = sigmoid(W2 @ sigmoid(W1 @ x.reshape(-1, 1) + B1) + B2)
    print(f"Input: {x}, Output: {int(out >= 0.5)}")
Program :
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# Load and preprocess data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(-1, 28, 28, 1) / 255.0
x_test = x_test.reshape(-1, 28, 28, 1) / 255.0

# One-hot encode labels
y_train = to_categorical(y_train)
y_test = to_categorical(y_test)

# Build CNN model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    MaxPooling2D((2, 2)),
    Flatten(),
    Dense(100, activation='relu'),
    Dense(10, activation='softmax')
])

# Compile model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])

# Train model
model.fit(x_train, y_train, epochs=5, batch_size=64, verbose=2)

# Evaluate model
test_loss, test_acc = model.evaluate(x_test, y_test, verbose=0)
print(f"Test Accuracy: {test_acc:.4f}")
Program :
import os
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'  # Disable oneDNN custom ops as per log
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.datasets import fetch_lfw_people
from sklearn.model_selection import train_test_split
from sklearn.metrics import confusion_matrix
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense

# Load LFW dataset (resize and crop)
faces = fetch_lfw_people(min_faces_per_person=100, resize=1.0,
                         slice_=(slice(60, 188), slice(60, 188)), color=True)
x = faces.images / 255.0
y = faces.target
x = x.reshape((x.shape[0], 128, 128, 3))

# Reduce dataset size for faster training
x, y = x[:500], y[:500]
y_cat = to_categorical(y)
class_names = faces.target_names

# Split into train and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y_cat, stratify=y_cat,
                                                    train_size=0.8, random_state=42)

# Build CNN model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(128, 128, 3)),
    MaxPooling2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(128, activation='relu'),
    Dense(len(class_names), activation='softmax')
])

model.compile(
    optimizer='adam',
    loss='categorical_crossentropy',
    metrics=['accuracy']
)

# Train model
model.fit(x_train, y_train, epochs=5, batch_size=32, validation_data=(x_test, y_test))
print("Training completed.")

# Predict
pred = model.predict(x_test)
print("Prediction completed.")

# Confusion Matrix
mat = confusion_matrix(y_test.argmax(axis=1), pred.argmax(axis=1))
sns.heatmap(
    mat,
    annot=True,
    fmt='d',
    cmap='Blues',
    xticklabels=class_names,
    yticklabels=class_names
)
plt.title("Confusion Matrix")
plt.xlabel("Predicted")
plt.ylabel("Actual")
plt.show()
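Since the diagonal of the confusion matrix counts the correctly classified samples, overall and per-class accuracy can be read off it directly; a short sketch, assuming `mat` and `class_names` from the program above:

overall_acc = np.trace(mat) / mat.sum()         # correct predictions / all predictions
per_class_acc = np.diag(mat) / mat.sum(axis=1)  # per-class recall (row-wise)
print(f"Overall accuracy: {overall_acc:.4f}")
for name, acc in zip(class_names, per_class_acc):
    print(f"{name}: {acc:.4f}")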
Program :
import string
import torch
import torch.nn as nn
import random
import time
import math
import matplotlib.pyplot as plt

# All letters and EOS
all_letters = string.ascii_letters + " .,;'-"
n_letters = len(all_letters) + 1  # EOS

# Sample dataset (no files)
category_lines = {
    'Russian': ['Ivan', 'Nikolai', 'Boris'],
    'German': ['Fritz', 'Hans', 'Adolf'],
    'Spanish': ['Jose', 'Carlos', 'Luis'],
    'Chinese': ['Wei', 'Zhang', 'Ming']
}
all_categories = list(category_lines.keys())
n_categories = len(all_categories)
print('# categories:', n_categories, all_categories)

# RNN model
class RNN(nn.Module):
    def __init__(self, input_size, hidden_size, output_size):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.i2h = nn.Linear(n_categories + input_size + hidden_size, hidden_size)
        self.i2o = nn.Linear(n_categories + input_size + hidden_size, output_size)
        self.o2o = nn.Linear(hidden_size + output_size, output_size)
        self.dropout = nn.Dropout(0.1)
        self.softmax = nn.LogSoftmax(dim=1)

    def forward(self, category, input, hidden):
        input_combined = torch.cat((category, input, hidden), 1)
        hidden = self.i2h(input_combined)
        output = self.i2o(input_combined)
        output_combined = torch.cat((hidden, output), 1)
        output = self.o2o(output_combined)
        output = self.dropout(output)
        output = self.softmax(output)
        return output, hidden

    def initHidden(self):
        return torch.zeros(1, self.hidden_size)

# Helper functions
def categoryTensor(category):
    li = all_categories.index(category)
    tensor = torch.zeros(1, n_categories)
    tensor[0][li] = 1
    return tensor

def inputTensor(line):
    tensor = torch.zeros(len(line), 1, n_letters)
    for li in range(len(line)):
        letter = line[li]
        tensor[li][0][all_letters.find(letter)] = 1
    return tensor

def targetTensor(line):
    letter_indexes = [all_letters.find(line[li]) for li in range(1, len(line))]
    letter_indexes.append(n_letters - 1)  # EOS
    return torch.LongTensor(letter_indexes)

def randomChoice(l):
    return l[random.randint(0, len(l) - 1)]

def randomTrainingPair():
    category = randomChoice(all_categories)
    line = randomChoice(category_lines[category])
    return category, line

def randomTrainingExample():
    category, line = randomTrainingPair()
    return categoryTensor(category), inputTensor(line), targetTensor(line)

# Training
criterion = nn.NLLLoss()
learning_rate = 0.0005
rnn = RNN(n_letters, 128, n_letters)

def train(category_tensor, input_line_tensor, target_line_tensor):
    target_line_tensor.unsqueeze_(-1)
    hidden = rnn.initHidden()
    rnn.zero_grad()
    loss = 0
    for i in range(input_line_tensor.size(0)):
        output, hidden = rnn(category_tensor, input_line_tensor[i], hidden)
        l = criterion(output, target_line_tensor[i])
        loss += l
    loss.backward()
    for p in rnn.parameters():
        p.data.add_(p.grad.data, alpha=-learning_rate)
    return output, loss.item() / input_line_tensor.size(0)

# Time tracking
def timeSince(since):
    s = time.time() - since
    return '%dm %ds' % (s // 60, s % 60)

n_iters = 1000  # Reduced for lab runtime
print_every = 200
plot_every = 100
all_losses = []
total_loss = 0
start = time.time()

for iter in range(1, n_iters + 1):
    output, loss = train(*randomTrainingExample())
    total_loss += loss
    if iter % print_every == 0:
        print('%s (%d %d%%) %.4f' % (timeSince(start), iter, iter / n_iters * 100, loss))
    if iter % plot_every == 0:
        all_losses.append(total_loss / plot_every)
        total_loss = 0

# Plotting loss
plt.figure()
plt.plot(all_losses)
plt.show()

# Sampling
max_length = 20
def sample(category, start_letter='A'):
    with torch.no_grad():
        category_tensor = categoryTensor(category)
        input = inputTensor(start_letter)
        hidden = rnn.initHidden()
        output_name = start_letter
        for i in range(max_length):
            output, hidden = rnn(category_tensor, input[0], hidden)
            topv, topi = output.topk(1)
            topi = topi[0][0].item()
            if topi == n_letters - 1:
                break
            else:
                letter = all_letters[topi]
                output_name += letter
            input = inputTensor(letter)
        return output_name

def samples(category, start_letters='ABC'):
    for start_letter in start_letters:
        print(sample(category, start_letter))

# Test
samples('Russian', 'RUS')
samples('German', 'GER')
samples('Spanish', 'SPA')
samples('Chinese', 'CHI')
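sample() above decodes greedily (always the top-scoring letter), so each start letter yields one fixed name. A common variant draws each letter from the model's output distribution instead; below is a sketch reusing the helpers above, where `temperature` is an illustrative knob not present in the original program.

def sample_stochastic(category, start_letter='A', temperature=0.8):
    with torch.no_grad():
        category_tensor = categoryTensor(category)
        input = inputTensor(start_letter)
        hidden = rnn.initHidden()
        output_name = start_letter
        for _ in range(max_length):
            output, hidden = rnn(category_tensor, input[0], hidden)
            # output holds log-probabilities; sharpen/flatten them with the
            # temperature, then sample (multinomial normalizes the weights)
            weights = torch.exp(output / temperature)
            topi = torch.multinomial(weights, 1).item()
            if topi == n_letters - 1:  # EOS
                break
            letter = all_letters[topi]
            output_name += letter
            input = inputTensor(letter)
        return output_name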
Program :
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense

# Sample offline-compatible dataset
# Each sentence is tokenized (word -> integer) and given a label: 1 for positive, 0 for negative
sentences = [
    [1, 2, 3, 4, 5],         # Positive
    [6, 7, 8, 9],            # Negative
    [1, 10, 11, 12, 13, 5],  # Positive
    [14, 15, 8, 16],         # Negative
    [17, 18, 3, 4, 19],      # Positive
    [6, 20, 21, 22],         # Negative
]
labels = [1, 0, 1, 0, 1, 0]

# Parameters
max_features = 100  # Size of vocabulary
max_len = 10        # Max review length
embedding_size = 8
batch_size = 2
epochs = 10

# Pad sequences
x_data = pad_sequences(sentences, maxlen=max_len)
y_data = np.array(labels)

# Split into train/test manually
split_index = int(len(x_data) * 0.8)
x_train, y_train = x_data[:split_index], y_data[:split_index]
x_test, y_test = x_data[split_index:], y_data[split_index:]

# Build the model
model = Sequential()
model.add(Embedding(max_features, embedding_size, input_length=max_len))
model.add(LSTM(16))
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

# Train the model
model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs,
          validation_data=(x_test, y_test))

# Evaluate
loss, accuracy = model.evaluate(x_test, y_test)
print("Loss:", loss)
print("Accuracy:", accuracy)
Program :
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Input and output text data
input_texts = ['I love coding', 'This is a pen', 'She sings well']
target_texts = ['PRP VB NNP', 'DT VBZ DT NN', 'PRP VBZ RB']

# Vocabulary building
input_words = set()
target_words = set()
for input_text, target_text in zip(input_texts, target_texts):
    input_words.update(input_text.split())
    target_words.update(target_text.split())
target_words.update(['<sos>', '<eos>'])

# Word-Index mappings
input_word2idx = {word: idx for idx, word in enumerate(sorted(input_words))}
input_idx2word = {idx: word for word, idx in input_word2idx.items()}
target_word2idx = {word: idx for idx, word in enumerate(sorted(target_words))}
target_idx2word = {idx: word for word, idx in target_word2idx.items()}

# Sequence lengths
max_encoder_seq_length = max(len(text.split()) for text in input_texts)
max_decoder_seq_length = max(len(text.split()) for text in target_texts) + 2  # +2 for <sos>, <eos>

# Prepare encoder input
encoder_input_data = np.zeros((len(input_texts), max_encoder_seq_length))
for i, input_text in enumerate(input_texts):
    for t, word in enumerate(input_text.split()):
        encoder_input_data[i, t] = input_word2idx[word]

# Prepare decoder input and target
decoder_input_data = np.zeros((len(target_texts), max_decoder_seq_length))
decoder_target_data = np.zeros((len(target_texts), max_decoder_seq_length, len(target_words)))
for i, target_text in enumerate(target_texts):
    target_sequence = ['<sos>'] + target_text.split() + ['<eos>']
    for t, word in enumerate(target_sequence):
        decoder_input_data[i, t] = target_word2idx[word]
        if t > 0:
            decoder_target_data[i, t - 1, target_word2idx[word]] = 1.0

# Model architecture
encoder_inputs = Input(shape=(None,))
enc_emb = Embedding(len(input_words), 256)(encoder_inputs)
encoder_outputs, state_h, state_c = LSTM(256, return_state=True)(enc_emb)
encoder_states = [state_h, state_c]

decoder_inputs = Input(shape=(None,))
dec_emb_layer = Embedding(len(target_words), 256)
dec_emb = dec_emb_layer(decoder_inputs)
decoder_lstm = LSTM(256, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(dec_emb, initial_state=encoder_states)
decoder_dense = Dense(len(target_words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Compile and train
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=64, epochs=50, validation_split=0.2)

# Inference setup
encoder_model = Model(encoder_inputs, encoder_states)
decoder_state_input_h = Input(shape=(256,))
decoder_state_input_c = Input(shape=(256,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
dec_emb2 = dec_emb_layer(decoder_inputs)  # Reuse the trained embedding layer
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2, initial_state=decoder_states_inputs)
decoder_outputs2 = decoder_dense(decoder_outputs2)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs2, state_h2, state_c2])

# Inference function
def generate_pos_tags(input_sequence):
    states_value = encoder_model.predict(input_sequence)
    target_seq = np.zeros((1, 1))
    target_seq[0, 0] = target_word2idx['<sos>']
    stop_condition = False
    pos_tags = []
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict([target_seq] + states_value)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_word = target_idx2word[sampled_token_index]
        pos_tags.append(sampled_word)
        if sampled_word == '<eos>' or len(pos_tags) > max_decoder_seq_length:
            stop_condition = True
        target_seq[0, 0] = sampled_token_index
        states_value = [h, c]
    return ' '.join(pos_tags[:-1])  # Exclude <eos>

# Testing
for input_text in input_texts:
    input_seq = pad_sequences(
        [[input_word2idx[word] for word in input_text.split()]],
        maxlen=max_encoder_seq_length
    )
    predicted_tags = generate_pos_tags(input_seq)
    print('Input:', input_text)
    print('Predicted POS Tags:', predicted_tags)
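Note on the inference setup: the decoder model reuses `dec_emb_layer` and `decoder_lstm` exactly as trained. Building a fresh `Embedding` layer at this point would feed the decoder untrained, randomly initialized vectors, so reusing the trained layer is what makes the predicted tags reflect the training.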
Program :
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, LSTM, Dense, Embedding
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Define the input and output sequences
input_texts = ['I love coding', 'This is a pen', 'She sings well']
target_texts = ['Ich liebe das Coden', 'Das ist ein Stift', 'Sie singt gut']

# Create a set of all unique words in the input and target sequences
input_words = set()
target_words = set()
for input_text, target_text in zip(input_texts, target_texts):
    input_words.update(input_text.split())
    target_words.update(target_text.split())

# Add <sos> and <eos> tokens to target_words
target_words.add('<sos>')
target_words.add('<eos>')

# Create dictionaries to map words to integers
input_word2idx = {word: idx for idx, word in enumerate(input_words)}
input_idx2word = {idx: word for idx, word in enumerate(input_words)}
target_word2idx = {word: idx for idx, word in enumerate(target_words)}
target_idx2word = {idx: word for idx, word in enumerate(target_words)}

# Define the maximum sequence lengths
max_encoder_seq_length = max(len(text.split()) for text in input_texts)
max_decoder_seq_length = max(len(text.split()) for text in target_texts) + 2  # +2 for <sos> and <eos>

# Prepare the encoder input data
encoder_input_data = np.zeros((len(input_texts), max_encoder_seq_length), dtype='float32')
for i, input_text in enumerate(input_texts):
    for t, word in enumerate(input_text.split()):
        encoder_input_data[i, t] = input_word2idx[word]

# Prepare the decoder input and target data
decoder_input_data = np.zeros((len(input_texts), max_decoder_seq_length), dtype='float32')
decoder_target_data = np.zeros((len(input_texts), max_decoder_seq_length, len(target_words)),
                               dtype='float32')
for i, target_text in enumerate(target_texts):
    target_sequence = ['<sos>'] + target_text.split() + ['<eos>']
    for t, word in enumerate(target_sequence):
        decoder_input_data[i, t] = target_word2idx[word]
        if t > 0:
            decoder_target_data[i, t - 1, target_word2idx[word]] = 1.0

# Define the encoder
encoder_inputs = Input(shape=(None,))
encoder_embedding = Embedding(len(input_words), 256)(encoder_inputs)
encoder_lstm = LSTM(256, return_state=True)
encoder_outputs, state_h, state_c = encoder_lstm(encoder_embedding)
encoder_states = [state_h, state_c]

# Define the decoder
decoder_inputs = Input(shape=(None,))
decoder_embedding_layer = Embedding(len(target_words), 256)
decoder_embedding = decoder_embedding_layer(decoder_inputs)
decoder_lstm = LSTM(256, return_sequences=True, return_state=True)
decoder_outputs, _, _ = decoder_lstm(decoder_embedding, initial_state=encoder_states)
decoder_dense = Dense(len(target_words), activation='softmax')
decoder_outputs = decoder_dense(decoder_outputs)

# Define the full model
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)

# Compile and train the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
model.fit([encoder_input_data, decoder_input_data], decoder_target_data,
          batch_size=64, epochs=50, validation_split=0.2)

# Define encoder model for inference
encoder_model = Model(encoder_inputs, encoder_states)

# Define decoder model for inference
decoder_state_input_h = Input(shape=(256,))
decoder_state_input_c = Input(shape=(256,))
decoder_states_inputs = [decoder_state_input_h, decoder_state_input_c]
dec_emb2 = decoder_embedding_layer(decoder_inputs)  # Reuse the trained embedding layer
decoder_outputs2, state_h2, state_c2 = decoder_lstm(dec_emb2,
                                                    initial_state=decoder_states_inputs)
decoder_states = [state_h2, state_c2]
decoder_outputs2 = decoder_dense(decoder_outputs2)
decoder_model = Model([decoder_inputs] + decoder_states_inputs,
                      [decoder_outputs2] + decoder_states)

# Inference function
def translate(input_sequence):
    states_value = encoder_model.predict(input_sequence)
    target_sequence = np.zeros((1, 1))
    target_sequence[0, 0] = target_word2idx['<sos>']
    stop_condition = False
    translation = []
    while not stop_condition:
        output_tokens, h, c = decoder_model.predict([target_sequence] + states_value)
        sampled_token_index = np.argmax(output_tokens[0, -1, :])
        sampled_word = target_idx2word[sampled_token_index]
        translation.append(sampled_word)
        if sampled_word == '<eos>' or len(translation) > max_decoder_seq_length:
            stop_condition = True
        target_sequence = np.zeros((1, 1))
        target_sequence[0, 0] = sampled_token_index
        states_value = [h, c]
    return ' '.join(translation[:-1])  # remove <eos>

# Test the model
for input_text in input_texts:
    input_seq = pad_sequences([[input_word2idx[word] for word in input_text.split()]],
                              maxlen=max_encoder_seq_length)
    translated_text = translate(input_seq)
    print('Input:', input_text)
    print('Translated Text:', translated_text)
    print()
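One difference from the POS-tagging program: here the word-index dictionaries are built by iterating over plain sets, so the assigned indices can change from run to run (Python randomizes string hashing across processes). Sorting the vocabulary first, as the previous program does, makes the mapping reproducible:

input_word2idx = {word: idx for idx, word in enumerate(sorted(input_words))}
input_idx2word = {idx: word for word, idx in input_word2idx.items()}
target_word2idx = {word: idx for idx, word in enumerate(sorted(target_words))}
target_idx2word = {idx: word for word, idx in target_word2idx.items()}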
Program :
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error, r2_score

# Load the dataset (the original CSV filename was lost in extraction; point this at your file)
df = pd.read_csv(r'C:\Users\rohit\Downloads\[Link]')

# Selecting feature columns and target column
X = df[['feature1', 'feature2']]
y = df['target']

# Split the dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)

# Train the regression model
reg = LinearRegression()
reg.fit(X_train, y_train)

# Make predictions on the test set
y_pred = reg.predict(X_test)

# Evaluate the model
print('Mean Squared Error: %.2f' % mean_squared_error(y_test, y_pred))
print('Coefficient of Determination (R²): %.2f' % r2_score(y_test, y_pred))

# Plot the results (using feature1 for visualization)
plt.scatter(X_test['feature1'], y_test, color='black', label="Actual Data")
plt.plot(X_test['feature1'], y_pred, color='blue', linewidth=3, label="Regression Line")
plt.xlabel("Feature 1")
plt.ylabel("Target")
plt.legend()
plt.show()
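The script expects a CSV with feature1, feature2, and target columns. To run it end to end without the original file, a synthetic dataset can be generated first; a sketch, with all values illustrative:

rng = np.random.default_rng(42)
n = 200
f1 = rng.uniform(0, 10, n)
f2 = rng.uniform(0, 5, n)
target = 3.0 * f1 - 2.0 * f2 + rng.normal(0, 1.0, n)  # linear signal plus noise
pd.DataFrame({'feature1': f1, 'feature2': f2, 'target': target}).to_csv(
    'synthetic_regression.csv', index=False)
# Then point pd.read_csv(...) above at 'synthetic_regression.csv'.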
Output :
Epoch 1/5
C:\Users\rohit\anaconda3\Lib\site-packages\keras\src\layers\convolutional\base_conv.py:107: UserWarning: Do not pass an `input_shape`/`input_dim` argument to a layer. When using Sequential models, prefer using an `Input(shape)` object as the first layer in the model instead.
  super().__init__(activity_regularizer=activity_regularizer, **kwargs)
13/13 ━━━━━━━━━━━━━━━━━━━━━ 3s 158ms/step - accuracy: 0.3264 - loss:
1.5228 - val_accuracy: 0.4600 - val_loss: 1.4287
Epoch 2/5
13/13 ━━━━━━━━━━━━━━━━━━━━━ 2s 152ms/step - accuracy: 0.4968 - loss:
1.3866 - val_accuracy: 0.4600 - val_loss: 1.4622
Epoch 3/5
13/13 ━━━━━━━━━━━━━━━━━━━━━ 2s 148ms/step - accuracy: 0.4497 - loss:
1.4845 - val_accuracy: 0.4600 - val_loss: 1.4306
Epoch 4/5
13/13 ━━━━━━━━━━━━━━━━━━━━━ 2s 153ms/step - accuracy: 0.4509 - loss:
1.4498 - val_accuracy: 0.4600 - val_loss: 1.4264
Epoch 5/5
13/13 ━━━━━━━━━━━━━━━━━━━━━ 2s 145ms/step - accuracy: 0.4656 - loss:
1.4287 - val_accuracy: 0.4600 - val_loss: 1.4183
Training completed.
4/4 ━━━━━━━━━━━━━━━━━━━━ 0s 37ms/step
Prediction completed.
Result :
Output :
Result :
Output :
Epoch 1/10
2/2 ━━━━━━━━━━━━━━━━━━━━ 1s 224ms/step - accuracy: 0.6667 - loss:
0.6917 - val_accuracy: 0.5000 - val_loss: 0.6945
Epoch 2/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 50ms/step - accuracy: 0.8333 - loss:
0.6902 - val_accuracy: 0.0000e+00 - val_loss: 0.6946
Epoch 3/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 42ms/step - accuracy: 0.8333 - loss:
0.6869 - val_accuracy: 0.5000 - val_loss: 0.6945
Epoch 4/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 42ms/step - accuracy: 0.8333 - loss:
0.6857 - val_accuracy: 0.5000 - val_loss: 0.6945
Epoch 5/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 44ms/step - accuracy: 0.6667 - loss:
0.6867 - val_accuracy: 0.5000 - val_loss: 0.6945
Epoch 6/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 43ms/step - accuracy: 0.6667 - loss:
0.6851 - val_accuracy: 0.5000 - val_loss: 0.6945
Epoch 7/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 42ms/step - accuracy: 1.0000 - loss:
0.680 - val_accuracy: 0.5000 - val_loss: 0.6944
Epoch 8/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 45ms/step - accuracy: 1.0000 - loss:
0.6796 - val_accuracy: 0.5000 - val_loss: 0.6944
Epoch 9/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 43ms/step - accuracy: 1.0000 - loss:
0.6807 - val_accuracy: 0.5000 - val_loss: 0.6944
Epoch 10/10
2/2 ━━━━━━━━━━━━━━━━━━━━━━━━ 0s 44ms/step - accuracy: 1.0000 - loss:
0.6759 - val_accuracy: 0.5000 - val_loss: 0.6944
1/1 ━━━━━━━━━━━━━━━━━━━━━ 0s 203ms/step - accuracy: 0.5000 - loss:
0.6944
Loss: 0.6943531632423401
Accuracy: 0.5
Result :
Output :
Epoch 1/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 3s 3s/step - accuracy: 0.0000e+00 - loss:
1.6484 - val_accuracy: 0.3333 - val_loss: 1.4638
Epoch 2/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 99ms/step - accuracy: 0.6667 - loss:
1.6246 - val_accuracy: 0.3333 - val_loss: 1.4635
Epoch 3/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 94ms/step - accuracy: 0.7500 - loss:
1.6003 - val_accuracy: 0.6667 - val_loss: 1.4632
Epoch 4/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 99ms/step - accuracy: 0.9167 - loss:
1.5739 - val_accuracy: 0.6667 - val_loss: 1.4629
Epoch 5/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 107ms/step - accuracy: 0.9167 - loss:
1.5440 - val_accuracy: 0.6667 - val_loss: 1.4625
Epoch 6/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 88ms/step - accuracy: 0.8333 - loss:
1.5089 - val_accuracy: 0.6667 - val_loss: 1.4620
Epoch 7/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 88ms/step - accuracy: 0.8333 - loss:
1.4672 - val_accuracy: 0.6667 - val_loss: 1.4613
Epoch 8/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 85ms/step - accuracy: 0.8333 - loss:
1.4168 - val_accuracy: 0.6667 - val_loss: 1.4605
Epoch 9/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 87ms/step - accuracy: 0.8333 - loss:
1.3562 - val_accuracy: 0.6667 - val_loss: 1.4595
Epoch 10/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 95ms/step - accuracy: 0.8333 - loss:
1.2841 - val_accuracy: 0.6667 - val_loss: 1.4584
Epoch 11/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 83ms/step - accuracy: 0.8333 - loss:
1.2012 - val_accuracy: 0.6667 - val_loss: 1.4574
Epoch 12/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 91ms/step - accuracy: 0.8333 - loss:
1.1117 - val_accuracy: 0.5000 - val_loss: 1.4576
Epoch 13/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 98ms/step - accuracy: 0.9167 - loss:
1.0234 - val_accuracy: 0.5000 - val_loss: 1.4600
Epoch 14/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 88ms/step - accuracy: 0.9167 - loss:
0.9431 - val_accuracy: 0.6667 - val_loss: 1.4659
Epoch 15/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 86ms/step - accuracy: 0.9167 - loss:
0.8741 - val_accuracy: 0.6667 - val_loss: 1.4756
Epoch 16/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 86ms/step - accuracy: 0.9167 - loss:
0.8158 - val_accuracy: 0.6667 - val_loss: 1.4890
Epoch 17/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 87ms/step - accuracy: 0.9167 - loss:
0.7663 - val_accuracy: 0.6667 - val_loss: 1.5057
Epoch 18/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 93ms/step - accuracy: 0.9167 - loss:
0.7252 - val_accuracy: 0.6667 - val_loss: 1.5255
Epoch 19/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.9167 - loss:
0.6922 - val_accuracy: 0.6667 - val_loss: 1.5478
Epoch 20/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 87ms/step - accuracy: 0.9167 - loss:
0.6668 - val_accuracy: 0.6667 - val_loss: 1.5719
Epoch 21/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 85ms/step - accuracy: 0.9167 - loss:
0.6490 - val_accuracy: 0.6667 - val_loss: 1.5967
Epoch 22/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 81ms/step - accuracy: 0.9167 - loss:
0.6378 - val_accuracy: 0.5000 - val_loss: 1.6211
Epoch 23/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - accuracy: 0.8333 - loss:
0.6290 - val_accuracy: 0.5000 - val_loss: 1.6434
Epoch 24/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 93ms/step - accuracy: 0.8333 - loss:
0.6157 - val_accuracy: 0.5000 - val_loss: 1.6624
Epoch 25/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 133ms/step - accuracy: 0.8333 - loss:
0.5921 - val_accuracy: 0.5000 - val_loss: 1.6778
Epoch 26/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 85ms/step - accuracy: 0.8333 - loss:
0.5562 - val_accuracy: 0.6667 - val_loss: 1.6903
Epoch 27/50
1/1 ━━━━━━━━━━━━━━━━━━━ 0s 85ms/step - accuracy: 0.9167 - loss:
0.5104 - val_accuracy: 0.6667 - val_loss: 1.7001
Epoch 28/50
1/1 ━━━━━━━━━━━━━━━━━━━ 0s 83ms/step - accuracy: 1.0000 - loss:
0.4616 - val_accuracy: 0.6667 - val_loss: 1.7071
Epoch 29/50
1/1 ━━━━━━━━━━━━━━━━━━ 0s 116ms/step - accuracy: 1.0000 - loss:
0.4168 - val_accuracy: 0.6667 - val_loss: 1.7107
Epoch 30/50
1/1 ━━━━━━━━━━━━━━━━━━ 0s 125ms/step - accuracy: 1.0000 - loss:
0.3772 - val_accuracy: 0.6667 - val_loss: 1.7115
Epoch 31/50
1/1 ━━━━━━━━━━━━━━━━━━ 0s 96ms/step - accuracy: 1.0000 - loss:
0.3438 - val_accuracy: 0.6667 - val_loss: 1.7116
Epoch 32/50
1/1 ━━━━━━━━━━━━━━━━━━ 0s 95ms/step - accuracy: 1.0000 - loss:
0.3183 - val_accuracy: 0.6667 - val_loss: 1.7129
Epoch 33/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 98ms/step - accuracy: 1.0000 - loss:
0.2951 - val_accuracy: 0.6667 - val_loss: 1.7165
Epoch 34/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 95ms/step - accuracy: 1.0000 - loss:
0.2699 - val_accuracy: 0.6667 - val_loss: 1.7231
Epoch 35/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 114ms/step - accuracy: 1.0000 - loss:
0.2466 - val_accuracy: 0.6667 - val_loss: 1.7324
Epoch 36/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 86ms/step - accuracy: 1.0000 - loss:
0.2283 - val_accuracy: 0.5000 - val_loss: 1.7429
Epoch 37/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 91ms/step - accuracy: 1.0000 - loss:
0.2134 - val_accuracy: 0.5000 - val_loss: 1.7537
Epoch 38/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 103ms/step - accuracy: 1.0000 - loss:
0.1987 - val_accuracy: 0.5000 - val_loss: 1.7655
Epoch 39/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 107ms/step - accuracy: 1.0000 - loss:
0.1816 - val_accuracy: 0.5000 - val_loss: 1.7793
Epoch 40/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 111ms/step - accuracy: 1.0000 - loss:
0.1656 - val_accuracy: 0.5000 - val_loss: 1.7957
Epoch 41/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 106ms/step - accuracy: 1.0000 - loss:
0.1522 - val_accuracy: 0.5000 - val_loss: 1.8137
Epoch 42/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 105ms/step - accuracy: 1.0000 - loss:
0.1411 - val_accuracy: 0.5000 - val_loss: 1.8317
Epoch 43/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 101ms/step - accuracy: 1.0000 - loss:
0.1315 - val_accuracy: 0.5000 - val_loss: 1.8480
Epoch 44/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 91ms/step - accuracy: 1.0000 - loss:
0.1216 - val_accuracy: 0.6667 - val_loss: 1.8617
Epoch 45/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 101ms/step - accuracy: 1.0000 - loss:
0.1129 - val_accuracy: 0.6667 - val_loss: 1.8733
Epoch 46/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 84ms/step - accuracy: 1.0000 - loss:
0.1066 - val_accuracy: 0.6667 - val_loss: 1.8836
Epoch 47/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 189ms/step - accuracy: 1.0000 - loss:
0.1019 - val_accuracy: 0.6667 - val_loss: 1.8939
Epoch 48/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 90ms/step - accuracy: 1.0000 - loss:
0.0974 - val_accuracy: 0.6667 - val_loss: 1.9048
Epoch 49/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 115ms/step - accuracy: 1.0000 - loss:
0.0916 - val_accuracy: 0.6667 - val_loss: 1.9162
Epoch 50/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 91ms/step - accuracy: 1.0000 - loss:
0.0856 - val_accuracy: 0.6667 - val_loss: 1.9270
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 131ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 126ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
Input: I love coding
Predicted POS Tags: PRP VB NNP
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 32ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
Input: This is a pen
Predicted POS Tags: DT VBZ DT NN
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 27ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
Input: She sings well
Predicted POS Tags: PRP VB NNP
2025-07-18 [Link].112108: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2025-07-18 [Link].167947: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2025-07-18 [Link].579811: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
Result :
Output :
Epoch 1/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 2s 2s/step - accuracy: 0.0000e+00 - loss:
1.9238 - val_accuracy: 0.0000e+00 - val_loss: 1.2898
Epoch 2/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 80ms/step - accuracy: 0.2500 - loss:
1.8943 - val_accuracy: 0.0000e+00 - val_loss: 1.2952
Epoch 3/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.6250 - loss:
1.8638 - val_accuracy: 0.0000e+00 - val_loss: 1.3013
Epoch 4/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.6250 - loss:
1.8305 - val_accuracy: 0.0000e+00 - val_loss: 1.3083
Epoch 5/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.6250 - loss:
1.7927 - val_accuracy: 0.0000e+00 - val_loss: 1.3164
Epoch 6/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.6250 - loss:
1.7486 - val_accuracy: 0.0000e+00 - val_loss: 1.3261
Epoch 7/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.6250 - loss:
1.6960 - val_accuracy: 0.0000e+00 - val_loss: 1.3377
Epoch 8/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.5000 - loss:
1.6327 - val_accuracy: 0.0000e+00 - val_loss: 1.3516
Epoch 9/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.5000 - loss:
1.5561 - val_accuracy: 0.0000e+00 - val_loss: 1.3685
Epoch 10/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.5000 - loss:
1.4636 - val_accuracy: 0.0000e+00 - val_loss: 1.3891
Epoch 11/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 74ms/step - accuracy: 0.5000 - loss:
1.3534 - val_accuracy: 0.0000e+00 - val_loss: 1.4141
Epoch 12/50
1/1 ━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.5000 - loss:
1.2257 - val_accuracy: 0.0000e+00 - val_loss: 1.4439
Epoch 13/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.5000 - loss:
1.0863 - val_accuracy: 0.0000e+00 - val_loss: 1.4774
Epoch 14/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 74ms/step - accuracy: 0.5000 - loss:
0.9492 - val_accuracy: 0.0000e+00 - val_loss: 1.5115
Epoch 15/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 72ms/step - accuracy: 0.5000 - loss:
0.8324 - val_accuracy: 0.0000e+00 - val_loss: 1.5438
Epoch 16/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.5000 - loss:
0.7427 - val_accuracy: 0.0000e+00 - val_loss: 1.5741
Epoch 17/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.5000 - loss:
0.6744 - val_accuracy: 0.0000e+00 - val_loss: 1.6041
Epoch 18/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 74ms/step - accuracy: 0.7500 - loss:
0.6228 - val_accuracy: 0.0000e+00 - val_loss: 1.6354
Epoch 19/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.5869 - val_accuracy: 0.0000e+00 - val_loss: 1.6685
Epoch 20/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.7500 - loss:
0.5638 - val_accuracy: 0.0000e+00 - val_loss: 1.7030
Epoch 21/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 78ms/step - accuracy: 0.7500 - loss:
0.5455 - val_accuracy: 0.0000e+00 - val_loss: 1.7384
Epoch 22/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.5243 - val_accuracy: 0.0000e+00 - val_loss: 1.7753
Epoch 23/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - accuracy: 0.7500 - loss:
0.4997 - val_accuracy: 0.0000e+00 - val_loss: 1.8145
Epoch 24/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.7500 - loss:
0.4755 - val_accuracy: 0.0000e+00 - val_loss: 1.8561
Epoch 25/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.4530 - val_accuracy: 0.0000e+00 - val_loss: 1.8997
Epoch 26/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.4261 - val_accuracy: 0.0000e+00 - val_loss: 1.9452
Epoch 27/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.3879 - val_accuracy: 0.0000e+00 - val_loss: 1.9938
Epoch 28/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.3382 - val_accuracy: 0.0000e+00 - val_loss: 2.0479
Epoch 29/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.2836 - val_accuracy: 0.0000e+00 - val_loss: 2.1101
Epoch 30/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.2328 - val_accuracy: 0.0000e+00 - val_loss: 2.1819
Epoch 31/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 79ms/step - accuracy: 0.7500 - loss:
0.1928 - val_accuracy: 0.0000e+00 - val_loss: 2.2612
Epoch 32/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.1648 - val_accuracy: 0.0000e+00 - val_loss: 2.3419
Epoch 33/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.1434 - val_accuracy: 0.0000e+00 - val_loss: 2.4173
Epoch 34/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 74ms/step - accuracy: 0.7500 - loss:
0.1236 - val_accuracy: 0.0000e+00 - val_loss: 2.4850
Epoch 35/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - accuracy: 0.7500 - loss:
0.1074 - val_accuracy: 0.0000e+00 - val_loss: 2.5471
Epoch 36/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 72ms/step - accuracy: 0.7500 - loss:
0.0976 - val_accuracy: 0.0000e+00 - val_loss: 2.6094
Epoch 37/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 74ms/step - accuracy: 0.7500 - loss:
0.0927 - val_accuracy: 0.0000e+00 - val_loss: 2.6780
Epoch 38/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.0881 - val_accuracy: 0.0000e+00 - val_loss: 2.7562
Epoch 39/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.0809 - val_accuracy: 0.0000e+00 - val_loss: 2.8428
Epoch 40/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.0725 - val_accuracy: 0.0000e+00 - val_loss: 2.9317
Epoch 41/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 72ms/step - accuracy: 0.7500 - loss:
0.0653 - val_accuracy: 0.0000e+00 - val_loss: 3.0141
Epoch 42/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 77ms/step - accuracy: 0.7500 - loss:
0.0599 - val_accuracy: 0.0000e+00 - val_loss: 3.0824
Epoch 43/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 76ms/step - accuracy: 0.7500 - loss:
0.0556 - val_accuracy: 0.0000e+00 - val_loss: 3.1343
Epoch 44/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.0516 - val_accuracy: 0.0000e+00 - val_loss: 3.1738
Epoch 45/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 73ms/step - accuracy: 0.7500 - loss:
0.0483 - val_accuracy: 0.0000e+00 - val_loss: 3.2077
Epoch 46/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.0461 - val_accuracy: 0.0000e+00 - val_loss: 3.2426
Epoch 47/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 72ms/step - accuracy: 0.7500 - loss:
0.0446 - val_accuracy: 0.0000e+00 - val_loss: 3.2823
Epoch 48/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 88ms/step - accuracy: 0.7500 - loss:
0.0428 - val_accuracy: 0.0000e+00 - val_loss: 3.3268
Epoch 49/50
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 75ms/step - accuracy: 0.7500 - loss:
0.0408 - val_accuracy: 0.0000e+00 - val_loss: 3.3730
Epoch 50/50
1/1 ━━━━━━━━━━━━━━━━━━ 0s 77ms/step - accuracy: 0.7500 - loss:
0.0387 - val_accuracy: 0.0000e+00 - val_loss: 3.4166
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 116ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 124ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
Input: I love coding
Translated Text: liebe das Coden Coden ist
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 27ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 27ms/step
Input: This is a pen
Translated Text: ist ein Stift Coden ist
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 25ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 29ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 28ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 30ms/step
Input: She sings well
Translated Text: ist ein Stift Coden ist
2025-07-18 [Link].862762: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2025-07-18 [Link].876939: I tensorflow/core/util/port.cc:153] oneDNN custom operations are on. You may see slightly different numerical results due to floating-point round-off errors from different computation orders. To turn them off, set the environment variable `TF_ENABLE_ONEDNN_OPTS=0`.
2025-07-18 [Link].263196: I tensorflow/core/platform/cpu_feature_guard.cc:210] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
Result :
Output :
Mean Squared Error: 2.04
Coefficient of Determination (R²): -4.68
Result :