Autoencoder
# Encode n two-digit numbers into one integer using base-100 place values,
# then decode them back out (a toy "autoencoder" round trip).
n = int(input("Enter the number of digits: "))
print("n =", n)

# w starts at the highest place value, 100^(n-1)
i = 0
w = 1
while i < n - 1:
    w *= 100
    i += 1

# Encode: shift the running total one base-100 place left, then add the input
i = 0
total = 0
while i < n:
    total *= 100
    print("Enter the", i + 1, "digit:")
    x = int(input())
    total += x
    print("W", i + 1, "is:", w)   # place value of the digit just entered
    w //= 100                     # integer division keeps w an int
    i += 1
print("Sum =", total)

# Decode: peel the digits back off, least significant first
while total > 0:
    y = total % 100
    print("the", n, "th digit is:", y)
    total //= 100
    n -= 1
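To make the round trip concrete, here is a worked trace (not part of the original program) for n = 3 with inputs 12, 34, 56:

Encode: total = ((0*100 + 12)*100 + 34)*100 + 56 = 123456, with weights W1 = 10000, W2 = 100, W3 = 1
Decode: 123456 % 100 = 56, then 1234 % 100 = 34, then 12 % 100 = 12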
GRU
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers

# GRU-based classifier: each 28x28 MNIST image is read as a
# sequence of 28 rows with 28 features per row.
model = keras.Sequential()
model.add(layers.GRU(64, input_shape=(28, 28)))
model.add(layers.BatchNormalization())
model.add(layers.Dense(10))
model.summary()

mnist = keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train, x_test = x_train / 255.0, x_test / 255.0

# Keep the last 10 samples as a tiny held-out test set; the rest of the
# original test split serves as validation data.
x_validate, y_validate = x_test[:-10], y_test[:-10]
x_test, y_test = x_test[-10:], y_test[-10:]

model.compile(
    loss=keras.losses.SparseCategoricalCrossentropy(from_logits=True),
    optimizer="sgd",
    metrics=["accuracy"],
)
model.fit(
    x_train, y_train, validation_data=(x_validate, y_validate), batch_size=64, epochs=10
)

# Predict each held-out sample individually and compare with its label.
for i in range(10):
    result = tf.argmax(model.predict(tf.expand_dims(x_test[i], 0)), axis=1)
    print(result.numpy(), y_test[i])
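Instead of looping sample by sample, the whole held-out set can be scored in one call with model.evaluate; a minimal sketch, assuming the model above has finished training:

# Sketch: bulk evaluation of the 10 held-out samples (not in the original script).
loss, acc = model.evaluate(x_test, y_test, verbose=0)
print(f"held-out loss: {loss:.4f}, accuracy: {acc:.4f}")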
Bidirectional LSTM
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Embedding, LSTM, Bidirectional, Dropout
from tensorflow.keras.datasets import imdb

# Load the IMDB reviews, keeping the 10,000 most frequent words.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)

# Pad/truncate every review to a fixed length of 200 tokens.
max_len = 200
x_train = pad_sequences(x_train, padding='post', maxlen=max_len)
x_test = pad_sequences(x_test, padding='post', maxlen=max_len)
y_test = np.array(y_test)
y_train = np.array(y_train)

model = Sequential()
model.add(Embedding(10000, 128, input_length=max_len))
model.add(Bidirectional(LSTM(64)))   # reads each review forwards and backwards
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=4,
                    validation_data=(x_test, y_test))
print(history.history['loss'])
print(history.history['accuracy'])

from matplotlib import pyplot
pyplot.plot(history.history['loss'])
pyplot.plot(history.history['accuracy'])
pyplot.title('model loss vs accuracy')
pyplot.xlabel('epoch')
pyplot.legend(['loss', 'accuracy'], loc='upper right')
pyplot.show()
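Once trained, the model can score an individual padded review; a minimal inference sketch (the 0.5 decision threshold is an assumption, not part of the original script):

# Sketch: classify one padded test review with the trained model.
prob = model.predict(x_test[:1], verbose=0)[0][0]   # sigmoid output in [0, 1]
print('positive' if prob > 0.5 else 'negative', f'(p={prob:.3f})')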
GRU Time Series
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import GRU, Dense

# 100 identical rows of the sequence 0..9; the first 9 values form the
# input window and the last value (always 9) is the target.
data = np.array([[i for i in range(10)] for _ in range(100)])
X, y = data[:, :-1], data[:, -1]
X = X.reshape((X.shape[0], X.shape[1], 1))   # (samples, timesteps, features)

model = Sequential()
model.add(GRU(64, activation='relu', input_shape=(9, 1)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=10, verbose=0)

# Query the model with a window outside the training range.
test_input = np.array([7, 8, 9, 10, 11, 12, 13, 14, 15])
test_input = test_input.reshape((1, 9, 1))
print(test_input)
predicted_value = model.predict(test_input, verbose=0)
print(f'Predicted value: {predicted_value[0][0]}')
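Because every training row is the identical window 0..8 with target 9, the query above probes extrapolation to unseen values; a quick in-distribution sanity check (not in the original script) would be:

# Sketch: feed the training window itself; the prediction should approach 9.0.
train_like = np.array([0, 1, 2, 3, 4, 5, 6, 7, 8]).reshape((1, 9, 1))
print(model.predict(train_like, verbose=0)[0][0])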
Bidirectional GRU
import numpy as np
from tensorflow.keras.models import Sequential
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.layers import Dense, Embedding, GRU, Bidirectional, Dropout
from tensorflow.keras.datasets import imdb

# Same IMDB pipeline as above, with a bidirectional GRU in place of the LSTM.
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=10000)
max_len = 200
x_train = pad_sequences(x_train, padding='post', maxlen=max_len)
x_test = pad_sequences(x_test, padding='post', maxlen=max_len)
y_test = np.array(y_test)
y_train = np.array(y_train)

model = Sequential()
model.add(Embedding(10000, 128, input_length=max_len))
model.add(Bidirectional(GRU(64)))   # forward and backward passes over each review
model.add(Dropout(0.5))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

history = model.fit(x_train, y_train,
                    batch_size=64,
                    epochs=4,
                    validation_data=(x_test, y_test))
print(history.history['loss'])
print(history.history['accuracy'])

from matplotlib import pyplot
pyplot.plot(history.history['loss'])
pyplot.plot(history.history['accuracy'])
pyplot.title('model loss vs accuracy')
pyplot.xlabel('epoch')
pyplot.legend(['loss', 'accuracy'], loc='upper right')
pyplot.show()
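As with the bidirectional LSTM, a single evaluate call summarizes test performance; a minimal sketch, assuming training above has completed:

# Sketch: aggregate test-set score (not in the original script).
score = model.evaluate(x_test, y_test, batch_size=64, verbose=0)
print(f'test loss: {score[0]:.4f}, test accuracy: {score[1]:.4f}')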