EX 22 Linear Regression
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
# Generate random data
np.random.seed(0)
area = 2.5 * np.random.randn(100) + 25
price = 25 * area + 5 + np.random.randint(20,50, size = len(area))
data = np.array([area, price])
data = pd.DataFrame(data = data.T, columns=['area','price'])
plt.scatter(data['area'], data['price'])
plt.show()
W = sum(price*(area-np.mean(area))) / sum((area-np.mean(area))**2)
b = np.mean(price) - W*np.mean(area)
print("The regression coefficients are", W,b)
y_pred = W * area + b
plt.plot(area, y_pred, color='red',label="Predicted Price")
plt.scatter(data['area'], data['price'], label="Training Data")
plt.xlabel("Area")
plt.ylabel("Price")
plt.legend()
plt.show()
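As a quick sanity check (an added sketch, not part of the original listing), the same coefficients can be obtained with np.polyfit, which solves the same least-squares problem:
# Assumed addition: np.polyfit with degree 1 fits the same least-squares line,
# so its slope and intercept should match W and b computed above.
W_np, b_np = np.polyfit(area, price, deg=1)
print("np.polyfit coefficients:", W_np, b_np)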
EX 23 Linear Regression
We will consider the Boston housing price dataset (http://lib.stat.cmu.edu/datasets/boston),
collected by Harrison and Rubinfeld in 1978.
The dataset contains 506 sample cases.
Each sample is described by 14 attributes:
• CRIM – per capita crime rate by town
• ZN – proportion of residential land zoned for lots over 25,000 sq.ft.
• INDUS – proportion of non-retail business acres per town
• CHAS – Charles River dummy variable (1 if tract bounds river; 0 otherwise)
• NOX – nitric oxide concentration (parts per 10 million)
• RM – average number of rooms per dwelling
• AGE – proportion of owner-occupied units built prior to 1940
• DIS – weighted distances to five Boston employment centers
• RAD – index of accessibility to radial highways
• TAX – full-value property-tax rate per $10,000
• PTRATIO – pupil-teacher ratio by town
• B – 1000(Bk - 0.63)^2 where Bk is the proportion of blacks by town
• LSTAT – percentage of lower status citizens in the population
• MEDV – median value of owner-occupied homes in $1,000s
import tensorflow as tf
import pandas as pd
from tensorflow import feature_column as fc
from tensorflow.keras.datasets import boston_housing
(x_train, y_train), (x_test, y_test) = boston_housing.load_data()
features = ['CRIM', 'ZN', 'INDUS','CHAS','NOX','RM','AGE', 'DIS',
'RAD', 'TAX', 'PTRATIO', 'B', 'LSTAT']
x_train_df = pd.DataFrame(x_train, columns= features)
x_test_df = pd.DataFrame(x_test, columns= features)
y_train_df = pd.DataFrame(y_train, columns=['MEDV'])
y_test_df = pd.DataFrame(y_test, columns=['MEDV'])
x_train_df.head()
feature_columns = []
for feature_name in features:
    feature_columns.append(fc.numeric_column(feature_name, dtype=tf.float32))
def estimator_input_fn(df_data, df_label, epochs=10, shuffle=True, batch_size=32):
    def input_function():
        ds = tf.data.Dataset.from_tensor_slices((dict(df_data), df_label))
        if shuffle:
            ds = ds.shuffle(100)
        ds = ds.batch(batch_size).repeat(epochs)
        return ds
    return input_function
train_input_fn = estimator_input_fn(x_train_df, y_train_df)
val_input_fn = estimator_input_fn(x_test_df, y_test_df, epochs=1, shuffle=False)
linear_est = tf.estimator.LinearRegressor(feature_columns=feature_columns)
linear_est.train(train_input_fn, steps=100)
result = linear_est.evaluate(val_input_fn)
print(result)
predictions = linear_est.predict(val_input_fn)
for pred, exp in zip(predictions, y_test[:32]):
    print("Predicted Value: ", pred['predictions'][0], "Expected: ", exp)
Ex. 24 Logistic Regression
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
((train_data, train_labels), (eval_data, eval_labels)) = tf.keras.datasets.mnist.load_data()
train_data = train_data/np.float32(255)
train_labels = train_labels.astype(np.int32)
eval_data = eval_data/np.float32(255)
eval_labels = eval_labels.astype(np.int32)
feature_columns = [tf.feature_column.numeric_column("x", shape=[28,
28])]
classifier = tf.estimator.LinearClassifier(
    feature_columns=feature_columns,
    n_classes=10,
    model_dir="mnist_model/")
train_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={"x": train_data},
    y=train_labels,
    batch_size=100,
    num_epochs=None,
    shuffle=True)
classifier.train(input_fn=train_input_fn, steps=10)
val_input_fn = tf.compat.v1.estimator.inputs.numpy_input_fn(
    x={"x": eval_data},
    y=eval_labels,
    num_epochs=1,
    shuffle=False)
eval_results = classifier.evaluate(input_fn=val_input_fn)
print(eval_results)
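For comparison, the same multinomial logistic regression can be written as a single softmax Dense layer in Keras (an added sketch with assumed settings, not part of the original listing):
# Sketch only: flatten the 28x28 pixels and apply one softmax layer.
lr_model = tf.keras.Sequential([
    tf.keras.layers.Flatten(input_shape=(28, 28)),
    tf.keras.layers.Dense(10, activation='softmax')
])
lr_model.compile(optimizer='sgd', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
lr_model.fit(train_data, train_labels, epochs=1, batch_size=100, verbose=1)
print(lr_model.evaluate(eval_data, eval_labels, verbose=0))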
Ex. 25 Image Reading (Byra dataset)
The dataset contains B-mode liver ultrasound data from 55 patients. Upload the .mat file to the Colab sample_data folder before running the code.
import scipy.io
fname = '/content/sample_data/dataset_liver_bmodes_steatosis_assessment_IJCARS.mat'
mat = scipy.io.loadmat(fname)
import tensorflow as tf
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
my_df = pd.DataFrame(mat['data'][0])
display(my_df)
Sample output: a DataFrame with one row per patient and the columns class, id, fat and images; the images column holds each patient's stack of B-mode frames.
from PIL import Image
count = 1
# the images field of the first patient holds its stack of B-mode frames
for frame in mat['data'][0][0][3]:
    print("Image", count)
    image2 = Image.fromarray(frame)
    display(image2)
    count = count + 1
Sample output: each frame is displayed in turn as a 434 × 636 uint8 image.
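To use the images for training, the frames and their labels can be collected into NumPy arrays. The sketch below is our addition and assumes the struct fields are named 'class' and 'images', as the DataFrame columns above suggest:
# Sketch only: stack every frame of every patient with its class label.
X, y = [], []
for patient in mat['data'][0]:
    label = int(np.squeeze(patient['class']))   # assumed field name
    for frame in patient['images']:             # assumed field name
        X.append(frame)
        y.append(label)
X = np.array(X)
y = np.array(y)
print(X.shape, y.shape)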
Ex. 26 LSTM for prediction
import numpy as np
import pandas as pd
from keras.models import Sequential
from keras.layers import LSTM, Dense
# Split a univariate sequence into samples of n_steps inputs and one target,
# e.g. with n_steps=3: [10, 15, 12, 14, ...] -> X=[10, 15, 12], y=14
def split_sequence(sequence, n_steps):
    X, y = list(), list()
    for i in range(len(sequence)):
        end_ix = i + n_steps
        if end_ix > len(sequence) - 1:
            break
        seq_x, seq_y = sequence[i:end_ix], sequence[end_ix]
        X.append(seq_x)
        y.append(seq_y)
    return np.array(X), np.array(y)
# the sequence could also be read from a csv file, e.g.:
# data = pd.read_csv('/content/sample_data/temperaturecsv.csv')
# seq = np.array(data)
seq = [10,15, 12, 14, 20, 13, 26, 32, 14, 23, 15, 16]
n_steps = 3
X, y = split_sequence(seq, n_steps)
# reshape to (samples, timesteps, features) as expected by the LSTM layer
n_features = 1
X = X.reshape((X.shape[0], X.shape[1], n_features))
model = Sequential()
model.add(LSTM(50, activation='relu', input_shape=(n_steps, n_features)))
model.add(Dense(1))
model.compile(optimizer='adam', loss='mse')
model.fit(X, y, epochs=20, verbose=1)
# testing sequence: the last n_steps values predict the next one
input_sequence = np.array(seq[-n_steps:]).reshape((1, n_steps, n_features))
prediction = model.predict(input_sequence, verbose=1)
print(input_sequence)
print("predict",prediction)