# Code 1: lossMultiBoston.py
# Multivariate linear regression on the Boston housing data, trained with
# hand-written batch gradient descent.
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from tqdm import tqdm
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
# Load the Boston housing dataset (13 numeric features, house-price target).
# Fix: `datasets` was used without ever being imported (NameError).
# NOTE(review): load_boston was removed in scikit-learn 1.2 — confirm the
# installed version still provides it.
from sklearn import datasets
boston = datasets.load_boston()
X = boston.data
Y = boston.target
print(X.shape)

# Standardize features to zero mean / unit variance so one learning rate
# works for every weight during gradient descent.
from sklearn.preprocessing import StandardScaler
sc = StandardScaler()
X_transform = sc.fit_transform(X)
def predicted_y(weight, x, intercept):
    """Return linear-model predictions ``x @ weight + intercept``.

    Parameters
    ----------
    weight : array-like, shape (n_features,)
        Weight vector of the linear model.
    x : array-like, shape (n_samples, n_features)
        Feature matrix.
    intercept : float
        Bias term added to every prediction.

    Returns
    -------
    np.ndarray, shape (n_samples,)
    """
    # One vectorized matrix-vector product replaces the per-row Python loop;
    # result is identical but computed in C.
    return np.asarray(x) @ np.asarray(weight) + intercept
def loss(y, y_predicted):
    """Mean squared error between targets and predictions.

    Parameters
    ----------
    y, y_predicted : array-like, shape (n_samples,)

    Returns
    -------
    float
        (1/n) * Σ (y_i - ŷ_i)²
    """
    y = np.asarray(y, dtype=float)
    y_predicted = np.asarray(y_predicted, dtype=float)
    # Vectorized mean replaces the manual Python accumulation loop.
    return float(np.mean((y - y_predicted) ** 2))
def dldw(x, y, y_predicted):
    """Gradient of the MSE loss with respect to the weight vector.

    Computes -(2/n) * Xᵀ (y - ŷ), the exact closed form of the original
    per-sample accumulation loop, in one vectorized expression.

    Parameters
    ----------
    x : array-like, shape (n_samples, n_features)
    y, y_predicted : array-like, shape (n_samples,)

    Returns
    -------
    np.ndarray, shape (n_features,)
    """
    x = np.asarray(x, dtype=float)
    residual = np.asarray(y, dtype=float) - np.asarray(y_predicted, dtype=float)
    n = len(residual)
    return (-2.0 / n) * (x.T @ residual)
def dldb(y, y_predicted):
    """Gradient of the MSE loss with respect to the intercept (bias).

    Computes -(2/n) * Σ (y_i - ŷ_i); also removes the original's
    inconsistent mix of `n` and `len(y)` for the same quantity.

    Parameters
    ----------
    y, y_predicted : array-like, shape (n_samples,)

    Returns
    -------
    float
    """
    residual = np.asarray(y, dtype=float) - np.asarray(y_predicted, dtype=float)
    return float((-2.0 / len(residual)) * residual.sum())
def gradient_descent(x, y, epoch=2000, learning_rate=0.001):
    """Fit a linear model y ≈ x @ w + b by full-batch gradient descent.

    Improvements over the original: the hard-coded epoch count and learning
    rate are now backward-compatible keyword parameters; the unused local
    `n` is gone; the per-iteration `print(i)` (2000 lines of stdout noise)
    is removed.

    Parameters
    ----------
    x : np.ndarray, shape (n_samples, n_features)
        Feature matrix (expected to be standardized).
    y : np.ndarray, shape (n_samples,)
        Target values.
    epoch : int, default 2000
        Number of full-batch update steps.
    learning_rate : float, default 0.001
        Step size for both weight and bias updates.

    Returns
    -------
    (weight_vector, intercept)
        Parameters after the final update.

    Side effects
    ------------
    Displays a loss-vs-epoch plot via matplotlib.
    """
    weight_vector = np.random.randn(x.shape[1])
    intercept = 0
    linear_loss = []
    for i in range(epoch):
        y_predicted = predicted_y(weight_vector, x, intercept)
        # Loss is evaluated on the pre-update predictions, matching the
        # original's bookkeeping.
        linear_loss.append(loss(y, y_predicted))
        weight_vector = weight_vector - learning_rate * dldw(x, y, y_predicted)
        intercept = intercept - learning_rate * dldb(y, y_predicted)
    # Skip epoch 0: the random-init loss dwarfs the rest and flattens the curve.
    plt.plot(np.arange(1, epoch), linear_loss[1:])
    plt.xlabel("number of epoch")
    plt.ylabel("loss")
    plt.show()
    return weight_vector, intercept
# Train on the standardized features and report the learned parameters.
w,b=gradient_descent(X_transform,Y)
print("weight:",w)
print("bias:",b)
# (blog footer from the scraped page, kept as a comment so the file parses)
# No comments:
# Post a Comment