Int 246: Machine Learning Algorithms PPTs

1. Introduction to Machine Learning


Ever since computers were invented, we have wondered whether they might be made to learn.
If we could understand how to program them to learn, to improve automatically with experience, the impact would be dramatic.

Imagine computers
learning from medical records which treatments are most effective for new diseases,
houses learning from experience to optimize energy costs based on the particular usage patterns of their occupants,
and personal software assistants learning the evolving interests of their users in order to highlight especially relevant stories from the online morning newspaper.

2. Designing a Learning System


Choosing the Training Experience
The type of training experience available can have a significant impact on the success or failure of the learner.
One key attribute is whether the training experience provides direct or indirect feedback regarding the choices made by the performance system.
For example, in learning to play checkers, the system might learn from direct training examples consisting of individual checkers board states and the correct move for each. Alternatively, it might have available only indirect information consisting of the move sequences and final outcomes of various games played.
Here the learner faces an additional problem of credit assignment, or determining the degree to which each move in the sequence deserves credit or blame for the final outcome.
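To make the distinction concrete, here is a minimal sketch of the two kinds of training example (the structures, field names, and moves are illustrative assumptions, not from the text):

# a direct training example: one board state paired with the correct move (illustrative)
direct_example = {"board": "some-board-state", "correct_move": "11-15"}

# an indirect training example: a whole game's move sequence plus only the final
# outcome; the learner must decide which moves deserve credit or blame for that
# outcome (the credit-assignment problem)
indirect_example = {"moves": ["11-15", "23-19", "8-11"], "outcome": "win"}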
A second important attribute of the training experience is the degree to which the learner controls the sequence of training examples.
For example, the learner might rely on the teacher to select informative board states and to provide the correct move for each. Alternatively, the learner might itself propose board states that it finds particularly confusing and ask the teacher for the correct move.
A third important attribute of the training experience is how well it represents the distribution of examples over which the final system performance P must be measured.




Topics in PPTs

# Convex Learning Problems
# Formal Learning Model
# Boosting & Validation





To download the PPTs: Csenotes12

Important Stuff:


# RBF function + perceptron: the perceptron below is trained on what appear
# to be radial-basis-function features of the four XOR patterns
import numpy as np

class Perceptron:

    def __init__(self, eta=0.01, n_iter=30):
        self.eta = eta        # learning rate (between 0.0 and 1.0)
        self.n_iter = n_iter  # passes over the training dataset

    def fit(self, X, y):
        self.w_ = np.zeros(1 + X.shape[1])  # weights; w_[0] is the bias
        k = 1
        for _ in range(self.n_iter):
            print("\nIteration:", k)
            flag = 0
            for xi, target in zip(X, y):
                output = self.predict(xi)
                print("Input is", xi, "Target is", target)
                print("Output is", output)
                error = target - output
                print("Actual weights", self.w_)
                if error != 0:
                    flag = 1
                    update = self.eta * error
                    self.w_[1:] += update * xi
                    self.w_[0] += update
                print("Updated weight is", self.w_, "\n")
            k += 1
            if flag == 0:  # a full pass with no mistakes: converged
                break
        return self

    def net_input(self, X):
        """Calculate net input w.x + b"""
        return np.dot(X, self.w_[1:]) + self.w_[0]

    def predict(self, X):
        # unit-step activation: class 1 if the net input is non-negative
        return np.where(self.net_input(X) >= 0.0, 1, 0)

# RBF features of the XOR patterns (0,0), (0,1), (1,0), (1,1)
X = np.array([[1, 0.018], [0.37, 0.37], [0.37, 0.37], [0.018, 1]])
print('Input:\n', X)
# targets
y = np.array([0, 1, 1, 0])
# train the perceptron
ppn = Perceptron(eta=1, n_iter=10)
ppn.fit(X, y)
# predict the outputs
print("Output:", ppn.predict(X))



# forward and backward propagation (a two-layer network learning XOR)

import numpy as np
# input array: the four XOR patterns
x = np.array([[0, 0], [1, 0], [0, 1], [1, 1]])
# target outputs
y = np.array([[0], [1], [1], [0]])

# sigmoid activation function
def sigmoid(x):
    return 1 / (1 + np.exp(-x))

# derivative of the sigmoid, written in terms of its output
def derivative_sigmoid(x):
    return x * (1 - x)

# variable initialization
epoch = 9000                     # number of training iterations
lr = 0.1                         # learning rate
inputlayer_neurons = x.shape[1]  # number of features in the data set
hiddenlayer_neurons = 2          # number of hidden-layer neurons
output_neurons = 1               # number of neurons in the output layer
# random weight and bias initialization
wh = np.random.uniform(size=(inputlayer_neurons, hiddenlayer_neurons))
bh = np.random.uniform(size=(1, hiddenlayer_neurons))
wout = np.random.uniform(size=(hiddenlayer_neurons, output_neurons))
bout = np.random.uniform(size=(1, output_neurons))

for i in range(epoch):
    # forward propagation
    hidden_layer_input = np.dot(x, wh) + bh
    hidden_layer_activations = sigmoid(hidden_layer_input)
    output_layer_input = np.dot(hidden_layer_activations, wout) + bout
    output = sigmoid(output_layer_input)
    # backward propagation
    E = y - output
    slope_output_layer = derivative_sigmoid(output)
    slope_hidden_layer = derivative_sigmoid(hidden_layer_activations)
    d_output = E * slope_output_layer
    Error_at_hidden_layer = d_output.dot(wout.T)
    d_hiddenlayer = Error_at_hidden_layer * slope_hidden_layer
    wout += hidden_layer_activations.T.dot(d_output) * lr
    bout += np.sum(d_output, axis=0, keepdims=True) * lr
    wh += x.T.dot(d_hiddenlayer) * lr
    bh += np.sum(d_hiddenlayer, axis=0, keepdims=True) * lr

print(output)
print(E)
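To read the final activations as class labels, threshold the sigmoid outputs at 0.5 (a small illustrative addition, not in the original):

# threshold the network outputs to get predicted XOR labels (illustrative)
print((output > 0.5).astype(int))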

       
       

# Kohonen self-organising map (SOM) algorithm

import numpy as np
x = np.array([[0, 0, 1, 1], [1, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1]])
w_ = np.array([[0.2, 0.9], [0.4, 0.7], [0.6, 0.5], [0.8, 0.3]])  # one weight column per output neuron
lr = 0.5
epoch = 2

for it in range(epoch):
    for i in range(x.shape[0]):
        print("for input", x[i])
        # squared Euclidean distance from the input to each neuron's weight vector
        dist = [np.sum((x[i] - w_[:, j]) ** 2) for j in range(w_.shape[1])]
        index = 0 if dist[0] < dist[1] else 1  # index of the winning neuron
        print("neuron at position", index, "won")
        # move the winner's weights towards the input
        w_[:, index] += lr * (x[i] - w_[:, index])
        print(w_)
    lr = lr * 0.5  # decay the learning rate after each epoch
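Once trained, a new pattern can be assigned to the cluster whose weight vector is nearest (a small illustrative addition; the test pattern is made up):

# assign a hypothetical new pattern to the nearest cluster (illustrative)
new_x = np.array([1, 0, 0, 1])
dist = [np.sum((new_x - w_[:, j]) ** 2) for j in range(w_.shape[1])]
print("nearest cluster:", int(np.argmin(dist)))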


# learning vector quantization (LVQ)

import numpy as np
x = np.array([[0, 0, 1, 1], [1, 0, 0, 0], [0, 1, 1, 0], [0, 0, 0, 1]])
w_ = np.array([[0.2, 0.9], [0.4, 0.7], [0.6, 0.5], [0.8, 0.3]])  # one prototype column per class
t = [0, 1, 1, 0]  # target class of each input
lr = 0.5
epoch = 2

for it in range(epoch):
    for i in range(x.shape[0]):
        print("for input", x[i])
        # squared Euclidean distance from the input to each prototype
        dist = [np.sum((x[i] - w_[:, j]) ** 2) for j in range(w_.shape[1])]
        index = 0 if dist[0] < dist[1] else 1  # index of the winning prototype
        print("neuron at position", index, "won and target is", t[i])
        if index == t[i]:
            # winner has the correct class: move it towards the input
            w_[:, index] += lr * (x[i] - w_[:, index])
        else:
            # winner has the wrong class: move it away from the input
            w_[:, index] -= lr * (x[i] - w_[:, index])
        print(w_)
    lr = lr * 0.5  # decay the learning rate after each epoch
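To classify a new pattern, LVQ returns the class of the nearest prototype (a small illustrative addition; the test pattern is made up):

# classify a hypothetical new pattern by its nearest prototype (illustrative)
new_x = np.array([0, 1, 0, 0])
dist = [np.sum((new_x - w_[:, j]) ** 2) for j in range(w_.shape[1])]
print("predicted class:", int(np.argmin(dist)))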



# support vector machines
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.svm import SVC
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
import numpy as np

iris = datasets.load_iris()
X = iris.data[:, [2, 3]]  # petal length and petal width
y = iris.target

# visualise the two features
plt.scatter(X[:, 0], X[:, 1], c=y)
plt.xlabel('petal length (cm)')
plt.ylabel('petal width (cm)')
plt.show()

X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.3, random_state=0)

# random_state fixes the shuffling; other kernel types: 'rbf', 'poly'
svm = SVC(kernel='linear', random_state=0)
svm.fit(X_train, y_train)
y_pred = svm.predict(X_test)
print('Misclassified samples: %d' % (y_test != y_pred).sum())
print('Accuracy: %.2f' % accuracy_score(y_test, y_pred))
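The comment above mentions other kernel types; for comparison, the same data can be refit with a non-linear kernel (an illustrative variation, not run in the original notes):

# refit with an RBF kernel for comparison (illustrative)
svm_rbf = SVC(kernel='rbf', gamma='auto', random_state=0)
svm_rbf.fit(X_train, y_train)
print('RBF-kernel accuracy: %.2f' % accuracy_score(y_test, svm_rbf.predict(X_test)))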


# fuzzy
import numpy as np
import skfuzzy as fuzz
from skfuzzy import control as ctrl
# new Antecedent/Consequent objects hold universe variables and membership
# functions
mte = ctrl.Antecedent(np.arange(0, 101, 1), 'mte')
ete = ctrl.Antecedent(np.arange(0, 101, 1), 'ete')
cgpa = ctrl.Antecedent(np.arange(0, 11, 1), 'cgpa')
# automatic membership function population is possible with .automf(3) or .automf(5)

mte.automf(3)
ete.automf(3)
# custom triangular membership functions for cgpa
cgpa['low'] = fuzz.trimf(cgpa.universe, [0, 4, 6])
cgpa['medium'] = fuzz.trimf(cgpa.universe, [5, 7, 8])
cgpa['high'] = fuzz.trimf(cgpa.universe, [7, 10, 10])
# you can see how these look with .view()
mte['average'].view()
ete.view()
cgpa.view()
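The block above only defines fuzzy variables and membership functions. A minimal sketch of how it could be extended into a full control system, assuming a hypothetical 'grade' output variable and made-up rules and inputs (none of this is in the original notes):

# hypothetical extension: a Consequent, rules, and a simulation (illustrative)
grade = ctrl.Consequent(np.arange(0, 11, 1), 'grade')
grade.automf(3)  # creates 'poor', 'average', 'good' terms
rule1 = ctrl.Rule(mte['poor'] | ete['poor'], grade['poor'])
rule2 = ctrl.Rule(ete['average'], grade['average'])
rule3 = ctrl.Rule(mte['good'] & ete['good'], grade['good'])
grading = ctrl.ControlSystemSimulation(ctrl.ControlSystem([rule1, rule2, rule3]))
grading.input['mte'] = 65  # made-up exam scores
grading.input['ete'] = 80
grading.compute()
print(grading.output['grade'])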


