
A multi-layer perceptron (MLP) is given below with initial weights. Please train the MLP using the following training data set. Write down the detailed process of how each weight is recalculated for just one iteration (using every training instance once). Deliverable: clearly show how each weight is recalculated in the first iteration. Due: by the end of Nov. 4.

Solution

There is a problem with the figure: the output layer needs to be a single node.

Assuming 0.1, 0.7, and 0.3 to be the initial weights of the output layer.

Also adding a bias input node, with initial weights of 0.5 each.
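
For reference, the weight updates the code below performs are standard backpropagation with momentum; a sketch of the equations, in the code's own notation (N is the learning rate, M the momentum factor, t_k the target; the activation is tanh, whose derivative in terms of its output a is 1 - a^2):

\delta_k^{out} = (t_k - a_k^{out})\,\bigl(1 - (a_k^{out})^2\bigr)
\delta_j^{hid} = \bigl(1 - (a_j^{hid})^2\bigr)\sum_k \delta_k^{out}\, w_{jk}^{out}
\Delta w = N\,\delta\,a_{in} + M\,\Delta w_{prev}, \qquad w \leftarrow w + \Delta w

Every recalculated weight in the first iteration follows these three lines, applied once per training instance.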

Code

import math
import random
import numpy as np
from time import sleep
random.seed(0)

# calculate a random number where: a <= rand < b
# (unused here, since the assignment fixes the initial weights)
def rand(a, b):
    return (b-a)*random.random() + a

# our sigmoid function; tanh is a little nicer than the standard 1/(1+e^-x)
def sigmoid(x):
    return math.tanh(x)

# derivative of our sigmoid function, in terms of the output (i.e. y)
def dsigmoid(y):
    return 1.0 - y**2

class NN:
    def __init__(self, ni, nh, no):
        # number of input, hidden, and output nodes
        self.ni = ni + 1 # +1 for bias node
        #self.ni = ni # assuming no bias node
        self.nh = nh
        self.no = no

        # activations for nodes
        self.ai = [1.0]*self.ni
        self.ah = [1.0]*self.nh
        self.ao = [1.0]*self.no

        # create weights (hard-coded to the assignment's initial values)
        # wi[i][j]: weight from input activation ai[i] to hidden node j
        self.wi = [[0.5, 0.5, 0.5], [0.2, 0.6, 0.5], [0.7, 0.9, 0.1]]
        # wo[j][k]: weight from hidden node j to output node k
        self.wo = [[0.1], [0.7], [0.3]]

        # last change in weights for momentum
        self.ci = np.zeros([self.ni, self.nh])
        self.co = np.zeros([self.nh, self.no])

    def update(self, inputs):
        if len(inputs) != self.ni-1:
            raise ValueError('wrong number of inputs')

        # input activations
        for i in range(self.ni-1):
            #self.ai[i] = sigmoid(inputs[i])
            self.ai[i] = inputs[i]

        # hidden activations
        for j in range(self.nh):
            total = 0.0
            for i in range(self.ni):
                total = total + self.ai[i] * self.wi[i][j]
            self.ah[j] = sigmoid(total)

        # output activations
        for k in range(self.no):
            total = 0.0
            for j in range(self.nh):
                total = total + self.ah[j] * self.wo[j][k]
            self.ao[k] = sigmoid(total)

        return self.ao[:]


    def backPropagate(self, targets, N, M):
        if len(targets) != self.no:
            raise ValueError('wrong number of target values')

        # calculate error terms for output
        output_deltas = [0.0] * self.no
        for k in range(self.no):
            error = targets[k]-self.ao[k]
            output_deltas[k] = dsigmoid(self.ao[k]) * error

        # calculate error terms for hidden
        hidden_deltas = [0.0] * self.nh
        for j in range(self.nh):
            error = 0.0
            for k in range(self.no):
                error = error + output_deltas[k]*self.wo[j][k]
            hidden_deltas[j] = dsigmoid(self.ah[j]) * error

        # update output weights
        for j in range(self.nh):
            for k in range(self.no):
                change = output_deltas[k]*self.ah[j]
                self.wo[j][k] = self.wo[j][k] + N*change + M*self.co[j][k]
                self.co[j][k] = change
                #print(N*change, M*self.co[j][k])

        # update input weights
        for i in range(self.ni):
            for j in range(self.nh):
                change = hidden_deltas[j]*self.ai[i]
                self.wi[i][j] = self.wi[i][j] + N*change + M*self.ci[i][j]
                self.ci[i][j] = change

        # calculate error
        error = 0.0
        for k in range(len(targets)):
            error = error + 0.5*(targets[k]-self.ao[k])**2
        return error


    def test(self, patterns):
        for p in patterns:
            print(p[0], '->', self.update(p[0]))

    def weights(self):
        print('Input weights:')
        for i in range(self.ni):
            print(self.wi[i])
        print()
        print('Output weights:')
        for j in range(self.nh):
            print(self.wo[j])

    def train(self, patterns, iterations=1000, N=0.5, M=0.1):
        # N: learning rate
        # M: momentum factor
        for i in range(iterations):
            error = 0.0
            print(i, '-->')
            self.weights()
            sleep(1)  # slow down so the per-iteration weights can be read
            for p in patterns:
                inputs = p[0]
                targets = p[1]
                self.update(inputs)
                error = error + self.backPropagate(targets, N, M)
            if i % 100 == 0:
                print('error %-.5f' % error)


def demo():
    # Teach network XOR function
    pat = [
        [[0,0], [0]],
        [[0,1], [1]],
        [[1,0], [1]],
        [[1,1], [0]]
    ]

    # create a network with two input, three hidden, and one output node
    n = NN(2, 3, 1)
    # train it with some patterns
    n.train(pat)
    # test it
    n.test(pat)

if __name__ == '__main__':
    demo()
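
To see one update in full detail, here is a hand check of the very first weight change the code makes. The first training instance is [0,0] with target 0, the momentum terms are still zero, and we follow the single output weight wo[0][0] (a minimal sketch that mirrors the loops above):

import math

# Forward pass for input [0,0]: ai = [0, 0, 1] (the last entry is the
# bias node), so each hidden net input reduces to the last row of wi.
ah = [math.tanh(0.7), math.tanh(0.9), math.tanh(0.1)]  # ~[0.6044, 0.7163, 0.0997]
net_o = 0.1 * ah[0] + 0.7 * ah[1] + 0.3 * ah[2]        # ~0.5917
ao = math.tanh(net_o)                                  # ~0.5311

# Backward pass: output delta, then the wo[0][0] update with N = 0.5.
# The momentum term M*co[0][0] is zero on the very first pattern.
delta_o = (0 - ao) * (1 - ao ** 2)                     # ~-0.3813
wo_00 = 0.1 + 0.5 * delta_o * ah[0]                    # ~-0.0152
print(wo_00)

The remaining three training instances (now with momentum) move this weight to the 0.0701 shown below.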

After first iteration:

Input weights:

[0.49785290453745162, 0.51018227291554696, 0.51080703222823587]
[0.19419441275850063, 0.61400974594688751, 0.52198047390242153]
[0.68195942838455803, 0.85302041596764344, 0.081417564976490747]

Output weights:
[0.070142933548956982]
[0.67227568946170968]
[0.32081768594608279]
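
To reproduce the numbers above, run a single training pass (a minimal sketch, assuming the NN class from the listing is in scope; train() prints the weights at the start of each iteration, so a final weights() call shows the post-iteration values):

pat = [
    [[0,0], [0]],
    [[0,1], [1]],
    [[1,0], [1]],
    [[1,1], [0]]
]

n = NN(2, 3, 1)             # two inputs (plus bias), three hidden, one output
n.train(pat, iterations=1)  # one pass over all four training instances
n.weights()                 # weights after the first iteration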
