Inspiration

We had prior experience with neural networks, genetic algorithms, SBCs, and microcontrollers, and we decided to combine those strengths.

What it does

The system uses a genetic algorithm to generate solutions to the traveling salesman problem by distributing computing tasks amongst several Arduinos.
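
To make that concrete, here is a minimal, self-contained sketch of the kind of genetic algorithm involved. It is illustrative rather than our exact code: the names (tour_length, crossover, mutate, evolve) exist only for this example, and the fitness evaluation inside the loop is the sort of work that gets farmed out to the worker boards.

import random

def tour_length(tour, dist):
    # Total length of a closed tour, given a distance matrix.
    return sum(dist[tour[i]][tour[(i + 1) % len(tour)]] for i in range(len(tour)))

def crossover(parent_a, parent_b):
    # Order crossover: copy a slice of parent_a, fill the remaining slots from parent_b.
    n = len(parent_a)
    i, j = sorted(random.sample(range(n), 2))
    child = [None] * n
    child[i:j] = parent_a[i:j]
    fill = [city for city in parent_b if city not in child]
    for k in range(n):
        if child[k] is None:
            child[k] = fill.pop(0)
    return child

def mutate(tour, rate=0.02):
    # Swap pairs of cities with a small probability.
    tour = tour[:]
    for k in range(len(tour)):
        if random.random() < rate:
            m = random.randrange(len(tour))
            tour[k], tour[m] = tour[m], tour[k]
    return tour

def evolve(dist, pop_size=50, generations=200):
    n = len(dist)
    population = [random.sample(range(n), n) for _ in range(pop_size)]
    for _ in range(generations):
        # Scoring each candidate tour is the expensive, embarrassingly parallel step,
        # and the part that can be distributed across the worker boards.
        scored = sorted(population, key=lambda t: tour_length(t, dist))
        parents = scored[:pop_size // 2]
        population = parents + [
            mutate(crossover(random.choice(parents), random.choice(parents)))
            for _ in range(pop_size - len(parents))
        ]
    return min(population, key=lambda t: tour_length(t, dist))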

How we built it

The system was built on a breadboard using a Raspberry Pi as a power bus.

Accomplishments that we're proud of

We were able to develop an effective serial communication protocol between the boards.
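
For a sense of what such a protocol can look like from the host side, here is a minimal pyserial sketch. The port name, baud rate, and one-line-per-message framing are assumptions for illustration, not the exact protocol we used.

import serial

def send_task(port, tour):
    # Send a candidate tour to a worker board and read back the fitness it computed.
    # Port name, baud rate, and the comma-separated framing are illustrative assumptions.
    with serial.Serial(port, 115200, timeout=2) as link:
        message = ",".join(str(city) for city in tour) + "\n"
        link.write(message.encode("ascii"))
        reply = link.readline().decode("ascii").strip()
        return float(reply) if reply else None

# Example: fitness = send_task("/dev/ttyACM0", [0, 3, 1, 2])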

Updates

hfroedge posted an update

from scipy.special import expit
import numpy as np
import scipy
import random
import dataBuilds
import generate_prime_trainingset


class Network(object):

    def __init__(self, inputLayerSize, hiddenLayerSize, outputLayerSize, seed=None):
        np.random.seed(seed)
        # Each layer is represented by its weights array plus activation and inputsums vectors.
        self.layer1 = np.random.randn(hiddenLayerSize, inputLayerSize)
        self.layer2 = np.random.randn(outputLayerSize, hiddenLayerSize)

        self.layer1_activations = np.zeros((hiddenLayerSize, 1))
        self.layer2_activations = np.zeros((outputLayerSize, 1))

        self.layer1_inputsums = np.zeros((hiddenLayerSize, 1))
        self.layer2_inputsums = np.zeros((outputLayerSize, 1))

        self.layer1_errorsignals = np.zeros((hiddenLayerSize, 1))
        self.layer2_errorsignals = np.zeros((outputLayerSize, 1))

        self.layer1_deltaw = np.zeros((hiddenLayerSize, inputLayerSize))
        self.layer2_deltaw = np.zeros((outputLayerSize, hiddenLayerSize))

        self.outputLayerSize = outputLayerSize
        self.inputLayerSize = inputLayerSize
        self.hiddenLayerSize = hiddenLayerSize
        print()
        print(self.layer1)
        print()
        print(self.layer2)
        print()
        # self.weights = [np.random.randn(y,x)
        #                 for x, y in zip(sizes[:-1], sizes[1:])]

    def feedforward(self, network_input):
        # Calculate the input sum and activation for each neuron in the hidden layer.
        for neuron in range(self.hiddenLayerSize):
            self.layer1_inputsums[neuron] = network_input * self.layer1[neuron]
            self.layer1_activations[neuron] = self.sigmoid(self.layer1_inputsums[neuron])

        # Calculate the input sum and activation for each neuron in the output layer. Each neuron in this
        # layer is represented by a weight vector holding all weights leading from the kth neuron in
        # layer (l-1) to the jth neuron in layer l.
        self.layer2_inputsums = np.zeros((self.outputLayerSize, 1))
        for neuron in range(self.outputLayerSize):
            for weight in range(self.hiddenLayerSize):
                self.layer2_inputsums[neuron] += self.layer1_activations[weight] * self.layer2[neuron][weight]
            self.layer2_activations[neuron] = self.sigmoid(self.layer2_inputsums[neuron])

        return self.layer2_activations

    def interpreted_output(self, network_input):
        # Convert the output-layer activation to a label: the single output neuron is
        # thresholded at 0.5 (above means "composite", otherwise "prime").
        if self.feedforward(network_input) > 0.5:
            return "composite"
        else:
            return "prime"
        # outputs = [x / 10 for x in range(-int((self.outputLayerSize/2)), int((self.outputLayerSize/2))+1, 1)] #range(-10, 11, 1)


    # def build_expected_output(self, training_data):
    #     #Views expected output number y for each x to generate an expected output vector from the network
    #     index=0
    #     for pair in training_data:
    #         expected_output_vector = np.zeros((self.outputLayerSize,1))
    #         x = training_data[0]
    #         y = training_data[1]
    #         for i in range(-int((self.outputLayerSize / 2)), int((self.outputLayerSize / 2)) + 1, 1):
    #             if y == i / 10:
    #                 expected_output_vector[i] = 1
    #                 #expect the target category to be a 1.
    #                 break
    #         training_data[index][1] = expected_output_vector
    #         index+=1
    #     return training_data

    def train(self, training_data, learn_rate=0.1):
        self.backpropagate(training_data, learn_rate)

    def backpropagate(self, train_data, learn_rate=0.1):
        # Perform one gradient-descent step for each (x, y) training pair.
        for datapair in range(len(train_data)):
            x = train_data[datapair][0]
            y = train_data[datapair][1]
            self.feedforward(x)
            # print("l2a " + str(self.layer2_activations))
            # print("l1a " + str(self.layer1_activations))
            # print("l2 " + str(self.layer2))
            # print("l1 " + str(self.layer1))
            for neuron in range(self.outputLayerSize):
                # Error signal of each output neuron: (activation - target) * sigmoid'(input sum).
                # Note that y[0] indexes into the target array and [neuron] selects the element for this neuron.
                self.layer2_errorsignals[neuron] = (self.layer2_activations[neuron] - np.array(y[0][neuron])) * self.sigmoid_prime(self.layer2_inputsums[neuron])

            # Use the recursive formula to calculate the error signals of the hidden-layer neurons.
            self.layer1_errorsignals = (self.layer2.T @ self.layer2_errorsignals) * self.sigmoid_prime(self.layer1_inputsums)
            # print(self.layer1_errorsignals)
            # for neuron in range(self.hiddenLayerSize):
            #     #Use recursive formula to calculate error signals of hidden layer neurons
            #     self.layer1_errorsignals[neuron] = np.multiply(self.layer2[neuron].T,self.layer2_errorsignals[neuron]) * self.sigmoid_prime(self.layer1_inputsums[neuron])

            # The partial derivative of C with respect to the weight from the kth neuron in layer (l-1)
            # to the jth neuron in layer l is (jth error signal in layer l) * (kth activation in layer (l-1)).
            # All weights are updated after each training pair.

            # Update weights in the second layer.
            for neuron in range(self.outputLayerSize):
                for weight in range(self.hiddenLayerSize):
                    self.layer2_deltaw[neuron][weight] = self.layer2_errorsignals[neuron] * self.layer1_activations[weight] * (-learn_rate)

            self.layer2 += self.layer2_deltaw

            # Update weights in the first layer.
            for neuron in range(self.hiddenLayerSize):
                self.layer1_deltaw[neuron] = self.layer1_errorsignals[neuron] * x * (-learn_rate)

            self.layer1 += self.layer1_deltaw
            # if(datapair % 50 == 0):
            #     print(self.evaluate())
            #     print()

    def evaluate(self):
        """
        Evaluates the neural network by running a forward pass on a sample input.
        :return: The network's output activations.
        """
        # x is an integer, y is a single-element np.array
        output = self.feedforward(10)
        return output

    def sigmoid(self, z):
        return expit(z)

    def sigmoid_prime(self, z):
        return (1 - self.sigmoid(z)) * self.sigmoid(z)

primeNet = Network(1, 1000, 1, 4)
primeNet.train(generate_prime_trainingset.createTrainingSet('10000primes.txt'), 0.001)
print("------")
print(primeNet.feedforward([90001]))
