My neural network tries to find a lazy solution instead of the optimal solution when learning a mean

Python Programs

Post by Anonymous »

This is a neural network I attempted. I'm not too sure about my backpropagation, but it does at least seem to converge to answers. However, given 4 numbers and the task of finding their mean, it converges to 50 as the "safe" answer instead of learning the underlying pattern. Is this a problem with my code or with the theory behind the problem? Thanks!
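For context on why it lands on 50 specifically: under the squared-error cost used below, the best input-independent (constant) output is simply the mean of the targets. Assuming the 4 inputs are drawn uniformly from [0, 100] (the range isn't stated above), that mean is about 50, so a network that has collapsed to ignoring its inputs sits exactly there. A quick sanity check:

Code: Select all

import numpy as np

rng = np.random.default_rng(0)
X = rng.uniform(0, 100, size=(10000, 4))  # 4 numbers per sample
y = X.mean(axis=1)                        # target: their mean
# the constant c minimising mean((c - y)**2) is y.mean()
print(y.mean())  # ~50: the "safe" value the network settles on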

Code: Select all

import numpy as np

class NeuralNetwork:
    def __init__(self, layers):
        self.layersConfig = layers
        self.learningRate = 0.01
        self.createActivations()
        self.createWeights()
        self.createBiases()

    def reLU(self, x):
        return x * (x > 0)

    def lreLU(self, x):
        # leaky ReLU: x for x > 0, 0.01*x otherwise
        # (the original abs(x)*0.01 term flipped the sign for negative inputs)
        return x * (x > 0) + 0.01 * x * (x <= 0)

    def reLUd(self, x):
        return 1 * (x > 0)

    def lreLUd(self, x):
        # derivative of the leaky ReLU: 1 for x > 0, 0.01 otherwise
        return 0.99 * (x > 0) + 0.01

    def sigmoid(self, x):
        pass  # not implemented

    def createActivations(self):
        self.activations = self.layersConfig.copy()
        for i in range(len(self.activations)):
            if i == 0:
                self.activations[i] = np.random.rand(self.activations[i])  # temporary input layer
            else:
                self.activations[i] = np.zeros(self.activations[i])

    def createWeights(self):
        # weight matrices connecting each layer to the next
        self.weights = []
        for i in range(len(self.layersConfig) - 1):
            self.weights.append(np.random.randn(self.layersConfig[i + 1], self.layersConfig[i]))

    def createBiases(self):
        # one bias vector per non-input layer
        self.biases = []
        for i in range(len(self.layersConfig) - 1):
            self.biases.append(np.random.randn(self.layersConfig[i + 1]))

    def findCost(self, desired, actual):
        return (actual - desired) ** 2

    def feedFwd(self, k, n):
        # n: the layer the calculation reads from, k: the node in layer n+1 being activated
        return self.reLU(np.dot(self.weights[n][k], self.activations[n]) + self.biases[n][k])

    def forwardPropogation(self, inputs):
        self.activations[0] = np.array(inputs)
        # deep copy: a plain list.copy() shares the per-layer arrays,
        # so writing pre-activations would overwrite the activations
        self.preActivations = [a.copy() for a in self.activations]
        for i in range(len(self.layersConfig) - 1):
            for j in range(len(self.activations[i + 1])):
                # store the pre-activation before applying the nonlinearity
                self.preActivations[i + 1][j] = np.dot(self.weights[i][j], self.activations[i]) + self.biases[i][j]
                self.activations[i + 1][j] = self.feedFwd(j, i)

    def backPropogation2(self, desiredValues):
        self.gradVecB = self.biases.copy()
        self.gradVecW = self.weights.copy()
        self.errorVec = self.activations.copy()
        desiredValues = np.array(desiredValues)
        # update that part of the gradVecs and work backwards
        # self.errorVec[-1] = self.findCost(desiredValues, self.errorVec[-1])  # need to find the desired values
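backPropogation2 stops before the output-layer error is filled in. Below is a minimal sketch of that missing step, assuming the squared-error cost above; the name output_error and the outer-product weight gradient are illustrative, not part of the original code:

Code: Select all

# hypothetical continuation inside backPropogation2, with cost C = (a - y)^2:
# dC/da = 2 * (a - y), and da/dz = reLUd(z)
output_error = 2 * (self.activations[-1] - desiredValues) * self.reLUd(self.preActivations[-1])
self.gradVecB[-1] = output_error
# gradient w.r.t. the last weight matrix: outer product with the previous layer's activations
self.gradVecW[-1] = np.outer(output_error, self.activations[-2])

On the "lazy solution" itself: with raw inputs up to 100 feeding ReLU units at a 0.01 learning rate, hidden units can die early and the network falls back to the constant predictor discussed above; scaling inputs and targets into [0, 1] (e.g. dividing both by 100) is a common remedy.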
