Simple Q-learning neural network using numpy
import numpy as np
from numpy import exp, array, random, dot
R = np.matrix([[-1, -1, -1, -1,  1, -1],   # reward is 1 for a correct action and -1 for a wrong action
               [-1, -1, -1,  1, -1,  1],
               [-1, -1, -1,  1, -1, -1],
               [-1,  1,  1, -1,  1, -1],
               [-1,  1,  1, -1, -1,  1],
               [-1,  1, -1, -1,  1,  1]])
Q = np.matrix(np.zeros([6, 6])) # Q matrix
gamma = 0.99 # Gamma (discount factor).
lr = 0.1 # learning rate
initial_state = 1 # Initial state.
w = 2*np.random.random((6,1)) - 1 # random weights for each action
def sigmoid(x):
    return 1 / (1 + exp(-x))

def sigmoid_derivative(x):
    return x * (1 - x)
def available_actions(state):
    # Return all available actions in the state given as an argument
    current_state_row = R[state, ]
    return current_state_row
def sample_next_action(available_actions_range):
    # Choose at random which action to perform within the range of all available actions
    next_action = int(np.random.choice(available_actions_range, 1))
    print("next_action: ", next_action)
    return next_action
def update(action, gamma, predict):
    # Update the Q matrix according to the path selected
    Q[current_state, action] = action + gamma * predict
    return Q[current_state, action]
for i in range(1000):  # Training
    current_state = np.random.randint(0, int(Q.shape[0]))
    available_act = np.array(available_actions(current_state))
    action = np.max(available_act)
    Qout = np.multiply(available_act, w)       # forward propagation: multiply the selected action with the weights
    predict = sigmoid(np.max(Qout))
    Qtarget = update(action, gamma, predict)   # target Q values (rewards)
    loss = np.sum(Qtarget - Qout)              # backward propagation: error between predicted and target Q values
    adjustment = np.multiply(action, loss, Qout)
    w = w + adjustment                         # adjusting weights
    Q += Qout
print("Trained network:")
print("--------")
print("weights : ", w)
np.set_printoptions(precision=2,suppress=True)
print(Q / np.max(Q) * 100) # Normalize the "trained" Q matrix
I am new to neural networks and reinforcement learning. I am trying to create a neural network (for learning purposes) that contains a single neuron with 6 inputs and 1 output. The problems I am facing are as follows:
I am not sure whether the way I predict the Q value through my Q network is correct, and I am also unsure whether my calculation of the target Q value is correct.
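For reference, from what I have read, the textbook target for Q-learning is the immediate reward plus the discounted best value of the next state. A minimal sketch of that, reusing the R and Q matrices from above, would be something like the following (the next_state argument is hypothetical, since my code above does not track a next state):

import numpy as np

def td_target(R, Q, state, action, next_state, gamma=0.99):
    # reward for taking `action` in `state`, plus the discounted value of the
    # best action available from the state we land in
    return R[state, action] + gamma * np.max(Q[next_state, :])

This is quite different from the `action + gamma * predict` line in my update function, which is part of what I am unsure about.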
When I run the code, the weights become NaN after a few iterations, and this leads to the Q matrix being filled with NaN values as well.
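My guess is that the weights blow up because the adjustment is never scaled by the learning rate lr that I defined. A single-neuron update that scales the step by lr and uses the sigmoid derivative might look like the sketch below (the names x and td_error are mine, not from the code above), but I am not sure this is the right fix:

import numpy as np

def update_weights(w, x, td_error, lr=0.1):
    # x: 6-element input vector, w: (6, 1) weight column, td_error: scalar error
    pred = 1 / (1 + np.exp(-np.dot(x, w)))    # sigmoid output of the single neuron
    grad = td_error * pred * (1 - pred) * x   # chain rule through the sigmoid
    return w + lr * grad.reshape(w.shape)     # small step scaled by the learning rate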
Topic q-learning implementation neural-network
Category Data Science