# --- Simulation parameters ----------------------------------------------

# Torus (wrap-around world) dimensions
torus.width <- 15
torus.height <- 15
agent.count <- 10

# Distance agents move per tick (second or whatever)
agent.speed.per.tick <- 1

# Payoffs: a collision earns nothing, a successful move earns 10
payoff.collide <- 0
payoff.move <- 10
collision.radius <- 1

# Discount factor
lambda <- 1

# Possible directions to take, in radians:
# k evenly spaced angles covering [0, 2*pi)
k <- 8
n <- (2 * pi) / k
A <- seq(0, 2 * pi - n, n)

# We give all agents some random uniform starting position along the torus.
# Layout: row 1 = x coordinates, row 2 = y coordinates, one column per agent
# (byrow = TRUE so the first agent.count values fill the "x" row).
agent.coordinates <- matrix(
  data = c(runif(agent.count, 0, torus.width),
           runif(agent.count, 0, torus.height)),
  ncol = agent.count,
  nrow = 2,
  dimnames = list(c("x", "y"), NULL),
  byrow = TRUE)

# Agent propensities: one row per direction in A, one column per agent,
# all initialised to 1 so every direction starts out equally attractive.
theta <- matrix(
  data = 1,
  ncol = agent.count,
  nrow = length(A))

# Look in the file if you're interested — defines move(), collision() and
# calculateNewMean() used by the simulation loop below.
source("Functions.R")

# Epsilon-decreasing approach
# Similar to the epsilon-greedy strategy, except that the value of epsilon
# decreases as the experiment progresses, resulting in highly explorative
# behaviour at the start and highly exploitative behaviour at the finish.
epsilon.min <- 0.1
epsilon.max <- 0.9

# We use nnet's which.is.max so we can break ties at random.
# Which I think is more elegant
library(nnet)

# For now repeat the process 100 times
max.ticks <- 100

# Mean propensity per direction over time, one row filled in per tick
plotData <- matrix(data = NA, ncol = length(A), nrow = max.ticks)

for (tick in seq_len(max.ticks)) {
  # Exploration probability decreases linearly from (1 - epsilon.min) down to
  # (1 - epsilon.max): highly explorative at the start of the run, highly
  # exploitative at the finish. (The original comment claimed epsilon
  # increases — the code computes the complement, so it decreases.)
  epsilon <- 1 - (epsilon.min + (tick / max.ticks) * (epsilon.max - epsilon.min))

  # Randomly decide for each agent whether it will explore this tick
  do.explore <- runif(agent.count, 0, 1) <= epsilon

  # Maybe we can make this prettier but we need the agent index in theta
  directions <- sapply(seq_len(agent.count), function(agent.id) {
    if (do.explore[agent.id]) {
      # Choose a uniform random direction
      sample(A, 1)
    } else {
      # Choose the direction with the highest propensity, break ties at random
      A[which.is.max(theta[, agent.id])]
    }
  })

  # New coordinates for all agents if they would have moved
  # (move() is defined in Functions.R)
  coords.new <- t(move(torus.width, torus.height,
                       agent.coordinates["x", ], agent.coordinates["y", ],
                       agent.speed.per.tick, directions))

  # collisions[i, j]: does agent j's *new* position collide with agent i's
  # *current* position? We pass the entire current-coordinate rows as x2/y2
  # so each call yields a full column; each agent is compared against itself
  # too, which we fix right below.
  collisions <- sapply(seq_len(agent.count), function(agent.id) {
    collision(torus.width, torus.height, collision.radius,
              x1 = coords.new["x", agent.id], x2 = agent.coordinates["x", ],
              y1 = coords.new["y", agent.id], y2 = agent.coordinates["y", ])
  })

  # Ignore agent collisions with itself
  diag(collisions) <- FALSE

  # Reduce the matrix to a logical vector: does agent j collide with anyone?
  collisions <- apply(collisions, 2, any)

  # Indicator matrix with a 1 marking the direction chosen by each agent
  e <- matrix(data = 0, nrow = length(A), ncol = agent.count)
  e[cbind(match(directions, A), seq_along(directions))] <- 1

  # Diagonal payoff matrix: payoff.collide for colliders, payoff.move otherwise,
  # so e %*% u scales each agent's chosen-direction indicator by its payoff
  u <- diag(ifelse(collisions, payoff.collide, payoff.move))

  # calculateNewMean() is defined in Functions.R
  theta <- calculateNewMean(theta, (e %*% u), tick)
  plotData[tick, ] <- rowMeans(theta)

  # Colliding agents stay put; everyone else takes their new position
  agent.coordinates <- sapply(seq_len(agent.count), function(agent.id) {
    if (collisions[agent.id]) {
      agent.coordinates[, agent.id]
    } else {
      coords.new[, agent.id]
    }
  })
}

matplot(plotData, type = "l",
        xlab = "Time", ylab = "Mean propensity",
        main = "Epsilon-greedy Search")