Commit 006ced51 authored by Julien Lin

Merge branch 'evolutionnary'

parents 440add61 83ea7e30
......@@ -5,6 +5,14 @@
"version": "0.2.0",
"configurations": [
{
"name": "Python: num_evolutionary",
"type": "python",
"request": "launch",
"module": "snp",
"args": ["--solver","num_evolutionary", "--nb-population","100", "--nb-offspring", "100", "--nb-selected", "10"]
},
{
"name": "Python: num_random",
"type": "python",
......
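For reference, this debug configuration is roughly equivalent to launching the solver from a shell (a sketch, assuming the project is run as the `snp` module, as the configuration's `"module": "snp"` entry indicates):
python -m snp --solver num_evolutionary --nb-population 100 --nb-offspring 100 --nb-selected 10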
......@@ -2,11 +2,15 @@
# Algorithms
########################################################################
import numpy as np
def random(func, init, again):
"""Iterative random search template."""
best_sol = init()
best_val = func(best_sol)
val,sol = best_val,best_sol
val, sol = best_val, best_sol
i = 0
while again(i, best_val, best_sol):
sol = init()
......@@ -22,7 +26,7 @@ def greedy(func, init, neighb, again):
"""Iterative randomized greedy heuristic template."""
best_sol = init()
best_val = func(best_sol)
val,sol = best_val,best_sol
val, sol = best_val, best_sol
i = 1
while again(i, best_val, best_sol):
sol = neighb(best_sol)
......@@ -34,13 +38,14 @@ def greedy(func, init, neighb, again):
i += 1
return best_val, best_sol
# TODO add a simulated-annealing template.
def simulated_annealing(func, init, neigh, temp, proba, rand, again):
"""Recuit simulé."""
best_sol, t = init()
best_val = func(best_sol)
cur_val, cur_sol = best_val,best_sol
cur_val, cur_sol = best_val, best_sol
k = 1
# t = 2000
while again(k, best_val, best_sol):
......@@ -55,7 +60,26 @@ def simulated_annealing(func, init, neigh, temp, proba, rand, again):
best_val = cur_val
k += 1
return best_val, best_sol
# TODO add a population-based stochastic heuristic template.
def evolutionary(func, init, best, selection, evaluate, variation, replacement, again):
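"""Population-based stochastic heuristic template (evolutionary algorithm).
At each generation: select parents, produce offspring by variation, evaluate them,
track the best solution seen so far, and build the next population with the replacement operator."""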
pop = init()
pop_val = evaluate(pop, func)
best_sol, best_val = best(pop, pop_val)
k = 1
while again(k, best_val, best_sol):
parents, parents_val = selection(pop, pop_val)
offsprings = variation(parents)
offsprings_val = evaluate(offsprings, func)
sol, val = best(
np.concatenate((parents, offsprings), axis=0),
np.concatenate((parents_val, offsprings_val), axis=0),
)
if val > best_val:
best_sol = sol
best_val = val
pop, pop_val = replacement(parents, parents_val, offsprings, offsprings_val)
k += 1
return best_val, best_sol
......@@ -7,24 +7,30 @@ import numpy as np
def func(cover, **kwargs):
"""Make an objective function from the given function.
An objective function takes a solution and returns a scalar."""
def f(sol):
return cover(sol,**kwargs)
return cover(sol, **kwargs)
return f
def init(init, **kwargs):
"""Make an initialization operator from the given function.
An init. op. returns a solution."""
def f():
return init(**kwargs)
return f
def neig(neighb, **kwargs):
"""Make an neighborhood operator from the given function.
A neighb. op. takes a solution and returns another one."""
def f(sol):
return neighb(sol, **kwargs)
return f
......@@ -32,8 +38,10 @@ def iter(iters, **kwargs):
"""Make an iterations operator from the given function.
An iter. op. takes a value and a solution and returns
the current number of iterations."""
def f(i, val, sol):
return iters(i, val, sol, **kwargs)
return f
......@@ -43,14 +51,39 @@ def temp(lamda):
return f
def proba():
def f(delta_energy, temp):
return np.exp(-delta_energy / temp)
return f
def rand():
def f():
return np.random.rand()
return f
### Evolutionary
def selection(func, **kwargs):
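"""Make a selection operator from the given function.
A selection op. takes a population and its values
and returns the selected individuals and their values."""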
def f(population, population_val):
return func(population, population_val, **kwargs)
return f
def variation(func, **kwargs):
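"""Make a variation operator from the given function.
A variation op. takes a population and returns a population of offspring."""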
def f(population):
return func(population, **kwargs)
return f
def replacement(func, **kwargs):
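"""Make a replacement operator from the given function.
A replacement op. takes parents and offspring (with their values)
and returns the next population and its values."""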
def f(parents, parents_val, offsprings, offsprings_val):
return func(parents, parents_val, offsprings, offsprings_val, **kwargs)
return f
\ No newline at end of file
import math
import numpy as np
from numpy.random import Generator
from . import pb
......@@ -14,24 +15,24 @@ def to_sensors(sol):
>>> to_sensors([0,1,2,3])
[(0, 1), (2, 3)]
"""
assert(len(sol)>0)
assert len(sol) > 0
sensors = []
for i in range(0,len(sol),2):
sensors.append( ( int(math.floor(sol[i])), int(math.floor(sol[i+1])) ) )
for i in range(0, len(sol), 2):
sensors.append((int(math.floor(sol[i])), int(math.floor(sol[i + 1]))))
return sensors
def cover_sum(sol, domain_width, sensor_range, dim):
"""Compute the coverage quality of the given vector."""
assert(0 < sensor_range <= math.sqrt(2))
assert(0 < domain_width)
assert(dim > 0)
assert(len(sol) >= dim)
domain = np.zeros((domain_width,domain_width))
assert 0 < sensor_range <= math.sqrt(2)
assert 0 < domain_width
assert dim > 0
assert len(sol) >= dim
domain = np.zeros((domain_width, domain_width))
sensors = to_sensors(sol)
cov = pb.coverage(domain, sensors, sensor_range*domain_width)
cov = pb.coverage(domain, sensors, sensor_range * domain_width)
s = np.sum(cov)
assert(s >= len(sensors))
assert s >= len(sensors)
return s
......@@ -39,23 +40,112 @@ def cover_sum(sol, domain_width, sensor_range, dim):
# Initialization
########################################################################
def rand(dim, scale):
"""Draw a random vector in [0,scale]**dim."""
return np.random.random(dim) * scale
def init_with_t(t,**kwargs):
def init_with_t(t, **kwargs):
"""Same as rand but add a time."""
return rand(**kwargs), t
def init_evolutionary(dim, scale, nb_population):
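"""Draw `nb_population` random vectors in [0,scale]**dim."""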
population = []
for _ in range(nb_population):
population.append(rand(dim, scale))
return np.array(population)
########################################################################
# Neighborhood
########################################################################
def neighb_square(sol, scale, domain_width):
"""Draw a random vector in a square of witdh `scale` in [0,1]
as a fraction of the domain width around the given solution."""
assert(0 < scale <= 1)
assert 0 < scale <= 1
side = domain_width * scale
new = sol + (np.random.random(len(sol)) * side - side/2)
new = sol + (np.random.random(len(sol)) * side - side / 2)
return new
########################################################################
# Evolutionary specific functions
########################################################################
def best(population, population_val):
"""Return the best individual of the population and its value."""
assert len(population) == len(population_val)
# argmax must be taken over the array of values, not over the scalar maximum.
best_index = np.argmax(population_val)
return population[best_index], population_val[best_index]
def selection(population, population_val, nb_selected):
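"""Keep the `nb_selected` best individuals of the population (truncation selection)."""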
assert len(population) == len(population_val)
assert nb_selected <= len(population)
sorted_population = sorted(
zip(population, population_val),
key=lambda individual: individual[1],
reverse=True,
) # Sort by the score
return (
np.array([individual[0] for individual in sorted_population[:nb_selected]]),
np.array([individual[1] for individual in sorted_population[:nb_selected]]),
)
def variation(population, nb_offspring, nb_sensors, domain_width, variation_scale):
"""Produce `nb_offspring` offspring by averaging two random parents
and perturbing the result with `neighb_square`.
`population` is an array of individuals; an individual is a NumPy array
of size dim * nb_sensors."""
new_population = []
for _ in range(nb_offspring):
# randint's upper bound is exclusive, so use len(population) to allow every parent.
parent1 = population[np.random.randint(0, len(population))]
parent2 = population[np.random.randint(0, len(population))]
# Redraw the offspring until it lies within the domain.
new_individual = neighb_square((parent1 + parent2) / 2, variation_scale, domain_width)
while not all(0 <= x <= domain_width for x in new_individual):
new_individual = neighb_square((parent1 + parent2) / 2, variation_scale, domain_width)
new_population.append(new_individual)
return np.array(new_population)
def evaluate(population, func):
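"""Apply the objective function to every individual and return the array of values."""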
evaluation = []
for individual in population:
evaluation.append(func(individual))
return np.array(evaluation)
def replacement(
parents, parents_val, offsprings, offsprings_val, strat, nb_next_generation
):
"""Build the next generation: either keep only the offspring, or keep the
`nb_next_generation` best individuals among parents and offspring."""
if strat == "keep_offspring":
return offsprings, offsprings_val
else:
# Elitist replacement: sort parents and offspring together by value.
sorted_population = sorted(
zip(np.concatenate((parents, offsprings), axis=0),
np.concatenate((parents_val, offsprings_val), axis=0)),
key=lambda individual: individual[1],
reverse=True,
)
best_of = sorted_population[:nb_next_generation]
return (
np.array([individual[0] for individual in best_of]),
np.array([individual[1] for individual in best_of]),
)
......@@ -63,7 +63,13 @@ if __name__ == "__main__":
help="Random pseudo-generator seed (none for current epoch)",
)
solvers = ["num_greedy", "bit_greedy", "num_sim_anneal", "num_random"]
solvers = [
"num_greedy",
"bit_greedy",
"num_sim_anneal",
"num_random",
"num_evolutionary",
]
can.add_argument(
"-m",
"--solver",
......@@ -109,6 +115,38 @@ if __name__ == "__main__":
help="Scale of the variation operators (as a ration of the domain width)",
)
can.add_argument(
"--nb-population",
metavar="NB",
default=10,
type=int,
help="Size of the initial population for evolutionary algorithm.",
)
can.add_argument(
"--nb-selected",
metavar="NB",
type=int,
default=10,
help="Number of selected individuals before each generation.",
)
can.add_argument(
"--nb-offspring",
metavar="NB",
default=10,
type=int,
help="Number of offspring for each generation.",
)
can.add_argument(
"--quality-threshold",
metavar="DVAL",
default=600,
type=float,
help="Quality threshold. Used to plot the probability of a run being under the quality threshold.",
)
the = can.parse_args()
# Minimum checks.
......@@ -122,7 +160,7 @@ if __name__ == "__main__":
np.random.seed(the.seed)
# Weird numpy way to ensure single line print of array.
np.set_printoptions(linewidth=np.inf)
np.set_printoptions(linewidth=np.inf) # type: ignore
# Common termination and checkpointing.
history: list[Any] = []
......@@ -252,10 +290,41 @@ if __name__ == "__main__":
dim=d * the.nb_sensors,
scale=the.domain_width,
),
iters
iters,
)
sensors = num.to_sensors(sol)
elif the.solver == "num_evolutionary":
val, sol = algo.evolutionary(
make.func(
num.cover_sum,
domain_width=the.domain_width,
sensor_range=the.sensor_range,
dim=d * the.nb_sensors,
),
make.init(
num.init_evolutionary,
dim=d * the.nb_sensors,
scale=the.domain_width,
nb_population=the.nb_population,
),
num.best,
make.selection(num.selection, nb_selected=the.nb_selected),
num.evaluate,
make.variation(
num.variation,
nb_offspring=the.nb_offspring,
domain_width=the.domain_width,
variation_scale=the.variation_scale,
nb_sensors=the.nb_sensors,
),
make.replacement(
num.replacement,
nb_next_generation=the.nb_selected,
strat=None,
),
iters,
)
sensors = num.to_sensors(sol)
# Fancy output.
print("\n{} : {}".format(val, sensors))
......@@ -271,12 +340,20 @@ if __name__ == "__main__":
f = make.func(
num.cover_sum,
domain_width=the.domain_width,
sensor_range=the.sensor_range * the.domain_width,
sensor_range=the.sensor_range,
dim=d * the.nb_sensors
)
plot.surface(ax1, shape, f)
plot.path(ax1, shape, history)
else:
ax2 = fig.add_subplot(111)
ax2 = fig.add_subplot(121)
ax3 = fig.add_subplot(122)
values = np.array([epoch[0] for epoch in history])
# Indicator of the iterations whose best value reaches the quality threshold.
reached = values >= the.quality_threshold
ax3.step(np.arange(len(history)), reached)
domain = np.zeros(shape)
domain = pb.coverage(domain, sensors, the.sensor_range * the.domain_width)
......