Commits (30)
__pycache__
*.csv
*.swp
*.json
......@@ -83,7 +83,7 @@ Problem modelization
>
> - mono-objective,
> - multi-modal,
> - multi-objectives.
> - multi-objectives (cf. Pareto optimality).
## Constraints management
......@@ -108,8 +108,8 @@ What is performance
>
> Additional performance axis:
>
> - robustness,
> - stability.
> - robustness (cf. "external" problem structure),
> - stability (cf. "internal" randomisation).
>
> Golden rule: the output of a metaheuristic is a distribution, not a solution.
......@@ -162,7 +162,6 @@ one should relize that we can only *estimates* the performances, considering *at
### Robustness and stability
Empirical evaluation
--------------------
......
......@@ -123,8 +123,33 @@ Most exercises consists in adding a single function in an existing module
(or your own module) and assemble it in the main executable.
1. Implement a simulated annealing.
You can use `num_simulated_annealing` or `bit_simulated_annealing` solvers
2. Implement an evolutionary algorithm.
You can use `num_genetic` or `bit_genetic` solvers
3. Implement an expected run time empirical cumulative density function.
You can execute `ert.py` which will plot 3 ert for two algorithms.
You can also use `draw_ert.py` with the -N option for the number of runs and optionally the -v option to choose a target value (if not used, it will draw a surface for all targets)
4. Implement a simple design of experiment to determine the best solver.
5. Provide a solver for a competition.
You can use `try_solver.py` to list the different parameters you want to try and get the best one (you can choose the algorithms at the beginning of the script)
6. Provide a solver for a competition.
For the competition, simply use my `snp.py`.
I have added the following penalisation to `num.py` at the end of `cover_sum`:
```python3
if len(sol) > dim:
s *= 0.5
for v in sol:
if v < 0:
s += v/domain_width
if v >= domain_width:
s -= (v-domain_width)/domain_width
```
#encoding: utf-8
from os import system, remove, path
import matplotlib.pyplot as plt
import numpy as np
import json
########################################################################
# Interface
########################################################################
if __name__=="__main__":
    # Command-line interface: run `snp.py` many times, then plot the
    # Expected Run Time (ERT) distribution built from the logged runs.
    import argparse
    can = argparse.ArgumentParser()
    can.add_argument("-N", "--nb-run", metavar="NB", default=100, type=int,
            help="Number of runs")
    can.add_argument("-v", "--for-value", metavar="NB", type=int,
            help="Print ERT with probabilities to obtain this value")
    can.add_argument("-save", "--save-proba", action='store_true', default=False,
            help="Disable graphical output and save proba")
    # parse_known_args: unrecognized options are forwarded verbatim to snp.py.
    arg = can.parse_known_args()
    the = arg[0]
    # Minimum checks.
    assert(0 < the.nb_run)
    # Start from a clean log: each run of snp.py appends to run.csv (-o run).
    if path.isfile('run.csv'):
        remove('run.csv')
    for i in range(the.nb_run):
        print("\rRun {}".format(i), end = '')
        # -no: no graphical output; -s: a fresh random seed for every run.
        system("python3 snp.py -o run -no -s " + str(np.random.randint(0, 10*the.nb_run)) + " "+" ".join(arg[1]))
    print("\r")
    # Data extraction.
    # run.csv holds "iteration ; value" lines, restarting at iteration 1 for
    # each run.  Each run becomes a dict mapping the iteration at which a new
    # best value was reached to that value.
    data = []
    attempt = {}
    current_best = 0
    temps_max = 0   # largest iteration index seen over all runs
    value_max = 0   # largest objective value seen over all runs
    with open('run.csv', 'r') as fd:
        for line in fd:
            line = line.split(' ; ')
            line[0] = int(line[0])    # iteration ("time")
            line[1] = float(line[1])  # objective value
            if line[0] > temps_max:
                temps_max = line[0]
            if line[1] > value_max:
                value_max = line[1]
            if line[0] == 1:
                # Iteration 1 marks the start of a new run: flush the
                # previous attempt (if any) and reset the running best.
                if len(attempt) > 0:
                    data.append(attempt)
                current_best = 0
                attempt = {}
            # Only record strict improvements.
            # NOTE(review): assumes objective values are positive, since the
            # running best starts at 0 — TODO confirm against snp.py's output.
            if line[1] > current_best:
                attempt[line[0]] = line[1]
                current_best = line[1]
    data.append(attempt)
    def probability(data,time,value):
        """Fraction of runs having reached at least `value` by `time`."""
        num = 0
        for attempt in data:
            good = False
            for t,v in attempt.items():
                if t <= time and v >= value:
                    good = True
                    break
            if good:
                num += 1
        return num/len(data)
    # Vectorize over (time, value); `data` (argument 0) stays scalar.
    probability = np.vectorize(probability, excluded=[0])
    # Plot
    if the.save_proba:
        # No plot: dump the per-run improvement dicts for later scripts.
        with open('proba.json', 'w') as outfile:
            json.dump(data, outfile)
    elif the.for_value is not None:
        # 1-D ERT: probability of reaching the given target over time.
        P = probability(data,range(1,temps_max+1),the.for_value)
        plt.bar(range(1,temps_max+1), P)
        plt.title("ERT for target {}".format(the.for_value))
        plt.xlabel('run')
        plt.ylabel('probability')
        plt.ylim(0,1)
        plt.show()
    else:
        # 2-D ERT surface: probability for every (time, target) pair.
        temps = range(0,temps_max+2)
        values = range(0,int(value_max+2))
        X, Y = np.meshgrid(temps, values)
        Z = probability(data,X,Y)
        proba = np.linspace(0.0, 1.0, 11)
        cs = plt.contourf(X, Y, Z, levels=proba)
        cbar = plt.colorbar(cs)
        cbar.set_label('probability')
        plt.xlabel('run')
        plt.ylabel('target')
        plt.show()
    #remove('run.csv')
from os import system
import json
import numpy as np
import matplotlib.pyplot as plt
# Compare the ERT of several solvers: run draw_ert.py for each configuration,
# reload the probability data it saved, and plot bar charts for a few targets.
Nb_run = 50
# Solver configurations, passed verbatim to snp.py through draw_ert.py.
solvers = [
    '-m num_greedy',
    '-m bit_greedy'
]
datas = []
for solver in solvers:
    print("python3 draw_ert.py --nb-run " + str(Nb_run) + " --save-proba " + solver)
    system("python3 draw_ert.py --nb-run " + str(Nb_run) + " --save-proba " + solver)
    # draw_ert.py --save-proba wrote the per-run improvement dicts there.
    with open('proba.json') as json_file:
        data = json.load(json_file)
    datas.append(data)
# Global bounds over every solver and run (JSON keys are strings).
# value_min is not used below in this script.
temps_max = 0
value_max = 0
value_min = np.inf
for data in datas:
    for run in data:
        m = max([int(k) for k,v in run.items()])
        if m > temps_max:
            temps_max = m
        m = max([v for k,v in run.items()])
        if m > value_max:
            value_max = m
        m = min([v for k,v in run.items()])
        if m < value_min:
            value_min = m
def probability(data,time,value):
    """Fraction of runs having reached at least `value` by `time`."""
    num = 0
    for attempt in data:
        good = False
        for t,v in attempt.items():
            if int(t) <= time and v >= value:
                good = True
                break
        if good:
            num += 1
    return num/len(data)
# Vectorize over (time, value); `data` (argument 0) stays scalar.
probability = np.vectorize(probability, excluded=[0])
# One subplot per (target value, solver) pair, laid out on a 3x2 grid.
# NOTE(review): the targets 640/660/670 look hard-coded for one problem
# instance — adjust them along with the solver settings above.
j = 1
for v in [640,660,670]:
    for i in range(len(solvers)):
        plt.subplot(320+j)
        P = probability(datas[i],range(1,temps_max+1),v)
        plt.bar(range(1,temps_max+1), P)
        plt.title("ERT for target {} and solver\n{}".format(v, solvers[i]))
        plt.xlabel('run')
        plt.ylabel('probability')
        plt.ylim(0,1)
        j += 1
plt.tight_layout()
plt.show()
......@@ -35,7 +35,60 @@ def greedy(func, init, neighb, again):
i += 1
return best_val, best_sol
# TODO add a simulated-annealing template.
# TODO add a population-based stochastic heuristic template.
def simulated_annealing(func, init, neighb, temp, again):
    """Simulated-annealing template (maximization).

    `func(sol)` evaluates a solution, `init()` draws the starting point,
    `neighb(sol)` draws a candidate, `temp(i, val, sol)` gives the current
    temperature and `again(i, val, sol)` decides whether to iterate.
    Returns the best (value, solution) pair ever seen.
    """
    current = init()
    current_val = func(current)
    best_sol, best_val = current, current_val
    it = 1
    while again(it, current_val, current):
        candidate = neighb(current)
        candidate_val = func(candidate)
        # >= (and not >) so plateaus are crossed instead of random-walked.
        accept = candidate_val >= current_val
        if not accept:
            # Metropolis criterion: accept a worse move with probability
            # exp(-delta/T), delta being the (positive) degradation.
            T = temp(it, current_val, current)
            accept = np.random.random() < np.exp(-(current_val - candidate_val) / T)
        if accept:
            current, current_val = candidate, candidate_val
        if candidate_val >= best_val:
            best_sol, best_val = candidate, candidate_val
        it += 1
    return best_val, best_sol
def genetic(func, init, crossover, neighb, population_size, again):
    """Population-based stochastic heuristic template (maximization).

    Each generation draws a five-times oversized offspring pool by
    crossover + mutation, then keeps `population_size` of them without
    replacement, with probabilities given by a softmax over the value
    ranks.  Returns the best (value, solution) pair ever seen.
    """
    pop = [init() for _ in range(population_size)]
    vals = [func(s) for s in pop]
    k = int(np.argmax(vals))
    best_sol, best_val = pop[k], vals[k]
    it = 1
    while again(it, best_val, best_sol):
        # Variation: crossover of two random parents, then mutation.
        offspring = []
        for _ in range(5 * population_size):
            a, b = np.random.randint(0, population_size, 2)
            offspring.append(neighb(crossover(pop[a], pop[b])))
        off_vals = [func(s) for s in offspring]
        # Rank-based selection weights: softmax over the value ranks
        # (the best offspring gets the highest rank, hence weight).
        ranks = np.zeros(len(off_vals))
        for rank, idx in enumerate(np.argsort(off_vals)):
            ranks[idx] = rank
        weights = np.exp(ranks) / sum(np.exp(ranks))
        chosen = np.random.choice(range(len(offspring)), size = population_size, replace = False, p=weights)
        pop = [offspring[c] for c in chosen]
        vals = [off_vals[c] for c in chosen]
        # Keep track of the best solution ever seen.
        k = int(np.argmax(vals))
        if vals[k] > best_val:
            best_sol, best_val = pop[k], vals[k]
        it += 1
    return best_val, best_sol
......@@ -50,6 +50,63 @@ def rand(domain_width, nb_sensors):
return domain
def unif(domain_width, nb_sensors):
    """Draw a square binary domain holding nb_sensors ones, spread
    quasi-uniformly on a (roughly) triangular lattice.

    The lattice has l rows and c columns; ones are put on a checkerboard
    pattern (about l*c/2 cells).  Sensors that do not fit the pattern are
    appended along the last column, then the last row, and any remainder
    is placed on random empty cells.
    """
    domain = np.zeros( (domain_width,domain_width) )
    # Ideal (real-valued) column count: a checkerboard on c columns and
    # sqrt(3)*c rows holds c*sqrt(3)*c/2 points; solve that for nb_sensors.
    c = np.sqrt(2*nb_sensors/np.sqrt(3))
    # Capacity error of the four floor/ceil roundings of (columns, rows).
    elem = [
        np.abs(np.trunc(c) * np.trunc(np.sqrt(3)*c) / 2- nb_sensors),
        np.abs(np.trunc(c) * np.ceil(np.sqrt(3)*c) / 2 - nb_sensors),
        np.abs(np.ceil(c) * np.trunc(np.sqrt(3)*c) / 2 - nb_sensors),
        np.abs(np.ceil(c) * np.ceil(np.sqrt(3)*c) / 2 - nb_sensors)
    ]
    # Keep the rounding whose capacity is closest to nb_sensors.
    # NOTE(review): np.argmin(elem) is recomputed in every branch, and `l`
    # is derived from the already-rounded `c` while `elem` used the
    # unrounded one — capacities may differ slightly from the estimate.
    if (np.argmin(elem) == 0):
        c = np.trunc(c)
        l = np.trunc(np.sqrt(3)*c)
    elif (np.argmin(elem) == 1):
        c = np.trunc(c)
        l = np.ceil(np.sqrt(3)*c)
    elif (np.argmin(elem) == 2):
        c = np.ceil(c)
        l = np.trunc(np.sqrt(3)*c)
    elif (np.argmin(elem) == 3):
        c = np.ceil(c)
        l = np.ceil(np.sqrt(3)*c)
    n = 0
    # Checkerboard fill: one sensor on every other lattice cell, centered
    # in its tile (tiles are domain_width/l by domain_width/c pixels).
    for i in range(int(l)):
        for j in range(int(c)):
            if n < nb_sensors:
                if (i % 2 == 0 and j % 2 == 0) or (i % 2 == 1 and j % 2 == 1):
                    domain[int((i+0.5)*domain_width/l)][int((j+0.5)*domain_width/c)] = 1
                    n += 1
    # Complete along the last column (the parity tests pick the cells the
    # checkerboard left empty there)...
    if n < nb_sensors:
        for i in range(int(l)):
            if n < nb_sensors:
                if (i % 2 == 1 and c % 2 == 0) or (i % 2 == 0 and c % 2 == 1):
                    domain[int((i+0.5)*domain_width/l)][int((c-0.5)*domain_width/c)] = 1
                    n += 1
    # ...then along the last row...
    if n < nb_sensors:
        for j in range(int(c)):
            if n < nb_sensors:
                if (l % 2 == 0 and j % 2 == 1) or (l % 2 == 1 and j % 2 == 0):
                    domain[int((l-0.5)*domain_width/l)][int((j+0.5)*domain_width/c)] = 1
                    n += 1
    # ...and finally at random empty cells.
    while n < nb_sensors :
        i,j = np.random.randint(0, domain_width, 2)
        if domain[i][j] == 0:
            domain[i][j] = 1
            n += 1
    return domain
########################################################################
# Neighborhood
########################################################################
......@@ -78,3 +135,59 @@ def neighb_square(sol, scale, domain_width):
# else pass
return new
########################################################################
# Crossover
########################################################################
def rand_crossover(sol1, sol2):
    """Random bit-domain crossover.

    A cell is a candidate when at least one parent holds a sensor there
    (element-wise sum >= 1).  As many cells as sol1 carries sensors are
    drawn with replacement among the candidates, so the child may end up
    with fewer ones than its parents.
    """
    merged = sol1 + sol2
    rows, cols = [], []
    for r, line in enumerate(merged):
        for q, cell in enumerate(line):
            if cell >= 1:
                rows.append(r)
                cols.append(q)
    picks = np.random.randint(0, len(rows), len(to_sensors(sol1)))
    child = np.zeros( (len(merged),len(merged[0])) )
    for p in picks:
        child[rows[p]][cols[p]] = 1
    return child
def split_crossover(sol1, sol2):
    """Median-split crossover on bit domains.

    The child takes sol1's sensors left of (<=) the median first
    coordinate of the pooled parents and sol2's sensors right of it, then
    is padded with random picks from the discarded sensors and trimmed by
    random removals so it holds exactly as many sensors as sol1.
    """
    p1 = np.array(to_sensors(sol1))
    p2 = np.array(to_sensors(sol2))
    sol = np.concatenate((p1,p2))
    # Median of the first coordinate of every pooled sensor.
    # BUG FIX: was `sol[::2]` (every other *sensor*, both coordinates),
    # which matches neither the num version nor the p[0] tests below.
    x = sol[:, 0]
    m = np.median(x)
    r = []
    for p in p1:
        if p[0] <= m:
            r.append(p)
    for p in p2:
        if p[0] > m:
            r.append(p)
    if len(r) < len(p1):
        # Not enough sensors: complete with random picks among the
        # sensors of the unused sides.
        q = []
        for p in p1:
            if p[0] > m:
                q.append(p)
        for p in p2:
            if p[0] <= m:
                q.append(p)
        # BUG FIX: was `len(p)` — `p` is the 2-element point left over
        # from the loops above, so picks were restricted to q[0]/q[1].
        for i in np.random.randint(0, len(q), (len(p1)-len(r))):
            r.append(q[i])
    while len(r) > len(p1):
        r.pop(np.random.randint(0,len(r)))
    rmatrix = np.zeros((len(sol1),len(sol1)))
    for p in r:
        rmatrix[p[0],p[1]] = 1
    return rmatrix
......@@ -32,6 +32,13 @@ def cover_sum(sol, domain_width, sensor_range, dim):
cov = pb.coverage(domain, sensors, sensor_range*domain_width)
s = np.sum(cov)
assert(s >= len(sensors))
if len(sol) > dim:
s *= 0.5
for v in sol:
if v < 0:
s += v/domain_width
if v >= domain_width:
s -= (v-domain_width)/domain_width
return s
......@@ -43,6 +50,61 @@ def rand(dim, scale):
"""Draw a random vector in [0,scale]**dim."""
return np.random.random(dim) * scale
def unif(dim, scale):
    """Draw a flat [c0, c1, c2, c3, ...] solution of dim coordinates
    (dim/2 sensors) spread quasi-uniformly over [0,scale]**2 — the
    continuous counterpart of the bit-domain `unif`.
    """
    nb_sensors = dim/2
    domain_width = scale
    sensors = []
    # Ideal (real-valued) column count: a checkerboard on c columns and
    # sqrt(3)*c rows holds c*sqrt(3)*c/2 points; solve that for nb_sensors.
    c = np.sqrt(2*nb_sensors/np.sqrt(3))
    # Capacity error of the four floor/ceil roundings of (columns, rows).
    elem = [
        np.abs(np.trunc(c) * np.trunc(np.sqrt(3)*c) / 2- nb_sensors),
        np.abs(np.trunc(c) * np.ceil(np.sqrt(3)*c) / 2 - nb_sensors),
        np.abs(np.ceil(c) * np.trunc(np.sqrt(3)*c) / 2 - nb_sensors),
        np.abs(np.ceil(c) * np.ceil(np.sqrt(3)*c) / 2 - nb_sensors)
    ]
    # Keep the rounding whose capacity is closest to nb_sensors.
    # NOTE(review): np.argmin(elem) is recomputed in every branch, and `l`
    # is derived from the already-rounded `c` while `elem` used the
    # unrounded one — capacities may differ slightly from the estimate.
    if (np.argmin(elem) == 0):
        c = np.trunc(c)
        l = np.trunc(np.sqrt(3)*c)
    elif (np.argmin(elem) == 1):
        c = np.trunc(c)
        l = np.ceil(np.sqrt(3)*c)
    elif (np.argmin(elem) == 2):
        c = np.ceil(c)
        l = np.trunc(np.sqrt(3)*c)
    elif (np.argmin(elem) == 3):
        c = np.ceil(c)
        l = np.ceil(np.sqrt(3)*c)
    # Checkerboard fill: append coordinate pairs centered in lattice tiles.
    for i in range(int(l)):
        for j in range(int(c)):
            if len(sensors) < dim:
                if (i % 2 == 0 and j % 2 == 0) or (i % 2 == 1 and j % 2 == 1):
                    sensors.append((i+0.5)*domain_width/l)
                    sensors.append((j+0.5)*domain_width/c)
    # Complete along the last column...
    if len(sensors) < dim:
        for i in range(int(l)):
            if len(sensors) < dim:
                if (i % 2 == 1 and c % 2 == 0) or (i % 2 == 0 and c % 2 == 1):
                    sensors.append((i+0.5)*domain_width/l)
                    sensors.append((c-0.5)*domain_width/c)
    # ...then along the last row...
    if len(sensors) < dim:
        for j in range(int(c)):
            if len(sensors) < dim:
                if (l % 2 == 0 and j % 2 == 1) or (l % 2 == 1 and j % 2 == 0):
                    sensors.append((l-0.5)*domain_width/l)
                    sensors.append((j+0.5)*domain_width/c)
    # ...and pad any remaining coordinates at random.
    # list += ndarray extends the list with the array's elements.
    if len(sensors) < dim:
        sensors += np.random.random(dim-len(sensors)) * scale
    return sensors
########################################################################
# Neighborhood
......@@ -56,3 +118,53 @@ def neighb_square(sol, scale, domain_width):
new = sol + (np.random.random(len(sol)) * side - side/2)
return new
########################################################################
# Crossover
########################################################################
def rand_crossover(sol1, sol2):
    """Random crossover for flat [x0,y0,x1,y1,...] solutions.

    The child is len(sol1)//2 points drawn with replacement among the
    pooled parent points, returned as a plain list of coordinates.
    """
    pool = np.concatenate((sol1, sol2))
    picks = np.random.randint(0, len(pool) // 2, len(sol1) // 2)
    child = []
    for k in picks:
        child.extend((pool[2*k], pool[2*k+1]))
    return child
def split_crossover(sol1, sol2):
    """Median-split crossover for flat [x0,y0,x1,y1,...] solutions.

    The child keeps sol1's points whose x is <= the median x of the
    pooled parents and sol2's points whose x is above it; it is then
    padded with random picks from the discarded points and trimmed by
    random pair removals so it has exactly len(sol1) coordinates.
    """
    pooled = np.concatenate((sol1, sol2))
    median_x = np.median(pooled[::2])

    def _side(vec, keep_left):
        # Coordinates of `vec`'s points on the requested side of the median.
        out = []
        for k in range(len(vec) // 2):
            if (vec[2*k] <= median_x) == keep_left:
                out.append(vec[2*k])
                out.append(vec[2*k+1])
        return out

    child = _side(sol1, True) + _side(sol2, False)
    if len(child) < len(sol1):
        # Too few points: complete with random picks from the other sides.
        spare = _side(sol1, False) + _side(sol2, True)
        for k in np.random.randint(0, len(spare)//2, (len(sol1)-len(child))//2):
            child.append(spare[2*k])
            child.append(spare[2*k+1])
    while len(child) > len(sol1):
        # Too many points: drop whole (x, y) pairs at random.
        k = np.random.randint(0, len(child)//2)
        del child[2*k:2*k+2]
    return child
########################################################################
# Temperature programme
########################################################################
class continus:
    """Geometric cooling schedule: starts at T_0, then T_{i+1} = alpha * T_i.

    Instances are callables matching the `temp(i, val, sol)` interface of
    `algo.simulated_annealing`.  The arguments are ignored: the returned
    temperature only depends on how many times the schedule was called.
    """
    def __init__(self, T_0, alpha):
        # Temperature returned by the next call.
        self.T = T_0
        # Multiplicative decay factor.
        self.alpha = alpha

    def __call__(self, i, val, sol):
        current = self.T
        self.T = current * self.alpha
        return current
class level:
    r"""Stepwise cooling schedule.

    Starts at T_0 and decreases by alpha (T_{i+1} = T_i - \alpha) once
    \beta distinct objective values have been observed since the last
    decrease.  Matches the `temp(i, val, sol)` callable interface of
    `algo.simulated_annealing`.  (Raw docstring: keeps the \alpha/\beta
    notation from being invalid escape sequences.)
    """
    def __init__(self, T_0, alpha, beta):
        # Current temperature.
        self.T = T_0
        # Amount subtracted at each level change.
        self.alpha = alpha
        # Number of value changes required to trigger a decrease.
        self.beta = beta
        # Last objective value seen (None before the first call).
        self.val = None
        # Value changes observed since the last temperature decrease.
        self.count = 0
    def __call__(self, i, val, sol):
        T = self.T
        if val != self.val:
            self.count += 1
            self.val = val
            if self.count > self.beta:
                self.count = 0
                # NOTE: T may eventually reach <= 0; the caller's `again`
                # criterion should stop the run before that happens.
                self.T -= self.alpha
        # Debug prints removed: they polluted the solver's "\r" log line
        # on every single iteration.
        return T
\ No newline at end of file
......@@ -3,7 +3,7 @@ import math
import numpy as np
import matplotlib.pyplot as plt
from sho import make, algo, iters, plot, num, bit, pb
from sho import make, algo, iters, plot, num, bit, pb, temp
########################################################################
# Interface
......@@ -32,7 +32,7 @@ if __name__=="__main__":
can.add_argument("-s", "--seed", metavar="VAL", default=None, type=int,
help="Random pseudo-generator seed (none for current epoch)")
solvers = ["num_greedy","bit_greedy"]
solvers = ["num_greedy","bit_greedy","num_simulated_annealing","bit_simulated_annealing","num_genetic","bit_genetic"]
can.add_argument("-m", "--solver", metavar="NAME", choices=solvers, default="num_greedy",
help="Solver to use, among: "+", ".join(solvers))
......@@ -48,6 +48,15 @@ if __name__=="__main__":
can.add_argument("-a", "--variation-scale", metavar="RATIO", default=0.3, type=float,
help="Scale of the variation operators (as a ration of the domain width)")
can.add_argument("-no", "--no-graphical-output", action='store_true', default=False,
help="Disable graphical output")
can.add_argument("-o", "--output-file", metavar="NAME", default=None, type=str,
help="Output file where log are put")
can.add_argument("-p", "--population-size", metavar="NB", default=10, type=int,
help="Size of the population for genetic algorithms")
the = can.parse_args()
# Minimum checks.
......@@ -63,6 +72,10 @@ if __name__=="__main__":
# Weird numpy way to ensure single line print of array.
np.set_printoptions(linewidth = np.inf)
# Default filename
if the.output_file == None:
the.output_file = the.solver
# Common termination and checkpointing.
history = []
......@@ -72,8 +85,8 @@ if __name__=="__main__":
make.iter(iters.max,
nb_it = the.iters),
make.iter(iters.save,
filename = the.solver+".csv",
fmt = "{it} ; {val} ; {sol}\n"),
filename = the.output_file+".csv",
fmt = "{it} ; {val}\n"),
make.iter(iters.log,
fmt="\r{it} {val}"),
make.iter(iters.history,
......@@ -95,7 +108,7 @@ if __name__=="__main__":
domain_width = the.domain_width,
sensor_range = the.sensor_range,
dim = d * the.nb_sensors),
make.init(num.rand,
make.init(num.unif,
dim = d * the.nb_sensors,
scale = the.domain_width),
make.neig(num.neighb_square,
......@@ -111,7 +124,7 @@ if __name__=="__main__":
domain_width = the.domain_width,
sensor_range = the.sensor_range,
dim = d * the.nb_sensors),
make.init(bit.rand,
make.init(bit.unif,
domain_width = the.domain_width,
nb_sensors = the.nb_sensors),
make.neig(bit.neighb_square,
......@@ -121,29 +134,101 @@ if __name__=="__main__":
)
sensors = bit.to_sensors(sol)
elif the.solver == "num_simulated_annealing":
val,sol = algo.simulated_annealing(
make.func(num.cover_sum,
domain_width = the.domain_width,
sensor_range = the.sensor_range,
dim = d * the.nb_sensors),
make.init(num.unif,
dim = d * the.nb_sensors,
scale = the.domain_width),
make.neig(num.neighb_square,
scale = the.variation_scale,
domain_width = the.domain_width),
temp.continus(100,0.99),
iters
)
sensors = num.to_sensors(sol)
elif the.solver == "bit_simulated_annealing":
val,sol = algo.simulated_annealing(
make.func(bit.cover_sum,
domain_width = the.domain_width,
sensor_range = the.sensor_range,
dim = d * the.nb_sensors),
make.init(bit.unif,
domain_width = the.domain_width,
nb_sensors = the.nb_sensors),
make.neig(bit.neighb_square,
scale = the.variation_scale,
domain_width = the.domain_width),
temp.continus(100,0.99),
iters
)
sensors = bit.to_sensors(sol)
elif the.solver == "num_genetic":
val,sol = algo.genetic(
make.func(num.cover_sum,
domain_width = the.domain_width,
sensor_range = the.sensor_range,
dim = d * the.nb_sensors),
make.init(num.rand,
dim = d * the.nb_sensors,
scale = the.domain_width),
num.split_crossover,
make.neig(num.neighb_square,
scale = the.variation_scale,
domain_width = the.domain_width),
the.population_size,
iters
)
sensors = num.to_sensors(sol)
elif the.solver == "bit_genetic":
val,sol = algo.genetic(
make.func(bit.cover_sum,
domain_width = the.domain_width,
sensor_range = the.sensor_range,
dim = d * the.nb_sensors),
make.init(bit.unif,
domain_width = the.domain_width,
nb_sensors = the.nb_sensors),
bit.split_crossover,
make.neig(bit.neighb_square,
scale = the.variation_scale,
domain_width = the.domain_width),
the.population_size,
iters
)
sensors = bit.to_sensors(sol)
# Fancy output.
print("\n{} : {}".format(val,sensors))
shape=(the.domain_width, the.domain_width)
if the.no_graphical_output is False:
shape=(the.domain_width, the.domain_width)
fig = plt.figure()
fig = plt.figure()
if the.nb_sensors ==1 and the.domain_width <= 50:
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122)
if the.nb_sensors ==1 and the.domain_width <= 50:
ax1 = fig.add_subplot(121, projection='3d')
ax2 = fig.add_subplot(122)
f = make.func(num.cover_sum,
domain_width = the.domain_width,
sensor_range = the.sensor_range * the.domain_width)
plot.surface(ax1, shape, f)
plot.path(ax1, shape, history)
else:
ax2=fig.add_subplot(111)
f = make.func(num.cover_sum,
domain_width = the.domain_width,
sensor_range = the.sensor_range * the.domain_width)
plot.surface(ax1, shape, f)
plot.path(ax1, shape, history)
else:
ax2=fig.add_subplot(111)
domain = np.zeros(shape)
domain = pb.coverage(domain, sensors,
the.sensor_range * the.domain_width)
domain = plot.highlight_sensors(domain, sensors)
ax2.imshow(domain)
domain = np.zeros(shape)
domain = pb.coverage(domain, sensors,
the.sensor_range * the.domain_width)
domain = plot.highlight_sensors(domain, sensors)
ax2.imshow(domain)
plt.show()
plt.show()
from os import system
import json
import numpy as np
from scipy.integrate import dblquad
import matplotlib.pyplot as plt
# Pick the "best" solver: run draw_ert.py for each configuration, then
# integrate its ERT probability surface over (time, target) with dblquad;
# the configuration with the largest volume wins.
Nb_run = 5
solvers = [
    '-m num_greedy -n 45 -i 20 -r 0.08 -t 10000 -w 100 -a 0.01',
    '-m bit_greedy -n 45 -i 20 -r 0.08 -t 10000 -w 100 -a 0.01'
]
datas = []
for solver in solvers:
    print("python3 draw_ert.py --nb-run " + str(Nb_run) + " --save-proba " + solver)
    system("python3 draw_ert.py --nb-run " + str(Nb_run) + " --save-proba " + solver)
    # draw_ert.py --save-proba wrote the per-run improvement dicts there.
    with open('proba.json') as json_file:
        data = json.load(json_file)
    datas.append(data)
# Global bounds over every solver and run (JSON keys are strings).
temps_max = 0
value_max = 0
value_min = np.inf
for data in datas:
    for run in data:
        m = max([int(k) for k,v in run.items()])
        if m > temps_max:
            temps_max = m
        m = max([v for k,v in run.items()])
        if m > value_max:
            value_max = m
        m = min([v for k,v in run.items()])
        if m < value_min:
            value_min = m
def probability(data,time,value):
    """Fraction of runs having reached at least `value` by `time`."""
    num = 0
    for attempt in data:
        good = False
        for t,v in attempt.items():
            if int(t) <= time and v >= value:
                good = True
                break
        if good:
            num += 1
    return num/len(data)
# Vectorize over (time, value); `data` (argument 0) stays scalar.
probability = np.vectorize(probability, excluded=[0])
m = 0  # index of the best solver so far
v = 0  # largest volume so far
for i in range(len(solvers)):
    # dblquad integrates f(y, x) with x in [value_min, value_max] and
    # y in [0, temps_max]; the lambda's first parameter therefore receives
    # the time axis and the second the target value, which matches
    # probability(data, time, value) despite the misleading names.
    volume = dblquad(lambda x,y: probability(datas[i],x,y),value_min,value_max,0,temps_max)[0]
    print(solvers[i])
    print(volume)
    if volume > v:
        m = i
        v = volume
print("Best solveur:")
print(solvers[m])
\ No newline at end of file