### launching minimizations in parallel for the estimation
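### NB: this section assumes the enclosing script already provides position_lst, noise_lst,
### SNR_lst (one signal-to-noise ratio per entry of noise_lst), parameters_to_identify
### (dict mapping each parameter name to its ground-truth value), current_directory, and the
### usual imports (dolfin, multiprocessing, subprocess, time, numpy, pandas as pd,
### matplotlib.pyplot as plt, plus the local modules helpers and compute_disp)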
results_all_positions = {}
for position in position_lst:
    distribution_converged = ['no'] * len(noise_lst) ### is the error distribution converged for each noise level?
    results, storing_values_for_convergence_check, storing_processes, reference_value, param_names, nb_parameters = helpers.initialize_lsts_and_params(parameters_to_identify, noise_lst, "noise")
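    ### assumed structures, inferred from their use below: 'results' maps 'noise', each parameter
    ### name, and each 'ini_<name>' key to a list; 'storing_values_for_convergence_check' maps
    ### str(noise) to one list of estimates per parameter; 'storing_processes' maps str(noise) to
    ### the list of subprocess.Popen handles; 'reference_value' holds the ground-truth values in
    ### the order of 'param_names'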
    ########### computation of the reference displacement field
    if position == 'Supine+Prone':
        ### computing the displacement field in supine position
        Umeas_supine, Vmeas_supine = compute_disp.compute_disp(position='Supine', parameters_to_identify=parameters_to_identify, noise='', dirpath=current_directory)
        V0_supine = dolfin.assemble(dolfin.Constant(1)*Vmeas_supine)
        Umeas_norm_supine = (dolfin.assemble(dolfin.inner(Umeas_supine, Umeas_supine)*Vmeas_supine)/2/V0_supine)**(1/2)
        ### writing the displacement field in supine position
        file_supine = dolfin.File("refSupine/displacement_exhal_to_inhal.xml")
        file_supine << Umeas_supine
        ### computing the displacement field in prone position
        Umeas_prone, Vmeas_prone = compute_disp.compute_disp(position='Prone', parameters_to_identify=parameters_to_identify, noise='', dirpath=current_directory)
        V0_prone = dolfin.assemble(dolfin.Constant(1)*Vmeas_prone)
        Umeas_norm_prone = (dolfin.assemble(dolfin.inner(Umeas_prone, Umeas_prone)*Vmeas_prone)/2/V0_prone)**(1/2)
        ### writing the displacement field in prone position
        file_prone = dolfin.File("refProne/displacement_exhal_to_inhal.xml")
        file_prone << Umeas_prone
    else:
        Umeas, Vmeas = compute_disp.compute_disp(position=position, parameters_to_identify=parameters_to_identify, noise='', dirpath=current_directory)
        V0 = dolfin.assemble(dolfin.Constant(1)*Vmeas)
        Umeas_norm = (dolfin.assemble(dolfin.inner(Umeas, Umeas)*Vmeas)/2/V0)**(1/2)
        ### writing the displacement field in prone or supine position (depending on the case investigated)
        file_prone_or_supine = dolfin.File("ref"+str(position)+"/displacement_exhal_to_inhal.xml")
        file_prone_or_supine << Umeas
    ### checking how many CPUs are available before starting computations
    number_cpus = multiprocessing.cpu_count()
    min_iterations = number_cpus // len(noise_lst) ### attributing the same number of CPUs to each noise level
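    ### e.g., with 16 CPUs and 4 noise levels, min_iterations = 4, i.e., 4 minimizations are
    ### (re)launched in parallel per noise level at each pass of the loop below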
    converged = False ### convergence of the error distributions: is the calculation over for all noise levels?
    converged_for_noise_level = False ### has the error distribution converged for a given noise level?
    while not converged:
        converged = all(convergence_status == "converged" for convergence_status in distribution_converged) ### checking if the calculation is over for all noise levels
        if converged: ### converged for all noise levels
            break
        over = False ### used to check if all computations launched in parallel are over for a given noise level
        for i in range(len(noise_lst)):
            if distribution_converged[i] == 'converged': ### the error distribution converged for this noise level
                storing_processes.pop(str(noise_lst[i]), None) ### no more calculations are launched for this noise level
            elif distribution_converged[i] == 'no': ### finished but did not converge
                storing_processes[str(noise_lst[i])] = [] ### new computations should be launched
        for noise, lst in storing_values_for_convergence_check.items():
            if distribution_converged[noise_lst.index(float(noise))] == 'no': ### all computations finished for this noise level but the distribution did not converge: launching new computations
                for iteration in range(min_iterations): ### relaunching all processes for this noise level
                    ini_calculation = []
                    for param, param_value in parameters_to_identify.items(): ### drawing each parameter's initial value from a normal distribution centered on its ground-truth value, with a standard deviation of 30% of that value
                        ini_calculation.append(float(numpy.random.normal(loc=param_value, scale=abs(0.3*param_value), size=1)[0]))
                    process = subprocess.Popen(["python", "-W", "ignore", "./minimization.py", "--position", str(position), "--noise_level", str(noise), "--parameters_to_identify", str(parameters_to_identify), "--iteration", str(iteration), "--dirpath", str(current_directory), "--initialization", str(ini_calculation)], stdout=subprocess.PIPE) ### launching the computation in parallel
                    storing_processes[str(noise)].append(process) ### storing each launched process
                distribution_converged[noise_lst.index(float(noise))] = 'waiting' ### status changes from 'no' (finished but not converged) to 'waiting' (no action until all processes of this noise level reach their end)
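        ### NB: a hypothetical sketch of the argument parsing expected on the minimization.py side
        ### (that script is not shown here); the dict and the list are passed above as their string
        ### representations, so they could be recovered with ast.literal_eval, e.g.:
        ###     parser = argparse.ArgumentParser()
        ###     parser.add_argument("--parameters_to_identify", type=ast.literal_eval)
        ###     parser.add_argument("--initialization", type=ast.literal_eval)
        ###     parser.add_argument("--noise_level", type=float)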
        while not over: ### until all processes of a given noise level have finished
            time.sleep(1) ### waiting before checking again
            for noise, lst in storing_processes.items():
                noise = float(noise)
                over = helpers.checking_if_processes_converged(storing_processes[str(noise)]) ### checking if all processes of this noise level finished
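                ### helpers.checking_if_processes_converged is assumed to behave like this
                ### minimal sketch (Popen.poll() returns None while the process is still running):
                ###     def checking_if_processes_converged(processes):
                ###         return all(process.poll() is not None for process in processes)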
                if over: ### all processes ended for this noise level
                    for process in storing_processes[str(noise)]: ### for each launched process
                        out = [] ### stays empty if the minimization output cannot be read
                        try: ### reading the output of the minimization
                            out = process.communicate()[0]
                            out = out.decode("utf-8").split()
                        except Exception: ### minimization unsuccessful (e.g., it did not converge fast enough; a maximum number of iterations is specified to avoid computations that are too expensive)
                            pass
                        process.terminate() ### ensuring the process ends
                        if out != []: ### out is [] if the minimization did not converge
                            results['noise'].append(noise) ### storing the results
                            for i in range(nb_parameters): ### getting the initial values and the error (in %) of the initialization
                                lst_name = "ini_" + str(param_names[i])
                                results[lst_name].append((float(out[i]) - reference_value[i])/reference_value[i]*100)
                            for k in range(nb_parameters, 2*nb_parameters): ### getting the estimated values and computing the error (in %)
                                i = k - nb_parameters
                                results[param_names[i]].append((float(out[k]) - reference_value[i])/reference_value[i]*100)
                                storing_values_for_convergence_check[str(noise)][i].append(float(out[k])) ### storing the estimates to check if the error distribution converged
                    if len(storing_values_for_convergence_check[str(noise)][0]) > min_iterations + 1: ### minimum number of iterations before checking for convergence
                        converged_for_noise_level, crit = helpers.checking_if_converged(storing_values_for_convergence_check[str(noise)], len(storing_processes[str(noise)]), tol=5e-2)
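                        ### helpers.checking_if_converged is assumed to compare, for each parameter,
                        ### the statistics of the stored estimates with and without the last batch of
                        ### len(storing_processes[str(noise)]) values, returning (True, criterion)
                        ### once the relative change falls below tol; this is an assumption about the
                        ### helper, not its actual implementation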
                        if converged_for_noise_level: ### the distribution converged
                            distribution_converged[noise_lst.index(float(noise))] = 'converged' ### no more minimizations are launched for this noise level
                        else:
                            distribution_converged[noise_lst.index(float(noise))] = 'no' ### new processes will be launched
                    break
    results_all_positions[position] = results ### dictionary storing the results for all positions
    df = pd.DataFrame(results)
    df_sorted = df.sort_values(by="noise") ### sorting by noise level
    myfile = open("./results_estimation-model_error-position=" + str(position), 'w') ### opening the output file for this position
    myfile.write(df_sorted.to_string(index=False)) ### writing the errors for this position
    myfile.close()
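    ### the file written above is whitespace-separated; it can be read back with, e.g.,
    ### pd.read_csv("./results_estimation-model_error-position=" + str(position), sep=r"\s+")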
##### plotting results
parametrization_name = "".join(parameters_to_identify.keys()) ### name of the parametrization (concatenation of all identified parameters), used in the output file names
for parameter_name in parameters_to_identify:
    fig, ax = plt.subplots()
    ax.cla()
    ### plotting parameters
    plt.rc("xtick", labelsize=18)
    plt.rc("ytick", labelsize=18)
    plt.rc("legend", fontsize=16)
    plt.ylim([-100, 100])
    plt.xlabel("Signal to Noise Ratio (SNR)", fontsize=14)
    plt.ylabel("Estimation error (%)", fontsize=14)
    color_lst = ['royalblue', 'firebrick', 'forestgreen'] ### a different color for each position
    color_lst_lines = ['royalblue', 'firebrick', 'darkgreen']
    alpha_lst = [0.6, 0.45, 0.6] ### transparencies, so that superimposed distributions remain visible
    label_lst = position_lst.copy()
    for position in position_lst:
        ### reorganizing the data
        results_position = results_all_positions[position]
        frame = pd.DataFrame(results_position)
        sorted_frame = frame.sort_values(by="noise")
        stats_results = sorted_frame.groupby("noise")[str(parameter_name)].agg(['mean', 'std'])
        stats_results['mean_'+str(parameter_name)] = stats_results['mean']
        stats_results['mean_plus_std'+str(parameter_name)] = stats_results['mean'] + stats_results['std']
        stats_results['mean_minus_std'+str(parameter_name)] = stats_results['mean'] - stats_results['std']
        #### plotting the data
        plt.plot(SNR_lst, stats_results['mean_'+str(parameter_name)], color=color_lst_lines[0], label=label_lst[0])
        ax.fill_between(SNR_lst, stats_results['mean_minus_std'+str(parameter_name)], stats_results['mean_plus_std'+str(parameter_name)], alpha=alpha_lst[0], color=color_lst[0])
        color_lst = color_lst[1:] ### shifting the style lists so that the next position gets the next color, transparency, and label
        label_lst = label_lst[1:]
        alpha_lst = alpha_lst[1:]
        color_lst_lines = color_lst_lines[1:]
    plt.xlim([2.5, 44])
    plt.gca().set_xscale('log')
    ax.errorbar(SNR_lst, len(SNR_lst)*[0], yerr=30, linewidth=1, markersize=10, color='black', fmt='x', capsize=5, label="Initial distribution") ### the +/-30% standard deviation of the initial guesses
    plt.legend()
    plt.grid()
    plt.savefig("parametrization="+parametrization_name+"_identification_parameter="+str(parameter_name)+"_impact_measurement_errors.pdf", bbox_inches='tight') ### one figure per parameter, superimposing all positions
    plt.show()