2 """@namespace IMP.nestor.compare_runs_v2_w_pyplot
3 Plotting script to compare NestOR runs"""
10 import matplotlib
as mpl
11 from matplotlib
import pyplot
as plt
# Run directories to compare are taken from the command line.
# NOTE(review): argv[1] is skipped — presumably consumed elsewhere (e.g. an
# output prefix); confirm against the caller before changing.
runs_to_compare = sys.argv[2:]

# Global matplotlib styling shared by every figure produced below.
plt.rcParams["font.family"] = "sans-serif"
mpl.rcParams["font.size"] = 12
def get_all_results(all_runs: list) -> dict:
    """Load the NestOR results for every run directory.

    Each run directory is expected to contain a ``nestor_output.yaml``
    file.  The returned dict maps the directory's basename (last path
    component) to the parsed YAML contents.

    @param all_runs: list of run-directory paths.
    @return: dict keyed by run basename with parsed YAML results.
    """
    results = {}
    for run in all_runs:
        with open(os.path.join(run, "nestor_output.yaml"), "r") as resf:
            result = yaml.safe_load(resf)
        # Key by the last path component so differently-rooted runs
        # with the same name collide visibly rather than silently.
        results[run.split("/")[-1]] = result
    return results
def mean_type_plotter(results: dict, key: str, ylabel: str):
    """Scatter-plot the per-run-set mean of ``key`` for every parent run.

    For each parent in ``results`` and each run_set under it, the value
    ``results[parent][run_set][run][key]`` is averaged over all runs and
    plotted against the run_set's representation label (the token after
    the last underscore in the run_set name).  The figure is saved as
    ``{ylabel}_comparison.png``.

    @param results: nested dict as returned by get_all_results().
    @param key: result field to average (must be float-convertible).
    @param ylabel: y-axis label, also used in the output filename.
    """
    data = []
    for parent in results:
        x_vals = []
        y_vals = []
        for run_set in results[parent]:
            all_vals = []
            for run in results[parent][run_set]:
                try:
                    val = float(results[parent][run_set][run][key])
                except ValueError as err:
                    print(f"Terminating due to the following error...\n{err}")
                    sys.exit(1)
                all_vals.append(val)
            x_vals.append(run_set)
            y_vals.append(np.mean(all_vals))
        data.append((x_vals, y_vals, parent))

    fig = plt.figure()
    for datum in data:
        # BUGFIX: the original assigned ``datum[0] = [...]`` on a tuple,
        # which raises TypeError.  Build the relabeled x list in a new
        # local instead of mutating the (immutable) datum.
        x_labels = [str(x.split("_")[-1]) for x in datum[0]]
        # ``transparency`` is a module-level constant — TODO confirm its
        # definition survives in the full file.
        plt.scatter(x_labels, datum[1], label=datum[2], alpha=transparency)

    plt.xlabel("Representation (number of residues per bead)")
    plt.ylabel(ylabel)
    fig.legend(bbox_to_anchor=(1.15, 1.0), loc="upper right")
    fig.savefig(f"{ylabel}_comparison.png", bbox_inches="tight", dpi=600)
    plt.close(fig)
def errorbar_type_plotter(results: dict, key: str, ylabel: str):
    """Errorbar-plot the mean of ``key`` with standard error per run_set.

    For each parent and run_set, averages ``key`` over all runs and
    computes the standard error of the mean (std / sqrt(n)).  Points are
    plotted against the representation label (token after the last
    underscore of the run_set name) and the figure is saved as
    ``{ylabel}_comparison.png``.

    @param results: nested dict as returned by get_all_results().
    @param key: result field to average (must be float-convertible).
    @param ylabel: y-axis label, also used in the output filename.
    """
    data = []
    for parent in results:
        xvals = []
        yvals = []
        yerr = []
        for run_set in results[parent]:
            all_vals = [
                float(results[parent][run_set][run][key])
                for run in results[parent][run_set]
            ]
            xvals.append(run_set)
            yvals.append(np.mean(all_vals))
            # Standard error of the mean.
            yerr.append(np.std(all_vals) / (math.sqrt(len(all_vals))))
        data.append((xvals, yvals, yerr, parent))

    fig = plt.figure()
    for idx, datum in enumerate(data):
        # BUGFIX: the original assigned ``datum[0] = [...]`` on a tuple,
        # which raises TypeError.  Use a fresh local for the relabeled
        # x values instead.
        x_labels = [str(x.split("_")[-1]) for x in datum[0]]
        # ``transparency`` is a module-level constant — TODO confirm.
        plt.errorbar(
            x_labels,
            datum[1],
            yerr=datum[2],
            label=datum[3],
            fmt="o",
            alpha=transparency,
        )

    plt.xlabel("Representation (number of residues per bead)")
    # Use a LaTeX-styled axis label for the evidence plot.
    # NOTE(review): the original reassigns ``ylabel`` before saving, so
    # the filename also uses the LaTeX form — confirm this is intended.
    if ylabel == "Mean log(Z)":
        ylabel = "Mean log$Z$"
    plt.ylabel(ylabel)
    fig.legend(bbox_to_anchor=(1.15, 1.0), loc="upper right")
    fig.savefig(f"{ylabel}_comparison.png", bbox_inches="tight", dpi=600)
    plt.close(fig)
109 """Plots standard error comparison"""
111 for parent
in results:
114 for run_set
in results[parent]:
116 for run
in results[parent][run_set]:
117 r_temp = results[parent][run_set][run]
118 log_evi.append(float(r_temp[
"log_estimated_evidence"]))
119 stderr_log_evi = np.std(log_evi) / (math.sqrt(len(log_evi)))
120 x_vals.append(run_set)
121 y_vals.append(stderr_log_evi)
122 data.append((x_vals, y_vals, parent))
127 [x.split(
"_")[-1]
for x
in datum[0]],
133 plt.xlabel(
"Representation (number of residues per bead)")
134 plt.ylabel(
"Standard error on log(Evidence)")
137 fig.savefig(
"stderr_comparison.png", bbox_inches=
"tight", dpi=600)
# ---------------------------------------------------------------------------
# Script driver: load all results once, then emit one figure per metric.
# ---------------------------------------------------------------------------
nestor_results = get_all_results(runs_to_compare)

# Metrics plotted as plain per-run-set means (scatter).
toPlot_meanType: dict = {
    "analytical_uncertainty": "Mean analytical uncertainties",
}

# Metrics plotted as means with standard-error bars.
toPlot_errorbarType: dict = {
    "last_iter": "Mean iterations",
    "log_estimated_evidence": "Mean log(Z)",
    "nestor_process_time": "Mean NestOR process time",
}

for key, y_lbl in toPlot_meanType.items():
    mean_type_plotter(nestor_results, key, y_lbl)

for key, y_lbl in toPlot_errorbarType.items():
    errorbar_type_plotter(nestor_results, key, y_lbl)

plot_sterr(nestor_results)