from __future__ import print_function
import os
import shutil
import subprocess
import glob
import json

import numpy
import scipy as sp
import scipy.spatial.distance

from IMP import ArgumentParser

__doc__ = "Perform analysis to determine sampling convergence."


def parse_args():
    parser = ArgumentParser(
        description="First stages of analysis for assessing sampling "
                    "convergence")
    parser.add_argument(
        '--sysname', '-n', dest="sysname",
        help='name of the system', default="")
    parser.add_argument(
        '--path', '-p', dest="path",
        help='path to the good-scoring models', default="./")
    parser.add_argument(
        '--extension', '-e', dest="extension",
        help='extension of the file', choices=['rmf', 'pdb'], default="rmf")
    parser.add_argument(
        '--mode', '-m', dest="mode", help='pyRMSD calculator',
        choices=['cuda', 'cpu_omp', 'cpu_serial'], default="cuda")
    parser.add_argument(
        '--matrix-cores', '-c', dest="cores", type=int,
        help='number of cores for parallel RMSD matrix calculations; '
             'only for cpu_omp', default=1)
    parser.add_argument(
        '--cluster-cores', '-cc', dest="cores2", type=int,
        help='number of cores for clustering at different thresholds '
             'and parallel IO; only for cpu_omp', default=1)
    parser.add_argument(
        '--resolution', '-r', dest="resolution", type=int,
        help='resolution at which to select proteins in a multiscale system',
        default=1)
    parser.add_argument(
        '--subunit', '-su', dest="subunit",
        help='calculate RMSD/sampling and cluster precision/densities '
             'etc. over this subunit only', default=None)
    parser.add_argument(
        '--align', '-a', dest="align",
        help='boolean flag to allow superposition of models',
        default=False, action='store_true')
    parser.add_argument(
        '--ambiguity', '-amb', dest="symmetry_groups",
        help='file containing symmetry groups', default=None)
    parser.add_argument(
        '--scoreA', '-sa', dest="scoreA",
        help='name of the file containing the good-scoring scores for '
             'sample A', default="scoresA.txt")
    parser.add_argument(
        '--scoreB', '-sb', dest="scoreB",
        help='name of the file containing the good-scoring scores for '
             'sample B', default="scoresB.txt")
    parser.add_argument(
        '--rmfA', '-ra', dest="rmf_A",
        help='RMF file with conformations from sample A', default=None)
    parser.add_argument(
        '--rmfB', '-rb', dest="rmf_B",
        help='RMF file with conformations from sample B', default=None)
    parser.add_argument(
        '--gridsize', '-g', dest="gridsize", type=float,
        help='grid size for calculating sampling precision', default=10.0)
    parser.add_argument(
        '--skip', '-s', dest="skip_sampling_precision",
        help="bypass the calculation of sampling precision; this option "
             "needs to be used together with the clustering threshold "
             "option. Otherwise, by default, sampling precision is "
             "calculated and the clustering threshold is the calculated "
             "sampling precision.", default=False, action='store_true')
    parser.add_argument(
        '--cluster_threshold', '-ct', dest="cluster_threshold", type=float,
        help='final clustering threshold to visualize clusters. Assumes '
             'that the user has previously calculated sampling precision '
             'and wants clusters defined at a threshold higher than the '
             'sampling precision for ease of analysis (fewer clusters).',
        default=30.0)
    parser.add_argument(
        '--voxel', '-v', dest="voxel", type=float,
        help='voxel size for the localization densities', default=5.0)
    parser.add_argument(
        '--density_threshold', '-dt', type=float,
        dest="density_threshold",
        help='threshold for localization densities', default=20.0)
    parser.add_argument(
        '--density', '-d', dest="density",
        help='file containing dictionary of density custom ranges',
        default=None)
    parser.add_argument(
        '--gnuplot', '-gp', dest="gnuplot",
        help="plot automatically with gnuplot", default=False,
        action='store_true')
    parser.add_argument(
        '--selection', '-sn', dest="selection",
        help='file containing a dictionary of selected subunits and '
             'residues for RMSD and clustering calculation; each entry in '
             'the dictionary takes the form '
             "'selection name': [(residue_start, residue_end, protein name)]",
        default=None)
    parser.add_argument(
        '--prism', '-pr', dest="prism",
        help="save input files for PrISM", default=False,
        action='store_true')
    return parser.parse_args()
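
# Example invocation (a sketch): assuming this file is saved as exhaust.py
# and the good-scoring models, score files and identities files are in
# ./good_scoring_models, a run might look like
#
#   python exhaust.py -n mysystem -p ./good_scoring_models \
#       -m cpu_omp -c 4 -a -g 2.0 -gp
#
# The flags correspond to the add_argument() calls above; the system name
# and directory here are placeholders.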

def make_cluster_centroid(infname, frame, outfname, cluster_index,
                          cluster_size, precision, metadata_fname, path):

    import RMF
    # If the installed RMF is new enough, slice the input RMF ourselves and
    # add cluster provenance to the output
    if hasattr(RMF.NodeHandle, 'replace_child'):
        print(infname, outfname)
        inr = RMF.open_rmf_file_read_only(infname)
        outr = RMF.create_rmf_file(outfname)
        cpf = RMF.ClusterProvenanceFactory(outr)
        RMF.clone_file_info(inr, outr)
        RMF.clone_hierarchy(inr, outr)
        RMF.clone_static_frame(inr, outr)
        inr.set_current_frame(RMF.FrameID(frame))
        outr.add_frame("f0", RMF.FRAME)
        RMF.clone_loaded_frame(inr, outr)
        rn = outr.get_root_node()
        children = rn.get_children()
        if len(children) == 0:
            return
        rn = children[0]  # should be the single top-level node
        prov = [c for c in rn.get_children()
                if c.get_type() == RMF.PROVENANCE]
        if not prov:
            return
        prov = prov[0]
        # Replace the existing provenance node with cluster provenance
        newp = rn.replace_child(
            prov, "cluster.%d" % cluster_index, RMF.PROVENANCE)
        cp = cpf.get(newp)
        cp.set_members(cluster_size)
        cp.set_precision(precision)
        cp.set_density(os.path.abspath(metadata_fname))
    else:
        # Otherwise fall back to the rmf_slice command line tool
        print(infname, frame, outfname)
        subprocess.call(['rmf_slice', path + infname, '-f', str(frame),
                         outfname])

def main():
    args = parse_args()

    import IMP
    import IMP.atom
    import IMP.core
    import IMP.sampcon
    from IMP.sampcon import scores_convergence, clustering_rmsd
    from IMP.sampcon import rmsd_calculation, precision_rmsd

    # Metadata about this run, written to a JSON file at the end
    metadata = {}
    metadata_fname = "%s.output.json" % args.sysname

    metadata['producer'] = {'name': 'IMP.sampcon',
                            'version': IMP.sampcon.__version__}

    idfile_A = "Identities_A.txt"
    idfile_B = "Identities_B.txt"

    # Read the good-scoring model scores for each sample
    score_A = []
    score_B = []

    with open(os.path.join(args.path, args.scoreA), 'r') as f:
        for line in f:
            score_A.append(float(line.strip("\n")))

    with open(os.path.join(args.path, args.scoreB), 'r') as f:
        for line in f:
            score_B.append(float(line.strip("\n")))

    scores = score_A + score_B

    # Check the convergence of the top scores
    scores_convergence.get_top_scorings_statistics(scores, 0, args.sysname)

    # Check whether the two score distributions are similar (KS test)
    scores_convergence.get_scores_distributions_KS_Stats(
        score_A, score_B, 100, args.sysname)

    # Read the model coordinates for RMSD calculation and clustering
    if args.extension == "pdb":
        # Bead names and symmetry groups are not read from PDB files
        ps_names = []
        symm_groups = None
        conforms, masses, radii, models_name = \
            rmsd_calculation.get_pdbs_coordinates(
                args.path, idfile_A, idfile_B)
        metadata['input_frames'] = models_name
    else:
        args.extension = "rmf3"
        if args.selection is not None:
            rmsd_custom_ranges = \
                precision_rmsd.parse_custom_ranges(args.selection)
        else:
            rmsd_custom_ranges = None
        # If a single RMF file per sample was given, read all frames from it
        if args.rmf_A is not None:
            metadata['input_files'] = {'A': args.rmf_A, 'B': args.rmf_B}
            (ps_names, masses, radii, conforms, symm_groups, models_name,
             n_models) = rmsd_calculation.get_rmfs_coordinates_one_rmf(
                args.path, args.rmf_A, args.rmf_B,
                args.subunit,
                args.symmetry_groups,
                rmsd_custom_ranges,
                args.resolution)
        # Otherwise read the individual good-scoring model RMFs listed in
        # the identities files
        else:
            symm_groups = None
            (ps_names, masses, radii, conforms,
             models_name) = rmsd_calculation.get_rmfs_coordinates(
                args.path, idfile_A, idfile_B, args.subunit,
                selection=rmsd_custom_ranges,
                resolution=args.resolution)
        metadata['input_frames'] = models_name

    print("Size of conformation matrix", conforms.shape)

    if not args.skip_sampling_precision:
        # Save the conformations and restore them after the RMSD matrix
        # calculation
        numpy.save("conforms", conforms)
        inner_data = rmsd_calculation.get_rmsds_matrix(
            conforms, args.mode, args.align, args.cores, symm_groups)
        print("Size of RMSD matrix (flattened):", inner_data.shape)
        conforms = numpy.load("conforms.npy")
        os.unlink('conforms.npy')

    # Load the (flattened) RMSD matrix written to disk by pyRMSD
    from pyRMSD.matrixHandler import MatrixHandler
    mHandler = MatrixHandler()
    mHandler.loadMatrix("Distances_Matrix.data")

    rmsd_matrix = mHandler.getMatrix()
    distmat = rmsd_matrix.get_data()

    # Expand the condensed distance matrix into a full N x N matrix
    distmat_full = sp.spatial.distance.squareform(distmat)
    print("Size of RMSD matrix (unpacked, N x N):", distmat_full.shape)

    # Get the indices of the models belonging to each sample
    if args.rmf_A is not None:
        sampleA_all_models = list(range(n_models[0]))
        sampleB_all_models = list(range(n_models[0],
                                        n_models[1] + n_models[0]))
        total_num_models = n_models[1] + n_models[0]
    else:
        (sampleA_all_models,
         sampleB_all_models) = clustering_rmsd.get_sample_identity(
            idfile_A, idfile_B)
        total_num_models = len(sampleA_all_models) + len(sampleB_all_models)
    all_models = list(sampleA_all_models) + list(sampleB_all_models)
    print("Size of Sample A:", len(sampleA_all_models),
          " ; Size of Sample B: ", len(sampleB_all_models),
          "; Total", total_num_models)

    if not args.skip_sampling_precision:

        print("Calculating sampling precision")

        # Grid size for the clustering thresholds to scan
        gridSize = args.gridsize

        # Get the list of clustering thresholds
        cutoffs_list = clustering_rmsd.get_cutoffs_list(distmat, gridSize)
        print("Clustering at thresholds:", cutoffs_list)

        # Cluster at each threshold and check for convergence
        pvals, cvs, percents = clustering_rmsd.get_clusters(
            cutoffs_list, distmat_full, all_models, total_num_models,
            sampleA_all_models, sampleB_all_models, args.sysname,
            args.cores2)
        metadata['chi_square_grid_stats'] = {
            'cutoffs': list(cutoffs_list),
            'p_values': pvals,
            'cramers_v': cvs,
            'percent_clustered': percents}

        # Determine the sampling precision from the convergence of the
        # chi-square statistics and the percentage of clustered models
        (sampling_precision, pval_converged, cramersv_converged,
         percent_converged) = clustering_rmsd.get_sampling_precision(
            cutoffs_list, pvals, cvs, percents)

        with open("%s.Sampling_Precision_Stats.txt"
                  % args.sysname, 'w+') as fpv:
            print("The sampling precision is defined as the largest allowed "
                  "RMSD between the cluster centroid and a ", args.sysname,
                  "model within any cluster in the finest clustering for "
                  "which each sample contributes models proportionally to "
                  "its size (considering both significance and magnitude of "
                  "the difference) and for which a sufficient proportion of "
                  "all models occur in sufficiently large clusters. The "
                  "sampling precision for our ", args.sysname,
                  " modeling is %.3f" % (sampling_precision), " A.",
                  file=fpv)

            print("Sampling precision, P-value, Cramer's V and percentage "
                  "of clustered models below:", file=fpv)
            print("%.3f\t%.3f\t%.3f\t%.3f"
                  % (sampling_precision, pval_converged, cramersv_converged,
                     percent_converged), file=fpv)

        final_clustering_threshold = sampling_precision

        metadata['precision'] = {
            'sampling_precision': sampling_precision,
            'p_value': pval_converged,
            'cramers_v': cramersv_converged,
            'percent_clustered': percent_converged}

    else:
        final_clustering_threshold = args.cluster_threshold

    metadata['clustering_threshold'] = final_clustering_threshold

    # Cluster the models at the final clustering threshold and build the
    # contingency table of cluster membership by sample
    print("Clustering at threshold %.3f" % final_clustering_threshold)
    (cluster_centers, cluster_members) = clustering_rmsd.precision_cluster(
        distmat_full, total_num_models, final_clustering_threshold)

    (ctable, retained_clusters) = clustering_rmsd.get_contingency_table(
        len(cluster_centers), cluster_members, all_models,
        sampleA_all_models, sampleB_all_models)
    print("Contingency table:", ctable)

    with open("%s.Cluster_Population.txt" % args.sysname, 'w+') as fcp:
        for rows in range(len(ctable)):
            print(rows, ctable[rows][0], ctable[rows][1], file=fcp)

    density_custom_ranges = precision_rmsd.parse_custom_ranges(args.density)
    metadata['density_custom_ranges'] = density_custom_ranges
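
    # A sketch of what the density file might contain (hypothetical subunit
    # and protein names; entries follow the form described in the --selection
    # help, 'selection name': [(residue_start, residue_end, protein name)]):
    #
    #   density_custom_ranges = {"domainA": [(1, 120, "ProteinA")],
    #                            "domainB": [(1, 80, "ProteinB")]}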

    fpc = open("%s.Cluster_Precision.txt" % args.sysname, 'w+')

    metadata['clusters'] = []

    # For each retained cluster, compute the cluster precision and the
    # localization densities
    for i in range(len(retained_clusters)):
        cmeta = {'name': 'cluster.%d' % i}
        clus = retained_clusters[i]

        # The cluster center is the first member; get its conformation
        conform_0 = conforms[all_models[cluster_members[clus][0]]]

        # (Re)create the cluster directory and per-sample subdirectories
        if not os.path.exists("./cluster.%s" % i):
            os.mkdir("./cluster.%s" % i)
            os.mkdir("./cluster.%s/Sample_A/" % i)
            os.mkdir("./cluster.%s/Sample_B/" % i)
        else:
            shutil.rmtree("./cluster.%s" % i)
            os.mkdir("./cluster.%s" % i)
            os.mkdir("./cluster.%s/Sample_A/" % i)
            os.mkdir("./cluster.%s/Sample_B/" % i)

        # Output file for PrISM, if requested
        prism_file = 'cluster.' + str(i) + '.prism.npz'
        superposed_coords_cluster = []

        # Localization densities for sample A, sample B, and both samples
        gmd1 = precision_rmsd.GetModelDensity(
            custom_ranges=density_custom_ranges,
            resolution=args.density_threshold, voxel=args.voxel,
            bead_names=ps_names)
        gmd2 = precision_rmsd.GetModelDensity(
            custom_ranges=density_custom_ranges,
            resolution=args.density_threshold, voxel=args.voxel,
            bead_names=ps_names)
        gmdt = precision_rmsd.GetModelDensity(
            custom_ranges=density_custom_ranges,
            resolution=args.density_threshold, voxel=args.voxel,
            bead_names=ps_names)

        # Files listing the model indices in this cluster, for both samples
        # together and for each sample separately
        both_file = open('cluster.' + str(i) + '.all.txt', 'w')
        sampleA_file = open('cluster.' + str(i) + '.sample_A.txt', 'w')
        sampleB_file = open('cluster.' + str(i) + '.sample_B.txt', 'w')

        # Create one IMP particle per bead of the cluster center, with the
        # radii and masses read from the input models
        model = IMP.Model()
        ps = []
        for pi in range(len(conform_0)):
            p = IMP.Particle(model, "%s" % str(pi))
            IMP.core.XYZ.setup_particle(p, (0, 0, 0))
            IMP.core.XYZR.setup_particle(p, float(radii[pi]))
            IMP.atom.Mass.setup_particle(p, float(masses[pi]))
            ps.append(p)

        # Cluster precision is the average RMSD of the members to the
        # cluster center
        cluster_precision = 0.0

        cmeta['members'] = {
            'A': [int(x) for x in cluster_members[clus]
                  if x in sampleA_all_models],
            'B': [int(x) for x in cluster_members[clus]
                  if x in sampleB_all_models]}

        # Superposition transformation, updated as members are processed
        trans = None

        for mem in cluster_members[clus]:

            model_index = all_models[mem]

            # Superpose the member onto the cluster center and get its RMSD
            rmsd, superposed_ps, trans = \
                precision_rmsd.get_particles_from_superposed(
                    conforms[model_index], conform_0, args.align,
                    ps, trans, symm_groups)

            cluster_precision += rmsd

            # Add the superposed particles to the respective density maps
            gmdt.add_subunits_density(superposed_ps)
            print(model_index, file=both_file)

            if model_index in sampleA_all_models:
                gmd1.add_subunits_density(superposed_ps)
                print(model_index, file=sampleA_file)
            else:
                gmd2.add_subunits_density(superposed_ps)
                print(model_index, file=sampleB_file)

            # Collect the superposed coordinates of this member
            superposed_coords = \
                [IMP.core.XYZ(s_ps).get_coordinates()
                 for s_ps in superposed_ps]
            superposed_coords_cluster.append(
                numpy.array(superposed_coords))

        # Save the superposed coordinates of the cluster members for PrISM
        if args.prism:
            numpy.savez(
                prism_file,
                numpy.array(superposed_coords_cluster),
                numpy.array(ps_names))

        # Average distance to the cluster center, excluding the center itself
        cluster_precision /= float(len(cluster_members[clus]) - 1.0)
        cmeta['precision'] = cluster_precision
        print("Cluster precision (average distance to cluster centroid) "
              "of cluster ", str(i), " is %.3f" % cluster_precision, "A",
              file=fpc)

        both_file.close()
        sampleA_file.close()
        sampleB_file.close()

        # Write the localization densities: all models, sample A, sample B
        cmeta['density'] = gmdt.write_mrc(path="./cluster.%s" % i,
                                          file_prefix="LPD")
        cmeta['densityA'] = gmd1.write_mrc(path="./cluster.%s/Sample_A/" % i,
                                           file_prefix="LPD")
        cmeta['densityB'] = gmd2.write_mrc(path="./cluster.%s/Sample_B/" % i,
                                           file_prefix="LPD")

        # Write the cluster center model to the cluster directory
        cluster_center_index = cluster_members[clus][0]
        if args.rmf_A is not None:
            outfname = os.path.join("cluster.%d" % i,
                                    "cluster_center_model.rmf3")
            cluster_center_model_id = cluster_center_index
            if cluster_center_index < n_models[0]:
                make_cluster_centroid(
                    os.path.join(args.path, args.rmf_A),
                    cluster_center_index,
                    outfname, i, len(cluster_members[clus]),
                    cluster_precision, metadata_fname, args.path)
            else:
                make_cluster_centroid(
                    os.path.join(args.path, args.rmf_B),
                    cluster_center_index - n_models[0],
                    outfname, i, len(cluster_members[clus]),
                    cluster_precision, metadata_fname, args.path)
        else:
            cluster_center_model_id = all_models[cluster_center_index]
            outfname = os.path.join("cluster.%d" % i,
                                    "cluster_center_model." + args.extension)
            if 'rmf' in args.extension:
                make_cluster_centroid(
                    models_name[cluster_center_model_id], 0, outfname,
                    i, len(cluster_members[clus]),
                    cluster_precision, metadata_fname, args.path)
            else:
                shutil.copy(models_name[cluster_center_model_id], outfname)
        cmeta['centroid'] = {'index': cluster_center_index,
                             'file': outfname}

        metadata['clusters'].append(cmeta)

    with open(metadata_fname, 'w') as jfh:
        json.dump(metadata, jfh)

    # Generate the convergence plots with gnuplot, if requested
    if args.gnuplot:
        # gnuplot scripts shipped with IMP.sampcon (data directory name
        # assumed here)
        gnuplotdir = IMP.sampcon.get_data_path("gnuplot_scripts")
        for filename in sorted(glob.glob(os.path.join(gnuplotdir, "*.plt"))):
            cmd = ['gnuplot', '-e', 'sysname="%s"' % args.sysname, filename]
            subprocess.check_call(cmd)

if __name__ == '__main__':
    main()
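
# A small sketch of how the JSON metadata written by main() might be
# inspected afterwards; the key names mirror those assigned to the
# `metadata` dictionary above ('precision' is only present when sampling
# precision was calculated), and the file name is a placeholder:
#
#   import json
#   with open("mysystem.output.json") as fh:
#       meta = json.load(fh)
#   print(meta['precision']['sampling_precision'])
#   for c in meta['clusters']:
#       print(c['name'], c['precision'])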