IMP logo
IMP Reference Guide  develop.1a86c4215a,2024/04/24
The Integrative Modeling Platform
/output.py
1 """@namespace IMP.pmi1.output
2  Classes for writing output files and processing them.
3 """
4 
5 from __future__ import print_function, division
6 import IMP
7 import IMP.atom
8 import IMP.core
9 import IMP.pmi1
10 import IMP.pmi1.tools
11 import IMP.pmi1.io
12 import os
13 import sys
14 import ast
15 import RMF
16 import numpy as np
17 import operator
18 import string
19 try:
20  import cPickle as pickle
21 except ImportError:
22  import pickle
23 
24 class _ChainIDs(object):
25  """Map indices to multi-character chain IDs.
26  We label the first 26 chains A-Z, then we move to two-letter
27  chain IDs: AA through AZ, then BA through BZ, through to ZZ.
28  This continues with longer chain IDs."""
29  def __getitem__(self, ind):
30  chars = string.ascii_uppercase
31  lc = len(chars)
32  ids = []
33  while ind >= lc:
34  ids.append(chars[ind % lc])
35  ind = ind // lc - 1
36  ids.append(chars[ind])
37  return "".join(reversed(ids))
38 
39 
class ProtocolOutput(object):
    """Base class for capturing a modeling protocol.
    Unlike simple output of model coordinates, a complete
    protocol includes the input data used, details on the restraints,
    sampling, and clustering, as well as output models.
    Use via IMP.pmi1.representation.Representation.add_protocol_output()
    (for PMI 1) or
    IMP.pmi1.topology.System.add_protocol_output() (for PMI 2).

    @see IMP.pmi1.mmcif.ProtocolOutput for a concrete subclass that outputs
         mmCIF files.
    """
    pass
53 
54 def _flatten(seq):
55  l = []
56  for elt in seq:
57  t = type(elt)
58  if t is tuple or t is list:
59  for elt2 in _flatten(elt):
60  l.append(elt2)
61  else:
62  l.append(elt)
63  return l
64 
65 class Output(object):
66  """Class for easy writing of PDBs, RMFs, and stat files
67 
68  \note Model should be updated prior to writing outputs.
69  """
70  def __init__(self, ascii=True,atomistic=False):
71  self.dictionary_pdbs = {}
72  self.dictionary_rmfs = {}
73  self.dictionary_stats = {}
74  self.dictionary_stats2 = {}
75  self.best_score_list = None
76  self.nbestscoring = None
77  self.suffixes = []
78  self.replica_exchange = False
79  self.ascii = ascii
80  self.initoutput = {}
81  self.residuetypekey = IMP.StringKey("ResidueName")
82  # 1-character chain IDs, suitable for PDB output
83  self.chainids = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"
84  # Multi-character chain IDs, suitable for mmCIF output
85  self.multi_chainids = _ChainIDs()
86  self.dictchain = {} # keys are molecule names, values are chain ids
87  self.particle_infos_for_pdb = {}
88  self.atomistic=atomistic
89 
90  def get_pdb_names(self):
91  return list(self.dictionary_pdbs.keys())
92 
93  def get_rmf_names(self):
94  return list(self.dictionary_rmfs.keys())
95 
96  def get_stat_names(self):
97  return list(self.dictionary_stats.keys())
98 
99  def _init_dictchain(self, name, prot, multichar_chain=False):
100  self.dictchain[name] = {}
101 
102  chainids = self.multi_chainids if multichar_chain else self.chainids
103  for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
104  self.dictchain[name][i.get_name()] = chainids[n]
105 
106  def init_pdb(self, name, prot):
107  """Init PDB Writing.
108  @param name The PDB filename
109  @param prot The hierarchy to write to this pdb file
110  \note if the PDB name is 'System' then will use Selection to get molecules
111  """
112  flpdb = open(name, 'w')
113  flpdb.close()
114  self.dictionary_pdbs[name] = prot
115  self._init_dictchain(name, prot)
116 
117  def write_psf(self,filename,name):
118  flpsf=open(filename,'w')
119  flpsf.write("PSF CMAP CHEQ"+"\n")
120  index_residue_pair_list={}
121  (particle_infos_for_pdb, geometric_center)=self.get_particle_infos_for_pdb_writing(name)
122  nparticles=len(particle_infos_for_pdb)
123  flpsf.write(str(nparticles)+" !NATOM"+"\n")
124  for n,p in enumerate(particle_infos_for_pdb):
125  atom_index=n+1
126  residue_type=p[2]
127  chain=p[3]
128  resid=p[4]
129  flpsf.write('{0:8d}{1:1s}{2:4s}{3:1s}{4:4s}{5:1s}{6:4s}{7:1s}{8:4s}{9:1s}{10:4s}{11:14.6f}{12:14.6f}{13:8d}{14:14.6f}{15:14.6f}'.format(atom_index," ",chain," ",str(resid)," ",'"'+residue_type.get_string()+'"'," ","C"," ","C",1.0,0.0,0,0.0,0.0))
130  flpsf.write('\n')
131  #flpsf.write(str(atom_index)+" "+str(chain)+" "+str(resid)+" "+str(residue_type).replace('"','')+" C C "+"1.0 0.0 0 0.0 0.0\n")
132  if chain not in index_residue_pair_list:
133  index_residue_pair_list[chain]=[(atom_index,resid)]
134  else:
135  index_residue_pair_list[chain].append((atom_index,resid))
136 
137 
138  #now write the connectivity
139  indexes_pairs=[]
140  for chain in sorted(index_residue_pair_list.keys()):
141 
142  ls=index_residue_pair_list[chain]
143  #sort by residue
144  ls=sorted(ls, key=lambda tup: tup[1])
145  #get the index list
146  indexes=[x[0] for x in ls]
147  # get the contiguous pairs
148  indexes_pairs+=list(IMP.pmi1.tools.sublist_iterator(indexes,lmin=2,lmax=2))
149  nbonds=len(indexes_pairs)
150  flpsf.write(str(nbonds)+" !NBOND: bonds"+"\n")
151 
152  sublists=[indexes_pairs[i:i+4] for i in range(0,len(indexes_pairs),4)]
153 
154  # save bonds in fized column format
155  for ip in sublists:
156  if len(ip)==4:
157  flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}{6:8d}{7:8d}'.format(ip[0][0],ip[0][1],
158  ip[1][0],ip[1][1],ip[2][0],ip[2][1],ip[3][0],ip[3][1]))
159  elif len(ip)==3:
160  flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}'.format(ip[0][0],ip[0][1],ip[1][0],
161  ip[1][1],ip[2][0],ip[2][1]))
162  elif len(ip)==2:
163  flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}'.format(ip[0][0],ip[0][1],ip[1][0],ip[1][1]))
164  elif len(ip)==1:
165  flpsf.write('{0:8d}{1:8d}'.format(ip[0][0],ip[0][1]))
166  flpsf.write('\n')
167 
168  del particle_infos_for_pdb
169  flpsf.close()
170 
171  def write_pdb(self,name,
172  appendmode=True,
173  translate_to_geometric_center=False,
174  write_all_residues_per_bead=False):
175  if appendmode:
176  flpdb = open(name, 'a')
177  else:
178  flpdb = open(name, 'w')
179 
180  (particle_infos_for_pdb,
181  geometric_center) = self.get_particle_infos_for_pdb_writing(name)
182 
183  if not translate_to_geometric_center:
184  geometric_center = (0, 0, 0)
185 
186  for n,tupl in enumerate(particle_infos_for_pdb):
187  (xyz, atom_type, residue_type,
188  chain_id, residue_index, all_indexes, radius) = tupl
189  if atom_type is None:
190  atom_type = IMP.atom.AT_CA
191  if ( (write_all_residues_per_bead) and (all_indexes is not None) ):
192  for residue_number in all_indexes:
193  flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
194  xyz[1] - geometric_center[1],
195  xyz[2] - geometric_center[2]),
196  n+1, atom_type, residue_type,
197  chain_id, residue_number,' ',1.00,radius))
198  else:
199  flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
200  xyz[1] - geometric_center[1],
201  xyz[2] - geometric_center[2]),
202  n+1, atom_type, residue_type,
203  chain_id, residue_index,' ',1.00,radius))
204  flpdb.write("ENDMDL\n")
205  flpdb.close()
206 
207  del particle_infos_for_pdb
208 
209  def get_prot_name_from_particle(self, name, p):
210  """Get the protein name from the particle.
211  This is done by traversing the hierarchy."""
213  p, self.dictchain[name])
214 
215  def get_particle_infos_for_pdb_writing(self, name):
216  # index_residue_pair_list={}
217 
218  # the resindexes dictionary keep track of residues that have been already
219  # added to avoid duplication
220  # highest resolution have highest priority
221  resindexes_dict = {}
222 
223  # this dictionary dill contain the sequence of tuples needed to
224  # write the pdb
225  particle_infos_for_pdb = []
226 
227  geometric_center = [0, 0, 0]
228  atom_count = 0
229  atom_index = 0
230 
231  ps = IMP.atom.get_leaves(self.dictionary_pdbs[name])
232 
233  for n, p in enumerate(ps):
234  protname, is_a_bead = self.get_prot_name_from_particle(name, p)
235 
236  if protname not in resindexes_dict:
237  resindexes_dict[protname] = []
238 
239  if IMP.atom.Atom.get_is_setup(p) and self.atomistic:
240  residue = IMP.atom.Residue(IMP.atom.Atom(p).get_parent())
241  rt = residue.get_residue_type()
242  resind = residue.get_index()
243  atomtype = IMP.atom.Atom(p).get_atom_type()
244  xyz = list(IMP.core.XYZ(p).get_coordinates())
245  radius = IMP.core.XYZR(p).get_radius()
246  geometric_center[0] += xyz[0]
247  geometric_center[1] += xyz[1]
248  geometric_center[2] += xyz[2]
249  atom_count += 1
250  particle_infos_for_pdb.append((xyz,
251  atomtype, rt, self.dictchain[name][protname], resind, None, radius))
252  resindexes_dict[protname].append(resind)
253 
255 
256  residue = IMP.atom.Residue(p)
257  resind = residue.get_index()
258  # skip if the residue was already added by atomistic resolution
259  # 0
260  if resind in resindexes_dict[protname]:
261  continue
262  else:
263  resindexes_dict[protname].append(resind)
264  rt = residue.get_residue_type()
265  xyz = IMP.core.XYZ(p).get_coordinates()
266  radius = IMP.core.XYZR(p).get_radius()
267  geometric_center[0] += xyz[0]
268  geometric_center[1] += xyz[1]
269  geometric_center[2] += xyz[2]
270  atom_count += 1
271  particle_infos_for_pdb.append((xyz, None,
272  rt, self.dictchain[name][protname], resind, None, radius))
273 
274  elif IMP.atom.Fragment.get_is_setup(p) and not is_a_bead:
275  resindexes = list(IMP.pmi1.tools.get_residue_indexes(p))
276  resind = resindexes[len(resindexes) // 2]
277  if resind in resindexes_dict[protname]:
278  continue
279  else:
280  resindexes_dict[protname].append(resind)
281  rt = IMP.atom.ResidueType('BEA')
282  xyz = IMP.core.XYZ(p).get_coordinates()
283  radius = IMP.core.XYZR(p).get_radius()
284  geometric_center[0] += xyz[0]
285  geometric_center[1] += xyz[1]
286  geometric_center[2] += xyz[2]
287  atom_count += 1
288  particle_infos_for_pdb.append((xyz, None,
289  rt, self.dictchain[name][protname], resind, resindexes, radius))
290 
291  else:
292  if is_a_bead:
293  rt = IMP.atom.ResidueType('BEA')
294  resindexes = list(IMP.pmi1.tools.get_residue_indexes(p))
295  if len(resindexes) > 0:
296  resind = resindexes[len(resindexes) // 2]
297  xyz = IMP.core.XYZ(p).get_coordinates()
298  radius = IMP.core.XYZR(p).get_radius()
299  geometric_center[0] += xyz[0]
300  geometric_center[1] += xyz[1]
301  geometric_center[2] += xyz[2]
302  atom_count += 1
303  particle_infos_for_pdb.append((xyz, None,
304  rt, self.dictchain[name][protname], resind, resindexes, radius))
305 
306  if atom_count > 0:
307  geometric_center = (geometric_center[0] / atom_count,
308  geometric_center[1] / atom_count,
309  geometric_center[2] / atom_count)
310 
311  # sort by chain ID, then residue index. Longer chain IDs (e.g. AA)
312  # should always come after shorter (e.g. Z)
313  particle_infos_for_pdb = sorted(particle_infos_for_pdb,
314  key=lambda x: (len(x[3]), x[3], x[4]))
315 
316  return (particle_infos_for_pdb, geometric_center)
317 
318 
319  def write_pdbs(self, appendmode=True):
320  for pdb in self.dictionary_pdbs.keys():
321  self.write_pdb(pdb, appendmode)
322 
323  def init_pdb_best_scoring(self,
324  suffix,
325  prot,
326  nbestscoring,
327  replica_exchange=False):
328  # save only the nbestscoring conformations
329  # create as many pdbs as needed
330 
331  self.suffixes.append(suffix)
332  self.replica_exchange = replica_exchange
333  if not self.replica_exchange:
334  # common usage
335  # if you are not in replica exchange mode
336  # initialize the array of scores internally
337  self.best_score_list = []
338  else:
339  # otherwise the replicas must cominucate
340  # through a common file to know what are the best scores
341  self.best_score_file_name = "best.scores.rex.py"
342  self.best_score_list = []
343  best_score_file = open(self.best_score_file_name, "w")
344  best_score_file.write(
345  "self.best_score_list=" + str(self.best_score_list))
346  best_score_file.close()
347 
348  self.nbestscoring = nbestscoring
349  for i in range(self.nbestscoring):
350  name = suffix + "." + str(i) + ".pdb"
351  flpdb = open(name, 'w')
352  flpdb.close()
353  self.dictionary_pdbs[name] = prot
354  self._init_dictchain(name, prot)
355 
356  def write_pdb_best_scoring(self, score):
357  if self.nbestscoring is None:
358  print("Output.write_pdb_best_scoring: init_pdb_best_scoring not run")
359 
360  # update the score list
361  if self.replica_exchange:
362  # read the self.best_score_list from the file
363  exec(open(self.best_score_file_name).read())
364 
365  if len(self.best_score_list) < self.nbestscoring:
366  self.best_score_list.append(score)
367  self.best_score_list.sort()
368  index = self.best_score_list.index(score)
369  for suffix in self.suffixes:
370  for i in range(len(self.best_score_list) - 2, index - 1, -1):
371  oldname = suffix + "." + str(i) + ".pdb"
372  newname = suffix + "." + str(i + 1) + ".pdb"
373  # rename on Windows fails if newname already exists
374  if os.path.exists(newname):
375  os.unlink(newname)
376  os.rename(oldname, newname)
377  filetoadd = suffix + "." + str(index) + ".pdb"
378  self.write_pdb(filetoadd, appendmode=False)
379 
380  else:
381  if score < self.best_score_list[-1]:
382  self.best_score_list.append(score)
383  self.best_score_list.sort()
384  self.best_score_list.pop(-1)
385  index = self.best_score_list.index(score)
386  for suffix in self.suffixes:
387  for i in range(len(self.best_score_list) - 1, index - 1, -1):
388  oldname = suffix + "." + str(i) + ".pdb"
389  newname = suffix + "." + str(i + 1) + ".pdb"
390  os.rename(oldname, newname)
391  filenametoremove = suffix + \
392  "." + str(self.nbestscoring) + ".pdb"
393  os.remove(filenametoremove)
394  filetoadd = suffix + "." + str(index) + ".pdb"
395  self.write_pdb(filetoadd, appendmode=False)
396 
397  if self.replica_exchange:
398  # write the self.best_score_list to the file
399  best_score_file = open(self.best_score_file_name, "w")
400  best_score_file.write(
401  "self.best_score_list=" + str(self.best_score_list))
402  best_score_file.close()
403 
404  def init_rmf(self, name, hierarchies, rs=None, geometries=None, listofobjects=None):
405  """
406  This function initialize an RMF file
407 
408  @param name the name of the RMF file
409  @param hierarchies the hierarchies to be included (it is a list)
410  @param rs optional, the restraint sets (it is a list)
411  @param geometries optional, the geometries (it is a list)
412  @param listofobjects optional, the list of objects for the stat (it is a list)
413  """
414  rh = RMF.create_rmf_file(name)
415  IMP.rmf.add_hierarchies(rh, hierarchies)
416  cat=None
417  outputkey_rmfkey=None
418 
419  if rs is not None:
421  if geometries is not None:
422  IMP.rmf.add_geometries(rh,geometries)
423  if listofobjects is not None:
424  cat = rh.get_category("stat")
425  outputkey_rmfkey={}
426  for l in listofobjects:
427  if not "get_output" in dir(l):
428  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
429  output=l.get_output()
430  for outputkey in output:
431  rmftag=RMF.string_tag
432  if type(output[outputkey]) is float:
433  rmftag=RMF.float_tag
434  elif type(output[outputkey]) is int:
435  rmftag=RMF.int_tag
436  elif type(output[outputkey]) is str:
437  rmftag = RMF.string_tag
438  else:
439  rmftag = RMF.string_tag
440  rmfkey=rh.get_key(cat, outputkey, rmftag)
441  outputkey_rmfkey[outputkey]=rmfkey
442  outputkey_rmfkey["rmf_file"]=rh.get_key(cat, "rmf_file", RMF.string_tag)
443  outputkey_rmfkey["rmf_frame_index"]=rh.get_key(cat, "rmf_frame_index", RMF.int_tag)
444 
445  self.dictionary_rmfs[name] = (rh,cat,outputkey_rmfkey,listofobjects)
446 
447  def add_restraints_to_rmf(self, name, objectlist):
448  flatobjectlist=_flatten(objectlist)
449  for o in flatobjectlist:
450  try:
451  rs = o.get_restraint_for_rmf()
452  except:
453  rs = o.get_restraint()
455  self.dictionary_rmfs[name][0],
456  rs.get_restraints())
457 
458  def add_geometries_to_rmf(self, name, objectlist):
459  for o in objectlist:
460  geos = o.get_geometries()
461  IMP.rmf.add_geometries(self.dictionary_rmfs[name][0], geos)
462 
463  def add_particle_pair_from_restraints_to_rmf(self, name, objectlist):
464  for o in objectlist:
465 
466  pps = o.get_particle_pairs()
467  for pp in pps:
469  self.dictionary_rmfs[name][0],
471 
472  def write_rmf(self, name):
473  IMP.rmf.save_frame(self.dictionary_rmfs[name][0])
474  if self.dictionary_rmfs[name][1] is not None:
475  cat=self.dictionary_rmfs[name][1]
476  outputkey_rmfkey=self.dictionary_rmfs[name][2]
477  listofobjects=self.dictionary_rmfs[name][3]
478  for l in listofobjects:
479  output=l.get_output()
480  for outputkey in output:
481  rmfkey=outputkey_rmfkey[outputkey]
482  try:
483  self.dictionary_rmfs[name][0].get_root_node().set_value(rmfkey,output[outputkey])
484  except NotImplementedError:
485  continue
486  rmfkey = outputkey_rmfkey["rmf_file"]
487  self.dictionary_rmfs[name][0].get_root_node().set_value(rmfkey, name)
488  rmfkey = outputkey_rmfkey["rmf_frame_index"]
489  nframes=self.dictionary_rmfs[name][0].get_number_of_frames()
490  self.dictionary_rmfs[name][0].get_root_node().set_value(rmfkey, nframes-1)
491  self.dictionary_rmfs[name][0].flush()
492 
493  def close_rmf(self, name):
494  rh = self.dictionary_rmfs[name][0]
495  del self.dictionary_rmfs[name]
496  del rh
497 
498  def write_rmfs(self):
499  for rmfinfo in self.dictionary_rmfs.keys():
500  self.write_rmf(rmfinfo[0])
501 
502  def init_stat(self, name, listofobjects):
503  if self.ascii:
504  flstat = open(name, 'w')
505  flstat.close()
506  else:
507  flstat = open(name, 'wb')
508  flstat.close()
509 
510  # check that all objects in listofobjects have a get_output method
511  for l in listofobjects:
512  if not "get_output" in dir(l):
513  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
514  self.dictionary_stats[name] = listofobjects
515 
516  def set_output_entry(self, key, value):
517  self.initoutput.update({key: value})
518 
519  def write_stat(self, name, appendmode=True):
520  output = self.initoutput
521  for obj in self.dictionary_stats[name]:
522  d = obj.get_output()
523  # remove all entries that begin with _ (private entries)
524  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
525  output.update(dfiltered)
526 
527  if appendmode:
528  writeflag = 'a'
529  else:
530  writeflag = 'w'
531 
532  if self.ascii:
533  flstat = open(name, writeflag)
534  flstat.write("%s \n" % output)
535  flstat.close()
536  else:
537  flstat = open(name, writeflag + 'b')
538  cPickle.dump(output, flstat, 2)
539  flstat.close()
540 
541  def write_stats(self):
542  for stat in self.dictionary_stats.keys():
543  self.write_stat(stat)
544 
545  def get_stat(self, name):
546  output = {}
547  for obj in self.dictionary_stats[name]:
548  output.update(obj.get_output())
549  return output
550 
551  def write_test(self, name, listofobjects):
552 # write the test:
553 # output=output.Output()
554 # output.write_test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
555 # run the test:
556 # output=output.Output()
557 # output.test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
558  flstat = open(name, 'w')
559  output = self.initoutput
560  for l in listofobjects:
561  if not "get_test_output" in dir(l) and not "get_output" in dir(l):
562  raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
563  self.dictionary_stats[name] = listofobjects
564 
565  for obj in self.dictionary_stats[name]:
566  try:
567  d = obj.get_test_output()
568  except:
569  d = obj.get_output()
570  # remove all entries that begin with _ (private entries)
571  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
572  output.update(dfiltered)
573  #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
574  #output.update(
575  # {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
576  flstat.write("%s \n" % output)
577  flstat.close()
578 
579  def test(self, name, listofobjects, tolerance=1e-5):
580  output = self.initoutput
581  for l in listofobjects:
582  if not "get_test_output" in dir(l) and not "get_output" in dir(l):
583  raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
584  for obj in listofobjects:
585  try:
586  output.update(obj.get_test_output())
587  except:
588  output.update(obj.get_output())
589  #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
590  #output.update(
591  # {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
592 
593  flstat = open(name, 'r')
594 
595  passed=True
596  for l in flstat:
597  test_dict = ast.literal_eval(l)
598  for k in test_dict:
599  if k in output:
600  old_value = str(test_dict[k])
601  new_value = str(output[k])
602  try:
603  float(old_value)
604  is_float = True
605  except ValueError:
606  is_float = False
607 
608  if is_float:
609  fold = float(old_value)
610  fnew = float(new_value)
611  diff = abs(fold - fnew)
612  if diff > tolerance:
613  print("%s: test failed, old value: %s new value %s; "
614  "diff %f > %f" % (str(k), str(old_value),
615  str(new_value), diff,
616  tolerance), file=sys.stderr)
617  passed=False
618  elif test_dict[k] != output[k]:
619  if len(old_value) < 50 and len(new_value) < 50:
620  print("%s: test failed, old value: %s new value %s"
621  % (str(k), old_value, new_value), file=sys.stderr)
622  passed=False
623  else:
624  print("%s: test failed, omitting results (too long)"
625  % str(k), file=sys.stderr)
626  passed=False
627 
628  else:
629  print("%s from old objects (file %s) not in new objects"
630  % (str(k), str(name)), file=sys.stderr)
631  return passed
632 
633  def get_environment_variables(self):
634  import os
635  return str(os.environ)
636 
637  def get_versions_of_relevant_modules(self):
638  import IMP
639  versions = {}
640  versions["IMP_VERSION"] = IMP.get_module_version()
641  try:
642  import IMP.pmi1
643  versions["PMI_VERSION"] = IMP.pmi1.get_module_version()
644  except (ImportError):
645  pass
646  try:
647  import IMP.isd2
648  versions["ISD2_VERSION"] = IMP.isd2.get_module_version()
649  except (ImportError):
650  pass
651  try:
652  import IMP.isd_emxl
653  versions["ISD_EMXL_VERSION"] = IMP.isd_emxl.get_module_version()
654  except (ImportError):
655  pass
656  return versions
657 
658 #-------------------
659  def init_stat2(
660  self,
661  name,
662  listofobjects,
663  extralabels=None,
664  listofsummedobjects=None):
665  # this is a new stat file that should be less
666  # space greedy!
667  # listofsummedobjects must be in the form [([obj1,obj2,obj3,obj4...],label)]
668  # extralabels
669 
670  if listofsummedobjects is None:
671  listofsummedobjects = []
672  if extralabels is None:
673  extralabels = []
674  flstat = open(name, 'w')
675  output = {}
676  stat2_keywords = {"STAT2HEADER": "STAT2HEADER"}
677  stat2_keywords.update(
678  {"STAT2HEADER_ENVIRON": str(self.get_environment_variables())})
679  stat2_keywords.update(
680  {"STAT2HEADER_IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
681  stat2_inverse = {}
682 
683  for l in listofobjects:
684  if not "get_output" in dir(l):
685  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
686  else:
687  d = l.get_output()
688  # remove all entries that begin with _ (private entries)
689  dfiltered = dict((k, v)
690  for k, v in d.items() if k[0] != "_")
691  output.update(dfiltered)
692 
693  # check for customizable entries
694  for l in listofsummedobjects:
695  for t in l[0]:
696  if not "get_output" in dir(t):
697  raise ValueError("Output: object %s doesn't have get_output() method" % str(t))
698  else:
699  if "_TotalScore" not in t.get_output():
700  raise ValueError("Output: object %s doesn't have _TotalScore entry to be summed" % str(t))
701  else:
702  output.update({l[1]: 0.0})
703 
704  for k in extralabels:
705  output.update({k: 0.0})
706 
707  for n, k in enumerate(output):
708  stat2_keywords.update({n: k})
709  stat2_inverse.update({k: n})
710 
711  flstat.write("%s \n" % stat2_keywords)
712  flstat.close()
713  self.dictionary_stats2[name] = (
714  listofobjects,
715  stat2_inverse,
716  listofsummedobjects,
717  extralabels)
718 
719  def write_stat2(self, name, appendmode=True):
720  output = {}
721  (listofobjects, stat2_inverse, listofsummedobjects,
722  extralabels) = self.dictionary_stats2[name]
723 
724  # writing objects
725  for obj in listofobjects:
726  od = obj.get_output()
727  dfiltered = dict((k, v) for k, v in od.items() if k[0] != "_")
728  for k in dfiltered:
729  output.update({stat2_inverse[k]: od[k]})
730 
731  # writing summedobjects
732  for l in listofsummedobjects:
733  partial_score = 0.0
734  for t in l[0]:
735  d = t.get_output()
736  partial_score += float(d["_TotalScore"])
737  output.update({stat2_inverse[l[1]]: str(partial_score)})
738 
739  # writing extralabels
740  for k in extralabels:
741  if k in self.initoutput:
742  output.update({stat2_inverse[k]: self.initoutput[k]})
743  else:
744  output.update({stat2_inverse[k]: "None"})
745 
746  if appendmode:
747  writeflag = 'a'
748  else:
749  writeflag = 'w'
750 
751  flstat = open(name, writeflag)
752  flstat.write("%s \n" % output)
753  flstat.close()
754 
755  def write_stats2(self):
756  for stat in self.dictionary_stats2.keys():
757  self.write_stat2(stat)
758 
759 
class OutputStatistics(object):
    """Collect statistics from ProcessOutput.get_fields().
    Counters of the total number of frames read, plus the models that
    passed the various filters used in get_fields(), are provided."""
    def __init__(self):
        # all counters start at zero; get_fields() increments them
        self.total = 0
        self.passed_get_every = 0
        self.passed_filterout = 0
        self.passed_filtertuple = 0
769 
770 
771 class ProcessOutput(object):
772  """A class for reading stat files (either rmf or ascii v1 and v2)"""
773  def __init__(self, filename):
774  self.filename = filename
775  self.isstat1 = False
776  self.isstat2 = False
777  self.isrmf = False
778 
779  # open the file
780  if not self.filename is None:
781  f = open(self.filename, "r")
782  else:
783  raise ValueError("No file name provided. Use -h for help")
784 
785  try:
786  #let's see if that is an rmf file
787  rh = RMF.open_rmf_file_read_only(self.filename)
788  self.isrmf=True
789  cat=rh.get_category('stat')
790  rmf_klist=rh.get_keys(cat)
791  self.rmf_names_keys=dict([(rh.get_name(k),k) for k in rmf_klist])
792  del rh
793 
794  except IOError:
795  # try with an ascii stat file
796  # get the keys from the first line
797  for line in f.readlines():
798  d = ast.literal_eval(line)
799  self.klist = list(d.keys())
800  # check if it is a stat2 file
801  if "STAT2HEADER" in self.klist:
802  self.isstat2 = True
803  for k in self.klist:
804  if "STAT2HEADER" in str(k):
805  # if print_header: print k, d[k]
806  del d[k]
807  stat2_dict = d
808  # get the list of keys sorted by value
809  kkeys = [k[0]
810  for k in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
811  self.klist = [k[1]
812  for k in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
813  self.invstat2_dict = {}
814  for k in kkeys:
815  self.invstat2_dict.update({stat2_dict[k]: k})
816  else:
817  IMP.handle_use_deprecated("statfile v1 is deprecated. "
818  "Please convert to statfile v2.\n")
819  self.isstat1 = True
820  self.klist.sort()
821 
822  break
823  f.close()
824 
825 
826  def get_keys(self):
827  if self.isrmf:
828  return sorted(self.rmf_names_keys.keys())
829  else:
830  return self.klist
831 
832  def show_keys(self, ncolumns=2, truncate=65):
833  IMP.pmi1.tools.print_multicolumn(self.get_keys(), ncolumns, truncate)
834 
835  def get_fields(self, fields, filtertuple=None, filterout=None, get_every=1,
836  statistics=None):
837  '''
838  Get the desired field names, and return a dictionary.
839  Namely, "fields" are the queried keys in the stat file (eg. ["Total_Score",...])
840  The returned data structure is a dictionary, where each key is a field and the value
841  is the time series (ie, frame ordered series)
842  of that field (ie, {"Total_Score":[Score_0,Score_1,Score_2,Score_3,...],....} )
843 
844  @param fields (list of strings) queried keys in the stat file (eg. "Total_Score"....)
845  @param filterout specify if you want to "grep" out something from
846  the file, so that it is faster
847  @param filtertuple a tuple that contains
848  ("TheKeyToBeFiltered",relationship,value)
849  where relationship = "<", "==", or ">"
850  @param get_every only read every Nth line from the file
851  @param statistics if provided, accumulate statistics in an
852  OutputStatistics object
853  '''
854 
855  if statistics is None:
856  statistics = OutputStatistics()
857  outdict = {}
858  for field in fields:
859  outdict[field] = []
860 
861  # print fields values
862  if self.isrmf:
863  rh = RMF.open_rmf_file_read_only(self.filename)
864  nframes=rh.get_number_of_frames()
865  for i in range(nframes):
866  statistics.total += 1
867  # "get_every" and "filterout" not enforced for RMF
868  statistics.passed_get_every += 1
869  statistics.passed_filterout += 1
870  IMP.rmf.load_frame(rh, RMF.FrameID(i))
871  if not filtertuple is None:
872  keytobefiltered = filtertuple[0]
873  relationship = filtertuple[1]
874  value = filtertuple[2]
875  datavalue=rh.get_root_node().get_value(self.rmf_names_keys[keytobefiltered])
876  if self.isfiltered(datavalue,relationship,value): continue
877 
878  statistics.passed_filtertuple += 1
879  for field in fields:
880  outdict[field].append(rh.get_root_node().get_value(self.rmf_names_keys[field]))
881 
882  else:
883  f = open(self.filename, "r")
884  line_number = 0
885 
886  for line in f.readlines():
887  statistics.total += 1
888  if not filterout is None:
889  if filterout in line:
890  continue
891  statistics.passed_filterout += 1
892  line_number += 1
893 
894  if line_number % get_every != 0:
895  if line_number == 1 and self.isstat2:
896  statistics.total -= 1
897  statistics.passed_filterout -= 1
898  continue
899  statistics.passed_get_every += 1
900  #if line_number % 1000 == 0:
901  # print "ProcessOutput.get_fields: read line %s from file %s" % (str(line_number), self.filename)
902  try:
903  d = ast.literal_eval(line)
904  except:
905  print("# Warning: skipped line number " + str(line_number) + " not a valid line")
906  continue
907 
908  if self.isstat1:
909 
910  if not filtertuple is None:
911  keytobefiltered = filtertuple[0]
912  relationship = filtertuple[1]
913  value = filtertuple[2]
914  datavalue=d[keytobefiltered]
915  if self.isfiltered(datavalue, relationship, value): continue
916 
917  statistics.passed_filtertuple += 1
918  [outdict[field].append(d[field]) for field in fields]
919 
920  elif self.isstat2:
921  if line_number == 1:
922  statistics.total -= 1
923  statistics.passed_filterout -= 1
924  statistics.passed_get_every -= 1
925  continue
926 
927  if not filtertuple is None:
928  keytobefiltered = filtertuple[0]
929  relationship = filtertuple[1]
930  value = filtertuple[2]
931  datavalue=d[self.invstat2_dict[keytobefiltered]]
932  if self.isfiltered(datavalue, relationship, value): continue
933 
934  statistics.passed_filtertuple += 1
935  [outdict[field].append(d[self.invstat2_dict[field]]) for field in fields]
936 
937  f.close()
938 
939  return outdict
940 
941  def isfiltered(self,datavalue,relationship,refvalue):
942  dofilter=False
943  try:
944  fdatavalue=float(datavalue)
945  except ValueError:
946  raise ValueError("ProcessOutput.filter: datavalue cannot be converted into a float")
947 
948  if relationship == "<":
949  if float(datavalue) >= refvalue:
950  dofilter=True
951  if relationship == ">":
952  if float(datavalue) <= refvalue:
953  dofilter=True
954  if relationship == "==":
955  if float(datavalue) != refvalue:
956  dofilter=True
957  return dofilter
958 
959 
class RMFHierarchyHandler(IMP.atom.Hierarchy):
    """ class to allow more advanced handling of RMF files.
    It is both a container and a IMP.atom.Hierarchy.
    - it is iterable (while loading the corresponding frame)
    - Item brackets [] load the corresponding frame
    - slice create an iterator
    - can relink to another RMF file

    NOTE(review): the class statement itself was dropped by the HTML
    extraction (original line 960); restored here.
    """

    def __init__(self, model, rmf_file_name):
        """
        @param model: the IMP.Model()
        @param rmf_file_name: str, path of the rmf file
        @raise TypeError if the file cannot be opened as an RMF
        """
        self.model = model
        try:
            self.rh_ref = RMF.open_rmf_file_read_only(rmf_file_name)
        except TypeError:
            raise TypeError("Wrong rmf file name or type: %s" % str(rmf_file_name))
        hs = IMP.rmf.create_hierarchies(self.rh_ref, self.model)
        IMP.rmf.load_frame(self.rh_ref, RMF.FrameID(0))
        self.root_hier_ref = hs[0]
        IMP.atom.Hierarchy.__init__(self, self.root_hier_ref)
        self.model.update()
        # optional callback object whose .method() is invoked on relink
        self.ColorHierarchy = None

    def link_to_rmf(self, rmf_file_name):
        """
        Link to another RMF file
        """
        self.rh_ref = RMF.open_rmf_file_read_only(rmf_file_name)
        IMP.rmf.link_hierarchies(self.rh_ref, [self])
        if self.ColorHierarchy:
            self.ColorHierarchy.method()
        RMFHierarchyHandler.set_frame(self, 0)

    def set_frame(self, index):
        """Load frame `index` from the linked RMF and update the model."""
        try:
            IMP.rmf.load_frame(self.rh_ref, RMF.FrameID(index))
        except:  # noqa: E722 — legacy best-effort: skip unreadable frames
            # NOTE(review): self.current_rmf is only set by the
            # StatHierarchyHandler subclass — confirm before relying on
            # this message in base-class use.
            print("skipping frame %s:%d\n" % (self.current_rmf, index))
        self.model.update()

    def get_number_of_frames(self):
        """Number of frames stored in the linked RMF file."""
        return self.rh_ref.get_number_of_frames()

    def __getitem__(self, int_slice_adaptor):
        """int -> load that frame and return its index; slice -> iterator."""
        if type(int_slice_adaptor) is int:
            self.set_frame(int_slice_adaptor)
            return int_slice_adaptor
        elif type(int_slice_adaptor) is slice:
            return self.__iter__(int_slice_adaptor)
        else:
            raise TypeError("Unknown Type")

    def __len__(self):
        return self.get_number_of_frames()

    def __iter__(self, slice_key=None):
        """Iterate over all frames (loading each), or over `slice_key`."""
        if slice_key is None:
            for nframe in range(len(self)):
                yield self[nframe]
        else:
            for nframe in list(range(len(self)))[slice_key]:
                yield self[nframe]
1025 
class CacheHierarchyCoordinates(object):
    """Cache rigid-body reference frames and particle coordinates of a
    StatHierarchyHandler so that previously visited frames can be
    restored without re-reading the RMF file."""

    def __init__(self, StatHierarchyHandler):
        self.xyzs = []       # flexible beads (IMP.core.XYZ)
        self.nrms = []       # non-rigid members
        self.rbs = []        # rigid bodies
        self.nrm_coors = {}  # index -> {nrm: internal coordinates}
        self.xyz_coors = {}  # index -> {xyz: coordinates}
        self.rb_trans = {}   # index -> {rb: reference frame}
        self.current_index = None
        self.rmfh = StatHierarchyHandler
        rbs, xyzs = IMP.pmi1.tools.get_rbs_and_beads([self.rmfh])
        self.model = self.rmfh.get_model()
        self.rbs = rbs
        for xyz in xyzs:
            # Restored guard dropped by the HTML extraction (original line
            # 1040): non-rigid members are cached via internal coordinates,
            # plain beads via their XYZ coordinates.
            if IMP.core.NonRigidMember.get_is_setup(xyz):
                nrm = IMP.core.NonRigidMember(xyz)
                self.nrms.append(nrm)
            else:
                fb = IMP.core.XYZ(xyz)
                self.xyzs.append(fb)

    def do_store(self, index):
        """Snapshot the current conformation under key `index`."""
        self.rb_trans[index] = {}
        self.nrm_coors[index] = {}
        self.xyz_coors[index] = {}
        for rb in self.rbs:
            self.rb_trans[index][rb] = rb.get_reference_frame()
        for nrm in self.nrms:
            self.nrm_coors[index][nrm] = nrm.get_internal_coordinates()
        for xyz in self.xyzs:
            self.xyz_coors[index][xyz] = xyz.get_coordinates()
        self.current_index = index

    def do_update(self, index):
        """Restore the conformation stored under key `index` (no-op if it
        is already the current one)."""
        if self.current_index != index:
            for rb in self.rbs:
                rb.set_reference_frame(self.rb_trans[index][rb])
            for nrm in self.nrms:
                nrm.set_internal_coordinates(self.nrm_coors[index][nrm])
            for xyz in self.xyzs:
                xyz.set_coordinates(self.xyz_coors[index][xyz])
            self.current_index = index
            self.model.update()

    def get_number_of_frames(self):
        """Number of cached snapshots."""
        return len(self.rb_trans)

    def __getitem__(self, index):
        """Return True if a snapshot exists for integer key `index`."""
        if type(index) is int:
            return index in self.rb_trans
        else:
            raise TypeError("Unknown Type")

    def __len__(self):
        return self.get_number_of_frames()
1084 
1085 
1086 
1087 
class StatHierarchyHandler(RMFHierarchyHandler):
    """ class to link stat files to several rmf files

    NOTE(review): the class statement was dropped by the HTML extraction
    (original line 1088); restored here per the documented definition.
    """

    def __init__(self, model=None, stat_file=None,
                 number_best_scoring_models=None, score_key=None,
                 StatHierarchyHandler=None, cache=None):
        """
        @param model: IMP.Model()
        @param stat_file: either 1) a list or 2) a single stat file names
               (either rmfs or ascii, or pickled data or pickled cluster),
               3) a dictionary containing an rmf/ascii stat file name as key
               and a list of frames as values
        @param number_best_scoring_models: keep only the n best-scoring models
        @param StatHierarchyHandler: copy constructor input object
        @param cache: cache coordinates and rigid body transformations.
        """
        if StatHierarchyHandler is not None:
            # copy constructor: overrides all other arguments; create a
            # copy with a different RMFHierarchyHandler
            self.model = StatHierarchyHandler.model
            self.data = StatHierarchyHandler.data
            self.number_best_scoring_models = StatHierarchyHandler.number_best_scoring_models
            self.is_setup = True
            self.current_rmf = StatHierarchyHandler.current_rmf
            self.current_frame = None
            self.current_index = None
            self.score_threshold = StatHierarchyHandler.score_threshold
            self.score_key = StatHierarchyHandler.score_key
            self.cache = StatHierarchyHandler.cache
            RMFHierarchyHandler.__init__(self, self.model, self.current_rmf)
            if self.cache:
                self.cache = CacheHierarchyCoordinates(self)
            else:
                self.cache = None
            self.set_frame(0)
        else:
            # standard constructor
            self.model = model
            self.data = []
            self.number_best_scoring_models = number_best_scoring_models
            self.cache = cache

            if score_key is None:
                self.score_key = "Total_Score"
            else:
                self.score_key = score_key
            self.is_setup = None
            self.current_rmf = None
            self.current_frame = None
            self.current_index = None
            self.score_threshold = None

            if type(stat_file) is str:
                self.add_stat_file(stat_file)
            elif type(stat_file) is list:
                for f in stat_file:
                    self.add_stat_file(f)
1143 
1144  def add_stat_file(self,stat_file):
1145  try:
1146  import cPickle as pickle
1147  except ImportError:
1148  import pickle
1149 
1150  try:
1151  '''check that it is not a pickle file with saved data from a previous calculation'''
1152  self.load_data(stat_file)
1153 
1154  if self.number_best_scoring_models:
1155  scores = self.get_scores()
1156  max_score = sorted(scores)[0:min(len(self), self.number_best_scoring_models)][-1]
1157  self.do_filter_by_score(max_score)
1158 
1159  except pickle.UnpicklingError:
1160  '''alternatively read the ascii stat files'''
1161  try:
1162  scores,rmf_files,rmf_frame_indexes,features = self.get_info_from_stat_file(stat_file, self.score_threshold)
1163  except KeyError:
1164  # in this case check that is it an rmf file, probably without stat stored in
1165  try:
1166  # let's see if that is an rmf file
1167  rh = RMF.open_rmf_file_read_only(stat_file)
1168  nframes = rh.get_number_of_frames()
1169  scores=[0.0]*nframes
1170  rmf_files=[stat_file]*nframes
1171  rmf_frame_indexes=range(nframes)
1172  features={}
1173  except:
1174  return
1175 
1176 
1177  if len(set(rmf_files)) > 1:
1178  raise ("Multiple RMF files found")
1179 
1180  if not rmf_files:
1181  print("StatHierarchyHandler: Error: Trying to set none as rmf_file (probably empty stat file), aborting")
1182  return
1183 
1184  for n,index in enumerate(rmf_frame_indexes):
1185  featn_dict=dict([(k,features[k][n]) for k in features])
1186  self.data.append(IMP.pmi1.output.DataEntry(stat_file,rmf_files[n],index,scores[n],featn_dict))
1187 
1188  if self.number_best_scoring_models:
1189  scores=self.get_scores()
1190  max_score=sorted(scores)[0:min(len(self),self.number_best_scoring_models)][-1]
1191  self.do_filter_by_score(max_score)
1192 
1193  if not self.is_setup:
1194  RMFHierarchyHandler.__init__(self, self.model,self.get_rmf_names()[0])
1195  if self.cache:
1196  self.cache=CacheHierarchyCoordinates(self)
1197  else:
1198  self.cache=None
1199  self.is_setup=True
1200  self.current_rmf=self.get_rmf_names()[0]
1201 
1202  self.set_frame(0)
1203 
1204  def save_data(self,filename='data.pkl'):
1205  try:
1206  import cPickle as pickle
1207  except ImportError:
1208  import pickle
1209  fl=open(filename,'wb')
1210  pickle.dump(self.data,fl)
1211 
1212  def load_data(self,filename='data.pkl'):
1213  try:
1214  import cPickle as pickle
1215  except ImportError:
1216  import pickle
1217  fl=open(filename,'rb')
1218  data_structure=pickle.load(fl)
1219  #first check that it is a list
1220  if not type(data_structure) is list:
1221  raise TypeError("%filename should contain a list of IMP.pmi1.output.DataEntry or IMP.pmi1.output.Cluster" % filename)
1222  # second check the types
1223  if all(isinstance(item, IMP.pmi1.output.DataEntry) for item in data_structure):
1224  self.data=data_structure
1225  elif all(isinstance(item, IMP.pmi1.output.Cluster) for item in data_structure):
1226  nmodels=0
1227  for cluster in data_structure:
1228  nmodels+=len(cluster)
1229  self.data=[None]*nmodels
1230  for cluster in data_structure:
1231  for n,data in enumerate(cluster):
1232  index=cluster.members[n]
1233  self.data[index]=data
1234  else:
1235  raise TypeError("%filename should contain a list of IMP.pmi1.output.DataEntry or IMP.pmi1.output.Cluster" % filename)
1236 
1237  def set_frame(self,index):
1238  if self.cache is not None and self.cache[index]:
1239  self.cache.do_update(index)
1240  else:
1241  nm=self.data[index].rmf_name
1242  fidx=self.data[index].rmf_index
1243  if nm != self.current_rmf:
1244  self.link_to_rmf(nm)
1245  self.current_rmf=nm
1246  self.current_frame=-1
1247  if fidx!=self.current_frame:
1248  RMFHierarchyHandler.set_frame(self, fidx)
1249  self.current_frame=fidx
1250  if self.cache is not None:
1251  self.cache.do_store(index)
1252 
1253  self.current_index = index
1254 
1255  def __getitem__(self,int_slice_adaptor):
1256  if type(int_slice_adaptor) is int:
1257  self.set_frame(int_slice_adaptor)
1258  return self.data[int_slice_adaptor]
1259  elif type(int_slice_adaptor) is slice:
1260  return self.__iter__(int_slice_adaptor)
1261  else:
1262  raise TypeError("Unknown Type")
1263 
1264  def __len__(self):
1265  return len(self.data)
1266 
1267  def __iter__(self,slice_key=None):
1268  if slice_key is None:
1269  for i in range(len(self)):
1270  yield self[i]
1271  else:
1272  for i in range(len(self))[slice_key]:
1273  yield self[i]
1274 
1275  def do_filter_by_score(self,maximum_score):
1276  self.data=[d for d in self.data if d.score<=maximum_score]
1277 
1278  def get_scores(self):
1279  return [d.score for d in self.data]
1280 
1281  def get_feature_series(self,feature_name):
1282  return [d.features[feature_name] for d in self.data]
1283 
1284  def get_feature_names(self):
1285  return self.data[0].features.keys()
1286 
1287  def get_rmf_names(self):
1288  return [d.rmf_name for d in self.data]
1289 
1290  def get_stat_files_names(self):
1291  return [d.stat_file for d in self.data]
1292 
1293  def get_rmf_indexes(self):
1294  return [d.rmf_index for d in self.data]
1295 
1296  def get_info_from_stat_file(self, stat_file, score_threshold=None):
1297  po=ProcessOutput(stat_file)
1298  fs=po.get_keys()
1299  models = IMP.pmi1.io.get_best_models([stat_file],
1300  score_key=self.score_key,
1301  feature_keys=fs,
1302  rmf_file_key="rmf_file",
1303  rmf_file_frame_key="rmf_frame_index",
1304  prefiltervalue=score_threshold,
1305  get_every=1)
1306 
1307 
1308 
1309  scores = [float(y) for y in models[2]]
1310  rmf_files = models[0]
1311  rmf_frame_indexes = models[1]
1312  features=models[3]
1313  return scores, rmf_files, rmf_frame_indexes,features
1314 
1315 
class DataEntry(object):
    '''
    A class to store data associated to a model
    '''

    def __init__(self, stat_file=None, rmf_name=None, rmf_index=None,
                 score=None, features=None):
        # provenance of the model: stat file, RMF file and frame index
        self.rmf_name = rmf_name
        self.rmf_index = rmf_index
        self.score = score
        self.features = features
        self.stat_file = stat_file

    def __repr__(self):
        parts = ["IMP.pmi1.output.DataEntry\n",
                 "---- stat file %s \n" % (self.stat_file),
                 "---- rmf file %s \n" % (self.rmf_name),
                 "---- rmf index %s \n" % (str(self.rmf_index)),
                 "---- score %s \n" % (str(self.score)),
                 "---- number of features %s \n" % (str(len(self.features.keys())))]
        return "".join(parts)
1335 
1336 
class Cluster(object):
    '''
    A container for models organized into clusters
    '''

    def __init__(self, cid=None):
        """@param cid cluster identifier (any label)"""
        self.cluster_id = cid
        self.members = []        # global model indexes
        self.precision = None
        self.center_index = None
        self.members_data = {}   # index -> associated data (e.g. DataEntry)
        # Bug fix: initialize here so __repr__ works before any member is
        # added (previously it raised AttributeError).
        self.average_score = None

    def add_member(self, index, data=None):
        """Add a model (by global index) with its associated data and
        refresh the average score."""
        self.members.append(index)
        self.members_data[index] = data
        self.average_score = self.compute_score()

    def compute_score(self):
        """Return the average score of the members, or None when the
        member data carry no scores."""
        try:
            score = sum([d.score for d in self]) / len(self)
        except AttributeError:
            score = None
        return score

    def __repr__(self):
        s = "IMP.pmi1.output.Cluster\n"
        s += "---- cluster_id %s \n" % str(self.cluster_id)
        s += "---- precision %s \n" % str(self.precision)
        s += "---- average score %s \n" % str(self.average_score)
        s += "---- number of members %s \n" % str(len(self.members))
        s += "---- center index %s \n" % str(self.center_index)
        return s

    def __getitem__(self, int_slice_adaptor):
        """int -> data of the i-th member; slice -> iterator over members."""
        if type(int_slice_adaptor) is int:
            index = self.members[int_slice_adaptor]
            return self.members_data[index]
        elif type(int_slice_adaptor) is slice:
            return self.__iter__(int_slice_adaptor)
        else:
            raise TypeError("Unknown Type")

    def __len__(self):
        return len(self.members)

    def __iter__(self, slice_key=None):
        if slice_key is None:
            for i in range(len(self)):
                yield self[i]
        else:
            for i in range(len(self))[slice_key]:
                yield self[i]

    def __add__(self, other):
        """Merge `other` into this cluster IN PLACE and return self;
        precision and center index are invalidated."""
        self.members += other.members
        self.members_data.update(other.members_data)
        self.average_score = self.compute_score()
        self.precision = None
        self.center_index = None
        return self
1396 
1397 
def plot_clusters_populations(clusters):
    """Bar plot of the number of members of each cluster."""
    cluster_ids = []
    sizes = []
    for cl in clusters:
        cluster_ids.append(cl.cluster_id)
        sizes.append(len(cl))

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.bar(cluster_ids, sizes, 0.5, color='r')  # , yerr=men_std)
    ax.set_ylabel('Population')
    ax.set_xlabel(('Cluster index'))
    plt.show()
1411 
def plot_clusters_precisions(clusters):
    """Bar plot of the precision of each cluster (None plotted as 0)."""
    cluster_ids = []
    precision_values = []
    for cl in clusters:
        cluster_ids.append(cl.cluster_id)
        prec = cl.precision
        print(cl.cluster_id, prec)
        if prec is None:
            prec = 0.0
        precision_values.append(prec)

    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    ax.bar(cluster_ids, precision_values, 0.5, color='r')  # , yerr=men_std)
    ax.set_ylabel('Precision [A]')
    ax.set_xlabel(('Cluster index'))
    plt.show()
1430 
def plot_clusters_scores(clusters):
    """Box plot of the member scores of each cluster, written to
    scores.pdf via plot_fields_box_plots."""
    cluster_ids = []
    score_lists = []
    for cl in clusters:
        cluster_ids.append(cl.cluster_id)
        score_lists.append([entry.score for entry in cl])

    plot_fields_box_plots("scores.pdf", score_lists, cluster_ids,
                          frequencies=None, valuename="Scores",
                          positionname="Cluster index", xlabels=None,
                          scale_plot_length=1.0)
1442 
class CrossLinkIdentifierDatabase(object):
    """Store per-crosslink attributes keyed by an arbitrary identifier.

    Each entry is a dict of named fields (proteins, residues, scores, ...)
    that can be set and read individually, plus free-form typed features;
    the whole database can be pickled to / restored from a file.
    """

    def __init__(self):
        self.clidb = dict()

    def check_key(self, key):
        """Create an empty record for `key` if one is not present yet."""
        self.clidb.setdefault(key, {})

    def set_unique_id(self, key, value):
        self.check_key(key)
        self.clidb[key]["XLUniqueID"] = str(value)

    def set_protein1(self, key, value):
        self.check_key(key)
        self.clidb[key]["Protein1"] = str(value)

    def set_protein2(self, key, value):
        self.check_key(key)
        self.clidb[key]["Protein2"] = str(value)

    def set_residue1(self, key, value):
        self.check_key(key)
        self.clidb[key]["Residue1"] = int(value)

    def set_residue2(self, key, value):
        self.check_key(key)
        self.clidb[key]["Residue2"] = int(value)

    def set_idscore(self, key, value):
        self.check_key(key)
        self.clidb[key]["IDScore"] = float(value)

    def set_state(self, key, value):
        self.check_key(key)
        self.clidb[key]["State"] = int(value)

    def set_sigma1(self, key, value):
        self.check_key(key)
        self.clidb[key]["Sigma1"] = str(value)

    def set_sigma2(self, key, value):
        self.check_key(key)
        self.clidb[key]["Sigma2"] = str(value)

    def set_psi(self, key, value):
        self.check_key(key)
        self.clidb[key]["Psi"] = str(value)

    def get_unique_id(self, key):
        return self.clidb[key]["XLUniqueID"]

    def get_protein1(self, key):
        return self.clidb[key]["Protein1"]

    def get_protein2(self, key):
        return self.clidb[key]["Protein2"]

    def get_residue1(self, key):
        return self.clidb[key]["Residue1"]

    def get_residue2(self, key):
        return self.clidb[key]["Residue2"]

    def get_idscore(self, key):
        return self.clidb[key]["IDScore"]

    def get_state(self, key):
        return self.clidb[key]["State"]

    def get_sigma1(self, key):
        return self.clidb[key]["Sigma1"]

    def get_sigma2(self, key):
        return self.clidb[key]["Sigma2"]

    def get_psi(self, key):
        return self.clidb[key]["Psi"]

    def set_float_feature(self, key, value, feature_name):
        """Store `value` under `feature_name`, coerced to float."""
        self.check_key(key)
        self.clidb[key][feature_name] = float(value)

    def set_int_feature(self, key, value, feature_name):
        """Store `value` under `feature_name`, coerced to int."""
        self.check_key(key)
        self.clidb[key][feature_name] = int(value)

    def set_string_feature(self, key, value, feature_name):
        """Store `value` under `feature_name`, coerced to str."""
        self.check_key(key)
        self.clidb[key][feature_name] = str(value)

    def get_feature(self, key, feature_name):
        return self.clidb[key][feature_name]

    def write(self, filename):
        """Pickle the whole database to `filename`."""
        import pickle
        with open(filename, 'wb') as handle:
            pickle.dump(self.clidb, handle)

    def load(self, filename):
        """Replace the database with the one pickled in `filename`."""
        import pickle
        with open(filename, 'rb') as handle:
            self.clidb = pickle.load(handle)
1545 
def plot_fields(fields, framemin=None, framemax=None):
    """Plot each time series in `fields` in its own subplot.

    @param fields dict mapping field name -> list of values
    @param framemin first frame to plot (defaults to 0 once the first
           series is processed)
    @param framemax last frame to plot (defaults to the length of the
           first series processed)
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=4)
    fig, axs = plt.subplots(nrows=len(fields))
    fig.set_size_inches(10.5, 5.5 * len(fields))
    plt.rc('axes', color_cycle=['r'])

    for n, key in enumerate(fields):
        # the defaults are filled in lazily from the first series, and
        # then reused for all remaining ones (legacy behavior)
        if framemin is None:
            framemin = 0
        if framemax is None:
            framemax = len(fields[key])
        xvals = list(range(framemin, framemax))
        yvals = [float(v) for v in fields[key][framemin:framemax]]
        panel = axs[n] if len(fields) > 1 else axs
        panel.plot(xvals, yvals)
        panel.set_title(key, size="xx-large")
        panel.tick_params(labelsize=18, pad=10)

    # Tweak spacing between subplots to prevent labels from overlapping
    plt.subplots_adjust(hspace=0.3)
    plt.show()
1577 
1578 
def plot_field_histogram(
        name, values_lists, valuename=None, bins=40, colors=None, format="png",
        reference_xline=None, yplotrange=None, xplotrange=None, normalized=True,
        leg_names=None):
    '''Plot a list of histograms from a value list.
    (The `def` line was dropped by the HTML extraction; restored per the
    documented definition of plot_field_histogram.)
    @param name the name of the plot
    @param value_lists the list of list of values eg: [[...],[...],[...]]
    @param valuename the y-label
    @param bins the number of bins
    @param colors If None, will use rainbow. Else will use specific list
    @param format output format
    @param reference_xline plot a reference line parallel to the y-axis
    @param yplotrange the range for the y-axis
    @param xplotrange the range for the x-axis
    @param normalized whether the histogram is normalized or not
    @param leg_names names for the legend
    '''
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    fig = plt.figure(figsize=(18.0, 9.0))

    if colors is None:
        colors = cm.rainbow(np.linspace(0, 1, len(values_lists)))
    for nv, values in enumerate(values_lists):
        col = colors[nv]
        if leg_names is not None:
            label = leg_names[nv]
        else:
            label = str(nv)
        h = plt.hist(
            [float(y) for y in values],
            bins=bins,
            color=col,
            normed=normalized, histtype='step', lw=4,
            label=label)

    # plt.title(name,size="xx-large")
    plt.tick_params(labelsize=12, pad=10)
    if valuename is None:
        plt.xlabel(name, size="xx-large")
    else:
        plt.xlabel(valuename, size="xx-large")
    plt.ylabel("Frequency", size="xx-large")

    if yplotrange is not None:
        # Bug fix: plt.ylim() was called with no arguments, so the
        # requested y range was silently ignored.
        plt.ylim(yplotrange)
    if xplotrange is not None:
        plt.xlim(xplotrange)

    plt.legend(loc=2)

    if reference_xline is not None:
        plt.axvline(
            reference_xline,
            color='red',
            linestyle='dashed',
            linewidth=1)

    plt.savefig(name + "." + format, dpi=150, transparent=True)
    plt.show()
1642 
1643 
def plot_fields_box_plots(name, values, positions, frequencies=None,
                          valuename="None", positionname="None",
                          xlabels=None, scale_plot_length=1.0):
    '''
    Plot time series as boxplots.
    fields is a list of time series, positions are the x-values
    valuename is the y-label, positionname is the x-label
    '''
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib.patches import Polygon

    box_sets = []
    fig = plt.figure(figsize=(float(len(positions)) * scale_plot_length, 5.0))
    fig.canvas.set_window_title(name)

    ax1 = fig.add_subplot(111)

    plt.subplots_adjust(left=0.1, right=0.990, top=0.95, bottom=0.4)

    box_sets.append(plt.boxplot(values, notch=0, sym='', vert=1,
                                whis=1.5, positions=positions))

    plt.setp(box_sets[-1]['boxes'], color='black', lw=1.5)
    plt.setp(box_sets[-1]['whiskers'], color='black', ls=":", lw=1.5)

    if frequencies is not None:
        # overlay the raw values as green crosses
        for n, series in enumerate(values):
            xcoords = [positions[n]] * len(series)
            ax1.plot(xcoords, series, 'gx', alpha=0.7, markersize=7)

    # print ax1.xaxis.get_majorticklocs()
    if xlabels is not None:
        ax1.set_xticklabels(xlabels)
    plt.xticks(rotation=90)
    plt.xlabel(positionname)
    plt.ylabel(valuename)

    plt.savefig(name + ".pdf", dpi=150)
    plt.show()
1685 
1686 
def plot_xy_data(x, y, title=None, out_fn=None, display=True,
                 set_plot_yaxis_range=None, xlabel=None, ylabel=None):
    """Plot y against x as a red line; optionally fix the y range, save
    to <out_fn>.pdf and/or display the figure."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.rc('lines', linewidth=2)

    fig, ax = plt.subplots(nrows=1)
    fig.set_size_inches(8, 4.5)
    if title is not None:
        fig.canvas.set_window_title(title)

    # plt.rc('axes', color='r')
    ax.plot(x, y, color='r')
    if set_plot_yaxis_range is not None:
        xlo, xhi, _, _ = plt.axis()
        plt.axis((xlo, xhi,
                  set_plot_yaxis_range[0], set_plot_yaxis_range[1]))
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if out_fn is not None:
        plt.savefig(out_fn + ".pdf")
    if display:
        plt.show()
    plt.close(fig)
1717 
def plot_scatter_xy_data(x, y, labelx="None", labely="None",
                         xmin=None, xmax=None, ymin=None, ymax=None,
                         savefile=False, filename="None.eps", alpha=0.75):
    """Scatter plot of y against x; optionally fix the axis ranges and
    save the figure to `filename`."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    import sys
    from matplotlib import rc
    # rc('font', **{'family':'serif','serif':['Palatino']})
    rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})
    # rc('text', usetex=True)

    fig, axes = plt.subplots(1)
    panel = axes

    panel.set_xlabel(labelx, size="xx-large")
    panel.set_ylabel(labely, size="xx-large")
    panel.tick_params(labelsize=18, pad=10)

    series = [panel.plot(x, y, 'o', color='k', lw=2, ms=0.1,
                         alpha=alpha, c="w")]

    panel.legend(loc=0, frameon=False, scatterpoints=1, numpoints=1,
                 columnspacing=1)

    fig.set_size_inches(8.0, 8.0)
    fig.subplots_adjust(left=0.161, right=0.850, top=0.95, bottom=0.11)
    if (ymin is not None) and (ymax is not None):
        panel.set_ylim(ymin, ymax)
    if (xmin is not None) and (xmax is not None):
        panel.set_xlim(xmin, xmax)

    # plt.show()
    if savefile:
        fig.savefig(filename, dpi=300)
1760 
1761 
def get_graph_from_hierarchy(hier):
    """Build an edge-list graph from an IMP hierarchy and draw it,
    labelling only nodes close to the root (depth < 3)."""
    graph = []
    depth_dict = {}
    depth = 0
    (graph, depth, depth_dict) = recursive_graph(
        hier, graph, depth, depth_dict)

    # filters node labels according to depth_dict
    node_labels_dict = {}
    node_size_dict = {}
    for key in depth_dict:
        # Bug fix: the original rebound node_size_dict to a scalar on every
        # iteration instead of filling the per-node entry.
        node_size_dict[key] = 10 / depth_dict[key]
        if depth_dict[key] < 3:
            node_labels_dict[key] = key
        else:
            node_labels_dict[key] = ""
    draw_graph(graph, labels_dict=node_labels_dict)
1779 
1780 
def recursive_graph(hier, graph, depth, depth_dict):
    """Depth-first walk of an IMP hierarchy, appending (parent, child)
    name pairs to `graph` and recording each node's depth in `depth_dict`.
    Node names have the form "<hierarchy name>|#<particle index>".
    Returns the updated (graph, depth, depth_dict)."""
    depth = depth + 1
    node_name = (IMP.atom.Hierarchy(hier).get_name() + "|#" +
                 str(hier.get_particle().get_index()))
    depth_dict[node_name] = depth

    children = IMP.atom.Hierarchy(hier).get_children()

    # NOTE(review): a single-child node is treated as a leaf here, and
    # get_children() is not expected to return None — confirm intent.
    if len(children) == 1 or children is None:
        return (graph, depth - 1, depth_dict)

    for child in children:
        (graph, depth, depth_dict) = recursive_graph(
            child, graph, depth, depth_dict)
        child_name = (IMP.atom.Hierarchy(child).get_name() + "|#" +
                      str(child.get_particle().get_index()))
        graph.append((node_name, child_name))

    return (graph, depth - 1, depth_dict)
1805 
1806 
def draw_graph(graph, labels_dict=None, graph_layout='spring',
               node_size=5, node_color=None, node_alpha=0.3,
               node_text_size=11, fixed=None, pos=None,
               edge_color='blue', edge_alpha=0.3, edge_thickness=1,
               edge_text_pos=0.3,
               validation_edges=None,
               text_font='sans-serif',
               out_filename=None):
    """Draw a graph given as a list of (node1, node2) edges with networkx.

    @param graph list of (node1, node2) edge tuples
    @param labels_dict optional node -> label mapping
    @param graph_layout 'spring', 'spectral', 'random' or shell (default)
    @param node_size scalar, list, or node -> area dict
    @param node_color optional node -> hex color string dict
    @param edge_thickness scalar or per-edge list of weights
    @param validation_edges edges to highlight: green if present in
           `graph` (either direction), red if absent
    @param out_filename if given, save the figure there and also write
           the graph as out.gml
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import networkx as nx
    import matplotlib.pyplot as plt
    from math import sqrt, pi

    # create networkx graph
    G = nx.Graph()

    # add edges
    if type(edge_thickness) is list:
        for edge, weight in zip(graph, edge_thickness):
            G.add_edge(edge[0], edge[1], weight=weight)
    else:
        for edge in graph:
            G.add_edge(edge[0], edge[1])

    if node_color is None:
        node_color_rgb = (0, 0, 0)
        node_color_hex = "000000"
    else:
        # Restored line dropped by the HTML extraction (original 1837):
        # hex <-> rgb color converter from IMP.pmi1.tools.
        cc = IMP.pmi1.tools.ColorChange()
        tmpcolor_rgb = []
        tmpcolor_hex = []
        for node in G.nodes():
            cctuple = cc.rgb(node_color[node])
            tmpcolor_rgb.append(
                (cctuple[0] / 255, cctuple[1] / 255, cctuple[2] / 255))
            tmpcolor_hex.append(node_color[node])
        node_color_rgb = tmpcolor_rgb
        node_color_hex = tmpcolor_hex

    # get node sizes if dictionary
    if type(node_size) is dict:
        tmpsize = []
        for node in G.nodes():
            size = sqrt(node_size[node]) / pi * 10.0
            tmpsize.append(size)
        node_size = tmpsize

    for n, node in enumerate(G.nodes()):
        color = node_color_hex[n]
        size = node_size[n]
        nx.set_node_attributes(G, "graphics", {node: {'type': 'ellipse', 'w': size, 'h': size, 'fill': '#' + color, 'label': node}})
        nx.set_node_attributes(G, "LabelGraphics", {node: {'type': 'text', 'text': node, 'color': '#000000', 'visible': 'true'}})

    for edge in G.edges():
        nx.set_edge_attributes(G, "graphics", {edge: {'width': 1, 'fill': '#000000'}})

    # Bug fix: validation_edges defaults to None; iterating it unguarded
    # raised TypeError whenever the argument was omitted.
    for ve in (validation_edges or []):
        print(ve)
        if (ve[0], ve[1]) in G.edges():
            print("found forward")
            nx.set_edge_attributes(G, "graphics", {ve: {'width': 1, 'fill': '#00FF00'}})
        elif (ve[1], ve[0]) in G.edges():
            print("found backward")
            nx.set_edge_attributes(G, "graphics", {(ve[1], ve[0]): {'width': 1, 'fill': '#00FF00'}})
        else:
            G.add_edge(ve[0], ve[1])
            print("not found")
            nx.set_edge_attributes(G, "graphics", {ve: {'width': 1, 'fill': '#FF0000'}})

    # these are different layouts for the network you may try
    # shell seems to work best
    if graph_layout == 'spring':
        print(fixed, pos)
        graph_pos = nx.spring_layout(G, k=1.0 / 8.0, fixed=fixed, pos=pos)
    elif graph_layout == 'spectral':
        graph_pos = nx.spectral_layout(G)
    elif graph_layout == 'random':
        graph_pos = nx.random_layout(G)
    else:
        graph_pos = nx.shell_layout(G)

    # draw graph
    nx.draw_networkx_nodes(G, graph_pos, node_size=node_size,
                           alpha=node_alpha, node_color=node_color_rgb,
                           linewidths=0)
    nx.draw_networkx_edges(G, graph_pos, width=edge_thickness,
                           alpha=edge_alpha, edge_color=edge_color)
    nx.draw_networkx_labels(
        G, graph_pos, labels=labels_dict, font_size=node_text_size,
        font_family=text_font)
    if out_filename:
        plt.savefig(out_filename)
        nx.write_gml(G, 'out.gml')
    plt.show()
1903 
1904 
def draw_table():
    """Demo: render a 2D table with d3js inside IPython.

    Requires the third-party ipyD3 package; kept as an example only.
    """

    # still an example!

    from ipyD3 import d3object
    from IPython.display import display

    d3 = d3object(width=800,
                  height=400,
                  style='JFTable',
                  number=1,
                  d3=None,
                  title='Example table with d3js',
                  desc='An example table created created with d3js with data generated with Python.')
    # Monthly values for 8 products (one row per product, 12 months each).
    data = [
        [1277.0, 654.0, 288.0, 1976.0, 3281.0, 3089.0, 10336.0, 4650.0,
         4441.0, 4670.0, 944.0, 110.0],
        [1318.0, 664.0, 418.0, 1952.0, 3581.0, 4574.0, 11457.0, 6139.0,
         7078.0, 6561.0, 2354.0, 710.0],
        [1783.0, 774.0, 564.0, 1470.0, 3571.0, 3103.0, 9392.0, 5532.0,
         5661.0, 4991.0, 2032.0, 680.0],
        [1301.0, 604.0, 286.0, 2152.0, 3282.0, 3369.0, 10490.0, 5406.0,
         4727.0, 3428.0, 1559.0, 620.0],
        [1537.0, 1714.0, 724.0, 4824.0, 5551.0, 8096.0, 16589.0, 13650.0,
         9552.0, 13709.0, 2460.0, 720.0],
        [5691.0, 2995.0, 1680.0, 11741.0, 16232.0, 14731.0, 43522.0, 32794.0,
         26634.0, 31400.0, 7350.0, 3010.0],
        [1650.0, 2096.0, 60.0, 50.0, 1180.0, 5602.0, 15728.0, 6874.0,
         5115.0, 3510.0, 1390.0, 170.0],
        [72.0, 60.0, 60.0, 10.0, 120.0, 172.0, 1092.0, 675.0, 408.0,
         360.0, 156.0, 100.0]]
    # transpose so months become rows
    data = [list(i) for i in zip(*data)]
    # row labels ('Deecember' typo kept: it is runtime output of the demo)
    sRows = [['January', 'February', 'March', 'April', 'May', 'June',
              'July', 'August', 'September', 'October', 'November',
              'Deecember']]
    sColumns = [['Prod {0}'.format(i) for i in range(1, 9)],
                [None, '', None, None, 'Group 1', None, None, 'Group 2']]
    d3.addSimpleTable(data,
                      fontSizeCells=[12, ],
                      sRows=sRows,
                      sColumns=sColumns,
                      sRowsMargins=[5, 50, 0],
                      sColsMargins=[5, 20, 10],
                      spacing=0,
                      addBorders=1,
                      addOutsideBorders=-1,
                      rectWidth=45,
                      rectHeight=0
                      )
    html = d3.render(mode=['html', 'show'])
    display(html)
Class to link stat files to several RMF files.
Definition: /output.py:1088
def sublist_iterator
Yield all sublists of length >= lmin and <= lmax.
Definition: /tools.py:1168
def get_prot_name_from_particle
Get the protein name from the particle.
Definition: /output.py:209
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: Residue.h:158
def get_rbs_and_beads
Returns unique objects in original order.
Definition: /tools.py:1891
atom::Hierarchies create_hierarchies(RMF::FileConstHandle fh, Model *m)
RMF::FrameID save_frame(RMF::FileHandle file, std::string name="")
Save the current state of the linked objects as a new RMF frame.
Collect statistics from ProcessOutput.get_fields().
Definition: /output.py:760
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: atom/Atom.h:245
Change color code from hexadecimal to RGB.
Definition: /tools.py:1494
A container for models organized into clusters.
Definition: /output.py:1337
Class for easy writing of PDBs, RMFs, and stat files.
Definition: /output.py:65
Legacy PMI1 module to represent, score, sample and analyze models.
void handle_use_deprecated(std::string message)
Break in this method in gdb to find deprecated uses at runtime.
void write_pdb(const Selection &mhd, TextOutput out, unsigned int model=1)
Utility classes and functions for reading and storing PMI files.
def init_pdb
Init PDB Writing.
Definition: /output.py:106
Base class for capturing a modeling protocol.
Definition: /output.py:40
def get_residue_indexes
Retrieve the residue indexes for the given particle.
Definition: /tools.py:1064
def get_prot_name_from_particle
Return the component name provided a particle and a list of names.
Definition: /tools.py:1044
def plot_field_histogram
Plot a list of histograms from a value list.
Definition: /output.py:1579
static bool get_is_setup(Model *m, ParticleIndex pi)
Definition: Fragment.h:46
Miscellaneous utilities.
Definition: /tools.py:1
std::string get_module_version()
Return the version of this module, as a string.
The standard decorator for manipulating molecular structures.
Ints get_index(const ParticlesTemp &particles, const Subset &subset, const Subsets &excluded)
int get_number_of_frames(const ::npctransport_proto::Assignment &config, double time_step)
A decorator for a particle representing an atom.
Definition: atom/Atom.h:238
def link_to_rmf
Link to another RMF file.
Definition: /output.py:986
The type for a residue.
Class to allow more advanced handling of RMF files.
Definition: /output.py:960
A class to store data associated to a model.
Definition: /output.py:1316
void load_frame(RMF::FileConstHandle file, RMF::FrameID frame)
Load the given RMF frame into the state of the linked objects.
A decorator for a particle with x,y,z coordinates.
Definition: XYZ.h:30
A base class for Keys.
Definition: Key.h:45
void add_hierarchies(RMF::NodeHandle fh, const atom::Hierarchies &hs)
void add_geometries(RMF::NodeHandle parent, const display::GeometriesTemp &r)
Add geometries to a given parent node.
void add_restraints(RMF::NodeHandle fh, const Restraints &hs)
A decorator for a particle that is part of a rigid body but not rigid.
Definition: rigid_bodies.h:768
Display a segment connecting a pair of particles.
Definition: XYZR.h:170
A decorator for a residue.
Definition: Residue.h:137
Basic functionality that is expected to be used by a wide variety of IMP users.
def get_best_models
Given a list of stat files, read them all and find the best models.
A class for reading stat files (either RMF or ascii v1 and v2).
Definition: /output.py:771
def init_rmf
This function initializes an RMF file.
Definition: /output.py:404
void link_hierarchies(RMF::FileConstHandle fh, const atom::Hierarchies &hs)
void add_geometry(RMF::FileHandle file, display::Geometry *r)
Add a single geometry to the file.
Functionality for loading, creating, manipulating and scoring atomic structures.
def get_fields
Get the desired field names, and return a dictionary.
Definition: /output.py:835
Hierarchies get_leaves(const Selection &h)
def plot_fields_box_plots
Plot time series as boxplots.
Definition: /output.py:1644
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: rigid_bodies.h:770
std::string get_module_version()
Return the version of this module, as a string.
A decorator for a particle with x,y,z coordinates and a radius.
Definition: XYZR.h:27