# IMP Reference Guide 2.6.1 -- The Integrative Modeling Platform
# output.py
1 """@namespace IMP.pmi.output
2  Classes for writing output files and processing them.
3 """
4 
5 from __future__ import print_function, division
6 import IMP
7 import IMP.atom
8 import IMP.core
9 import IMP.pmi
10 import IMP.pmi.tools
11 import os
12 import sys
13 import RMF
14 import numpy as np
15 import operator
16 try:
17  import cPickle as pickle
18 except ImportError:
19  import pickle
20 
21 def _flatten(seq):
22  l = []
23  for elt in seq:
24  t = type(elt)
25  if t is tuple or t is list:
26  for elt2 in _flatten(elt):
27  l.append(elt2)
28  else:
29  l.append(elt)
30  return l
31 
class Output(object):
    """Class for easy writing of PDBs, RMFs, and stat files"""
    def __init__(self, ascii=True,atomistic=False):
        # ascii: write stat files as plain text; otherwise pickled binary
        # atomistic: write full atomic detail in PDBs
        #            (auto-enabled when a PMI2 hierarchy is detected)
        self.dictionary_pdbs = {}    # pdb file name -> hierarchy to write
        self.dictionary_rmfs = {}    # rmf file name -> RMF file handle
        self.dictionary_stats = {}   # stat file name -> list of output objects
        self.dictionary_stats2 = {}  # stat2 file name -> (objects, key->column map, summed objects, extra labels)
        self.best_score_list = None  # scores kept by init_pdb_best_scoring()
        self.nbestscoring = None     # number of best-scoring models to keep
        self.suffixes = []           # file-name prefixes for best-scoring PDBs
        self.replica_exchange = False
        self.ascii = ascii
        self.initoutput = {}         # static entries merged into every stat record
        self.residuetypekey = IMP.StringKey("ResidueName")
        # pool of chain identifiers, assigned to molecules in order
        self.chainids = "ABCDEFGHIJKLMNOPQRSTUVXYWZabcdefghijklmnopqrstuvxywz0123456789"
        self.dictchain = {} # keys are molecule names, values are chain ids
        self.particle_infos_for_pdb = {}
        self.atomistic=atomistic
        self.use_pmi2 = False        # set per-file in init_pdb()
51 
52  def get_pdb_names(self):
53  return list(self.dictionary_pdbs.keys())
54 
55  def get_rmf_names(self):
56  return list(self.dictionary_rmfs.keys())
57 
58  def get_stat_names(self):
59  return list(self.dictionary_stats.keys())
60 
    def init_pdb(self, name, prot):
        """Init PDB Writing.
        @param name The PDB filename
        @param prot The hierarchy to write to this pdb file
        \note if the PDB name is 'System' then will use Selection to get molecules
        """
        # truncate/create the file so later append-mode writes start clean
        flpdb = open(name, 'w')
        flpdb.close()
        self.dictionary_pdbs[name] = prot
        self.dictchain[name] = {}
        self.use_pmi2 = False

        # attempt to find PMI objects.
        if IMP.pmi.get_is_canonical(prot):
            # PMI2 hierarchy: chain IDs come from the molecules themselves
            self.use_pmi2 = True
            self.atomistic = True #detects automatically
            for n,mol in enumerate(IMP.atom.get_by_type(prot,IMP.atom.MOLECULE_TYPE)):
                chid = IMP.atom.Chain(mol).get_id()
                self.dictchain[name][IMP.pmi.get_molecule_name_and_copy(mol)] = chid
        else:
            # PMI1 hierarchy: assign one chain ID per top-level child, in order
            for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
                self.dictchain[name][i.get_name()] = self.chainids[n]
83 
    def write_psf(self,filename,name):
        """Write a CHARMM-style PSF topology for PDB output *name*, with one
        'atom' record per coarse-grained particle and bonds between
        residue-contiguous particles within each chain.
        @param filename the PSF file to write
        @param name the PDB output name (key into self.dictionary_pdbs)
        """
        flpsf=open(filename,'w')
        flpsf.write("PSF CMAP CHEQ"+"\n")
        # chain id -> list of (atom_index, residue_index) pairs
        index_residue_pair_list={}
        (particle_infos_for_pdb, geometric_center)=self.get_particle_infos_for_pdb_writing(name)
        nparticles=len(particle_infos_for_pdb)
        flpsf.write(str(nparticles)+" !NATOM"+"\n")
        for n,p in enumerate(particle_infos_for_pdb):
            # PSF atom indexes are 1-based
            atom_index=n+1
            atomtype=p[1]
            residue_type=p[2]
            chain=p[3]
            resid=p[4]
            # fixed-column NATOM record; atom type and charge columns are
            # filled with placeholder values ("C", 1.0, 0.0, ...)
            flpsf.write('{0:8d}{1:1s}{2:4s}{3:1s}{4:4s}{5:1s}{6:4s}{7:1s}{8:4s}{9:1s}{10:4s}{11:14.6f}{12:14.6f}{13:8d}{14:14.6f}{15:14.6f}'.format(atom_index," ",chain," ",str(resid)," ",'"'+residue_type.get_string()+'"'," ","C"," ","C",1.0,0.0,0,0.0,0.0))
            flpsf.write('\n')
            #flpsf.write(str(atom_index)+" "+str(chain)+" "+str(resid)+" "+str(residue_type).replace('"','')+" C C "+"1.0 0.0 0 0.0 0.0\n")
            if chain not in index_residue_pair_list:
                index_residue_pair_list[chain]=[(atom_index,resid)]
            else:
                index_residue_pair_list[chain].append((atom_index,resid))


        #now write the connectivity
        indexes_pairs=[]
        for chain in sorted(index_residue_pair_list.keys()):

            ls=index_residue_pair_list[chain]
            #sort by residue
            ls=sorted(ls, key=lambda tup: tup[1])
            #get the index list
            indexes=[x[0] for x in ls]
            # get the contiguous pairs
            indexes_pairs+=list(IMP.pmi.tools.sublist_iterator(indexes,lmin=2,lmax=2))
        nbonds=len(indexes_pairs)
        flpsf.write(str(nbonds)+" !NBOND: bonds"+"\n")

        # groups of at most 4 bonds (8 indexes) per output line
        sublists=[indexes_pairs[i:i+4] for i in range(0,len(indexes_pairs),4)]

        # save bonds in fized column format
        for ip in sublists:
            if len(ip)==4:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}{6:8d}{7:8d}'.format(ip[0][0],ip[0][1],
                            ip[1][0],ip[1][1],ip[2][0],ip[2][1],ip[3][0],ip[3][1]))
            elif len(ip)==3:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}'.format(ip[0][0],ip[0][1],ip[1][0],
                            ip[1][1],ip[2][0],ip[2][1]))
            elif len(ip)==2:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}'.format(ip[0][0],ip[0][1],ip[1][0],ip[1][1]))
            elif len(ip)==1:
                flpsf.write('{0:8d}{1:8d}'.format(ip[0][0],ip[0][1]))
            flpsf.write('\n')

        del particle_infos_for_pdb
        flpsf.close()
138 
139  def write_pdb(self,name,
140  appendmode=True,
141  translate_to_geometric_center=False):
142  if appendmode:
143  flpdb = open(name, 'a')
144  else:
145  flpdb = open(name, 'w')
146 
147  (particle_infos_for_pdb,
148  geometric_center) = self.get_particle_infos_for_pdb_writing(name)
149 
150  if not translate_to_geometric_center:
151  geometric_center = (0, 0, 0)
152 
153  for n,tupl in enumerate(particle_infos_for_pdb):
154  (xyz, atom_type, residue_type,
155  chain_id, residue_index,radius) = tupl
156  flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
157  xyz[1] - geometric_center[1],
158  xyz[2] - geometric_center[2]),
159  n+1, atom_type, residue_type,
160  chain_id, residue_index,' ',1.00,radius))
161  flpdb.write("ENDMDL\n")
162  flpdb.close()
163 
164  del particle_infos_for_pdb
165 
166  def get_particle_infos_for_pdb_writing(self, name):
167  # index_residue_pair_list={}
168 
169  # the resindexes dictionary keep track of residues that have been already
170  # added to avoid duplication
171  # highest resolution have highest priority
172  resindexes_dict = {}
173 
174  # this dictionary dill contain the sequence of tuples needed to
175  # write the pdb
176  particle_infos_for_pdb = []
177 
178  geometric_center = [0, 0, 0]
179  atom_count = 0
180  atom_index = 0
181 
182  if self.use_pmi2:
183  # select highest resolution
184  ps = IMP.atom.Selection(self.dictionary_pdbs[name],resolution=0).get_selected_particles()
185  else:
186  ps = IMP.atom.get_leaves(self.dictionary_pdbs[name])
187 
188  for n, p in enumerate(ps):
189  # this loop gets the protein name from the
190  # particle leave by descending into the hierarchy
191  if self.use_pmi2:
193  is_a_bead = True
194  else:
195  (protname, is_a_bead) = IMP.pmi.tools.get_prot_name_from_particle(
196  p, self.dictchain[name])
197 
198  if protname not in resindexes_dict:
199  resindexes_dict[protname] = []
200 
201  if IMP.atom.Atom.get_is_setup(p) and self.atomistic:
202  residue = IMP.atom.Residue(IMP.atom.Atom(p).get_parent())
203  rt = residue.get_residue_type()
204  resind = residue.get_index()
205  atomtype = IMP.atom.Atom(p).get_atom_type()
206  xyz = list(IMP.core.XYZ(p).get_coordinates())
207  radius = IMP.core.XYZR(p).get_radius()
208  geometric_center[0] += xyz[0]
209  geometric_center[1] += xyz[1]
210  geometric_center[2] += xyz[2]
211  atom_count += 1
212  particle_infos_for_pdb.append((xyz,
213  atomtype, rt, self.dictchain[name][protname], resind,radius))
214  resindexes_dict[protname].append(resind)
215 
217 
218  residue = IMP.atom.Residue(p)
219  resind = residue.get_index()
220  # skip if the residue was already added by atomistic resolution
221  # 0
222  if resind in resindexes_dict[protname]:
223  continue
224  else:
225  resindexes_dict[protname].append(resind)
226  rt = residue.get_residue_type()
227  xyz = IMP.core.XYZ(p).get_coordinates()
228  radius = IMP.core.XYZR(p).get_radius()
229  geometric_center[0] += xyz[0]
230  geometric_center[1] += xyz[1]
231  geometric_center[2] += xyz[2]
232  atom_count += 1
233  particle_infos_for_pdb.append((xyz,
234  IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind,radius))
235 
236  elif IMP.atom.Fragment.get_is_setup(p) and not is_a_bead:
237  resindexes = IMP.pmi.tools.get_residue_indexes(p)
238  resind = resindexes[len(resindexes) // 2]
239  if resind in resindexes_dict[protname]:
240  continue
241  else:
242  resindexes_dict[protname].append(resind)
243  rt = IMP.atom.ResidueType('BEA')
244  xyz = IMP.core.XYZ(p).get_coordinates()
245  radius = IMP.core.XYZR(p).get_radius()
246  geometric_center[0] += xyz[0]
247  geometric_center[1] += xyz[1]
248  geometric_center[2] += xyz[2]
249  atom_count += 1
250  particle_infos_for_pdb.append((xyz,
251  IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind,radius))
252 
253  else:
254  if is_a_bead:
255  rt = IMP.atom.ResidueType('BEA')
256  resindexes = IMP.pmi.tools.get_residue_indexes(p)
257  if len(resindexes) > 0:
258  resind = resindexes[len(resindexes) // 2]
259  xyz = IMP.core.XYZ(p).get_coordinates()
260  radius = IMP.core.XYZR(p).get_radius()
261  geometric_center[0] += xyz[0]
262  geometric_center[1] += xyz[1]
263  geometric_center[2] += xyz[2]
264  atom_count += 1
265  particle_infos_for_pdb.append((xyz,
266  IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind,radius))
267 
268  if atom_count > 0:
269  geometric_center = (geometric_center[0] / atom_count,
270  geometric_center[1] / atom_count,
271  geometric_center[2] / atom_count)
272 
273  particle_infos_for_pdb = sorted(particle_infos_for_pdb, key=operator.itemgetter(3, 4))
274 
275  return (particle_infos_for_pdb, geometric_center)
276 
277 
278  def write_pdbs(self, appendmode=True):
279  for pdb in self.dictionary_pdbs.keys():
280  self.write_pdb(pdb, appendmode)
281 
    def init_pdb_best_scoring(self,
                              suffix,
                              prot,
                              nbestscoring,
                              replica_exchange=False):
        """Set up writing of the nbestscoring best-scoring models as PDBs.
        Files are named suffix.<rank>.pdb, rank 0 being the best score.
        @param suffix file-name prefix for the best-scoring PDB files
        @param prot the hierarchy to write
        @param nbestscoring how many best-scoring conformations to keep
        @param replica_exchange if True, replicas share the best-score list
               through a common file
        """
        # save only the nbestscoring conformations
        # create as many pdbs as needed

        self.suffixes.append(suffix)
        self.replica_exchange = replica_exchange
        if not self.replica_exchange:
            # common usage
            # if you are not in replica exchange mode
            # initialize the array of scores internally
            self.best_score_list = []
        else:
            # otherwise the replicas must cominucate
            # through a common file to know what are the best scores
            self.best_score_file_name = "best.scores.rex.py"
            self.best_score_list = []
            best_score_file = open(self.best_score_file_name, "w")
            best_score_file.write(
                "self.best_score_list=" + str(self.best_score_list))
            best_score_file.close()

        self.nbestscoring = nbestscoring
        # create one (initially empty) pdb slot per rank
        for i in range(self.nbestscoring):
            name = suffix + "." + str(i) + ".pdb"
            flpdb = open(name, 'w')
            flpdb.close()
            self.dictionary_pdbs[name] = prot
            self.dictchain[name] = {}
            if IMP.pmi.get_is_canonical(prot):
                # PMI2: chain IDs come from the molecules themselves
                self.use_pmi2 = True
                self.atomistic = True
                for n,mol in enumerate(IMP.atom.get_by_type(prot,IMP.atom.MOLECULE_TYPE)):
                    self.dictchain[name][IMP.pmi.get_molecule_name_and_copy(mol)] = IMP.atom.Chain(mol).get_id()
            else:
                # PMI1: one chain ID per top-level child, assigned in order
                for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
                    self.dictchain[name][i.get_name()] = self.chainids[n]
322 
323  def write_pdb_best_scoring(self, score):
324  if self.nbestscoring is None:
325  print("Output.write_pdb_best_scoring: init_pdb_best_scoring not run")
326 
327  # update the score list
328  if self.replica_exchange:
329  # read the self.best_score_list from the file
330  exec(open(self.best_score_file_name).read())
331 
332  if len(self.best_score_list) < self.nbestscoring:
333  self.best_score_list.append(score)
334  self.best_score_list.sort()
335  index = self.best_score_list.index(score)
336  for suffix in self.suffixes:
337  for i in range(len(self.best_score_list) - 2, index - 1, -1):
338  oldname = suffix + "." + str(i) + ".pdb"
339  newname = suffix + "." + str(i + 1) + ".pdb"
340  # rename on Windows fails if newname already exists
341  if os.path.exists(newname):
342  os.unlink(newname)
343  os.rename(oldname, newname)
344  filetoadd = suffix + "." + str(index) + ".pdb"
345  self.write_pdb(filetoadd, appendmode=False)
346 
347  else:
348  if score < self.best_score_list[-1]:
349  self.best_score_list.append(score)
350  self.best_score_list.sort()
351  self.best_score_list.pop(-1)
352  index = self.best_score_list.index(score)
353  for suffix in self.suffixes:
354  for i in range(len(self.best_score_list) - 1, index - 1, -1):
355  oldname = suffix + "." + str(i) + ".pdb"
356  newname = suffix + "." + str(i + 1) + ".pdb"
357  os.rename(oldname, newname)
358  filenametoremove = suffix + \
359  "." + str(self.nbestscoring) + ".pdb"
360  os.remove(filenametoremove)
361  filetoadd = suffix + "." + str(index) + ".pdb"
362  self.write_pdb(filetoadd, appendmode=False)
363 
364  if self.replica_exchange:
365  # write the self.best_score_list to the file
366  best_score_file = open(self.best_score_file_name, "w")
367  best_score_file.write(
368  "self.best_score_list=" + str(self.best_score_list))
369  best_score_file.close()
370 
371  def init_rmf(self, name, hierarchies, rs=None, geometries=None):
372  rh = RMF.create_rmf_file(name)
373  IMP.rmf.add_hierarchies(rh, hierarchies)
374  if rs is not None:
376  if geometries is not None:
377  IMP.rmf.add_geometries(rh,geometries)
378  self.dictionary_rmfs[name] = rh
379 
380  def add_restraints_to_rmf(self, name, objectlist):
381  flatobjectlist=_flatten(objectlist)
382  for o in flatobjectlist:
383  try:
384  rs = o.get_restraint_for_rmf()
385  except:
386  rs = o.get_restraint()
388  self.dictionary_rmfs[name],
389  rs.get_restraints())
390 
391  def add_geometries_to_rmf(self, name, objectlist):
392  for o in objectlist:
393  geos = o.get_geometries()
394  IMP.rmf.add_geometries(self.dictionary_rmfs[name], geos)
395 
396  def add_particle_pair_from_restraints_to_rmf(self, name, objectlist):
397  for o in objectlist:
398 
399  pps = o.get_particle_pairs()
400  for pp in pps:
402  self.dictionary_rmfs[name],
404 
405  def write_rmf(self, name):
406  IMP.rmf.save_frame(self.dictionary_rmfs[name])
407  self.dictionary_rmfs[name].flush()
408 
409  def close_rmf(self, name):
410  del self.dictionary_rmfs[name]
411 
412  def write_rmfs(self):
413  for rmf in self.dictionary_rmfs.keys():
414  self.write_rmf(rmf)
415 
416  def init_stat(self, name, listofobjects):
417  if self.ascii:
418  flstat = open(name, 'w')
419  flstat.close()
420  else:
421  flstat = open(name, 'wb')
422  flstat.close()
423 
424  # check that all objects in listofobjects have a get_output method
425  for l in listofobjects:
426  if not "get_output" in dir(l):
427  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
428  self.dictionary_stats[name] = listofobjects
429 
430  def set_output_entry(self, key, value):
431  self.initoutput.update({key: value})
432 
433  def write_stat(self, name, appendmode=True):
434  output = self.initoutput
435  for obj in self.dictionary_stats[name]:
436  d = obj.get_output()
437  # remove all entries that begin with _ (private entries)
438  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
439  output.update(dfiltered)
440 
441  if appendmode:
442  writeflag = 'a'
443  else:
444  writeflag = 'w'
445 
446  if self.ascii:
447  flstat = open(name, writeflag)
448  flstat.write("%s \n" % output)
449  flstat.close()
450  else:
451  flstat = open(name, writeflag + 'b')
452  cPickle.dump(output, flstat, 2)
453  flstat.close()
454 
455  def write_stats(self):
456  for stat in self.dictionary_stats.keys():
457  self.write_stat(stat)
458 
459  def get_stat(self, name):
460  output = {}
461  for obj in self.dictionary_stats[name]:
462  output.update(obj.get_output())
463  return output
464 
465  def write_test(self, name, listofobjects):
466 # write the test:
467 # output=output.Output()
468 # output.write_test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
469 # run the test:
470 # output=output.Output()
471 # output.test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
472  flstat = open(name, 'w')
473  output = self.initoutput
474  for l in listofobjects:
475  if not "get_test_output" in dir(l) and not "get_output" in dir(l):
476  raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
477  self.dictionary_stats[name] = listofobjects
478 
479  for obj in self.dictionary_stats[name]:
480  try:
481  d = obj.get_test_output()
482  except:
483  d = obj.get_output()
484  # remove all entries that begin with _ (private entries)
485  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
486  output.update(dfiltered)
487  #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
488  #output.update(
489  # {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
490  flstat.write("%s \n" % output)
491  flstat.close()
492 
    def test(self, name, listofobjects, tolerance=1e-5):
        """Compare the current output of *listofobjects* against the
        reference stat file *name* written by write_test().
        @param name the reference stat file to read
        @param listofobjects objects providing get_test_output() or get_output()
        @param tolerance maximum allowed absolute difference for numeric values
        @return True if every shared key matches (within tolerance for numbers)
        @raise ValueError if an object provides neither output method
        """
        output = self.initoutput
        for l in listofobjects:
            if not "get_test_output" in dir(l) and not "get_output" in dir(l):
                raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
        for obj in listofobjects:
            try:
                output.update(obj.get_test_output())
            except:
                output.update(obj.get_output())
        #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
        #output.update(
        #    {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})

        flstat = open(name, 'r')

        passed=True
        for l in flstat:
            # each line of the reference file is the repr of a dictionary
            test_dict = eval(l)
            for k in test_dict:
                if k in output:
                    old_value = str(test_dict[k])
                    new_value = str(output[k])
                    # decide whether to compare numerically or exactly
                    try:
                        float(old_value)
                        is_float = True
                    except ValueError:
                        is_float = False

                    if is_float:
                        # numeric values compare within tolerance
                        fold = float(old_value)
                        fnew = float(new_value)
                        diff = abs(fold - fnew)
                        if diff > tolerance:
                            print("%s: test failed, old value: %s new value %s; "
                                  "diff %f > %f" % (str(k), str(old_value),
                                                    str(new_value), diff,
                                                    tolerance), file=sys.stderr)
                            passed=False
                    elif test_dict[k] != output[k]:
                        # non-numeric values must match exactly; long values
                        # are not echoed to keep the log readable
                        if len(old_value) < 50 and len(new_value) < 50:
                            print("%s: test failed, old value: %s new value %s"
                                  % (str(k), old_value, new_value), file=sys.stderr)
                            passed=False
                        else:
                            print("%s: test failed, omitting results (too long)"
                                  % str(k), file=sys.stderr)
                            passed=False

                else:
                    # key present in the reference but not produced now;
                    # reported but does not fail the test
                    print("%s from old objects (file %s) not in new objects"
                          % (str(k), str(name)), file=sys.stderr)
        return passed
546 
547  def get_environment_variables(self):
548  import os
549  return str(os.environ)
550 
551  def get_versions_of_relevant_modules(self):
552  import IMP
553  versions = {}
554  versions["IMP_VERSION"] = IMP.get_module_version()
555  try:
556  import IMP.pmi
557  versions["PMI_VERSION"] = IMP.pmi.get_module_version()
558  except (ImportError):
559  pass
560  try:
561  import IMP.isd2
562  versions["ISD2_VERSION"] = IMP.isd2.get_module_version()
563  except (ImportError):
564  pass
565  try:
566  import IMP.isd_emxl
567  versions["ISD_EMXL_VERSION"] = IMP.isd_emxl.get_module_version()
568  except (ImportError):
569  pass
570  return versions
571 
#-------------------
    def init_stat2(
            self,
            name,
            listofobjects,
            extralabels=None,
            listofsummedobjects=None):
        """Set up a 'stat2' file: a compact stat format whose header line
        maps integer column ids to key names; each later record is keyed
        by those ids (see write_stat2()).
        @param name the stat2 file name
        @param listofobjects objects providing get_output()
        @param extralabels extra static keys, filled from set_output_entry()
        @param listofsummedobjects list of ([obj1,obj2,...],label) tuples whose
               _TotalScore entries are summed into one labeled column
        @raise ValueError if an object lacks get_output(), or a summed object
               lacks a _TotalScore entry
        """
        # this is a new stat file that should be less
        # space greedy!
        # listofsummedobjects must be in the form [([obj1,obj2,obj3,obj4...],label)]
        # extralabels

        if listofsummedobjects is None:
            listofsummedobjects = []
        if extralabels is None:
            extralabels = []
        flstat = open(name, 'w')
        output = {}
        stat2_keywords = {"STAT2HEADER": "STAT2HEADER"}
        stat2_keywords.update(
            {"STAT2HEADER_ENVIRON": str(self.get_environment_variables())})
        stat2_keywords.update(
            {"STAT2HEADER_IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
        stat2_inverse = {}

        for l in listofobjects:
            if not "get_output" in dir(l):
                raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
            else:
                d = l.get_output()
                # remove all entries that begin with _ (private entries)
                dfiltered = dict((k, v)
                                 for k, v in d.items() if k[0] != "_")
                output.update(dfiltered)

        # check for customizable entries
        for l in listofsummedobjects:
            for t in l[0]:
                if not "get_output" in dir(t):
                    raise ValueError("Output: object %s doesn't have get_output() method" % str(t))
                else:
                    if "_TotalScore" not in t.get_output():
                        raise ValueError("Output: object %s doesn't have _TotalScore entry to be summed" % str(t))
                    else:
                        output.update({l[1]: 0.0})

        for k in extralabels:
            output.update({k: 0.0})

        # assign an integer column id to every key; keep the inverse
        # mapping for writing records later
        for n, k in enumerate(output):
            stat2_keywords.update({n: k})
            stat2_inverse.update({k: n})

        flstat.write("%s \n" % stat2_keywords)
        flstat.close()
        self.dictionary_stats2[name] = (
            listofobjects,
            stat2_inverse,
            listofsummedobjects,
            extralabels)
632 
633  def write_stat2(self, name, appendmode=True):
634  output = {}
635  (listofobjects, stat2_inverse, listofsummedobjects,
636  extralabels) = self.dictionary_stats2[name]
637 
638  # writing objects
639  for obj in listofobjects:
640  od = obj.get_output()
641  dfiltered = dict((k, v) for k, v in od.items() if k[0] != "_")
642  for k in dfiltered:
643  output.update({stat2_inverse[k]: od[k]})
644 
645  # writing summedobjects
646  for l in listofsummedobjects:
647  partial_score = 0.0
648  for t in l[0]:
649  d = t.get_output()
650  partial_score += float(d["_TotalScore"])
651  output.update({stat2_inverse[l[1]]: str(partial_score)})
652 
653  # writing extralabels
654  for k in extralabels:
655  if k in self.initoutput:
656  output.update({stat2_inverse[k]: self.initoutput[k]})
657  else:
658  output.update({stat2_inverse[k]: "None"})
659 
660  if appendmode:
661  writeflag = 'a'
662  else:
663  writeflag = 'w'
664 
665  flstat = open(name, writeflag)
666  flstat.write("%s \n" % output)
667  flstat.close()
668 
669  def write_stats2(self):
670  for stat in self.dictionary_stats2.keys():
671  self.write_stat2(stat)
672 
673 
class ProcessOutput(object):
    """A class for reading stat files"""
    def __init__(self, filename):
        # filename: path to a stat (v1) or stat2 file; the format is
        # auto-detected from the first line
        self.filename = filename
        self.isstat1 = False
        self.isstat2 = False

        # open the file
        if not self.filename is None:
            f = open(self.filename, "r")
        else:
            raise ValueError("No file name provided. Use -h for help")

        # get the keys from the first line
        for line in f.readlines():
            # NOTE(review): eval of file content; stat files are assumed to
            # be trusted, locally produced data
            d = eval(line)
            self.klist = list(d.keys())
            # check if it is a stat2 file
            if "STAT2HEADER" in self.klist:
                self.isstat2 = True
                # drop the header bookkeeping entries, keeping only the
                # column-id -> key-name mapping
                for k in self.klist:
                    if "STAT2HEADER" in str(k):
                        # if print_header: print k, d[k]
                        del d[k]
                stat2_dict = d
                # get the list of keys sorted by value
                kkeys = [k[0]
                         for k in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
                self.klist = [k[1]
                              for k in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
                # key name -> integer column id, used when reading records
                self.invstat2_dict = {}
                for k in kkeys:
                    self.invstat2_dict.update({stat2_dict[k]: k})
            else:
                self.isstat1 = True
                self.klist.sort()

            # only the first line is needed for format detection
            break
        f.close()

    def get_keys(self):
        """Return the list of field names available in the file."""
        return self.klist

    def show_keys(self, ncolumns=2, truncate=65):
        """Print the available field names in a multi-column layout."""
        IMP.pmi.tools.print_multicolumn(self.get_keys(), ncolumns, truncate)

    def get_fields(self, fields, filtertuple=None, filterout=None, get_every=1):
        '''
        Get the desired field names, and return a dictionary.

        @param fields desired field names
        @param filterout specify if you want to "grep" out something from
               the file, so that it is faster
        @param filtertuple a tuple that contains
               ("TheKeyToBeFiltered",relationship,value)
               where relationship = "<", "==", or ">"
        @param get_every only read every Nth line from the file
        '''

        outdict = {}
        for field in fields:
            outdict[field] = []

        # print fields values
        f = open(self.filename, "r")
        line_number = 0

        for line in f.readlines():
            # cheap textual filter applied before parsing the line
            if not filterout is None:
                if filterout in line:
                    continue
            line_number += 1

            if line_number % get_every != 0:
                continue
            #if line_number % 1000 == 0:
            #    print "ProcessOutput.get_fields: read line %s from file %s" % (str(line_number), self.filename)
            try:
                d = eval(line)
            except:
                print("# Warning: skipped line number " + str(line_number) + " not a valid line")
                continue

            if self.isstat1:

                # apply the numeric filter, if any, before collecting fields
                if not filtertuple is None:
                    keytobefiltered = filtertuple[0]
                    relationship = filtertuple[1]
                    value = filtertuple[2]
                    if relationship == "<":
                        if float(d[keytobefiltered]) >= value:
                            continue
                    if relationship == ">":
                        if float(d[keytobefiltered]) <= value:
                            continue
                    if relationship == "==":
                        if float(d[keytobefiltered]) != value:
                            continue
                [outdict[field].append(d[field]) for field in fields]

            elif self.isstat2:
                # the first line of a stat2 file is the header; skip it
                if line_number == 1:
                    continue

                # stat2 records are keyed by integer column ids, so the
                # filter key must be translated through invstat2_dict
                if not filtertuple is None:
                    keytobefiltered = filtertuple[0]
                    relationship = filtertuple[1]
                    value = filtertuple[2]
                    if relationship == "<":
                        if float(d[self.invstat2_dict[keytobefiltered]]) >= value:
                            continue
                    if relationship == ">":
                        if float(d[self.invstat2_dict[keytobefiltered]]) <= value:
                            continue
                    if relationship == "==":
                        if float(d[self.invstat2_dict[keytobefiltered]]) != value:
                            continue

                [outdict[field].append(d[self.invstat2_dict[field]])
                 for field in fields]
        f.close()
        return outdict
796 
797 
798 
class CrossLinkIdentifierDatabase(object):
    """Per-crosslink property store: maps a key to a dictionary of typed
    fields (proteins, residues, scores, nuisance values, custom features)."""

    def __init__(self):
        self.clidb = dict()

    def check_key(self, key):
        """Create an empty record for *key* if it does not exist yet."""
        if key not in self.clidb:
            self.clidb[key] = {}

    def _store(self, key, field, value):
        # shared setter: ensure the record exists, then store the field
        self.check_key(key)
        self.clidb[key][field] = value

    def set_unique_id(self, key, value):
        self._store(key, "XLUniqueID", str(value))

    def set_protein1(self, key, value):
        self._store(key, "Protein1", str(value))

    def set_protein2(self, key, value):
        self._store(key, "Protein2", str(value))

    def set_residue1(self, key, value):
        self._store(key, "Residue1", int(value))

    def set_residue2(self, key, value):
        self._store(key, "Residue2", int(value))

    def set_idscore(self, key, value):
        self._store(key, "IDScore", float(value))

    def set_state(self, key, value):
        self._store(key, "State", int(value))

    def set_sigma1(self, key, value):
        self._store(key, "Sigma1", str(value))

    def set_sigma2(self, key, value):
        self._store(key, "Sigma2", str(value))

    def set_psi(self, key, value):
        self._store(key, "Psi", str(value))

    def get_unique_id(self, key):
        return self.clidb[key]["XLUniqueID"]

    def get_protein1(self, key):
        return self.clidb[key]["Protein1"]

    def get_protein2(self, key):
        return self.clidb[key]["Protein2"]

    def get_residue1(self, key):
        return self.clidb[key]["Residue1"]

    def get_residue2(self, key):
        return self.clidb[key]["Residue2"]

    def get_idscore(self, key):
        return self.clidb[key]["IDScore"]

    def get_state(self, key):
        return self.clidb[key]["State"]

    def get_sigma1(self, key):
        return self.clidb[key]["Sigma1"]

    def get_sigma2(self, key):
        return self.clidb[key]["Sigma2"]

    def get_psi(self, key):
        return self.clidb[key]["Psi"]

    def set_float_feature(self, key, value, feature_name):
        self._store(key, feature_name, float(value))

    def set_int_feature(self, key, value, feature_name):
        self._store(key, feature_name, int(value))

    def set_string_feature(self, key, value, feature_name):
        self._store(key, feature_name, str(value))

    def get_feature(self, key, feature_name):
        return self.clidb[key][feature_name]

    def write(self, filename):
        """Pickle the whole database to *filename*."""
        import pickle
        with open(filename, 'wb') as handle:
            pickle.dump(self.clidb, handle)

    def load(self, filename):
        """Replace the database contents with the pickle in *filename*."""
        import pickle
        with open(filename, 'rb') as handle:
            self.clidb = pickle.load(handle)
901 
def plot_fields(fields, framemin=None, framemax=None):
    """Plot each field as a red time series, one subplot per field, and
    show the figure.
    @param fields dict mapping field name to a list of values
    @param framemin first frame index to plot (default 0)
    @param framemax one past the last frame index to plot (default: all)
    """
    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before importing pyplot
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=4)
    fig, axs = plt.subplots(nrows=len(fields))
    fig.set_size_inches(10.5, 5.5 * len(fields))
    plt.rc('axes', color_cycle=['r'])

    n = 0
    for key in fields:
        if framemin is None:
            framemin = 0
        if framemax is None:
            framemax = len(fields[key])
        x = list(range(framemin, framemax))
        y = [float(y) for y in fields[key][framemin:framemax]]
        # with a single field, plt.subplots returns a bare Axes, not an array
        if len(fields) > 1:
            axs[n].plot(x, y)
            axs[n].set_title(key, size="xx-large")
            axs[n].tick_params(labelsize=18, pad=10)
        else:
            axs.plot(x, y)
            axs.set_title(key, size="xx-large")
            axs.tick_params(labelsize=18, pad=10)
        n += 1

    # Tweak spacing between subplots to prevent labels from overlapping
    plt.subplots_adjust(hspace=0.3)
    plt.show()
933 
934 
def plot_field_histogram(
        name, values_lists, valuename=None, bins=40, colors=None, format="png",
        reference_xline=None, yplotrange=None, xplotrange=None, normalized=True,
        leg_names=None):
    # NOTE(review): the "def plot_field_histogram(" header was dropped by the
    # extraction; restored from the upstream IMP.pmi.output API
    '''Plot a list of histograms from a value list.
    @param name the name of the plot
    @param value_lists the list of list of values eg: [[...],[...],[...]]
    @param valuename the y-label
    @param bins the number of bins
    @param colors If None, will use rainbow. Else will use specific list
    @param format output format
    @param reference_xline plot a reference line parallel to the y-axis
    @param yplotrange the range for the y-axis
    @param normalized whether the histogram is normalized or not
    @param leg_names names for the legend
    '''

    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before importing pyplot
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    fig = plt.figure(figsize=(18.0, 9.0))

    if colors is None:
        colors = cm.rainbow(np.linspace(0, 1, len(values_lists)))
    for nv, values in enumerate(values_lists):
        col = colors[nv]
        if leg_names is not None:
            label = leg_names[nv]
        else:
            label = str(nv)
        h = plt.hist(
            [float(y) for y in values],
            bins=bins,
            color=col,
            normed=normalized, histtype='step', lw=4,
            label=label)

    # plt.title(name,size="xx-large")
    plt.tick_params(labelsize=12, pad=10)
    if valuename is None:
        plt.xlabel(name, size="xx-large")
    else:
        plt.xlabel(valuename, size="xx-large")
    plt.ylabel("Frequency", size="xx-large")

    if not yplotrange is None:
        # bugfix: plt.ylim() without arguments only queries the current
        # limits; the requested range was being ignored
        plt.ylim(yplotrange)
    if not xplotrange is None:
        plt.xlim(xplotrange)

    plt.legend(loc=2)

    if not reference_xline is None:
        plt.axvline(
            reference_xline,
            color='red',
            linestyle='dashed',
            linewidth=1)

    plt.savefig(name + "." + format, dpi=150, transparent=True)
    plt.show()
998 
999 
def plot_fields_box_plots(name, values, positions, frequencies=None,
                          valuename="None", positionname="None", xlabels=None,scale_plot_length=1.0):
    '''
    Plot time series as boxplots.
    fields is a list of time series, positions are the x-values
    valuename is the y-label, positionname is the x-label
    '''

    import matplotlib as mpl
    mpl.use('Agg')  # headless backend; must be set before importing pyplot
    import matplotlib.pyplot as plt
    from matplotlib.patches import Polygon

    bps = []
    # figure width grows with the number of positions
    fig = plt.figure(figsize=(float(len(positions))*scale_plot_length, 5.0))
    fig.canvas.set_window_title(name)

    ax1 = fig.add_subplot(111)

    plt.subplots_adjust(left=0.1, right=0.990, top=0.95, bottom=0.4)

    # one boxplot per position; sym='' suppresses outlier markers
    bps.append(plt.boxplot(values, notch=0, sym='', vert=1,
                           whis=1.5, positions=positions))

    plt.setp(bps[-1]['boxes'], color='black', lw=1.5)
    plt.setp(bps[-1]['whiskers'], color='black', ls=":", lw=1.5)

    if frequencies is not None:
        # overlay the raw samples as green crosses on top of the boxes
        for n,v in enumerate(values):
            plist=[positions[n]]*len(v)
            ax1.plot(plist, v, 'gx', alpha=0.7, markersize=7)

    # print ax1.xaxis.get_majorticklocs()
    if not xlabels is None:
        ax1.set_xticklabels(xlabels)
    plt.xticks(rotation=90)
    plt.xlabel(positionname)
    plt.ylabel(valuename)

    plt.savefig(name+".pdf",dpi=150)
    plt.show()
1041 
1042 
def plot_xy_data(x,y,title=None,out_fn=None,display=True,set_plot_yaxis_range=None,
                 xlabel=None,ylabel=None):
    '''
    Plot y against x as a single red line.
    @param title optional figure/window title
    @param out_fn if given, save the figure as out_fn + ".pdf"
    @param display whether to call plt.show()
    @param set_plot_yaxis_range optional (ymin, ymax) pair to clamp the y-axis
    @param xlabel,ylabel optional axis labels
    '''
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.rc('lines', linewidth=2)

    fig, axis = plt.subplots(nrows=1)
    fig.set_size_inches(8,4.5)
    if title is not None:
        fig.canvas.set_window_title(title)

    axis.plot(x, y, color='r')
    # Clamp only the y-axis; keep whatever x range matplotlib chose.
    if set_plot_yaxis_range is not None:
        xlo, xhi, _, _ = plt.axis()
        plt.axis((xlo, xhi,
                  set_plot_yaxis_range[0], set_plot_yaxis_range[1]))
    if title is not None:
        axis.set_title(title)
    if xlabel is not None:
        axis.set_xlabel(xlabel)
    if ylabel is not None:
        axis.set_ylabel(ylabel)
    if out_fn is not None:
        plt.savefig(out_fn + ".pdf")
    if display:
        plt.show()
    plt.close(fig)
1073 
def plot_scatter_xy_data(x,y,labelx="None",labely="None",
                         xmin=None,xmax=None,ymin=None,ymax=None,
                         savefile=False,filename="None.eps",alpha=0.75):
    '''
    Make a simple scatter plot of y against x.
    @param x,y the data series
    @param labelx,labely axis labels
    @param xmin,xmax,ymin,ymax optional axis limits; a pair is applied
           only when both ends are given
    @param savefile if True, write the figure to `filename`
    @param filename output file name (used only when savefile is True)
    @param alpha marker transparency
    '''
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib import rc
    # (removed an unused `import sys` that the original carried)
    #rc('font', **{'family':'serif','serif':['Palatino']})
    rc('font',**{'family':'sans-serif','sans-serif':['Helvetica']})
    #rc('text', usetex=True)

    fig, axs = plt.subplots(1)

    axs0 = axs

    axs0.set_xlabel(labelx, size="xx-large")
    axs0.set_ylabel(labely, size="xx-large")
    axs0.tick_params(labelsize=18, pad=10)

    plot2 = []

    plot2.append(axs0.plot(x, y, 'o', color='k',lw=2, ms=0.1, alpha=alpha, c="w"))

    axs0.legend(
        loc=0,
        frameon=False,
        scatterpoints=1,
        numpoints=1,
        columnspacing=1)

    fig.set_size_inches(8.0, 8.0)
    fig.subplots_adjust(left=0.161, right=0.850, top=0.95, bottom=0.11)
    # Apply an axis range only when both bounds were supplied.
    if ymin is not None and ymax is not None:
        axs0.set_ylim(ymin, ymax)
    if xmin is not None and xmax is not None:
        axs0.set_xlim(xmin, xmax)

    #plt.show()
    if savefile:
        fig.savefig(filename, dpi=300)
1116 
1117 
def get_graph_from_hierarchy(hier):
    """Build an edge list from an IMP hierarchy and draw it.

    Node labels are kept only for nodes shallower than depth 3; deeper
    nodes get an empty label so the plot stays readable.
    @param hier the root IMP hierarchy particle
    """
    graph = []
    depth_dict = {}
    depth = 0
    (graph, depth, depth_dict) = recursive_graph(
        hier, graph, depth, depth_dict)

    # filters node labels according to depth_dict
    node_labels_dict = {}
    node_size_dict = {}
    for key in depth_dict:
        # Bug fix: the original did `node_size_dict = 10 / depth_dict[key]`,
        # which replaced the dict with a scalar on every iteration instead of
        # recording a per-node size.
        node_size_dict[key] = 10 / depth_dict[key]
        if depth_dict[key] < 3:
            node_labels_dict[key] = key
        else:
            node_labels_dict[key] = ""
    draw_graph(graph, labels_dict=node_labels_dict)
1135 
1136 
def recursive_graph(hier, graph, depth, depth_dict):
    """Recursively walk `hier`, accumulating (parent, child) name pairs.

    Node names have the form "<hierarchy name>|#<particle index>".
    Hierarchies with exactly one child are treated as leaves.

    @param hier current hierarchy node
    @param graph edge list accumulated so far
    @param depth current recursion depth
    @param depth_dict maps node name -> depth in the tree
    @return the updated (graph, depth, depth_dict) tuple
    """
    depth = depth + 1
    nameh = IMP.atom.Hierarchy(hier).get_name()
    index = str(hier.get_particle().get_index())
    name1 = nameh + "|#" + index
    depth_dict[name1] = depth

    children = IMP.atom.Hierarchy(hier).get_children()

    # Bug fix: test for None before calling len(); the original order
    # (`len(children) == 1 or children is None`) would raise TypeError
    # before the None guard could ever apply.
    if children is None or len(children) == 1:
        depth = depth - 1
        return (graph, depth, depth_dict)

    else:
        for c in children:
            (graph, depth, depth_dict) = recursive_graph(
                c, graph, depth, depth_dict)
            nameh = IMP.atom.Hierarchy(c).get_name()
            index = str(c.get_particle().get_index())
            namec = nameh + "|#" + index
            graph.append((name1, namec))

        depth = depth - 1
        return (graph, depth, depth_dict)
1161 
1162 
def draw_graph(graph, labels_dict=None, graph_layout='spring',
               node_size=5, node_color=None, node_alpha=0.3,
               node_text_size=11, fixed=None, pos=None,
               edge_color='blue', edge_alpha=0.3, edge_thickness=1,
               edge_text_pos=0.3,
               validation_edges=None,
               text_font='sans-serif',
               out_filename=None):
    """Draw an edge-list graph with networkx and save it as GML + image.

    @param graph list of (node, node) edges
    @param labels_dict optional node -> label mapping for drawing
    @param graph_layout one of 'spring', 'spectral', 'random'; anything
           else falls back to a shell layout
    @param node_size scalar size, or dict node -> area (converted to a
           radius-like size)
    @param node_color optional dict node -> hex color string
    @param edge_thickness scalar width, or list of per-edge weights
    @param validation_edges optional edges to highlight (green if present
           in the graph, red if absent)
    @param out_filename if given, save the figure there; a GML dump is
           always written to 'out.gml'
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import networkx as nx
    import matplotlib.pyplot as plt
    from math import sqrt, pi

    # create networkx graph
    G = nx.Graph()

    # add edges, weighted when a per-edge thickness list was supplied
    if type(edge_thickness) is list:
        for edge, weight in zip(graph, edge_thickness):
            G.add_edge(edge[0], edge[1], weight=weight)
    else:
        for edge in graph:
            G.add_edge(edge[0], edge[1])

    if node_color is None:
        node_color_rgb = (0, 0, 0)
        node_color_hex = "000000"
    else:
        # NOTE(review): `cc` is assumed to be a module-level color-conversion
        # helper (see IMP.pmi.tools); it is not defined in this function.
        tmpcolor_rgb = []
        tmpcolor_hex = []
        for node in G.nodes():
            cctuple = cc.rgb(node_color[node])
            tmpcolor_rgb.append((cctuple[0]/255, cctuple[1]/255, cctuple[2]/255))
            tmpcolor_hex.append(node_color[node])
        node_color_rgb = tmpcolor_rgb
        node_color_hex = tmpcolor_hex

    # get node sizes if dictionary
    if type(node_size) is dict:
        tmpsize = []
        for node in G.nodes():
            size = sqrt(node_size[node]) / pi * 10.0
            tmpsize.append(size)
        node_size = tmpsize

    for n, node in enumerate(G.nodes()):
        # Bug fix: with the default arguments node_color_hex is the single
        # string "000000" and node_size is the scalar 5, so indexing with [n]
        # took one character / raised TypeError. Use the shared value when no
        # per-node list was built.
        color = node_color_hex[n] if isinstance(node_color_hex, list) \
            else node_color_hex
        size = node_size[n] if isinstance(node_size, list) else node_size
        nx.set_node_attributes(G, "graphics", {node : {'type': 'ellipse','w': size, 'h': size,'fill': '#'+color, 'label': node}})
        nx.set_node_attributes(G, "LabelGraphics", {node : {'type': 'text','text':node, 'color':'#000000', 'visible':'true'}})

    for edge in G.edges():
        nx.set_edge_attributes(G, "graphics", {edge : {'width': 1,'fill': '#000000'}})

    # Bug fix: the default validation_edges=None used to raise TypeError in
    # this loop; skip the highlighting pass when nothing was supplied.
    for ve in (validation_edges or []):
        print(ve)
        if (ve[0],ve[1]) in G.edges():
            print("found forward")
            nx.set_edge_attributes(G, "graphics", {ve : {'width': 1,'fill': '#00FF00'}})
        elif (ve[1],ve[0]) in G.edges():
            print("found backward")
            nx.set_edge_attributes(G, "graphics", {(ve[1],ve[0]) : {'width': 1,'fill': '#00FF00'}})
        else:
            G.add_edge(ve[0], ve[1])
            print("not found")
            nx.set_edge_attributes(G, "graphics", {ve : {'width': 1,'fill': '#FF0000'}})

    # these are different layouts for the network you may try
    # shell seems to work best
    if graph_layout == 'spring':
        print(fixed, pos)
        graph_pos = nx.spring_layout(G,k=1.0/8.0,fixed=fixed,pos=pos)
    elif graph_layout == 'spectral':
        graph_pos = nx.spectral_layout(G)
    elif graph_layout == 'random':
        graph_pos = nx.random_layout(G)
    else:
        graph_pos = nx.shell_layout(G)


    # draw graph
    nx.draw_networkx_nodes(G, graph_pos, node_size=node_size,
                           alpha=node_alpha, node_color=node_color_rgb,
                           linewidths=0)
    nx.draw_networkx_edges(G, graph_pos, width=edge_thickness,
                           alpha=edge_alpha, edge_color=edge_color)
    nx.draw_networkx_labels(
        G, graph_pos, labels=labels_dict, font_size=node_text_size,
        font_family=text_font)
    if out_filename:
        plt.savefig(out_filename)
        nx.write_gml(G,'out.gml')
    plt.show()
1259 
1260 
def draw_table():
    """Render an example monthly data table with d3js inside IPython.

    Still an example: the data is hard-coded and the function requires the
    ipyD3 package and an IPython display context.
    """

    from ipyD3 import d3object
    from IPython.display import display

    d3 = d3object(width=800,
                  height=400,
                  style='JFTable',
                  number=1,
                  d3=None,
                  title='Example table with d3js',
                  # Bug fix: description previously read "created created".
                  desc='An example table created with d3js with data generated with Python.')
    # Eight product rows x twelve monthly values.
    data = [
        [1277.0,
         654.0,
         288.0,
         1976.0,
         3281.0,
         3089.0,
         10336.0,
         4650.0,
         4441.0,
         4670.0,
         944.0,
         110.0],
        [1318.0,
         664.0,
         418.0,
         1952.0,
         3581.0,
         4574.0,
         11457.0,
         6139.0,
         7078.0,
         6561.0,
         2354.0,
         710.0],
        [1783.0,
         774.0,
         564.0,
         1470.0,
         3571.0,
         3103.0,
         9392.0,
         5532.0,
         5661.0,
         4991.0,
         2032.0,
         680.0],
        [1301.0,
         604.0,
         286.0,
         2152.0,
         3282.0,
         3369.0,
         10490.0,
         5406.0,
         4727.0,
         3428.0,
         1559.0,
         620.0],
        [1537.0,
         1714.0,
         724.0,
         4824.0,
         5551.0,
         8096.0,
         16589.0,
         13650.0,
         9552.0,
         13709.0,
         2460.0,
         720.0],
        [5691.0,
         2995.0,
         1680.0,
         11741.0,
         16232.0,
         14731.0,
         43522.0,
         32794.0,
         26634.0,
         31400.0,
         7350.0,
         3010.0],
        [1650.0,
         2096.0,
         60.0,
         50.0,
         1180.0,
         5602.0,
         15728.0,
         6874.0,
         5115.0,
         3510.0,
         1390.0,
         170.0],
        [72.0, 60.0, 60.0, 10.0, 120.0, 172.0, 1092.0, 675.0, 408.0, 360.0, 156.0, 100.0]]
    # Transpose: rows become months, columns become products.
    data = [list(i) for i in zip(*data)]
    sRows = [['January',
              'February',
              'March',
              'April',
              'May',
              'June',
              'July',
              'August',
              'September',
              'October',
              'November',
              # Bug fix: month name was misspelled "Deecember".
              'December']]
    sColumns = [['Prod {0}'.format(i) for i in range(1, 9)],
                [None, '', None, None, 'Group 1', None, None, 'Group 2']]
    d3.addSimpleTable(data,
                      fontSizeCells=[12, ],
                      sRows=sRows,
                      sColumns=sColumns,
                      sRowsMargins=[5, 50, 0],
                      sColsMargins=[5, 20, 10],
                      spacing=0,
                      addBorders=1,
                      addOutsideBorders=-1,
                      rectWidth=45,
                      rectHeight=0
                      )
    html = d3.render(mode=['html', 'show'])
    display(html)
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: Residue.h:155
A class for reading stat files.
Definition: output.py:674
RMF::FrameID save_frame(RMF::FileHandle file, std::string name="")
Save the current state of the linked objects as a new RMF frame.
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: atom/Atom.h:241
def plot_field_histogram
Plot a list of histograms from a value list.
Definition: output.py:935
def plot_fields_box_plots
Plot time series as boxplots.
Definition: output.py:1000
Miscellaneous utilities.
Definition: tools.py:1
std::string get_module_version()
Change color code to hexadecimal to rgb.
Definition: tools.py:1332
void write_pdb(const Selection &mhd, TextOutput out, unsigned int model=1)
def get_prot_name_from_particle
Return the component name provided a particle and a list of names.
Definition: tools.py:980
def get_fields
Get the desired field names, and return a dictionary.
Definition: output.py:720
static bool get_is_setup(Model *m, ParticleIndex pi)
Definition: Fragment.h:46
std::string get_molecule_name_and_copy(atom::Hierarchy h)
Walk up a PMI2 hierarchy/representations and get the "molname.copynum".
Definition: utilities.h:87
The standard decorator for manipulating molecular structures.
Ints get_index(const ParticlesTemp &particles, const Subset &subset, const Subsets &excluded)
def init_pdb
Init PDB Writing.
Definition: output.py:61
A decorator for a particle representing an atom.
Definition: atom/Atom.h:234
The type for a residue.
A decorator for a particle with x,y,z coordinates.
Definition: XYZ.h:30
A base class for Keys.
Definition: Key.h:46
void add_hierarchies(RMF::NodeHandle fh, const atom::Hierarchies &hs)
Class for easy writing of PDBs, RMFs, and stat files.
Definition: output.py:32
void add_geometries(RMF::NodeHandle parent, const display::GeometriesTemp &r)
Add geometries to a given parent node.
void add_restraints(RMF::NodeHandle fh, const Restraints &hs)
bool get_is_canonical(atom::Hierarchy h)
Walk up a PMI2 hierarchy/representations and check if the root is named System.
Definition: utilities.h:98
Display a segment connecting a pair of particles.
Definition: XYZR.h:168
A decorator for a residue.
Definition: Residue.h:134
Basic functionality that is expected to be used by a wide variety of IMP users.
void add_geometry(RMF::FileHandle file, display::Geometry *r)
Add a single geometry to the file.
Store info for a chain of a protein.
Definition: Chain.h:21
Python classes to represent, score, sample and analyze models.
Functionality for loading, creating, manipulating and scoring atomic structures.
Hierarchies get_leaves(const Selection &h)
Select hierarchy particles identified by the biological name.
Definition: Selection.h:66
def get_residue_indexes
Retrieve the residue indexes for the given particle.
Definition: tools.py:1000
std::string get_module_version()
def sublist_iterator
Yield all sublists of length >= lmin and <= lmax.
Definition: tools.py:1096
A decorator for a particle with x,y,z coordinates and a radius.
Definition: XYZR.h:27