IMP logo
IMP Reference Guide  2.5.0
The Integrative Modeling Platform
output.py
1 """@namespace IMP.pmi.output
2  Classes for writing output files and processing them.
3 """
4 
5 from __future__ import print_function, division
6 import IMP
7 import IMP.atom
8 import IMP.core
9 import IMP.pmi
10 import IMP.pmi.tools
11 import os
12 import sys
13 import RMF
14 import numpy as np
15 import operator
16 try:
17  import cPickle as pickle
18 except ImportError:
19  import pickle
20 
class Output(object):
    """Class for easy writing of PDBs, RMFs, and stat files"""

    def __init__(self, ascii=True, atomistic=False):
        # map output file name -> object(s) written to it
        self.dictionary_pdbs = {}
        self.dictionary_rmfs = {}
        self.dictionary_stats = {}
        self.dictionary_stats2 = {}
        # bookkeeping for best-scoring PDB output
        self.best_score_list = None
        self.nbestscoring = None
        self.suffixes = []
        self.replica_exchange = False
        # True -> stat files are plain text; False -> pickled records
        self.ascii = ascii
        # user-set entries merged into every stat record
        self.initoutput = {}
        self.residuetypekey = IMP.StringKey("ResidueName")
        # chain IDs handed out in order, one per top-level child
        self.chainids = "ABCDEFGHIJKLMNOPQRSTUVXYWZabcdefghijklmnopqrstuvxywz"
        self.dictchain = {}
        self.particle_infos_for_pdb = {}
        self.atomistic = atomistic
40  def get_pdb_names(self):
41  return list(self.dictionary_pdbs.keys())
42 
43  def get_rmf_names(self):
44  return list(self.dictionary_rmfs.keys())
45 
46  def get_stat_names(self):
47  return list(self.dictionary_stats.keys())
48 
49  def init_pdb(self, name, prot):
50  flpdb = open(name, 'w')
51  flpdb.close()
52  self.dictionary_pdbs[name] = prot
53  self.dictchain[name] = {}
54 
55  for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
56  self.dictchain[name][i.get_name()] = self.chainids[n]
57 
58  def write_psf(self,filename,name):
59  flpsf=open(filename,'w')
60  flpsf.write("PSF CMAP CHEQ"+"\n")
61  index_residue_pair_list={}
62  (particle_infos_for_pdb, geometric_center)=self.get_particle_infos_for_pdb_writing(name)
63  nparticles=len(particle_infos_for_pdb)
64  flpsf.write(str(nparticles)+" !NATOM"+"\n")
65  for n,p in enumerate(particle_infos_for_pdb):
66  atom_index=n+1
67  atomtype=p[1]
68  residue_type=p[2]
69  chain=p[3]
70  resid=p[4]
71  flpsf.write('{0:8d}{1:1s}{2:4s}{3:1s}{4:4s}{5:1s}{6:4s}{7:1s}{8:4s}{9:1s}{10:4s}{11:14.6f}{12:14.6f}{13:8d}{14:14.6f}{15:14.6f}'.format(atom_index," ",chain," ",str(resid)," ",'"'+residue_type.get_string()+'"'," ","C"," ","C",1.0,0.0,0,0.0,0.0))
72  flpsf.write('\n')
73  #flpsf.write(str(atom_index)+" "+str(chain)+" "+str(resid)+" "+str(residue_type).replace('"','')+" C C "+"1.0 0.0 0 0.0 0.0\n")
74  if chain not in index_residue_pair_list:
75  index_residue_pair_list[chain]=[(atom_index,resid)]
76  else:
77  index_residue_pair_list[chain].append((atom_index,resid))
78 
79 
80  #now write the connectivity
81  indexes_pairs=[]
82  for chain in sorted(index_residue_pair_list.keys()):
83 
84  ls=index_residue_pair_list[chain]
85  #sort by residue
86  ls=sorted(ls, key=lambda tup: tup[1])
87  #get the index list
88  indexes=[x[0] for x in ls]
89  # get the contiguous pairs
90  indexes_pairs+=list(IMP.pmi.tools.sublist_iterator(indexes,lmin=2,lmax=2))
91  nbonds=len(indexes_pairs)
92  flpsf.write(str(nbonds)+" !NBOND: bonds"+"\n")
93 
94  sublists=[indexes_pairs[i:i+4] for i in range(0,len(indexes_pairs),4)]
95 
96  # save bonds in fized column format
97  for ip in sublists:
98  if len(ip)==4:
99  flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}{6:8d}{7:8d}'.format(ip[0][0],ip[0][1],
100  ip[1][0],ip[1][1],ip[2][0],ip[2][1],ip[3][0],ip[3][1]))
101  elif len(ip)==3:
102  flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}'.format(ip[0][0],ip[0][1],ip[1][0],
103  ip[1][1],ip[2][0],ip[2][1]))
104  elif len(ip)==2:
105  flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}'.format(ip[0][0],ip[0][1],ip[1][0],ip[1][1]))
106  elif len(ip)==1:
107  flpsf.write('{0:8d}{1:8d}'.format(ip[0][0],ip[0][1]))
108  flpsf.write('\n')
109 
110  del particle_infos_for_pdb
111  flpsf.close()
112 
113  def write_pdb(self,name,appendmode=True,
114  translate_to_geometric_center=False):
115  if appendmode:
116  flpdb = open(name, 'a')
117  else:
118  flpdb = open(name, 'w')
119 
120  (particle_infos_for_pdb,
121  geometric_center) = self.get_particle_infos_for_pdb_writing(name)
122 
123  if not translate_to_geometric_center:
124  geometric_center = (0, 0, 0)
125 
126  for n,tupl in enumerate(particle_infos_for_pdb):
127 
128  (xyz, atom_type, residue_type,
129  chain_id, residue_index,radius) = tupl
130 
131  flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
132  xyz[1] - geometric_center[1],
133  xyz[2] - geometric_center[2]),
134  n+1, atom_type, residue_type,
135  chain_id, residue_index,' ',1.00,radius))
136 
137  flpdb.write("ENDMDL\n")
138  flpdb.close()
139 
140  del particle_infos_for_pdb
141 
142  def get_particle_infos_for_pdb_writing(self, name):
143  # index_residue_pair_list={}
144 
145  # the resindexes dictionary keep track of residues that have been already
146  # added to avoid duplication
147  # highest resolution have highest priority
148  resindexes_dict = {}
149 
150  # this dictionary dill contain the sequence of tuples needed to
151  # write the pdb
152  particle_infos_for_pdb = []
153 
154  geometric_center = [0, 0, 0]
155  atom_count = 0
156  atom_index = 0
157 
158  for n, p in enumerate(IMP.atom.get_leaves(self.dictionary_pdbs[name])):
159 
160  # this loop gets the protein name from the
161  # particle leave by descending into the hierarchy
162 
163  (protname, is_a_bead) = IMP.pmi.tools.get_prot_name_from_particle(
164  p, self.dictchain[name])
165 
166  if protname not in resindexes_dict:
167  resindexes_dict[protname] = []
168 
169  if IMP.atom.Atom.get_is_setup(p) and self.atomistic:
170  residue = IMP.atom.Residue(IMP.atom.Atom(p).get_parent())
171  rt = residue.get_residue_type()
172  resind = residue.get_index()
173  atomtype = IMP.atom.Atom(p).get_atom_type()
174  xyz = list(IMP.core.XYZ(p).get_coordinates())
175  radius = IMP.core.XYZR(p).get_radius()
176  geometric_center[0] += xyz[0]
177  geometric_center[1] += xyz[1]
178  geometric_center[2] += xyz[2]
179  atom_count += 1
180  particle_infos_for_pdb.append((xyz,
181  atomtype, rt, self.dictchain[name][protname], resind,radius))
182  resindexes_dict[protname].append(resind)
183 
185 
186  residue = IMP.atom.Residue(p)
187  resind = residue.get_index()
188  # skip if the residue was already added by atomistic resolution
189  # 0
190  if resind in resindexes_dict[protname]:
191  continue
192  else:
193  resindexes_dict[protname].append(resind)
194  rt = residue.get_residue_type()
195  xyz = IMP.core.XYZ(p).get_coordinates()
196  radius = IMP.core.XYZR(p).get_radius()
197  geometric_center[0] += xyz[0]
198  geometric_center[1] += xyz[1]
199  geometric_center[2] += xyz[2]
200  atom_count += 1
201  particle_infos_for_pdb.append((xyz,
202  IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind,radius))
203 
204  elif IMP.atom.Fragment.get_is_setup(p) and not is_a_bead:
205  resindexes = IMP.pmi.tools.get_residue_indexes(p)
206  resind = resindexes[len(resindexes) // 2]
207  if resind in resindexes_dict[protname]:
208  continue
209  else:
210  resindexes_dict[protname].append(resind)
211  rt = IMP.atom.ResidueType('BEA')
212  xyz = IMP.core.XYZ(p).get_coordinates()
213  radius = IMP.core.XYZR(p).get_radius()
214  geometric_center[0] += xyz[0]
215  geometric_center[1] += xyz[1]
216  geometric_center[2] += xyz[2]
217  atom_count += 1
218  particle_infos_for_pdb.append((xyz,
219  IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind,radius))
220 
221  else:
222  if is_a_bead:
223  rt = IMP.atom.ResidueType('BEA')
224  resindexes = IMP.pmi.tools.get_residue_indexes(p)
225  resind = resindexes[len(resindexes) // 2]
226  xyz = IMP.core.XYZ(p).get_coordinates()
227  radius = IMP.core.XYZR(p).get_radius()
228  geometric_center[0] += xyz[0]
229  geometric_center[1] += xyz[1]
230  geometric_center[2] += xyz[2]
231  atom_count += 1
232  particle_infos_for_pdb.append((xyz,
233  IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind,radius))
234 
235  if atom_count > 0:
236  geometric_center = (geometric_center[0] / atom_count,
237  geometric_center[1] / atom_count,
238  geometric_center[2] / atom_count)
239 
240  particle_infos_for_pdb = sorted(particle_infos_for_pdb, key=operator.itemgetter(3, 4))
241 
242  return (particle_infos_for_pdb, geometric_center)
243 
244 
245  def write_pdbs(self, appendmode=True):
246  for pdb in self.dictionary_pdbs.keys():
247  self.write_pdb(pdb, appendmode)
248 
249  def init_pdb_best_scoring(
250  self,
251  suffix,
252  prot,
253  nbestscoring,
254  replica_exchange=False):
255  # save only the nbestscoring conformations
256  # create as many pdbs as needed
257 
258  self.suffixes.append(suffix)
259  self.replica_exchange = replica_exchange
260  if not self.replica_exchange:
261  # common usage
262  # if you are not in replica exchange mode
263  # initialize the array of scores internally
264  self.best_score_list = []
265  else:
266  # otherwise the replicas must cominucate
267  # through a common file to know what are the best scores
268  self.best_score_file_name = "best.scores.rex.py"
269  self.best_score_list = []
270  best_score_file = open(self.best_score_file_name, "w")
271  best_score_file.write(
272  "self.best_score_list=" + str(self.best_score_list))
273  best_score_file.close()
274 
275  self.nbestscoring = nbestscoring
276  for i in range(self.nbestscoring):
277  name = suffix + "." + str(i) + ".pdb"
278  flpdb = open(name, 'w')
279  flpdb.close()
280  self.dictionary_pdbs[name] = prot
281  self.dictchain[name] = {}
282  for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
283  self.dictchain[name][i.get_name()] = self.chainids[n]
284 
285  def write_pdb_best_scoring(self, score):
286  if self.nbestscoring is None:
287  print("Output.write_pdb_best_scoring: init_pdb_best_scoring not run")
288 
289  # update the score list
290  if self.replica_exchange:
291  # read the self.best_score_list from the file
292  exec(open(self.best_score_file_name).read())
293 
294  if len(self.best_score_list) < self.nbestscoring:
295  self.best_score_list.append(score)
296  self.best_score_list.sort()
297  index = self.best_score_list.index(score)
298  for suffix in self.suffixes:
299  for i in range(len(self.best_score_list) - 2, index - 1, -1):
300  oldname = suffix + "." + str(i) + ".pdb"
301  newname = suffix + "." + str(i + 1) + ".pdb"
302  # rename on Windows fails if newname already exists
303  if os.path.exists(newname):
304  os.unlink(newname)
305  os.rename(oldname, newname)
306  filetoadd = suffix + "." + str(index) + ".pdb"
307  self.write_pdb(filetoadd, appendmode=False)
308 
309  else:
310  if score < self.best_score_list[-1]:
311  self.best_score_list.append(score)
312  self.best_score_list.sort()
313  self.best_score_list.pop(-1)
314  index = self.best_score_list.index(score)
315  for suffix in self.suffixes:
316  for i in range(len(self.best_score_list) - 1, index - 1, -1):
317  oldname = suffix + "." + str(i) + ".pdb"
318  newname = suffix + "." + str(i + 1) + ".pdb"
319  os.rename(oldname, newname)
320  filenametoremove = suffix + \
321  "." + str(self.nbestscoring) + ".pdb"
322  os.remove(filenametoremove)
323  filetoadd = suffix + "." + str(index) + ".pdb"
324  self.write_pdb(filetoadd, appendmode=False)
325 
326  if self.replica_exchange:
327  # write the self.best_score_list to the file
328  best_score_file = open(self.best_score_file_name, "w")
329  best_score_file.write(
330  "self.best_score_list=" + str(self.best_score_list))
331  best_score_file.close()
332 
333  def init_rmf(self, name, hierarchies,rs=None):
334  rh = RMF.create_rmf_file(name)
335  IMP.rmf.add_hierarchies(rh, hierarchies)
336  if rs is not None:
338  self.dictionary_rmfs[name] = rh
339 
340  def add_restraints_to_rmf(self, name, objectlist):
341  for o in objectlist:
342  try:
343  rs = o.get_restraint_for_rmf()
344  except:
345  rs = o.get_restraint()
347  self.dictionary_rmfs[name],
348  rs.get_restraints())
349 
350  def add_geometries_to_rmf(self, name, objectlist):
351  for o in objectlist:
352  geos = o.get_geometries()
353  IMP.rmf.add_geometries(self.dictionary_rmfs[name], geos)
354 
355  def add_particle_pair_from_restraints_to_rmf(self, name, objectlist):
356  for o in objectlist:
357 
358  pps = o.get_particle_pairs()
359  for pp in pps:
360  IMP.rmf.add_geometry(
361  self.dictionary_rmfs[name],
363 
364  def write_rmf(self, name):
365  IMP.rmf.save_frame(self.dictionary_rmfs[name])
366  self.dictionary_rmfs[name].flush()
367 
368  def close_rmf(self, name):
369  del self.dictionary_rmfs[name]
370 
371  def write_rmfs(self):
372  for rmf in self.dictionary_rmfs.keys():
373  self.write_rmf(rmf)
374 
375  def init_stat(self, name, listofobjects):
376  if self.ascii:
377  flstat = open(name, 'w')
378  flstat.close()
379  else:
380  flstat = open(name, 'wb')
381  flstat.close()
382 
383  # check that all objects in listofobjects have a get_output method
384  for l in listofobjects:
385  if not "get_output" in dir(l):
386  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
387  self.dictionary_stats[name] = listofobjects
388 
389  def set_output_entry(self, key, value):
390  self.initoutput.update({key: value})
391 
392  def write_stat(self, name, appendmode=True):
393  output = self.initoutput
394  for obj in self.dictionary_stats[name]:
395  d = obj.get_output()
396  # remove all entries that begin with _ (private entries)
397  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
398  output.update(dfiltered)
399 
400  if appendmode:
401  writeflag = 'a'
402  else:
403  writeflag = 'w'
404 
405  if self.ascii:
406  flstat = open(name, writeflag)
407  flstat.write("%s \n" % output)
408  flstat.close()
409  else:
410  flstat = open(name, writeflag + 'b')
411  cPickle.dump(output, flstat, 2)
412  flstat.close()
413 
414  def write_stats(self):
415  for stat in self.dictionary_stats.keys():
416  self.write_stat(stat)
417 
418  def get_stat(self, name):
419  output = {}
420  for obj in self.dictionary_stats[name]:
421  output.update(obj.get_output())
422  return output
423 
424  def write_test(self, name, listofobjects):
425 # write the test:
426 # output=output.Output()
427 # output.write_test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
428 # run the test:
429 # output=output.Output()
430 # output.test("test_modeling11_models.rmf_45492_11Sep13_veena_imp-020713.dat",outputobjects)
431  flstat = open(name, 'w')
432  output = self.initoutput
433  for l in listofobjects:
434  if not "get_test_output" in dir(l) and not "get_output" in dir(l):
435  raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
436  self.dictionary_stats[name] = listofobjects
437 
438  for obj in self.dictionary_stats[name]:
439  try:
440  d = obj.get_test_output()
441  except:
442  d = obj.get_output()
443  # remove all entries that begin with _ (private entries)
444  dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
445  output.update(dfiltered)
446  #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
447  #output.update(
448  # {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
449  flstat.write("%s \n" % output)
450  flstat.close()
451 
452  def test(self, name, listofobjects, tolerance=1e-5):
453  output = self.initoutput
454  for l in listofobjects:
455  if not "get_test_output" in dir(l) and not "get_output" in dir(l):
456  raise ValueError("Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
457  for obj in listofobjects:
458  try:
459  output.update(obj.get_test_output())
460  except:
461  output.update(obj.get_output())
462  #output.update({"ENVIRONMENT": str(self.get_environment_variables())})
463  #output.update(
464  # {"IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
465 
466  flstat = open(name, 'r')
467 
468  passed=True
469  for l in flstat:
470  test_dict = eval(l)
471  for k in test_dict:
472  if k in output:
473  old_value = str(test_dict[k])
474  new_value = str(output[k])
475  try:
476  float(old_value)
477  is_float = True
478  except ValueError:
479  is_float = False
480 
481  if is_float:
482  fold = float(old_value)
483  fnew = float(new_value)
484  diff = abs(fold - fnew)
485  if diff > tolerance:
486  print("%s: test failed, old value: %s new value %s; "
487  "diff %f > %f" % (str(k), str(old_value),
488  str(new_value), diff,
489  tolerance), file=sys.stderr)
490  passed=False
491  elif test_dict[k] != output[k]:
492  if len(old_value) < 50 and len(new_value) < 50:
493  print("%s: test failed, old value: %s new value %s"
494  % (str(k), old_value, new_value), file=sys.stderr)
495  passed=False
496  else:
497  print("%s: test failed, omitting results (too long)"
498  % str(k), file=sys.stderr)
499  passed=False
500 
501  else:
502  print("%s from old objects (file %s) not in new objects"
503  % (str(k), str(name)), file=sys.stderr)
504  return passed
505 
506  def get_environment_variables(self):
507  import os
508  return str(os.environ)
509 
510  def get_versions_of_relevant_modules(self):
511  import IMP
512  versions = {}
513  versions["IMP_VERSION"] = IMP.get_module_version()
514  try:
515  import IMP.pmi
516  versions["PMI_VERSION"] = IMP.pmi.get_module_version()
517  except (ImportError):
518  pass
519  try:
520  import IMP.isd2
521  versions["ISD2_VERSION"] = IMP.isd2.get_module_version()
522  except (ImportError):
523  pass
524  try:
525  import IMP.isd_emxl
526  versions["ISD_EMXL_VERSION"] = IMP.isd_emxl.get_module_version()
527  except (ImportError):
528  pass
529  return versions
530 
531 #-------------------
532  def init_stat2(
533  self,
534  name,
535  listofobjects,
536  extralabels=None,
537  listofsummedobjects=None):
538  # this is a new stat file that should be less
539  # space greedy!
540  # listofsummedobjects must be in the form [([obj1,obj2,obj3,obj4...],label)]
541  # extralabels
542 
543  if listofsummedobjects is None:
544  listofsummedobjects = []
545  if extralabels is None:
546  extralabels = []
547  flstat = open(name, 'w')
548  output = {}
549  stat2_keywords = {"STAT2HEADER": "STAT2HEADER"}
550  stat2_keywords.update(
551  {"STAT2HEADER_ENVIRON": str(self.get_environment_variables())})
552  stat2_keywords.update(
553  {"STAT2HEADER_IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
554  stat2_inverse = {}
555 
556  for l in listofobjects:
557  if not "get_output" in dir(l):
558  raise ValueError("Output: object %s doesn't have get_output() method" % str(l))
559  else:
560  d = l.get_output()
561  # remove all entries that begin with _ (private entries)
562  dfiltered = dict((k, v)
563  for k, v in d.items() if k[0] != "_")
564  output.update(dfiltered)
565 
566  # check for customizable entries
567  for l in listofsummedobjects:
568  for t in l[0]:
569  if not "get_output" in dir(t):
570  raise ValueError("Output: object %s doesn't have get_output() method" % str(t))
571  else:
572  if "_TotalScore" not in t.get_output():
573  raise ValueError("Output: object %s doesn't have _TotalScore entry to be summed" % str(t))
574  else:
575  output.update({l[1]: 0.0})
576 
577  for k in extralabels:
578  output.update({k: 0.0})
579 
580  for n, k in enumerate(output):
581  stat2_keywords.update({n: k})
582  stat2_inverse.update({k: n})
583 
584  flstat.write("%s \n" % stat2_keywords)
585  flstat.close()
586  self.dictionary_stats2[name] = (
587  listofobjects,
588  stat2_inverse,
589  listofsummedobjects,
590  extralabels)
591 
592  def write_stat2(self, name, appendmode=True):
593  output = {}
594  (listofobjects, stat2_inverse, listofsummedobjects,
595  extralabels) = self.dictionary_stats2[name]
596 
597  # writing objects
598  for obj in listofobjects:
599  od = obj.get_output()
600  dfiltered = dict((k, v) for k, v in od.items() if k[0] != "_")
601  for k in dfiltered:
602  output.update({stat2_inverse[k]: od[k]})
603 
604  # writing summedobjects
605  for l in listofsummedobjects:
606  partial_score = 0.0
607  for t in l[0]:
608  d = t.get_output()
609  partial_score += float(d["_TotalScore"])
610  output.update({stat2_inverse[l[1]]: str(partial_score)})
611 
612  # writing extralabels
613  for k in extralabels:
614  if k in self.initoutput:
615  output.update({stat2_inverse[k]: self.initoutput[k]})
616  else:
617  output.update({stat2_inverse[k]: "None"})
618 
619  if appendmode:
620  writeflag = 'a'
621  else:
622  writeflag = 'w'
623 
624  flstat = open(name, writeflag)
625  flstat.write("%s \n" % output)
626  flstat.close()
627 
628  def write_stats2(self):
629  for stat in self.dictionary_stats2.keys():
630  self.write_stat2(stat)
631 
632 
class ProcessOutput(object):
    """A class for reading stat files"""

    def __init__(self, filename):
        """Open `filename` and detect its format from the first record.

        stat2 files begin with a STAT2HEADER record mapping integer
        column ids to key names; any other dict record marks a stat1
        file (one full key->value dict per line).

        @raise ValueError if filename is None
        """
        self.filename = filename
        self.isstat1 = False
        self.isstat2 = False

        if self.filename is None:
            raise ValueError("No file name provided. Use -h for help")

        # `with` guarantees the handle is closed even if eval() raises
        with open(self.filename, "r") as f:
            # only the first record is needed to detect the format
            for line in f:
                # NOTE: records are python dict reprs; eval assumes a
                # trusted stat file
                d = eval(line)
                self.klist = list(d.keys())
                if "STAT2HEADER" in self.klist:
                    self.isstat2 = True
                    # drop the header bookkeeping entries
                    for k in self.klist:
                        if "STAT2HEADER" in str(k):
                            del d[k]
                    stat2_dict = d
                    # get the list of column ids and keys sorted by key
                    kkeys = [k[0]
                             for k in sorted(stat2_dict.items(),
                                             key=operator.itemgetter(1))]
                    self.klist = [k[1]
                                  for k in sorted(stat2_dict.items(),
                                                  key=operator.itemgetter(1))]
                    # key name -> column id
                    self.invstat2_dict = {}
                    for k in kkeys:
                        self.invstat2_dict.update({stat2_dict[k]: k})
                else:
                    self.isstat1 = True
                    self.klist.sort()
                break

    def get_keys(self):
        """Return the field names available in this file."""
        return self.klist

    def show_keys(self, ncolumns=2, truncate=65):
        """Print the available field names in `ncolumns` columns."""
        IMP.pmi.tools.print_multicolumn(self.get_keys(), ncolumns, truncate)

    def _record_filtered_out(self, d, filtertuple, keymap):
        """Return True when record `d` fails the (key, relation, value)
        filter; `keymap` maps a user key to the record's key."""
        (keytobefiltered, relationship, value) = filtertuple
        v = float(d[keymap(keytobefiltered)])
        if relationship == "<":
            return v >= value
        if relationship == ">":
            return v <= value
        if relationship == "==":
            return v != value
        return False

    def get_fields(self, fields, filtertuple=None, filterout=None, get_every=1):
        '''
        Get the desired field names, and return a dictionary.

        @param fields desired field names
        @param filterout specify if you want to "grep" out something from
               the file, so that it is faster
        @param filtertuple a tuple that contains
               ("TheKeyToBeFiltered",relationship,value)
               where relationship = "<", "==", or ">"
        @param get_every only read every Nth line from the file
        '''
        outdict = {}
        for field in fields:
            outdict[field] = []

        with open(self.filename, "r") as f:
            line_number = 0
            for line in f:
                # cheap textual pre-filter before the expensive eval
                if filterout is not None and filterout in line:
                    continue
                line_number += 1
                if line_number % get_every != 0:
                    continue
                try:
                    d = eval(line)  # trusted stat file (see __init__)
                except Exception:
                    # was a bare except: narrowed, same message preserved
                    print("# Warning: skipped line number " + str(line_number) + " not a valid line")
                    continue

                if self.isstat1:
                    if filtertuple is not None:
                        if self._record_filtered_out(d, filtertuple,
                                                     lambda k: k):
                            continue
                    for field in fields:
                        outdict[field].append(d[field])
                elif self.isstat2:
                    if line_number == 1:
                        # skip the header record
                        continue
                    if filtertuple is not None:
                        if self._record_filtered_out(
                                d, filtertuple,
                                lambda k: self.invstat2_dict[k]):
                            continue
                    for field in fields:
                        outdict[field].append(d[self.invstat2_dict[field]])
        return outdict
755 
756 
757 
class CrossLinkIdentifierDatabase(object):
    """Pickleable store of per-crosslink metadata, keyed by crosslink id.

    Values are kept in a plain nested dict (key -> {feature: value}) so
    the whole database can be pickled to disk with write()/load().
    """

    def __init__(self):
        self.clidb = dict()

    def check_key(self, key):
        """Create an empty record for `key` if it does not exist yet."""
        if key not in self.clidb:
            self.clidb[key] = {}

    # --- typed setters, expressed through the generic feature helpers ---

    def set_unique_id(self, key, value):
        self.set_string_feature(key, value, "XLUniqueID")

    def set_protein1(self, key, value):
        self.set_string_feature(key, value, "Protein1")

    def set_protein2(self, key, value):
        self.set_string_feature(key, value, "Protein2")

    def set_residue1(self, key, value):
        self.set_int_feature(key, value, "Residue1")

    def set_residue2(self, key, value):
        self.set_int_feature(key, value, "Residue2")

    def set_idscore(self, key, value):
        self.set_float_feature(key, value, "IDScore")

    def set_state(self, key, value):
        self.set_int_feature(key, value, "State")

    def set_sigma1(self, key, value):
        self.set_string_feature(key, value, "Sigma1")

    def set_sigma2(self, key, value):
        self.set_string_feature(key, value, "Sigma2")

    def set_psi(self, key, value):
        self.set_string_feature(key, value, "Psi")

    # --- getters ---

    def get_unique_id(self, key):
        return self.get_feature(key, "XLUniqueID")

    def get_protein1(self, key):
        return self.get_feature(key, "Protein1")

    def get_protein2(self, key):
        return self.get_feature(key, "Protein2")

    def get_residue1(self, key):
        return self.get_feature(key, "Residue1")

    def get_residue2(self, key):
        return self.get_feature(key, "Residue2")

    def get_idscore(self, key):
        return self.get_feature(key, "IDScore")

    def get_state(self, key):
        return self.get_feature(key, "State")

    def get_sigma1(self, key):
        return self.get_feature(key, "Sigma1")

    def get_sigma2(self, key):
        return self.get_feature(key, "Sigma2")

    def get_psi(self, key):
        return self.get_feature(key, "Psi")

    # --- generic feature storage ---

    def set_float_feature(self, key, value, feature_name):
        """Store `value` as a float under `feature_name` for `key`."""
        self.check_key(key)
        self.clidb[key][feature_name] = float(value)

    def set_int_feature(self, key, value, feature_name):
        """Store `value` as an int under `feature_name` for `key`."""
        self.check_key(key)
        self.clidb[key][feature_name] = int(value)

    def set_string_feature(self, key, value, feature_name):
        """Store `value` as a string under `feature_name` for `key`."""
        self.check_key(key)
        self.clidb[key][feature_name] = str(value)

    def get_feature(self, key, feature_name):
        """Return the value stored under `feature_name` for `key`."""
        return self.clidb[key][feature_name]

    # --- persistence ---

    def write(self, filename):
        """Pickle the whole database to `filename`."""
        import pickle
        with open(filename, 'wb') as handle:
            pickle.dump(self.clidb, handle)

    def load(self, filename):
        """Replace the database with the pickle stored in `filename`."""
        import pickle
        with open(filename, 'rb') as handle:
            self.clidb = pickle.load(handle)
860 
def plot_fields(fields, framemin=None, framemax=None):
    """Plot each entry of `fields` (dict name -> value list) as its own
    subplot over the frame range [framemin, framemax)."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=4)
    fig, axs = plt.subplots(nrows=len(fields))
    fig.set_size_inches(10.5, 5.5 * len(fields))
    plt.rc('axes', color_cycle=['r'])

    for n, key in enumerate(fields):
        if framemin is None:
            framemin = 0
        if framemax is None:
            framemax = len(fields[key])
        xs = list(range(framemin, framemax))
        ys = [float(v) for v in fields[key][framemin:framemax]]
        # with a single field, plt.subplots returns the axis itself,
        # not an array of axes
        ax = axs[n] if len(fields) > 1 else axs
        ax.plot(xs, ys)
        ax.set_title(key, size="xx-large")
        ax.tick_params(labelsize=18, pad=10)

    # Tweak spacing between subplots to prevent labels from overlapping
    plt.subplots_adjust(hspace=0.3)
    plt.show()
892 
893 
895  name, values_lists, valuename=None, bins=40, colors=None, format="png",
896  reference_xline=None, yplotrange=None, xplotrange=None,normalized=True,
897  leg_names=None):
898 
899  '''Plot a list of histograms from a value list.
900  @param name the name of the plot
901  @param value_lists the list of list of values eg: [[...],[...],[...]]
902  @param valuename the y-label
903  @param bins the number of bins
904  @param colors If None, will use rainbow. Else will use specific list
905  @param format output format
906  @param reference_xline plot a reference line parallel to the y-axis
907  @param yplotrange the range for the y-axis
908  @param normalized whether the histogram is normalized or not
909  @param leg_names names for the legend
910  '''
911 
912  import matplotlib as mpl
913  mpl.use('Agg')
914  import matplotlib.pyplot as plt
915  import matplotlib.cm as cm
916  fig = plt.figure(figsize=(18.0, 9.0))
917 
918  if colors is None:
919  colors = cm.rainbow(np.linspace(0, 1, len(values_lists)))
920  for nv,values in enumerate(values_lists):
921  col=colors[nv]
922  if leg_names is not None:
923  label=leg_names[nv]
924  else:
925  label=str(nv)
926  h=plt.hist(
927  [float(y) for y in values],
928  bins=bins,
929  color=col,
930  normed=normalized,histtype='step',lw=4,
931  label=label)
932 
933  # plt.title(name,size="xx-large")
934  plt.tick_params(labelsize=12, pad=10)
935  if valuename is None:
936  plt.xlabel(name, size="xx-large")
937  else:
938  plt.xlabel(valuename, size="xx-large")
939  plt.ylabel("Frequency", size="xx-large")
940 
941  if not yplotrange is None:
942  plt.ylim()
943  if not xplotrange is None:
944  plt.xlim(xplotrange)
945 
946  plt.legend(loc=2)
947 
948  if not reference_xline is None:
949  plt.axvline(
950  reference_xline,
951  color='red',
952  linestyle='dashed',
953  linewidth=1)
954 
955  plt.savefig(name + "." + format, dpi=150, transparent=True)
956  plt.show()
957 
958 
def plot_fields_box_plots(name, values, positions, frequencies=None,
                          valuename="None", positionname="None",
                          xlabels=None, scale_plot_length=1.0):
    '''
    Plot time series as boxplots.
    fields is a list of time series, positions are the x-values
    valuename is the y-label, positionname is the x-label
    '''
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib.patches import Polygon

    bps = []
    fig = plt.figure(figsize=(float(len(positions)) * scale_plot_length, 5.0))
    fig.canvas.set_window_title(name)
    ax1 = fig.add_subplot(111)
    plt.subplots_adjust(left=0.1, right=0.990, top=0.95, bottom=0.4)

    bps.append(plt.boxplot(values, notch=0, sym='', vert=1,
                           whis=1.5, positions=positions))
    plt.setp(bps[-1]['boxes'], color='black', lw=1.5)
    plt.setp(bps[-1]['whiskers'], color='black', ls=":", lw=1.5)

    if frequencies is not None:
        # overlay the raw samples as green crosses
        for n, v in enumerate(values):
            plist = [positions[n]] * len(v)
            ax1.plot(plist, v, 'gx', alpha=0.7, markersize=7)

    if xlabels is not None:
        ax1.set_xticklabels(xlabels)
    plt.xticks(rotation=90)
    plt.xlabel(positionname)
    plt.ylabel(valuename)

    plt.savefig(name + ".pdf", dpi=150)
    plt.show()
1000 
1001 
def plot_xy_data(x, y, title=None, out_fn=None, display=True,
                 set_plot_yaxis_range=None, xlabel=None, ylabel=None):
    """Plot y against x as a red line; optionally save to out_fn.pdf."""
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    plt.rc('lines', linewidth=2)

    fig, ax = plt.subplots(nrows=1)
    fig.set_size_inches(8, 4.5)
    if title is not None:
        fig.canvas.set_window_title(title)

    ax.plot(x, y, color='r')
    if set_plot_yaxis_range is not None:
        # keep the autoscaled x-range, override only the y-range
        x1, x2, y1, y2 = plt.axis()
        y1 = set_plot_yaxis_range[0]
        y2 = set_plot_yaxis_range[1]
        plt.axis((x1, x2, y1, y2))
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if out_fn is not None:
        plt.savefig(out_fn + ".pdf")
    if display:
        plt.show()
    plt.close(fig)
1032 
def plot_scatter_xy_data(x,y,labelx="None",labely="None",
                         xmin=None,xmax=None,ymin=None,ymax=None,
                         savefile=False,filename="None.eps",alpha=0.75):
    """Scatter-plot y against x.

    @param labelx,labely        axis labels
    @param xmin,xmax,ymin,ymax  optional axis limits; a limit is applied
                                only when both ends of the pair are given
    @param savefile             if True, write the figure to filename
    @param filename             output file name (used when savefile=True)
    @param alpha                marker transparency
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import matplotlib.pyplot as plt
    from matplotlib import rc
    rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})

    fig, axs = plt.subplots(1)

    axs0 = axs

    axs0.set_xlabel(labelx, size="xx-large")
    axs0.set_ylabel(labely, size="xx-large")
    axs0.tick_params(labelsize=18, pad=10)

    plot2 = []

    plot2.append(axs0.plot(x, y, 'o', color='k', lw=2, ms=0.1, alpha=alpha,
                           c="w"))

    axs0.legend(
        loc=0,
        frameon=False,
        scatterpoints=1,
        numpoints=1,
        columnspacing=1)

    fig.set_size_inches(8.0, 8.0)
    fig.subplots_adjust(left=0.161, right=0.850, top=0.95, bottom=0.11)
    # apply axis limits only when both ends of the pair are supplied
    if ymin is not None and ymax is not None:
        axs0.set_ylim(ymin, ymax)
    if xmin is not None and xmax is not None:
        axs0.set_xlim(xmin, xmax)

    if savefile:
        fig.savefig(filename, dpi=300)
1075 
1076 
def get_graph_from_hierarchy(hier):
    """Build the parent/child edge graph of a hierarchy and draw it.

    Node labels are shown only for nodes at depth < 3; deeper nodes get
    an empty label.
    @param hier  an IMP.atom.Hierarchy (or compatible particle)
    """
    graph = []
    depth_dict = {}
    depth = 0
    (graph, depth, depth_dict) = recursive_graph(
        hier, graph, depth, depth_dict)

    # filters node labels according to depth_dict
    node_labels_dict = {}
    node_size_dict = {}
    for key in depth_dict:
        # bug fix: the original assigned the scalar to node_size_dict
        # itself (node_size_dict = 10 / ...), clobbering the dictionary
        # instead of filling one entry per node
        node_size_dict[key] = 10 / depth_dict[key]
        if depth_dict[key] < 3:
            node_labels_dict[key] = key
        else:
            node_labels_dict[key] = ""
    draw_graph(graph, labels_dict=node_labels_dict)
1094 
1095 
def recursive_graph(hier, graph, depth, depth_dict):
    # Depth-first walk over an IMP.atom.Hierarchy: appends one
    # (parent_label, child_label) edge per parent/child pair to `graph`
    # and records each visited node's depth in `depth_dict`.
    # Node labels have the form "<name>|#<particle index>".
    # Returns the updated (graph, depth, depth_dict); `depth` is restored
    # to its incoming value before returning.
    depth = depth + 1
    nameh = IMP.atom.Hierarchy(hier).get_name()
    index = str(hier.get_particle().get_index())
    name1 = nameh + "|#" + index
    depth_dict[name1] = depth

    children = IMP.atom.Hierarchy(hier).get_children()

    # NOTE(review): the `children is None` clause is unreachable — if
    # children were None, len(children) would already have raised.
    # Stopping at exactly one child (rather than zero) skips
    # single-child chains; presumably intentional to collapse trivial
    # hierarchy levels — TODO confirm. Leaves (zero children) are
    # handled by the else branch, whose loop simply does not execute.
    if len(children) == 1 or children is None:
        depth = depth - 1
        return (graph, depth, depth_dict)

    else:
        for c in children:
            # recurse first, then record the edge from this node to c
            (graph, depth, depth_dict) = recursive_graph(
                c, graph, depth, depth_dict)
            nameh = IMP.atom.Hierarchy(c).get_name()
            index = str(c.get_particle().get_index())
            namec = nameh + "|#" + index
            graph.append((name1, namec))

        depth = depth - 1
        return (graph, depth, depth_dict)
1120 
1121 
def draw_graph(graph, labels_dict=None, graph_layout='spring',
               node_size=5, node_color=None, node_alpha=0.3,
               node_text_size=11, fixed=None, pos=None,
               edge_color='blue', edge_alpha=0.3, edge_thickness=1,
               edge_text_pos=0.3,
               validation_edges=None,
               text_font='sans-serif',
               out_filename=None):
    """Draw a graph with networkx/matplotlib and write it as GML.

    @param graph            list of (node, node) edge tuples
    @param labels_dict      optional {node: label} for drawn labels
    @param graph_layout     'spring', 'spectral', 'random' or shell
    @param node_size        scalar, or {node: area} dict (scaled by sqrt)
    @param node_color       optional {node: hex_color_string} dict
    @param edge_thickness   scalar, or per-edge list of weights
    @param validation_edges optional edges to highlight: green if present
                            in the graph (either direction), red otherwise
    @param out_filename     if given, save the figure and write 'out.gml'
    """
    import matplotlib as mpl
    mpl.use('Agg')
    import networkx as nx
    import matplotlib.pyplot as plt
    from math import sqrt, pi

    # create networkx graph
    G = nx.Graph()

    # add edges
    if isinstance(edge_thickness, list):
        for edge, weight in zip(graph, edge_thickness):
            G.add_edge(edge[0], edge[1], weight=weight)
    else:
        for edge in graph:
            G.add_edge(edge[0], edge[1])

    if node_color is None:
        node_color_rgb = (0, 0, 0)
        # bug fix: a single "000000" string was later indexed per node,
        # yielding one character; use a per-node list instead
        node_color_hex = ["000000"] * G.number_of_nodes()
    else:
        tmpcolor_rgb = []
        tmpcolor_hex = []
        for node in G.nodes():
            # NOTE(review): `cc` is not defined in this function —
            # presumably a module-level hex->rgb color converter; verify
            cctuple = cc.rgb(node_color[node])
            tmpcolor_rgb.append(
                (cctuple[0] / 255, cctuple[1] / 255, cctuple[2] / 255))
            tmpcolor_hex.append(node_color[node])
        node_color_rgb = tmpcolor_rgb
        node_color_hex = tmpcolor_hex

    # get node sizes if dictionary; sqrt so the given value acts as an area
    if isinstance(node_size, dict):
        tmpsize = []
        for node in G.nodes():
            size = sqrt(node_size[node]) / pi * 10.0
            tmpsize.append(size)
        node_size = tmpsize

    for n, node in enumerate(G.nodes()):
        color = node_color_hex[n]
        # bug fix: node_size may still be the scalar default; indexing a
        # scalar would raise TypeError
        size = node_size[n] if isinstance(node_size, list) else node_size
        nx.set_node_attributes(G, "graphics", {node : {'type': 'ellipse','w': size, 'h': size,'fill': '#'+color, 'label': node}})
        nx.set_node_attributes(G, "LabelGraphics", {node : {'type': 'text','text':node, 'color':'#000000', 'visible':'true'}})

    for edge in G.edges():
        nx.set_edge_attributes(G, "graphics", {edge : {'width': 1,'fill': '#000000'}})

    # bug fix: guard against the default validation_edges=None, which
    # previously raised TypeError on iteration
    if validation_edges is not None:
        for ve in validation_edges:
            print(ve)
            if (ve[0], ve[1]) in G.edges():
                print("found forward")
                nx.set_edge_attributes(G, "graphics", {ve : {'width': 1,'fill': '#00FF00'}})
            elif (ve[1], ve[0]) in G.edges():
                print("found backward")
                nx.set_edge_attributes(G, "graphics", {(ve[1],ve[0]) : {'width': 1,'fill': '#00FF00'}})
            else:
                G.add_edge(ve[0], ve[1])
                print("not found")
                nx.set_edge_attributes(G, "graphics", {ve : {'width': 1,'fill': '#FF0000'}})

    # these are different layouts for the network you may try
    # shell seems to work best
    if graph_layout == 'spring':
        print(fixed, pos)
        graph_pos = nx.spring_layout(G, k=1.0 / 8.0, fixed=fixed, pos=pos)
    elif graph_layout == 'spectral':
        graph_pos = nx.spectral_layout(G)
    elif graph_layout == 'random':
        graph_pos = nx.random_layout(G)
    else:
        graph_pos = nx.shell_layout(G)

    # draw graph
    nx.draw_networkx_nodes(G, graph_pos, node_size=node_size,
                           alpha=node_alpha, node_color=node_color_rgb,
                           linewidths=0)
    nx.draw_networkx_edges(G, graph_pos, width=edge_thickness,
                           alpha=edge_alpha, edge_color=edge_color)
    nx.draw_networkx_labels(
        G, graph_pos, labels=labels_dict, font_size=node_text_size,
        font_family=text_font)
    if out_filename:
        plt.savefig(out_filename)
        nx.write_gml(G, 'out.gml')
    plt.show()
1218 
1219 
def draw_table():
    """Render an example HTML table with d3js via ipyD3 inside IPython.

    Still an example: the data is hard-coded monthly values for eight
    products; kept as a usage demonstration of ipyD3.addSimpleTable().
    """

    from ipyD3 import d3object
    from IPython.display import display

    d3 = d3object(width=800,
                  height=400,
                  style='JFTable',
                  number=1,
                  d3=None,
                  title='Example table with d3js',
                  desc='An example table created with d3js with data '
                       'generated with Python.')
    # one row per product, one column per month (transposed below)
    data = [
        [1277.0, 654.0, 288.0, 1976.0, 3281.0, 3089.0, 10336.0, 4650.0,
         4441.0, 4670.0, 944.0, 110.0],
        [1318.0, 664.0, 418.0, 1952.0, 3581.0, 4574.0, 11457.0, 6139.0,
         7078.0, 6561.0, 2354.0, 710.0],
        [1783.0, 774.0, 564.0, 1470.0, 3571.0, 3103.0, 9392.0, 5532.0,
         5661.0, 4991.0, 2032.0, 680.0],
        [1301.0, 604.0, 286.0, 2152.0, 3282.0, 3369.0, 10490.0, 5406.0,
         4727.0, 3428.0, 1559.0, 620.0],
        [1537.0, 1714.0, 724.0, 4824.0, 5551.0, 8096.0, 16589.0, 13650.0,
         9552.0, 13709.0, 2460.0, 720.0],
        [5691.0, 2995.0, 1680.0, 11741.0, 16232.0, 14731.0, 43522.0,
         32794.0, 26634.0, 31400.0, 7350.0, 3010.0],
        [1650.0, 2096.0, 60.0, 50.0, 1180.0, 5602.0, 15728.0, 6874.0,
         5115.0, 3510.0, 1390.0, 170.0],
        [72.0, 60.0, 60.0, 10.0, 120.0, 172.0, 1092.0, 675.0, 408.0,
         360.0, 156.0, 100.0]]
    # transpose so rows become months and columns become products
    data = [list(i) for i in zip(*data)]
    sRows = [['January',
              'February',
              'March',
              'April',
              'May',
              'June',
              'July',
              'August',
              'September',
              'October',
              'November',
              'December']]
    sColumns = [['Prod {0}'.format(i) for i in range(1, 9)],
                [None, '', None, None, 'Group 1', None, None, 'Group 2']]
    d3.addSimpleTable(data,
                      fontSizeCells=[12, ],
                      sRows=sRows,
                      sColumns=sColumns,
                      sRowsMargins=[5, 50, 0],
                      sColsMargins=[5, 20, 10],
                      spacing=0,
                      addBorders=1,
                      addOutsideBorders=-1,
                      rectWidth=45,
                      rectHeight=0
                      )
    html = d3.render(mode=['html', 'show'])
    display(html)
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: Residue.h:155
A class for reading stat files.
Definition: output.py:633
RMF::FrameID save_frame(RMF::FileHandle file, std::string name="")
Save the current state of the linked objects as a new RMF frame.
static bool get_is_setup(const IMP::ParticleAdaptor &p)
Definition: atom/Atom.h:241
def plot_field_histogram
Plot a list of histograms from a value list.
Definition: output.py:894
def plot_fields_box_plots
Plot time series as boxplots.
Definition: output.py:959
Miscellaneous utilities.
Definition: tools.py:1
std::string get_module_version()
Change color code to hexadecimal to rgb.
Definition: tools.py:1406
void write_pdb(const Selection &mhd, TextOutput out, unsigned int model=1)
def get_prot_name_from_particle
Return the component name provided a particle and a list of names.
Definition: tools.py:939
def get_fields
Get the desired field names, and return a dictionary.
Definition: output.py:679
static bool get_is_setup(Model *m, ParticleIndex pi)
Definition: Fragment.h:46
The standard decorator for manipulating molecular structures.
Ints get_index(const ParticlesTemp &particles, const Subset &subset, const Subsets &excluded)
A decorator for a particle representing an atom.
Definition: atom/Atom.h:234
The type for a residue.
A decorator for a particle with x,y,z coordinates.
Definition: XYZ.h:30
A base class for Keys.
Definition: Key.h:46
void add_hierarchies(RMF::NodeHandle fh, const atom::Hierarchies &hs)
Class for easy writing of PDBs, RMFs, and stat files.
Definition: output.py:21
void add_geometries(RMF::NodeHandle parent, const display::GeometriesTemp &r)
void add_restraints(RMF::NodeHandle fh, const Restraints &hs)
Display a segment connecting a pair of particles.
Definition: XYZR.h:168
A decorator for a residue.
Definition: Residue.h:134
Basic functionality that is expected to be used by a wide variety of IMP users.
Python classes to represent, score, sample and analyze models.
Functionality for loading, creating, manipulating and scoring atomic structures.
Hierarchies get_leaves(const Selection &h)
def get_residue_indexes
Retrieve the residue indexes for the given particle.
Definition: tools.py:959
std::string get_module_version()
def sublist_iterator
Yield all sublists of length >= lmin and <= lmax.
Definition: tools.py:1055
A decorator for a particle with x,y,z coordinates and a radius.
Definition: XYZR.h:27