1 """@namespace IMP.pmi.output
2 Classes for writing output files and processing them.
5 from __future__
import print_function, division
16 import cPickle
as pickle
21 """Class for easy writing of PDBs, RMFs, and stat files"""
22 def __init__(self, ascii=True,atomistic=False):
23 self.dictionary_pdbs = {}
24 self.dictionary_rmfs = {}
25 self.dictionary_stats = {}
26 self.dictionary_stats2 = {}
27 self.best_score_list =
None
28 self.nbestscoring =
None
30 self.replica_exchange =
False
34 self.chainids =
"ABCDEFGHIJKLMNOPQRSTUVXYWZabcdefghijklmnopqrstuvxywz"
36 self.particle_infos_for_pdb = {}
37 self.atomistic=atomistic
    def get_pdb_names(self):
        return list(self.dictionary_pdbs.keys())

    def get_rmf_names(self):
        return list(self.dictionary_rmfs.keys())

    def get_stat_names(self):
        return list(self.dictionary_stats.keys())
    def init_pdb(self, name, prot):
        # create/truncate the file and register the hierarchy and chain ids
        flpdb = open(name, 'w')
        flpdb.close()
        self.dictionary_pdbs[name] = prot
        self.dictchain[name] = {}

        for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
            self.dictchain[name][i.get_name()] = self.chainids[n]
    def write_psf(self, filename, name):
        flpsf = open(filename, 'w')
        flpsf.write("PSF CMAP CHEQ" + "\n")
        index_residue_pair_list = {}
        (particle_infos_for_pdb, geometric_center) = self.get_particle_infos_for_pdb_writing(name)
        nparticles = len(particle_infos_for_pdb)
        flpsf.write(str(nparticles) + " !NATOM" + "\n")
        for n, p in enumerate(particle_infos_for_pdb):
            # unpack the fields produced by get_particle_infos_for_pdb_writing()
            atom_index = n + 1
            residue_type = str(p[2])
            chain = p[3]
            resid = p[4]
            flpsf.write('{0:8d}{1:1s}{2:4s}{3:1s}{4:4s}{5:1s}{6:4s}{7:1s}{8:4s}{9:1s}{10:4s}{11:14.6f}{12:14.6f}{13:8d}{14:14.6f}{15:14.6f}'.format(
                atom_index, " ", chain, " ", str(resid), " ", residue_type,
                " ", "C", " ", "C", 1.0, 0.0, 0, 0.0, 0.0))
            flpsf.write('\n')
            # keep track of the (atom index, residue index) pairs per chain
            if chain not in index_residue_pair_list:
                index_residue_pair_list[chain] = [(atom_index, resid)]
            else:
                index_residue_pair_list[chain].append((atom_index, resid))

        # build the bond list: consecutive residues within a chain are bonded
        indexes_pairs = []
        for chain in sorted(index_residue_pair_list.keys()):
            ls = index_residue_pair_list[chain]
            # sort by residue index
            ls = sorted(ls, key=lambda tup: tup[1])
            # get the atom index list
            indexes = [x[0] for x in ls]
            # pair consecutive atom indexes (reconstructed; the original
            # pairing code was lost here)
            indexes_pairs += [(indexes[i], indexes[i + 1])
                              for i in range(len(indexes) - 1)]

        nbonds = len(indexes_pairs)
        flpsf.write(str(nbonds) + " !NBOND: bonds" + "\n")

        # PSF stores four bonds (eight indexes) per line
        sublists = [indexes_pairs[i:i + 4] for i in range(0, len(indexes_pairs), 4)]
        for ip in sublists:
            if len(ip) == 4:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}{6:8d}{7:8d}'.format(
                    ip[0][0], ip[0][1], ip[1][0], ip[1][1],
                    ip[2][0], ip[2][1], ip[3][0], ip[3][1]))
            elif len(ip) == 3:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}{4:8d}{5:8d}'.format(
                    ip[0][0], ip[0][1], ip[1][0], ip[1][1], ip[2][0], ip[2][1]))
            elif len(ip) == 2:
                flpsf.write('{0:8d}{1:8d}{2:8d}{3:8d}'.format(
                    ip[0][0], ip[0][1], ip[1][0], ip[1][1]))
            elif len(ip) == 1:
                flpsf.write('{0:8d}{1:8d}'.format(ip[0][0], ip[0][1]))
            flpsf.write('\n')

        del particle_infos_for_pdb
        flpsf.close()
    def write_pdb(self, name, appendmode=True,
                  translate_to_geometric_center=False):
        if appendmode:
            flpdb = open(name, 'a')
        else:
            flpdb = open(name, 'w')

        (particle_infos_for_pdb,
         geometric_center) = self.get_particle_infos_for_pdb_writing(name)

        if not translate_to_geometric_center:
            geometric_center = (0, 0, 0)

        for n, tupl in enumerate(particle_infos_for_pdb):
            (xyz, atom_type, residue_type,
             chain_id, residue_index, radius) = tupl

            flpdb.write(IMP.atom.get_pdb_string((xyz[0] - geometric_center[0],
                                                 xyz[1] - geometric_center[1],
                                                 xyz[2] - geometric_center[2]),
                                                n + 1, atom_type, residue_type,
                                                chain_id, residue_index, ' ', 1.00, radius))

        flpdb.write("ENDMDL\n")
        flpdb.close()

        del particle_infos_for_pdb
    def get_particle_infos_for_pdb_writing(self, name):
        # resindexes_dict keeps track of residues that have already been
        # added, so that the highest-resolution representation wins
        resindexes_dict = {}

        # list of tuples needed to write the pdb
        particle_infos_for_pdb = []

        geometric_center = [0, 0, 0]
        atom_count = 0

        # NOTE: the loop header and the decorator checks below were
        # reconstructed from the standard IMP.atom/IMP.core API; the
        # original lines were lost.
        for n, p in enumerate(IMP.atom.get_leaves(self.dictionary_pdbs[name])):
            protname, is_a_bead = IMP.pmi.tools.get_prot_name_from_particle(
                p, self.dictchain[name])

            if protname not in resindexes_dict:
                resindexes_dict[protname] = []

            if IMP.atom.Atom.get_is_setup(p) and self.atomistic:
                # fully atomistic particle
                residue = IMP.atom.Residue(IMP.atom.Atom(p).get_parent())
                rt = residue.get_residue_type()
                resind = residue.get_index()
                atomtype = IMP.atom.Atom(p).get_atom_type()
                xyz = IMP.core.XYZ(p).get_coordinates()
                radius = IMP.core.XYZR(p).get_radius()
                geometric_center[0] += xyz[0]
                geometric_center[1] += xyz[1]
                geometric_center[2] += xyz[2]
                atom_count += 1
                particle_infos_for_pdb.append((xyz,
                                               atomtype, rt, self.dictchain[name][protname], resind, radius))
                resindexes_dict[protname].append(resind)

            elif IMP.atom.Residue.get_is_setup(p):
                # single-residue bead, written as a CA atom
                residue = IMP.atom.Residue(p)
                resind = residue.get_index()
                # skip residues already written at atomic resolution
                if resind in resindexes_dict[protname]:
                    continue
                else:
                    resindexes_dict[protname].append(resind)
                rt = residue.get_residue_type()
                xyz = IMP.core.XYZ(p).get_coordinates()
                radius = IMP.core.XYZR(p).get_radius()
                geometric_center[0] += xyz[0]
                geometric_center[1] += xyz[1]
                geometric_center[2] += xyz[2]
                atom_count += 1
                particle_infos_for_pdb.append((xyz,
                                               IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind, radius))

            elif IMP.atom.Fragment.get_is_setup(p) and not is_a_bead:
                # multi-residue fragment: use the central residue index
                resindexes = IMP.pmi.tools.get_residue_indexes(p)
                resind = resindexes[len(resindexes) // 2]
                if resind in resindexes_dict[protname]:
                    continue
                else:
                    resindexes_dict[protname].append(resind)
                rt = IMP.atom.ResidueType("BEA")
                xyz = IMP.core.XYZ(p).get_coordinates()
                radius = IMP.core.XYZR(p).get_radius()
                geometric_center[0] += xyz[0]
                geometric_center[1] += xyz[1]
                geometric_center[2] += xyz[2]
                atom_count += 1
                particle_infos_for_pdb.append((xyz,
                                               IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind, radius))

            else:
                # generic coarse bead: use the central residue of its range
                rt = IMP.atom.ResidueType("BEA")
                resindexes = IMP.pmi.tools.get_residue_indexes(p)
                resind = resindexes[len(resindexes) // 2]
                xyz = IMP.core.XYZ(p).get_coordinates()
                radius = IMP.core.XYZR(p).get_radius()
                geometric_center[0] += xyz[0]
                geometric_center[1] += xyz[1]
                geometric_center[2] += xyz[2]
                atom_count += 1
                particle_infos_for_pdb.append((xyz,
                                               IMP.atom.AT_CA, rt, self.dictchain[name][protname], resind, radius))

        geometric_center = (geometric_center[0] / atom_count,
                            geometric_center[1] / atom_count,
                            geometric_center[2] / atom_count)

        particle_infos_for_pdb = sorted(particle_infos_for_pdb, key=operator.itemgetter(3, 4))

        return (particle_infos_for_pdb, geometric_center)
    def write_pdbs(self, appendmode=True):
        for pdb in self.dictionary_pdbs.keys():
            self.write_pdb(pdb, appendmode)
    def init_pdb_best_scoring(self,
                              suffix,
                              prot,
                              nbestscoring,
                              replica_exchange=False):
        # save only the nbestscoring best-scoring models as suffix.<rank>.pdb
        self.suffixes.append(suffix)
        self.replica_exchange = replica_exchange
        if not self.replica_exchange:
            # common usage: the score list is kept in memory
            self.best_score_list = []
        else:
            # in replica exchange the score list is shared between replicas
            # through a file
            self.best_score_file_name = "best.scores.rex.py"
            self.best_score_list = []
            best_score_file = open(self.best_score_file_name, "w")
            best_score_file.write(
                "self.best_score_list=" + str(self.best_score_list))
            best_score_file.close()

        self.nbestscoring = nbestscoring
        for i in range(self.nbestscoring):
            name = suffix + "." + str(i) + ".pdb"
            flpdb = open(name, 'w')
            flpdb.close()
            self.dictionary_pdbs[name] = prot
            self.dictchain[name] = {}
            for n, i in enumerate(self.dictionary_pdbs[name].get_children()):
                self.dictchain[name][i.get_name()] = self.chainids[n]
    def write_pdb_best_scoring(self, score):
        if self.nbestscoring is None:
            print("Output.write_pdb_best_scoring: init_pdb_best_scoring not run")

        # update the score list
        if self.replica_exchange:
            # read self.best_score_list back from the shared file
            exec(open(self.best_score_file_name).read())

        if len(self.best_score_list) < self.nbestscoring:
            self.best_score_list.append(score)
            self.best_score_list.sort()
            index = self.best_score_list.index(score)
            for suffix in self.suffixes:
                for i in range(len(self.best_score_list) - 2, index - 1, -1):
                    oldname = suffix + "." + str(i) + ".pdb"
                    newname = suffix + "." + str(i + 1) + ".pdb"
                    # os.rename fails on Windows if the destination exists
                    if os.path.exists(newname):
                        os.remove(newname)
                    os.rename(oldname, newname)
                filetoadd = suffix + "." + str(index) + ".pdb"
                self.write_pdb(filetoadd, appendmode=False)

        else:
            if score < self.best_score_list[-1]:
                self.best_score_list.append(score)
                self.best_score_list.sort()
                self.best_score_list.pop(-1)
                index = self.best_score_list.index(score)
                for suffix in self.suffixes:
                    for i in range(len(self.best_score_list) - 1, index - 1, -1):
                        oldname = suffix + "." + str(i) + ".pdb"
                        newname = suffix + "." + str(i + 1) + ".pdb"
                        os.rename(oldname, newname)
                    filenametoremove = suffix + \
                        "." + str(self.nbestscoring) + ".pdb"
                    os.remove(filenametoremove)
                    filetoadd = suffix + "." + str(index) + ".pdb"
                    self.write_pdb(filetoadd, appendmode=False)

        if self.replica_exchange:
            # write the updated list back to the shared file
            best_score_file = open(self.best_score_file_name, "w")
            best_score_file.write(
                "self.best_score_list=" + str(self.best_score_list))
            best_score_file.close()
    def init_rmf(self, name, hierarchies, rs=None):
        rh = RMF.create_rmf_file(name)
        IMP.rmf.add_hierarchies(rh, hierarchies)
        if rs is not None:
            # optionally store restraints in the file (reconstructed)
            IMP.rmf.add_restraints(rh, rs)
        self.dictionary_rmfs[name] = rh

    def add_restraints_to_rmf(self, name, objectlist):
        for o in objectlist:
            try:
                rs = o.get_restraint_for_rmf()
            except AttributeError:
                rs = o.get_restraint()
            # rs is assumed to be an IMP.RestraintSet here (reconstructed call)
            IMP.rmf.add_restraints(
                self.dictionary_rmfs[name],
                rs.get_restraints())

    def add_geometries_to_rmf(self, name, objectlist):
        for o in objectlist:
            geos = o.get_geometries()
            IMP.rmf.add_geometries(self.dictionary_rmfs[name], geos)

    def add_particle_pair_from_restraints_to_rmf(self, name, objectlist):
        for o in objectlist:
            pps = o.get_particle_pairs()
            for pp in pps:
                IMP.rmf.add_geometry(
                    self.dictionary_rmfs[name],
                    IMP.core.EdgePairGeometry(pp))

    def write_rmf(self, name):
        # (reconstructed) save the current coordinates as a new RMF frame
        IMP.rmf.save_frame(self.dictionary_rmfs[name])
        self.dictionary_rmfs[name].flush()

    def close_rmf(self, name):
        del self.dictionary_rmfs[name]

    def write_rmfs(self):
        for rmf in self.dictionary_rmfs.keys():
            self.write_rmf(rmf)
    def init_stat(self, name, listofobjects):
        if self.ascii:
            flstat = open(name, 'w')
            flstat.close()
        else:
            flstat = open(name, 'wb')
            flstat.close()

        # check that all objects in the list have a get_output method
        for l in listofobjects:
            if not "get_output" in dir(l):
                raise ValueError(
                    "Output: object %s doesn't have get_output() method" % str(l))
        self.dictionary_stats[name] = listofobjects

    def set_output_entry(self, key, value):
        self.initoutput.update({key: value})

    def write_stat(self, name, appendmode=True):
        output = self.initoutput
        for obj in self.dictionary_stats[name]:
            d = obj.get_output()
            # remove the "_" entries, which are internal fields
            dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
            output.update(dfiltered)

        if appendmode:
            writeflag = 'a'
        else:
            writeflag = 'w'

        if self.ascii:
            flstat = open(name, writeflag)
            flstat.write("%s \n" % output)
            flstat.close()
        else:
            flstat = open(name, writeflag + 'b')
            pickle.dump(output, flstat, 2)
            flstat.close()

    def write_stats(self):
        for stat in self.dictionary_stats.keys():
            self.write_stat(stat)
    def get_stat(self, name):
        output = {}
        for obj in self.dictionary_stats[name]:
            output.update(obj.get_output())
        return output
    def write_test(self, name, listofobjects):
        flstat = open(name, 'w')
        output = self.initoutput
        for l in listofobjects:
            if not "get_test_output" in dir(l) and not "get_output" in dir(l):
                raise ValueError(
                    "Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
        self.dictionary_stats[name] = listofobjects

        for obj in self.dictionary_stats[name]:
            try:
                d = obj.get_test_output()
            except AttributeError:
                d = obj.get_output()
            # remove the "_" entries, which are internal fields
            dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
            output.update(dfiltered)

        flstat.write("%s \n" % output)
        flstat.close()
    def test(self, name, listofobjects):
        from numpy.testing import assert_approx_equal as aae
        output = self.initoutput
        for l in listofobjects:
            if not "get_test_output" in dir(l) and not "get_output" in dir(l):
                raise ValueError(
                    "Output: object %s doesn't have get_output() or get_test_output() method" % str(l))
        for obj in listofobjects:
            try:
                output.update(obj.get_test_output())
            except AttributeError:
                output.update(obj.get_output())

        # read back the file written by write_test(); each line is a repr()
        # of the output dictionary (reading loop reconstructed)
        flstat = open(name, 'r')
        test_dict = {}
        for line in flstat.readlines():
            test_dict.update(eval(line))
        flstat.close()

        for k in test_dict:
            if k in output:
                old_value = str(test_dict[k])
                new_value = str(output[k])

                if test_dict[k] != output[k]:
                    if len(old_value) < 50 and len(new_value) < 50:
                        print(str(k) + ": test failed, old value: " + old_value + " new value " + new_value)
                    else:
                        print(str(k) + ": test failed, omitting results (too long)")
            else:
                print(str(k) + " from old objects (file " + str(name) + ") not in new objects")
    def get_environment_variables(self):
        import os
        return str(os.environ)

    def get_versions_of_relevant_modules(self):
        import IMP
        versions = {}
        versions["IMP_VERSION"] = IMP.get_module_version()
        # (the IMP/PMI entries above were reconstructed; the optional-module
        # blocks below follow the surviving pattern)
        try:
            import IMP.pmi
            versions["PMI_VERSION"] = IMP.pmi.get_module_version()
        except (ImportError):
            pass
        try:
            import IMP.isd2
            versions["ISD2_VERSION"] = IMP.isd2.get_module_version()
        except (ImportError):
            pass
        try:
            import IMP.isd_emxl
            versions["ISD_EMXL_VERSION"] = IMP.isd_emxl.get_module_version()
        except (ImportError):
            pass
        return versions
    def init_stat2(self,
                   name,
                   listofobjects,
                   extralabels=None,
                   listofsummedobjects=None):
        # listofsummedobjects must have the form
        # [([obj1, obj2, ...], label), ...]: the _TotalScore of the objects
        # in each tuple is summed up and reported under the given label
        if listofsummedobjects is None:
            listofsummedobjects = []
        if extralabels is None:
            extralabels = []

        flstat = open(name, 'w')
        output = {}
        stat2_keywords = {"STAT2HEADER": "STAT2HEADER"}
        stat2_keywords.update(
            {"STAT2HEADER_ENVIRON": str(self.get_environment_variables())})
        stat2_keywords.update(
            {"STAT2HEADER_IMP_VERSIONS": str(self.get_versions_of_relevant_modules())})
        stat2_inverse = {}

        for l in listofobjects:
            if not "get_output" in dir(l):
                raise ValueError(
                    "Output: object %s doesn't have get_output() method" % str(l))
            d = l.get_output()
            # remove the "_" entries, which are internal fields
            dfiltered = dict((k, v) for k, v in d.items() if k[0] != "_")
            output.update(dfiltered)

        # check that the objects to be summed have a _TotalScore entry
        for l in listofsummedobjects:
            for t in l[0]:
                if not "get_output" in dir(t):
                    raise ValueError(
                        "Output: object %s doesn't have get_output() method" % str(t))
                if "_TotalScore" not in t.get_output():
                    raise ValueError(
                        "Output: object %s doesn't have _TotalScore entry to be summed" % str(t))
            output.update({l[1]: 0.0})

        for k in extralabels:
            output.update({k: 0.0})

        # map every key to a numeric column index; the header line stores
        # this mapping
        for n, k in enumerate(output):
            stat2_keywords.update({n: k})
            stat2_inverse.update({k: n})

        flstat.write("%s \n" % stat2_keywords)
        flstat.close()
        self.dictionary_stats2[name] = (
            listofobjects,
            stat2_inverse,
            listofsummedobjects,
            extralabels)
    def write_stat2(self, name, appendmode=True):
        output = {}
        (listofobjects, stat2_inverse, listofsummedobjects,
         extralabels) = self.dictionary_stats2[name]

        # write the objects
        for obj in listofobjects:
            od = obj.get_output()
            dfiltered = dict((k, v) for k, v in od.items() if k[0] != "_")
            for k in dfiltered:
                output.update({stat2_inverse[k]: od[k]})

        # write the summed objects
        for l in listofsummedobjects:
            partial_score = 0.0
            for t in l[0]:
                d = t.get_output()
                partial_score += float(d["_TotalScore"])
            output.update({stat2_inverse[l[1]]: str(partial_score)})

        # write the extra labels
        for k in extralabels:
            if k in self.initoutput:
                output.update({stat2_inverse[k]: self.initoutput[k]})
            else:
                output.update({stat2_inverse[k]: "None"})

        if appendmode:
            writeflag = 'a'
        else:
            writeflag = 'w'

        flstat = open(name, writeflag)
        flstat.write("%s \n" % output)
        flstat.close()

    def write_stats2(self):
        for stat in self.dictionary_stats2.keys():
            self.write_stat2(stat)
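
# Hedged usage sketch (not part of the original API): how an Output instance
# is typically driven from a modeling script. "root_hier" (an IMP.atom
# hierarchy) and "monitored_objects" (objects exposing get_output()) are
# hypothetical stand-ins supplied by the caller; the score below is a
# placeholder.
def _example_output_usage(root_hier, monitored_objects, nframes=10):
    output = Output(ascii=True)
    output.init_pdb("models.pdb", root_hier)            # one growing multi-MODEL PDB
    output.init_pdb_best_scoring("best", root_hier, 5)  # keep the 5 best-scoring models
    output.init_stat2("stat.out", monitored_objects)    # indexed stat file with header line
    for frame in range(nframes):
        # ... sampling code would move the particles here ...
        score = float(frame)
        output.write_pdb("models.pdb")
        output.write_pdb_best_scoring(score)
        output.write_stat2("stat.out")
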
614 """A class for reading stat files"""
615 def __init__(self, filename):
616 self.filename = filename
621 if not self.filename
is None:
622 f = open(self.filename,
"r")
624 raise ValueError(
"No file name provided. Use -h for help")
627 for line
in f.readlines():
629 self.klist = list(d.keys())
631 if "STAT2HEADER" in self.klist:
634 if "STAT2HEADER" in str(k):
640 for k
in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
642 for k
in sorted(stat2_dict.items(), key=operator.itemgetter(1))]
643 self.invstat2_dict = {}
645 self.invstat2_dict.update({stat2_dict[k]: k})
    def get_keys(self):
        return self.klist

    def show_keys(self, ncolumns=2, truncate=65):
        IMP.pmi.tools.print_multicolumn(self.get_keys(), ncolumns, truncate)
    def get_fields(self, fields, filtertuple=None, filterout=None, get_every=1):
        '''
        Get the desired field names, and return a dictionary.

        @param fields desired field names
        @param filterout specify if you want to "grep" out something from
               the file, so that it is faster
        @param filtertuple a tuple that contains
               ("TheKeyToBeFiltered",relationship,value)
               where relationship = "<", "==", or ">"
        @param get_every read only every Nth line from the file
        '''

        outdict = {}
        for field in fields:
            outdict[field] = []

        # read the file line by line
        f = open(self.filename, "r")
        line_number = 0

        for line in f.readlines():
            if not filterout is None:
                if filterout in line:
                    continue
            line_number += 1

            if line_number % get_every != 0:
                continue
            try:
                d = eval(line)
            except:
                print("# Warning: skipped line number " + str(line_number) + " not a valid line")
                continue

            if self.isstat1:

                if not filtertuple is None:
                    keytobefiltered = filtertuple[0]
                    relationship = filtertuple[1]
                    value = filtertuple[2]
                    if relationship == "<":
                        if float(d[keytobefiltered]) >= value:
                            continue
                    if relationship == ">":
                        if float(d[keytobefiltered]) <= value:
                            continue
                    if relationship == "==":
                        if float(d[keytobefiltered]) != value:
                            continue
                [outdict[field].append(d[field]) for field in fields]

            elif self.isstat2:
                # the first line of a stat2 file is the header
                if line_number == 1:
                    continue

                if not filtertuple is None:
                    keytobefiltered = filtertuple[0]
                    relationship = filtertuple[1]
                    value = filtertuple[2]
                    if relationship == "<":
                        if float(d[self.invstat2_dict[keytobefiltered]]) >= value:
                            continue
                    if relationship == ">":
                        if float(d[self.invstat2_dict[keytobefiltered]]) <= value:
                            continue
                    if relationship == "==":
                        if float(d[self.invstat2_dict[keytobefiltered]]) != value:
                            continue

                [outdict[field].append(d[self.invstat2_dict[field]])
                 for field in fields]

        f.close()
        return outdict
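
# Hedged usage sketch (not part of the original API): reading a stat file
# produced by Output above and extracting one field. The field name
# "SimplifiedModel_Total_Score_None" is a hypothetical example; use
# show_keys() to list what a given file actually contains.
def _example_processoutput_usage(stat_file_name):
    po = ProcessOutput(stat_file_name)
    po.show_keys(ncolumns=1)
    # keep only frames with total score below 1000, reading every 10th line
    fields = po.get_fields(["SimplifiedModel_Total_Score_None"],
                           filtertuple=("SimplifiedModel_Total_Score_None", "<", 1000.0),
                           get_every=10)
    return fields
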
def plot_fields(fields, framemin=None, framemax=None):
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=4)
    fig, axs = plt.subplots(nrows=len(fields))
    fig.set_size_inches(10.5, 5.5 * len(fields))
    plt.rc('axes', color_cycle=['r'])

    for n, key in enumerate(fields):
        if framemin is None:
            framemin = 0
        if framemax is None:
            framemax = len(fields[key])
        x = list(range(framemin, framemax))
        y = [float(y) for y in fields[key][framemin:framemax]]
        if len(fields) > 1:
            axs[n].plot(x, y)
            axs[n].set_title(key, size="xx-large")
            axs[n].tick_params(labelsize=18, pad=10)
        else:
            axs.plot(x, y)
            axs.set_title(key, size="xx-large")
            axs.tick_params(labelsize=18, pad=10)

    # tweak spacing between subplots to prevent labels from overlapping
    plt.subplots_adjust(hspace=0.3)
    plt.show()
def plot_field_histogram(
        name, values_lists, valuename=None, bins=40, colors=None, format="png",
        reference_xline=None, yplotrange=None, xplotrange=None, normalized=True,
        leg_names=None):
    '''Plot a list of histograms from a value list.
    @param name the name of the plot
    @param values_lists the list of list of values eg: [[...],[...],[...]]
    @param valuename the y-label
    @param bins the number of bins
    @param colors If None, will use rainbow. Else will use specific list
    @param format output format
    @param reference_xline plot a reference line parallel to the y-axis
    @param yplotrange the range for the y-axis
    @param normalized whether the histogram is normalized or not
    @param leg_names names for the legend
    '''
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    import matplotlib.cm as cm
    fig = plt.figure(figsize=(18.0, 9.0))

    if colors is None:
        colors = cm.rainbow(np.linspace(0, 1, len(values_lists)))
    for nv, values in enumerate(values_lists):
        col = colors[nv]
        if leg_names is not None:
            label = leg_names[nv]
        else:
            label = str(nv)
        plt.hist(
            [float(y) for y in values],
            bins=bins,
            color=col,
            label=label,
            # 'normed' is the pre-matplotlib-2 name of the 'density' argument
            normed=normalized, histtype='step', lw=4,
            )

    plt.tick_params(labelsize=12, pad=10)
    if valuename is None:
        plt.xlabel(name, size="xx-large")
    else:
        plt.xlabel(valuename, size="xx-large")
    plt.ylabel("Frequency", size="xx-large")

    if not yplotrange is None:
        plt.ylim(yplotrange)
    if not xplotrange is None:
        plt.xlim(xplotrange)

    plt.legend(loc=2)

    if not reference_xline is None:
        plt.axvline(
            reference_xline,
            color='red',
            linestyle='dashed',
            linewidth=1)

    plt.savefig(name + "." + format, dpi=150, transparent=True)
    plt.show()
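
# Hedged usage sketch (not part of the original API): histogramming one score
# field extracted with ProcessOutput.get_fields(). "score_values" is whatever
# list of numbers the caller passes in.
def _example_plot_field_histogram(score_values):
    plot_field_histogram("total_score", [score_values],
                         valuename="Total score", bins=50,
                         format="png", normalized=True)
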
def plot_fields_box_plots(name, values, positions, frequencies=None,
                          valuename="None", positionname="None", xlabels=None):
    '''
    Plot time series as boxplots.
    fields is a list of time series, positions are the x-values
    valuename is the y-label, positionname is the x-label
    '''
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from matplotlib.patches import Polygon

    bps = []
    fig = plt.figure(figsize=(float(len(positions)) / 2, 5.0))
    fig.canvas.set_window_title(name)

    ax1 = fig.add_subplot(111)

    plt.subplots_adjust(left=0.2, right=0.990, top=0.95, bottom=0.4)

    bps.append(plt.boxplot(values, notch=0, sym='', vert=1,
                           whis=1.5, positions=positions))

    plt.setp(bps[-1]['boxes'], color='black', lw=1.5)
    plt.setp(bps[-1]['whiskers'], color='black', ls=":", lw=1.5)

    if frequencies is not None:
        ax1.plot(positions, frequencies, 'k.', alpha=0.5, markersize=20)

    # set the x labels
    if not xlabels is None:
        ax1.set_xticklabels(xlabels)
    plt.xticks(rotation=90)
    plt.xlabel(positionname)
    plt.ylabel(valuename)

    plt.savefig(name, dpi=150)
    plt.show()
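
# Hedged usage sketch: box-plotting per-position samples. "values_per_position"
# is a hypothetical list of lists (one list of sampled values per x position).
def _example_plot_fields_box_plots(values_per_position):
    positions = list(range(1, len(values_per_position) + 1))
    plot_fields_box_plots("boxplots.png", values_per_position, positions,
                          valuename="score", positionname="residue index")
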
def plot_xy_data(x, y, title=None, out_fn=None, display=True, set_plot_yaxis_range=None,
                 xlabel=None, ylabel=None):
    import matplotlib as mpl
    import matplotlib.pyplot as plt

    plt.rc('lines', linewidth=2)

    fig, ax = plt.subplots(nrows=1)
    fig.set_size_inches(8, 4.5)
    if title is not None:
        fig.canvas.set_window_title(title)

    ax.plot(x, y, color='r')
    if set_plot_yaxis_range is not None:
        x1, x2, y1, y2 = plt.axis()
        y1 = set_plot_yaxis_range[0]
        y2 = set_plot_yaxis_range[1]
        plt.axis((x1, x2, y1, y2))
    if title is not None:
        ax.set_title(title)
    if xlabel is not None:
        ax.set_xlabel(xlabel)
    if ylabel is not None:
        ax.set_ylabel(ylabel)
    if out_fn is not None:
        plt.savefig(out_fn + ".pdf")
    if display:
        plt.show()
def plot_scatter_xy_data(x, y, labelx="None", labely="None",
                         xmin=None, xmax=None, ymin=None, ymax=None,
                         savefile=False, filename="None.eps", alpha=0.75):
    import matplotlib as mpl
    import matplotlib.pyplot as plt
    from matplotlib import rc

    rc('font', **{'family': 'sans-serif', 'sans-serif': ['Helvetica']})

    fig, axs = plt.subplots(1)
    axs0 = axs

    axs0.set_xlabel(labelx, size="xx-large")
    axs0.set_ylabel(labely, size="xx-large")
    axs0.tick_params(labelsize=18, pad=10)

    plot2 = []
    plot2.append(axs0.plot(x, y, 'o', color='k', lw=2, ms=0.1, alpha=alpha, c="w"))

    fig.set_size_inches(8.0, 8.0)
    fig.subplots_adjust(left=0.161, right=0.850, top=0.95, bottom=0.11)
    if (not ymin is None) and (not ymax is None):
        axs0.set_ylim(ymin, ymax)
    if (not xmin is None) and (not xmax is None):
        axs0.set_xlim(xmin, xmax)

    if savefile:
        fig.savefig(filename, dpi=300)
    plt.show()
def get_graph_from_hierarchy(hier):
    graph = []
    depth_dict = {}
    depth = 0
    (graph, depth, depth_dict) = recursive_graph(
        hier, graph, depth, depth_dict)

    # label only the shallowest nodes
    node_labels_dict = {}
    for key in depth_dict:
        node_size_dict = 10 / depth_dict[key]
        if depth_dict[key] < 3:
            node_labels_dict[key] = key
        else:
            node_labels_dict[key] = ""
    draw_graph(graph, labels_dict=node_labels_dict)
def recursive_graph(hier, graph, depth, depth_dict):
    # (the name and children lookups were reconstructed from the
    # IMP.atom.Hierarchy API)
    depth = depth + 1
    nameh = IMP.atom.Hierarchy(hier).get_name()
    index = str(hier.get_particle().get_index())
    name1 = nameh + "|#" + index
    depth_dict[name1] = depth

    children = IMP.atom.Hierarchy(hier).get_children()

    if len(children) == 1 or children is None:
        depth = depth - 1
        return (graph, depth, depth_dict)

    else:
        for c in children:
            if len(IMP.atom.Hierarchy(c).get_children()) != 0:
                (graph, depth, depth_dict) = recursive_graph(
                    c, graph, depth, depth_dict)
            else:
                index = str(c.get_particle().get_index())
                namec = nameh + "|#" + index
                graph.append((name1, namec))

        depth = depth - 1
        return (graph, depth, depth_dict)
def draw_graph(graph, labels_dict=None, graph_layout='spring',
               node_size=5, node_color=None, node_alpha=0.3,
               node_text_size=11, fixed=None, pos=None,
               edge_color='blue', edge_alpha=0.3, edge_thickness=1,
               validation_edges=None,
               text_font='sans-serif',
               out_filename=None):
    import matplotlib as mpl
    import networkx as nx
    import matplotlib.pyplot as plt
    from math import sqrt, pi

    # create the networkx graph and add the edges
    G = nx.Graph()
    if type(edge_thickness) is list:
        for edge, weight in zip(graph, edge_thickness):
            G.add_edge(edge[0], edge[1], weight=weight)
    else:
        for edge in graph:
            G.add_edge(edge[0], edge[1])

    if node_color == None:
        node_color_rgb = (0, 0, 0)
        node_color_hex = "000000"
    else:
        # node_color is assumed to be a dictionary of hex color strings;
        # ColorChange converts them to rgb tuples (reconstructed)
        cc = IMP.pmi.tools.ColorChange()
        tmpcolor_rgb = []
        tmpcolor_hex = []
        for node in G.nodes():
            cctuple = cc.rgb(node_color[node])
            tmpcolor_rgb.append((cctuple[0] / 255, cctuple[1] / 255, cctuple[2] / 255))
            tmpcolor_hex.append(node_color[node])
        node_color_rgb = tmpcolor_rgb
        node_color_hex = tmpcolor_hex

    # node sizes can be passed as a dictionary keyed by node
    if type(node_size) is dict:
        tmpsize = []
        for node in G.nodes():
            size = sqrt(node_size[node]) / pi * 10.0
            tmpsize.append(size)
        node_size = tmpsize

    # note: set_node_attributes/set_edge_attributes below use the
    # networkx 1.x argument order (graph, attribute name, values)
    for n, node in enumerate(G.nodes()):
        color = node_color_hex[n]
        if type(node_size) is list:
            size = node_size[n]
        else:
            size = node_size
        nx.set_node_attributes(G,
                               "graphics", {node: {'type': 'ellipse', 'w': size, 'h': size,
                                                   'fill': '#' + color, 'label': node}})
        nx.set_node_attributes(G,
                               "LabelGraphics", {node: {'type': 'text', 'text': node,
                                                        'color': '#000000', 'visible': 'true'}})

    for edge in G.edges():
        nx.set_edge_attributes(G,
                               "graphics", {edge: {'width': 1, 'fill': '#000000'}})

    if validation_edges is not None:
        for ve in validation_edges:
            if (ve[0], ve[1]) in G.edges():
                print("found forward")
                nx.set_edge_attributes(G,
                                       "graphics", {ve: {'width': 1, 'fill': '#00FF00'}})
            elif (ve[1], ve[0]) in G.edges():
                print("found backward")
                nx.set_edge_attributes(G,
                                       "graphics", {(ve[1], ve[0]): {'width': 1, 'fill': '#00FF00'}})
            else:
                G.add_edge(ve[0], ve[1])
                nx.set_edge_attributes(G,
                                       "graphics", {ve: {'width': 1, 'fill': '#FF0000'}})

    # compute the node positions
    if graph_layout == 'spring':
        graph_pos = nx.spring_layout(G, k=1.0 / 8.0, fixed=fixed, pos=pos)
    elif graph_layout == 'spectral':
        graph_pos = nx.spectral_layout(G)
    elif graph_layout == 'random':
        graph_pos = nx.random_layout(G)
    else:
        graph_pos = nx.shell_layout(G)

    # draw the graph
    nx.draw_networkx_nodes(G, graph_pos, node_size=node_size,
                           alpha=node_alpha, node_color=node_color_rgb)
    nx.draw_networkx_edges(G, graph_pos, width=edge_thickness,
                           alpha=edge_alpha, edge_color=edge_color)
    nx.draw_networkx_labels(
        G, graph_pos, labels=labels_dict, font_size=node_text_size,
        font_family=text_font)

    if out_filename is not None:
        plt.savefig(out_filename)
    nx.write_gml(G, 'out.gml')
    plt.show()
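
# Hedged usage sketch: draw_graph() works on any list of (node, node) edge
# tuples, not only on graphs produced by get_graph_from_hierarchy(). The call
# assumes the networkx 1.x attribute API used above; it saves the figure,
# writes out.gml, and opens an interactive window.
def _example_draw_graph():
    edges = [("A", "B"), ("B", "C"), ("C", "A")]
    draw_graph(edges, labels_dict={"A": "A", "B": "B", "C": "C"},
               graph_layout='spring', out_filename="toy_graph.png")
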
def draw_table_example():
    # NOTE: the enclosing function definition was lost; this hypothetical
    # name and zero-argument signature only keep the surviving body intact.
    # Render an example HTML table with d3js inside an IPython notebook.
    from ipyD3 import d3object
    from IPython.display import display

    d3 = d3object(width=800,
                  title='Example table with d3js',
                  desc='An example table created with d3js with data generated with Python.')
    # example data: one product per row, one month per column
    # (only the last row of the original example data survived)
    data = [
        [72.0, 60.0, 60.0, 10.0, 120.0, 172.0, 1092.0, 675.0, 408.0, 360.0, 156.0, 100.0]]
    data = [list(i) for i in zip(*data)]
    sRows = [['January',
              'February',
              'March',
              'April',
              'May',
              'June',
              'July',
              'August',
              'September',
              'October',
              'November',
              'December']]
    sColumns = [['Prod {0}'.format(i) for i in range(1, 9)],
                [None, '', None, None, 'Group 1', None, None, 'Group 2']]
    # (some keyword arguments of the original addSimpleTable call were lost)
    d3.addSimpleTable(data,
                      fontSizeCells=[12, ],
                      sRows=sRows,
                      sColumns=sColumns,
                      sRowsMargins=[5, 50, 0],
                      sColsMargins=[5, 20, 10],
                      addOutsideBorders=-1,
                      )
    html = d3.render(mode=['html', 'show'])
    return html