forked from yarden/MISO
-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathsamples_utils.py
More file actions
202 lines (165 loc) · 6.87 KB
/
samples_utils.py
File metadata and controls
202 lines (165 loc) · 6.87 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
##
## Utilities for working with MISO output samples
##
from scipy import *
from numpy import *
import os
import glob
import hypothesis_test as ht
from parse_csv import *
#import mim_sampler as miso
#from samples_plotter import load_samples
def maxi(l):
    """
    Return the index of the first occurrence of the maximum value in l.

    Works on any indexable sequence with a length (lists, tuples,
    numpy arrays). Raises ValueError on an empty sequence, just as
    max() on an empty sequence would.
    """
    # max() over the indices, keyed by the element at each index,
    # returns the first index among ties -- the same answer as a
    # linear scan that stops at the first element equal to the max.
    return max(range(len(l)), key=lambda i: l[i])
def load_samples(samples_file):
    """
    Load a file with samples. Return the samples, header from the file,
    the sampled MAP estimate, and the sampled MAP's log score.
    """
    data, h = csv2array(samples_file, skiprows=1, raw_header=True)
    # The sampled MAP is the sample with the highest posterior value.
    map_idx = maxi(data['sampled_psi'])
    sampled_map = [float(v)
                   for v in data['sampled_psi'][map_idx].split(',')]
    sampled_map_log_score = data['log_score'][map_idx]
    # Each row of 'sampled_psi' is a comma-separated Psi vector;
    # parse all of them into an NxM float matrix.
    samples = array([[float(v) for v in row.split(',')]
                     for row in data['sampled_psi']])
    # Extract counts from the file's header
    counts_info = get_counts_from_header(h[0])
    return (samples, h, data['log_score'], sampled_map,
            sampled_map_log_score, counts_info)
def get_isoforms_from_header(samples_header):
    """
    Given header of a raw MISO samples file, return the isoforms
    field.
    """
    # Drop the leading comment marker '#', then take the first
    # tab-delimited field, which has the form "isoforms=[...]".
    first_field = samples_header[1:].split("\t")[0]
    # Keep only the text following the "isoforms=" key.
    bracketed = first_field.split("isoforms=")[1]
    # Strip the surrounding brackets, '[' and ']'.
    return bracketed[1:-1]
def get_counts_from_header(samples_header):
    """
    Given a header of a raw MISO samples file, return the
    counts= and assigned_counts= fields as a dictionary.

    If either field cannot be found, prints a warning and returns
    'n/a' placeholders for both.
    """
    # Drop the leading '#' comment marker; fields are tab-delimited.
    fields = samples_header[1:].split("\t")
    counts = {}
    for f in fields:
        # "assigned_counts=" does not start with "counts=", so the
        # two prefixes cannot be confused here.
        if f.startswith("counts="):
            counts['counts'] = f.split("=")[1]
        elif f.startswith("assigned_counts="):
            counts['assigned_counts'] = f.split("=")[1]
    if len(counts) != 2:
        # Parenthesized single-argument form works under both
        # Python 2 and Python 3.
        print("Warning: Could not get counts fields out of "
              "%s header." % (samples_header))
        counts = {'counts': 'n/a',
                  'assigned_counts': 'n/a'}
    return counts
def format_credible_intervals(event_name, samples,
                              confidence_level=0.95):
    """
    Returns a list of print-able credible intervals for an NxM samples
    matrix: [event_name, posterior_mean, ci_low, ci_high], with each
    numeric field formatted to two decimal places.

    Handles both the two isoform case (scalar fields) and the
    multi-isoform case (comma-separated fields, one value per isoform).
    """
    num_samples, num_isoforms = shape(samples)
    if num_isoforms > 2:
        # One credible interval per isoform.
        intervals = ht.compute_multi_iso_credible_intervals(
            samples,
            confidence_level=confidence_level)
        low_str = ",".join(["%.2f" % (ci[0]) for ci in intervals])
        high_str = ",".join(["%.2f" % (ci[1]) for ci in intervals])
        mean_str = ",".join(["%.2f" % (m) for m in mean(samples, 0)])
        return [event_name, mean_str, low_str, high_str]
    # Two-isoform case: report only the first isoform's Psi.
    interval = ht.compute_credible_intervals(samples)
    return [event_name,
            "%.2f" % (mean(samples, 0)[0]),
            "%.2f" % (interval[0]),
            "%.2f" % (interval[1])]
def summarize_sampler_results(samples_dir, summary_filename):
    """
    Given a set of samples from MISO, output a summary file.

    Writes one tab-delimited line per event: the event name, the
    posterior mean Psi and credible interval bounds, followed by the
    isoforms description and the counts fields parsed from each sample
    file's header.
    """
    header_fields = ["event_name", "miso_posterior_mean", "ci_low", "ci_high",
                     "isoforms", "counts", "assigned_counts"]
    print("Loading events from: %s" % (samples_dir))
    print("Writing summary to: %s" % (summary_filename))
    all_filenames = get_samples_dir_filenames(samples_dir)
    num_events = 0
    # 'with' guarantees the summary file is closed even if one of the
    # sample files fails to parse partway through.
    with open(summary_filename, 'w') as summary_file:
        summary_file.write("%s\n" % ("\t".join(header_fields)))
        for samples_filename in all_filenames:
            # The event name is the basename up to the first '.'
            basename = os.path.basename(samples_filename)
            event_name = basename.split('.')[0]
            # Load samples and header information
            samples_results = load_samples(samples_filename)
            samples = samples_results[0]
            header = samples_results[1][0]
            # Get counts information from header
            counts_info = samples_results[5]
            output_fields = format_credible_intervals(event_name, samples)
            # Add isoforms information to output fields
            output_fields.append(get_isoforms_from_header(header))
            # Add counts information to output fields
            output_fields.append(counts_info['counts'])
            output_fields.append(counts_info['assigned_counts'])
            summary_file.write("%s\n" % ("\t".join(output_fields)))
            num_events += 1
    print(" - Summarized a total of %d events." % (num_events))
def get_samples_dir_filenames(samples_dir):
    """
    Get all the filenames associated with a samples directory.
    Assumes samples directory have the following structure:
    - samples_dir
      - chr1
      - chr2
      ...
      - chrN
    Also collect files in samples_dir for backwards compatibility.

    Returns only regular files ending in ".miso" whose basename does
    not start with ".", each as a path prefixed with samples_dir.
    """
    directories = glob.glob(os.path.join(samples_dir, "*"))
    # Keep only numeric directories or directories beginning with "chr"
    directories = [d for d in directories
                   if os.path.basename(d).startswith("chr")
                   or os.path.basename(d).isdigit()]
    # Filenames indexed by chromosomes
    filenames = []
    for directory in directories:
        if os.path.isdir(directory):
            filenames.extend([os.path.join(directory, dname)
                              for dname in os.listdir(directory)])
    # Filenames in top-level directory, for backwards compatibility.
    # Join samples_dir only onto these bare names: the per-chromosome
    # entries above are already full paths, and re-joining them would
    # double the prefix whenever samples_dir is a relative path.
    filenames.extend([os.path.join(samples_dir, fname)
                      for fname in os.listdir(samples_dir)])
    # Remove directories and files beginning with "."
    filenames = [f for f in filenames
                 if not os.path.isdir(f)
                 and not os.path.basename(f).startswith(".")]
    # Remove files that do not end with the proper extension
    return [f for f in filenames
            if os.path.basename(f).endswith(".miso")]