#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Compare the gene records in genemark.gff3 against the fragments in 1.txt and
# print the fragment ID together with the gene attributes whenever the two
# intervals overlap on the same chromosome.
infile2 = open('genemark.gff3', 'r')
infile1 = set(line1.strip() for line1 in open('1.txt', 'r'))
for line in infile2:
    line = line.strip().split()
    if line[2] == 'gene':
        chr, start, end = line[0], int(line[3]), int(line[4])
        for line1 in infile1:
            line1 = line1.split()
            chr1, start1, end1 = line1[1], int(line1[2]), int(line1[3])
            if chr1 == chr:
                if start1 < start < end1:
                    print line1[0], line[-1]
                if start1 < end < end1:
                    print line1[0], line[-1]
                if start1 > start and end > end1:
                    print line1[0], line[-1]
genemark.gff3 looks like this:
chr1D GeneMark.hmm gene 2705930 2711118 . + . ID=1903228_g;Name=1903228_g
chr1D GeneMark.hmm mRNA 2705930 2711118 . + . ID=1903228_t;Name=1903228_t;Parent=1903228_g
1.txt looks like this:
UN011157 chr1D 2705329 2706342 98.4 95.0 972 30 21 0
UN003843 chr1D 2705681 2721144 61.4 97.4 633 12 5 0
A Baidu Cloud link to the original files is attached for anyone interested in trying them:
Click to download (password: enu8)
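For a quick sanity check of the logic above, here is a minimal sketch (not part of the original script; check_overlap is a helper introduced only for illustration) that applies the same three conditions to the sample gene and the two sample rows of 1.txt; both fragments match:

# Sketch only: replays the three overlap conditions on the sample data above.
def check_overlap(start, end, start1, end1):
    return (start1 < start < end1) or (start1 < end < end1) or (start1 > start and end > end1)

gene = ('chr1D', 2705930, 2711118, 'ID=1903228_g;Name=1903228_g')
fragments = [('UN011157', 'chr1D', 2705329, 2706342),
             ('UN003843', 'chr1D', 2705681, 2721144)]

for uno, chrom, s1, e1 in fragments:
    if chrom == gene[0] and check_overlap(gene[1], gene[2], s1, e1):
        print(uno, gene[3])   # both sample fragments overlap this gene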
Combining the answers from everyone below, I now recommend two approaches.
First: based on the answers from @ferstar and @用筹兮用严, i.e. the parallel version.
#!/usr/bin/env python
# encoding: utf-8
from collections import defaultdict
from multiprocessing import Pool, cpu_count
from functools import partial

def find_sth(f2, f1=None):
    start, end = int(f2[3]), int(f2[4])
    for uno1, start1, end1 in f1[f2[0]]:
        if (start1 <= start and start <= end1) or (start1 <= end and end <= end1) or (start1 >= start and end >= end1):
            with open("out.txt", "a") as fh:
                fh.write(uno1 + "\t" + f2[-1] + "\n")
            #print(uno1, f2[-1])

def main():
    with open('1.txt', 'r') as f1:
        infile1 = defaultdict(set)
        for uno1, chr1, start1, end1, *others in map(str.split, f1):
            infile1[chr1].add((uno1, int(start1), int(end1)))
    with open('genemark.gff3', 'r') as f2:
        infile2 = [x for x in map(str.split, f2) if x[2] == 'gene']
    pool = Pool(cpu_count())
    pool.map(partial(find_sth, f1=infile1), infile2)
    pool.close()
    pool.join()

if __name__ == "__main__":
    main()
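One caveat with this parallel version: every worker process appends to out.txt, so lines written concurrently can interleave. A minimal alternative sketch, under the assumption that the matches fit in memory, is to have each worker return its hits and let the parent write the file once (find_sth_collect is a hypothetical variant introduced here, not part of the answer):

# Sketch: return matches from each worker instead of appending to a shared file.
def find_sth_collect(f2, f1=None):
    start, end = int(f2[3]), int(f2[4])
    hits = []
    for uno1, start1, end1 in f1[f2[0]]:
        # overlap iff the intervals share at least one coordinate
        # (the union of the three conditions used above)
        if start1 <= end and end1 >= start:
            hits.append(uno1 + "\t" + f2[-1] + "\n")
    return hits

# In main(), in place of the pool.map() call above:
#   results = pool.map(partial(find_sth_collect, f1=infile1), infile2)
#   with open("out.txt", "w") as fh:
#       for hits in results:
#           fh.writelines(hits)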
Second: @citaret's version (single-core). On a single core it is no slower than the code above. The two originally produced slightly different results; the parallel version was more complete (this one was missing 73 records because of a boundary issue in the overlap test, and since I was not familiar with intervaltree I did not know how to fix it at first). The boundary issue has since been fixed and the two versions now produce exactly the same results. Perfect!
The code is as follows (see the note on the boundary handling after it):
from collections import defaultdict
from intervaltree import Interval, IntervalTree

with open('1.txt') as f:
    d1 = defaultdict(list)
    xs = map(lambda x: x.strip().split(), f)
    for x in xs:
        y = (x[0], int(x[2]), int(x[3]))
        d1[x[1]].append(y)

for k, v in d1.items():
    d1[k] = IntervalTree(Interval(s, e, u) for u, s, e in v)

with open('genemark.gff3') as f:
    for line in f:
        line = line.strip().split()
        if line[2] == 'gene':
            chr, start, end = line[0], int(line[3]), int(line[4])
            for start1, end1, un1 in d1[chr][start-1:end+1]:
                print(un1, line[-1])
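Why the query is widened to d1[chr][start-1:end+1]: intervaltree treats Interval(s, e) as half-open, so a fragment whose end coordinate equals a gene's start (coordinates in 1.txt and the GFF3 are inclusive) would be missed by a plain d1[chr][start:end] query. A small sketch with made-up coordinates:

# Sketch with invented coordinates, only to show the half-open semantics.
from intervaltree import Interval, IntervalTree

tree = IntervalTree([Interval(100, 200, 'UN_demo')])  # fragment covering 100..200 in the source data

print(tree[200:300])          # empty: Interval(100, 200) does not include point 200
print(tree[200 - 1:300 + 1])  # contains the fragment: widening by 1 recovers the touching case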
巴扎黑2017-04-18 09:18:12
There should not be much room left for performance optimization, but the code can be tidied up a little:
with open('1.txt', 'r') as f1, open('2.txt', 'r') as f2:
    lines1 = [_.strip().split() for _ in f1]
    for line2 in f2:
        line2 = line2.strip().split()
        if line2[2] != 'gene':
            continue
        chr2, start2, end2 = line2[0], int(line2[3]), int(line2[4])
        for line1 in lines1:
            chr1, start1, end1 = line1[1], int(line1[2]), int(line1[3])
            if chr1 == chr2 and (start1 < start2 < end1 or start1 < end2 < end1 or start1 > start2 and end2 > end1):
                print line1[0], line2[-1]
阿神2017-04-18 09:18:12
Here are two suggestions:
The code in the function is nested too deeply. You can reduce the nesting by returning as early as possible, and inside loops you can use continue to cut down the nesting level.
On performance:
for line1 in infile1:
    line1 = line1.split()
Re-splitting every line of file 1 on every pass through the outer loop is wasteful.
Here is my modified code:
#!/usr/bin/env python
# -*- coding: utf-8 -*-
infile2 = open('genemark.gff3', 'r')

# Index 1.txt by chromosome once, up front, instead of re-splitting it
# inside the gene loop.
infile1 = {}
for line1 in open('1.txt', 'r'):
    line1 = line1.strip().split()
    id, chr1, start1, end1 = line1[0], line1[1], int(line1[2]), int(line1[3])
    if chr1 not in infile1:
        infile1[chr1] = []
    infile1[chr1].append({"start": start1, "end": end1, "id": id})

for line in infile2:
    line = line.strip().split()
    if line[2] != 'gene':
        continue
    chr, start, end = line[0], int(line[3]), int(line[4])
    if chr not in infile1:
        continue
    for i in infile1[chr]:
        if i['start'] < start < i['end']:
            print i['id'], line[-1]
        if i['start'] < end < i['end']:
            print i['id'], line[-1]
        if i['start'] > start and end > i['end']:
            print i['id'], line[-1]
PHPz2017-04-18 09:18:12
Trade space for time: build a list from genemark.gff3 and a dictionary from 1.txt. Concrete implementation:
from collections import defaultdict

with open('genemark.gff3') as f:
    ls = f.readlines()
    xs = map(lambda x: x.strip().split(), ls)
    t2 = (x for x in xs if x[2] == 'gene')

with open('1.txt') as f:
    d1 = defaultdict(list)
    ls = f.readlines()
    xs = map(lambda x: x.strip().split(), ls)
    for x in xs:
        d1[x[1]].append(x)

for line in t2:
    chr, start, end = line[0], int(line[3]), int(line[4])
    if chr in d1:
        for line1 in d1[chr]:
            chr1, start1, end1 = line1[1], int(line1[2]), int(line1[3])
            if start1 < start < end1:
                print line1[0], line[-1]
            if start1 < end < end1:
                print line1[0], line[-1]
            if start1 > start and end > end1:
                print line1[0], line[-1]
Modified v2, which removes the int() calls from the inner loop and simplifies the output:
from collections import defaultdict

with open('1.txt') as f:
    d1 = defaultdict(list)
    xs = map(lambda x: x.strip().split(), f)
    for x in xs:
        y = (x[0], int(x[2]), int(x[3]))
        d1[x[1]].append(y)

with open('genemark.gff3') as f:
    for line in f:
        line = line.strip().split()
        chr, start, end = line[0], int(line[3]), int(line[4])
        for un1, start1, end1 in d1[chr]:
            if start < end1 and end > start1:
                print un1, line[-1]
v3: After studying the problem more carefully, the main loop amounts to finding, for each gene, every fragment in a per-chromosome set that intersects it. Look at the sizes of these sets first:
chr1D 7359
chr2D 9219
chr2B 9486
chr2A 8986
chr6B 7178
chr6A 6446
chr6D 6093
chr4A 7543
chr4B 7086
chr4D 6316
...
Each set holds 6000-10000 fragments, so a linear scan is inefficient. Consider using an intervaltree to quickly retrieve all fragments that intersect a given one:
from collections import defaultdict
from intervaltree import Interval, IntervalTree

with open('1.txt') as f:
    d1 = defaultdict(list)
    xs = map(lambda x: x.strip().split(), f)
    for x in xs:
        y = (x[0], int(x[2]), int(x[3]))
        d1[x[1]].append(y)

for k, v in d1.items():
    d1[k] = IntervalTree(Interval(s, e, u) for u, s, e in v)

with open('genemark.gff3') as f:
    for line in f:
        line = line.strip().split()
        if line[2] != 'gene': continue
        chr, start, end = line[0], int(line[3]), int(line[4])
        for start1, end1, un1 in d1[chr][start:end]:
            print un1, line[-1]
Timing results: building the intervaltree takes about 10 seconds, but the intersection step becomes roughly 100 times faster.
intervaltree reference: https://pypi.python.org/pypi/...
PHP中文网2017-04-18 09:18:12
from collections import defaultdict

with open('1.txt', 'r') as f1, open('genemark.gff3', 'r') as f2:
    infile1 = defaultdict(set)
    for uno1, chr1, start1, end1, *others in map(str.split, f1):
        infile1[chr1].add((uno1, int(start1), int(end1)))
    infile2 = filter(lambda x: x[2] == 'gene', map(str.split, f2))
    for chr, start, end, info in map(lambda x: (x[0], int(x[3]), int(x[4]), x[-1]), infile2):
        for uno1, start1, end1 in infile1[chr]:
            if start1 < start < end1 or start1 < end < end1 or (start1 > start and end > end1):
                print(uno1, info)
The parallelization proposed by @ferstar on the sixth floor is the right direction, but there is a problem with the code...
Changing it to:
#!/usr/bin/env python
# encoding: utf-8
from collections import defaultdict
from multiprocessing import Pool, cpu_count
from functools import partial

def find_sth(line, f1=None):
    line = line.split()
    if line[2] != 'gene':
        return
    start, end = int(line[3]), int(line[4])
    for uno1, start1, end1 in f1[line[0]]:
        if start1 < start < end1 or start1 < end < end1 or (start1 > start and end > end1):
            print(uno1, line[-1])

def main():
    pool = Pool(cpu_count())
    with open('1.txt', 'r') as f1, open('genemark.gff3', 'r') as f2:
        infile1 = defaultdict(set)
        for uno1, chr1, start1, end1, *others in map(str.split, f1):
            infile1[chr1].add((uno1, int(start1), int(end1)))
        pool.map(partial(find_sth, f1=infile1), f2)
        #fw.writelines(filter(lambda x: x is not None, map(lambda x: x.get(), [pool.apply_async(func, (line,)) for line in f2])))
    pool.close()
    pool.join()

if __name__ == "__main__":
    main()
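A small optional tweak, sketched under the assumption that the rest of the script stays as above: pool.map over the raw file object dispatches one line per task by default, which adds inter-process overhead; an explicit chunksize batches the lines (the value 1000 is arbitrary, not tuned):

# Sketch: identical to main() above except that pool.map is given an explicit
# chunksize, so each worker receives batches of lines instead of one per task.
def main_chunked():
    pool = Pool(cpu_count())
    with open('1.txt', 'r') as f1, open('genemark.gff3', 'r') as f2:
        infile1 = defaultdict(set)
        for uno1, chr1, start1, end1, *others in map(str.split, f1):
            infile1[chr1].add((uno1, int(start1), int(end1)))
        pool.map(partial(find_sth, f1=infile1), f2, chunksize=1000)
    pool.close()
    pool.join()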
伊谢尔伦2017-04-18 09:18:12
I noticed something quite interesting: everyone replied very enthusiastically, but as for the actual results, out of boredom I did a little testing.
Since the sample text provided in the question only has two lines, I duplicated 1.txt and genemark.gff3 to 4000 lines each:
(qiime) [ngs@cluster ~]$ wc -l 1.txt
4000 1.txt
(qiime) [ngs@cluster ~]$ wc -l genemark.gff3
4000 genemark.gff3
The scripts are named by the floor of the answer: the questioner's code is hi.py, the first-floor answer is hi1.py, and so on.
(qiime) [ngs@cluster ~]$ time python hi.py > hi.txt
real 0m0.049s
user 0m0.042s
sys 0m0.007s
(qiime) [ngs@cluster ~]$ wc -l hi.txt
6000 hi.txt
There seem to be duplicates.
(qiime) [ngs@cluster ~]$ time python hi1.py > hi1.txt
real 0m21.727s
user 0m21.171s
sys 0m0.547s
(qiime) [ngs@cluster ~]$ wc -l hi1.txt
8000000 hi1.txt
The duplication here is off the charts:
(qiime) [ngs@cluster ~]$ time python hi2.py > hi2.txt
real 0m16.326s
user 0m14.550s
sys 0m1.044s
(qiime) [ngs@cluster ~]$ wc -l hi2.txt
12000000 hi2.txt
(qiime) [ngs@cluster ~]$ time python hi3.py > hi3.txt
real 0m27.079s
user 0m26.281s
sys 0m0.786s
(qiime) [ngs@cluster ~]$ wc -l hi3.txt
12000000 hi3.txt
The third-floor answer produces the same result as the second floor, but takes more than 10 seconds longer.
(py3) [ngs@cluster ~]$ time python hi4.py > hi4.txt
real 0m0.074s
user 0m0.064s
sys 0m0.010s
(py3) [ngs@cluster ~]$ wc -l hi4.txt
4000 hi4.txt
Sure enough, communication brings progress, and the result is now correct.
Summary:
My plan was to make small changes to the fourth-floor code to turn it into a parallel version.
There was something wrong with what I wrote; @yongchixiyongyan has posted the corrected parallel code, and I will leave mine unchanged so that classmates who read this later can compare. Posting it directly (Python 3):
from collections import defaultdict
import multiprocessing

def find_sth(x):
    with open('1.txt', 'r') as f1:
        infile1 = defaultdict(set)
        for uno1, chr1, start1, end1, *others in map(str.split, f1):
            infile1[chr1].add((uno1, int(start1), int(end1)))
    chr, start, end, info = x[0], int(x[3]), int(x[4]), x[-1]
    for uno1, start1, end1 in infile1[chr]:
        if start1 < start < end1 or start1 < end < end or (start1 > start and end > end1):
            print(uno1, info)

def main():
    with open('genemark.gff3', 'r') as fh:
        lst = [x for x in map(str.split, fh) if x[2] == 'gene']
    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(find_sth, lst)
    pool.close()
    pool.join()

if __name__ == "__main__":
    main()
Now look at the running time:
(py3) [ngs@cluster ~]$ time python hi_new.py > hi_new.txt
real 0m3.033s
user 0m31.952s
sys 0m0.219s
(py3) [ngs@cluster ~]$ wc -l hi_new.txt
4000 hi_new.txt
It is clearly much slower in wall-clock time (4000 lines of data is only a few hundred KB), so the data set is too small to show any efficiency advantage from parallel processing. PS: I would guess that the real data the questioner is processing is at the MB or even GB scale; at that size, parallel processing is the way to go.
Source data and result files: http://pan.baidu.com/s/1hrSZQuS Password: u93n