A summary of ways to sort files in Python
Python offers two ways to sort file names: the built-in sorted(), which returns a new sorted list and by default compares the names as strings, character by character; and the list method sort(), which sorts the list in place and, given a suitable key function, can compare the names as numbers. To sort numerically, the file extension has to be stripped off ("masked out") first so that the remaining name can be converted to an integer.
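As a quick illustration (a minimal sketch with made-up file names), the same list sorts differently as strings and as numbers:

names = ['10.jpg', '2.jpg', '1.jpg']      # hypothetical file names
print(sorted(names))                      # ['1.jpg', '10.jpg', '2.jpg']  -- character-by-character order
names.sort(key=lambda x: int(x[:-4]))     # strip the 4-character '.jpg' suffix, compare as integers
print(names)                              # ['1.jpg', '2.jpg', '10.jpg']  -- numeric order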
(1) The test folder is ./img/; every file in it is an image whose name is a number followed by the '.jpg' extension (e.g. 1.jpg, 2.jpg, 10.jpg).
(2) Testing the built-in sorted(); the code is as follows:
import os

img_path = './img/'
img_list = sorted(os.listdir(img_path))  # file names sorted alphabetically, i.e. as strings
img_nums = len(img_list)
for i in range(img_nums):
    img_name = img_path + img_list[i]
    print(img_name)
Running this prints the file names in character (lexicographic) order, so for example '10.jpg' is listed before '2.jpg'.
(3) Testing the list method sort(); the code:
import os

img_path = './img/'
img_list = os.listdir(img_path)
img_list.sort(key=lambda x: int(x[:-4]))  # strip '.jpg' and sort the file names numerically
img_nums = len(img_list)
for i in range(img_nums):
    img_name = img_path + img_list[i]
    print(img_name)
Running this prints the file names in numeric order. Note that sort() is given an anonymous function as its key (key=lambda x: int(x[:-4])), whose job is to "mask out" the '.jpg' suffix: since '.jpg' is four characters long, the slice x[:-4] keeps everything up to the fourth character from the end, and the remaining name is converted to an integer for comparison. See Python's lambda expressions and sequence slicing for the details.
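If the extensions are not all exactly four characters long (say a mix of '.jpg' and '.jpeg'), a sketch based on os.path.splitext is more robust than a fixed slice; './img/' is just the example directory from above:

import os

img_path = './img/'
# os.path.splitext('10.jpeg') -> ('10', '.jpeg'), so the key works for any extension length
img_list = sorted(os.listdir(img_path), key=lambda x: int(os.path.splitext(x)[0]))
for name in img_list:
    print(os.path.join(img_path, name))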
Extended example: the two scripts below deal with text files that are too large to sort in memory. The first splits a (plain or gzip-compressed) file into bulks, has several worker processes extract, de-duplicate and sort the first space-separated field of each line, and writes the sorted bulks into a directory. The second merges those sorted chunk files with a heap and uses that to compute the difference between two such files.
import gzip
import os
from multiprocessing import Process, Queue, Pipe, current_process, freeze_support
from datetime import datetime

def sort_worker(input, output):
    # take raw text bulks off the input queue, keep the first space-separated
    # field of each line, de-duplicate and sort it, then hand it to the writer
    while True:
        lines = input.get().splitlines()
        element_set = {}
        for line in lines:
            if line.strip() == 'STOP':
                return
            try:
                element = line.split(' ')[0]
                if not element_set.get(element):
                    element_set[element] = ''
            except:
                pass
        sorted_element = sorted(element_set)
        output.put('\n'.join(sorted_element))

def write_worker(input, pre):
    # write each sorted bulk to its own numbered file under the directory <pre>/
    os.system('mkdir %s' % pre)
    i = 0
    while True:
        content = input.get()
        if content.strip() == 'STOP':
            return
        write_sorted_bulk(content, '%s/%s' % (pre, i))
        i += 1

def write_sorted_bulk(content, filename):
    f = open(filename, 'w')  # the Python 2 built-in file() is gone; open() does the same job
    f.write(content)
    f.close()

def split_sort_file(filename, num_sort=3, buf_size=65536*64*4):
    t = datetime.now()
    pre, ext = os.path.splitext(filename)
    if ext == '.gz':
        file_file = gzip.open(filename, 'rt')  # text mode, so reads return str like the plain open() below
    else:
        file_file = open(filename)
    bulk_queue = Queue(10)
    sorted_queue = Queue(10)
    NUM_SORT = num_sort
    sort_worker_pool = []
    for i in range(NUM_SORT):
        sort_worker_pool.append(Process(target=sort_worker, args=(bulk_queue, sorted_queue)))
        sort_worker_pool[i].start()
    NUM_WRITE = 1
    write_worker_pool = []
    for i in range(NUM_WRITE):
        write_worker_pool.append(Process(target=write_worker, args=(sorted_queue, pre)))
        write_worker_pool[i].start()
    buf = file_file.read(buf_size)
    sorted_count = 0
    while len(buf):
        # only hand over whole lines; carry the trailing partial line into the next bulk
        end_line = buf.rfind('\n')
        bulk_queue.put(buf[:end_line+1])
        sorted_count += 1
        if end_line != -1:
            buf = buf[end_line+1:] + file_file.read(buf_size)
        else:
            buf = file_file.read(buf_size)
    for i in range(NUM_SORT):
        bulk_queue.put('STOP')
    for i in range(NUM_SORT):
        sort_worker_pool[i].join()
    for i in range(NUM_WRITE):
        sorted_queue.put('STOP')
    for i in range(NUM_WRITE):
        write_worker_pool[i].join()
    print('elapsed ', datetime.now() - t)
    return sorted_count
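A minimal way to try the splitter (a sketch, not part of the original article): assuming the script above is saved as file_split.py, which is the module name the second script imports from, and that data.txt is a hypothetical large text file, the sorted chunks end up in a ./data/ directory:

# hypothetical usage sketch for the split-and-sort script above
from file_split import split_sort_file

if __name__ == '__main__':
    bulks = split_sort_file('data.txt', num_sort=3)   # chunks are written to ./data/0, ./data/1, ...
    print('bulks processed:', bulks)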
# merge / diff script: assumes the split-and-sort script above is saved as
# file_split.py, which diff_file() imports below
from heapq import heappush, heappop
from datetime import datetime
from multiprocessing import Process, Queue, Pipe, current_process, freeze_support
import os

class file_heap:
    """k-way merge over the sorted chunk files in a directory, using a heap."""
    def __init__(self, dir, idx=0, count=1):
        files = os.listdir(dir)
        self.heap = []
        self.files = {}
        self.bulks = {}
        self.pre_element = None
        for i in range(len(files)):
            file = files[i]
            if hash(file) % count != idx:
                continue  # this instance only handles its own shard of the chunk files
            input = open(os.path.join(dir, file))
            self.files[i] = input
            self.bulks[i] = ''
            heappush(self.heap, (self.get_next_element_buffered(i), i))

    def get_next_element_buffered(self, i):
        # refill the per-file read buffer when it runs low, then return the next line
        if len(self.bulks[i]) < 256:
            if self.files[i] is not None:
                buf = self.files[i].read(65536)
                if buf:
                    self.bulks[i] += buf
                else:
                    self.files[i].close()
                    self.files[i] = None
        end_line = self.bulks[i].find('\n')
        if end_line == -1:
            end_line = len(self.bulks[i])
        element = self.bulks[i][:end_line]
        self.bulks[i] = self.bulks[i][end_line+1:]
        return element

    def poppush_uniq(self):
        # like poppush(), but skip duplicates of the previously returned element
        while True:
            element = self.poppush()
            if element is None:
                return None
            if element != self.pre_element:
                self.pre_element = element
                return element

    def poppush(self):
        # pop the smallest element and push the next line from the same file
        try:
            element, index = heappop(self.heap)
        except IndexError:
            return None
        new_element = self.get_next_element_buffered(index)
        if new_element:
            heappush(self.heap, (new_element, index))
        return element

def heappoppush(dir, queue, idx=0, count=1):
    # producer: stream one shard's merged, de-duplicated elements into a queue,
    # then send None as the end marker
    heap = file_heap(dir, idx, count)
    while True:
        d = heap.poppush_uniq()
        queue.put(d)
        if d is None:
            return

def heappoppush2(dir, queue, count=1):
    # merge the streams of <count> heappoppush() producer processes into one queue
    heap = []
    procs = []
    queues = []
    pre_element = None
    for i in range(count):
        q = Queue(1024)
        q_buf = queue_buffer(q)
        queues.append(q_buf)
        p = Process(target=heappoppush, args=(dir, q_buf, i, count))
        procs.append(p)
        p.start()
    queues = tuple(queues)
    for i in range(count):
        d = queues[i].get()
        if d is not None:  # never push the None end marker: str and None do not compare in Python 3
            heappush(heap, (d, i))
    while True:
        try:
            d, i = heappop(heap)
        except IndexError:
            # every producer is exhausted: forward the end marker and clean up
            queue.put(None)
            for p in procs:
                p.join()
            return
        nxt = queues[i].get()
        if nxt is not None:
            heappush(heap, (nxt, i))
        if d != pre_element:
            pre_element = d
            queue.put(d)

def merge_file(dir):
    heap = file_heap(dir)
    os.system('rm -f ' + dir + '.merge')
    fmerge = open(dir + '.merge', 'a')
    element = heap.poppush_uniq()
    while element is not None:  # write each unique element once; stop at the None end marker
        fmerge.write(element + '\n')
        element = heap.poppush_uniq()
    fmerge.close()

class queue_buffer:
    """Batch puts and gets on a multiprocessing.Queue to cut down on IPC overhead."""
    def __init__(self, queue):
        self.q = queue
        self.rbuf = []
        self.wbuf = []

    def get(self):
        if len(self.rbuf) == 0:
            self.rbuf = self.q.get()
        r = self.rbuf[0]
        del self.rbuf[0]
        return r

    def put(self, d):
        self.wbuf.append(d)
        if d is None or len(self.wbuf) > 1024:
            self.q.put(self.wbuf)
            self.wbuf = []

def diff_file(file_old, file_new, file_diff, buf=268435456):
    print('buffer size', buf)
    from file_split import split_sort_file  # the first script, saved as file_split.py
    os.system('rm -rf ' + os.path.splitext(file_old)[0])
    os.system('rm -rf ' + os.path.splitext(file_new)[0])
    t = datetime.now()
    split_sort_file(file_old, 5, buf)
    split_sort_file(file_new, 5, buf)
    print('split elapsed ', datetime.now() - t)
    os.system('cat %s/* | wc -l' % os.path.splitext(file_old)[0])
    os.system('cat %s/* | wc -l' % os.path.splitext(file_new)[0])
    os.system('rm -f ' + file_diff)
    t = datetime.now()
    zdiff = open(file_diff, 'a')
    old_q = Queue(1024)
    new_q = Queue(1024)
    old_queue = queue_buffer(old_q)
    new_queue = queue_buffer(new_q)
    h1 = Process(target=heappoppush2, args=(os.path.splitext(file_old)[0], old_queue, 3))
    h2 = Process(target=heappoppush2, args=(os.path.splitext(file_new)[0], new_queue, 3))
    h1.start(), h2.start()
    old = old_queue.get()
    new = new_queue.get()
    old_count, new_count = 0, 0
    while old is not None or new is not None:
        # guard the comparisons so an exhausted side (None) is handled explicitly
        if old is None or (new is not None and old > new):
            zdiff.write('< ' + new + '\n')  # present only in the new file
            new = new_queue.get()
            new_count += 1
        elif new is None or old < new:
            zdiff.write('> ' + old + '\n')  # present only in the old file
            old = old_queue.get()
            old_count += 1
        else:
            old = old_queue.get()
            new = new_queue.get()
    zdiff.close()
    print('new_count:', new_count)
    print('old_count:', old_count)
    print('diff elapsed ', datetime.now() - t)
    h1.join(), h2.join()
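And a hedged usage sketch for the diff step, again with hypothetical file names old.txt and new.txt: diff_file() splits and sorts both inputs, then streams the merged results and writes lines that occur only in the old file with a '>' prefix and lines that occur only in the new file with a '<' prefix (the comparison key is the first space-separated field of each line, as extracted by sort_worker).

# hypothetical usage sketch; run it from the script that defines diff_file()
if __name__ == '__main__':
    diff_file('old.txt', 'new.txt', 'old_new.diff')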
This concludes the summary of ways to sort files in Python. For more on sorting files with Python, search 腳本之家's earlier articles, and thank you for your continued support of 腳本之家!