# This module is part of GitPython and is released under the
# 3-Clause BSD License: https://opensource.org/license/bsd-3-clause/

"""Performance tests for data streaming."""

import gc
import os
import os.path as osp
import subprocess
import sys
from time import time

from gitdb import LooseObjectDB, IStream
from gitdb.test.lib import make_memory_file

from git.util import bin_to_hex

from test.lib import with_rw_repo
from test.performance.lib import TestBigRepoR


class TestObjDBPerformance(TestBigRepoR):
    large_data_size_bytes = 1000 * 1000 * 10  # Some MiB should do it.
    moderate_data_size_bytes = 1000 * 1000 * 1  # Just 1 MiB.

    @with_rw_repo("HEAD", bare=True)
    def test_large_data_streaming(self, rwrepo):
        # TODO: This part overlaps with the same file in
        # gitdb.test.performance.test_stream. It should be shared if possible.
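        # The loose object database targets the repository's own objects/ directory, so
        # the objects written below are the same loose-object files git itself produces.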
        ldb = LooseObjectDB(osp.join(rwrepo.git_dir, "objects"))

        for randomize in range(2):
            desc = (randomize and "random ") or ""
            print("Creating %s data ..." % desc, file=sys.stderr)
            st = time()
            size, stream = make_memory_file(self.large_data_size_bytes, randomize)
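            # make_memory_file hands back a (size, stream) pair; with randomize set the
            # payload is much harder for zlib to compress (see the note on writing below).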
            elapsed = time() - st
            print("Done (in %f s)" % elapsed, file=sys.stderr)

            # Writing - due to the compression it will seem faster than it is.
            st = time()
            binsha = ldb.store(IStream("blob", size, stream)).binsha
            elapsed_add = time() - st
            assert ldb.has_object(binsha)
            db_file = ldb.readable_db_object_path(bin_to_hex(binsha))
            fsize_kib = osp.getsize(db_file) / 1000

            size_kib = size / 1000
            msg = "Added %i KiB (filesize = %i KiB) of %s data to loose odb in %f s ( %f Write KiB / s)"
            msg %= (size_kib, fsize_kib, desc, elapsed_add, size_kib / elapsed_add)
            print(msg, file=sys.stderr)

            # Reading all at once.
            st = time()
            ostream = ldb.stream(binsha)
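            # The stream returned by ldb.stream() decompresses the loose object as it is
            # read, so these timings include the zlib inflate cost.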
            shadata = ostream.read()
            elapsed_readall = time() - st

            stream.seek(0)
            assert shadata == stream.getvalue()

            msg = "Read %i KiB of %s data at once from loose odb in %f s ( %f Read KiB / s)"
            msg %= (size_kib, desc, elapsed_readall, size_kib / elapsed_readall)
            print(msg, file=sys.stderr)

            # Reading in chunks of 512 KB.
            cs = 512 * 1000
            chunks = []
            st = time()
            ostream = ldb.stream(binsha)
            while True:
                data = ostream.read(cs)
                chunks.append(data)
                if len(data) < cs:
                    break
            # END read in chunks
            elapsed_readchunks = time() - st

            stream.seek(0)
            assert b"".join(chunks) == stream.getvalue()

            cs_kib = cs / 1000
            print(
                "Read %i KiB of %s data in %i KiB chunks from loose odb in %f s ( %f Read KiB / s)"
                % (
                    size_kib,
                    desc,
                    cs_kib,
                    elapsed_readchunks,
                    size_kib / elapsed_readchunks,
                ),
                file=sys.stderr,
            )

            # del db file so git has something to do.
            ostream = None
            gc.collect()
            os.remove(db_file)

            # VS. CGIT
            ##########
            # CGIT! Can using the cgit programs be faster?
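            # `git hash-object -w --stdin` reads the blob from stdin, writes it into the
            # object database, and prints the resulting hex SHA-1 on stdout.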
            proc = rwrepo.git.hash_object("-w", "--stdin", as_process=True, istream=subprocess.PIPE)

            # Write file - pump everything in at once to be as fast as possible.
            data = stream.getvalue()  # Cache it.
            st = time()
            proc.stdin.write(data)
            proc.stdin.close()
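            # Closing stdin signals EOF, after which git-hash-object finishes hashing and
            # emits the object's SHA-1, which we then read from its stdout.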
            gitsha = proc.stdout.read().strip()
            proc.wait()
            gelapsed_add = time() - st
            del data
            assert gitsha == bin_to_hex(binsha)  # We do it the same way, right?

            # As it's the same sha, we reuse our path.
            fsize_kib = osp.getsize(db_file) / 1000
            msg = "Added %i KiB (filesize = %i KiB) of %s data using git-hash-object in %f s ( %f Write KiB / s)"
            msg %= (size_kib, fsize_kib, desc, gelapsed_add, size_kib / gelapsed_add)
            print(msg, file=sys.stderr)

            # Compare.
            print(
                "Git-Python is %f %% faster than git when adding big %s files"
                % (100.0 - (elapsed_add / gelapsed_add) * 100, desc),
                file=sys.stderr,
            )
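            # get_object_data() goes through GitPython's persistent `git cat-file --batch`
            # process and returns (hexsha, typename, size, data) with the whole blob in memory.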

            # Read all.
            st = time()
            _hexsha, _typename, size, data = rwrepo.git.get_object_data(gitsha)
            gelapsed_readall = time() - st
            print(
                "Read %i KiB of %s data at once using git-cat-file in %f s ( %f Read KiB / s)"
                % (size_kib, desc, gelapsed_readall, size_kib / gelapsed_readall),
                file=sys.stderr,
            )

            # Compare.
            print(
                "Git-Python is %f %% faster than git when reading big %sfiles"
                % (100.0 - (elapsed_readall / gelapsed_readall) * 100, desc),
                file=sys.stderr,
            )

            # Read chunks.
            st = time()
            _hexsha, _typename, size, stream = rwrepo.git.stream_object_data(gitsha)
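            # stream_object_data() returns (hexsha, typename, size, stream); unlike
            # get_object_data() above, the blob is not slurped but read lazily from the stream.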
            while True:
                data = stream.read(cs)
                if len(data) < cs:
                    break
            # END read stream
            gelapsed_readchunks = time() - st
            msg = "Read %i KiB of %s data in %i KiB chunks from git-cat-file in %f s ( %f Read KiB / s)"
            msg %= (
                size_kib,
                desc,
                cs_kib,
                gelapsed_readchunks,
                size_kib / gelapsed_readchunks,
            )
            print(msg, file=sys.stderr)

            # Compare.
            print(
                "Git-Python is %f %% faster than git when reading big %s files in chunks"
                % (100.0 - (elapsed_readchunks / gelapsed_readchunks) * 100, desc),
                file=sys.stderr,
            )
        # END for each randomization factor