Diffstat (limited to 'c_src/permdbtest.py')
-rw-r--r--  c_src/permdbtest.py  116
1 file changed, 116 insertions(+), 0 deletions(-)
diff --git a/c_src/permdbtest.py b/c_src/permdbtest.py
new file mode 100644
index 0000000..58debe7
--- /dev/null
+++ b/c_src/permdbtest.py
@@ -0,0 +1,116 @@
+import argparse
+import struct
+import sys
+import os
+import random
+import datetime
+import permdb
+import hashlib
+
+parser = argparse.ArgumentParser(description="Test permdb: insert, commit, and read back entries, reporting timings")
+parser.add_argument('--store', help="Path to the store data file", required=True)
+parser.add_argument('--fsync', metavar="n", type=int, help="Commit (fsync) every n adds", required=False)
+parser.add_argument('--startrand', metavar="n", type=int, default=0, help="Start position in random sequence", required=False)
+parser.add_argument("--remove", action='store_true', help="Remove database before starting test")
+parser.add_argument('--datasize', metavar="n", type=int, default=32, help="Value size in bytes (rounded down to a multiple of 32)", required=False)
+parser.add_argument('testentries', type=int, help="Number of entries to insert")
+args = parser.parse_args()
+
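+# Optionally wipe a previous run; the store consists of a data file and an .idx file.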
+if args.remove:
+    os.remove(args.store)
+    os.remove(args.store + ".idx")
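+# Allocate a permdb handle for the store.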
+permdbobj = permdb.alloc(args.store)
+q = 2  # only referenced in the summary printout below
+
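+# First call (no arguments) returns a fresh timer dict; each subsequent call
+# prints and records the time elapsed since the previous call under 'name'.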
+def timing_point(timer_dict=None, name=None):
+    t = datetime.datetime.now()
+    if timer_dict:
+        starttime = timer_dict["lasttime"]
+        stoptime = t
+        deltatime = stoptime - starttime
+        deltaseconds = deltatime.total_seconds()
+        print name, deltaseconds
+        timer_dict["deltatimes"].append((name, deltaseconds))
+        timer_dict["lasttime"] = t
+        return None
+    else:
+        timer_dict = {"deltatimes": [], "lasttime": t}
+        return timer_dict
+
+def getsize():
+    return permdb.datasize(permdbobj)
+
+
+def main():
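+    # Sanity checks: 32-byte keys, a duplicate add, lookups of absent keys,
+    # and persistence across committree() and a node-cache flush.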
+    permdb.addvalue(permdbobj, b"\xAB\xCDABCDEFGHIJKLMNOPQRSTUVWXYZ1234", b"hej")
+    permdb.clear_nodecache(permdbobj)
+    permdb.addvalue(permdbobj, b"\xAB\xCDABCDEFGHIJKLMNOPQRSTUVWXYZ1234", b"hej")
+    assert permdb.getvalue(permdbobj, b"\xAB\xCDABCDEFGHIJKLMNOPQRSTUVWXYZ1234") == b"hej"
+    assert permdb.getvalue(permdbobj, b"\xAB\xCEABCDEFGHIJKLMNOPQRSTUVWXYZ1234") is None
+    permdb.addvalue(permdbobj, b"\xAB\x12ABCDEFGHIJKLMNOPQRSTUVWXYZ1234", b"hej1")
+    permdb.addvalue(permdbobj, b"\xAC\x52ABCDEFGHIJKLMNOPQRSTUVWXYZ1234", b"hej2")
+    permdb.addvalue(permdbobj, b"\x9A\x43ABCDEFGHIJKLMNOPQRSTUVWXYZ1234", b"hej3")
+    permdb.committree(permdbobj)
+    permdb.clear_nodecache(permdbobj)
+    assert permdb.getvalue(permdbobj, b"\xAB\xCDABCDEFGHIJKLMNOPQRSTUVWXYZ1234") == b"hej"
+    assert permdb.getvalue(permdbobj, b"\xAB\xCEABCDEFGHIJKLMNOPQRSTUVWXYZ1234") is None
+    assert permdb.getvalue(permdbobj, b"\xAB\x12ABCDEFGHIJKLMNOPQRSTUVWXYZ1234") == b"hej1"
+    assert permdb.getvalue(permdbobj, b"\xAC\x52ABCDEFGHIJKLMNOPQRSTUVWXYZ1234") == b"hej2"
+    assert permdb.getvalue(permdbobj, b"\x9A\x43ABCDEFGHIJKLMNOPQRSTUVWXYZ1234") == b"hej3"
+
+ print "generating test data"
+ timing = timing_point()
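+    # Deterministic test data: each entry is a (key, value) pair of 32-byte
+    # SHA-256 digests derived from the sequence index, offset by --startrand.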
+    testdata = [(hashlib.sha256(struct.pack(">II", i, 0)).digest(), hashlib.sha256(struct.pack(">II", i, 1)).digest()) for i in range(args.startrand, args.testentries+args.startrand)]
+ timing_point(timing, "gendata2")
+ print "inserting test data"
+ written_since_fsync = 0
+ datamultiplier = args.datasize // 32
+ if args.datasize % 32 != 0:
+ print "datasize", args.datasize, "not multiple of 32, truncating to", datamultiplier * 32
+    for (k, v) in testdata:
+        permdb.addvalue(permdbobj, k, v * datamultiplier)
+        written_since_fsync += 1
+        if args.fsync and written_since_fsync >= args.fsync:
+            permdb.committree(permdbobj)
+            written_since_fsync = 0
+    permdb.committree(permdbobj)
+ timing_point(timing, "insert")
+ nentries = args.testentries
+ print "reading test data"
+    for (k, v) in testdata:
+        assert permdb.getvalue(permdbobj, k) == v * datamultiplier
+    timing_point(timing, "read1")
+    random.shuffle(testdata)
+    timing_point(timing, "shuffle")
+    permdb.clear_nodecache(permdbobj)
+    for (k, v) in testdata:
+        assert permdb.getvalue(permdbobj, k) == v * datamultiplier
+    timing_point(timing, "read2")
+    if getsize() > 1024*1024:
+        print "db size %sM" % (getsize() / (1024*1024),)
+    else:
+        print "db size", getsize()
+    print "db size/entry", getsize() / nentries
+    print "data file size %sM, entry size %d" % (os.stat(args.store).st_size / (1024*1024), datamultiplier * 32)
+    print "q", q, "entries", nentries, "fsync", args.fsync
+    print timing["deltatimes"]
+    timingdict = dict(timing["deltatimes"])
+    print len(testdata)/timingdict["insert"], "writeops/s", "(%f microseconds)" % (timingdict["insert"]*1000000/len(testdata))
+    print len(testdata)/timingdict["read1"], "cached readops/s", "(%f microseconds)" % (timingdict["read1"]*1000000/len(testdata))
+    print len(testdata)/timingdict["read2"], "uncached readops/s", "(%f microseconds)" % (timingdict["read2"]*1000000/len(testdata))
+    permdb.clear_nodecache(permdbobj)
+
+if __name__ == "__main__":
+    main()