ref: 99993f0d2c9b49a70a095235d90a7d0c01936d2e
parent: 8bb05d8ca955ed85c7028d74371f1d23ce66a800
author: 9ferno <gophone2015@gmail.com>
date: Sun Jan 23 07:50:15 EST 2022
added missing files from 9front
--- /dev/null
+++ b/include/tos.h
@@ -1,0 +1,24 @@
+typedef struct Tos Tos;
+typedef struct Plink Plink;
+
+#pragma incomplete Plink
+
+struct Tos {
+ struct /* Per process profiling */
+ {
+ Plink *pp; /* known to be 0(ptr) */
+ Plink *next; /* known to be 4(ptr) */
+ Plink *last;
+ Plink *first;
+ ulong pid;
+ ulong what;
+ } prof;
+ uvlong cyclefreq; /* cycle clock frequency if there is one, 0 otherwise */
+ vlong kcycles; /* cycles spent in kernel */
+ vlong pcycles; /* cycles spent in process (kernel + user) */
+ ulong pid; /* might as well put the pid here */
+ ulong clock;
+ /* top of stack is here */
+};
+
+extern Tos *_tos;
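+
+/*
+ * Usage sketch (work() is a placeholder, not part of the interface):
+ * measure cycles spent in a call via the per-process counters
+ * reachable through _tos.
+ *
+ *	vlong t0 = _tos->pcycles;
+ *	work();
+ *	print("%lld cycles\n", _tos->pcycles - t0);
+ *
+ * cyclefreq is 0 when the platform has no cycle clock, so check it
+ * before converting cycles to seconds.
+ */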
--- /dev/null
+++ b/os/port/cache.c
@@ -1,0 +1,521 @@
+#include "u.h"
+#include "../port/lib.h"
+#include "mem.h"
+#include "dat.h"
+#include "fns.h"
+#include "../port/error.h"
+
+enum
+{
+ NHASH = 128,
+ NFILE = 4093, /* should be prime */
+ MAXCACHE = 8*1024*1024,
+
+ MAPBITS = 8*sizeof(ulong),
+ NBITMAP = (PGROUND(MAXCACHE)/BY2PG + MAPBITS-1) / MAPBITS,
+};
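+
+/*
+ * each cached file covers at most its first MAXCACHE bytes, tracked
+ * with one valid bit per page; NBITMAP is the number of ulong words
+ * needed to hold MAXCACHE/BY2PG such bits.
+ */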
+
+/* devmnt.c: parallel read ahead implementation */
+extern void mntrahinit(Mntrah *rah);
+extern long mntrahread(Mntrah *rah, Chan *c, uchar *buf, long len, vlong off);
+
+typedef struct Mntcache Mntcache;
+struct Mntcache
+{
+ Qid qid;
+ int dev;
+ int type;
+
+ QLock;
+ Proc *locked;
+ ulong nlocked;
+
+ Mntcache *hash;
+ Mntcache *prev;
+ Mntcache *next;
+
+ /* page bitmap of valid pages */
+ ulong bitmap[NBITMAP];
+
+ /* read ahead state */
+ Mntrah rah;
+};
+
+typedef struct Cache Cache;
+struct Cache
+{
+ Lock;
+ Mntcache *alloc;
+ Mntcache *head;
+ Mntcache *tail;
+ Mntcache *hash[NHASH];
+};
+
+Image fscache;
+
+static Cache cache;
+
+void
+cinit(void)
+{
+ int i;
+ Mntcache *m;
+
+ m = xalloc(sizeof(Mntcache)*NFILE);
+ if (m == nil)
+ panic("cinit: no memory");
+
+ cache.alloc = m;
+ cache.head = m;
+
+ for(i = 0; i < NFILE-1; i++) {
+ m->next = m+1;
+ m->prev = m-1;
+ m++;
+ }
+
+ cache.tail = m;
+ cache.tail->next = nil;
+ cache.head->prev = nil;
+
+ fscache.notext = 1;
+}
+
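+/*
+ * derive the fscache disk address for page pn of entry m: stride
+ * page numbers by NFILE and offset by the entry's index so every
+ * (entry, page) pair maps to a distinct value, then rotate left by
+ * PGSHIFT so the mapping stays unique after the shift.
+ */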
+static uintptr
+cacheaddr(Mntcache *m, ulong pn)
+{
+ uintptr da = pn * NFILE + (m - cache.alloc);
+ return (da << PGSHIFT) | (da >> (sizeof(da)*8 - PGSHIFT));
+}
+
+static void
+cnodata(Mntcache *m)
+{
+ memset(m->bitmap, 0, sizeof(m->bitmap));
+}
+
+static void
+ctail(Mntcache *m)
+{
+ /* Unlink and send to the tail */
+ if(m->prev != nil)
+ m->prev->next = m->next;
+ else
+ cache.head = m->next;
+ if(m->next != nil)
+ m->next->prev = m->prev;
+ else
+ cache.tail = m->prev;
+
+ if(cache.tail != nil) {
+ m->prev = cache.tail;
+ cache.tail->next = m;
+ m->next = nil;
+ cache.tail = m;
+ }
+ else {
+ cache.head = m;
+ cache.tail = m;
+ m->prev = nil;
+ m->next = nil;
+ }
+}
+
+/* called with cache locked */
+static Mntcache*
+clookup(Chan *c, int skipvers)
+{
+ Mntcache *m;
+
+ for(m = cache.hash[c->qid.path%NHASH]; m != nil; m = m->hash)
+ if(eqchantdqid(c, m->type, m->dev, m->qid, skipvers) && c->qid.type == m->qid.type)
+ return m;
+
+ return nil;
+}
+
+/*
+ * recursive Mntcache locking. Mntcache.rah is protected by the
+ * same lock and we want to call cupdate() from mntrahread()
+ * while holding the lock.
+ */
+static int
+cancachelock(Mntcache *m)
+{
+ if(m->locked == up || canqlock(m)){
+ m->locked = up;
+ m->nlocked++;
+ return 1;
+ }
+ return 0;
+}
+static void
+cachelock(Mntcache *m)
+{
+ if(m->locked != up){
+ qlock(m);
+ assert(m->nlocked == 0);
+ m->locked = up;
+ }
+	m->nlocked++;
+}
+static void
+cacheunlock(Mntcache *m)
+{
+ assert(m->locked == up);
+ if(--m->nlocked == 0){
+ m->locked = nil;
+ qunlock(m);
+ }
+}
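+
+/*
+ * typical nesting: cread() takes the lock via ccache() and calls
+ * mntrahread(), which reenters through cupdate() on the same
+ * Mntcache; nlocked counts the depth so only the outermost
+ * cacheunlock() releases the QLock.
+ */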
+
+/* return locked Mntcache if still valid, else reset c->mcp */
+static Mntcache*
+ccache(Chan *c)
+{
+ Mntcache *m;
+
+ m = c->mcp;
+ if(m != nil) {
+ cachelock(m);
+ if(eqchantdqid(c, m->type, m->dev, m->qid, 0) && c->qid.type == m->qid.type)
+ return m;
+ c->mcp = nil;
+ cacheunlock(m);
+ }
+ return nil;
+}
+
+int
+copen(Chan *c)
+{
+ Mntcache *m, *f, **l;
+
+ /* directories aren't cacheable */
+ if(c->qid.type&QTDIR){
+ c->mcp = nil;
+ return 0;
+ }
+
+ lock(&cache);
+ m = clookup(c, 0);
+ if(m != nil){
+ ctail(m);
+ unlock(&cache);
+ c->mcp = m;
+ return 1;
+ }
+ m = clookup(c, 1);
+ if(m == nil)
+ m = cache.head;
+ ctail(m);
+
+ l = &cache.hash[m->qid.path%NHASH];
+ for(f = *l; f != nil; f = f->hash) {
+ if(f == m) {
+ *l = m->hash;
+ break;
+ }
+ l = &f->hash;
+ }
+
+ if(!cancachelock(m)){
+ unlock(&cache);
+ cachelock(m);
+ lock(&cache);
+ f = clookup(c, 0);
+ if(f != nil) {
+ /*
+ * someone got there first while cache lock
+			 * was released and added an updated Mntcache
+ * for us. update LRU and use it.
+ */
+ ctail(f);
+ unlock(&cache);
+ cacheunlock(m);
+ c->mcp = f;
+ return 1;
+ }
+ }
+
+ m->qid = c->qid;
+ m->dev = c->dev;
+ m->type = c->type;
+
+ l = &cache.hash[c->qid.path%NHASH];
+ m->hash = *l;
+ *l = m;
+
+ unlock(&cache);
+
+ m->rah.vers = m->qid.vers;
+ mntrahinit(&m->rah);
+ cnodata(m);
+ cacheunlock(m);
+ c->mcp = m;
+ return 0;
+}
+
+enum {
+ VABITS = 8*sizeof(uintptr) - 2*PGSHIFT,
+ VAMASK = (((uintptr)1 << VABITS)-1) << PGSHIFT,
+};
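+
+/*
+ * the valid byte range [po, pe) of a cached page is packed into
+ * Page.va: the low PGSHIFT bits hold po, the top PGSHIFT bits hold
+ * pe-1, and the VABITS between them carry the address proper.
+ */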
+
+static Page*
+cpage(Mntcache *m, ulong pn, ulong *po, ulong *pe)
+{
+ ulong b;
+ Page *p;
+
+	b = 1UL << (pn%MAPBITS);
+ if((m->bitmap[pn/MAPBITS] & b) == 0)
+ return nil;
+ p = lookpage(&fscache, cacheaddr(m, pn));
+ if(p == nil){
+ m->bitmap[pn/MAPBITS] &= ~b;
+ return nil;
+ }
+ *po = p->va & (BY2PG-1);
+ *pe = 1 + (p->va >> (PGSHIFT+VABITS));
+ assert(*po < *pe);
+ return p;
+}
+
+static void
+cpageset(Page *p, ulong po, ulong pe)
+{
+ assert(po < pe);
+ p->va = po | (p->va & VAMASK) | ((uintptr)pe - 1) << (PGSHIFT+VABITS);
+}
+
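+/*
+ * cread copies whatever prefix of [off, off+len) is already valid
+ * in the cache, then hands the rest to the devmnt read-ahead code
+ * (mntrahread), which refills the cache through cupdate().
+ */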
+int
+cread(Chan *c, uchar *buf, int len, vlong off)
+{
+ KMap *k;
+ Page *p;
+ Mntcache *m;
+ int l, tot;
+ ulong offset, pn, po, pe;
+
+ if(len <= 0)
+ return 0;
+
+ m = ccache(c);
+ if(m == nil)
+ return 0;
+
+ if(waserror()){
+ cacheunlock(m);
+ nexterror();
+ }
+
+ tot = 0;
+ if(off >= MAXCACHE)
+ goto Prefetch;
+
+ offset = off;
+ if(offset+len > MAXCACHE)
+ len = MAXCACHE - offset;
+ pn = offset / BY2PG;
+ offset &= (BY2PG-1);
+
+ while(len > 0){
+ p = cpage(m, pn, &po, &pe);
+ if(p == nil)
+ break;
+ if(offset < po || offset >= pe){
+ putpage(p);
+ break;
+ }
+ l = pe - offset;
+ if(l > len)
+ l = len;
+
+ k = kmap(p);
+ if(waserror()) {
+ kunmap(k);
+ putpage(p);
+ nexterror();
+ }
+ memmove(buf, (uchar*)VA(k) + offset, l);
+ kunmap(k);
+ putpage(p);
+ poperror();
+
+ tot += l;
+ buf += l;
+ len -= l;
+
+ offset += l;
+ offset &= (BY2PG-1);
+ if(offset != 0)
+ break;
+
+ pn++;
+ }
+
+Prefetch:
+ if(len > 0){
+ if(m->rah.vers != m->qid.vers){
+ mntrahinit(&m->rah);
+ m->rah.vers = m->qid.vers;
+ }
+ off += tot;
+ tot += mntrahread(&m->rah, c, buf, len, off);
+ }
+ cacheunlock(m);
+ poperror();
+
+ return tot;
+}
+
+/* invalidate the pages covering [offset, offset+len) in the page bitmap */
+static void
+invalidate(Mntcache *m, ulong offset, int len)
+{
+	ulong pn, last;
+
+	if(len <= 0)
+		return;
+	last = (offset + len - 1)/BY2PG;
+	for(pn = offset/BY2PG; pn <= last; pn++)
+		m->bitmap[pn/MAPBITS] &= ~(1UL << (pn%MAPBITS));
+}
+
+/*
+ * replace [off, off+len) in the cache with data from buf, or
+ * invalidate the range; called with m locked, unlocks m on return.
+ */
+static void
+cachedata(Mntcache *m, uchar *buf, int len, vlong off)
+{
+ int l;
+ Page *p;
+ KMap *k;
+ ulong offset, pn, po, pe;
+
+ if(off >= MAXCACHE || len <= 0){
+ cacheunlock(m);
+ return;
+ }
+
+ offset = off;
+ if(offset+len > MAXCACHE)
+ len = MAXCACHE - offset;
+ pn = offset / BY2PG;
+ offset &= (BY2PG-1);
+
+ while(len > 0){
+ l = BY2PG - offset;
+ if(l > len)
+ l = len;
+ p = cpage(m, pn, &po, &pe);
+ if(p != nil){
+ if(offset > pe || (offset+l) < po){
+ /* cached range not extendable, set new cached range */
+ po = offset;
+ pe = offset+l;
+ } else {
+ /* extend cached range */
+ if(offset < po)
+ po = offset;
+ if((offset+l) > pe)
+ pe = offset+l;
+ }
+ } else {
+ if(needpages(nil)){
+ invalidate(m, offset + pn*BY2PG, len);
+ break;
+ }
+ p = newpage(0, nil, pn*BY2PG);
+ p->daddr = cacheaddr(m, pn);
+ cachedel(&fscache, p->daddr);
+ cachepage(p, &fscache);
+			m->bitmap[pn/MAPBITS] |= 1UL << (pn%MAPBITS);
+
+ po = offset;
+ pe = offset+l;
+ }
+ cpageset(p, po, pe);
+
+ k = kmap(p);
+ if(waserror()) {
+ kunmap(k);
+ putpage(p);
+ invalidate(m, offset + pn*BY2PG, len);
+ cacheunlock(m);
+ nexterror();
+ }
+ memmove((uchar*)VA(k) + offset, buf, l);
+ poperror();
+ kunmap(k);
+ putpage(p);
+
+ offset = 0;
+ pn++;
+ buf += l;
+ len -= l;
+ }
+ cacheunlock(m);
+}
+
+void
+cupdate(Chan *c, uchar *buf, int len, vlong off)
+{
+ Mntcache *m;
+
+ m = ccache(c);
+ if(m == nil)
+ return;
+ cachedata(m, buf, len, off);
+}
+
+void
+cwrite(Chan* c, uchar *buf, int len, vlong off)
+{
+ Mntcache *m;
+
+ m = ccache(c);
+ if(m == nil)
+ return;
+ m->qid.vers++;
+ c->qid.vers++;
+ if(c->qid.type&QTAPPEND){
+ cacheunlock(m);
+ return;
+ }
+ cachedata(m, buf, len, off);
+}
+
+void
+ctrunc(Chan *c)
+{
+ Mntcache *m;
+
+ if(c->qid.type&QTDIR)
+ return;
+
+ if((c->flag&COPEN) == 0){
+ lock(&cache);
+ c->mcp = clookup(c, 0);
+ unlock(&cache);
+ }
+
+ m = ccache(c);
+ if(m == nil)
+ return;
+ mntrahinit(&m->rah);
+ cnodata(m);
+ cacheunlock(m);
+
+ if((c->flag&COPEN) == 0)
+ c->mcp = nil;
+}
+
+void
+cclunk(Chan *c)
+{
+ Mntcache *m;
+
+ m = ccache(c);
+ if(m == nil)
+ return;
+ mntrahinit(&m->rah);
+ cacheunlock(m);
+ c->mcp = nil;
+}
--- /dev/null
+++ b/tests/bigloop.c
@@ -1,0 +1,18 @@
+#include <u.h>
+#include <libc.h>
+
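+/* crude cpu benchmark: time a 10^9 iteration empty loop with nsec() */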
+void
+main(int, char**)
+{
+	uvlong n, t, s;
+
+	s = nsec();
+	print("start nanoseconds %llud\n", s);
+	for(n = 0; n < 1000*1000*1000; n++)
+		;
+	print("n %llud\n", n);
+	t = nsec();
+	print("end nanoseconds %llud taken %llud\n", t, t - s);
+	exits(nil);
+}
--- /dev/null
+++ b/tests/diskthroughput.c
@@ -1,0 +1,173 @@
+#include <u.h>
+#include <libc.h>
+#include <libsec.h>
+
+/*
+ IRC log that motivated this disk throughput test:
+17:54 < ori> single proc random reads with the test program => 130 megs/sec
+17:54 < h9ro> ah, that wasn't clear
+17:54 < ori> gefs => 50 megs/sec
+17:54 < h9ro> ok, then you're right of course :)
+17:55 < ori> no, gefs doesn't do parallel reads, but it clearly does something dumb.
+17:55 < h9ro> if your random offsets are ok and same blocksize...
+17:55 < ori> oh. that's actually a good thing to check.
+17:55 < ori> I should make sure that the offsets ARE aligned.
+17:55 < ori> pretty sure they are.
+17:55 < ori> but it'd be dumb if I'm wrong.
+17:55 < h9ro> try some really big alignments
+17:55 < h9ro> just in case
+17:55 < h9ro> you never know how dumb the SSD is!!!
+17:56 < h9ro> maybe make a test to find the internal ssd blocksizes
+17:56 < ori> yeah, but the test program is doing the same alignments I *think* I'm doing in gefs.
+17:56 < h9ro> and the ssd might have implemented readahead
+17:56 < ori> no worries. I'll figure it out.
+
+20:10 < ori> when you have multiple procs
+20:11 < ori> eg, for gefs testing one of the tests I use is '{for(i in *){cat $i&}} | tput'
+
+ */
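+
+/*
+ * invocation sketch (assuming the binary is installed as
+ * diskthroughput):
+ *
+ *	diskthroughput -p 4 -s 65536 -o rand /tmp/bigfile
+ *	diskthroughput -r -o seq /tmp/bigfile
+ *
+ * -r reads instead of writing; order is seq, skip or rand.
+ */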
+enum {
+ KiB = 1024,
+ MiB = 1024*KiB,
+ GiB = 1024*MiB,
+};
+
+int nproc = 1;
+int iosize = 8*1024;
+char *order = "seq";
+vlong fsize = 0;
+
+int fd;
+char *buf;
+vlong nbytes = 0;
+long (*iofn)(int, void*, long, vlong);
+
+void
+usage(void)
+{
+ fprint(2, "usage: %s [-p procs] [-s iosize] [-o order] path\n", argv0);
+ exits("usage");
+}
+
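+/*
+ * worker procs share memory (RFPROC|RFMEM), so the shared byte
+ * counter is advanced with an atomic compare-and-swap retry loop.
+ * cas64 has no prototype in libc.h here and is assumed to be
+ * supplied by the platform libc.
+ */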
+extern int cas64(u64int*, u64int, u64int);
+
+void
+inc64(vlong *v, vlong dv)
+{
+	vlong ov, nv;
+
+	while(1){
+		ov = *v;
+		nv = ov + dv;
+		if(cas64((u64int*)v, ov, nv))
+			break;
+	}
+}
+
+vlong
+seq(vlong off)
+{
+ return off + iosize;
+}
+
+vlong
+skip(vlong off)
+{
+ return off + 2*iosize;
+}
+
+vlong
+randoff(vlong off)
+{
+	USED(off);
+	return ((uvlong)fastrand()<<32)|fastrand();
+}
+
+void
+io(vlong (*next)(vlong))
+{
+ vlong off = 0;
+
+ while(1){
+		if((*iofn)(fd, buf, iosize, off) < 0)
+			sysfatal("io at %lld: %r", off);
+ inc64(&nbytes, iosize);
+ off = next(off);
+		if(off < 0)
+			off = -off;
+		if(off > fsize - iosize)
+			off %= fsize;
+		off -= off % iosize;
+ }
+}
+
+void
+showstats(void)
+{
+ double speed;
+ int sec;
+
+ sec = 0;
+ while(1){
+ sleep(1000);
+ sec++;
+ speed = (double)nbytes/(double)sec;
+ print("%lld/%d\n", nbytes, sec);
+ if(speed > GiB)
+ print("%f GiB/s\n", speed/GiB);
+ else if(speed > MiB)
+ print("%f MiB/s\n", speed/MiB);
+ else if(speed > KiB)
+ print("%f KiB/s\n", speed/KiB);
+ }
+}
+
+void
+main(int argc, char **argv)
+{
+ Dir *d;
+ int i, pid;
+
+ iofn = pwrite;
+ ARGBEGIN{
+ case 'p':
+ nproc = atoi(EARGF(usage()));
+ break;
+ case 's':
+ iosize = atoi(EARGF(usage()));
+ break;
+ case 'o':
+ order = EARGF(usage());
+ break;
+ case 'r':
+ iofn = pread;
+ break;
+ default:
+ usage();
+ break;
+ }ARGEND;
+
+ if(argc != 1)
+ usage();
+	if((fd = open(argv[0], iofn == pread ? OREAD : OWRITE)) == -1)
+		sysfatal("open %s: %r", argv[0]);
+ if((d = dirfstat(fd)) == nil)
+ sysfatal("stat %s: %r", argv[0]);
+ if((buf = malloc(iosize)) == nil)
+ sysfatal("alloc buf: %r");
+	fsize = d->length;
+	if(fsize < iosize)
+		sysfatal("%s: file smaller than iosize", argv[0]);
+	free(d);
+
+ for(i = 0; i < nproc; i++){
+ if((pid = rfork(RFPROC|RFMEM)) == -1)
+ sysfatal("rfork: %r");
+ if(pid == 0){
+ if(strcmp(order, "seq") == 0)
+ io(seq);
+ else if(strcmp(order, "skip") == 0)
+ io(skip);
+ else if(strcmp(order, "rand") == 0)
+ io(randoff);
+ else
+ sysfatal("unknown order %s\n", order);
+ exits(nil);
+ }
+ }
+ showstats();
+ exits(nil);
+}
+