123456789101112131415161718192021222324252627282930313233343536373839404142434445464748495051525354555657585960616263646566676869707172737475767778798081828384858687888990919293949596979899100101102103104105106107108109110111112113114115116117118119120121122123124125126127128129130131132133134135136137138139140141142143144145146147148149150151152153154155156157158159160161162163164165166167168169170171172173174175176177 |
- #include <u.h>
- #include <libc.h>
- #include <auth.h>
- #include <fcall.h>
- #include "dat.h"
- #include "fns.h"
- /*
- * We used to use 100 i/o buffers of size 2kb (Sectorsize).
- * Unfortunately, reading 2kb at a time often hopping around
- * the disk doesn't let us get near the disk bandwidth.
- *
- * Based on a trace of iobuf address accesses taken while
- * tarring up a Plan 9 distribution CD, we now use 16 128kb
- * buffers. This works for ISO9660 because data is required
- * to be laid out contiguously; effectively we're doing aggressive
- * readahead. Because the buffers are so big and the typical
- * disk accesses so concentrated, it's okay that we have so few
- * of them.
- *
- * If this is used to access multiple discs at once, it's not clear
- * how gracefully the scheme degrades, but I'm not convinced
- * it's worth worrying about. -rsc
- */
#define BUFPERCLUST 64 /* 64*Sectorsize = 128kb */
#define NCLUST 16

int nclust = NCLUST;	/* number of I/O clusters allocated by iobuf_init */

static Ioclust* iohead;	/* most recently used cluster (putclust links here) */
static Ioclust* iotail;	/* least recently used cluster */

static Ioclust* getclust(Xdata*, long);
static void putclust(Ioclust*);
static void xread(Ioclust*);
- void
- iobuf_init(void)
- {
- int i, j, n;
- Ioclust *c;
- Iobuf *b;
- uchar *mem;
- n = nclust*sizeof(Ioclust) +
- nclust*BUFPERCLUST*(sizeof(Iobuf)+Sectorsize);
- mem = sbrk(n);
- if(mem == (void*)-1)
- panic(0, "iobuf_init");
- memset(mem, 0, n);
- for(i=0; i<nclust; i++){
- c = (Ioclust*)mem;
- mem += sizeof(Ioclust);
- c->addr = -1;
- c->prev = iotail;
- if(iotail)
- iotail->next = c;
- iotail = c;
- if(iohead == nil)
- iohead = c;
- c->buf = (Iobuf*)mem;
- mem += BUFPERCLUST*sizeof(Iobuf);
- c->iobuf = mem;
- mem += BUFPERCLUST*Sectorsize;
- for(j=0; j<BUFPERCLUST; j++){
- b = &c->buf[j];
- b->clust = c;
- b->addr = -1;
- b->iobuf = c->iobuf+j*Sectorsize;
- }
- }
- }
- void
- purgebuf(Xdata *dev)
- {
- Ioclust *p;
- for(p=iohead; p!=nil; p=p->next)
- if(p->dev == dev){
- p->addr = -1;
- p->busy = 0;
- }
- }
- static Ioclust*
- getclust(Xdata *dev, long addr)
- {
- Ioclust *c, *f;
- f = nil;
- for(c=iohead; c; c=c->next){
- if(!c->busy)
- f = c;
- if(c->addr == addr && c->dev == dev){
- c->busy++;
- return c;
- }
- }
- if(f == nil)
- panic(0, "out of buffers");
- f->addr = addr;
- f->dev = dev;
- f->busy++;
- if(waserror()){
- f->addr = -1; /* stop caching */
- putclust(f);
- nexterror();
- }
- xread(f);
- poperror();
- return f;
- }
- static void
- putclust(Ioclust *c)
- {
- if(c->busy <= 0)
- panic(0, "putbuf");
- c->busy--;
- /* Link onto head for LRU */
- if(c == iohead)
- return;
- c->prev->next = c->next;
- if(c->next)
- c->next->prev = c->prev;
- else
- iotail = c->prev;
- c->prev = nil;
- c->next = iohead;
- iohead->prev = c;
- iohead = c;
- }
- Iobuf*
- getbuf(Xdata *dev, long addr)
- {
- int off;
- Ioclust *c;
- off = addr%BUFPERCLUST;
- c = getclust(dev, addr - off);
- if(c->nbuf < off){
- c->busy--;
- error("I/O read error");
- }
- return &c->buf[off];
- }
- void
- putbuf(Iobuf *b)
- {
- putclust(b->clust);
- }
- static void
- xread(Ioclust *c)
- {
- int n;
- vlong addr;
- Xdata *dev;
- dev = c->dev;
- addr = c->addr;
- seek(dev->dev, addr*Sectorsize, 0);
- n = readn(dev->dev, c->iobuf, BUFPERCLUST*Sectorsize);
- if(n < Sectorsize)
- error("I/O read error");
- c->nbuf = n/Sectorsize;
- }
|