// Only cpu0 calls this, so there is no need to lock.
// After kinit, every free page is on cpu0's freelist; the other
// CPUs obtain pages through steal().
void
kinit()
{
  for(int i = 0; i < NCPU; i++)
    initlock(&cpus[i].kmem.lock, "kmem");
  freerange(end, (void*)PHYSTOP);
}
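kinit() assumes each CPU carries its own allocator state. The data-structure change itself is not shown above; the following is a minimal sketch, assuming struct run stays as in stock xv6 kalloc.c and that a kmem field was added to struct cpu in proc.h (the field name is taken from the cpus[i].kmem references above):

// kalloc.c (unchanged): a free physical page is linked through a struct run.
struct run {
  struct run *next;
};

// proc.h: per-CPU state. The assumed change for this lab is the embedded
// kmem struct, giving each CPU its own freelist and lock.
struct cpu {
  struct proc *proc;          // The process running on this cpu, or null.
  struct context context;     // swtch() here to enter scheduler().
  int noff;                   // Depth of push_off() nesting.
  int intena;                 // Were interrupts enabled before push_off()?
  struct {
    struct spinlock lock;
    struct run *freelist;
  } kmem;                     // added: per-CPU free-page list
};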
void
freerange(void *pa_start, void *pa_end)
{
  char *p;
  p = (char*)PGROUNDUP((uint64)pa_start);
  for(; p + PGSIZE <= (char*)pa_end; p += PGSIZE)
    kfree(p);
}
// Free the page of physical memory pointed at by pa,
// which normally should have been returned by a
// call to kalloc(). (The exception is when
// initializing the allocator; see kinit above.)
void
kfree(void *pa)
{
  struct run *r;
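  // (Body not shown in the original; a minimal sketch of the per-CPU
  //  version, assuming the same kmem layout used by kalloc() below.)
  if(((uint64)pa % PGSIZE) != 0 || (char*)pa < end || (uint64)pa >= PHYSTOP)
    panic("kfree");

  memset(pa, 1, PGSIZE);       // fill with junk to catch dangling refs

  r = (struct run*)pa;

  push_off();                  // interrupts off so mycpu() stays valid
  struct cpu *c = mycpu();
  acquire(&c->kmem.lock);
  r->next = c->kmem.freelist;  // push the page onto this CPU's freelist
  c->kmem.freelist = r;
  release(&c->kmem.lock);
  pop_off();
}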
// CPU C steals a free page from another CPU's freelist.
void*
steal(struct cpu *C)
{
  struct run *r = 0;
  for(struct cpu *c = cpus; c < cpus + NCPU; c++) {
    if(c != C) {
      acquire(&c->kmem.lock);
      if(c->kmem.freelist) {
        r = c->kmem.freelist;
        c->kmem.freelist = r->next;  // remove r from c's free list
        // There is no need to put r into C's free list: r is being
        // handed out as an allocated page and will come back via kfree().
        release(&c->kmem.lock);
        break;
      }
      release(&c->kmem.lock);
    }
  }
  return r;  // may still be 0, e.g. when NCPU == 1 or no CPU has free pages
}
// Allocate one 4096-byte page of physical memory.
// Returns a pointer that the kernel can use.
// Returns 0 if the memory cannot be allocated.
void *
kalloc(void)
{
  struct run *r;

  push_off();
  struct cpu *c = mycpu();

  acquire(&c->kmem.lock);
  r = c->kmem.freelist;
  if(r)
    c->kmem.freelist = r->next;
  release(&c->kmem.lock);

  // Steal only after releasing our own lock: if two CPUs ran out of
  // memory at the same time while each still held its own kmem.lock,
  // the acquire() calls inside steal() could deadlock.
  if(!r)
    r = steal(c);

  pop_off();

  if(r)
    memset((char*)r, 5, PGSIZE); // fill with junk
  return (void*)r;
}
In bget(), if the requested block is not cached and no buffer in its own bucket is free, a replaceable buffer has to come from another bucket. A straightforward version scans the other buckets while still holding bcache.lock[idx]:

  for(int i = 0; i < HASHPRIME; i++) {
    if(i == idx)
      continue;
    // We need to hold both the lock of the i'th table and the idx'th table.
    acquire(&bcache.lock[i]);
    for(b = bcache.head[i].prev; b != &bcache.head[i]; b = b->prev){
      if(b->refcnt == 0) {
        // We found a replaceable buffer: remove it from the i'th table
        // and insert it into the idx'th table.
        b->dev = dev;
        b->blockno = blockno;
        b->valid = 0;
        b->refcnt = 1;
        b->next->prev = b->prev;
        b->prev->next = b->next;
        b->next = bcache.head[idx].next;
        b->prev = &bcache.head[idx];
        bcache.head[idx].next->prev = b;
        bcache.head[idx].next = b;
        // Return with both bucket locks released.
        release(&bcache.lock[i]);
        release(&bcache.lock[idx]);
        acquiresleep(&b->lock);
        return b;
      }
    }
    release(&bcache.lock[i]);
  }
When replacing a block, you might move a struct buf from one bucket to another bucket, because the new block hashes to a different bucket. You might have a tricky case: the new block might hash to the same bucket as the old block. Make sure you avoid deadlock in that case.
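One way to make both concerns explicit is a small helper that takes two bucket locks in a fixed order. The acquire_two below is hypothetical (the code in this write-up inlines the same index ordering instead), but it shows the rule: always lock the lower index first, and take the lock only once when both blocks hash to the same bucket.

// Hypothetical helper (not used below): take the locks of buckets a and b
// without deadlocking. Locks are always acquired in increasing index order,
// and when a == b the single lock is taken only once.
static void
acquire_two(int a, int b)
{
  if(a == b){
    acquire(&bcache.lock[a]);
  } else if(a < b){
    acquire(&bcache.lock[a]);
    acquire(&bcache.lock[b]);
  } else {
    acquire(&bcache.lock[b]);
    acquire(&bcache.lock[a]);
  }
}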
Holding bcache.lock[idx] while waiting for another bucket's lock can also deadlock: two processes may each hold their own bucket's lock and wait for the other's. The fixed version therefore releases bcache.lock[idx] first and always acquires the two locks in index order:

  release(&bcache.lock[idx]);

  // We failed to find a replaceable buffer in the idx'th hash table,
  // so try to replace a buffer from another hash table.
  for(int i = 0; i < HASHPRIME; i++) {
    if(i == idx)
      continue;
    // We need to hold both the lock of the i'th table and the idx'th
    // table; acquire them in index order to avoid deadlock.
    if(i < idx) {
      acquire(&bcache.lock[i]);
      acquire(&bcache.lock[idx]);
    } else {
      acquire(&bcache.lock[idx]);
      acquire(&bcache.lock[i]);
    }
    for(b = bcache.head[i].prev; b != &bcache.head[i]; b = b->prev){
      if(b->refcnt == 0) {
        // We found a replaceable buffer: remove it from the i'th table
        // and insert it into the idx'th table.
        b->dev = dev;
        b->blockno = blockno;
        b->valid = 0;
        b->refcnt = 1;
        b->next->prev = b->prev;
        b->prev->next = b->next;
        b->next = bcache.head[idx].next;
        b->prev = &bcache.head[idx];
        bcache.head[idx].next->prev = b;
        bcache.head[idx].next = b;
        // Return with both bucket locks released.
        release(&bcache.lock[i]);
        release(&bcache.lock[idx]);
        acquiresleep(&b->lock);
        return b;
      }
    }
    release(&bcache.lock[i]);
    release(&bcache.lock[idx]);
  }
In bio.c, the buffer cache becomes HASHPRIME hash buckets, each protected by its own lock and holding its own doubly linked list of buffers:

struct {
  struct spinlock lock[HASHPRIME];
  struct buf buf[NBUF];

  // Each bucket is a doubly linked list of buffers, through prev/next,
  // sorted by how recently the buffer was used:
  // head.next is most recent, head.prev is least.
  struct buf head[HASHPRIME];
} bcache;
void
binit(void)
{
  struct buf *b;
  uint i;

  for(i = 0; i < HASHPRIME; i++) {
    initlock(&bcache.lock[i], "bhash");
    bcache.head[i].prev = &bcache.head[i];
    bcache.head[i].next = &bcache.head[i];
  }

  // Distribute the buffers round-robin across the hash buckets,
  // linking each one into a bucket's list.
  i = 0;
  for(b = bcache.buf; b < bcache.buf+NBUF; b++, i++){
    if(i == HASHPRIME)
      i = 0;
    b->next = bcache.head[i].next;
    b->prev = &bcache.head[i];
    bcache.head[i].next->prev = b;
    bcache.head[i].next = b;
    initsleeplock(&b->lock, "buffer");
  }
}
// Look through buffer cache for block on device dev.
// If not found, allocate a buffer.
// In either case, return locked buffer.
static struct buf*
bget(uint dev, uint blockno)
{
  struct buf *b;

  // Is the block already cached?
  uint idx = blockno % HASHPRIME;
  acquire(&bcache.lock[idx]);
  for(b = bcache.head[idx].next; b != &bcache.head[idx]; b = b->next){
    if(b->dev == dev && b->blockno == blockno){
      b->refcnt++;
      release(&bcache.lock[idx]);
      acquiresleep(&b->lock);
      return b;
    }
  }

  // Not cached in the idx'th hash table.
  // Recycle the least recently used (LRU) unused buffer in the idx'th
  // hash table. In this case we keep holding the idx'th lock and try
  // to replace a buffer in place.
  for(b = bcache.head[idx].prev; b != &bcache.head[idx]; b = b->prev){
    if(b->refcnt == 0) {
      b->dev = dev;
      b->blockno = blockno;
      b->valid = 0;
      b->refcnt = 1;
      release(&bcache.lock[idx]);
      acquiresleep(&b->lock);
      return b;
    }
  }
  release(&bcache.lock[idx]);

  // We failed to find a replaceable buffer in the idx'th hash table,
  // so try to replace a buffer from another hash table.
  for(int i = 0; i < HASHPRIME; i++) {
    if(i == idx)
      continue;
    // We need to hold both the lock of the i'th table and the idx'th
    // table; acquire them in index order to avoid deadlock.
    if(i < idx) {
      acquire(&bcache.lock[i]);
      acquire(&bcache.lock[idx]);
    } else {
      acquire(&bcache.lock[idx]);
      acquire(&bcache.lock[i]);
    }
    for(b = bcache.head[i].prev; b != &bcache.head[i]; b = b->prev){
      if(b->refcnt == 0) {
        // We found a replaceable buffer: remove it from the i'th table
        // and insert it into the idx'th table.
        b->dev = dev;
        b->blockno = blockno;
        b->valid = 0;
        b->refcnt = 1;
        b->next->prev = b->prev;
        b->prev->next = b->next;
        b->next = bcache.head[idx].next;
        b->prev = &bcache.head[idx];
        bcache.head[idx].next->prev = b;
        bcache.head[idx].next = b;
        // Return with both bucket locks released.
        release(&bcache.lock[i]);
        release(&bcache.lock[idx]);
        acquiresleep(&b->lock);
        return b;
      }
    }
    release(&bcache.lock[i]);
    release(&bcache.lock[idx]);
  }

  panic("bget: no buffers");
}
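One subtlety the code above leaves open: after release(&bcache.lock[idx]) and before the loop re-acquires it, another process can bget() the same block and insert it into bucket idx, so the block could end up cached twice. A minimal sketch of one possible fix is to re-check bucket idx right after the two acquire() calls inside the loop (names as above):

    // Re-check bucket idx: another process may have cached (dev, blockno)
    // while bcache.lock[idx] was released above.
    for(b = bcache.head[idx].next; b != &bcache.head[idx]; b = b->next){
      if(b->dev == dev && b->blockno == blockno){
        b->refcnt++;
        release(&bcache.lock[i]);
        release(&bcache.lock[idx]);
        acquiresleep(&b->lock);
        return b;
      }
    }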