static long doub_ind_s;
static long triple_ind_s;
static long out_range_s;
- int iomode = NORMAL;
+ int iomode;
- if(opportunistic) iomode = PREFETCH;
+ iomode = opportunistic ? PEEK : NORMAL;
if (first_time) {
addr_in_block = rip->i_sp->s_block_size / BLOCK_ADDRESS_BYTES;
}
if (b == NO_BLOCK) return(NO_BLOCK);
bp = get_block(rip->i_dev, b, iomode); /* get double indirect block */
- if(opportunistic && lmfs_dev(bp) == NO_DEV) {
- put_block(bp, INDIRECT_BLOCK);
- return NO_BLOCK;
- }
+ if (bp == NULL)
+ return NO_BLOCK; /* peeking failed */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
mindex = excess / addr_in_block;
}
if (b == NO_BLOCK) return(NO_BLOCK);
bp = get_block(rip->i_dev, b, iomode); /* get single indirect block */
- if(opportunistic && lmfs_dev(bp) == NO_DEV) {
- put_block(bp, INDIRECT_BLOCK);
- return NO_BLOCK;
- }
+ if (bp == NULL)
+ return NO_BLOCK; /* peeking failed */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
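/* Illustrative caller sketch, not part of the patch: a cache-only mapping
 * helper. The read_map(rip, position, opportunistic) signature is an
 * assumption; only the PEEK-on-miss behavior is taken from the code above.
 */
static block_t map_cached_only(struct inode *rip, u64_t position)
{
	/* With 'opportunistic' set, every get_block() on the indirect path
	 * uses PEEK, so NO_BLOCK here may mean "not resident" rather than
	 * "hole in the file"; no disk I/O is performed either way.
	 */
	return read_map(rip, position, 1 /* opportunistic */);
}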
unsigned int dzones, nr_indirects;
block_t b;
unsigned long excess, zone, block_pos;
- int iomode = NORMAL;
+ int iomode;
- if(opportunistic) iomode = PREFETCH;
+ iomode = opportunistic ? PEEK : NORMAL;
scale = rip->i_sp->s_log_zone_size; /* for block-zone conversion */
block_pos = position/rip->i_sp->s_block_size; /* relative blk # in file */
if ((unsigned int) index > rip->i_nindirs)
return(NO_BLOCK); /* Can't go beyond double indirects */
bp = get_block(rip->i_dev, b, iomode); /* get double indirect block */
- if(opportunistic && lmfs_dev(bp) == NO_DEV) {
- put_block(bp, INDIRECT_BLOCK);
- return NO_BLOCK;
- }
+ if (bp == NULL)
+ return NO_BLOCK; /* peeking failed */
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
z = rd_indir(bp, index); /* z = zone for single */
if (z == NO_ZONE) return(NO_BLOCK);
b = (block_t) z << scale; /* b is blk # for single ind */
bp = get_block(rip->i_dev, b, iomode); /* get single indirect block */
- if(opportunistic && lmfs_dev(bp) == NO_DEV) {
- put_block(bp, INDIRECT_BLOCK);
- return NO_BLOCK;
- }
+ if (bp == NULL)
+ return NO_BLOCK; /* peeking failed */
z = rd_indir(bp, (int) excess); /* get block pointed to */
put_block(bp, INDIRECT_BLOCK); /* release single indir blk */
if (z == NO_ZONE) return(NO_BLOCK);
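/* A minimal sketch of the pattern the hunks above implement: walk one level
 * of indirection with PEEK so that a cache miss costs no disk I/O. The helper
 * name is hypothetical; get_block(), rd_indir() and put_block() are the same
 * calls used above.
 */
static block_t peek_indir(struct inode *rip, block_t b, int index)
{
	struct buf *bp;
	zone_t z;
	int scale = rip->i_sp->s_log_zone_size; /* block-zone conversion */

	if (b == NO_BLOCK) return(NO_BLOCK);
	bp = get_block(rip->i_dev, b, PEEK);	/* no I/O; NULL on a miss */
	if (bp == NULL)
		return NO_BLOCK;		/* peeking failed */
	z = rd_indir(bp, index);		/* zone held in entry 'index' */
	put_block(bp, INDIRECT_BLOCK);		/* release the indirect block */
	if (z == NO_ZONE) return(NO_BLOCK);
	return (block_t) z << scale;		/* block # for that zone */
}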
void lmfs_reset_rdwt_err(void);
int lmfs_rdwt_err(void);
void lmfs_buf_pool(int new_nr_bufs);
-struct buf *lmfs_get_block(dev_t dev, block64_t block,int only_search);
-struct buf *lmfs_get_block_ino(dev_t dev, block64_t block,int only_search,
- ino_t ino, u64_t off);
+struct buf *lmfs_get_block(dev_t dev, block64_t block, int how);
+struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
+ u64_t off);
void lmfs_put_block(struct buf *bp, int block_type);
void lmfs_free_block(dev_t dev, block64_t block);
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t off);
#define NORMAL 0 /* forces get_block to do disk read */
#define NO_READ 1 /* prevents get_block from doing disk read */
#define PREFETCH 2 /* tells get_block not to read or mark dev */
+#define PEEK 3 /* returns NULL if not in cache or VM cache */
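/* Hedged summary of the four modes, paraphrasing the lmfs_get_block_ino()
 * comment below rather than defining new behavior:
 *
 *	NORMAL   - read the block from disk if it is not already cached
 *	NO_READ  - return a buffer without reading; caller overwrites it
 *	PREFETCH - no read; a miss is flagged by lmfs_dev(bp) == NO_DEV
 *	PEEK     - no read, no data allocation; a miss returns NULL
 */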
/* When a block is released, the type of usage is passed to put_block(). */
#define ONE_SHOT 0200 /* set if block not likely to be needed soon */
/*===========================================================================*
* lmfs_get_block *
*===========================================================================*/
-struct buf *lmfs_get_block(dev_t dev, block64_t block, int only_search)
+struct buf *lmfs_get_block(dev_t dev, block64_t block, int how)
{
- return lmfs_get_block_ino(dev, block, only_search, VMC_NO_INODE, 0);
+ return lmfs_get_block_ino(dev, block, how, VMC_NO_INODE, 0);
}
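/* Usage sketch (hypothetical caller; only identifiers declared above are
 * assumed): peek at a block without triggering I/O, then release it.
 */
static void try_peek(dev_t dev, block64_t block)
{
	struct buf *bp;

	if ((bp = lmfs_get_block(dev, block, PEEK)) == NULL)
		return;			/* not in the cache or known to VM */

	/* ... inspect bp->data here ... */

	lmfs_put_block(bp, ONE_SHOT);	/* hint: not needed again soon */
}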
/*===========================================================================*
* lmfs_get_block_ino *
*===========================================================================*/
-struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int only_search,
- ino_t ino, u64_t ino_off)
+struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
+ u64_t ino_off)
{
/* Check to see if the requested block is in the block cache. If so, return
* a pointer to it. If not, evict some other block and fetch it (unless
- * 'only_search' is 1). All the blocks in the cache that are not in use
- * are linked together in a chain, with 'front' pointing to the least recently
- * used block and 'rear' to the most recently used block. If 'only_search' is
- * 1, the block being requested will be overwritten in its entirety, so it is
- * only necessary to see if it is in the cache; if it is not, any free buffer
- * will do. It is not necessary to actually read the block in from disk.
- * If 'only_search' is PREFETCH, the block need not be read from the disk,
- * and the device is not to be marked on the block, so callers can tell if
- * the block returned is valid.
+ * 'how' is NO_READ). All the blocks in the cache that are not in use are
+ * linked together in a chain, with 'front' pointing to the least recently used
+ * block and 'rear' to the most recently used block. If 'how' is NO_READ, the
+ * block being requested will be overwritten in its entirety, so it is only
+ * necessary to see if it is in the cache; if it is not, any free buffer will
+ * do. It is not necessary to actually read the block in from disk. If 'how'
+ * is PREFETCH, the block need not be read from the disk, and the device is not
+ * to be marked on the block (i.e., set to NO_DEV), so callers can tell if the
+ * block returned is valid. If 'how' is PEEK, the function returns the block
+ * if it is in the cache or could be obtained from VM, and NULL otherwise.
* In addition to the LRU chain, there is also a hash chain to link together
* blocks whose block numbers end with the same bit strings, for fast lookup.
*/
-
int b;
static struct buf *bp;
uint64_t dev_off;
}
bp->data = NULL;
+ /* The block is not in the cache, and VM does not know about it. If we were
+ * requested to search for the block only, we can now return failure to the
+ * caller. Return the block to the pool without allocating data pages, since
+ * these would be freed upon recycling the block anyway.
+ */
+ if (how == PEEK) {
+ bp->lmfs_dev = NO_DEV;
+
+ lmfs_put_block(bp, ONE_SHOT);
+
+ return NULL;
+ }
+
/* Not in the cache; reserve memory for its contents. */
lmfs_alloc_block(bp);
assert(bp->data);
- if(only_search == PREFETCH) {
+ if(how == PREFETCH) {
/* PREFETCH: don't do i/o. */
bp->lmfs_dev = NO_DEV;
- } else if (only_search == NORMAL) {
+ } else if (how == NORMAL) {
read_block(bp);
- } else if(only_search == NO_READ) {
+ } else if(how == NO_READ) {
/* This block will be overwritten by new contents. */
} else
- panic("unexpected only_search value: %d", only_search);
+ panic("unexpected 'how' value: %d", how);
assert(bp->data);
assert(bp->lmfs_flags & VMMC_BLOCK_LOCKED);
bp->lmfs_flags &= ~VMMC_BLOCK_LOCKED;
- /* block has sensible content - if necesary, identify it to VM */
+ /* block has sensible content - if necessary, identify it to VM */
if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
+ assert(bp->data);
+
setflags = (block_type & ONE_SHOT) ? VMSF_ONCE : 0;
if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
bp->lmfs_inode_offset, &bp->lmfs_flags, fs_block_size,
register struct buf *bp;
+ assert(device != NO_DEV);
+
for (bp = &buf[0]; bp < &buf[nr_bufs]; bp++) {
if (bp->lmfs_dev == device) {
assert(bp->data);