Mostly removal of unused parameters from calls, along with the dead code
and stale comments that depended on them.
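
The changed prototypes, in summary:

	void lmfs_put_block(struct buf *bp);		/* was (bp, block_type) */
	void lmfs_set_blocksize(int blocksize);		/* was (blocksize, major) */
	void lmfs_blockschange(int delta);		/* was (dev, delta) */
	void lmfs_cache_reevaluate(void);		/* was (dev) */
	void fs_blockstats(u64_t *blocks, u64_t *free);	/* 'used' output dropped */

The put_block() block type constants are gone from the public header; the
cache instead keeps one internal ONE_SHOT flag, and lmfs_put_block() is now
a NULL-tolerant wrapper around the internal put_block(). The unused
lmfs_bytes() call is removed as well.
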
Change-Id: I0eb7b568265d1669492d958e78b9e69d7cf6fc05
rip->i_prealloc_count = EXT2_PREALLOC_BLOCKS - 1;
lmfs_markdirty(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
gd->free_blocks_count -= EXT2_PREALLOC_BLOCKS;
sp->s_free_blocks_count -= EXT2_PREALLOC_BLOCKS;
- lmfs_blockschange(sp->s_dev, -EXT2_PREALLOC_BLOCKS);
+ lmfs_blockschange(-EXT2_PREALLOC_BLOCKS);
group_descriptors_dirty = 1;
return block;
}
check_block_number(block, sp, gd);
lmfs_markdirty(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
gd->free_blocks_count--;
sp->s_free_blocks_count--;
- lmfs_blockschange(sp->s_dev, -1);
+ lmfs_blockschange(-1);
group_descriptors_dirty = 1;
if (update_bsearch && block != -1 && block != NO_BLOCK) {
panic("Tried to free unused block %d", bit_returned);
lmfs_markdirty(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
gd->free_blocks_count++;
sp->s_free_blocks_count++;
- lmfs_blockschange(sp->s_dev, 1);
+ lmfs_blockschange(1);
group_descriptors_dirty = 1;
-/* Buffer (block) cache. To acquire a block, a routine calls get_block(),
- * telling which block it wants. The block is then regarded as "in use"
- * and has its 'b_count' field incremented. All the blocks that are not
- * in use are chained together in an LRU list, with 'front' pointing
- * to the least recently used block, and 'rear' to the most recently used
- * block. A reverse chain, using the field b_prev is also maintained.
- * Usage for LRU is measured by the time the put_block() is done. The second
- * parameter to put_block() can violate the LRU order and put a block on the
- * front of the list, if it will probably not be needed soon. If a block
- * is modified, the modifying routine must set b_dirt to DIRTY, so the block
- * will eventually be rewritten to the disk.
- */
-
#ifndef EXT2_BUF_H
#define EXT2_BUF_H
-#include <sys/dirent.h>
-
union fsdata_u {
- char b__data[1]; /* ordinary user data */
-/* indirect block */
- block_t b__ind[1];
-/* bit map block */
- bitchunk_t b__bitmap[1];
+ char b__data[1]; /* ordinary user data */
+ block_t b__ind[1]; /* indirect block */
+ bitchunk_t b__bitmap[1]; /* bit map block */
};
-/* A block is free if b_dev == NO_DEV. */
-
/* These defs make it possible to use b_data(bp) instead of ((union fsdata_u *) bp->data)->b__data */
#define b_data(bp) ((union fsdata_u *) bp->data)->b__data
#define b_ind(bp) ((union fsdata_u *) bp->data)->b__ind
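/* E.g., zeroing part of a block becomes: memset(b_data(bp) + offset, 0, len); */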
}
lmfs_markdirty(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
gd->free_inodes_count--;
sp->s_free_inodes_count--;
panic("Tried to free unused inode %d", bit_returned);
lmfs_markdirty(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
gd->free_inodes_count++;
sp->s_free_inodes_count++;
icopy(rip, dip, rw_flag, TRUE);
- put_block(bp, INODE_BLOCK);
+ put_block(bp);
rip->i_dirt = IN_CLEAN;
}
if (bytes > rip->i_size)
bytes = rip->i_size;
r = fsdriver_copyout(data, 0, link_text, bytes);
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
if (r == OK)
r = bytes;
}
panic("zeroblock_range: len too long: %lld", len);
memset(b_data(bp) + offset, 0, len);
lmfs_markdirty(bp);
- put_block(bp, FULL_DATA_BLOCK);
+ put_block(bp);
}
return(EINVAL);
}
- lmfs_set_blocksize(superblock->s_block_size, major(fs_dev));
+ lmfs_set_blocksize(superblock->s_block_size);
/* Get the root inode of the mounted file system. */
if ( (root_ip = get_inode(fs_dev, ROOT_INODE)) == NULL) {
}
}
- put_block(bp, DIRECTORY_BLOCK); /* put_block() accepts NULL. */
+ put_block(bp); /* put_block() accepts NULL. */
if(r != OK) {
sip->i_links_count = NO_LINK;
*numb = (ino_t) conv4(le_CPU, dp->d_ino);
}
assert(lmfs_dev(bp) != NO_DEV);
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
return(r);
}
/* The whole block has been searched or ENTER has a free slot. */
assert(lmfs_dev(bp) != NO_DEV);
if (e_hit) break; /* e_hit set if ENTER can be performed now */
- put_block(bp, DIRECTORY_BLOCK); /* otherwise, continue searching dir */
+ put_block(bp); /* otherwise, continue searching dir */
}
/* The whole directory has now been searched. */
dp->d_file_type = EXT2_FT_UNKNOWN;
}
lmfs_markdirty(bp);
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
ldir_ptr->i_update |= CTIME | MTIME; /* mark mtime for update later */
ldir_ptr->i_dirt = IN_DIRTY;
#define EXT2_PROTO_H
#define get_block(d, n, t) lmfs_get_block(d, n, t)
-#define put_block(n, t) lmfs_put_block(n, t)
+#define put_block(n) lmfs_put_block(n)
/* Function prototypes. */
#include "inode.h"
#include "super.h"
#include <sys/param.h>
+#include <sys/dirent.h>
#include <assert.h>
lmfs_markdirty(bp);
}
- n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
- put_block(bp, n);
+ put_block(bp);
return(r);
}
excess = block_pos - triple_ind_s;
mindex = excess / addr_in_block2;
b = rd_indir(bp, mindex); /* num of double ind block */
- put_block(bp, INDIRECT_BLOCK); /* release triple ind block */
+ put_block(bp); /* release triple ind block */
excess = excess % addr_in_block2;
}
if (b == NO_BLOCK) return(NO_BLOCK);
ASSERT(lmfs_dev(bp) == rip->i_dev);
mindex = excess / addr_in_block;
b = rd_indir(bp, mindex); /* num of single ind block */
- put_block(bp, INDIRECT_BLOCK); /* release double ind block */
+ put_block(bp); /* release double ind block */
mindex = excess % addr_in_block; /* index into single ind blk */
}
if (b == NO_BLOCK) return(NO_BLOCK);
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
b = rd_indir(bp, mindex);
- put_block(bp, INDIRECT_BLOCK); /* release single ind block */
+ put_block(bp); /* release single ind block */
return(b);
}
}
if (lmfs_dev(bp) != NO_DEV) {
/* Oops, block already in the cache, get out. */
- put_block(bp, FULL_DATA_BLOCK);
+ put_block(bp);
break;
}
}
}
}
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
if (done)
break;
}
/*===========================================================================*
* blockstats *
*===========================================================================*/
-void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
+void fs_blockstats(u64_t *blocks, u64_t *free)
{
struct super_block *sp = get_super(fs_dev);
*blocks = sp->s_blocks_count;
*free = sp->s_free_blocks_count;
- *used = *blocks - *free;
}
/* Create the double indirect block. */
if ( (b2 = alloc_block(rip, rip->i_bsearch) ) == NO_BLOCK) {
/* Release triple ind blk. */
- put_block(bp_tindir, INDIRECT_BLOCK);
+ put_block(bp_tindir);
ext2_debug("failed to allocate dblock near %d\n", rip->i_block[0]);
return(ENOSPC);
}
if (b1 == NO_BLOCK && !(op & WMAP_FREE)) {
if ( (b1 = alloc_block(rip, rip->i_bsearch) ) == NO_BLOCK) {
/* Release dbl and triple indirect blks. */
- put_block(bp_dindir, INDIRECT_BLOCK);
- put_block(bp_tindir, INDIRECT_BLOCK);
+ put_block(bp_dindir);
+ put_block(bp_tindir);
ext2_debug("failed to allocate dblock near %d\n", rip->i_block[0]);
return(ENOSPC);
}
/* b1 equals NO_BLOCK only when we are freeing up the indirect block. */
if(b1 != NO_BLOCK)
lmfs_markdirty(bp);
- put_block(bp, INDIRECT_BLOCK);
+ put_block(bp);
}
/* If the single indirect block isn't there (or was just freed),
rip->i_block[EXT2_TIND_BLOCK] = NO_BLOCK;
}
- put_block(bp_dindir, INDIRECT_BLOCK); /* release double indirect blk */
- put_block(bp_tindir, INDIRECT_BLOCK); /* release triple indirect blk */
+ put_block(bp_dindir); /* release double indirect blk */
+ put_block(bp_tindir); /* release triple indirect blk */
return(OK);
}
register struct buf *bp; /* pointer to buffer to zero */
{
/* Zero a block. */
- ASSERT(lmfs_bytes(bp) > 0);
ASSERT(bp->data);
- memset(b_data(bp), 0, (size_t) lmfs_bytes(bp));
+ memset(b_data(bp), 0, lmfs_fs_block_size());
lmfs_markdirty(bp);
}
break;
}
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
bp = read_extent_block(extent, *offset /
v_pri.logical_block_size_l);
}
/* Parse basic ISO 9660 specs. */
if (check_dir_record(dir_rec,
offset % v_pri.logical_block_size_l) != OK) {
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
return EINVAL;
}
offset += dir_rec->length;
read_inode_extents(i_node, dir_rec, extent, &offset);
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
if (new_offset != NULL)
*new_offset = offset;
return OK;
if (check_dir_record(dir_rec,
*offset % v_pri.logical_block_size_l) != OK) {
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
return;
}
if ((dir_rec->file_flags & D_NOT_LAST_EXTENT) == 0)
done = TRUE;
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
}
}
r = fsdriver_copyout(data, cum_io, b_data(bp)+off, chunk);
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
if (r != OK)
break; /* EOF reached. */
return OK;
}
-void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
+void fs_blockstats(u64_t *blocks, u64_t *free)
{
- *used = *blocks = v_pri.volume_space_size_l;
+ *blocks = v_pri.volume_space_size_l;
*free = 0;
}
(vol_pri->file_struct_ver != 1))
return EINVAL;
- lmfs_set_blocksize(vol_pri->logical_block_size_l, major(fs_dev));
+ lmfs_set_blocksize(vol_pri->logical_block_size_l);
/* Read root directory record. */
root_record = (struct iso9660_dir_record *)vol_pri->root_directory;
}
parse_susp_buffer(dir, b_data(ca_bp) + ca_offset, ca_length);
- lmfs_put_block(ca_bp, FULL_DATA_BLOCK);
+ lmfs_put_block(ca_bp);
return OK;
}
#include "clean.h"
-/* Buffer (block) cache. To acquire a block, a routine calls get_block(),
- * telling which block it wants. The block is then regarded as "in use"
- * and has its 'b_count' field incremented. All the blocks that are not
- * in use are chained together in an LRU list, with 'front' pointing
- * to the least recently used block, and 'rear' to the most recently used
- * block. A reverse chain, using the field b_prev is also maintained.
- * Usage for LRU is measured by the time the put_block() is done. The second
- * parameter to put_block() can violate the LRU order and put a block on the
- * front of the list, if it will probably not be needed soon. If a block
- * is modified, the modifying routine must set b_dirt to DIRTY, so the block
- * will eventually be rewritten to the disk.
- */
-
-#include <sys/dirent.h>
-
union fsdata_u {
- char b__data[1]; /* ordinary user data */
-/* directory block */
- struct direct b__dir[1];
-/* V2 indirect block */
- zone_t b__v2_ind[1];
-/* V2 inode block */
- d2_inode b__v2_ino[1];
-/* bit map block */
- bitchunk_t b__bitmap[1];
+ char b__data[1]; /* ordinary user data */
+ struct direct b__dir[1]; /* directory block */
+ zone_t b__v2_ind[1]; /* V2 indirect block */
+ d2_inode b__v2_ino[1]; /* V2 inode block */
+ bitchunk_t b__bitmap[1]; /* bit map block */
};
-/* A block is free if b_dev == NO_DEV. */
-
-
/* These defs make it possible to use b_data(b) instead of ((union fsdata_u *) b->data)->b__data */
#define b_data(b) ((union fsdata_u *) b->data)->b__data
#define b_dir(b) ((union fsdata_u *) b->data)->b__dir
#define b_bitmap(b) ((union fsdata_u *) b->data)->b__bitmap
#endif
-
assert(sp->s_version == V3);
new_icopy(rip, dip2, rw_flag, sp->s_native);
- put_block(bp, INODE_BLOCK);
+ put_block(bp);
IN_MARKCLEAN(rip);
}
if (bytes > rip->i_size)
bytes = rip->i_size;
r = fsdriver_copyout(data, 0, b_data(bp), bytes);
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
if (r == OK)
r = bytes;
}
bytes = len;
memset(b_data(bp) + offset, 0, bytes);
MARKDIRTY(bp);
- put_block(bp, FULL_DATA_BLOCK);
+ put_block(bp);
pos += bytes;
len -= bytes;
printf("MFS: WARNING: FS 0x%llx unclean, mounting readonly\n", fs_dev);
}
- lmfs_set_blocksize(superblock.s_block_size, major(fs_dev));
+ lmfs_set_blocksize(superblock.s_block_size);
/* Get the root inode of the mounted file system. */
if( (root_ip = get_inode(fs_dev, ROOT_INODE)) == NULL) {
}
}
- put_block(bp, DIRECTORY_BLOCK); /* put_block() accepts NULL. */
+ put_block(bp); /* put_block() accepts NULL. */
if(r != OK) {
sip->i_nlinks = NO_LINK;
(int) dp->mfs_d_ino);
}
assert(lmfs_dev(bp) != NO_DEV);
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
return(r);
}
/* The whole block has been searched or ENTER has a free slot. */
if (e_hit) break; /* e_hit set if ENTER can be performed now */
assert(lmfs_dev(bp) != NO_DEV);
- put_block(bp, DIRECTORY_BLOCK); /* otherwise, continue searching dir */
+ put_block(bp); /* otherwise, continue searching dir */
}
/* The whole directory has now been searched. */
sp = ldir_ptr->i_sp;
dp->mfs_d_ino = conv4(sp->s_native, (int) *numb);
MARKDIRTY(bp);
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
ldir_ptr->i_update |= CTIME | MTIME; /* mark mtime for update later */
IN_MARKDIRTY(ldir_ptr);
if (new_slots > old_slots) {
/* Some shortcuts to functions in -lminixfs */
#define get_block(d, b, t) lmfs_get_block(d, b, t)
-#define put_block(b, t) lmfs_put_block(b, t)
+#define put_block(b) lmfs_put_block(b)
/* Function prototypes. */
#include "inode.h"
#include "super.h"
#include <sys/param.h>
+#include <sys/dirent.h>
#include <assert.h>
MARKDIRTY(bp);
}
- n = (off + chunk == block_size ? FULL_DATA_BLOCK : PARTIAL_DATA_BLOCK);
- put_block(bp, n);
+ put_block(bp);
return(r);
}
ASSERT(lmfs_dev(bp) != NO_DEV);
ASSERT(lmfs_dev(bp) == rip->i_dev);
z = rd_indir(bp, index); /* z= zone for single*/
- put_block(bp, INDIRECT_BLOCK); /* release double ind block */
+ put_block(bp); /* release double ind block */
excess = excess % nr_indirects; /* index into single ind blk */
}
if (bp == NULL)
return NO_BLOCK; /* peeking failed */
z = rd_indir(bp, (int) excess); /* get block pointed to */
- put_block(bp, INDIRECT_BLOCK); /* release single indir blk */
+ put_block(bp); /* release single indir blk */
if (z == NO_ZONE) return(NO_BLOCK);
b = (block_t) ((z << scale) + boff);
return(b);
assert(bp->lmfs_count > 0);
if (lmfs_dev(bp) != NO_DEV) {
/* Oops, block already in the cache, get out. */
- put_block(bp, FULL_DATA_BLOCK);
+ put_block(bp);
break;
}
}
}
}
- put_block(bp, DIRECTORY_BLOCK);
+ put_block(bp);
if (done)
break;
}
{
struct super_block *sp;
int scale;
- u64_t used;
sp = get_super(fs_dev);
scale = sp->s_log_zone_size;
- fs_blockstats(&st->f_blocks, &st->f_bfree, &used);
+ fs_blockstats(&st->f_blocks, &st->f_bfree);
st->f_bavail = st->f_bfree;
st->f_bsize = sp->s_block_size << scale;
if (b >= map_bits) break;
}
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
++block;
word = 0;
} while (--bcount > 0);
- return free_bits; /* no bit could be allocated */
+ return free_bits;
}
/*===========================================================================*
* blockstats *
*===========================================================================*/
-void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used)
+void fs_blockstats(u64_t *blocks, u64_t *free)
{
struct super_block *sp;
assert(!sp->s_log_zone_size);
*blocks = sp->s_zones;
- *used = get_used_blocks(sp);
- *free = *blocks - *used;
+ *free = *blocks - get_used_blocks(sp);
return;
}
k |= 1 << i;
*wptr = (bitchunk_t) conv4(sp->s_native, (int) k);
MARKDIRTY(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
if(map == ZMAP) {
used_blocks++;
- lmfs_blockschange(sp->s_dev, 1);
+ lmfs_blockschange(1);
}
return(b);
}
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
if (++block >= (unsigned int) bit_blocks) /* last block, wrap around */
block = 0;
word = 0;
b_bitmap(bp)[word] = (bitchunk_t) conv4(sp->s_native, (int) k);
MARKDIRTY(bp);
- put_block(bp, MAP_BLOCK);
+ put_block(bp);
if(map == ZMAP) {
used_blocks--;
- lmfs_blockschange(sp->s_dev, -1);
+ lmfs_blockschange(-1);
}
}
sp->s_dev = save_dev;
}
- put_block(bp, FULL_DATA_BLOCK);
+ put_block(bp);
lmfs_flushall();
return OK;
if (bp_dindir != NULL) MARKDIRTY(bp_dindir);
if (z1 == NO_ZONE) {
/* Release dbl indirect blk. */
- put_block(bp_dindir, INDIRECT_BLOCK);
+ put_block(bp_dindir);
return(err_code); /* couldn't create single ind */
}
}
}
/* z1 equals NO_ZONE only when we are freeing up the indirect block. */
if(z1 != NO_ZONE) MARKDIRTY(bp);
- put_block(bp, INDIRECT_BLOCK);
+ put_block(bp);
}
/* If the single indirect block isn't there (or was just freed),
rip->i_zone[zones+1] = NO_ZONE;
}
- put_block(bp_dindir, INDIRECT_BLOCK); /* release double indirect blk */
+ put_block(bp_dindir); /* release double indirect blk */
return(OK);
}
register struct buf *bp; /* pointer to buffer to zero */
{
/* Zero a block. */
- ASSERT(lmfs_bytes(bp) > 0);
ASSERT(bp->data);
- memset(b_data(bp), 0, (size_t) lmfs_bytes(bp));
+ memset(b_data(bp), 0, lmfs_fs_block_size());
MARKDIRTY(bp);
}
void lmfs_markclean(struct buf *bp);
int lmfs_isclean(struct buf *bp);
dev_t lmfs_dev(struct buf *bp);
-int lmfs_bytes(struct buf *bp);
int lmfs_bufs_in_use(void);
int lmfs_nr_bufs(void);
void lmfs_flushall(void);
void lmfs_flushdev(dev_t dev);
int lmfs_fs_block_size(void);
-void lmfs_may_use_vmcache(int);
-void lmfs_set_blocksize(int blocksize, int major);
-void lmfs_reset_rdwt_err(void);
-int lmfs_rdwt_err(void);
+void lmfs_may_use_vmcache(int);
+void lmfs_set_blocksize(int blocksize);
+void lmfs_reset_rdwt_err(void);
+int lmfs_rdwt_err(void);
void lmfs_buf_pool(int new_nr_bufs);
struct buf *lmfs_get_block(dev_t dev, block64_t block, int how);
struct buf *lmfs_get_block_ino(dev_t dev, block64_t block, int how, ino_t ino,
u64_t off);
-void lmfs_put_block(struct buf *bp, int block_type);
+void lmfs_put_block(struct buf *bp);
void lmfs_free_block(dev_t dev, block64_t block);
void lmfs_zero_block_ino(dev_t dev, ino_t ino, u64_t off);
void lmfs_invalidate(dev_t device);
void lmfs_rw_scattered(dev_t, struct buf **, int, int);
void lmfs_setquiet(int q);
-void lmfs_cache_reevaluate(dev_t dev);
-void lmfs_blockschange(dev_t dev, int delta);
+void lmfs_cache_reevaluate(void);
+void lmfs_blockschange(int delta);
/* calls that libminixfs does into fs */
-void fs_blockstats(u64_t *blocks, u64_t *free, u64_t *used);
+void fs_blockstats(u64_t *blocks, u64_t *free);
/* get_block arguments */
#define NORMAL 0 /* forces get_block to do disk read */
#define PREFETCH 2 /* tells get_block not to read or mark dev */
#define PEEK 3 /* returns NULL if not in cache or VM cache */
-/* When a block is released, the type of usage is passed to put_block(). */
-#define ONE_SHOT 0200 /* set if block not likely to be needed soon */
-
-#define INODE_BLOCK 0 /* inode block */
-#define DIRECTORY_BLOCK 1 /* directory block */
-#define INDIRECT_BLOCK 2 /* pointer block */
-#define MAP_BLOCK 3 /* bit map */
-#define FULL_DATA_BLOCK 5 /* data, fully used */
-#define PARTIAL_DATA_BLOCK 6 /* data, partly used*/
-
#define END_OF_FILE (-104) /* eof detected */
/* Block I/O helper functions. */
assert(bp != NULL);
if (lmfs_dev(bp) != NO_DEV) {
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
break;
}
(char *)bp->data + block_off, chunk);
}
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
if (r != OK)
break;
#include <minix/u64.h>
#include <minix/bdev.h>
+/* Buffer (block) cache. To acquire a block, a routine calls lmfs_get_block(),
+ * telling which block it wants. The block is then regarded as "in use" and
+ * has its reference count incremented. All the blocks that are not in use are
+ * chained together in an LRU list, with 'front' pointing to the least recently
+ * used block, and 'rear' to the most recently used block. A reverse chain is
+ * also maintained. A block's LRU position is determined by the time at which
+ * its put_block() is done. The second parameter to put_block() can violate
+ * the LRU order and put a block at the front of the list, if it will probably
+ * not be needed again.
+ * This is used internally only; the lmfs_put_block() API call has no second
+ * parameter. If a block is modified, the modifying routine must mark the
+ * block as dirty, so the block will eventually be rewritten to the disk.
+ */
+
+/* Flags to put_block(). */
+#define ONE_SHOT 0x1 /* set if block will not be needed again */
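+
+/* For example, the two placements a caller in this file can request:
+ *
+ *	put_block(bp, 0);		rear of LRU, kept cached for a while
+ *	put_block(bp, ONE_SHOT);	front of LRU, first to be evicted
+ */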
+
#define BUFHASH(b) ((unsigned int)((b) % nr_bufs))
#define MARKCLEAN lmfs_markclean
static void rm_lru(struct buf *bp);
static void read_block(struct buf *);
static void freeblock(struct buf *bp);
-static void cache_heuristic_check(int major);
+static void cache_heuristic_check(void);
+static void put_block(struct buf *bp, int put_flags);
static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */
void lmfs_setquiet(int q) { quiet = q; }
static u32_t fs_bufs_heuristic(int minbufs, u32_t btotal, u64_t bfree,
- int blocksize, dev_t majordev)
+ int blocksize)
{
struct vm_stats_info vsi;
int bufs;
return bufs;
}
-void lmfs_blockschange(dev_t dev, int delta)
+void lmfs_blockschange(int delta)
{
/* Change the number of allocated blocks by 'delta.'
* Also accumulate the delta since the last cache re-evaluation.
#define BANDKB (10*1024) /* recheck cache every 10MB change */
if(bitdelta*(int)fs_block_size/1024 > BANDKB ||
bitdelta*(int)fs_block_size/1024 < -BANDKB) {
- lmfs_cache_reevaluate(dev);
+ lmfs_cache_reevaluate();
bitdelta = 0;
}
}
return bp->lmfs_dev;
}
-int lmfs_bytes(struct buf *bp)
-{
- return bp->lmfs_bytes;
-}
-
static void free_unused_blocks(void)
{
struct buf *bp;
if (how == PEEK) {
bp->lmfs_dev = NO_DEV;
- lmfs_put_block(bp, ONE_SHOT);
+ put_block(bp, ONE_SHOT);
return NULL;
}
}
/*===========================================================================*
- * lmfs_put_block *
+ * put_block *
*===========================================================================*/
-void lmfs_put_block(
- struct buf *bp, /* pointer to the buffer to be released */
- int block_type /* INODE_BLOCK, DIRECTORY_BLOCK, or whatever */
-)
+static void put_block(struct buf *bp, int put_flags)
{
-/* Return a block to the list of available blocks. Depending on 'block_type'
+/* Return a block to the list of available blocks. Depending on 'put_flags'
* it may be put on the front or rear of the LRU chain. Blocks that are
- * expected to be needed again shortly (e.g., partially full data blocks)
- * go on the rear; blocks that are unlikely to be needed again shortly
- * (e.g., full data blocks) go on the front. Blocks whose loss can hurt
- * the integrity of the file system (e.g., inode blocks) are written to
- * disk immediately if they are dirty.
+ * expected to be needed again at some point go on the rear; blocks that are
+ * unlikely to be needed again at all go on the front.
*/
dev_t dev;
uint64_t dev_off;
int r, setflags;
- if (bp == NULL) return; /* it is easier to check here than in caller */
+ assert(bp != NULL);
dev = bp->lmfs_dev;
if (bp->lmfs_count != 0) return; /* block is still in use */
/* Put this block back on the LRU chain. */
- if (dev == NO_DEV || dev == DEV_RAM || (block_type & ONE_SHOT)) {
- /* Block probably won't be needed quickly. Put it on front of chain.
+ if (dev == NO_DEV || dev == DEV_RAM || (put_flags & ONE_SHOT)) {
+ /* Block will not be needed again. Put it on front of chain.
* It will be the next block to be evicted from the cache.
*/
bp->lmfs_prev = NULL;
front = bp;
}
else {
- /* Block probably will be needed quickly. Put it on rear of chain.
+ /* Block may be needed again. Put it on rear of chain.
* It will not be evicted from the cache for a long time.
*/
bp->lmfs_prev = rear;
if(vmcache && bp->lmfs_needsetcache && dev != NO_DEV) {
assert(bp->data);
- setflags = (block_type & ONE_SHOT) ? VMSF_ONCE : 0;
+ setflags = (put_flags & ONE_SHOT) ? VMSF_ONCE : 0;
if ((r = vm_set_cacheblock(bp->data, dev, dev_off, bp->lmfs_inode,
bp->lmfs_inode_offset, &bp->lmfs_flags, fs_block_size,
setflags)) != OK) {
* after, which could be a problem if VM already forgot the block and we are
* expected to pass it to VM again, which then wouldn't happen.
*/
- if (block_type & ONE_SHOT)
+ if (put_flags & ONE_SHOT)
bp->lmfs_dev = NO_DEV;
}
+/*===========================================================================*
+ * lmfs_put_block *
+ *===========================================================================*/
+void lmfs_put_block(struct buf *bp)
+{
+/* User interface to put_block(). */
+
+ if (bp == NULL) return; /* for poorly written file systems */
+
+ put_block(bp, 0);
+}
+
/*===========================================================================*
* lmfs_free_block *
*===========================================================================*/
* TODO: tell VM that it is an all-zeroes block, so that VM can deduplicate
* all such pages in its cache.
*/
- lmfs_put_block(bp, ONE_SHOT);
+ put_block(bp, ONE_SHOT);
}
-void lmfs_cache_reevaluate(dev_t dev)
+void lmfs_cache_reevaluate(void)
{
- if(bufs_in_use == 0 && dev != NO_DEV) {
+ if (bufs_in_use == 0) {
/* if the cache isn't in use any more, we could resize it. */
- cache_heuristic_check(major(dev));
+ cache_heuristic_check();
}
}
}
if (rw_flag == READING) {
bp->lmfs_dev = dev; /* validate block */
- lmfs_put_block(bp, PARTIAL_DATA_BLOCK);
+ lmfs_put_block(bp);
} else {
MARKCLEAN(bp);
}
* give at this time. Don't forget to release those extras.
*/
while (bufqsize > 0) {
- lmfs_put_block(*bufq++, PARTIAL_DATA_BLOCK);
+ lmfs_put_block(*bufq++);
bufqsize--;
}
}
fs_block_size = blocksize;
}
-static void cache_heuristic_check(int major)
+static void cache_heuristic_check(void)
{
int bufs, d;
- u64_t btotal, bfree, bused;
+ u64_t btotal, bfree;
- fs_blockstats(&btotal, &bfree, &bused);
+ fs_blockstats(&btotal, &bfree);
- bufs = fs_bufs_heuristic(10, btotal, bfree,
- fs_block_size, major);
+ bufs = fs_bufs_heuristic(10, btotal, bfree, fs_block_size);
/* set the cache to the new heuristic size if the new one
* is more than 10% off from the current one.
/*===========================================================================*
* lmfs_set_blocksize *
*===========================================================================*/
-void lmfs_set_blocksize(int new_block_size, int major)
+void lmfs_set_blocksize(int new_block_size)
{
cache_resize(new_block_size, MINBUFS);
- cache_heuristic_check(major);
+ cache_heuristic_check();
/* Decide whether to use secondary cache or not.
- * Only do this if
- * - it's available, and
- * - use of it hasn't been disabled for this fs, and
- * - our main FS device isn't a memory device
+ * Only do this if the block size is a multiple of the page size, and using
+ * the VM cache has been enabled for this FS.
*/
vmcache = 0;
+++ /dev/null
-#define _SYSTEM
-
-#include <lib.h> /* common to all libraries */
-#include <minix/com.h> /* need task numbers + message types */
lmfs_markdirty(bp);
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
return blocksize;
}
memcpy(data, bp->data, blocksize);
- lmfs_put_block(bp, FULL_DATA_BLOCK);
+ lmfs_put_block(bp);
return blocksize;
}
/* Fake some libminixfs client functions */
void
-fs_blockstats(u64_t *total, u64_t *free, u64_t *used)
+fs_blockstats(u64_t *total, u64_t *free)
{
- *total = *free = *used = 0;
+ *total = *free = 0;
}
static void allocate(int b)
for(p = 1; p <= 3; p++) {
/* Do not update curblocksize until the cache is flushed. */
newblocksize = PAGE_SIZE*p;
- lmfs_set_blocksize(newblocksize, MYMAJOR);
+ lmfs_set_blocksize(newblocksize);
curblocksize = newblocksize; /* now it's safe to update */
lmfs_buf_pool(BLOCKS);
if(dotest(curblocksize, BLOCKS, ITER)) e(n);
for(wss = 2; wss <= 3; wss++) {
int wsblocks = 10*wss*wss*wss*wss*wss;
for(cs = wsblocks/4; cs <= wsblocks*3; cs *= 1.5) {
- lmfs_set_blocksize(PAGE_SIZE, MYMAJOR);
+ lmfs_set_blocksize(PAGE_SIZE);
curblocksize = PAGE_SIZE; /* same as above */
lmfs_buf_pool(cs);
if(dotest(curblocksize, wsblocks, ITER)) e(n);