static void read_block(struct buf *);
static void flushall(dev_t dev);
static void freeblock(struct buf *bp);
+static void cache_heuristic_check(int major);
static int vmcache = 0; /* are we using vm's secondary cache? (initially not) */
* portion of the used FS, and at most a certain %age of remaining
* memory
*/
- if((vm_info_stats(&vsi) != OK)) {
+ if(vm_info_stats(&vsi) != OK) {
bufs = 1024;
- if(!quiet) printf("fslib: heuristic info fail: default to %d bufs\n", bufs);
+ if(!quiet)
+ printf("fslib: heuristic info fail: default to %d bufs\n", bufs);
return bufs;
}
- kbytes_remain_mem = div64u(mul64u(vsi.vsi_free, vsi.vsi_pagesize), 1024);
+ /* remaining free memory is unused memory plus memory in used for cache,
+ * as the cache can be evicted
+ */
+ kbytes_remain_mem = (u64_t)(vsi.vsi_free + vsi.vsi_cached) *
+ vsi.vsi_pagesize / 1024;
/* check fs usage. */
kbytes_used_fs = div64u(mul64u(bused, blocksize), 1024);
return bufs;
}
+void lmfs_blockschange(dev_t dev, int delta)
+{
+	/* Change the number of allocated blocks by 'delta.'
+	 * Also accumulate the delta since the last cache re-evaluation.
+	 * If it is outside a certain band, ask the cache library to
+	 * re-evaluate the cache size.
+	 */
+	static int bitdelta = 0; /* NOTE(review): one accumulator shared by all devices — confirm callers are per-device FS instances */
+	bitdelta += delta;
+#define BANDKB (10*1024)	/* recheck cache every 10MB change */
+	if(bitdelta*fs_block_size/1024 > BANDKB ||
+	   bitdelta*fs_block_size/1024 < -BANDKB) {
+		lmfs_cache_reevaluate(dev); /* may resize only if the cache is idle (see lmfs_cache_reevaluate) */
+		bitdelta = 0; /* reset even if no resize happened, so we wait another full band */
+	}
+}
+
void
lmfs_markdirty(struct buf *bp)
{
}
}
bp->lmfs_needsetcache = 0;
+
+}
+
+void lmfs_cache_reevaluate(dev_t dev) /* re-run the cache-sizing heuristic for dev's major device, when safe */
+{
+	if(bufs_in_use == 0 && dev != NO_DEV) {
+		/* if the cache isn't in use any more, we could resize it. */
+		cache_heuristic_check(major(dev)); /* resizing is only attempted on an idle cache */
+	}
}
/*===========================================================================*
fs_block_size = blocksize;
}
-/*===========================================================================*
- *				lmfs_set_blocksize		     *
- *===========================================================================*/
-void lmfs_set_blocksize(int new_block_size, int major)
+static void cache_heuristic_check(int major) /* recompute desired buffer count; resize cache if far off */
{
-	int bufs;
+	int bufs, d;
	u32_t btotal, bfree, bused;
-	cache_resize(new_block_size, MINBUFS);
-
	fs_blockstats(&btotal, &bfree, &bused);
	bufs = fs_bufs_heuristic(10, btotal, bfree,
-		new_block_size, major);
+		fs_block_size, major); /* use the current block size rather than a caller-supplied one */
+
+	/* set the cache to the new heuristic size if the new one
+	 * is more than 10% off from the current one.
+	 */
+	d = bufs-nr_bufs;
+	if(d < 0) d = -d; /* absolute difference */
+	if(d*100/nr_bufs > 10) { /* NOTE(review): assumes nr_bufs > 0 (cache sized at least to MINBUFS beforehand) — confirm */
+		cache_resize(fs_block_size, bufs);
+	}
+}
- cache_resize(new_block_size, bufs);
+/*===========================================================================*
+ * lmfs_set_blocksize *
+ *===========================================================================*/
+void lmfs_set_blocksize(int new_block_size, int major)
+{
+ cache_resize(new_block_size, MINBUFS);
+ cache_heuristic_check(major);
/* Decide whether to use seconday cache or not.
* Only do this if
#include "super.h"
#include "const.h"
+static u32_t used_blocks = 0;
/*===========================================================================*
* alloc_bit *
*wptr = (bitchunk_t) conv4(sp->s_native, (int) k);
MARKDIRTY(bp);
put_block(bp, MAP_BLOCK);
+ if(map == ZMAP) {
+ used_blocks++;
+ lmfs_blockschange(sp->s_dev, 1);
+ }
return(b);
}
put_block(bp, MAP_BLOCK);
MARKDIRTY(bp);
put_block(bp, MAP_BLOCK);
-}
+ if(map == ZMAP) {
+ used_blocks--;
+ lmfs_blockschange(sp->s_dev, -1);
+ }
+}
/*===========================================================================*
* get_super *
return rw_super(sp, 1);
}
+static int blocks_known = 0; /* lazily set the first time get_used_blocks() runs */
+
+u32_t get_used_blocks(struct super_block *sp) /* NOTE(review): caches into a file-scope global — assumes one mounted device per server instance; confirm */
+{
+	if(!blocks_known) {
+		/* how many blocks are in use? */
+		used_blocks = sp->s_zones - count_free_bits(sp, ZMAP);
+		blocks_known = 1;
+	}
+	return used_blocks; /* kept current by the ZMAP updates in alloc_bit()/free_bit() */
+}