*/
#if (DEBUG_TIME_LOCKS || DEBUG_LOCK_CHECK)
# undef lock
-# define lock(c, v) do { lockcheck; intr_disable(); locktimestart(c, v); } while(0)
+# define lock(c, v) do { lockcheck; \
+ intr_disable(); \
+ locktimestart(c, v); \
+ } while(0)
# undef unlock
-# define unlock(c) do { locktimeend(c); intr_enable(); } while(0)
+# define unlock(c) do { locktimeend(c); \
+ intr_enable();\
+ } while(0)
#endif
#endif /* DEBUG_H */
* Together with "mpx.s" it forms the lowest layer of the MINIX kernel.
* There is one entry point from the outside:
*
- * sys_call: a system call, i.e., the kernel is trapped with an INT
+ * sys_call: a system call, i.e., the kernel is trapped with an INT
*
* As well as several entry points used from the interrupt and task level:
*
#if (CHIP == INTEL)
#define CopyMess(s,sp,sm,dp,dm) \
- cp_mess(s, (sp)->p_memmap[D].mem_phys, (vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
+ cp_mess(s, (sp)->p_memmap[D].mem_phys, \
+ (vir_bytes)sm, (dp)->p_memmap[D].mem_phys, (vir_bytes)dm)
#endif /* (CHIP == INTEL) */
#if (CHIP == M68000)
* if the caller doesn't do receive().
*/
if (! (priv(caller_ptr)->s_trap_mask & (1 << function)) ||
- (iskerneln(src_dst) && function != SENDREC && function != RECEIVE)) {
+ (iskerneln(src_dst) && function != SENDREC
+ && function != RECEIVE)) {
kprintf("sys_call: trap %d not allowed, caller %d, src_dst %d\n",
function, proc_nr(caller_ptr), src_dst);
return(ECALLDENIED); /* trap denied by mask or kernel */
* (u32_t *) dtp->base = vir2phys(idt);
/* Build segment descriptors for tasks and interrupt handlers. */
- init_codeseg(&gdt[CS_INDEX], kinfo.code_base, kinfo.code_size, INTR_PRIVILEGE);
- init_dataseg(&gdt[DS_INDEX], kinfo.data_base, kinfo.data_size, INTR_PRIVILEGE);
+ init_codeseg(&gdt[CS_INDEX],
+ kinfo.code_base, kinfo.code_size, INTR_PRIVILEGE);
+ init_dataseg(&gdt[DS_INDEX],
+ kinfo.data_base, kinfo.data_size, INTR_PRIVILEGE);
init_dataseg(&gdt[ES_INDEX], 0L, 0, TASK_PRIVILEGE);
/* Build scratch descriptors for functions in klib88. */
#define CS_SELECTOR 0x30 /* (CS_INDEX * DESC_SIZE) */
#define MON_CS_SELECTOR 0x38 /* (MON_CS_INDEX * DESC_SIZE) */
#define TSS_SELECTOR 0x40 /* (TSS_INDEX * DESC_SIZE) */
-#define DS_286_SELECTOR 0x49 /* (DS_286_INDEX*DESC_SIZE + TASK_PRIVILEGE) */
-#define ES_286_SELECTOR 0x51 /* (ES_286_INDEX*DESC_SIZE + TASK_PRIVILEGE) */
+#define DS_286_SELECTOR 0x49 /* (DS_286_INDEX*DESC_SIZE+TASK_PRIVILEGE) */
+#define ES_286_SELECTOR 0x51 /* (ES_286_INDEX*DESC_SIZE+TASK_PRIVILEGE) */
/* Fixed local descriptors. */
#define CS_LDT_INDEX 0 /* process CS */
#define PAGE_GRAN_SHIFT 12 /* extra shift for page granular limits */
/* Type-byte bits. */
-#define DESC_386_BIT 0x08 /* 386 types are obtained by ORing with this */
+#define DESC_386_BIT 0x08 /* 386 types are obtained by ORing with this */
/* LDT's and TASK_GATE's don't need it */
/* Granularity byte. */
-#define GRANULAR 0x80 /* set for 4K granularilty */
-#define DEFAULT 0x40 /* set for 32-bit defaults (executable seg) */
-#define BIG 0x40 /* set for "BIG" (expand-down seg) */
-#define AVL 0x10 /* 0 for available */
-#define LIMIT_HIGH 0x0F /* mask for high bits of limit */
+#define GRANULAR 0x80 /* set for 4K granularity */
+#define DEFAULT 0x40 /* set for 32-bit defaults (executable seg) */
+#define BIG 0x40 /* set for "BIG" (expand-down seg) */
+#define AVL 0x10 /* 0 for available */
+#define LIMIT_HIGH 0x0F /* mask for high bits of limit */
/* Copy bytes from virtual address src_addr to virtual address dst_addr.
* Virtual addresses can be in ABS, LOCAL_SEG, REMOTE_SEG, or BIOS_SEG.
*/
- struct vir_addr *vir_addr[2]; /* virtual source and destination address */
+ struct vir_addr *vir_addr[2]; /* virtual source and destination address */
phys_bytes phys_addr[2]; /* absolute source and destination */
int seg_index;
int i;
* structure 0 is shared by user processes.
*/
#define s(n) (1 << s_nr_to_id(n))
-#define SRV_M (~0)
-#define SYS_M (~0)
-#define USR_M (s(PM_PROC_NR) | s(FS_PROC_NR) | s(RS_PROC_NR))
-#define DRV_M (USR_M | s(SYSTEM) | s(CLOCK) | s(LOG_PROC_NR) | s(TTY_PROC_NR))
+#define SRV_M (~0)
+#define SYS_M (~0)
+#define USR_M (s(PM_PROC_NR) | s(FS_PROC_NR) | s(RS_PROC_NR))
+#define DRV_M (USR_M | s(SYSTEM) | s(CLOCK) | s(LOG_PROC_NR) | s(TTY_PROC_NR))
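
The masks above all use the same one-bit-per-process technique: each permitted IPC destination contributes one bit, so deciding whether a send is allowed is a single AND. A minimal sketch of that technique, using the made-up slot numbers and hypothetical names EX_* and may_send (none of these are the real MINIX identifiers):

#define EX_BIT(id)	(1 << (id))
#define EX_PM_ID	0	/* hypothetical process slot ids */
#define EX_FS_ID	1
#define EX_TTY_ID	2
#define EX_USER_MASK	(EX_BIT(EX_PM_ID) | EX_BIT(EX_FS_ID))

/* Return nonzero if 'target_id' is set in 'allowed_mask'. */
static int may_send(unsigned int allowed_mask, int target_id)
{
	return (allowed_mask & EX_BIT(target_id)) != 0;
}
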
/* Define kernel calls that processes are allowed to make. This is not looking
* very nice, but we need to define the access rights on a per call basis.
/* The system image table lists all programs that are part of the boot image.
* The order of the entries here MUST agree with the order of the programs
* in the boot image and all kernel tasks must come first.
- * Each entry provides the process number, flags, quantum size (qs), scheduling
+ * Each entry provides the process number, flags, quantum size (qs), scheduling
* queue, allowed traps, ipc mask, and a name for the process table. The
* initial program counter and stack size is also provided for kernel tasks.
*/
* causing a compile time error. Note that no space is actually allocated
* because 'dummy' is declared extern.
*/
-extern int dummy[(NR_BOOT_PROCS==sizeof(image)/sizeof(struct boot_image))?1:-1];
+extern int dummy[(NR_BOOT_PROCS==sizeof(image)/
+ sizeof(struct boot_image))?1:-1];
extern int dummy[(BITCHUNK_BITS > NR_BOOT_PROCS - 1) ? 1 : -1];
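
The two extern declarations above rely on a compile-time check idiom: the array size evaluates to 1 when the condition holds and to -1 when it does not, and a negative array size is rejected by every C compiler, while the extern declaration itself allocates no storage. A self-contained sketch of the idiom, with the hypothetical names ex_entry, ex_table and EX_EXPECTED (not taken from the MINIX sources):

struct ex_entry { int id; };
static struct ex_entry ex_table[] = { {0}, {1}, {2} };
#define EX_EXPECTED 3

/* Compiles only if ex_table has exactly EX_EXPECTED entries. */
extern int ex_table_check[
	(sizeof(ex_table)/sizeof(ex_table[0]) == EX_EXPECTED) ? 1 : -1];
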
/* Data portion of the buffer. */
union {
char b__data[MAX_BLOCK_SIZE]; /* ordinary user data */
- struct direct b__dir[NR_DIR_ENTRIES(MAX_BLOCK_SIZE)]; /* directory block */
- zone1_t b__v1_ind[V1_INDIRECTS]; /* V1 indirect block */
- zone_t b__v2_ind[V2_INDIRECTS(MAX_BLOCK_SIZE)]; /* V2 indirect block */
- d1_inode b__v1_ino[V1_INODES_PER_BLOCK]; /* V1 inode block */
- d2_inode b__v2_ino[V2_INODES_PER_BLOCK(MAX_BLOCK_SIZE)]; /* V2 inode block */
- bitchunk_t b__bitmap[FS_BITMAP_CHUNKS(MAX_BLOCK_SIZE)]; /* bit map block */
+/* directory block */
+ struct direct b__dir[NR_DIR_ENTRIES(MAX_BLOCK_SIZE)];
+/* V1 indirect block */
+ zone1_t b__v1_ind[V1_INDIRECTS];
+/* V2 indirect block */
+ zone_t b__v2_ind[V2_INDIRECTS(MAX_BLOCK_SIZE)];
+/* V1 inode block */
+ d1_inode b__v1_ino[V1_INODES_PER_BLOCK];
+/* V2 inode block */
+ d2_inode b__v2_ino[V2_INODES_PER_BLOCK(MAX_BLOCK_SIZE)];
+/* bit map block */
+ bitchunk_t b__bitmap[FS_BITMAP_CHUNKS(MAX_BLOCK_SIZE)];
} b;
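
The b union overlays every on-disk block layout on one cache buffer; callers pick the member that matches how the block is currently being used. A reduced sketch of the same overlay idea, assuming a made-up 512-byte block and the hypothetical names ex_block and ex_dirent:

#define EX_BLOCK_SIZE 512

struct ex_dirent { unsigned short ino; char name[30]; };	/* 32 bytes */

union ex_block {
	char data[EX_BLOCK_SIZE];	/* raw user data */
	struct ex_dirent dir[EX_BLOCK_SIZE / sizeof(struct ex_dirent)];
	unsigned long ind[EX_BLOCK_SIZE / sizeof(unsigned long)];	/* indirect block */
};
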
/* Header portion of the buffer. */
EXTERN int bufs_in_use; /* # bufs currently in use (not on free list)*/
/* When a block is released, the type of usage is passed to put_block(). */
-#define WRITE_IMMED 0100 /* block should be written to disk now */
-#define ONE_SHOT 0200 /* set if block not likely to be needed soon */
+#define WRITE_IMMED 0100 /* block should be written to disk now */
+#define ONE_SHOT 0200 /* set if block not likely to be needed soon */
#define INODE_BLOCK 0 /* inode block */
#define DIRECTORY_BLOCK 1 /* directory block */
#define DUP_MASK 0100 /* mask to distinguish dup2 from dup */
-#define LOOK_UP 0 /* tells search_dir to lookup string */
-#define ENTER 1 /* tells search_dir to make dir entry */
-#define DELETE 2 /* tells search_dir to delete entry */
-#define IS_EMPTY 3 /* tells search_dir to ret. OK or ENOTEMPTY */
+#define LOOK_UP 0 /* tells search_dir to lookup string */
+#define ENTER 1 /* tells search_dir to make dir entry */
+#define DELETE 2 /* tells search_dir to delete entry */
+#define IS_EMPTY 3 /* tells search_dir to ret. OK or ENOTEMPTY */
#define CLEAN 0 /* disk and memory copies identical */
#define DIRTY 1 /* disk and memory copies differ */
/* Derived sizes pertaining to the V1 file system. */
#define V1_ZONE_NUM_SIZE usizeof (zone1_t) /* # bytes in V1 zone */
#define V1_INODE_SIZE usizeof (d1_inode) /* bytes in V1 dsk ino */
-#define V1_INDIRECTS (STATIC_BLOCK_SIZE/V1_ZONE_NUM_SIZE) /* # zones/indir block */
-#define V1_INODES_PER_BLOCK (STATIC_BLOCK_SIZE/V1_INODE_SIZE)/* # V1 dsk inodes/blk */
+
+/* # zones/indir block */
+#define V1_INDIRECTS (STATIC_BLOCK_SIZE/V1_ZONE_NUM_SIZE)
+
+/* # V1 dsk inodes/blk */
+#define V1_INODES_PER_BLOCK (STATIC_BLOCK_SIZE/V1_INODE_SIZE)
/* Derived sizes pertaining to the V2 file system. */
#define V2_ZONE_NUM_SIZE usizeof (zone_t) /* # bytes in V2 zone */
* them from the disk.
*
* The entry points into this file are
- * get_inode: search inode table for a given inode; if not there, read it
+ * get_inode: search inode table for a given inode; if not there,
+ * read it
* put_inode: indicate that an inode is no longer needed in memory
* alloc_inode: allocate a new, unused inode
* wipe_inode: erase some fields of a newly allocated inode
b = (block_t) (rip->i_num - 1)/sp->s_inodes_per_block + offset;
bp = get_block(rip->i_dev, b, NORMAL);
dip = bp->b_v1_ino + (rip->i_num - 1) % V1_INODES_PER_BLOCK;
- dip2 = bp->b_v2_ino + (rip->i_num - 1) % V2_INODES_PER_BLOCK(sp->s_block_size);
+ dip2 = bp->b_v2_ino + (rip->i_num - 1) %
+ V2_INODES_PER_BLOCK(sp->s_block_size);
/* Do the read or write. */
if (rw_flag == WRITING) {
time_t i_atime; /* time of last access (V2 only) */
time_t i_mtime; /* when was file data last changed */
time_t i_ctime; /* when was inode itself changed (V2 only)*/
- zone_t i_zone[V2_NR_TZONES]; /* zone numbers for direct, ind, and dbl ind */
+ zone_t i_zone[V2_NR_TZONES]; /* zone numbers for direct, ind, and dbl ind */
/* The following items are not present on the disk. */
dev_t i_dev; /* which device is the inode on */
printf("FS, warning illegal %d system call by %d\n", call_nr, who);
} else if (fp->fp_pid == PID_FREE) {
error = ENOSYS;
- printf("FS, bad process, who = %d, call_nr = %d, slot1 = %d\n", who, call_nr, m_in.slot1);
-
+ printf("FS, bad process, who = %d, call_nr = %d, slot1 = %d\n",
+ who, call_nr, m_in.slot1);
} else {
error = (*call_vec[call_nr])();
}
if (NR_BUFS < 6) panic(__FILE__,"NR_BUFS < 6", NO_NUM);
if (V1_INODE_SIZE != 32) panic(__FILE__,"V1 inode size != 32", NO_NUM);
if (V2_INODE_SIZE != 64) panic(__FILE__,"V2 inode size != 64", NO_NUM);
- if (OPEN_MAX > 8 * sizeof(long)) panic(__FILE__,"Too few bits in fp_cloexec", NO_NUM);
+ if (OPEN_MAX > 8 * sizeof(long))
+ panic(__FILE__,"Too few bits in fp_cloexec", NO_NUM);
/* The following initializations are needed to let dev_opcl succeed .*/
fp = (struct fproc *) NULL;
if(block_size_image % block_size_ram) {
printf("\nram block size: %d image block size: %d\n",
block_size_ram, block_size_image);
- panic(__FILE__, "ram disk block size must be a multiple of the image disk block size", NO_NUM);
+ panic(__FILE__, "ram disk block size must be a multiple of "
+ "the image disk block size", NO_NUM);
}
/* Loading blocks from image device. */
if (fetch_name(m_in.name1, m_in.name1_length, M1) != OK) return(err_code);
ldirp = last_dir(user_path, string); /* pointer to new dir's parent */
if (ldirp == NIL_INODE) return(err_code);
- if (ldirp->i_nlinks >= (ldirp->i_sp->s_version == V1 ? CHAR_MAX : SHRT_MAX)) {
+ if (ldirp->i_nlinks >= (ldirp->i_sp->s_version == V1 ?
+ CHAR_MAX : SHRT_MAX)) {
put_inode(ldirp); /* return parent */
return(EMLINK);
}
panic(__FILE__,"get_block returned NO_BLOCK", NO_NUM);
/* Search a directory block. */
- for (dp = &bp->b_dir[0]; dp < &bp->b_dir[NR_DIR_ENTRIES(ldir_ptr->i_sp->s_block_size)]; dp++) {
+ for (dp = &bp->b_dir[0];
+ dp < &bp->b_dir[NR_DIR_ENTRIES(ldir_ptr->i_sp->s_block_size)];
+ dp++) {
if (++new_slots > old_slots) { /* not found, but room left */
if (flag == ENTER) e_hit = TRUE;
break;
* do_pipe: perform the PIPE system call
* pipe_check: check to see that a read or write on a pipe is feasible now
* suspend: suspend a process that cannot do a requested read or write
- * release: check to see if a suspended process can be released and do it
+ * release: check to see if a suspended process can be released and do
+ * it
* revive: mark a suspended process as able to run again
* do_unpause: a signal has been sent to a process; see if it suspended
*/
}
if (position + bytes > PIPE_SIZE(rip->i_sp->s_block_size)) {
- if ((oflags & O_NONBLOCK) && bytes < PIPE_SIZE(rip->i_sp->s_block_size))
+ if ((oflags & O_NONBLOCK)
+ && bytes < PIPE_SIZE(rip->i_sp->s_block_size))
return(EAGAIN);
- else if ((oflags & O_NONBLOCK) && bytes > PIPE_SIZE(rip->i_sp->s_block_size)) {
- if ( (*canwrite = (PIPE_SIZE(rip->i_sp->s_block_size) - position)) > 0) {
+ else if ((oflags & O_NONBLOCK)
+ && bytes > PIPE_SIZE(rip->i_sp->s_block_size)) {
+ if ( (*canwrite = (PIPE_SIZE(rip->i_sp->s_block_size)
+ - position)) > 0) {
/* Do a partial write. Need to wakeup reader */
if(!notouch)
release(rip, READ, susp_count);
}
}
if (bytes > PIPE_SIZE(rip->i_sp->s_block_size)) {
- if ((*canwrite = PIPE_SIZE(rip->i_sp->s_block_size) - position) > 0) {
+ if ((*canwrite = PIPE_SIZE(rip->i_sp->s_block_size)
+ - position) > 0) {
/* Do a partial write. Need to wakeup reader
* since we'll suspend ourself in read_write()
*/
register struct fproc *rfp;
register int task;
- if (proc_nr < 0 || proc_nr >= NR_PROCS) panic(__FILE__,"revive err", proc_nr);
+ if (proc_nr < 0 || proc_nr >= NR_PROCS)
+ panic(__FILE__,"revive err", proc_nr);
rfp = &fproc[proc_nr];
if (rfp->fp_suspended == NOT_SUSPENDED || rfp->fp_revived == REVIVING)return;
if (who > PM_PROC_NR) return(EPERM);
proc_nr = m_in.pro;
- if (proc_nr < 0 || proc_nr >= NR_PROCS) panic(__FILE__,"unpause err 1", proc_nr);
+ if (proc_nr < 0 || proc_nr >= NR_PROCS)
+ panic(__FILE__,"unpause err 1", proc_nr);
rfp = &fproc[proc_nr];
if (rfp->fp_suspended == NOT_SUSPENDED) return(OK);
task = -rfp->fp_task;
#include "super.h"
FORWARD _PROTOTYPE( int rw_chunk, (struct inode *rip, off_t position,
- unsigned off, int chunk, unsigned left, int rw_flag,
- char *buff, int seg, int usr, int block_size, int *completed) );
+ unsigned off, int chunk, unsigned left, int rw_flag,
+ char *buff, int seg, int usr, int block_size, int *completed));
/*===========================================================================*
* do_read *
if (((f->filp_mode) & (rw_flag == READING ? R_BIT : W_BIT)) == 0) {
return(f->filp_mode == FILP_CLOSED ? EIO : EBADF);
}
- if (m_in.nbytes == 0) return(0); /* so char special files need not check for 0*/
+ if (m_in.nbytes == 0)
+ return(0); /* so char special files need not check for 0*/
/* check if user process has the memory it needs.
* if not, copying will fail later.
if((char_spec = (mode_word == I_CHAR_SPECIAL ? 1 : 0))) {
if(rip->i_zone[0] == NO_DEV)
- panic(__FILE__,"read_write tries to read from character device NO_DEV", NO_NUM);
+ panic(__FILE__,"read_write tries to read from "
+ "character device NO_DEV", NO_NUM);
block_size = get_block_size(rip->i_zone[0]);
}
if((block_spec = (mode_word == I_BLOCK_SPECIAL ? 1 : 0))) {
f_size = ULONG_MAX;
if(rip->i_zone[0] == NO_DEV)
- panic(__FILE__,"read_write tries to read from block device NO_DEV", NO_NUM);
+ panic(__FILE__,"read_write tries to read from "
+				"block device NO_DEV", NO_NUM);
block_size = get_block_size(rip->i_zone[0]);
}
/*===========================================================================*
* rw_chunk *
*===========================================================================*/
-PRIVATE int rw_chunk(rip, position, off, chunk, left, rw_flag, buff, seg, usr, block_size, completed)
+PRIVATE int rw_chunk(rip, position, off, chunk, left, rw_flag, buff,
+ seg, usr, block_size, completed)
register struct inode *rip; /* pointer to inode for file to be rd/wr */
off_t position; /* position within file to read or write */
unsigned off; /* off within the current block */
* 6 june 2005 Created (Ben Gras)
*/
- /* TODO: check if close (pipe?) / exit works;
- * some printf()s are serious errors;
- * check combinations of cases listen in open group select
- * spec (various NULLs and behaviours);
- * make select cancel disappearing fp's
- */
-
#define DEBUG_SELECT 0
#include "fs.h"
FORWARD _PROTOTYPE(int select_reevaluate, (struct filp *fp));
-FORWARD _PROTOTYPE(int select_request_file, (struct filp *f, int *ops, int block));
+FORWARD _PROTOTYPE(int select_request_file,
+ (struct filp *f, int *ops, int block));
FORWARD _PROTOTYPE(int select_match_file, (struct filp *f));
-FORWARD _PROTOTYPE(int select_request_general, (struct filp *f, int *ops, int block));
-FORWARD _PROTOTYPE(int select_major_match, (int match_major, struct filp *file));
+FORWARD _PROTOTYPE(int select_request_general,
+ (struct filp *f, int *ops, int block));
+FORWARD _PROTOTYPE(int select_major_match,
+ (int match_major, struct filp *file));
FORWARD _PROTOTYPE(void select_cancel_all, (struct selectentry *e));
FORWARD _PROTOTYPE(void select_wakeup, (struct selectentry *e));
selecttab[s].vir_errorfds = (fd_set *) m_in.SEL_ERRORFDS;
/* copy args */
- if(selecttab[s].vir_readfds && (r=sys_vircopy(who, D, (vir_bytes) m_in.SEL_READFDS,
+ if(selecttab[s].vir_readfds
+ && (r=sys_vircopy(who, D, (vir_bytes) m_in.SEL_READFDS,
SELF, D, (vir_bytes) &selecttab[s].readfds, sizeof(fd_set))) != OK)
return r;
- if(selecttab[s].vir_writefds && (r=sys_vircopy(who, D, (vir_bytes) m_in.SEL_WRITEFDS,
+ if(selecttab[s].vir_writefds
+ && (r=sys_vircopy(who, D, (vir_bytes) m_in.SEL_WRITEFDS,
SELF, D, (vir_bytes) &selecttab[s].writefds, sizeof(fd_set))) != OK)
return r;
- if(selecttab[s].vir_errorfds && (r=sys_vircopy(who, D, (vir_bytes) m_in.SEL_ERRORFDS,
+ if(selecttab[s].vir_errorfds
+ && (r=sys_vircopy(who, D, (vir_bytes) m_in.SEL_ERRORFDS,
SELF, D, (vir_bytes) &selecttab[s].errorfds, sizeof(fd_set))) != OK)
return r;
/* Fill in the statbuf struct. */
mo = rip->i_mode & I_TYPE;
- s = (mo == I_CHAR_SPECIAL || mo == I_BLOCK_SPECIAL); /* true iff special */
+
+ /* true iff special */
+ s = (mo == I_CHAR_SPECIAL || mo == I_BLOCK_SPECIAL);
+
statbuf.st_dev = rip->i_dev;
statbuf.st_ino = rip->i_num;
statbuf.st_mode = rip->i_mode;
-/* This file manages the super block table and the related data structures, * namely, the bit maps that keep track of which zones and which inodes are
+/* This file manages the super block table and the related data structures,
+ * namely, the bit maps that keep track of which zones and which inodes are
* allocated and which are free. When a new inode or zone is needed, the
* appropriate bit map is searched for a free entry.
*
start_block = START_BLOCK + sp->s_imap_blocks;
}
block = bit_returned / FS_BITS_PER_BLOCK(sp->s_block_size);
- word = (bit_returned % FS_BITS_PER_BLOCK(sp->s_block_size)) / FS_BITCHUNK_BITS;
+ word = (bit_returned % FS_BITS_PER_BLOCK(sp->s_block_size))
+ / FS_BITCHUNK_BITS;
+
bit = bit_returned % FS_BITCHUNK_BITS;
mask = 1 << bit;
|| sp->s_ninodes < 1 || sp->s_zones < 1
|| (unsigned) sp->s_log_zone_size > 4) {
printf("not enough imap or zone map blocks, \n");
- printf("or not enough inodes, or not enough zones, or zone size too large\n");
+ printf("or not enough inodes, or not enough zones, "
+ "or zone size too large\n");
return(EINVAL);
}
sp->s_dev = dev; /* restore device number */
#include "fs.h"
-#define VERBOSE 0
-
#include <timers.h>
#include <minix/syslib.h>
#include <minix/com.h>
/* reschedule our synchronous alarm if necessary */
if(!old_head || old_head > new_head) {
if(sys_setalarm(new_head, 1) != OK)
- panic(__FILE__, "FS set timer couldn't set synchronous alarm.", NO_NUM);
-#if VERBOSE
- else
- printf("timers: after setting, set synalarm to %d -> %d\n", old_head, new_head);
-#endif
+ panic(__FILE__, "FS set timer "
+ "couldn't set synchronous alarm.", NO_NUM);
}
return;
tmrs_exptimers(&fs_timers, now, &new_head);
if(new_head > 0) {
if(sys_setalarm(new_head, 1) != OK)
- panic(__FILE__, "FS expire timer couldn't set synchronous alarm.", NO_NUM);
-#if VERBOSE
- else
- printf("timers: after expiry, set synalarm to %d\n", new_head);
-#endif
+ panic(__FILE__, "FS expire timer couldn't set "
+ "synchronous alarm.", NO_NUM);
}
-#if VERBOSE
- else printf("after expiry, no new timer set\n");
-#endif
}
PUBLIC void fs_init_timer(timer_t *tp)
*/
if(old_head < new_head || !new_head) {
if(sys_setalarm(new_head, 1) != OK)
- panic(__FILE__, "FS expire timer couldn't set synchronous alarm.", NO_NUM);
-#if VERBOSE
- printf("timers: after cancelling, set synalarm to %d -> %d\n", old_head, new_head);
-#endif
+ panic(__FILE__,
+ "FS expire timer couldn't set synchronous alarm.",
+ NO_NUM);
}
}
rip->i_dirt = DIRTY; /* inode will be changed */
bp = NIL_BUF;
scale = rip->i_sp->s_log_zone_size; /* for zone-block conversion */
- zone = (position/rip->i_sp->s_block_size) >> scale; /* relative zone # to insert */
+ /* relative zone # to insert */
+ zone = (position/rip->i_sp->s_block_size) >> scale;
zones = rip->i_ndzones; /* # direct zones in the inode */
nr_indirects = rip->i_nindirs;/* # indirect zones per indirect block */
-/* $FreeBSD: src/sys/crypto/sha2/sha2.c,v 1.2.2.2 2002/03/05 08:36:47 ume Exp $ */
-/* $KAME: sha2.c,v 1.8 2001/11/08 01:07:52 itojun Exp $ */
/*
* sha2.c
*
* Please make sure that your system defines SHA2_BYTE_ORDER. If your
* architecture is little-endian, make sure it also defines
- * SHA2_LITTLE_ENDIAN and that the two (SHA2_BYTE_ORDER and SHA2_LITTLE_ENDIAN) are
- * equivilent.
+ * SHA2_LITTLE_ENDIAN and that the two (SHA2_BYTE_ORDER and
+ * SHA2_LITTLE_ENDIAN) are equivalent.
*
* If your system does not define the above, then you can do so by
* hand like this:
* del_slot *
*===========================================================================*/
PRIVATE void del_slot(prev_ptr, hp)
-register struct hole *prev_ptr; /* pointer to hole entry just ahead of 'hp' */
-register struct hole *hp; /* pointer to hole entry to be removed */
+/* pointer to hole entry just ahead of 'hp' */
+register struct hole *prev_ptr;
+/* pointer to hole entry to be removed */
+register struct hole *hp;
{
/* Remove an entry from the hole list. This procedure is called when a
* request to allocate memory removes a hole in its entirety, thus reducing
phys_bytes *tot_bytes, long *sym_bytes, vir_clicks sc,
vir_bytes *pc) );
-#define ESCRIPT (-2000) /* Returned by read_header for a #! script. */
-#define PTRSIZE sizeof(char *) /* Size of pointers in argv[] and envp[]. */
+#define ESCRIPT (-2000) /* Returned by read_header for a #! script. */
+#define PTRSIZE sizeof(char *) /* Size of pointers in argv[] and envp[]. */
/*===========================================================================*
* do_exec *
dst = (vir_bytes) mbuf;
r = sys_datacopy(who, (vir_bytes) src,
PM_PROC_NR, (vir_bytes) dst, (phys_bytes)stk_bytes);
-
- if (r != OK) return(EACCES); /* can't fetch stack (e.g. bad virtual addr) */
+ /* can't fetch stack (e.g. bad virtual addr) */
+ if (r != OK) return(EACCES);
r = 0; /* r = 0 (first attempt), or 1 (interpreted script) */
name = name_buf; /* name of file to exec. */
/*===========================================================================*
* new_mem *
*===========================================================================*/
-PRIVATE int new_mem(sh_mp, text_bytes, data_bytes,bss_bytes,stk_bytes,tot_bytes)
+PRIVATE int new_mem(sh_mp, text_bytes, data_bytes,
+ bss_bytes,stk_bytes,tot_bytes)
struct mproc *sh_mp; /* text can be shared with this process */
vir_bytes text_bytes; /* text segment size in bytes */
vir_bytes data_bytes; /* size of initialized data in bytes */
}
/* Free the data and stack segments. */
free_mem(rmp->mp_seg[D].mem_phys,
- rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len - rmp->mp_seg[D].mem_vir);
+ rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len - rmp->mp_seg[D].mem_vir);
/* We have now passed the point of no return. The old core image has been
* forever lost, memory for a new core image has been allocated. Set up
#if (CHIP == M68000)
rmp->mp_seg[T].mem_vir = 0;
rmp->mp_seg[D].mem_vir = rmp->mp_seg[T].mem_len;
- rmp->mp_seg[S].mem_vir = rmp->mp_seg[D].mem_vir + rmp->mp_seg[D].mem_len + gap_clicks;
+ rmp->mp_seg[S].mem_vir = rmp->mp_seg[D].mem_vir
+ + rmp->mp_seg[D].mem_len + gap_clicks;
#endif
sys_newmap(who, rmp->mp_seg); /* report new map to the kernel */
}
/* Free the data and stack segments. */
free_mem(rmp->mp_seg[D].mem_phys,
- rmp->mp_seg[S].mem_vir + rmp->mp_seg[S].mem_len - rmp->mp_seg[D].mem_vir);
+ rmp->mp_seg[S].mem_vir
+ + rmp->mp_seg[S].mem_len - rmp->mp_seg[D].mem_vir);
/* The process slot can only be freed if the parent has done a WAIT. */
rmp->mp_exitstatus = (char) exit_status;
/* Set a param override? */
if(req == MMSETPARAM) {
- if(local_params >= MAX_LOCAL_PARAMS) return ENOSPC;
- if(sysgetenv.keylen <= 0 || sysgetenv.keylen >= sizeof(local_param_overrides[local_params].name)
- || sysgetenv.vallen <= 0 || sysgetenv.vallen >= sizeof(local_param_overrides[local_params].value))
- return EINVAL;
+ if(local_params >= MAX_LOCAL_PARAMS) return ENOSPC;
+ if(sysgetenv.keylen <= 0
+ || sysgetenv.keylen >=
+ sizeof(local_param_overrides[local_params].name)
+ || sysgetenv.vallen <= 0
+ || sysgetenv.vallen >=
+ sizeof(local_param_overrides[local_params].value))
+ return EINVAL;
- if ((s = sys_datacopy(who, (vir_bytes) sysgetenv.key,
- SELF, (vir_bytes) local_param_overrides[local_params].name,
- sysgetenv.keylen)) != OK)
- return s;
- if ((s = sys_datacopy(who, (vir_bytes) sysgetenv.val,
- SELF, (vir_bytes) local_param_overrides[local_params].value,
- sysgetenv.keylen)) != OK)
- return s;
- local_param_overrides[local_params].name[sysgetenv.keylen] = '\0';
- local_param_overrides[local_params].value[sysgetenv.vallen] = '\0';
-
- local_params++;
-
- return OK;
+ if ((s = sys_datacopy(who, (vir_bytes) sysgetenv.key,
+ SELF, (vir_bytes) local_param_overrides[local_params].name,
+ sysgetenv.keylen)) != OK)
+ return s;
+ if ((s = sys_datacopy(who, (vir_bytes) sysgetenv.val,
+ SELF, (vir_bytes) local_param_overrides[local_params].value,
+		sysgetenv.vallen)) != OK)
+ return s;
+ local_param_overrides[local_params].name[sysgetenv.keylen] = '\0';
+ local_param_overrides[local_params].value[sysgetenv.vallen] = '\0';
+
+ local_params++;
+
+ return OK;
}
if (sysgetenv.keylen == 0) { /* copy all parameters */