PUBLIC int fs_rdlink()
{
block_t b; /* block containing link text */
- struct buf *bp; /* buffer containing link text */
+ struct buf *bp = NULL; /* buffer containing link text */
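+ /* bp stays NULL on the fast-symlink path, which reads the target from the inode rather than from a cache block. */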
char *link_text; /* either bp->b_data or rip->i_block */
register struct inode *rip; /* target inode */
register int r; /* return value */
if( (rip = get_inode(fs_dev, (ino_t) fs_m_in.REQ_INODE_NR)) == NULL)
return(EINVAL);
- if (!S_ISLNK(rip->i_mode))
- r = EACCES;
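+ /* A target longer than MAX_FAST_SYMLINK_LENGTH lives in a data block; shorter targets are kept in the inode's i_block area. */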
if (rip->i_size > MAX_FAST_SYMLINK_LENGTH) {
/* normal symlink */
if ((b = read_map(rip, (off_t) 0)) == NO_BLOCK) {
/* We can safely cast to unsigned, because copylen is guaranteed to be
below max file size */
copylen = min( copylen, (unsigned) rip->i_size);
- bp = get_block(rip->i_dev, b, NORMAL);
r = sys_safecopyto(VFS_PROC_NR, (cp_grant_id_t) fs_m_in.REQ_GRANT,
(vir_bytes) 0, (vir_bytes) link_text,
(size_t) copylen, D);
old_ip = NULL;
if (r == EENTERMOUNT) r = EXDEV; /* should this fail at all? */
else if (r == ELEAVEMOUNT) r = EINVAL; /* rename on dot-dot */
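+ /* Lookup of the old entry failed entirely; report the error it left in err_code. */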
+ } else if (old_ip == NULL) {
+ return(err_code);
}
/* Get new dir inode */
- if( (new_dirp = get_inode(fs_dev, (ino_t) fs_m_in.REQ_REN_NEW_DIR)) == NULL)
- r = err_code;
- else {
+ if( (new_dirp = get_inode(fs_dev, (ino_t) fs_m_in.REQ_REN_NEW_DIR)) == NULL) {
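+ /* Could not get the new directory's inode: drop the references acquired above before failing. */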
+ put_inode(old_ip);
+ put_inode(old_dirp);
+ return(err_code);
+ } else {
if (new_dirp->i_links_count == NO_LINK) { /* Dir does not actually exist */
put_inode(old_ip);
put_inode(old_dirp);
} else {
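+ /* Zero the 'offset' bytes immediately preceding the original position. */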
len = offset;
pos -= offset;
- offset = 0;
}
zeroblock_range(rip, pos, len);
* three major activities: getting new work, processing the work, and
* sending the reply. The loop never terminates, unless a panic occurs.
*/
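+ /* Initialize 'error' at declaration so it is never read while undefined. */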
- int error, ind, transid;
+ int error = OK, ind, transid;
unsigned short test_endian = 1;
/* SEF local startup. */
assert(IS_VFS_FS_TRANSID(transid));
src = fs_m_in.m_source;
- error = OK;
caller_uid = INVAL_UID; /* To trap errors */
caller_gid = INVAL_GID;
req_nr = fs_m_in.m_type;
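+ /* Creating a symlink: a fast symlink's target text is stored directly in the inode's i_block area. */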
sip->i_dirt = DIRTY;
link_target_buf = (char*) sip->i_block;
} else {
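+ /* Slow path: the target does not fit in the inode, so copy it into a newly allocated data block. */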
- r = (bp = new_block(sip, (off_t) 0)) == NULL ? err_code :
+ if ((bp = new_block(sip, (off_t) 0)) != NULL) {
- sys_safecopyfrom(VFS_PROC_NR,
+ r = sys_safecopyfrom(VFS_PROC_NR,
(cp_grant_id_t) fs_m_in.REQ_GRANT3,
(vir_bytes) 0, (vir_bytes) bp->b_data,
(vir_bytes) fs_m_in.REQ_MEM_SIZE, D);
- bp->b_dirt = DIRTY;
- link_target_buf = bp->b_data;
+ bp->b_dirt = DIRTY;
+ link_target_buf = bp->b_data;
+ } else {
+ r = err_code;
+ }
}
if (r == OK) {
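+ /* The copy succeeded; NUL-terminate the target so it can be treated as a C string. */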
link_target_buf[fs_m_in.REQ_MEM_SIZE] = '\0';
block_t block, blocks_left;
off_t ind1_pos;
dev_t dev;
- struct buf *bp;
+ struct buf *bp = NULL;
static unsigned int readqsize = 0;
static struct buf **read_q;
block = baseblock;
bp = get_block(dev, block, PREFETCH);
+ assert(bp != NULL);
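+ /* If b_dev is already set, the block was found in the cache with valid contents, so no read-ahead is needed. */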
if (bp->b_dev != NO_DEV) return(bp);
/* The best guess for the number of blocks to prefetch: A lot.
* or newly created.
* If there wasn't one and WMAP_FREE is set, 'b3' is NO_BLOCK.
*/
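+ /* Guard the shortcut with WMAP_FREE so it only triggers when freeing, matching the comment above. */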
- if (b3 == NO_BLOCK) {
+ if (b3 == NO_BLOCK && (op & WMAP_FREE)) {
/* WMAP_FREE and no triple indirect block - then no
* double and single indirect blocks either.
*/
* or newly created.
* If there wasn't one and WMAP_FREE is set, 'b2' is NO_BLOCK.
*/
- if (b2 == NO_BLOCK) {
+ if (b2 == NO_BLOCK && (op & WMAP_FREE)) {
/* WMAP_FREE and no double indirect block - then no
* single indirect block either.
*/