INCS+= minix/acpi.h minix/ansi.h minix/audio_fw.h minix/bitmap.h \
minix/callnr.h minix/com.h minix/compiler.h minix/config.h \
minix/const.h minix/cpufeature.h minix/crtso.h minix/debug.h \
- minix/devio.h minix/devman.h minix/dmap.h \
- minix/driver.h minix/drivers.h minix/drvlib.h minix/ds.h \
+ minix/devio.h minix/devman.h minix/dmap.h minix/driver.h \
+ minix/driver_mt.h minix/drivers.h minix/drvlib.h minix/ds.h \
minix/endpoint.h minix/fslib.h minix/gcov.h minix/hash.h \
minix/ioctl.h minix/input.h minix/ipc.h minix/ipcconst.h \
minix/keymap.h minix/limits.h minix/mthread.h minix/minlib.h \
- minix/mq.h minix/netdriver.h minix/partition.h minix/portio.h \
+ minix/netdriver.h minix/partition.h minix/portio.h \
minix/priv.h minix/procfs.h minix/profile.h minix/queryparam.h \
minix/rs.h minix/safecopies.h minix/sched.h minix/sef.h \
minix/sound.h minix/spin.h minix/sys_config.h minix/sysinfo.h \
#include <minix/partition.h>
#include <minix/u64.h>
+typedef int thread_id_t;
+
/* Info about and entry points into the device dependent code. */
struct driver {
_PROTOTYPE( char *(*dr_name), (void) );
_PROTOTYPE( int (*dr_cancel), (struct driver *dp, message *m_ptr) );
_PROTOTYPE( int (*dr_select), (struct driver *dp, message *m_ptr) );
_PROTOTYPE( int (*dr_other), (struct driver *dp, message *m_ptr) );
- _PROTOTYPE( int (*dr_hw_int), (struct driver *dp, message *m_ptr) );
+ _PROTOTYPE( void (*dr_hw_int), (struct driver *dp, message *m_ptr) );
+ _PROTOTYPE( int (*dr_thread), (dev_t dev, thread_id_t *threadp) );
};
/* Base and size of a partition in bytes. */
u64_t dv_size;
};
-
#define DRIVER_STD 0 /* Use the standard reply protocol */
#define DRIVER_ASYN 1 /* Use the new asynchronous protocol */
#define IS_DEV_MINOR_RQ(type) (IS_DEV_RQ(type) && (type) != DEV_STATUS)
-/* Functions defined by driver.c: */
+/* Functions defined by libdriver. These can be used for both singlethreaded
+ * and multithreaded drivers.
+ */
_PROTOTYPE( void driver_announce, (void) );
-_PROTOTYPE( int driver_receive, (endpoint_t src, message *m_ptr,
- int *status_ptr) );
-_PROTOTYPE( int driver_receive_mq, (message *m_ptr, int *status_ptr) );
-_PROTOTYPE( int driver_handle_msg, (struct driver *dp, int type, message *m_ptr,
- int ipc_status));
-_PROTOTYPE( void driver_terminate, (void) );
-_PROTOTYPE( void driver_task, (struct driver *dr, int type) );
-_PROTOTYPE( int driver_mq_queue, (message *m_ptr, int status) );
-_PROTOTYPE( void driver_init_buffer, (void) );
_PROTOTYPE( char *no_name, (void) );
_PROTOTYPE( int do_nop, (struct driver *dp, message *m_ptr) );
_PROTOTYPE( struct device *nop_prepare, (int device) );
_PROTOTYPE( int do_diocntl, (struct driver *dp, message *m_ptr) );
_PROTOTYPE( int nop_ioctl, (struct driver *dp, message *m_ptr) );
+#ifndef _DRIVER_MT_API
+/* Additional functions for the singlethreaded version. These allow the driver
+ * to either use the stock driver_task(), or implement its own message loop.
+ * To avoid accidents, these functions are not exposed when minix/driver_mt.h
+ * has been included previously.
+ */
+_PROTOTYPE( int driver_receive, (endpoint_t src, message *m_ptr,
+ int *status_ptr) );
+_PROTOTYPE( int driver_receive_mq, (message *m_ptr, int *status_ptr) );
+_PROTOTYPE( void driver_handle_msg, (struct driver *dp, int type,
+ message *m_ptr, int ipc_status) );
+_PROTOTYPE( void driver_terminate, (void) );
+_PROTOTYPE( void driver_task, (struct driver *dr, int type) );
+_PROTOTYPE( int driver_mq_queue, (message *m_ptr, int status) );
+#endif /* !_DRIVER_MT_API */
+
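/*
 * Illustrative sketch (not part of this change): a singlethreaded driver that
 * implements its own message loop on top of driver_receive_mq() and
 * driver_handle_msg(), instead of calling the stock driver_task(). The driver
 * table "my_dtab" is a hypothetical stand-in for the driver's own table.
 */
#include <minix/drivers.h>
#include <minix/driver.h>

extern struct driver my_dtab;

PUBLIC void my_task(void)
{
	message m;
	int r, ipc_status;

	/* Tell the system we are ready to accept requests. */
	driver_announce();

	for (;;) {
		/* Take a queued message, or block waiting for a new one. */
		if ((r = driver_receive_mq(&m, &ipc_status)) != OK)
			panic("driver_receive_mq failed: %d", r);

		/* Dispatch the request and send the reply, if any. */
		driver_handle_msg(&my_dtab, DRIVER_STD, &m, ipc_status);
	}
}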
/* Parameters for the disk drive. */
#define SECTOR_SIZE 512 /* physical sector size in bytes */
#define SECTOR_SHIFT 9 /* for division */
#define CD_SECTOR_SIZE 2048 /* sector size of a CD-ROM in bytes */
/* Size of the DMA buffer in bytes. */
-#define USE_EXTRA_DMA_BUF 0 /* usually not needed */
#define DMA_BUF_SIZE (DMA_SECTORS * SECTOR_SIZE)
-#if (CHIP == INTEL)
-extern u8_t *tmp_buf; /* the DMA buffer */
-#else
-extern u8_t tmp_buf[]; /* the DMA buffer */
-#endif
-extern phys_bytes tmp_phys; /* phys address of DMA buffer */
-
#endif /* __MINIX_DRIVER_H__ */
--- /dev/null
+#ifndef _MINIX_DRIVER_MT_H
+#define _MINIX_DRIVER_MT_H
+
+#define _DRIVER_MT_API 1	/* do not expose the singlethreaded API */
+#include <minix/driver.h>
+
+/* The maximum number of worker threads. */
+#define DRIVER_MT_MAX_WORKERS 32
+
+_PROTOTYPE( void driver_mt_task, (struct driver *driver_p, int driver_type) );
+_PROTOTYPE( void driver_mt_sleep, (void) );
+_PROTOTYPE( void driver_mt_wakeup, (thread_id_t id) );
+_PROTOTYPE( void driver_mt_stop, (void) );
+_PROTOTYPE( void driver_mt_terminate, (void) );
+
+#endif /* _MINIX_DRIVER_MT_H */
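/*
 * Illustrative sketch (not part of this change): the pieces a driver adds in
 * order to use the multithreaded API above. "my_dtab", "sef_local_startup"
 * and MY_NR_DEVICES are hypothetical stand-ins for the driver's own table,
 * SEF setup routine and device count; my_thread is installed as the dr_thread
 * entry of the table.
 */
#include <minix/drivers.h>
#include <minix/driver_mt.h>

#define MY_NR_DEVICES	4	/* must not exceed DRIVER_MT_MAX_WORKERS */

extern struct driver my_dtab;

_PROTOTYPE( void sef_local_startup, (void) );

PUBLIC int my_thread(dev_t minor, thread_id_t *id)
{
	/* Map each minor device onto its own worker thread, so that requests
	 * for different devices are handled in parallel, while requests for
	 * the same device remain serialized. A real driver will usually map
	 * all minors of one device (partitions included) to the same thread,
	 * as ahci_thread below does.
	 */
	if (minor >= MY_NR_DEVICES)
		return ENXIO;

	*id = (thread_id_t) minor;

	return OK;
}

PUBLIC int main(int argc, char **argv)
{
	env_setargs(argc, argv);
	sef_local_startup();

	/* Enter the multithreaded message loop instead of driver_task(). */
	driver_mt_task(&my_dtab, DRIVER_STD);

	return 0;
}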
+++ /dev/null
-/*
-inet/mq.h
-
-Created: Jan 3, 1992 by Philip Homburg
-
-Copyright 1995 Philip Homburg
-*/
-
-#ifndef INET__MQ_H
-#define INET__MQ_H
-
-typedef struct mq
-{
- message mq_mess;
- int mq_mess_status;
- struct mq *mq_next;
- int mq_allocated;
-} mq_t;
-
-_PROTOTYPE( mq_t *mq_get, (void) );
-_PROTOTYPE( void mq_free, (mq_t *mq) );
-_PROTOTYPE( void mq_init, (void) );
-
-#endif /* INET__MQ_H */
-
-/*
- * $PchId: mq.h,v 1.4 1995/11/21 06:40:30 philip Exp $
- */
unsigned long ex64lo(u64_t i);
unsigned long ex64hi(u64_t i);
u64_t make64(unsigned long lo, unsigned long hi);
-u64_t rrotate64(u64_t x, unsigned short b);
-u64_t rshift64(u64_t x, unsigned short b);
-u64_t xor64(u64_t a, u64_t b);
-u64_t and64(u64_t a, u64_t b);
-u64_t not64(u64_t a);
#define is_zero64(i) ((i).lo == 0 && (i).hi == 0)
#define make_zero64(i) do { (i).lo = (i).hi = 0; } while(0)
#endif
+u64_t rrotate64(u64_t x, unsigned short b);
+u64_t rshift64(u64_t x, unsigned short b);
+u64_t xor64(u64_t a, u64_t b);
+u64_t and64(u64_t a, u64_t b);
+u64_t not64(u64_t a);
+
#endif /* _MINIX__U64_H */
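/*
 * Illustrative sketch (not part of this change): typical use of the u64_t
 * helpers, computing a device size in megabytes from a sector count and a
 * sector size, as the AHCI driver below does when reporting a medium. The
 * function name and its arguments are hypothetical.
 */
#include <minix/drivers.h>
#include <minix/u64.h>

PUBLIC unsigned long size_in_mb(u64_t lba_count, unsigned int sector_size)
{
	u64_t bytes;

	/* Full 64-bit product of the sector count and the sector size. */
	bytes = mul64(lba_count, cvu64(sector_size));

	/* Divide the u64_t by a plain unsigned value; the result is truncated
	 * to an unsigned long.
	 */
	return div64u(bytes, 1024 * 1024);
}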
SRCS= ahci.c
DPADD+= ${LIBDRIVER} ${LIBSYS}
-LDADD+= -ldriver -lsys -ltimers
+LDADD+= -ldriver -lsys -ltimers -lmthread
MAN=
*
* The driver supports device hot-plug, active device status tracking,
* nonremovable ATA and removable ATAPI devices, custom logical sector sizes,
- * and sector-unaligned reads.
+ * sector-unaligned reads, and parallel requests to different devices.
*
* It does not implement transparent failure recovery, power management, native
- * command queuing, port multipliers, or any form of parallelism with respect
- * to incoming requests.
+ * command queuing, or port multipliers.
*/
/*
* An AHCI controller exposes a number of ports (up to 32), each of which may
* or may not have one device attached (port multipliers are not supported).
- * Each port is maintained independently, although due to the synchronous
- * nature of libdriver, an ongoing request for one port will block subsequent
- * requests for all other ports as well. It should be relatively easy to remove
- * this limitation in the future.
+ * Each port is maintained independently.
*
* The following figure depicts the possible transitions between port states.
* The NO_PORT state is not included; no transitions can be made from or to it.
* other state.
*
* Normally, the BUSY flag is used to indicate whether a command is in
- * progress. Again, due to the synchronous nature of libdriver, there is no
- * support for native command queuing yet. To allow this limitation to be
- * removed in the future, there is already some support in the code for
- * specifying a command number, even though it will currently always be zero.
+ * progress. There is no support for native command queuing yet. To allow this
+ * limitation to be removed in the future, there is already some support in the
+ * code for specifying a command number, even though it will currently always
+ * be zero.
*/
/*
* The maximum byte size of a single transfer (MAX_TRANSFER) is currently set
* vectors completely filled with 64KB-blocks.
*/
#include <minix/drivers.h>
-#include <minix/driver.h>
+#include <minix/driver_mt.h>
#include <minix/drvlib.h>
#include <minix/u64.h>
#include <machine/pci.h>
PRIVATE void ahci_geometry(struct partition *part);
PRIVATE void ahci_alarm(struct driver *UNUSED(dp), message *m);
PRIVATE int ahci_other(struct driver *UNUSED(dp), message *m);
-PRIVATE int ahci_intr(struct driver *UNUSED(dr), message *m);
+PRIVATE void ahci_intr(struct driver *UNUSED(dp), message *m);
+PRIVATE int ahci_thread(dev_t minor, thread_id_t *id);
+PRIVATE struct port_state *ahci_get_port(dev_t minor);
/* AHCI driver table. */
PRIVATE struct driver ahci_dtab = {
nop_cancel,
nop_select,
ahci_other,
- ahci_intr
+ ahci_intr,
+ ahci_thread
};
/*===========================================================================*
memset(&fis, 0, sizeof(fis));
fis.cf_cmd = ATA_CMD_PACKET;
- if (size > 0) {
+ if (size > 0) {
fis.cf_feat = ATA_FEAT_PACKET_DMA;
if (!write && (ps->flags & FLAG_USE_DMADIR))
fis.cf_feat |= ATA_FEAT_PACKET_DMADIR;
(buf[4] << 24) | (buf[5] << 16) | (buf[6] << 8) | buf[7];
if (ps->sector_size == 0 || (ps->sector_size & 1)) {
- dprintf(V_ERR, ("%s: invalid medium sector size %lu\n",
+ dprintf(V_ERR, ("%s: invalid medium sector size %u\n",
ahci_portname(ps), ps->sector_size));
return EINVAL;
}
dprintf(V_INFO,
- ("%s: medium detected (%lu byte sectors, %lu MB size)\n",
+ ("%s: medium detected (%u byte sectors, %lu MB size)\n",
ahci_portname(ps), ps->sector_size,
div64u(mul64(ps->lba_count, cvu64(ps->sector_size)),
1024*1024)));
ps->sector_size = ATA_SECTOR_SIZE;
if (ps->sector_size < ATA_SECTOR_SIZE) {
- dprintf(V_ERR, ("%s: invalid sector size %lu\n",
+ dprintf(V_ERR, ("%s: invalid sector size %u\n",
ahci_portname(ps), ps->sector_size));
return FALSE;
/*===========================================================================*
* sum_iovec *
*===========================================================================*/
-PRIVATE int sum_iovec(endpoint_t endpt, iovec_s_t *iovec, int nr_req,
- vir_bytes *total)
+PRIVATE int sum_iovec(struct port_state *ps, endpoint_t endpt,
+ iovec_s_t *iovec, int nr_req, vir_bytes *total)
{
/* Retrieve the total size of the given I/O vector. Check for alignment
* requirements along the way. Return OK (and the total request size)
if (size == 0 || (size & 1) || size > LONG_MAX) {
dprintf(V_ERR, ("%s: bad size %lu in iovec from %d\n",
- ahci_name(), size, endpt));
+ ahci_portname(ps), size, endpt));
return EINVAL;
}
if (bytes > LONG_MAX) {
dprintf(V_ERR, ("%s: iovec size overflow from %d\n",
- ahci_name(), endpt));
+ ahci_portname(ps), endpt));
return EINVAL;
}
}
if (r != OK) {
dprintf(V_ERR, ("%s: unable to map area from %d "
- "(%d)\n", ahci_name(), endpt, r));
+ "(%d)\n", ahci_portname(ps), endpt, r));
return EINVAL;
}
if (phys & 1) {
dprintf(V_ERR, ("%s: bad physical address from %d\n",
- ahci_name(), endpt));
+ ahci_portname(ps), endpt));
return EINVAL;
}
{
/* Perform an I/O transfer on a port.
*/
- static prd_t prdt[NR_PRDS];
+ prd_t prdt[NR_PRDS];
vir_bytes size, lead, chunk;
unsigned int count, nr_prds;
u64_t start_lba;
int i, r;
/* Get the total request size from the I/O vector. */
- if ((r = sum_iovec(endpt, iovec, nr_req, &size)) != OK)
+ if ((r = sum_iovec(ps, endpt, iovec, nr_req, &size)) != OK)
return r;
dprintf(V_REQ, ("%s: %s for %lu bytes at pos %08lx%08lx\n",
ahci_portname(ps), write ? "write" : "read", size,
- ex64hi(pos), ex64lo(pos)));
+ ex64hi(pos), ex64lo(pos)));
assert(ps->state == STATE_GOOD_DEV);
assert(ps->flags & FLAG_HAS_MEDIUM);
sig = ps->reg[AHCI_PORT_SIG];
if (sig != ATA_SIG_ATA && sig != ATA_SIG_ATAPI) {
- dprintf(V_ERR, ("%s: unsupported signature (%08lx)\n",
+ dprintf(V_ERR, ("%s: unsupported signature (%08x)\n",
ahci_portname(ps), sig));
port_stop(ps);
}
/* Clear all state flags except the busy flag, which may be relevant if
- * a DEV_OPEN call is waiting for the device to become ready, and the
+ * a DEV_OPEN call is waiting for the device to become ready, the
* barrier flag, which prevents access to the device until it is
- * completely closed and (re)opened.
+ * completely closed and (re)opened, and the thread suspension flag.
*/
- ps->flags &= FLAG_BUSY | FLAG_BARRIER;
+ ps->flags &= (FLAG_BUSY | FLAG_BARRIER | FLAG_SUSPENDED);
if (sig == ATA_SIG_ATAPI)
ps->flags |= FLAG_ATAPI;
}
if (ps->flags & FLAG_HAS_MEDIUM)
- printf(", %lu byte sectors, %lu MB size",
+ printf(", %u byte sectors, %lu MB size",
ps->sector_size, div64u(mul64(ps->lba_count,
cvu64(ps->sector_size)), 1024*1024));
/* Clear the interrupt flags that we saw were set. */
ps->reg[AHCI_PORT_IS] = smask;
- dprintf(V_REQ, ("%s: interrupt (%08lx)\n", ahci_portname(ps), smask));
+ dprintf(V_REQ, ("%s: interrupt (%08x)\n", ahci_portname(ps), smask));
if (emask & AHCI_PORT_IS_PRCS) {
/* Clear the N diagnostics bit to clear this interrupt. */
ps = &port_state[port];
+ /* Regardless of the outcome of this timeout, wake up the thread if it
+ * is suspended.
+ */
+ if (ps->flags & FLAG_SUSPENDED)
+ driver_mt_wakeup(ps->device);
+
/* If detection of a device after startup timed out, give up on initial
* detection and only look for hot plug events from now on.
*/
*===========================================================================*/
PRIVATE void port_wait(struct port_state *ps)
{
- /* Receive and process incoming messages until the given port is no
- * longer busy (due to command completion or timeout). Queue any new
- * requests for later processing.
+ /* Suspend the current thread until the given port is no longer busy,
+ * due to either command completion or timeout.
*/
-	message m;
-	int r, ipc_status;
-	while (ps->flags & FLAG_BUSY) {
-		if ((r = driver_receive(ANY, &m, &ipc_status)) != OK)
-			panic("driver_receive failed: %d", r);
-		if (is_ipc_notify(ipc_status)) {
-			switch (m.m_source) {
-			case HARDWARE:
-				ahci_intr(NULL, &m);
-				break;
-
-			case CLOCK:
-				ahci_alarm(NULL, &m);
-				break;
-			default:
-				driver_mq_queue(&m, ipc_status);
-			}
-		}
-		else {
-			driver_mq_queue(&m, ipc_status);
-		}
-	}
+	ps->flags |= FLAG_SUSPENDED;
+
+	while (ps->flags & FLAG_BUSY)
+		driver_mt_sleep();
+
+	ps->flags &= ~FLAG_SUSPENDED;
}
/*===========================================================================*
((cap >> AHCI_HBA_CAP_NCS_SHIFT) & AHCI_HBA_CAP_NCS_MASK) + 1,
(cap & AHCI_HBA_CAP_SNCQ) ? "supports" : "no",
hba_state.irq));
- dprintf(V_INFO, ("%s: CAP %08lx, CAP2 %08lx, PI %08lx\n",
+ dprintf(V_INFO, ("%s: CAP %08x, CAP2 %08x, PI %08x\n",
ahci_name(), cap, hba_state.base[AHCI_HBA_CAP2],
hba_state.base[AHCI_HBA_PI]));
{
/* Disable AHCI, and clean up resources to the extent possible.
*/
+ struct port_state *ps;
int r, port;
for (port = 0; port < hba_state.nr_ports; port++) {
-		if (port_state[port].state != STATE_NO_PORT) {
-			if (port_state[port].state == STATE_GOOD_DEV)
-				(void) gen_flush_wcache(&port_state[port], 0);
-			port_stop(&port_state[port]);
-			port_free(&port_state[port]);
+		ps = &port_state[port];
+
+		if (ps->state != STATE_NO_PORT) {
+			port_stop(ps);
+			port_free(ps);
}
}
/*===========================================================================*
* ahci_intr *
*===========================================================================*/
-PRIVATE int ahci_intr(struct driver *UNUSED(dr), message *UNUSED(m))
+PRIVATE void ahci_intr(struct driver *UNUSED(dr), message *UNUSED(m))
{
/* Process an interrupt.
*/
+ struct port_state *ps;
u32_t mask;
int r, port;
/* Handle an interrupt for each port that has the interrupt bit set. */
mask = hba_state.base[AHCI_HBA_IS];
- for (port = 0; port < hba_state.nr_ports; port++)
- if (mask & (1L << port))
- port_intr(&port_state[port]);
+ for (port = 0; port < hba_state.nr_ports; port++) {
+ if (mask & (1L << port)) {
+ ps = &port_state[port];
+
+ port_intr(ps);
+
+ /* After processing an interrupt, wake up the device
+ * thread if it is suspended and now no longer busy.
+ */
+ if ((ps->flags & (FLAG_SUSPENDED | FLAG_BUSY)) ==
+ FLAG_SUSPENDED)
+ driver_mt_wakeup(ps->device);
+ }
+ }
/* Clear the bits that we processed. */
hba_state.base[AHCI_HBA_IS] = mask;
/* Reenable the interrupt. */
if ((r = sys_irqenable(&hba_state.hook_id)) != OK)
panic("unable to enable IRQ: %d", r);
-
- return OK;
}
/*===========================================================================*
*===========================================================================*/
PRIVATE char *ahci_name(void)
{
- /* Return a printable name for the controller and possibly the
- * currently selected port.
+ /* Return a printable name for the controller. We avoid the use of the
+ * currently selected port, as it may not be accurate.
*/
static char name[] = "AHCI0";
- if (current_port != NULL)
- return ahci_portname(current_port);
-
name[4] = '0' + ahci_instance;
return name;
{
/* Open a device.
*/
+ struct port_state *ps;
int r;
- if (ahci_prepare(m->DEVICE) == NULL)
- return ENXIO;
+ ps = ahci_get_port(m->DEVICE);
/* If we are still in the process of initializing this port or device,
* wait for completion of that phase first.
*/
- if (current_port->flags & FLAG_BUSY)
- port_wait(current_port);
+ if (ps->flags & FLAG_BUSY)
+ port_wait(ps);
/* The device may only be opened if it is now properly functioning. */
- if (current_port->state != STATE_GOOD_DEV)
- return ENXIO;
+ if (ps->state != STATE_GOOD_DEV) {
+ r = ENXIO;
+ goto err_stop;
+ }
/* Some devices may only be opened in read-only mode. */
- if ((current_port->flags & FLAG_READONLY) && (m->COUNT & W_BIT))
- return EACCES;
+ if ((ps->flags & FLAG_READONLY) && (m->COUNT & W_BIT)) {
+ r = EACCES;
+ goto err_stop;
+ }
- if (current_port->open_count == 0) {
+ if (ps->open_count == 0) {
/* The first open request. Clear the barrier flag, if set. */
- current_port->flags &= ~FLAG_BARRIER;
+ ps->flags &= ~FLAG_BARRIER;
/* Recheck media only when nobody is using the device. */
- if ((current_port->flags & FLAG_ATAPI) &&
- (r = atapi_check_medium(current_port, 0)) != OK)
- return r;
+ if ((ps->flags & FLAG_ATAPI) &&
+ (r = atapi_check_medium(ps, 0)) != OK)
+ goto err_stop;
/* After rechecking the media, the partition table must always
* be read. This is also a convenient time to do it for
* nonremovable devices. Start by resetting the partition
* tables and setting the working size of the entire device.
*/
- memset(current_port->part, 0, sizeof(current_port->part));
- memset(current_port->subpart, 0,
- sizeof(current_port->subpart));
+ memset(ps->part, 0, sizeof(ps->part));
+ memset(ps->subpart, 0, sizeof(ps->subpart));
- current_port->part[0].dv_size =
- mul64(current_port->lba_count,
- cvu64(current_port->sector_size));
+ ps->part[0].dv_size =
+ mul64(ps->lba_count, cvu64(ps->sector_size));
- partition(&ahci_dtab, current_port->device * DEV_PER_DRIVE,
- P_PRIMARY, !!(current_port->flags & FLAG_ATAPI));
+ partition(&ahci_dtab, ps->device * DEV_PER_DRIVE, P_PRIMARY,
+ !!(ps->flags & FLAG_ATAPI));
}
else {
/* If the barrier flag is set, deny new open requests until the
* device is fully closed first.
*/
- if (current_port->flags & FLAG_BARRIER)
+ if (ps->flags & FLAG_BARRIER)
return ENXIO;
}
- current_port->open_count++;
+ ps->open_count++;
return OK;
+
+err_stop:
+ /* Stop the thread if the device is now fully closed. */
+ if (ps->open_count == 0)
+ driver_mt_stop();
+
+ return r;
}
/*===========================================================================*
{
/* Close a device.
*/
+ struct port_state *ps;
int port;
- if (ahci_prepare(m->DEVICE) == NULL)
- return ENXIO;
+ ps = ahci_get_port(m->DEVICE);
- if (current_port->open_count <= 0) {
+ /* Decrease the open count. */
+ if (ps->open_count <= 0) {
dprintf(V_ERR, ("%s: closing already-closed port\n",
- ahci_portname(current_port)));
+ ahci_portname(ps)));
return EINVAL;
}
- current_port->open_count--;
+ ps->open_count--;
+
+ if (ps->open_count > 0)
+ return OK;
- /* If we've been told to terminate, check whether all devices are now
- * closed. If so, tell libdriver to quit after replying to the close.
+ /* The device is now fully closed. That also means that the thread for
+ * this device is not needed anymore.
+ */
+ driver_mt_stop();
+
+ if (ps->state == STATE_GOOD_DEV && !(ps->flags & FLAG_BARRIER)) {
+ dprintf(V_INFO, ("%s: flushing write cache\n",
+ ahci_portname(ps)));
+
+ (void) gen_flush_wcache(ps, 0);
+ }
+
+ /* If the entire driver has been told to terminate, check whether all
+ * devices are now closed. If so, tell libdriver to quit after replying
+ * to the close request.
*/
if (ahci_exiting) {
for (port = 0; port < hba_state.nr_ports; port++)
if (port == hba_state.nr_ports) {
ahci_stop();
- driver_terminate();
+ driver_mt_terminate();
}
}
*/
u64_t pos, eof;
+ /* We can safely use current_port, as we won't get interrupted until
+ * the call to port_transfer(), after which it is no longer used.
+ * Nonpreemptive threading guarantees that we will not be descheduled
+ * before then.
+ */
assert(current_port != NULL);
assert(current_dev != NULL);
/* Process any messages not covered by the other calls.
* This function only implements IOCTLs.
*/
+ struct port_state *ps;
int r, val;
if (m->m_type != DEV_IOCTL_S)
return EINVAL;
- if (ahci_prepare(m->DEVICE) == NULL)
- return ENXIO;
+ ps = ahci_get_port(m->DEVICE);
switch (m->REQUEST) {
case DIOCEJECT:
- if (current_port->state != STATE_GOOD_DEV)
+ if (ps->state != STATE_GOOD_DEV || (ps->flags & FLAG_BARRIER))
return EIO;
- if (!(current_port->flags & FLAG_ATAPI))
+ if (!(ps->flags & FLAG_ATAPI))
return EINVAL;
- return atapi_load_eject(current_port, 0, FALSE /*load*/);
+ return atapi_load_eject(ps, 0, FALSE /*load*/);
case DIOCOPENCT:
return sys_safecopyto(m->m_source, (cp_grant_id_t) m->IO_GRANT,
-		0, (vir_bytes) &current_port->open_count,
- sizeof(current_port->open_count), D);
+ 0, (vir_bytes) &ps->open_count, sizeof(ps->open_count),
+ D);
case DIOCFLUSH:
- if (current_port->state != STATE_GOOD_DEV)
+ if (ps->state != STATE_GOOD_DEV || (ps->flags & FLAG_BARRIER))
return EIO;
- return gen_flush_wcache(current_port, 0);
+ return gen_flush_wcache(ps, 0);
case DIOCSETWC:
- if (current_port->state != STATE_GOOD_DEV)
+ if (ps->state != STATE_GOOD_DEV || (ps->flags & FLAG_BARRIER))
return EIO;
if ((r = sys_safecopyfrom(m->m_source,
sizeof(val), D)) != OK)
return r;
- return gen_set_wcache(current_port, 0, val);
+ return gen_set_wcache(ps, 0, val);
case DIOCGETWC:
- if (current_port->state != STATE_GOOD_DEV)
+ if (ps->state != STATE_GOOD_DEV || (ps->flags & FLAG_BARRIER))
return EIO;
- if ((r = gen_get_wcache(current_port, 0, &val)) != OK)
+ if ((r = gen_get_wcache(ps, 0, &val)) != OK)
return r;
return sys_safecopyto(m->m_source, (cp_grant_id_t) m->IO_GRANT,
return EINVAL;
}
+/*===========================================================================*
+ * ahci_thread *
+ *===========================================================================*/
+PRIVATE int ahci_thread(dev_t minor, thread_id_t *id)
+{
+ /* Map a device number to a worker thread number.
+ */
+
+ if (ahci_prepare(minor) == NULL)
+ return ENXIO;
+
+ *id = current_port->device;
+
+ return OK;
+}
+
+/*===========================================================================*
+ * ahci_get_port *
+ *===========================================================================*/
+PRIVATE struct port_state *ahci_get_port(dev_t minor)
+{
+ /* Get the port structure associated with the given minor device.
+ * Called only from worker threads, so the minor device is already
+ * guaranteed to map to a port.
+ */
+
+ if (ahci_prepare(minor) == NULL)
+ panic("device mapping for minor %d disappeared", minor);
+
+ return current_port;
+}
+
/*===========================================================================*
* main *
*===========================================================================*/
env_setargs(argc, argv);
sef_local_startup();
- driver_task(&ahci_dtab, DRIVER_STD);
+ driver_mt_task(&ahci_dtab, DRIVER_STD);
return 0;
}
#define FLAG_BARRIER 0x00000040 /* no access until unset */
#define FLAG_HAS_WCACHE 0x00000080 /* is a write cache present? */
#define FLAG_HAS_FLUSH 0x00000100 /* is FLUSH CACHE supported? */
+#define FLAG_SUSPENDED 0x00000200 /* is the thread suspended? */
/* Mapping between devices and ports. */
#define NO_PORT -1 /* this device maps to no port */
PRIVATE int w_drive; /* selected drive */
PRIVATE struct device *w_dv; /* device's base and size */
-/* Unfortunately, DMA_SECTORS and DMA_BUF_SIZE are already defined libdriver
- * for 'tmp_buf'.
- */
+PRIVATE u8_t *tmp_buf;
+
#define ATA_DMA_SECTORS 64
#define ATA_DMA_BUF_SIZE (ATA_DMA_SECTORS*SECTOR_SIZE)
FORWARD _PROTOTYPE( void ack_irqs, (unsigned int) );
FORWARD _PROTOTYPE( int w_do_close, (struct driver *dp, message *m_ptr) );
FORWARD _PROTOTYPE( int w_other, (struct driver *dp, message *m_ptr) );
-FORWARD _PROTOTYPE( int w_hw_int, (struct driver *dp, message *m_ptr) );
+FORWARD _PROTOTYPE( void w_hw_int, (struct driver *dp, message *m_ptr) );
FORWARD _PROTOTYPE( int com_simple, (struct command *cmd) );
FORWARD _PROTOTYPE( void w_timeout, (void) );
FORWARD _PROTOTYPE( int w_reset, (void) );
/* Initialize the at_wini driver. */
system_hz = sys_hz();
- driver_init_buffer();
+ if (!(tmp_buf = alloc_contig(2*DMA_BUF_SIZE, AC_ALIGN4K, NULL)))
+ panic("unable to allocate temporary buffer");
w_identify_wakeup_ticks = WAKEUP_TICKS;
wakeup_ticks = WAKEUP_TICKS;
/*===========================================================================*
* w_hw_int *
*===========================================================================*/
-PRIVATE int w_hw_int(dr, m)
+PRIVATE void w_hw_int(dr, m)
struct driver *dr;
message *m;
{
/* Leftover interrupt(s) received; ack it/them. */
ack_irqs(m->NOTIFY_ARG);
-
- return OK;
}
nop_cancel,
nop_select,
nop_ioctl,
- do_nop,
+ NULL,
+ NULL
};
/** Represents the /dev/hello device. */
LIB= driver
-SRCS= driver.c drvlib.c mq.c
+SRCS= driver.c drvlib.c driver_st.c driver_mt.c mq.c event.c
.if ${USE_STATECTL} != "no"
CPPFLAGS+= -DUSE_STATECTL
-/* This file contains device independent device driver interface.
- *
- * Changes:
- * Jul 25, 2005 added SYS_SIG type for signals (Jorrit N. Herder)
- * Sep 15, 2004 added SYN_ALARM type for timeouts (Jorrit N. Herder)
- * Jul 23, 2004 removed kernel dependencies (Jorrit N. Herder)
- * Apr 02, 1992 constructed from AT wini and floppy driver (Kees J. Bot)
- *
+/* This file contains the common part of the device driver interface. On top
+ * of it, drivers choose between the singlethreaded API (in driver_st.c) and
+ * the multithreaded API (in driver_mt.c).
*
* The drivers support the following operations (using message format m2):
*
* m_type DEVICE USER_ENDPT COUNT POSITION HIGHPOS IO_GRANT
* ----------------------------------------------------------------------------
- * | DEV_OPEN | device | proc nr | | | | |
+ * | DEV_OPEN | device | proc nr | mode | | | |
* |---------------+--------+---------+---------+--------+--------+-----------|
* | DEV_CLOSE | device | proc nr | | | | |
* |---------------+--------+---------+---------+--------+--------+-----------|
* |---------------+--------+---------+---------+--------+--------+-----------|
* | DEV_IOCTL_S | device | proc nr | request | | | buf grant |
* |---------------+--------+---------+---------+--------+--------+-----------|
- * | CANCEL | device | proc nr | r/w | | | |
+ * | CANCEL | device | proc nr | r/w | | | grant |
+ * |---------------+--------+---------+---------+--------+--------+-----------|
+ * | DEV_SELECT | device | ops | | | | |
* ----------------------------------------------------------------------------
*
- * The file contains the following entry points:
- *
- * driver_announce: called by a device driver to announce it is up
- * driver_receive: receive() interface for drivers
- * driver_receive_mq: receive() interface for drivers with message queueing
- * driver_task: called by the device dependent task entry
- * driver_init_buffer: initialize a DMA buffer
- * driver_mq_queue: queue an incoming message for later processing
+ * Changes:
+ * Aug 27, 2011 split off driver_st.c and driver_mt.c (A. Welzel)
+ * Jul 25, 2005 added SYS_SIG type for signals (Jorrit N. Herder)
+ * Sep 15, 2004 added SYN_ALARM type for timeouts (Jorrit N. Herder)
+ * Jul 23, 2004 removed kernel dependencies (Jorrit N. Herder)
+ * Apr 02, 1992 constructed from AT wini and floppy driver (Kees J. Bot)
*/
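/*
 * Illustrative sketch (not part of this change): how a caller such as VFS
 * fills in an m2-style DEV_READ_S request for this interface, using the field
 * names from the table above. The function and its arguments are
 * hypothetical; the grant covers the caller's data buffer.
 */
#include <minix/drivers.h>
#include <minix/u64.h>

PUBLIC void fill_read_request(message *m, int minor_dev, endpoint_t user_endpt,
	cp_grant_id_t grant, u64_t byte_pos, size_t bytes)
{
	memset(m, 0, sizeof(*m));

	m->m_type = DEV_READ_S;		/* read, using a safecopy grant */
	m->DEVICE = minor_dev;		/* minor device number */
	m->USER_ENDPT = user_endpt;	/* endpoint on whose behalf we act */
	m->COUNT = (int) bytes;		/* number of bytes to transfer */
	m->POSITION = ex64lo(byte_pos);	/* low 32 bits of the byte offset */
	m->HIGHPOS = ex64hi(byte_pos);	/* high 32 bits of the byte offset */
	m->IO_GRANT = (char *) grant;	/* grant for the data buffer */
}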
#include <minix/drivers.h>
#include <sys/ioc_disk.h>
-#include <minix/mq.h>
-#include <minix/endpoint.h>
#include <minix/driver.h>
#include <minix/ds.h>
-/* Claim space for variables. */
-u8_t *tmp_buf = NULL; /* the DMA buffer eventually */
-phys_bytes tmp_phys; /* phys address of DMA buffer */
-
-FORWARD _PROTOTYPE( void clear_open_devs, (void) );
-FORWARD _PROTOTYPE( int is_open_dev, (int device) );
-FORWARD _PROTOTYPE( void set_open_dev, (int device) );
-
-FORWARD _PROTOTYPE( void asyn_reply, (message *mess, int r) );
-FORWARD _PROTOTYPE( int driver_reply, (endpoint_t caller_e, int caller_status,
- message *m_ptr) );
-FORWARD _PROTOTYPE( int driver_spurious_reply, (endpoint_t caller_e,
- int caller_status, message *m_ptr) );
-FORWARD _PROTOTYPE( int do_rdwt, (struct driver *dr, message *mp) );
-FORWARD _PROTOTYPE( int do_vrdwt, (struct driver *dr, message *mp) );
-
-PRIVATE endpoint_t device_caller;
-PUBLIC endpoint_t device_endpt; /* used externally by log driver */
-PRIVATE mq_t *queue_head = NULL;
+#include "driver.h"
+#include "mq.h"
+
+/* Management data for opened devices. */
PRIVATE int open_devs[MAX_NR_OPEN_DEVICES];
PRIVATE int next_open_devs_slot = 0;
-PRIVATE int driver_running;
/*===========================================================================*
- * clear_open_devs *
+ * clear_open_devs *
*===========================================================================*/
-PRIVATE void clear_open_devs()
+PRIVATE void clear_open_devs(void)
{
+/* Reset the set of previously opened minor devices. */
next_open_devs_slot = 0;
}
/*===========================================================================*
- * is_open_dev *
+ * is_open_dev *
*===========================================================================*/
PRIVATE int is_open_dev(int device)
{
- int i, open_dev_found;
-
- open_dev_found = FALSE;
- for(i=0;i<next_open_devs_slot;i++) {
- if(open_devs[i] == device) {
- open_dev_found = TRUE;
- break;
- }
- }
+/* Check whether the given minor device has previously been opened. */
+ int i;
+
+ for (i = 0; i < next_open_devs_slot; i++)
+ if (open_devs[i] == device)
+ return TRUE;
- return open_dev_found;
+ return FALSE;
}
/*===========================================================================*
- * set_open_dev *
+ * set_open_dev *
*===========================================================================*/
PRIVATE void set_open_dev(int device)
{
- if(next_open_devs_slot >= MAX_NR_OPEN_DEVICES) {
- panic("out of slots for open devices");
- }
+/* Mark the given minor device as having been opened. */
+
+ if (next_open_devs_slot >= MAX_NR_OPEN_DEVICES)
+ panic("out of slots for open devices");
+
open_devs[next_open_devs_slot] = device;
next_open_devs_slot++;
}
/*===========================================================================*
* asyn_reply *
*===========================================================================*/
-PRIVATE void asyn_reply(mess, r)
-message *mess;
-int r;
+PRIVATE void asyn_reply(message *mess, int r)
{
-/* Send a reply using the new asynchronous character device protocol.
- */
+/* Send a reply using the asynchronous character device protocol. */
message reply_mess;
+ /* Do not reply with ERESTART in this protocol. The only possible caller,
+ * VFS, will find out through other means when we have restarted, and is not
+ * (fully) ready to deal with ERESTART errors.
+ */
+ if (r == ERESTART)
+ return;
+
switch (mess->m_type) {
case DEV_OPEN:
reply_mess.m_type = DEV_REVIVE;
- reply_mess.REP_ENDPT = device_endpt;
+ reply_mess.REP_ENDPT = mess->USER_ENDPT;
reply_mess.REP_STATUS = r;
break;
case DEV_CLOSE:
reply_mess.m_type = DEV_CLOSE_REPL;
- reply_mess.REP_ENDPT = device_endpt;
+ reply_mess.REP_ENDPT = mess->USER_ENDPT;
reply_mess.REP_STATUS = r;
break;
case DEV_IOCTL_S:
if (r == SUSPEND)
printf("driver_task: reviving %d (%d) with SUSPEND\n",
- device_caller, device_endpt);
+ mess->m_source, mess->USER_ENDPT);
reply_mess.m_type = DEV_REVIVE;
- reply_mess.REP_ENDPT = device_endpt;
+ reply_mess.REP_ENDPT = mess->USER_ENDPT;
reply_mess.REP_IO_GRANT = (cp_grant_id_t) mess->IO_GRANT;
reply_mess.REP_STATUS = r;
break;
default:
reply_mess.m_type = TASK_REPLY;
- reply_mess.REP_ENDPT = device_endpt;
+ reply_mess.REP_ENDPT = mess->USER_ENDPT;
/* Status is # of bytes transferred or error code. */
reply_mess.REP_STATUS = r;
break;
}
- r= asynsend(device_caller, &reply_mess);
+ r = asynsend(mess->m_source, &reply_mess);
+
if (r != OK)
- {
- printf("driver_task: unable to asynsend to %d: %d\n",
- device_caller, r);
- }
+ printf("asyn_reply: unable to asynsend reply to %d: %d\n",
+ mess->m_source, r);
}
/*===========================================================================*
- * driver_reply *
+ * standard_reply *
*===========================================================================*/
-PRIVATE int driver_reply(caller_e, caller_status, m_ptr)
-endpoint_t caller_e;
-int caller_status;
-message *m_ptr;
+PRIVATE void standard_reply(message *m_ptr, int ipc_status, int reply)
{
/* Reply to a message sent to the driver. */
+ endpoint_t caller_e, user_e;
int r;
- /* Use sendnb if caller is guaranteed to be blocked, asynsend otherwise. */
- if(IPC_STATUS_CALL(caller_status) == SENDREC) {
- r = sendnb(caller_e, m_ptr);
- }
- else {
- r = asynsend(caller_e, m_ptr);
- }
+ caller_e = m_ptr->m_source;
+ user_e = m_ptr->USER_ENDPT;
- return r;
+ m_ptr->m_type = TASK_REPLY;
+ m_ptr->REP_ENDPT = user_e;
+ m_ptr->REP_STATUS = reply;
+
+ /* If we would block sending the message, send it asynchronously. */
+ if (IPC_STATUS_CALL(ipc_status) == SENDREC)
+ r = sendnb(caller_e, m_ptr);
+ else
+ r = asynsend(caller_e, m_ptr);
+
+ if (r != OK)
+ printf("driver_reply: unable to send reply to %d: %d\n", caller_e, r);
}
/*===========================================================================*
- * driver_spurious_reply *
+ * driver_reply *
*===========================================================================*/
-PRIVATE int driver_spurious_reply(caller_e, caller_status, m_ptr)
-endpoint_t caller_e;
-int caller_status;
-message *m_ptr;
+PUBLIC void driver_reply(int driver_type, message *m_ptr, int ipc_status,
+ int reply)
{
-/* Reply to a spurious message pretending to be dead. */
- int r;
+/* Prepare and send a reply message. */
- m_ptr->m_type = TASK_REPLY;
- m_ptr->REP_ENDPT = m_ptr->USER_ENDPT;
- m_ptr->REP_STATUS = ERESTART;
+ if (reply == EDONTREPLY)
+ return;
- r = driver_reply(caller_e, caller_status, m_ptr);
- if(r != OK) {
- printf("unable to reply to spurious message from %d\n",
- caller_e);
- }
+ switch (driver_type) {
+ case DRIVER_STD:
+ standard_reply(m_ptr, ipc_status, reply);
- return r;
+ break;
+
+ case DRIVER_ASYN:
+ asyn_reply(m_ptr, reply);
+
+ break;
+
+ default:
+ panic("unknown driver type: %d", driver_type);
+ }
}
/*===========================================================================*
- * driver_announce *
+ * driver_announce *
*===========================================================================*/
-PUBLIC void driver_announce()
+PUBLIC void driver_announce(void)
{
/* Announce we are up after a fresh start or restart. */
int r;
/* Expect a DEV_OPEN for any device before serving regular driver requests. */
clear_open_devs();
-}
-
-/*===========================================================================*
- * driver_receive *
- *===========================================================================*/
-PUBLIC int driver_receive(src, m_ptr, status_ptr)
-endpoint_t src;
-message *m_ptr;
-int *status_ptr;
-{
-/* receive() interface for drivers. */
- int r;
- int ipc_status;
-
- while (TRUE) {
- /* Wait for a request. */
- r = sef_receive_status(src, m_ptr, &ipc_status);
- *status_ptr = ipc_status;
- if (r != OK) {
- return r;
- }
-
- /* See if only DEV_OPEN is to be expected for this device. */
- if(IS_DEV_MINOR_RQ(m_ptr->m_type) && !is_open_dev(m_ptr->DEVICE)) {
- if(m_ptr->m_type != DEV_OPEN) {
- if(!is_ipc_asynch(ipc_status)) {
- driver_spurious_reply(m_ptr->m_source,
- ipc_status, m_ptr);
- }
- continue;
- }
- set_open_dev(m_ptr->DEVICE);
- }
-
- break;
- }
-
- return OK;
-}
-
-/*===========================================================================*
- * driver_receive_mq *
- *===========================================================================*/
-PUBLIC int driver_receive_mq(m_ptr, status_ptr)
-message *m_ptr;
-int *status_ptr;
-{
-/* receive() interface for drivers with message queueing. */
- int ipc_status;
-
- /* Any queued messages? Oldest are at the head. */
- while(queue_head) {
- mq_t *mq;
- mq = queue_head;
- memcpy(m_ptr, &mq->mq_mess, sizeof(mq->mq_mess));
- ipc_status = mq->mq_mess_status;
- *status_ptr = ipc_status;
- queue_head = queue_head->mq_next;
- mq_free(mq);
-
- /* See if only DEV_OPEN is to be expected for this device. */
- if(IS_DEV_MINOR_RQ(m_ptr->m_type) && !is_open_dev(m_ptr->DEVICE)) {
- if(m_ptr->m_type != DEV_OPEN) {
- if(!is_ipc_asynch(ipc_status)) {
- driver_spurious_reply(m_ptr->m_source,
- ipc_status, m_ptr);
- }
- continue;
- }
- set_open_dev(m_ptr->DEVICE);
- }
-
- return OK;
- }
-
- /* Fall back to standard receive() interface for drivers. */
- return driver_receive(ANY, m_ptr, status_ptr);
-}
-
-/*===========================================================================*
- * driver_terminate *
- *===========================================================================*/
-PUBLIC void driver_terminate(void)
-{
-/* Break out of the main driver loop after finishing the current request.
- */
-
- driver_running = FALSE;
-}
-
-/*===========================================================================*
- * driver_task *
- *===========================================================================*/
-PUBLIC void driver_task(dp, type)
-struct driver *dp; /* Device dependent entry points. */
-int type; /* Driver type (DRIVER_STD or DRIVER_ASYN) */
-{
-/* Main program of any device driver task. */
- int r, ipc_status;
- message mess;
-
- driver_running = TRUE;
-
- /* Here is the main loop of the disk task. It waits for a message, carries
- * it out, and sends a reply.
- */
- while (driver_running) {
- if ((r=driver_receive_mq(&mess, &ipc_status)) != OK) {
- panic("driver_receive_mq failed: %d", r);
- }
- driver_handle_msg(dp, type, &mess, ipc_status);
- }
-}
-
-/*===========================================================================*
- * driver_handle_msg *
- *===========================================================================*/
-PUBLIC int driver_handle_msg(dp, type, m_ptr, ipc_status)
-struct driver *dp; /* Device dependent entry points. */
-int type; /* Driver type (DRIVER_STD or DRIVER_ASYN) */
-message *m_ptr; /* Pointer to message to handle */
-int ipc_status;
-{
- int r;
-
- device_caller = m_ptr->m_source;
- device_endpt = m_ptr->USER_ENDPT;
- if (is_ipc_notify(ipc_status)) {
- switch (_ENDPOINT_P(m_ptr->m_source)) {
- case HARDWARE:
- /* leftover interrupt or expired timer. */
- if(dp->dr_hw_int) {
- (*dp->dr_hw_int)(dp, m_ptr);
- }
- break;
- case CLOCK:
- (*dp->dr_alarm)(dp, m_ptr);
- break;
- default:
- if(dp->dr_other)
- r = (*dp->dr_other)(dp, m_ptr);
- else
- r = EINVAL;
- goto send_reply;
- }
- return 0;
- }
-
- switch(m_ptr->m_type) {
- case DEV_OPEN: r = (*dp->dr_open)(dp, m_ptr); break;
- case DEV_CLOSE: r = (*dp->dr_close)(dp, m_ptr); break;
- case DEV_IOCTL_S: r = (*dp->dr_ioctl)(dp, m_ptr); break;
- case CANCEL: r = (*dp->dr_cancel)(dp, m_ptr);break;
- case DEV_SELECT: r = (*dp->dr_select)(dp, m_ptr);break;
- case DEV_READ_S:
- case DEV_WRITE_S: r = do_rdwt(dp, m_ptr); break;
- case DEV_GATHER_S:
- case DEV_SCATTER_S: r = do_vrdwt(dp, m_ptr); break;
- default:
- if(dp->dr_other)
- r = (*dp->dr_other)(dp, m_ptr);
- else
- r = EINVAL;
- break;
- }
-
-send_reply:
- /* Clean up leftover state. */
- (*dp->dr_cleanup)();
-
- /* Finally, prepare and send the reply message. */
- if (r == EDONTREPLY)
- return 0;
-
- switch (type) {
- case DRIVER_STD:
- m_ptr->m_type = TASK_REPLY;
- m_ptr->REP_ENDPT = device_endpt;
- /* Status is # of bytes transferred or error code. */
- m_ptr->REP_STATUS = r;
-
- r = driver_reply(device_caller, ipc_status, m_ptr);
-
- if (r != OK) {
- printf("driver_task: unable to send reply to %d: %d\n",
- device_caller, r);
- }
-
- break;
-
- case DRIVER_ASYN:
- asyn_reply(m_ptr, r);
-
- break;
-
- default:
- panic("unknown driver type: %d", type);
- }
- return 0;
-}
-
-/*===========================================================================*
- * driver_init_buffer *
- *===========================================================================*/
-PUBLIC void driver_init_buffer(void)
-{
-/* Select a buffer that can safely be used for DMA transfers. It may also
- * be used to read partition tables and such. Its absolute address is
- * 'tmp_phys', the normal address is 'tmp_buf'.
- */
- vir_bytes size;
-
- if (tmp_buf == NULL) {
- size = MAX(2*DMA_BUF_SIZE, CD_SECTOR_SIZE);
-
- if(!(tmp_buf = alloc_contig(size, AC_ALIGN4K, &tmp_phys)))
- panic("can't allocate tmp_buf: %lu", size);
- }
+ driver_mq_init();
}
/*===========================================================================*
* do_rdwt *
*===========================================================================*/
-PRIVATE int do_rdwt(dp, mp)
-struct driver *dp; /* device dependent entry points */
-message *mp; /* pointer to read or write message */
+PRIVATE int do_rdwt(struct driver *dp, message *mp)
{
/* Carry out a single read or write request. */
iovec_t iovec1;
r = (*dp->dr_transfer)(mp->m_source, opcode, position, &iovec1, 1);
/* Return the number of bytes transferred or an error code. */
- return(r == OK ? (mp->COUNT - iovec1.iov_size) : r);
+ return(r == OK ? (int) (mp->COUNT - iovec1.iov_size) : r);
}
-/*==========================================================================*
- * do_vrdwt *
- *==========================================================================*/
-PRIVATE int do_vrdwt(dp, mp)
-struct driver *dp; /* device dependent entry points */
-message *mp; /* pointer to read or write message */
+/*===========================================================================*
+ * do_vrdwt *
+ *===========================================================================*/
+PRIVATE int do_vrdwt(struct driver *dp, message *mp)
{
/* Carry out a device read or write to/from a vector of user addresses.
* The "user addresses" are assumed to be safe, i.e. FS transferring to/from
* its own buffers, so they are not checked.
*/
- static iovec_t iovec[NR_IOREQS];
+ iovec_t iovec[NR_IOREQS];
phys_bytes iovec_size;
unsigned nr_req;
int r, opcode;
return(r);
}
+/*===========================================================================*
+ * driver_handle_notify *
+ *===========================================================================*/
+PUBLIC void driver_handle_notify(struct driver *dp, message *m_ptr)
+{
+/* Take care of the notifications (interrupt and clock messages) by calling the
+ * appropriate callback functions. Never send a reply.
+ */
+
+ /* Call the appropriate function for this notification. */
+ switch (_ENDPOINT_P(m_ptr->m_source)) {
+ case HARDWARE:
+ if (dp->dr_hw_int)
+ dp->dr_hw_int(dp, m_ptr);
+ break;
+
+ case CLOCK:
+ if (dp->dr_alarm)
+ dp->dr_alarm(dp, m_ptr);
+ break;
+
+ default:
+ if (dp->dr_other)
+ (void) dp->dr_other(dp, m_ptr);
+ }
+}
+
+/*===========================================================================*
+ * driver_handle_request *
+ *===========================================================================*/
+PUBLIC int driver_handle_request(struct driver *dp, message *m_ptr)
+{
+/* Call the appropriate driver function, based on the type of request. Return
+ * the result code that is to be sent back to the caller, or EDONTREPLY if no
+ * reply is to be sent.
+ */
+ int r;
+
+ /* We might get spurious requests if the driver has been restarted. Deny any
+ * requests on devices that have not previously been opened, signaling the
+ * caller that something went wrong.
+ */
+ if (IS_DEV_MINOR_RQ(m_ptr->m_type) && !is_open_dev(m_ptr->DEVICE)) {
+ /* Reply ERESTART to spurious requests for unopened devices. */
+ if (m_ptr->m_type != DEV_OPEN)
+ return ERESTART;
+
+ /* Mark the device as opened otherwise. */
+ set_open_dev(m_ptr->DEVICE);
+ }
+
+ /* Call the appropriate function(s) for this request. */
+ switch (m_ptr->m_type) {
+ case DEV_OPEN: r = (*dp->dr_open)(dp, m_ptr); break;
+ case DEV_CLOSE: r = (*dp->dr_close)(dp, m_ptr); break;
+ case DEV_IOCTL_S: r = (*dp->dr_ioctl)(dp, m_ptr); break;
+ case CANCEL: r = (*dp->dr_cancel)(dp, m_ptr); break;
+ case DEV_SELECT: r = (*dp->dr_select)(dp, m_ptr); break;
+ case DEV_READ_S:
+ case DEV_WRITE_S: r = do_rdwt(dp, m_ptr); break;
+ case DEV_GATHER_S:
+ case DEV_SCATTER_S: r = do_vrdwt(dp, m_ptr); break;
+ default:
+ if (dp->dr_other)
+ r = dp->dr_other(dp, m_ptr);
+ else
+ r = EINVAL;
+ }
+
+ /* Let the driver perform any cleanup. */
+ (*dp->dr_cleanup)();
+
+ return r;
+}
+
/*===========================================================================*
* no_name *
*===========================================================================*/
-PUBLIC char *no_name()
+PUBLIC char *no_name(void)
{
/* Use this default name if there is no specific name for the device. This was
* originally done by fetching the name from the task table for this process:
return name;
}
-/*============================================================================*
- * do_nop *
- *============================================================================*/
-PUBLIC int do_nop(dp, mp)
-struct driver *dp;
-message *mp;
+/*===========================================================================*
+ * do_nop *
+ *===========================================================================*/
+PUBLIC int do_nop(struct driver *UNUSED(dp), message *mp)
{
/* Nothing there, or nothing to do. */
}
}
-/*============================================================================*
- * nop_ioctl *
- *============================================================================*/
-PUBLIC int nop_ioctl(dp, mp)
-struct driver *dp;
-message *mp;
+/*===========================================================================*
+ * nop_ioctl *
+ *===========================================================================*/
+PUBLIC int nop_ioctl(struct driver *UNUSED(dp), message *UNUSED(mp))
{
return(ENOTTY);
}
-/*============================================================================*
- * nop_alarm *
- *============================================================================*/
-PUBLIC void nop_alarm(dp, mp)
-struct driver *dp;
-message *mp;
+/*===========================================================================*
+ * nop_alarm *
+ *===========================================================================*/
+PUBLIC void nop_alarm(struct driver *UNUSED(dp), message *UNUSED(mp))
{
/* Ignore the leftover alarm. */
}
/*===========================================================================*
* nop_prepare *
*===========================================================================*/
-PUBLIC struct device *nop_prepare(int device)
+PUBLIC struct device *nop_prepare(int UNUSED(device))
{
/* Nothing to prepare for. */
return(NULL);
/*===========================================================================*
* nop_cleanup *
*===========================================================================*/
-PUBLIC void nop_cleanup()
+PUBLIC void nop_cleanup(void)
{
/* Nothing to clean up. */
}
/*===========================================================================*
* nop_cancel *
*===========================================================================*/
-PUBLIC int nop_cancel(struct driver *dr, message *m)
+PUBLIC int nop_cancel(struct driver *UNUSED(dr), message *UNUSED(m))
{
/* Nothing to do for cancel. */
return(OK);
/*===========================================================================*
* nop_select *
*===========================================================================*/
-PUBLIC int nop_select(struct driver *dr, message *m)
+PUBLIC int nop_select(struct driver *UNUSED(dr), message *UNUSED(m))
{
/* Nothing to do for select. */
return(OK);
}
-/*============================================================================*
- * do_diocntl *
- *============================================================================*/
-PUBLIC int do_diocntl(dp, mp)
-struct driver *dp;
-message *mp; /* pointer to ioctl request */
+/*===========================================================================*
+ * do_diocntl *
+ *===========================================================================*/
+PUBLIC int do_diocntl(struct driver *dp, message *mp)
{
/* Carry out a partition setting/getting request. */
struct device *dv;
struct partition entry;
+ unsigned int request;
int s;
- if (mp->REQUEST != DIOCSETP && mp->REQUEST != DIOCGETP) {
- if(dp->dr_other) {
- return dp->dr_other(dp, mp);
- } else return(ENOTTY);
+ request = mp->REQUEST;
+
+ if (request != DIOCSETP && request != DIOCGETP) {
+ if(dp->dr_other)
+ return dp->dr_other(dp, mp);
+ else
+ return(ENOTTY);
}
/* Decode the message parameters. */
if ((dv = (*dp->dr_prepare)(mp->DEVICE)) == NULL) return(ENXIO);
- if (mp->REQUEST == DIOCSETP) {
+ if (request == DIOCSETP) {
/* Copy just this one partition table entry. */
s=sys_safecopyfrom(mp->m_source, (vir_bytes) mp->IO_GRANT,
0, (vir_bytes) &entry, sizeof(entry), D);
}
return(OK);
}
-
-/*===========================================================================*
- * driver_mq_queue *
- *===========================================================================*/
-PUBLIC int driver_mq_queue(message *m, int status)
-{
- mq_t *mq, *mi;
- static int mq_initialized = FALSE;
-
- if(!mq_initialized) {
- /* Init MQ library. */
- mq_init();
- mq_initialized = TRUE;
- }
-
- if(!(mq = mq_get()))
- panic("driver_mq_queue: mq_get failed");
- memcpy(&mq->mq_mess, m, sizeof(mq->mq_mess));
- mq->mq_mess_status = status;
- mq->mq_next = NULL;
- if(!queue_head) {
- queue_head = mq;
- } else {
- for(mi = queue_head; mi->mq_next; mi = mi->mq_next)
- ;
- mi->mq_next = mq;
- }
-
- return OK;
-}
-
--- /dev/null
+#ifndef _DRIVER_DRIVER_H
+#define _DRIVER_DRIVER_H
+
+_PROTOTYPE( void driver_handle_notify, (struct driver *dp, message *m_ptr) );
+_PROTOTYPE( int driver_handle_request, (struct driver *dp, message *m_ptr) );
+_PROTOTYPE( void driver_reply, (int driver_type, message *m_ptr,
+ int ipc_status, int reply) );
+
+#endif /* _DRIVER_DRIVER_H */
--- /dev/null
+/* This file contains the multithreaded device independent driver interface.
+ *
+ * Changes:
+ * Aug 27, 2011 created (A. Welzel)
+ *
+ * The entry points into this file are:
+ * driver_mt_task: the main message loop of the driver
+ * driver_mt_terminate: break out of the main message loop
+ * driver_mt_sleep: put the current thread to sleep
+ * driver_mt_wakeup: wake up a sleeping thread
+ *   driver_mt_stop:	mark the current thread for termination
+ */
+
+#include <minix/driver_mt.h>
+#include <minix/mthread.h>
+#include <assert.h>
+
+#include "driver.h"
+#include "mq.h"
+#include "event.h"
+
+typedef enum {
+ STATE_DEAD,
+ STATE_RUNNING,
+ STATE_STOPPING,
+ STATE_EXITED
+} worker_state;
+
+/* Structure to handle running worker threads. */
+typedef struct {
+ thread_id_t id;
+ worker_state state;
+ mthread_thread_t mthread;
+ event_t queue_event;
+ event_t sleep_event;
+} worker_t;
+
+PRIVATE struct driver *driver_cb;
+PRIVATE int driver_mt_type;
+PRIVATE int driver_mt_running = FALSE;
+
+PRIVATE mthread_key_t worker_key;
+
+PRIVATE worker_t worker[DRIVER_MT_MAX_WORKERS];
+
+PRIVATE worker_t *exited[DRIVER_MT_MAX_WORKERS];
+PRIVATE int num_exited = 0;
+
+/*===========================================================================*
+ * enqueue *
+ *===========================================================================*/
+PRIVATE void enqueue(worker_t *wp, const message *m_src, int ipc_status)
+{
+/* Enqueue a message into a worker thread's queue, and signal the thread.
+ * Must be called from the master thread.
+ */
+
+ assert(wp->state == STATE_RUNNING || wp->state == STATE_STOPPING);
+
+ if (!driver_mq_enqueue(wp->id, m_src, ipc_status))
+ panic("driver_mt: enqueue failed (message queue full)");
+
+ driver_event_fire(&wp->queue_event);
+}
+
+/*===========================================================================*
+ * try_dequeue *
+ *===========================================================================*/
+PRIVATE int try_dequeue(worker_t *wp, message *m_dst, int *ipc_status)
+{
+/* See if a message can be dequeued from the current worker thread's queue. If
+ * so, dequeue the message and return TRUE. If not, return FALSE. Must be
+ * called from a worker thread. Does not block.
+ */
+
+ return driver_mq_dequeue(wp->id, m_dst, ipc_status);
+}
+
+/*===========================================================================*
+ * dequeue *
+ *===========================================================================*/
+PRIVATE void dequeue(worker_t *wp, message *m_dst, int *ipc_status)
+{
+/* Dequeue a message from the current worker thread's queue. Block the current
+ * thread if necessary. Must be called from a worker thread. Always successful.
+ */
+
+ while (!try_dequeue(wp, m_dst, ipc_status))
+ driver_event_wait(&wp->queue_event);
+}
+
+/*===========================================================================*
+ * worker_thread *
+ *===========================================================================*/
+PRIVATE void *worker_thread(void *param)
+{
+/* The worker thread loop. Set up the thread-specific reference to itself and
+ * start looping. The loop dequeues and handles messages, blocking as needed.
+ * After handling a message, the thread might have been stopped, so we check
+ * for this condition and exit if so.
+ */
+ worker_t *wp;
+ message m;
+ int ipc_status, r;
+
+ wp = (worker_t *) param;
+ assert(wp != NULL);
+
+ if (mthread_setspecific(worker_key, wp))
+ panic("driver_mt: could not save local thread pointer");
+
+ while (driver_mt_running) {
+ /* See if a new message is available right away. */
+ if (!try_dequeue(wp, &m, &ipc_status)) {
+ /* If not, and this thread should be stopped, stop now. */
+ if (wp->state == STATE_STOPPING)
+ break;
+
+ /* Otherwise, block waiting for a new message. */
+ dequeue(wp, &m, &ipc_status);
+
+ if (!driver_mt_running)
+ break;
+ }
+
+ /* Even if the thread was stopped before, a new message resumes it. */
+ wp->state = STATE_RUNNING;
+
+ /* Handle the request, and send a reply. */
+ r = driver_handle_request(driver_cb, &m);
+
+ driver_reply(driver_mt_type, &m, ipc_status, r);
+ }
+
+ /* Clean up and terminate this thread. */
+ if (mthread_setspecific(worker_key, NULL))
+ panic("driver_mt: could not delete local thread pointer");
+
+ wp->state = STATE_EXITED;
+
+ exited[num_exited++] = wp;
+
+ return NULL;
+}
+
+/*===========================================================================*
+ * master_create_worker *
+ *===========================================================================*/
+PRIVATE void master_create_worker(worker_t *wp, thread_id_t id)
+{
+/* Start a new worker thread.
+ */
+ int r;
+
+ wp->id = id;
+ wp->state = STATE_RUNNING;
+
+ /* Initialize synchronization primitives. */
+ driver_event_init(&wp->queue_event);
+ driver_event_init(&wp->sleep_event);
+
+ r = mthread_create(&wp->mthread, NULL /*attr*/, worker_thread, (void *) wp);
+
+ if (r != 0)
+ panic("driver_mt: could not start thread %d (%d)", id, r);
+}
+
+/*===========================================================================*
+ * master_destroy_worker *
+ *===========================================================================*/
+PRIVATE void master_destroy_worker(worker_t *wp)
+{
+/* Clean up resources used by an exited worker thread.
+ */
+ message m;
+ int ipc_status;
+
+ assert(wp != NULL);
+ assert(wp->state == STATE_EXITED);
+ assert(!driver_mq_dequeue(wp->id, &m, &ipc_status));
+
+ /* Join the thread. */
+ if (mthread_join(wp->mthread, NULL))
+ panic("driver_mt: could not join thread %d", wp->id);
+
+ /* Destroy resources. */
+ driver_event_destroy(&wp->sleep_event);
+ driver_event_destroy(&wp->queue_event);
+
+ wp->state = STATE_DEAD;
+}
+
+/*===========================================================================*
+ * master_handle_exits *
+ *===========================================================================*/
+PRIVATE void master_handle_exits(void)
+{
+/* Destroy the remains of all exited threads.
+ */
+ int i;
+
+ for (i = 0; i < num_exited; i++)
+ master_destroy_worker(exited[i]);
+
+ num_exited = 0;
+}
+
+/*===========================================================================*
+ * master_handle_request *
+ *===========================================================================*/
+PRIVATE void master_handle_request(message *m_ptr, int ipc_status)
+{
+/* For real request messages, query the thread ID, start a thread if one with
+ * that ID is not already running, and enqueue the message in the thread's
+ * message queue.
+ */
+ thread_id_t thread_id;
+ worker_t *wp;
+ int r;
+
+  /* If this request does not have a minor device associated with it, we
+   * cannot tell which thread should process it. In that case, the master
+   * thread has to handle it instead.
+   */
+ if (!IS_DEV_MINOR_RQ(m_ptr->m_type)) {
+ if (driver_cb->dr_other)
+ r = (*driver_cb->dr_other)(driver_cb, m_ptr);
+ else
+ r = EINVAL;
+
+ driver_reply(driver_mt_type, m_ptr, ipc_status, r);
+
+ return;
+ }
+
+ /* Query the thread ID. Upon failure, send the error code to the caller. */
+ r = driver_cb->dr_thread(m_ptr->DEVICE, &thread_id);
+
+ if (r != OK) {
+ driver_reply(driver_mt_type, m_ptr, ipc_status, r);
+
+ return;
+ }
+
+ /* Start the thread if it is not already running. */
+ assert(thread_id >= 0 && thread_id < DRIVER_MT_MAX_WORKERS);
+
+ wp = &worker[thread_id];
+
+ assert(wp->state != STATE_EXITED);
+
+ if (wp->state == STATE_DEAD)
+ master_create_worker(wp, thread_id);
+
+ /* Enqueue the message for the thread, and possibly wake it up. */
+ enqueue(wp, m_ptr, ipc_status);
+}
+
+/*===========================================================================*
+ * master_init *
+ *===========================================================================*/
+PRIVATE void master_init(struct driver *dp, int type)
+{
+/* Initialize the state of the master thread.
+ */
+ int i;
+
+ assert(dp != NULL);
+ assert(dp->dr_thread != NULL);
+
+ mthread_init();
+
+ driver_mt_type = type;
+ driver_cb = dp;
+
+ for (i = 0; i < DRIVER_MT_MAX_WORKERS; i++)
+ worker[i].state = STATE_DEAD;
+
+  /* Initialize a per-thread key, with which each worker thread stores a
+   * reference to its own worker structure.
+ */
+ if (mthread_key_create(&worker_key, NULL))
+ panic("driver_mt: error initializing worker key");
+}
+
+/*===========================================================================*
+ * driver_mt_receive *
+ *===========================================================================*/
+PRIVATE void driver_mt_receive(message *m_ptr, int *ipc_status)
+{
+/* Receive a message.
+ */
+ int r;
+
+ r = sef_receive_status(ANY, m_ptr, ipc_status);
+
+ if (r != OK)
+ panic("driver_mt: sef_receive_status() returned %d", r);
+}
+
+/*===========================================================================*
+ * driver_mt_task *
+ *===========================================================================*/
+PUBLIC void driver_mt_task(struct driver *driver_p, int driver_type)
+{
+/* The multithreaded driver task.
+ */
+ int ipc_status;
+ message mess;
+
+ /* Initialize first if necessary. */
+ if (!driver_mt_running) {
+ master_init(driver_p, driver_type);
+
+ driver_mt_running = TRUE;
+ }
+
+ /* The main message loop. */
+ while (driver_mt_running) {
+ /* Receive a message. */
+ driver_mt_receive(&mess, &ipc_status);
+
+ /* Dispatch the message. */
+ if (is_ipc_notify(ipc_status))
+ driver_handle_notify(driver_cb, &mess);
+ else
+ master_handle_request(&mess, ipc_status);
+
+ /* Let other threads run. */
+ mthread_yield_all();
+
+ /* Clean up any exited threads. */
+ if (num_exited > 0)
+ master_handle_exits();
+ }
+}
+
+/*===========================================================================*
+ * driver_mt_terminate *
+ *===========================================================================*/
+PUBLIC void driver_mt_terminate(void)
+{
+/* Instruct libdriver to shut down.
+ */
+
+ driver_mt_running = FALSE;
+}
+
+/*===========================================================================*
+ * driver_mt_sleep *
+ *===========================================================================*/
+PUBLIC void driver_mt_sleep(void)
+{
+/* Let the current thread sleep until it gets woken up by the master thread.
+ */
+ worker_t *wp;
+
+ wp = (worker_t *) mthread_getspecific(worker_key);
+
+ if (wp == NULL)
+ panic("driver_mt: master thread cannot sleep");
+
+ driver_event_wait(&wp->sleep_event);
+}
+
+/*===========================================================================*
+ * driver_mt_wakeup *
+ *===========================================================================*/
+PUBLIC void driver_mt_wakeup(thread_id_t id)
+{
+/* Wake up a sleeping worker thread from the master thread.
+ */
+ worker_t *wp;
+
+ assert(id >= 0 && id < DRIVER_MT_MAX_WORKERS);
+
+ wp = &worker[id];
+
+ assert(wp->state == STATE_RUNNING || wp->state == STATE_STOPPING);
+
+ driver_event_fire(&wp->sleep_event);
+}
+
+/*===========================================================================*
+ * driver_mt_stop *
+ *===========================================================================*/
+PUBLIC void driver_mt_stop(void)
+{
+/* Mark the current worker thread for termination. Once the current dispatch
+ * call has finished and there are no more messages in the thread's message
+ * queue, the thread will be terminated. Any messages still in the queue will
+ * undo the effect of this call.
+ */
+ worker_t *wp;
+
+ wp = (worker_t *) mthread_getspecific(worker_key);
+
+ assert(wp != NULL);
+ assert(wp->state == STATE_RUNNING || wp->state == STATE_STOPPING);
+
+ wp->state = STATE_STOPPING;
+}
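
As an illustration of the interface above, here is a minimal sketch of a multithreaded driver (not part of this change): it implements the new dr_thread callback to map each minor device onto its own worker thread and then enters driver_mt_task(). The names my_name, my_thread and my_dtab are hypothetical, all other callbacks and the SEF startup code are omitted, and <minix/driver_mt.h> is assumed to provide the struct driver definition and DRIVER_MT_MAX_WORKERS.

/* Hypothetical sketch only: callback and table names are made up, and all
 * callbacks other than dr_name and dr_thread are omitted.
 */
#include <minix/drivers.h>
#include <minix/driver_mt.h>	/* assumed to pull in <minix/driver.h> */

static struct driver my_dtab;

static char *my_name(void)
{
  return "mydrv";
}

static int my_thread(dev_t minor, thread_id_t *threadp)
{
  /* Give each minor device its own worker thread. */
  if ((int) minor < 0 || (int) minor >= DRIVER_MT_MAX_WORKERS)
    return ENXIO;

  *threadp = (thread_id_t) minor;
  return OK;
}

int main(void)
{
  /* SEF startup and driver_announce() omitted for brevity. */
  my_dtab.dr_name = my_name;
  my_dtab.dr_thread = my_thread;	/* required by driver_mt_task() */
  /* ...the remaining callbacks (open, transfer, ...) would be set here... */

  driver_mt_task(&my_dtab, DRIVER_STD);

  return OK;
}

With this mapping, requests for different minor devices are handled concurrently, while requests for the same minor device stay serialized on that device's message queue.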
--- /dev/null
+/* This file contains the singlethreaded device independent driver interface.
+ *
+ * Changes:
+ * Aug 27, 2011 extracted from driver.c (A. Welzel)
+ *
+ * The entry points into this file are:
+ * driver_task: the main message loop of the driver
+ * driver_terminate: break out of the main message loop
+ * driver_handle_msg: handle a single received message
+ * driver_receive: message receive interface for drivers
+ * driver_receive_mq: message receive interface; try the message queue first
+ * driver_mq_queue: queue an incoming message for later processing
+ */
+
+#include <minix/drivers.h>
+#include <minix/driver.h>
+
+#include "driver.h"
+#include "mq.h"
+
+PUBLIC endpoint_t device_endpt; /* used externally by log driver */
+PRIVATE int driver_running;
+
+/*===========================================================================*
+ * driver_receive *
+ *===========================================================================*/
+PUBLIC int driver_receive(endpoint_t src, message *m_ptr, int *status_ptr)
+{
+/* receive() interface for drivers. */
+
+ return sef_receive_status(src, m_ptr, status_ptr);
+}
+
+/*===========================================================================*
+ * driver_receive_mq *
+ *===========================================================================*/
+PUBLIC int driver_receive_mq(message *m_ptr, int *status_ptr)
+{
+/* receive() interface for drivers with message queueing. */
+
+ /* Any queued messages? */
+ if (driver_mq_dequeue(DRIVER_MQ_SINGLE, m_ptr, status_ptr))
+ return OK;
+
+ /* Fall back to standard receive() interface otherwise. */
+ return driver_receive(ANY, m_ptr, status_ptr);
+}
+
+/*===========================================================================*
+ * driver_terminate *
+ *===========================================================================*/
+PUBLIC void driver_terminate(void)
+{
+/* Break out of the main driver loop after finishing the current request. */
+
+ driver_running = FALSE;
+}
+
+/*===========================================================================*
+ * driver_task *
+ *===========================================================================*/
+PUBLIC void driver_task(dp, type)
+struct driver *dp; /* Device dependent entry points. */
+int type; /* Driver type (DRIVER_STD or DRIVER_ASYN) */
+{
+/* Main program of any device driver task. */
+ int r, ipc_status;
+ message mess;
+
+ driver_running = TRUE;
+
+  /* Here is the main loop of the driver task. It waits for a message, carries
+ * it out, and sends a reply.
+ */
+ while (driver_running) {
+ if ((r = driver_receive_mq(&mess, &ipc_status)) != OK)
+ panic("driver_receive_mq failed: %d", r);
+
+ driver_handle_msg(dp, type, &mess, ipc_status);
+ }
+}
+
+/*===========================================================================*
+ * driver_handle_msg *
+ *===========================================================================*/
+PUBLIC void driver_handle_msg(struct driver *dp, int driver_type,
+ message *m_ptr, int ipc_status)
+{
+/* Handle the given received message. */
+ int r;
+
+ /* Dirty hack: set a global variable for the log driver. */
+ device_endpt = m_ptr->USER_ENDPT;
+
+ /* Process the notification or request. */
+ if (is_ipc_notify(ipc_status)) {
+ driver_handle_notify(dp, m_ptr);
+
+ /* Do not reply to notifications. */
+ } else {
+ r = driver_handle_request(dp, m_ptr);
+
+ driver_reply(driver_type, m_ptr, ipc_status, r);
+ }
+}
+
+/*===========================================================================*
+ * driver_mq_queue *
+ *===========================================================================*/
+PUBLIC int driver_mq_queue(message *m, int status)
+{
+/* Queue a message for later processing. */
+
+ return driver_mq_enqueue(DRIVER_MQ_SINGLE, m, status);
+}
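
For comparison, with the functions above a singlethreaded driver can also run its own message loop instead of calling driver_task(). A minimal sketch (not part of this change), assuming an already initialized callback table my_dtab and the standard reply protocol; my_task is hypothetical:

/* Hypothetical sketch only: the loop mirrors driver_task(), with room for
 * driver-specific housekeeping between messages.
 */
#include <minix/drivers.h>
#include <minix/driver.h>

extern struct driver my_dtab;	/* assumed to be set up elsewhere */

void my_task(void)
{
  message mess;
  int r, ipc_status;

  for (;;) {
    /* Take queued messages first, then block on the IPC channel. */
    if ((r = driver_receive_mq(&mess, &ipc_status)) != OK)
      panic("driver_receive_mq failed: %d", r);

    /* ...driver-specific bookkeeping could go here... */

    /* Dispatch the message and send a reply where appropriate. */
    driver_handle_msg(&my_dtab, DRIVER_STD, &mess, ipc_status);
  }
}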
/* Extended partition? */
#define ext_part(s) ((s) == 0x05 || (s) == 0x0F)
+FORWARD _PROTOTYPE( void parse_part_table, (struct driver *dp, int device,
+ int style, int atapi, u8_t *tmp_buf) );
FORWARD _PROTOTYPE( void extpartition, (struct driver *dp, int extdev,
- unsigned long extbase) );
+ unsigned long extbase, u8_t *tmp_buf) );
FORWARD _PROTOTYPE( int get_part_table, (struct driver *dp, int device,
- unsigned long offset, struct part_entry *table));
+ unsigned long offset, struct part_entry *table, u8_t *tmp_buf) );
FORWARD _PROTOTYPE( void sort, (struct part_entry *table) );
/*============================================================================*
int atapi; /* atapi device */
{
/* This routine is called on first open to initialize the partition tables
- * of a device. It makes sure that each partition falls safely within the
+ * of a device.
+ */
+ u8_t *tmp_buf;
+
+ if ((*dp->dr_prepare)(device) == NULL)
+ return;
+
+ /* For multithreaded drivers, multiple partition() calls may be made on
+ * different devices in parallel. Hence we need a separate temporary buffer
+ * for each request.
+ */
+ if (!(tmp_buf = alloc_contig(CD_SECTOR_SIZE, AC_ALIGN4K, NULL)))
+ panic("partition: unable to allocate temporary buffer");
+
+ parse_part_table(dp, device, style, atapi, tmp_buf);
+
+ free_contig(tmp_buf, CD_SECTOR_SIZE);
+}
+
+/*============================================================================*
+ * parse_part_table *
+ *============================================================================*/
+PRIVATE void parse_part_table(dp, device, style, atapi, tmp_buf)
+struct driver *dp; /* device dependent entry points */
+int device; /* device to partition */
+int style; /* partitioning style: floppy, primary, sub. */
+int atapi; /* atapi device */
+u8_t *tmp_buf; /* temporary buffer */
+{
+/* This routine reads and parses a partition table. It may be called
+ * recursively. It makes sure that each partition falls safely within the
* device's limits. Depending on the partition style we are either making
* floppy partitions, primary partitions or subpartitions. Only primary
* partitions are sorted, because they are shared with other operating
limit = base + div64u(dv->dv_size, SECTOR_SIZE);
/* Read the partition table for the device. */
- if(!get_part_table(dp, device, 0L, table)) {
+ if(!get_part_table(dp, device, 0L, table, tmp_buf)) {
return;
}
if (style == P_PRIMARY) {
/* Each Minix primary partition can be subpartitioned. */
if (pe->sysind == MINIX_PART)
- partition(dp, device + par, P_SUB, atapi);
+ parse_part_table(dp, device + par, P_SUB, atapi,
+ tmp_buf);
/* An extended partition has logical partitions. */
if (ext_part(pe->sysind))
- extpartition(dp, device + par, pe->lowsec);
+ extpartition(dp, device + par, pe->lowsec, tmp_buf);
}
}
}
/*============================================================================*
* extpartition *
*============================================================================*/
-PRIVATE void extpartition(dp, extdev, extbase)
+PRIVATE void extpartition(dp, extdev, extbase, tmp_buf)
struct driver *dp; /* device dependent entry points */
int extdev; /* extended partition to scan */
unsigned long extbase; /* sector offset of the base extended partition */
+u8_t *tmp_buf; /* temporary buffer */
{
/* Extended partitions cannot be ignored alas, because people like to move
* files to and from DOS partitions. Avoid reading this code, it's no fun.
offset = 0;
do {
- if (!get_part_table(dp, extdev, offset, table)) return;
+ if (!get_part_table(dp, extdev, offset, table, tmp_buf)) return;
sort(table);
/* The table should contain one logical partition and optionally
/*============================================================================*
* get_part_table *
*============================================================================*/
-PRIVATE int get_part_table(dp, device, offset, table)
+PRIVATE int get_part_table(dp, device, offset, table, tmp_buf)
struct driver *dp;
int device;
unsigned long offset; /* sector offset to the table */
struct part_entry *table; /* four entries */
+u8_t *tmp_buf; /* temporary buffer */
{
/* Read the partition table for the device, return true iff there were no
* errors.
iovec_t iovec1;
u64_t position;
- driver_init_buffer();
-
position = mul64u(offset, SECTOR_SIZE);
iovec1.iov_addr = (vir_bytes) tmp_buf;
iovec1.iov_size = CD_SECTOR_SIZE;
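
The hunk above drops the driver_init_buffer() call and the shared buffer behind it in favour of a buffer allocated per partition() call, since worker threads of a multithreaded driver may execute this code concurrently. As an illustration (not part of this change), the same allocate-per-request pattern applies to any scratch buffer a driver used to keep in a single PRIVATE object; my_probe and MY_BUF_SIZE are hypothetical:

/* Hypothetical sketch only: a per-request scratch buffer instead of one
 * shared static buffer, so that concurrent workers do not interfere.
 */
#include <minix/drivers.h>
#include <minix/driver.h>

#define MY_BUF_SIZE	4096	/* hypothetical scratch buffer size */

static int my_probe(void)
{
  u8_t *buf;
  int r;

  /* One buffer per request instead of one shared PRIVATE static buffer. */
  if (!(buf = alloc_contig(MY_BUF_SIZE, AC_ALIGN4K, NULL)))
    return ENOMEM;

  /* ...read a sector into buf and inspect it here... */
  r = OK;

  free_contig(buf, MY_BUF_SIZE);

  return r;
}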
--- /dev/null
+/* This file contains a simple thread event implementation.
+ */
+
+#include <minix/mthread.h>
+#include <minix/sysutil.h>
+
+#include "event.h"
+
+/*===========================================================================*
+ * driver_event_init *
+ *===========================================================================*/
+PUBLIC void driver_event_init(event_t *event)
+{
+/* Initialize an event object.
+ */
+ int r;
+
+ if ((r = mthread_mutex_init(&event->mutex, NULL)) != 0)
+ panic("libdriver: error initializing mutex (%d)", r);
+ if ((r = mthread_cond_init(&event->cond, NULL)) != 0)
+ panic("libdriver: error initializing condvar (%d)", r);
+}
+
+/*===========================================================================*
+ * driver_event_destroy *
+ *===========================================================================*/
+PUBLIC void driver_event_destroy(event_t *event)
+{
+/* Destroy an event object.
+ */
+ int r;
+
+ if ((r = mthread_cond_destroy(&event->cond)) != 0)
+ panic("libdriver: error destroying condvar (%d)", r);
+ if ((r = mthread_mutex_destroy(&event->mutex)) != 0)
+ panic("libdriver: error destroying mutex (%d)", r);
+}
+
+/*===========================================================================*
+ * driver_event_wait *
+ *===========================================================================*/
+PUBLIC void driver_event_wait(event_t *event)
+{
+/* Wait for an event, blocking the current thread in the process.
+ */
+ int r;
+
+ if ((r = mthread_mutex_lock(&event->mutex)) != 0)
+ panic("libdriver: error locking mutex (%d)", r);
+ if ((r = mthread_cond_wait(&event->cond, &event->mutex)) != 0)
+ panic("libdriver: error waiting for condvar (%d)", r);
+ if ((r = mthread_mutex_unlock(&event->mutex)) != 0)
+ panic("libdriver: error unlocking mutex (%d)", r);
+}
+
+/*===========================================================================*
+ * driver_event_fire *
+ *===========================================================================*/
+PUBLIC void driver_event_fire(event_t *event)
+{
+/* Fire an event, waking up the thread blocked on it (if any) without
+ * scheduling it to run immediately.
+ */
+ int r;
+
+ if ((r = mthread_mutex_lock(&event->mutex)) != 0)
+ panic("libdriver: error locking mutex (%d)", r);
+ if ((r = mthread_cond_signal(&event->cond)) != 0)
+ panic("libdriver: error signaling condvar (%d)", r);
+ if ((r = mthread_mutex_unlock(&event->mutex)) != 0)
+ panic("libdriver: error unlocking mutex (%d)", r);
+}
--- /dev/null
+#ifndef _DRIVER_EVENT_H
+#define _DRIVER_EVENT_H
+
+typedef struct {
+ mthread_mutex_t mutex;
+ mthread_cond_t cond;
+} event_t;
+
+_PROTOTYPE( void driver_event_init, (event_t *event) );
+_PROTOTYPE( void driver_event_destroy, (event_t *event) );
+_PROTOTYPE( void driver_event_wait, (event_t *event) );
+_PROTOTYPE( void driver_event_fire, (event_t *event) );
+
+#endif /* _DRIVER_EVENT_H */
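
These event objects back driver_mt_sleep() and driver_mt_wakeup(). As an illustration (not part of this change), here is a sketch of the typical pattern: a worker thread suspends itself while waiting for hardware, and the master thread wakes it up from the interrupt notification handler. The names my_wait_for_irq, my_hw_int and w_current_id are hypothetical, and the HARDWARE notification is assumed to be dispatched to the dr_hw_int callback on the master thread.

/* Hypothetical sketch only: because mthread scheduling is cooperative, the
 * master thread can only deliver the wakeup after the worker has blocked.
 */
#include <minix/drivers.h>
#include <minix/driver_mt.h>	/* assumed to pull in <minix/driver.h> */

static thread_id_t w_current_id;	/* worker waiting for the interrupt */

static int my_wait_for_irq(thread_id_t id)
{
  /* Called from a worker thread in the middle of handling a request. */
  w_current_id = id;

  driver_mt_sleep();	/* suspends only this worker thread */

  return OK;
}

static void my_hw_int(struct driver *dp, message *m_ptr)
{
  /* Runs on the master thread; resume the waiting worker. */
  driver_mt_wakeup(w_current_id);
}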
-/*
-inet/mq.c
-
-Created: Jan 3, 1992 by Philip Homburg
-
-Copyright 1995 Philip Homburg
-*/
+/* This file contains a simple message queue implementation to support both
+ * the singlethreaded and the multithreaded driver implementations.
+ *
+ * Changes:
+ * Oct 27, 2011 rewritten to use sys/queue.h (D.C. van Moolenbroek)
+ * Aug 27, 2011 integrated into libdriver (A. Welzel)
+ */
-#include <minix/ansi.h>
+#include <minix/driver_mt.h>
+#include <sys/queue.h>
#include <assert.h>
-#include <minix/config.h>
-#include <minix/const.h>
-#include <minix/type.h>
-#include <minix/ipc.h>
-#include <minix/mq.h>
+#include "mq.h"
#define MQ_SIZE 128
-PRIVATE mq_t mq_list[MQ_SIZE];
-PRIVATE mq_t *mq_freelist;
+struct mq_cell {
+ message mess;
+ int ipc_status;
+ STAILQ_ENTRY(mq_cell) next;
+};
-void mq_init()
-{
- int i;
-
- mq_freelist= NULL;
- for (i= 0; i<MQ_SIZE; i++)
- {
- mq_list[i].mq_next= mq_freelist;
- mq_freelist= &mq_list[i];
- mq_list[i].mq_allocated= 0;
- }
-}
+PRIVATE struct mq_cell pool[MQ_SIZE];
+
+PRIVATE STAILQ_HEAD(queue, mq_cell) queue[DRIVER_MT_MAX_WORKERS];
+PRIVATE STAILQ_HEAD(free_list, mq_cell) free_list;
-mq_t *mq_get()
+/*===========================================================================*
+ * driver_mq_init *
+ *===========================================================================*/
+PUBLIC void driver_mq_init(void)
{
- mq_t *mq;
+/* Initialize the message queues and message cells.
+ */
+ int i;
+
+ STAILQ_INIT(&free_list);
- mq= mq_freelist;
- assert(mq != NULL);
+ for (i = 0; i < DRIVER_MT_MAX_WORKERS; i++)
+ STAILQ_INIT(&queue[i]);
- mq_freelist= mq->mq_next;
- mq->mq_next= NULL;
- assert(mq->mq_allocated == 0);
- mq->mq_allocated= 1;
- return mq;
+ for (i = 0; i < MQ_SIZE; i++)
+ STAILQ_INSERT_HEAD(&free_list, &pool[i], next);
}
-void mq_free(mq)
-mq_t *mq;
+/*===========================================================================*
+ * driver_mq_enqueue *
+ *===========================================================================*/
+PUBLIC int driver_mq_enqueue(thread_id_t thread_id, const message *mess,
+ int ipc_status)
{
- mq->mq_next= mq_freelist;
- mq_freelist= mq;
- assert(mq->mq_allocated == 1);
- mq->mq_allocated= 0;
+/* Add a message, including its IPC status, to the message queue of a thread.
+ * Return TRUE iff the message was added successfully.
+ */
+ struct mq_cell *cell;
+
+ assert(thread_id >= 0 && thread_id < DRIVER_MT_MAX_WORKERS);
+
+ if (STAILQ_EMPTY(&free_list))
+ return FALSE;
+
+ cell = STAILQ_FIRST(&free_list);
+ STAILQ_REMOVE_HEAD(&free_list, next);
+
+ cell->mess = *mess;
+ cell->ipc_status = ipc_status;
+
+ STAILQ_INSERT_TAIL(&queue[thread_id], cell, next);
+
+ return TRUE;
}
-/*
- * $PchId: mq.c,v 1.7 1998/10/23 20:10:47 philip Exp $
+/*===========================================================================*
+ * driver_mq_dequeue *
+ *===========================================================================*/
+PUBLIC int driver_mq_dequeue(thread_id_t thread_id, message *mess,
+ int *ipc_status)
+{
+/* Return and remove a message, including its IPC status, from the message
+ * queue of a thread. Return TRUE iff a message was available.
*/
+ struct mq_cell *cell;
+
+ assert(thread_id >= 0 && thread_id < DRIVER_MT_MAX_WORKERS);
+
+ if (STAILQ_EMPTY(&queue[thread_id]))
+ return FALSE;
+
+ cell = STAILQ_FIRST(&queue[thread_id]);
+ STAILQ_REMOVE_HEAD(&queue[thread_id], next);
+
+ *mess = cell->mess;
+ *ipc_status = cell->ipc_status;
+
+ STAILQ_INSERT_HEAD(&free_list, cell, next);
+
+ return TRUE;
+}
--- /dev/null
+#ifndef _DRIVER_MQ_H
+#define _DRIVER_MQ_H
+
+#define DRIVER_MQ_SINGLE 0 /* thread ID for single-threading */
+
+_PROTOTYPE( void driver_mq_init, (void) );
+_PROTOTYPE( int driver_mq_enqueue, (thread_id_t thread_id, const message *mess,
+ int ipc_status) );
+_PROTOTYPE( int driver_mq_dequeue, (thread_id_t thread_id, message *mess,
+ int *ipc_status) );
+
+#endif /* _DRIVER_MQ_H */
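
As an illustration (not part of this change), DRIVER_MQ_SINGLE together with driver_mq_queue() and driver_receive_mq() lets a singlethreaded driver defer messages that arrive while it is busy, for example while it waits for its own hardware interrupt. A sketch; my_wait_for_interrupt is hypothetical:

/* Hypothetical sketch only: unexpected messages received during a busy wait
 * are deferred, and the main loop picks them up again via driver_receive_mq().
 */
#include <minix/drivers.h>
#include <minix/driver.h>

static int my_wait_for_interrupt(void)
{
  message m;
  int r, ipc_status;

  for (;;) {
    if ((r = driver_receive(ANY, &m, &ipc_status)) != OK)
      panic("driver_receive failed: %d", r);

    /* The hardware interrupt notification ends the busy period. */
    if (is_ipc_notify(ipc_status) && m.m_source == HARDWARE)
      return OK;

    /* Defer anything else; driver_receive_mq() in the main loop will return
     * these messages once the current request has completed.
     */
    if (!driver_mq_queue(&m, ipc_status))
      printf("mydrv: deferred-message queue full, dropping request\n");
  }
}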