PRIVATE long ahci_sig_checks = NR_SIG_CHECKS;
PRIVATE long ahci_command_timeout = COMMAND_TIMEOUT;
PRIVATE long ahci_transfer_timeout = TRANSFER_TIMEOUT;
+PRIVATE long ahci_flush_timeout = FLUSH_TIMEOUT;
PRIVATE int ahci_map[MAX_DRIVES]; /* device-to-port mapping */
ATA_ID_GCAP_TYPE_SHIFT) == ATAPI_TYPE_CDROM)
ps->flags |= FLAG_READONLY;
+ if ((buf[ATA_ID_SUP1] & ATA_ID_SUP1_VALID_MASK) == ATA_ID_SUP1_VALID &&
+ !(ps->flags & FLAG_READONLY)) {
+ /* Save write cache related capabilities of the device. It is
+ * possible, although unlikely, that a device has support for
+ * either of these but not both.
+ */
+ if (buf[ATA_ID_SUP0] & ATA_ID_SUP0_WCACHE)
+ ps->flags |= FLAG_HAS_WCACHE;
+
+ if (buf[ATA_ID_SUP1] & ATA_ID_SUP1_FLUSH)
+ ps->flags |= FLAG_HAS_FLUSH;
+ }
+
return TRUE;
}
*/
/* This must be an ATA device; it must not have removable media;
- * it must support LBA and DMA; it must support 48-bit addressing.
+ * it must support LBA and DMA; it must support the FLUSH CACHE
+ * command; it must support 48-bit addressing.
*/
if ((buf[ATA_ID_GCAP] & (ATA_ID_GCAP_ATA_MASK | ATA_ID_GCAP_REMOVABLE |
ATA_ID_GCAP_INCOMPLETE)) != ATA_ID_GCAP_ATA ||
(buf[ATA_ID_CAP] & (ATA_ID_CAP_LBA | ATA_ID_CAP_DMA)) !=
(ATA_ID_CAP_LBA | ATA_ID_CAP_DMA) ||
- (buf[ATA_ID_SUP1] &
- (ATA_ID_SUP1_VALID_MASK | ATA_ID_SUP1_LBA48)) !=
- (ATA_ID_SUP1_VALID | ATA_ID_SUP1_LBA48)) {
+ (buf[ATA_ID_SUP1] & (ATA_ID_SUP1_VALID_MASK |
+ ATA_ID_SUP1_FLUSH | ATA_ID_SUP1_LBA48)) !=
+ (ATA_ID_SUP1_VALID | ATA_ID_SUP1_FLUSH | ATA_ID_SUP1_LBA48)) {
dprintf(V_ERR, ("%s: unsupported ATA device\n",
ahci_portname(ps)));
return FALSE;
}
- ps->flags |= FLAG_HAS_MEDIUM;
+ ps->flags |= FLAG_HAS_MEDIUM | FLAG_HAS_FLUSH;
+
+ /* FLUSH CACHE is mandatory for ATA devices; write caches are not. */
+ if (buf[ATA_ID_SUP0] & ATA_ID_SUP0_WCACHE)
+ ps->flags |= FLAG_HAS_WCACHE;
return TRUE;
}
/*===========================================================================*
 *				gen_identify				     *
 *===========================================================================*/
-PRIVATE void gen_identify(struct port_state *ps, int cmd)
+PRIVATE int gen_identify(struct port_state *ps, int cmd, int blocking)
{
-	/* Identify an ATA or ATAPI device.
+	/* Identify an ATA or ATAPI device. If the blocking flag is set, block
+	 * until the command has completed; otherwise return immediately.
+	 * When blocking, the command's result (from port_exec) is returned;
+	 * otherwise OK is returned as soon as the command has been issued.
	 */
	cmd_fis_t fis;
	prd_t prd;
+	/* The identify data is transferred into the port's temporary buffer. */
	prd.prd_phys = ps->tmp_phys;
	prd.prd_size = ATA_ID_SIZE;
-	/* Start the command, but do not wait for the result. */
+	/* Start the command, and possibly wait for the result. */
	port_set_cmd(ps, cmd, &fis, NULL /*packet*/, &prd, 1, FALSE /*write*/);
+	if (blocking)
+		return port_exec(ps, cmd, ahci_command_timeout);
+
	port_issue(ps, cmd, ahci_command_timeout);
+
+	return OK;
+}
+
+/*===========================================================================*
+ *				gen_flush_wcache			     *
+ *===========================================================================*/
+PRIVATE int gen_flush_wcache(struct port_state *ps, int cmd)
+{
+	/* Flush the device's write cache. Return OK on success, EINVAL if the
+	 * device does not support flushing, or the command's error result on
+	 * failure.
+	 */
+	cmd_fis_t fis;
+
+	/* The FLUSH CACHE command may not be supported by all (writable ATAPI)
+	 * devices.
+	 */
+	if (!(ps->flags & FLAG_HAS_FLUSH))
+		return EINVAL;
+
+	/* Use the FLUSH CACHE command for both ATA and ATAPI. We are not
+	 * interested in the disk location of a failure, so there is no reason
+	 * to use the ATA-only FLUSH CACHE EXT command. Either way, the command
+	 * may indeed fail due to a disk error, in which case it should be
+	 * repeated. For now, we shift this responsibility onto the caller.
+	 */
+	memset(&fis, 0, sizeof(fis));
+	fis.cf_cmd = ATA_CMD_FLUSH_CACHE;
+
+	/* Start the command, and wait for it to complete or fail.
+	 * The flush command may take longer than regular I/O commands.
+	 */
+	port_set_cmd(ps, cmd, &fis, NULL /*packet*/, NULL /*prdt*/, 0,
+		FALSE /*write*/);
+
+	return port_exec(ps, cmd, ahci_flush_timeout);
+}
+
+/*===========================================================================*
+ *				gen_get_wcache				     *
+ *===========================================================================*/
+PRIVATE int gen_get_wcache(struct port_state *ps, int cmd, int *val)
+{
+	/* Retrieve the status of the device's write cache. On success, OK is
+	 * returned and *val is set to 1 if the write cache is enabled, or to
+	 * 0 if it is disabled. Otherwise, an error code is returned.
+	 */
+	int r;
+
+	/* Write caches are not mandatory. */
+	if (!(ps->flags & FLAG_HAS_WCACHE))
+		return EINVAL;
+
+	/* Retrieve information about the device. */
+	if ((r = gen_identify(ps, cmd, TRUE /*blocking*/)) != OK)
+		return r;
+
+	/* Return the current setting, from the "features enabled" word of the
+	 * identify data.
+	 * NOTE(review): the validity of this word (cf. ATA_ID_ENA2_VALID for
+	 * word 87) is not checked here -- confirm this is intentional.
+	 */
+	*val = !!(((u16_t *) ps->tmp_base)[ATA_ID_ENA0] & ATA_ID_ENA0_WCACHE);
+
+	return OK;
+}
+
+/*===========================================================================*
+ *				gen_set_wcache				     *
+ *===========================================================================*/
+PRIVATE int gen_set_wcache(struct port_state *ps, int cmd, int enable)
+{
+	/* Enable or disable the device's write cache, using the SET FEATURES
+	 * command. Return OK on success, EINVAL if the device has no write
+	 * cache, or the command's error result on failure.
+	 */
+	cmd_fis_t fis;
+	clock_t timeout;
+
+	/* Write caches are not mandatory. */
+	if (!(ps->flags & FLAG_HAS_WCACHE))
+		return EINVAL;
+
+	/* Disabling the write cache causes a (blocking) cache flush. Cache
+	 * flushes may take much longer than regular commands.
+	 */
+	timeout = enable ? ahci_command_timeout : ahci_flush_timeout;
+
+	/* Set up a command. */
+	memset(&fis, 0, sizeof(fis));
+	fis.cf_cmd = ATA_CMD_SET_FEATURES;
+	fis.cf_feat = enable ? ATA_SF_EN_WCACHE : ATA_SF_DI_WCACHE;
+
+	/* Start the command, and wait for it to complete or fail. */
+	port_set_cmd(ps, cmd, &fis, NULL /*packet*/, NULL /*prdt*/, 0,
+		FALSE /*write*/);
+
+	return port_exec(ps, cmd, timeout);
+}
/*===========================================================================*
ps->state = STATE_WAIT_ID;
ps->reg[AHCI_PORT_IE] = AHCI_PORT_IE_MASK;
- gen_identify(ps, 0);
+ (void) gen_identify(ps, 0, FALSE /*blocking*/);
}
/*===========================================================================*
for (port = 0; port < hba_state.nr_ports; port++) {
if (port_state[port].state != STATE_NO_PORT) {
+ if (port_state[port].state == STATE_GOOD_DEV)
+ (void) gen_flush_wcache(&port_state[port], 0);
+
port_stop(&port_state[port]);
port_free(&port_state[port]);
ahci_get_var("ahci_sig_checks", &ahci_sig_checks, FALSE);
ahci_get_var("ahci_cmd_timeout", &ahci_command_timeout, TRUE);
ahci_get_var("ahci_io_timeout", &ahci_transfer_timeout, TRUE);
+ ahci_get_var("ahci_flush_timeout", &ahci_flush_timeout, TRUE);
}
/*===========================================================================*
/* Process any messages not covered by the other calls.
* This function only implements IOCTLs.
*/
+ int r, val;
if (m->m_type != DEV_IOCTL_S)
return EINVAL;
return sys_safecopyto(m->IO_ENDPT, (cp_grant_id_t) m->IO_GRANT,
0, (vir_bytes) ¤t_port->open_count,
sizeof(current_port->open_count), D);
+
+ case DIOCFLUSH:
+ if (current_port->state != STATE_GOOD_DEV)
+ return EIO;
+
+ return gen_flush_wcache(current_port, 0);
+
+ case DIOCSETWC:
+ if (current_port->state != STATE_GOOD_DEV)
+ return EIO;
+
+ if ((r = sys_safecopyfrom(m->IO_ENDPT,
+ (cp_grant_id_t) m->IO_GRANT, 0, (vir_bytes) &val,
+ sizeof(val), D)) != OK)
+ return r;
+
+ return gen_set_wcache(current_port, 0, val);
+
+ case DIOCGETWC:
+ if (current_port->state != STATE_GOOD_DEV)
+ return EIO;
+
+ if ((r = gen_get_wcache(current_port, 0, &val)) != OK)
+ return r;
+
+ return sys_safecopyto(m->IO_ENDPT, (cp_grant_id_t) m->IO_GRANT,
+ 0, (vir_bytes) &val, sizeof(val), D);
}
return EINVAL;
#define NR_SIG_CHECKS	60	/* maximum number of times to check */
#define COMMAND_TIMEOUT	5000	/* time to wait for non-I/O cmd (ms) */
#define TRANSFER_TIMEOUT	30000	/* time to wait for I/O cmd (ms) */
+#define FLUSH_TIMEOUT	60000	/* time to wait for flush cmd (ms) */
/* Time values that are defined by the standards. */
#define SPINUP_DELAY	1	/* time to assert spin-up flag (ms) */
#define ATA_CMD_WRITE_DMA_EXT	0x35	/* WRITE DMA EXT */
#define ATA_CMD_PACKET	0xA0	/* PACKET */
#define ATA_CMD_IDENTIFY_PACKET	0xA1	/* IDENTIFY PACKET DEVICE */
+#define ATA_CMD_FLUSH_CACHE	0xE7	/* FLUSH CACHE */
#define ATA_CMD_IDENTIFY	0xEC	/* IDENTIFY DEVICE */
+#define ATA_CMD_SET_FEATURES	0xEF	/* SET FEATURES */
#define ATA_H2D_FEAT	3	/* Features */
#define ATA_FEAT_PACKET_DMA	0x01	/* use DMA */
#define ATA_FEAT_PACKET_DMADIR	0x03	/* DMA is inbound */
#define ATA_ID_DMADIR	62	/* DMADIR */
#define ATA_ID_DMADIR_DMADIR	0x8000	/* DMADIR required */
#define ATA_ID_DMADIR_DMA	0x0400	/* DMA supported (DMADIR) */
+#define ATA_ID_SUP0	82	/* Features supported (1/3) */
+#define ATA_ID_SUP0_WCACHE	0x0020	/* Write cache supported */
#define ATA_ID_SUP1	83	/* Features supported (2/3) */
#define ATA_ID_SUP1_VALID_MASK	0xC000	/* Word validity mask */
#define ATA_ID_SUP1_VALID	0x4000	/* Word contents are valid */
+#define ATA_ID_SUP1_FLUSH	0x1000	/* FLUSH CACHE supported */
#define ATA_ID_SUP1_LBA48	0x0400	/* 48-bit LBA supported */
+#define ATA_ID_ENA0	85	/* Features enabled (1/3) */
+#define ATA_ID_ENA0_WCACHE	0x0020	/* Write cache enabled */
+#define ATA_ID_ENA2	87	/* Features enabled (3/3) */
+#define ATA_ID_ENA2_VALID_MASK	0xC000	/* Word validity mask */
+#define ATA_ID_ENA2_VALID	0x4000	/* Word contents are valid */
#define ATA_ID_LBA0	100	/* Max. LBA48 address (LSW) */
#define ATA_ID_LBA1	101	/* Max. LBA48 address */
#define ATA_ID_LBA2	102	/* Max. LBA48 address */
#define ATA_ID_LSS0	118	/* Logical sector size (LSW) */
#define ATA_ID_LSS1	119	/* Logical sector size (MSW) */
+/* SET FEATURES subcommand codes (values for the features register). */
+#define ATA_SF_EN_WCACHE	0x02	/* Enable write cache */
+#define ATA_SF_DI_WCACHE	0x82	/* Disable write cache */
+
/* ATAPI constants. */
#define ATAPI_PACKET_SIZE	16	/* ATAPI packet size */
#define FLAG_BUSY	0x00000010	/* is an operation ongoing? */
#define FLAG_FAILURE	0x00000020	/* did the operation fail? */
#define FLAG_BARRIER	0x00000040	/* no access until unset */
+#define FLAG_HAS_WCACHE	0x00000080	/* is a write cache present? */
+#define FLAG_HAS_FLUSH	0x00000100	/* is FLUSH CACHE supported? */
/* Mapping between devices and ports. */
#define NO_PORT	-1	/* this device maps to no port */