From: Albert H. <he...@us...> - 2005-09-25 19:56:00
Update of /cvsroot/gc-linux/linux/drivers/exi
In directory sc8-pr-cvs1.sourceforge.net:/tmp/cvs-serv2087/drivers/exi

Modified Files:
	exi-hw.h exi-hw.c exi-driver.c
Log Message:
Added an interrupt-driven immediate (aka idi) transfer mode to the EXI layer.
Added support for pluggable devices on memcard slots.

Index: exi-driver.c
===================================================================
RCS file: /cvsroot/gc-linux/linux/drivers/exi/exi-driver.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -u -d -r1.9 -r1.10
--- exi-driver.c	20 Mar 2005 17:10:38 -0000	1.9
+++ exi-driver.c	25 Sep 2005 19:55:50 -0000	1.10
@@ -17,9 +17,9 @@
 #include <linux/module.h>
 #include <linux/init.h>
 #include <linux/device.h>
-#include <linux/exi.h>
-
+#include <linux/kthread.h>
 #include <linux/delay.h>
+#include <linux/exi.h>
 
 #define DRV_MODULE_NAME "exi-driver"
 #define DRV_DESCRIPTION "Nintendo GameCube EXpansion Interface driver"
@@ -27,7 +27,7 @@
 	"Todd Jeffreys <to...@vo...>, " \
 	"Albert Herranz"
 
-static char exi_driver_version[] = "3.0";
+static char exi_driver_version[] = "3.1-isobel";
 
 extern struct device exi_bus_devices[EXI_MAX_CHANNELS];
 
@@ -73,7 +73,6 @@
 	return "Unknown";
 }
 
-
 /*
  * Internal. Check if an exi device matches a given exi device id.
  */
@@ -125,6 +124,12 @@
 	return 0;
 }
 
+/*
+ * Device release.
+ */
+static void exi_device_release(struct device *dev)
+{
+}
 
 /*
  * Internal. Initialize an exi_device structure.
@@ -137,16 +142,16 @@
 	exi_device->eid.id = EXI_ID_INVALID;
 	exi_device->eid.channel = channel;
 	exi_device->eid.device = device;
-	exi_device->frequency = -1;
+	exi_device->frequency = EXI_FREQ_SCAN;
 	exi_device->exi_channel = to_exi_channel(channel);
 
-	device_initialize(&exi_device->dev);
 	exi_device->dev.parent = &exi_bus_devices[channel];
 	exi_device->dev.bus = &exi_bus_type;
+	sprintf(exi_device->dev.bus_id, "%01x:%01x", channel, device);
 	exi_device->dev.platform_data = to_exi_channel(channel);
+	exi_device->dev.release = exi_device_release;
 
-	sprintf(exi_device->dev.bus_id, "%01x:%01x", channel, device);
 }
 
 /**
@@ -164,6 +169,7 @@
 	return exi_device;
 }
 
+
 /**
  * exi_device_put - Releases a use of the exi device
  * @exi_device: device that's been disconnected
@@ -184,6 +190,7 @@
 struct exi_device *exi_get_exi_device(struct exi_channel *exi_channel,
				      int device)
 {
+	// FIXME, maybe exi_device_get it too
 	return &exi_devices[to_channel(exi_channel)][device];
 }
 
@@ -225,6 +232,9 @@
 	if (exi_driver->remove)
 		exi_driver->remove(exi_device);
 
+	if (!exi_is_dying(exi_device))
+		exi_device->eid.id = EXI_ID_INVALID;
+
 	return 0;
 }
 
@@ -260,50 +270,56 @@
 
 /*
+ * Internal. Re-scan a given device.
+ */
+static void exi_device_rescan(struct exi_device *exi_device)
+{
+	unsigned int id;
+
+	/* do nothing if the device is marked to die */
+	if (exi_is_dying(exi_device))
+		return;
+
+	/* now ID the device */
+	id = exi_get_id(exi_device);
+
+	if (exi_device->eid.id != EXI_ID_INVALID) {
+		/* device removed or changed */
+		exi_printk(KERN_INFO, "removed [%s] id=0x%08x %s\n",
+			   exi_device->dev.bus_id,
+			   exi_device->eid.id,
+			   exi_name_id(exi_device->eid.id));
+		device_unregister(&exi_device->dev);
+		exi_device->eid.id = EXI_ID_INVALID;
+	}
+
+	if (id != EXI_ID_INVALID) {
+		/* a new device has been found */
+		exi_printk(KERN_INFO, "added [%s] id=0x%08x %s\n",
+			   exi_device->dev.bus_id,
+			   id, exi_name_id(id));
+		exi_device->eid.id = id;
+		device_register(&exi_device->dev);
+	}
+
+	exi_update_ext_status(exi_get_exi_channel(exi_device));
+}
+
+/*
+ * Internal. Re-scan a given exi channel, looking for added, changed and
  * removed exi devices.
- * XXX Currently, only _new_ devices are taken into account.
  */
 static void exi_channel_rescan(struct exi_channel *exi_channel)
 {
 	struct exi_device *exi_device;
-	unsigned int channel, device, id;
-
-	spin_lock(&exi_channel->lock);
+	unsigned int channel, device;
 
 	/* add the exi devices underneath the parents */
 	for (device = 0; device < EXI_DEVICES_PER_CHANNEL; ++device) {
 		channel = to_channel(exi_channel);
 		exi_device = &exi_devices[channel][device];
-
-		/* now ID the device */
-		id = exi_get_id(exi_channel, device, EXI_FREQ_SCAN);
-
-		/*
-		 * We only process currently _new_ devices here.
-		 */
-		if (id != EXI_ID_INVALID) {
-			exi_printk(KERN_INFO, "[%s] id=0x%08x %s\n",
-				   exi_device->dev.bus_id,
-				   id, exi_name_id(id));
-
-			if (exi_device->eid.id == EXI_ID_INVALID) {
-				/* a new device has been found */
-				exi_device->eid.id = id;
-				device_register(&exi_device->dev);
-			} else {
-				/* device changed */
-				/* remove, add */
-			}
-		} else {
-			if (exi_device->eid.id != EXI_ID_INVALID) {
-				/* device removed */
-				/* remove */
-			}
-		}
+		exi_device_rescan(exi_device);
 	}
-
-	spin_unlock(&exi_channel->lock);
 }
 
 /*
@@ -321,6 +337,40 @@
 }
 
+static struct task_struct *exi_bus_task;
+wait_queue_head_t exi_bus_waitq;
+
+/*
+ * Internal. Looks for new, changed or removed devices.
+ */
+static int exi_bus_thread(void *__unused)
+{
+	struct exi_channel *exi_channel;
+	struct exi_device *exi_device;
+	unsigned int channel;
+	int is_loaded, was_loaded;
+
+	while(!kthread_should_stop()) {
+		sleep_on_timeout(&exi_bus_waitq, HZ);
+
+		/* scan the memcard slot channels for device changes */
+		for (channel = 0; channel <= 1; ++channel) {
+			exi_channel = to_exi_channel(channel);
+
+			is_loaded = exi_get_ext_line(exi_channel);
+			was_loaded = (exi_channel->flags & EXI_EXT)?1:0;
+
+			if (is_loaded ^ was_loaded) {
+				exi_device = &exi_devices[channel][0];
+				exi_device_rescan(exi_device);
+			}
+		}
+	}
+
+	return 0;
+}
+
+
 static struct exi_device exi_devices[EXI_MAX_CHANNELS][EXI_DEVICES_PER_CHANNEL];
 
 static struct bus_type exi_bus_type = {
@@ -378,6 +428,13 @@
 	/* now enumerate through the bus and add all detected devices */
 	exi_bus_rescan();
 
+	/* setup a thread to manage plugable devices */
+	init_waitqueue_head(&exi_bus_waitq);
+	exi_bus_task = kthread_run(exi_bus_thread, NULL, "kexid");
+	if (IS_ERR(exi_bus_task)) {
+		exi_printk(KERN_WARNING, "failed to start exi kernel thread\n");
+	}
+
 	return 0;
 
 err_bus_register:

Index: exi-hw.h
===================================================================
RCS file: /cvsroot/gc-linux/linux/drivers/exi/exi-hw.h,v
retrieving revision 1.5
retrieving revision 1.6
diff -u -d -r1.5 -r1.6
--- exi-hw.h	14 Sep 2005 19:21:47 -0000	1.5
+++ exi-hw.h	25 Sep 2005 19:55:50 -0000	1.6
@@ -42,6 +42,8 @@
 #define EXI_READ 0
 #define EXI_WRITE 1
 
+#define EXI_IDI_MAX_SIZE 4
+
 #define EXI_IRQ 4
 
@@ -106,8 +108,9 @@
 	int channel;
 
 	unsigned long flags;
-#define EXI_SELECTED (1<<0)
-#define EXI_DMABUSY (1<<1)
+#define EXI_SELECTED	(1<<0)
+#define EXI_DMABUSY	(1<<1)
+#define EXI_EXT		(1<<8)
 
 	spinlock_t io_lock; /* serializes access to CSR */
 	void __iomem *io_base;
@@ -116,7 +119,7 @@
 	struct exi_device *device_selected;
 	wait_queue_head_t wait_queue;
 
-	struct exi_command *dma_cmd;
+	struct exi_command *queued_cmd;
 	struct exi_command post_cmd;
 
 	unsigned long csr;
@@ -125,6 +128,9 @@
 	struct exi_event_handler events[EXI_MAX_EVENTS];
 };
 
+extern int exi_get_ext_line(struct exi_channel *exi_channel);
+extern void exi_update_ext_status(struct exi_channel *exi_channel);
+
 extern int exi_hw_init(char *);
 extern void exi_hw_exit(void);

Index: exi-hw.c
===================================================================
RCS file: /cvsroot/gc-linux/linux/drivers/exi/exi-hw.c,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -d -r1.10 -r1.11
--- exi-hw.c	14 Sep 2005 19:47:20 -0000	1.10
+++ exi-hw.c	25 Sep 2005 19:55:50 -0000	1.11
@@ -76,6 +76,7 @@
 # define DBG(fmt, args...)
 #endif
 
+extern wait_queue_head_t exi_bus_waitq;
 
 static void exi_tasklet(unsigned long param);
 
@@ -261,11 +262,87 @@
 }
 
 /*
+ * Internal. Start a transfer using "interrupt-driven immediate" mode.
+ */
+static void exi_start_idi_transfer_raw(struct exi_channel *exi_channel,
+				       void *data, size_t len, int mode)
+{
+	void __iomem *io_base = exi_channel->io_base;
+	u32 __iomem *csr_reg = io_base + EXI_CSR;
+	u32 val = ~0;
+	unsigned long flags;
+
+	BUG_ON(len < 1 || len > 4);
+
+	if ((mode & EXI_OP_WRITE)) {
+		switch(len) {
+		case 1:
+			val = *((u8*)data) << 24;
+			break;
+		case 2:
+			val = *((u16*)data) << 16;
+			break;
+		case 3:
+			val = *((u16*)data) << 16;
+			val |= *((u8*)data+2) << 8;
+			break;
+		case 4:
+			val = *((u32*)data);
+			break;
+		default:
+			break;
+		}
+	}
+
+	writel(val, io_base + EXI_DATA);
+
+	/* enable the Transfer Complete interrupt */
+	spin_lock_irqsave(&exi_channel->io_lock, flags);
+	writel(readl(csr_reg) | EXI_CSR_TCINTMASK, csr_reg);
+	spin_unlock_irqrestore(&exi_channel->io_lock, flags);
+
+	/* start the transfer */
+	writel(EXI_CR_TSTART | EXI_CR_TLEN(len) | (mode&0xf), io_base + EXI_CR);
+}
+
+/*
+ * Internal. Finish a transfer using "interrupt-driven immediate" mode.
+ */
+static void exi_end_idi_transfer_raw(struct exi_channel *exi_channel,
+				     void *data, size_t len, int mode)
+{
+	void __iomem *io_base = exi_channel->io_base;
+	u32 val = ~0;
+
+	BUG_ON(len < 1 || len > 4);
+
+	if ((mode&0xf) != EXI_OP_WRITE) {
+		val = readl(io_base + EXI_DATA);
+		switch(len) {
+		case 1:
+			*((u8*)data) = (u8)(val >> 24);
+			break;
+		case 2:
+			*((u16*)data) = (u16)(val >> 16);
+			break;
+		case 3:
+			*((u16*)data) = (u16)(val >> 16);
+			*((u8*)data+2) = (u8)(val >> 8);
+			break;
+		case 4:
+			*((u32*)data) = (u32)(val);
+			break;
+		default:
+			break;
+		}
+	}
+}
+
+/*
+ * Internal. Start a transfer using DMA mode.
  */
-static inline void exi_start_dma_transfer_raw(struct exi_channel *exi_channel,
-					      dma_addr_t data, size_t len,
-					      int mode)
+static void exi_start_dma_transfer_raw(struct exi_channel *exi_channel,
+				       dma_addr_t data, size_t len, int mode)
 {
 	void __iomem *io_base = exi_channel->io_base;
 	u32 __iomem *csr_reg = io_base + EXI_CSR;
@@ -297,8 +374,7 @@
 /*
  * Internal. Busy-wait until a DMA mode transfer operation completes.
  */
-static inline
-void exi_wait_for_dma_transfer_raw(struct exi_channel *exi_channel)
+static void exi_wait_for_transfer_raw(struct exi_channel *exi_channel)
 {
 	u32 __iomem *cr_reg = exi_channel->io_base + EXI_CR;
 	u32 __iomem *csr_reg = exi_channel->io_base + EXI_CSR;
@@ -319,74 +395,136 @@
 	spin_unlock_irqrestore(&exi_channel->io_lock, flags);
 }
 
+static void exi_command_done(struct exi_command *cmd);
 
-/**
- * exi_dma_transfer_raw - performs an exi transfer in DMA mode
- * @exi_channel: channel
- * @data: address of data being read/writen (32 byte aligned)
- * @len: length of data (32 byte aligned)
- * @mode: direction of transfer (EXI_OP_READ or EXI_OP_WRITE)
- *
- * Read or write data on a given EXI channel, using DMA mode, and
- * busy-wait until transfer is done.
- *
+/*
+ * Internal. Check if an exi channel has delayed work to do.
  */
-void exi_dma_transfer_raw(struct exi_channel *exi_channel,
-			  dma_addr_t data, size_t len, int mode)
+static void exi_check_pending_work(void)
 {
-	if (len <= 0)
-		return;
+	struct exi_channel *exi_channel;
 
-	exi_start_dma_transfer_raw(exi_channel, data, len, mode);
-	exi_wait_for_dma_transfer_raw(exi_channel);
+	exi_channel_for_each(exi_channel) {
+		if (exi_channel->csr) {
+			tasklet_schedule(&exi_channel->tasklet);
+		}
+	}
 }
 
+/*
+ * Internal. Finish a DMA transfer.
+ * Caller holds the channel lock.
+ */
+static void exi_end_dma_transfer(struct exi_channel *exi_channel)
+{
+	struct exi_command *cmd;
 
+	cmd = exi_channel->queued_cmd;
+	if (cmd) {
+		BUG_ON(!(exi_channel->flags & EXI_DMABUSY));
 
-static void exi_command_done(struct exi_command *cmd);
+		exi_channel->flags &= ~EXI_DMABUSY;
+		dma_unmap_single(&exi_channel->device_selected->dev,
+				 cmd->dma_addr, cmd->dma_len,
+				 (cmd->opcode == EXI_OP_READ)?
+				 DMA_FROM_DEVICE:DMA_TO_DEVICE);
+
+		exi_channel->queued_cmd = NULL;
+	}
+}
 
 /*
- * Internal. Check if an exi channel has delayed work to do.
+ * Internal. Finish an "interrupt-driven immediate" transfer.
+ * Caller holds the channel lock.
+ *
+ * If more data is pending transfer, schedules a new transfer.
+ * Returns zero if no more transfers are required, non-zero otherwise.
+ *
  */
-static void exi_check_pending_work(void)
+static int exi_end_idi_transfer(struct exi_channel *exi_channel)
 {
-	struct exi_channel *exi_channel;
+	struct exi_command *cmd;
+	int len, offset;
+	unsigned int balance = 16 /* / sizeof(u32) */;
 
-	exi_channel_for_each(exi_channel) {
-		if (exi_channel->csr) {
-			tasklet_schedule(&exi_channel->tasklet);
+	cmd = exi_channel->queued_cmd;
+	if (cmd) {
+		BUG_ON((exi_channel->flags & EXI_DMABUSY));
+
+		len = (cmd->bytes_left > 4)?4:cmd->bytes_left;
+		offset = cmd->len - cmd->bytes_left;
+		exi_end_idi_transfer_raw(exi_channel,
+					 cmd->data + offset, len,
+					 cmd->opcode);
+		cmd->bytes_left -= len;
+
+		if (balance && cmd->bytes_left > 0) {
+			offset += len;
+			len = (cmd->bytes_left > balance)?
+				balance:cmd->bytes_left;
+			exi_transfer_raw(exi_channel,
+					 cmd->data + offset, len, cmd->opcode);
+			cmd->bytes_left -= len;
+		}
+
+		if (cmd->bytes_left > 0) {
+			offset = cmd->len - cmd->bytes_left;
+			len = (cmd->bytes_left > 4)?4:cmd->bytes_left;
+
+			exi_start_idi_transfer_raw(exi_channel,
+						   cmd->data + offset, len,
+						   cmd->opcode);
+		} else {
+			exi_channel->queued_cmd = NULL;
 		}
 	}
+
+	return (exi_channel->queued_cmd)?1:0;
 }
 
 /*
- * Internal. Wait until a DMA transfer completes and launch callbacks.
+ * Internal. Wait until a single transfer completes, and launch callbacks
+ * when the whole transfer is completed.
  */
-static inline void exi_wait_for_dma_transfer(struct exi_channel *exi_channel)
+static int exi_wait_for_transfer_one(struct exi_channel *exi_channel)
 {
 	struct exi_command *cmd;
 	unsigned long flags;
+	int pending = 0;
 
 	spin_lock_irqsave(&exi_channel->lock, flags);
 
-	exi_wait_for_dma_transfer_raw(exi_channel);
+	exi_wait_for_transfer_raw(exi_channel);
 
-	exi_channel->flags &= ~EXI_DMABUSY;
-	cmd = exi_channel->dma_cmd;
+	cmd = exi_channel->queued_cmd;
 	if (cmd) {
-		dma_unmap_single(&exi_channel->device_selected->dev,
-				 cmd->dma_addr, cmd->dma_len,
-				 (cmd->opcode == EXI_OP_READ)?
-				 DMA_FROM_DEVICE:DMA_TO_DEVICE);
-		exi_channel->dma_cmd = NULL;
+		if ((exi_channel->flags & EXI_DMABUSY)) {
+			/* dma transfers need just one transfer */
+			exi_end_dma_transfer(exi_channel);
+		} else {
+			pending = exi_end_idi_transfer(exi_channel);
+		}
+
 		spin_unlock_irqrestore(&exi_channel->lock, flags);
-		exi_command_done(cmd);
-		return;
+
+		if (!pending)
+			exi_command_done(cmd);
+		goto out;
 	}
 
 	spin_unlock_irqrestore(&exi_channel->lock, flags);
+out:
+	return pending;
 }
 
+/*
+ * Internal. Wait until a full transfer completes and launch callbacks.
+ */
+static void exi_wait_for_transfer(struct exi_channel *exi_channel)
+{
+	while(exi_wait_for_transfer_one(exi_channel))
+		cpu_relax();
+}
 
 /*
  * Internal. Call any done hooks.
@@ -511,11 +649,24 @@
 	opcode = cmd->opcode;
 	data = cmd->data;
 
+	/* interrupt driven immediate transfer... */
+	if ((cmd->flags & EXI_CMD_IDI)) {
+		exi_channel->queued_cmd = cmd;
+		exi_channel->flags &= ~EXI_DMABUSY;
+
+		cmd->bytes_left = cmd->len;
+		len = (cmd->bytes_left > 4)?4:cmd->bytes_left;
+		exi_start_idi_transfer_raw(exi_channel, data, len, opcode);
+
+		retval = 1; /* wait */
+		goto done;
+	}
+
 	/*
 	 * We can't do DMA transfers unless we have at least 32 bytes.
 	 * And we won't do DMA transfers if user requests that.
 	 */
-	if (len < EXI_DMA_ALIGN+1 || (cmd->flags & EXI_NODMA)) {
+	if (len < EXI_DMA_ALIGN+1 || (cmd->flags & EXI_CMD_NODMA)) {
 		exi_transfer_raw(exi_channel, data, len, opcode);
 		goto done;
 	}
@@ -594,7 +745,7 @@
 		cmd->done = exi_cmd_post_transfer;
 	}
 
-	exi_channel->dma_cmd = cmd;
+	exi_channel->queued_cmd = cmd;
 	exi_channel->flags |= EXI_DMABUSY;
 
 	cmd->dma_len = len;
@@ -631,11 +782,11 @@
 	spin_lock_irqsave(&exi_channel->lock, flags);
 
 	/* ensure atomic operations are serialized */
-	while (exi_channel->dma_cmd) {
+	while (exi_channel->queued_cmd) {
 		DBG("cmd %d while dma in flight on channel %d\n",
 		    cmd->opcode, exi_channel->channel);
 		spin_unlock_irqrestore(&exi_channel->lock, flags);
-		exi_wait_for_dma_transfer(exi_channel);
+		exi_wait_for_transfer(exi_channel);
 		spin_lock_irqsave(&exi_channel->lock, flags);
 	}
 
@@ -703,7 +854,7 @@
 	retval = exi_run_command(cmd);
 	if (retval > 0) {
 		if (in_atomic() || irqs_disabled()) {
-			exi_wait_for_dma_transfer(cmd->exi_channel);
+			exi_wait_for_transfer(cmd->exi_channel);
 		} else {
 			wait_for_completion(&complete);
 		}
@@ -752,55 +903,15 @@
  * Read or write data on a given EXI channel.
  */
 void exi_transfer(struct exi_channel *exi_channel, void *data, size_t len,
-		  int opcode)
+		  int opcode, unsigned long flags)
 {
 	struct exi_command cmd;
 
 	exi_op_transfer(&cmd, exi_channel, data, len, opcode);
+	cmd.flags |= flags;
 	exi_run_command_and_wait(&cmd);
 }
 
-
-/**
- * exi_get_id - Returns the EXI ID of a device
- * @exi_channel: channel
- * @device: device number on channel
- * @freq: clock frequency index
- *
- * Returns the EXI ID of an EXI device on a given channel.
- * Caller holds the channel lock.
- */
-u32 exi_get_id(struct exi_channel *exi_channel, unsigned int device,
-	       unsigned int freq)
-{
-	u32 __iomem *csr_reg = exi_channel->io_base + EXI_CSR;
-	u32 id = EXI_ID_INVALID;
-	u16 cmd = 0;
-
-	exi_select_raw(exi_channel, device, freq);
-	__exi_transfer_raw_u16(exi_channel, &cmd, EXI_OP_WRITE);
-	__exi_transfer_raw_u32(exi_channel, &id, EXI_OP_READ);
-	exi_deselect_raw(exi_channel);
-
-	/*
-	 * We return a EXI_ID_NONE if there is some unidentified device
-	 * inserted in memcard slot A or memcard slot B.
-	 * This, for example, allows the SD/MMC driver to see cards.
-	 */
-	if (id == EXI_ID_INVALID) {
-		if ((__to_channel(exi_channel) == 0 ||
-		     __to_channel(exi_channel) == 1)
-		    && device == 0) {
-			if (readl(csr_reg) & EXI_CSR_EXT) {
-				id = EXI_ID_NONE;
-			}
-		}
-	}
-
-	return id;
-}
-
-
 /*
  * Internal. Count number of busy exi channels given a channel mask.
  * Caller holds the channel lock.
@@ -887,8 +998,8 @@
 	DBG("channel=%d, csr=%08lx\n",
 	    exi_channel->channel, exi_channel->csr);
 
-	if (exi_channel->dma_cmd) {
-		DBG("tasklet while dma in flight on channel %d, csr = %08lx\n",
+	if (exi_channel->queued_cmd) {
+		DBG("tasklet while xfer in flight on channel %d, csr = %08lx\n",
 		    exi_channel->channel, exi_channel->csr);
 	}
 
@@ -942,7 +1053,10 @@
 	spin_unlock_irqrestore(&exi_channel->io_lock, flags);
 
 	if ((status & EXI_CSR_TCINT)) {
-		exi_wait_for_dma_transfer(exi_channel);
+		exi_wait_for_transfer_one(exi_channel);
+	}
+	if ((status & EXI_CSR_EXTIN)) {
+		wake_up(&exi_bus_waitq);
 	}
 
 	if (exi_channel->csr && !exi_is_selected(exi_channel)) {
@@ -1080,7 +1194,7 @@
 static void exi_quiesce_channel(struct exi_channel *exi_channel, u32 csr_mask)
 {
 	/* wait for dma transfers to complete */
-	exi_wait_for_dma_transfer_raw(exi_channel);
+	exi_wait_for_transfer_raw(exi_channel);
 
 	/* ack and mask all interrupts */
 	writel(EXI_CSR_TCINT | EXI_CSR_EXIINT | EXI_CSR_EXTIN | csr_mask,
@@ -1099,6 +1213,67 @@
 	}
 }
 
+/**
+ * exi_get_id - Returns the EXI ID of a device
+ * @exi_channel: channel
+ * @device: device number on channel
+ * @freq: clock frequency index
+ *
+ * Returns the EXI ID of an EXI device on a given channel.
+ * Might sleep.
+ */
+u32 exi_get_id(struct exi_device *exi_device)
+{
+	struct exi_channel *exi_channel = exi_device->exi_channel;
+	u32 __iomem *csr_reg = exi_channel->io_base + EXI_CSR;
+	u32 id = EXI_ID_INVALID;
+	u16 cmd = 0;
+
+	/* ask for the EXI id */
+	exi_dev_select(exi_device);
+	exi_dev_write(exi_device, &cmd, sizeof(cmd));
+	exi_dev_read(exi_device, &id, sizeof(id));
+	exi_dev_deselect(exi_device);
+
+	/*
+	 * We return a EXI_ID_NONE if there is some unidentified device
+	 * inserted in memcard slot A or memcard slot B.
+	 * This, for example, allows the SD/MMC driver to see cards.
+	 */
+	if (id == EXI_ID_INVALID) {
+		if ((__to_channel(exi_channel) == 0 ||
+		     __to_channel(exi_channel) == 1)
+		    && exi_device->eid.device == 0) {
+			if (readl(csr_reg) & EXI_CSR_EXT) {
+				id = EXI_ID_NONE;
+			}
+		}
+	}
+
+	return id;
+}
+
+/*
+ * Tells if there is a device inserted in one of the memory card slots.
+ */
+int exi_get_ext_line(struct exi_channel *exi_channel)
+{
+	u32 __iomem *csr_reg = exi_channel->io_base + EXI_CSR;
+	return (readl(csr_reg) & EXI_CSR_EXT)?1:0;
+}
+
+/*
+ * Saves the current insertion status of a given channel.
+ */
+void exi_update_ext_status(struct exi_channel *exi_channel)
+{
+	if (exi_get_ext_line(exi_channel)) {
+		exi_channel->flags |= EXI_EXT;
+	} else {
+		exi_channel->flags &= ~EXI_EXT;
+	}
+}
+
 /*
  * Pseudo-Internal. Initialize basic channel structures and hardware.
  */
@@ -1115,7 +1290,7 @@
 		exi_channel_init(exi_channel, channel);
 	}
 
-	/* calm down the hardware and allow external insertions */
+	/* calm down the hardware and allow extractions */
 	exi_quiesce_all_channels(EXI_CSR_EXTINMASK);
 
 	/* register the exi interrupt handler */
@@ -1143,7 +1318,6 @@
 EXPORT_SYMBOL(exi_select_raw);
 EXPORT_SYMBOL(exi_deselect_raw);
 EXPORT_SYMBOL(exi_transfer_raw);
-EXPORT_SYMBOL(exi_dma_transfer_raw);
 
 EXPORT_SYMBOL(exi_select);
 EXPORT_SYMBOL(exi_deselect);
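
For anyone wanting to try the new mode from a client driver, here is a minimal,
untested sketch based only on the interfaces visible in the diff above:
exi_transfer() now takes a flags argument, EXI_CMD_IDI requests the
interrupt-driven immediate mode, and exi_dev_select()/exi_dev_write()/
exi_dev_deselect() are used the same way exi_get_id() uses them. The function
name and the command byte are made up for illustration, and it is an assumption
here that client drivers may ask for idi mode directly rather than letting the
EXI core choose the transfer mode.

#include <linux/types.h>
#include <linux/exi.h>

/* hypothetical example: read a one-byte status register from an EXI device */
static int example_read_status(struct exi_device *exi_device, u8 *status)
{
	struct exi_channel *exi_channel = exi_device->exi_channel;
	u8 cmd = 0x53;	/* made-up device command byte */

	exi_dev_select(exi_device);			/* claim the device */
	exi_dev_write(exi_device, &cmd, sizeof(cmd));	/* send the command */

	/* read the reply using the new interrupt-driven immediate mode */
	exi_transfer(exi_channel, status, sizeof(*status), EXI_OP_READ,
		     EXI_CMD_IDI);

	exi_dev_deselect(exi_device);			/* release the device */
	return 0;
}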