Diffstat (limited to 'drivers')
59 files changed, 1970 insertions, 1325 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig index ce9dead..087a702 100644 --- a/drivers/acpi/Kconfig +++ b/drivers/acpi/Kconfig @@ -50,6 +50,7 @@ config ACPI_SLEEP config ACPI_PROCFS bool "Deprecated /proc/acpi files" depends on PROC_FS + default y ---help--- For backwards compatibility, this option allows deprecated /proc/acpi/ files to exist, even when diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c index e03de37..30238f6 100644 --- a/drivers/acpi/ac.c +++ b/drivers/acpi/ac.c @@ -27,8 +27,10 @@ #include <linux/module.h> #include <linux/init.h> #include <linux/types.h> +#ifdef CONFIG_ACPI_PROCFS #include <linux/proc_fs.h> #include <linux/seq_file.h> +#endif #include <linux/power_supply.h> #include <acpi/acpi_bus.h> #include <acpi/acpi_drivers.h> @@ -49,12 +51,15 @@ MODULE_AUTHOR("Paul Diefenbaugh"); MODULE_DESCRIPTION("ACPI AC Adapter Driver"); MODULE_LICENSE("GPL"); +#ifdef CONFIG_ACPI_PROCFS extern struct proc_dir_entry *acpi_lock_ac_dir(void); extern void *acpi_unlock_ac_dir(struct proc_dir_entry *acpi_ac_dir); +static int acpi_ac_open_fs(struct inode *inode, struct file *file); +#endif static int acpi_ac_add(struct acpi_device *device); static int acpi_ac_remove(struct acpi_device *device, int type); -static int acpi_ac_open_fs(struct inode *inode, struct file *file); +static int acpi_ac_resume(struct acpi_device *device); const static struct acpi_device_id ac_device_ids[] = { {"ACPI0003", 0}, @@ -69,6 +74,7 @@ static struct acpi_driver acpi_ac_driver = { .ops = { .add = acpi_ac_add, .remove = acpi_ac_remove, + .resume = acpi_ac_resume, }, }; @@ -80,12 +86,15 @@ struct acpi_ac { #define to_acpi_ac(x) container_of(x, struct acpi_ac, charger); +#ifdef CONFIG_ACPI_PROCFS static const struct file_operations acpi_ac_fops = { .open = acpi_ac_open_fs, .read = seq_read, .llseek = seq_lseek, .release = single_release, }; +#endif + static int get_ac_property(struct power_supply *psy, enum power_supply_property psp, union power_supply_propval *val) @@ -127,6 +136,7 @@ static int acpi_ac_get_state(struct acpi_ac *ac) return 0; } +#ifdef CONFIG_ACPI_PROCFS /* -------------------------------------------------------------------------- FS Interface (/proc) -------------------------------------------------------------------------- */ @@ -206,6 +216,7 @@ static int acpi_ac_remove_fs(struct acpi_device *device) return 0; } +#endif /* -------------------------------------------------------------------------- Driver Model @@ -264,7 +275,9 @@ static int acpi_ac_add(struct acpi_device *device) if (result) goto end; +#ifdef CONFIG_ACPI_PROCFS result = acpi_ac_add_fs(device); +#endif if (result) goto end; ac->charger.name = acpi_device_bid(device); @@ -287,13 +300,30 @@ static int acpi_ac_add(struct acpi_device *device) end: if (result) { +#ifdef CONFIG_ACPI_PROCFS acpi_ac_remove_fs(device); +#endif kfree(ac); } return result; } +static int acpi_ac_resume(struct acpi_device *device) +{ + struct acpi_ac *ac; + unsigned old_state; + if (!device || !acpi_driver_data(device)) + return -EINVAL; + ac = acpi_driver_data(device); + old_state = ac->state; + if (acpi_ac_get_state(ac)) + return 0; + if (old_state != ac->state) + kobject_uevent(&ac->charger.dev->kobj, KOBJ_CHANGE); + return 0; +} + static int acpi_ac_remove(struct acpi_device *device, int type) { acpi_status status = AE_OK; @@ -309,7 +339,9 @@ static int acpi_ac_remove(struct acpi_device *device, int type) ACPI_ALL_NOTIFY, acpi_ac_notify); if (ac->charger.dev) power_supply_unregister(&ac->charger); +#ifdef CONFIG_ACPI_PROCFS 
acpi_ac_remove_fs(device); +#endif kfree(ac); @@ -323,13 +355,17 @@ static int __init acpi_ac_init(void) if (acpi_disabled) return -ENODEV; +#ifdef CONFIG_ACPI_PROCFS acpi_ac_dir = acpi_lock_ac_dir(); if (!acpi_ac_dir) return -ENODEV; +#endif result = acpi_bus_register_driver(&acpi_ac_driver); if (result < 0) { +#ifdef CONFIG_ACPI_PROCFS acpi_unlock_ac_dir(acpi_ac_dir); +#endif return -ENODEV; } @@ -341,7 +377,9 @@ static void __exit acpi_ac_exit(void) acpi_bus_unregister_driver(&acpi_ac_driver); +#ifdef CONFIG_ACPI_PROCFS acpi_unlock_ac_dir(acpi_ac_dir); +#endif return; } diff --git a/drivers/acpi/toshiba_acpi.c b/drivers/acpi/toshiba_acpi.c index a736ef7..9e8c20c 100644 --- a/drivers/acpi/toshiba_acpi.c +++ b/drivers/acpi/toshiba_acpi.c @@ -591,9 +591,12 @@ static int __init toshiba_acpi_init(void) NULL, &toshiba_backlight_data); if (IS_ERR(toshiba_backlight_device)) { + int ret = PTR_ERR(toshiba_backlight_device); + printk(KERN_ERR "Could not register toshiba backlight device\n"); toshiba_backlight_device = NULL; toshiba_acpi_exit(); + return ret; } toshiba_backlight_device->props.max_brightness = HCI_LCD_BRIGHTNESS_LEVELS - 1; diff --git a/drivers/block/paride/pf.c b/drivers/block/paride/pf.c index ceffa60..e7fe6ca 100644 --- a/drivers/block/paride/pf.c +++ b/drivers/block/paride/pf.c @@ -488,13 +488,11 @@ static int pf_atapi(struct pf_unit *pf, char *cmd, int dlen, char *buf, char *fu return r; } -#define DBMSG(msg) ((verbose>1)?(msg):NULL) - static void pf_lock(struct pf_unit *pf, int func) { char lo_cmd[12] = { ATAPI_LOCK, pf->lun << 5, 0, 0, func, 0, 0, 0, 0, 0, 0, 0 }; - pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "unlock" : "lock"); + pf_atapi(pf, lo_cmd, 0, pf_scratch, func ? "lock" : "unlock"); } static void pf_eject(struct pf_unit *pf) @@ -555,7 +553,7 @@ static void pf_mode_sense(struct pf_unit *pf) { ATAPI_MODE_SENSE, pf->lun << 5, 0, 0, 0, 0, 0, 0, 8, 0, 0, 0 }; char buf[8]; - pf_atapi(pf, ms_cmd, 8, buf, DBMSG("mode sense")); + pf_atapi(pf, ms_cmd, 8, buf, "mode sense"); pf->media_status = PF_RW; if (buf[3] & 0x80) pf->media_status = PF_RO; @@ -591,7 +589,7 @@ static void pf_get_capacity(struct pf_unit *pf) char buf[8]; int bs; - if (pf_atapi(pf, rc_cmd, 8, buf, DBMSG("get capacity"))) { + if (pf_atapi(pf, rc_cmd, 8, buf, "get capacity")) { pf->media_status = PF_NM; return; } @@ -804,13 +802,18 @@ static int pf_next_buf(void) pf_buf += 512; pf_block++; if (!pf_run) - return 0; - if (!pf_count) return 1; - spin_lock_irqsave(&pf_spin_lock, saved_flags); - pf_end_request(1); - spin_unlock_irqrestore(&pf_spin_lock, saved_flags); - return 1; + if (!pf_count) { + spin_lock_irqsave(&pf_spin_lock, saved_flags); + pf_end_request(1); + pf_req = elv_next_request(pf_queue); + spin_unlock_irqrestore(&pf_spin_lock, saved_flags); + if (!pf_req) + return 1; + pf_count = pf_req->current_nr_sectors; + pf_buf = pf_req->buffer; + } + return 0; } static inline void next_request(int success) diff --git a/drivers/block/rd.c b/drivers/block/rd.c index 47f8ac6..82f4eec 100644 --- a/drivers/block/rd.c +++ b/drivers/block/rd.c @@ -189,6 +189,18 @@ static int ramdisk_set_page_dirty(struct page *page) return 0; } +/* + * releasepage is called by pagevec_strip/try_to_release_page if + * buffers_heads_over_limit is true. Without a releasepage function + * try_to_free_buffers is called instead. That can unset the dirty + * bit of our ram disk pages, which will be eventually freed, even + * if the page is still in use. 
+ */ +static int ramdisk_releasepage(struct page *page, gfp_t dummy) +{ + return 0; +} + static const struct address_space_operations ramdisk_aops = { .readpage = ramdisk_readpage, .prepare_write = ramdisk_prepare_write, @@ -196,6 +208,7 @@ static const struct address_space_operations ramdisk_aops = { .writepage = ramdisk_writepage, .set_page_dirty = ramdisk_set_page_dirty, .writepages = ramdisk_writepages, + .releasepage = ramdisk_releasepage, }; static int rd_blkdev_pagecache_IO(int rw, struct bio_vec *vec, sector_t sector, diff --git a/drivers/char/pcmcia/cm4000_cs.c b/drivers/char/pcmcia/cm4000_cs.c index cc5d777..02518da 100644 --- a/drivers/char/pcmcia/cm4000_cs.c +++ b/drivers/char/pcmcia/cm4000_cs.c @@ -47,7 +47,7 @@ /* #define ATR_CSUM */ #ifdef PCMCIA_DEBUG -#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle)) +#define reader_to_dev(x) (&handle_to_dev(x->p_dev)) static int pc_debug = PCMCIA_DEBUG; module_param(pc_debug, int, 0600); #define DEBUGP(n, rdr, x, args...) do { \ diff --git a/drivers/char/pcmcia/cm4040_cs.c b/drivers/char/pcmcia/cm4040_cs.c index a0b9c87..5f291bf 100644 --- a/drivers/char/pcmcia/cm4040_cs.c +++ b/drivers/char/pcmcia/cm4040_cs.c @@ -41,7 +41,7 @@ #ifdef PCMCIA_DEBUG -#define reader_to_dev(x) (&handle_to_dev(x->p_dev->handle)) +#define reader_to_dev(x) (&handle_to_dev(x->p_dev)) static int pc_debug = PCMCIA_DEBUG; module_param(pc_debug, int, 0600); #define DEBUGP(n, rdr, x, args...) do { \ diff --git a/drivers/char/rtc.c b/drivers/char/rtc.c index ec6b65e..0c66b80 100644 --- a/drivers/char/rtc.c +++ b/drivers/char/rtc.c @@ -918,6 +918,31 @@ static const struct file_operations rtc_proc_fops = { }; #endif +static resource_size_t rtc_size; + +static struct resource * __init rtc_request_region(resource_size_t size) +{ + struct resource *r; + + if (RTC_IOMAPPED) + r = request_region(RTC_PORT(0), size, "rtc"); + else + r = request_mem_region(RTC_PORT(0), size, "rtc"); + + if (r) + rtc_size = size; + + return r; +} + +static void rtc_release_region(void) +{ + if (RTC_IOMAPPED) + release_region(RTC_PORT(0), rtc_size); + else + release_mem_region(RTC_PORT(0), rtc_size); +} + static int __init rtc_init(void) { #ifdef CONFIG_PROC_FS @@ -968,10 +993,17 @@ found: } no_irq: #else - if (RTC_IOMAPPED) - r = request_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc"); - else - r = request_mem_region(RTC_PORT(0), RTC_IO_EXTENT, "rtc"); + r = rtc_request_region(RTC_IO_EXTENT); + + /* + * If we've already requested a smaller range (for example, because + * PNPBIOS or ACPI told us how the device is configured), the request + * above might fail because it's too big. + * + * If so, request just the range we actually use. + */ + if (!r) + r = rtc_request_region(RTC_IO_EXTENT_USED); if (!r) { #ifdef RTC_IRQ rtc_has_irq = 0; @@ -992,10 +1024,7 @@ no_irq: /* Yeah right, seeing as irq 8 doesn't even hit the bus. 
*/ rtc_has_irq = 0; printk(KERN_ERR "rtc: IRQ %d is not free.\n", RTC_IRQ); - if (RTC_IOMAPPED) - release_region(RTC_PORT(0), RTC_IO_EXTENT); - else - release_mem_region(RTC_PORT(0), RTC_IO_EXTENT); + rtc_release_region(); return -EIO; } hpet_rtc_timer_init(); @@ -1009,7 +1038,7 @@ no_irq: free_irq(RTC_IRQ, NULL); rtc_has_irq = 0; #endif - release_region(RTC_PORT(0), RTC_IO_EXTENT); + rtc_release_region(); return -ENODEV; } @@ -1091,10 +1120,7 @@ static void __exit rtc_exit (void) if (rtc_has_irq) free_irq (rtc_irq, &rtc_port); #else - if (RTC_IOMAPPED) - release_region(RTC_PORT(0), RTC_IO_EXTENT); - else - release_mem_region(RTC_PORT(0), RTC_IO_EXTENT); + rtc_release_region(); #ifdef RTC_IRQ if (rtc_has_irq) free_irq (RTC_IRQ, NULL); diff --git a/drivers/dma/dmaengine.c b/drivers/dma/dmaengine.c index 8248992..d59b2f4 100644 --- a/drivers/dma/dmaengine.c +++ b/drivers/dma/dmaengine.c @@ -182,10 +182,9 @@ static void dma_client_chan_alloc(struct dma_client *client) /* we are done once this client rejects * an available resource */ - if (ack == DMA_ACK) { + if (ack == DMA_ACK) dma_chan_get(chan); - kref_get(&device->refcount); - } else if (ack == DMA_NAK) + else if (ack == DMA_NAK) return; } } @@ -272,11 +271,8 @@ static void dma_clients_notify_removed(struct dma_chan *chan) /* client was holding resources for this channel so * free it */ - if (ack == DMA_ACK) { + if (ack == DMA_ACK) dma_chan_put(chan); - kref_put(&chan->device->refcount, - dma_async_device_cleanup); - } } mutex_unlock(&dma_list_mutex); @@ -316,11 +312,8 @@ void dma_async_client_unregister(struct dma_client *client) ack = client->event_callback(client, chan, DMA_RESOURCE_REMOVED); - if (ack == DMA_ACK) { + if (ack == DMA_ACK) dma_chan_put(chan); - kref_put(&chan->device->refcount, - dma_async_device_cleanup); - } } list_del(&client->global_node); @@ -397,6 +390,8 @@ int dma_async_device_register(struct dma_device *device) goto err_out; } + /* One for the channel, one of the class device */ + kref_get(&device->refcount); kref_get(&device->refcount); kref_init(&chan->refcount); chan->slow_ref = 0; diff --git a/drivers/dma/ioat.c b/drivers/dma/ioat.c index f204c39..16e0fd8 100644 --- a/drivers/dma/ioat.c +++ b/drivers/dma/ioat.c @@ -39,10 +39,14 @@ MODULE_LICENSE("GPL"); MODULE_AUTHOR("Intel Corporation"); static struct pci_device_id ioat_pci_tbl[] = { + /* I/OAT v1 platforms */ { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_CNB) }, { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SCNB) }, { PCI_DEVICE(PCI_VENDOR_ID_UNISYS, PCI_DEVICE_ID_UNISYS_DMA_DIRECTOR) }, + + /* I/OAT v2 platforms */ + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_IOAT_SNB) }, { 0, } }; @@ -74,10 +78,17 @@ static int ioat_setup_functionality(struct pci_dev *pdev, void __iomem *iobase) if (device->dma && ioat_dca_enabled) device->dca = ioat_dca_init(pdev, iobase); break; + case IOAT_VER_2_0: + device->dma = ioat_dma_probe(pdev, iobase); + if (device->dma && ioat_dca_enabled) + device->dca = ioat2_dca_init(pdev, iobase); + break; default: err = -ENODEV; break; } + if (!device->dma) + err = -ENODEV; return err; } diff --git a/drivers/dma/ioat_dca.c b/drivers/dma/ioat_dca.c index ba98571..0fa8a98 100644 --- a/drivers/dma/ioat_dca.c +++ b/drivers/dma/ioat_dca.c @@ -261,3 +261,167 @@ struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase) return dca; } + +static int ioat2_dca_add_requester(struct dca_provider *dca, struct device *dev) +{ + struct 
ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 id; + u16 global_req_table; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + id = dcaid_from_pcidev(pdev); + + if (ioatdca->requester_count == ioatdca->max_requesters) + return -ENODEV; + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == NULL) { + /* found an empty slot */ + ioatdca->requester_count++; + ioatdca->req_slots[i].pdev = pdev; + ioatdca->req_slots[i].rid = id; + global_req_table = + readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); + writel(id | IOAT_DCA_GREQID_VALID, + ioatdca->iobase + global_req_table + (i * 4)); + return i; + } + } + /* Error, ioatdma->requester_count is out of whack */ + return -EFAULT; +} + +static int ioat2_dca_remove_requester(struct dca_provider *dca, + struct device *dev) +{ + struct ioat_dca_priv *ioatdca = dca_priv(dca); + struct pci_dev *pdev; + int i; + u16 global_req_table; + + /* This implementation only supports PCI-Express */ + if (dev->bus != &pci_bus_type) + return -ENODEV; + pdev = to_pci_dev(dev); + + for (i = 0; i < ioatdca->max_requesters; i++) { + if (ioatdca->req_slots[i].pdev == pdev) { + global_req_table = + readw(ioatdca->dca_base + IOAT_DCA_GREQID_OFFSET); + writel(0, ioatdca->iobase + global_req_table + (i * 4)); + ioatdca->req_slots[i].pdev = NULL; + ioatdca->req_slots[i].rid = 0; + ioatdca->requester_count--; + return i; + } + } + return -ENODEV; +} + +static u8 ioat2_dca_get_tag(struct dca_provider *dca, int cpu) +{ + u8 tag; + + tag = ioat_dca_get_tag(dca, cpu); + tag = (~tag) & 0x1F; + return tag; +} + +static struct dca_ops ioat2_dca_ops = { + .add_requester = ioat2_dca_add_requester, + .remove_requester = ioat2_dca_remove_requester, + .get_tag = ioat2_dca_get_tag, +}; + +static int ioat2_dca_count_dca_slots(void *iobase, u16 dca_offset) +{ + int slots = 0; + u32 req; + u16 global_req_table; + + global_req_table = readw(iobase + dca_offset + IOAT_DCA_GREQID_OFFSET); + if (global_req_table == 0) + return 0; + do { + req = readl(iobase + global_req_table + (slots * sizeof(u32))); + slots++; + } while ((req & IOAT_DCA_GREQID_LASTID) == 0); + + return slots; +} + +struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase) +{ + struct dca_provider *dca; + struct ioat_dca_priv *ioatdca; + int slots; + int i; + int err; + u32 tag_map; + u16 dca_offset; + u16 csi_fsb_control; + u16 pcie_control; + u8 bit; + + if (!system_has_dca_enabled(pdev)) + return NULL; + + dca_offset = readw(iobase + IOAT_DCAOFFSET_OFFSET); + if (dca_offset == 0) + return NULL; + + slots = ioat2_dca_count_dca_slots(iobase, dca_offset); + if (slots == 0) + return NULL; + + dca = alloc_dca_provider(&ioat2_dca_ops, + sizeof(*ioatdca) + + (sizeof(struct ioat_dca_slot) * slots)); + if (!dca) + return NULL; + + ioatdca = dca_priv(dca); + ioatdca->iobase = iobase; + ioatdca->dca_base = iobase + dca_offset; + ioatdca->max_requesters = slots; + + /* some bios might not know to turn these on */ + csi_fsb_control = readw(ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); + if ((csi_fsb_control & IOAT_FSB_CAP_ENABLE_PREFETCH) == 0) { + csi_fsb_control |= IOAT_FSB_CAP_ENABLE_PREFETCH; + writew(csi_fsb_control, + ioatdca->dca_base + IOAT_FSB_CAP_ENABLE_OFFSET); + } + pcie_control = readw(ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); + if ((pcie_control & IOAT_PCI_CAP_ENABLE_MEMWR) == 0) { + pcie_control |= IOAT_PCI_CAP_ENABLE_MEMWR; + 
writew(pcie_control, + ioatdca->dca_base + IOAT_PCI_CAP_ENABLE_OFFSET); + } + + + /* TODO version, compatibility and configuration checks */ + + /* copy out the APIC to DCA tag map */ + tag_map = readl(ioatdca->dca_base + IOAT_APICID_TAG_MAP_OFFSET); + for (i = 0; i < 5; i++) { + bit = (tag_map >> (4 * i)) & 0x0f; + if (bit < 8) + ioatdca->tag_map[i] = bit | DCA_TAG_MAP_VALID; + else + ioatdca->tag_map[i] = 0; + } + + err = register_dca_provider(dca, &pdev->dev); + if (err) { + free_dca_provider(dca); + return NULL; + } + + return dca; +} diff --git a/drivers/dma/ioat_dma.c b/drivers/dma/ioat_dma.c index 7e4a785..c1c2dcc 100644 --- a/drivers/dma/ioat_dma.c +++ b/drivers/dma/ioat_dma.c @@ -36,18 +36,24 @@ #include "ioatdma_registers.h" #include "ioatdma_hw.h" -#define INITIAL_IOAT_DESC_COUNT 128 - #define to_ioat_chan(chan) container_of(chan, struct ioat_dma_chan, common) #define to_ioatdma_device(dev) container_of(dev, struct ioatdma_device, common) #define to_ioat_desc(lh) container_of(lh, struct ioat_desc_sw, node) #define tx_to_ioat_desc(tx) container_of(tx, struct ioat_desc_sw, async_tx) +static int ioat_pending_level = 4; +module_param(ioat_pending_level, int, 0644); +MODULE_PARM_DESC(ioat_pending_level, + "high-water mark for pushing ioat descriptors (default: 4)"); + /* internal functions */ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan); static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan); + +static struct ioat_desc_sw * +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); static struct ioat_desc_sw * -ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan); static inline struct ioat_dma_chan *ioat_lookup_chan_by_index( struct ioatdma_device *device, @@ -130,6 +136,12 @@ static int ioat_dma_enumerate_channels(struct ioatdma_device *device) ioat_chan->device = device; ioat_chan->reg_base = device->reg_base + (0x80 * (i + 1)); ioat_chan->xfercap = xfercap; + ioat_chan->desccount = 0; + if (ioat_chan->device->version != IOAT_VER_1_2) { + writel(IOAT_DCACTRL_CMPL_WRITE_ENABLE + | IOAT_DMA_DCA_ANY_CPU, + ioat_chan->reg_base + IOAT_DCACTRL_OFFSET); + } spin_lock_init(&ioat_chan->cleanup_lock); spin_lock_init(&ioat_chan->desc_lock); INIT_LIST_HEAD(&ioat_chan->free_desc); @@ -161,13 +173,17 @@ static void ioat_set_dest(dma_addr_t addr, tx_to_ioat_desc(tx)->dst = addr; } -static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx) +static inline void __ioat1_dma_memcpy_issue_pending( + struct ioat_dma_chan *ioat_chan); +static inline void __ioat2_dma_memcpy_issue_pending( + struct ioat_dma_chan *ioat_chan); + +static dma_cookie_t ioat1_tx_submit(struct dma_async_tx_descriptor *tx) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); struct ioat_desc_sw *first = tx_to_ioat_desc(tx); struct ioat_desc_sw *prev, *new; struct ioat_dma_descriptor *hw; - int append = 0; dma_cookie_t cookie; LIST_HEAD(new_chain); u32 copy; @@ -209,7 +225,7 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx) list_add_tail(&new->node, &new_chain); desc_count++; prev = new; - } while (len && (new = ioat_dma_get_next_descriptor(ioat_chan))); + } while (len && (new = ioat1_dma_get_next_descriptor(ioat_chan))); hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; if (new->async_tx.callback) { @@ -246,20 +262,98 @@ static dma_cookie_t ioat_tx_submit(struct dma_async_tx_descriptor *tx) first->async_tx.phys; __list_splice(&new_chain, ioat_chan->used_desc.prev); + 
ioat_chan->dmacount += desc_count; ioat_chan->pending += desc_count; - if (ioat_chan->pending >= 4) { - append = 1; - ioat_chan->pending = 0; - } + if (ioat_chan->pending >= ioat_pending_level) + __ioat1_dma_memcpy_issue_pending(ioat_chan); spin_unlock_bh(&ioat_chan->desc_lock); - if (append) - writeb(IOAT_CHANCMD_APPEND, - ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); + return cookie; +} + +static dma_cookie_t ioat2_tx_submit(struct dma_async_tx_descriptor *tx) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(tx->chan); + struct ioat_desc_sw *first = tx_to_ioat_desc(tx); + struct ioat_desc_sw *new; + struct ioat_dma_descriptor *hw; + dma_cookie_t cookie; + u32 copy; + size_t len; + dma_addr_t src, dst; + int orig_ack; + unsigned int desc_count = 0; + + /* src and dest and len are stored in the initial descriptor */ + len = first->len; + src = first->src; + dst = first->dst; + orig_ack = first->async_tx.ack; + new = first; + + /* ioat_chan->desc_lock is still in force in version 2 path */ + + do { + copy = min((u32) len, ioat_chan->xfercap); + + new->async_tx.ack = 1; + + hw = new->hw; + hw->size = copy; + hw->ctl = 0; + hw->src_addr = src; + hw->dst_addr = dst; + + len -= copy; + dst += copy; + src += copy; + desc_count++; + } while (len && (new = ioat2_dma_get_next_descriptor(ioat_chan))); + + hw->ctl = IOAT_DMA_DESCRIPTOR_CTL_CP_STS; + if (new->async_tx.callback) { + hw->ctl |= IOAT_DMA_DESCRIPTOR_CTL_INT_GN; + if (first != new) { + /* move callback into to last desc */ + new->async_tx.callback = first->async_tx.callback; + new->async_tx.callback_param + = first->async_tx.callback_param; + first->async_tx.callback = NULL; + first->async_tx.callback_param = NULL; + } + } + + new->tx_cnt = desc_count; + new->async_tx.ack = orig_ack; /* client is in control of this ack */ + + /* store the original values for use in later cleanup */ + if (new != first) { + new->src = first->src; + new->dst = first->dst; + new->len = first->len; + } + + /* cookie incr and addition to used_list must be atomic */ + cookie = ioat_chan->common.cookie; + cookie++; + if (cookie < 0) + cookie = 1; + ioat_chan->common.cookie = new->async_tx.cookie = cookie; + + ioat_chan->dmacount += desc_count; + ioat_chan->pending += desc_count; + if (ioat_chan->pending >= ioat_pending_level) + __ioat2_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); return cookie; } +/** + * ioat_dma_alloc_descriptor - allocate and return a sw and hw descriptor pair + * @ioat_chan: the channel supplying the memory pool for the descriptors + * @flags: allocation flags + */ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( struct ioat_dma_chan *ioat_chan, gfp_t flags) @@ -284,15 +378,57 @@ static struct ioat_desc_sw *ioat_dma_alloc_descriptor( dma_async_tx_descriptor_init(&desc_sw->async_tx, &ioat_chan->common); desc_sw->async_tx.tx_set_src = ioat_set_src; desc_sw->async_tx.tx_set_dest = ioat_set_dest; - desc_sw->async_tx.tx_submit = ioat_tx_submit; + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + desc_sw->async_tx.tx_submit = ioat1_tx_submit; + break; + case IOAT_VER_2_0: + desc_sw->async_tx.tx_submit = ioat2_tx_submit; + break; + } INIT_LIST_HEAD(&desc_sw->async_tx.tx_list); + desc_sw->hw = desc; desc_sw->async_tx.phys = phys; return desc_sw; } -/* returns the actual number of allocated descriptors */ +static int ioat_initial_desc_count = 256; +module_param(ioat_initial_desc_count, int, 0644); +MODULE_PARM_DESC(ioat_initial_desc_count, + "initial descriptors per channel (default: 256)"); + +/** + * 
ioat2_dma_massage_chan_desc - link the descriptors into a circle + * @ioat_chan: the channel to be massaged + */ +static void ioat2_dma_massage_chan_desc(struct ioat_dma_chan *ioat_chan) +{ + struct ioat_desc_sw *desc, *_desc; + + /* setup used_desc */ + ioat_chan->used_desc.next = ioat_chan->free_desc.next; + ioat_chan->used_desc.prev = NULL; + + /* pull free_desc out of the circle so that every node is a hw + * descriptor, but leave it pointing to the list + */ + ioat_chan->free_desc.prev->next = ioat_chan->free_desc.next; + ioat_chan->free_desc.next->prev = ioat_chan->free_desc.prev; + + /* circle link the hw descriptors */ + desc = to_ioat_desc(ioat_chan->free_desc.next); + desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + list_for_each_entry_safe(desc, _desc, ioat_chan->free_desc.next, node) { + desc->hw->next = to_ioat_desc(desc->node.next)->async_tx.phys; + } +} + +/** + * ioat_dma_alloc_chan_resources - returns the number of allocated descriptors + * @chan: the channel to be filled out + */ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); @@ -304,7 +440,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) /* have we already been set up? */ if (!list_empty(&ioat_chan->free_desc)) - return INITIAL_IOAT_DESC_COUNT; + return ioat_chan->desccount; /* Setup register to interrupt and write completion status on error */ chanctrl = IOAT_CHANCTRL_ERR_INT_EN | @@ -320,7 +456,7 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) } /* Allocate descriptors */ - for (i = 0; i < INITIAL_IOAT_DESC_COUNT; i++) { + for (i = 0; i < ioat_initial_desc_count; i++) { desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_KERNEL); if (!desc) { dev_err(&ioat_chan->device->pdev->dev, @@ -330,7 +466,10 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) list_add_tail(&desc->node, &tmp_list); } spin_lock_bh(&ioat_chan->desc_lock); + ioat_chan->desccount = i; list_splice(&tmp_list, &ioat_chan->free_desc); + if (ioat_chan->device->version != IOAT_VER_1_2) + ioat2_dma_massage_chan_desc(ioat_chan); spin_unlock_bh(&ioat_chan->desc_lock); /* allocate a completion writeback area */ @@ -347,10 +486,14 @@ static int ioat_dma_alloc_chan_resources(struct dma_chan *chan) ioat_chan->reg_base + IOAT_CHANCMP_OFFSET_HIGH); tasklet_enable(&ioat_chan->cleanup_task); - ioat_dma_start_null_desc(ioat_chan); - return i; + ioat_dma_start_null_desc(ioat_chan); /* give chain to dma device */ + return ioat_chan->desccount; } +/** + * ioat_dma_free_chan_resources - release all the descriptors + * @chan: the channel to be cleaned + */ static void ioat_dma_free_chan_resources(struct dma_chan *chan) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); @@ -364,22 +507,45 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) /* Delay 100ms after reset to allow internal DMA logic to quiesce * before removing DMA descriptor resources. 
*/ - writeb(IOAT_CHANCMD_RESET, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); + writeb(IOAT_CHANCMD_RESET, + ioat_chan->reg_base + + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); mdelay(100); spin_lock_bh(&ioat_chan->desc_lock); - list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) { - in_use_descs++; - list_del(&desc->node); - pci_pool_free(ioatdma_device->dma_pool, desc->hw, - desc->async_tx.phys); - kfree(desc); - } - list_for_each_entry_safe(desc, _desc, &ioat_chan->free_desc, node) { - list_del(&desc->node); + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + list_for_each_entry_safe(desc, _desc, + &ioat_chan->used_desc, node) { + in_use_descs++; + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + } + list_for_each_entry_safe(desc, _desc, + &ioat_chan->free_desc, node) { + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + } + break; + case IOAT_VER_2_0: + list_for_each_entry_safe(desc, _desc, + ioat_chan->free_desc.next, node) { + list_del(&desc->node); + pci_pool_free(ioatdma_device->dma_pool, desc->hw, + desc->async_tx.phys); + kfree(desc); + } + desc = to_ioat_desc(ioat_chan->free_desc.next); pci_pool_free(ioatdma_device->dma_pool, desc->hw, desc->async_tx.phys); kfree(desc); + INIT_LIST_HEAD(&ioat_chan->free_desc); + INIT_LIST_HEAD(&ioat_chan->used_desc); + break; } spin_unlock_bh(&ioat_chan->desc_lock); @@ -395,6 +561,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) ioat_chan->last_completion = ioat_chan->completion_addr = 0; ioat_chan->pending = 0; + ioat_chan->dmacount = 0; } /** @@ -406,7 +573,7 @@ static void ioat_dma_free_chan_resources(struct dma_chan *chan) * has run out. 
*/ static struct ioat_desc_sw * -ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +ioat1_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) { struct ioat_desc_sw *new = NULL; @@ -425,7 +592,82 @@ ioat_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) return new; } -static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy( +static struct ioat_desc_sw * +ioat2_dma_get_next_descriptor(struct ioat_dma_chan *ioat_chan) +{ + struct ioat_desc_sw *new = NULL; + + /* + * used.prev points to where to start processing + * used.next points to next free descriptor + * if used.prev == NULL, there are none waiting to be processed + * if used.next == used.prev.prev, there is only one free descriptor, + * and we need to use it to as a noop descriptor before + * linking in a new set of descriptors, since the device + * has probably already read the pointer to it + */ + if (ioat_chan->used_desc.prev && + ioat_chan->used_desc.next == ioat_chan->used_desc.prev->prev) { + + struct ioat_desc_sw *desc = NULL; + struct ioat_desc_sw *noop_desc = NULL; + int i; + + /* set up the noop descriptor */ + noop_desc = to_ioat_desc(ioat_chan->used_desc.next); + noop_desc->hw->size = 0; + noop_desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL; + noop_desc->hw->src_addr = 0; + noop_desc->hw->dst_addr = 0; + + ioat_chan->used_desc.next = ioat_chan->used_desc.next->next; + ioat_chan->pending++; + ioat_chan->dmacount++; + + /* get a few more descriptors */ + for (i = 16; i; i--) { + desc = ioat_dma_alloc_descriptor(ioat_chan, GFP_ATOMIC); + BUG_ON(!desc); + list_add_tail(&desc->node, ioat_chan->used_desc.next); + + desc->hw->next + = to_ioat_desc(desc->node.next)->async_tx.phys; + to_ioat_desc(desc->node.prev)->hw->next + = desc->async_tx.phys; + ioat_chan->desccount++; + } + + ioat_chan->used_desc.next = noop_desc->node.next; + } + new = to_ioat_desc(ioat_chan->used_desc.next); + prefetch(new); + ioat_chan->used_desc.next = new->node.next; + + if (ioat_chan->used_desc.prev == NULL) + ioat_chan->used_desc.prev = &new->node; + + prefetch(new->hw); + return new; +} + +static struct ioat_desc_sw *ioat_dma_get_next_descriptor( + struct ioat_dma_chan *ioat_chan) +{ + if (!ioat_chan) + return NULL; + + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + return ioat1_dma_get_next_descriptor(ioat_chan); + break; + case IOAT_VER_2_0: + return ioat2_dma_get_next_descriptor(ioat_chan); + break; + } + return NULL; +} + +static struct dma_async_tx_descriptor *ioat1_dma_prep_memcpy( struct dma_chan *chan, size_t len, int int_en) @@ -441,19 +683,62 @@ static struct dma_async_tx_descriptor *ioat_dma_prep_memcpy( return new ? &new->async_tx : NULL; } +static struct dma_async_tx_descriptor *ioat2_dma_prep_memcpy( + struct dma_chan *chan, + size_t len, + int int_en) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + struct ioat_desc_sw *new; + + spin_lock_bh(&ioat_chan->desc_lock); + new = ioat2_dma_get_next_descriptor(ioat_chan); + new->len = len; + + /* leave ioat_chan->desc_lock set in version 2 path */ + return new ? 
&new->async_tx : NULL; +} + + /** * ioat_dma_memcpy_issue_pending - push potentially unrecognized appended * descriptors to hw * @chan: DMA channel handle */ -static void ioat_dma_memcpy_issue_pending(struct dma_chan *chan) +static inline void __ioat1_dma_memcpy_issue_pending( + struct ioat_dma_chan *ioat_chan) +{ + ioat_chan->pending = 0; + writeb(IOAT_CHANCMD_APPEND, ioat_chan->reg_base + IOAT1_CHANCMD_OFFSET); +} + +static void ioat1_dma_memcpy_issue_pending(struct dma_chan *chan) { struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); if (ioat_chan->pending != 0) { - ioat_chan->pending = 0; - writeb(IOAT_CHANCMD_APPEND, - ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); + spin_lock_bh(&ioat_chan->desc_lock); + __ioat1_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); + } +} + +static inline void __ioat2_dma_memcpy_issue_pending( + struct ioat_dma_chan *ioat_chan) +{ + ioat_chan->pending = 0; + writew(ioat_chan->dmacount, + ioat_chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); +} + +static void ioat2_dma_memcpy_issue_pending(struct dma_chan *chan) +{ + struct ioat_dma_chan *ioat_chan = to_ioat_chan(chan); + + if (ioat_chan->pending != 0) { + spin_lock_bh(&ioat_chan->desc_lock); + __ioat2_dma_memcpy_issue_pending(ioat_chan); + spin_unlock_bh(&ioat_chan->desc_lock); } } @@ -465,11 +750,17 @@ static void ioat_dma_cleanup_tasklet(unsigned long data) chan->reg_base + IOAT_CHANCTRL_OFFSET); } +/** + * ioat_dma_memcpy_cleanup - cleanup up finished descriptors + * @chan: ioat channel to be cleaned up + */ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) { unsigned long phys_complete; struct ioat_desc_sw *desc, *_desc; dma_cookie_t cookie = 0; + unsigned long desc_phys; + struct ioat_desc_sw *latest_desc; prefetch(ioat_chan->completion_virt); @@ -507,56 +798,115 @@ static void ioat_dma_memcpy_cleanup(struct ioat_dma_chan *ioat_chan) cookie = 0; spin_lock_bh(&ioat_chan->desc_lock); - list_for_each_entry_safe(desc, _desc, &ioat_chan->used_desc, node) { - - /* - * Incoming DMA requests may use multiple descriptors, due to - * exceeding xfercap, perhaps. If so, only the last one will - * have a cookie, and require unmapping. - */ - if (desc->async_tx.cookie) { - cookie = desc->async_tx.cookie; + switch (ioat_chan->device->version) { + case IOAT_VER_1_2: + list_for_each_entry_safe(desc, _desc, + &ioat_chan->used_desc, node) { /* - * yes we are unmapping both _page and _single alloc'd - * regions with unmap_page. Is this *really* that bad? + * Incoming DMA requests may use multiple descriptors, + * due to exceeding xfercap, perhaps. If so, only the + * last one will have a cookie, and require unmapping. */ - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, dst), - pci_unmap_len(desc, len), - PCI_DMA_FROMDEVICE); - pci_unmap_page(ioat_chan->device->pdev, - pci_unmap_addr(desc, src), - pci_unmap_len(desc, len), - PCI_DMA_TODEVICE); - if (desc->async_tx.callback) { - desc->async_tx.callback( - desc->async_tx.callback_param); - desc->async_tx.callback = NULL; + if (desc->async_tx.cookie) { + cookie = desc->async_tx.cookie; + + /* + * yes we are unmapping both _page and _single + * alloc'd regions with unmap_page. Is this + * *really* that bad? 
+ */ + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); + + if (desc->async_tx.callback) { + desc->async_tx.callback(desc->async_tx.callback_param); + desc->async_tx.callback = NULL; + } } - } - if (desc->async_tx.phys != phys_complete) { - /* - * a completed entry, but not the last, so cleanup - * if the client is done with the descriptor - */ - if (desc->async_tx.ack) { - list_del(&desc->node); - list_add_tail(&desc->node, - &ioat_chan->free_desc); - } else + if (desc->async_tx.phys != phys_complete) { + /* + * a completed entry, but not the last, so clean + * up if the client is done with the descriptor + */ + if (desc->async_tx.ack) { + list_del(&desc->node); + list_add_tail(&desc->node, + &ioat_chan->free_desc); + } else + desc->async_tx.cookie = 0; + } else { + /* + * last used desc. Do not remove, so we can + * append from it, but don't look at it next + * time, either + */ desc->async_tx.cookie = 0; - } else { - /* - * last used desc. Do not remove, so we can append from - * it, but don't look at it next time, either - */ - desc->async_tx.cookie = 0; - /* TODO check status bits? */ + /* TODO check status bits? */ + break; + } + } + break; + case IOAT_VER_2_0: + /* has some other thread has already cleaned up? */ + if (ioat_chan->used_desc.prev == NULL) break; + + /* work backwards to find latest finished desc */ + desc = to_ioat_desc(ioat_chan->used_desc.next); + latest_desc = NULL; + do { + desc = to_ioat_desc(desc->node.prev); + desc_phys = (unsigned long)desc->async_tx.phys + & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR; + if (desc_phys == phys_complete) { + latest_desc = desc; + break; + } + } while (&desc->node != ioat_chan->used_desc.prev); + + if (latest_desc != NULL) { + + /* work forwards to clear finished descriptors */ + for (desc = to_ioat_desc(ioat_chan->used_desc.prev); + &desc->node != latest_desc->node.next && + &desc->node != ioat_chan->used_desc.next; + desc = to_ioat_desc(desc->node.next)) { + if (desc->async_tx.cookie) { + cookie = desc->async_tx.cookie; + desc->async_tx.cookie = 0; + + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, dst), + pci_unmap_len(desc, len), + PCI_DMA_FROMDEVICE); + pci_unmap_page(ioat_chan->device->pdev, + pci_unmap_addr(desc, src), + pci_unmap_len(desc, len), + PCI_DMA_TODEVICE); + + if (desc->async_tx.callback) { + desc->async_tx.callback(desc->async_tx.callback_param); + desc->async_tx.callback = NULL; + } + } + } + + /* move used.prev up beyond those that are finished */ + if (&desc->node == ioat_chan->used_desc.next) + ioat_chan->used_desc.prev = NULL; + else + ioat_chan->used_desc.prev = &desc->node; } + break; } spin_unlock_bh(&ioat_chan->desc_lock); @@ -621,8 +971,6 @@ static enum dma_status ioat_dma_is_complete(struct dma_chan *chan, return dma_async_is_complete(cookie, last_complete, last_used); } -/* PCI API */ - static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) { struct ioat_desc_sw *desc; @@ -633,21 +981,34 @@ static void ioat_dma_start_null_desc(struct ioat_dma_chan *ioat_chan) desc->hw->ctl = IOAT_DMA_DESCRIPTOR_NUL | IOAT_DMA_DESCRIPTOR_CTL_INT_GN | IOAT_DMA_DESCRIPTOR_CTL_CP_STS; - desc->hw->next = 0; desc->hw->size = 0; desc->hw->src_addr = 0; desc->hw->dst_addr = 0; desc->async_tx.ack = 1; - - list_add_tail(&desc->node, &ioat_chan->used_desc); + switch (ioat_chan->device->version) { + case 
IOAT_VER_1_2: + desc->hw->next = 0; + list_add_tail(&desc->node, &ioat_chan->used_desc); + + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT1_CHAINADDR_OFFSET_HIGH); + + writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + + IOAT_CHANCMD_OFFSET(ioat_chan->device->version)); + break; + case IOAT_VER_2_0: + writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_LOW); + writel(((u64) desc->async_tx.phys) >> 32, + ioat_chan->reg_base + IOAT2_CHAINADDR_OFFSET_HIGH); + + ioat_chan->dmacount++; + __ioat2_dma_memcpy_issue_pending(ioat_chan); + break; + } spin_unlock_bh(&ioat_chan->desc_lock); - - writel(((u64) desc->async_tx.phys) & 0x00000000FFFFFFFF, - ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_LOW); - writel(((u64) desc->async_tx.phys) >> 32, - ioat_chan->reg_base + IOAT_CHAINADDR_OFFSET_HIGH); - - writeb(IOAT_CHANCMD_START, ioat_chan->reg_base + IOAT_CHANCMD_OFFSET); } /* @@ -693,14 +1054,14 @@ static int ioat_dma_self_test(struct ioatdma_device *device) dma_chan = container_of(device->common.channels.next, struct dma_chan, device_node); - if (ioat_dma_alloc_chan_resources(dma_chan) < 1) { + if (device->common.device_alloc_chan_resources(dma_chan) < 1) { dev_err(&device->pdev->dev, "selftest cannot allocate chan resource\n"); err = -ENODEV; goto out; } - tx = ioat_dma_prep_memcpy(dma_chan, IOAT_TEST_SIZE, 0); + tx = device->common.device_prep_dma_memcpy(dma_chan, IOAT_TEST_SIZE, 0); if (!tx) { dev_err(&device->pdev->dev, "Self-test prep failed, disabling\n"); @@ -710,24 +1071,25 @@ static int ioat_dma_self_test(struct ioatdma_device *device) async_tx_ack(tx); addr = dma_map_single(dma_chan->device->dev, src, IOAT_TEST_SIZE, - DMA_TO_DEVICE); - ioat_set_src(addr, tx, 0); + DMA_TO_DEVICE); + tx->tx_set_src(addr, tx, 0); addr = dma_map_single(dma_chan->device->dev, dest, IOAT_TEST_SIZE, - DMA_FROM_DEVICE); - ioat_set_dest(addr, tx, 0); + DMA_FROM_DEVICE); + tx->tx_set_dest(addr, tx, 0); tx->callback = ioat_dma_test_callback; tx->callback_param = (void *)0x8086; - cookie = ioat_tx_submit(tx); + cookie = tx->tx_submit(tx); if (cookie < 0) { dev_err(&device->pdev->dev, "Self-test setup failed, disabling\n"); err = -ENODEV; goto free_resources; } - ioat_dma_memcpy_issue_pending(dma_chan); + device->common.device_issue_pending(dma_chan); msleep(1); - if (ioat_dma_is_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (device->common.device_is_tx_complete(dma_chan, cookie, NULL, NULL) + != DMA_SUCCESS) { dev_err(&device->pdev->dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -741,7 +1103,7 @@ static int ioat_dma_self_test(struct ioatdma_device *device) } free_resources: - ioat_dma_free_chan_resources(dma_chan); + device->common.device_free_chan_resources(dma_chan); out: kfree(src); kfree(dest); @@ -941,16 +1303,28 @@ struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, INIT_LIST_HEAD(&device->common.channels); ioat_dma_enumerate_channels(device); - dma_cap_set(DMA_MEMCPY, device->common.cap_mask); device->common.device_alloc_chan_resources = ioat_dma_alloc_chan_resources; device->common.device_free_chan_resources = ioat_dma_free_chan_resources; - device->common.device_prep_dma_memcpy = ioat_dma_prep_memcpy; + device->common.dev = &pdev->dev; + + dma_cap_set(DMA_MEMCPY, device->common.cap_mask); device->common.device_is_tx_complete = ioat_dma_is_complete; - device->common.device_issue_pending 
= ioat_dma_memcpy_issue_pending; device->common.device_dependency_added = ioat_dma_dependency_added; - device->common.dev = &pdev->dev; + switch (device->version) { + case IOAT_VER_1_2: + device->common.device_prep_dma_memcpy = ioat1_dma_prep_memcpy; + device->common.device_issue_pending = + ioat1_dma_memcpy_issue_pending; + break; + case IOAT_VER_2_0: + device->common.device_prep_dma_memcpy = ioat2_dma_prep_memcpy; + device->common.device_issue_pending = + ioat2_dma_memcpy_issue_pending; + break; + } + dev_err(&device->pdev->dev, "Intel(R) I/OAT DMA Engine found," " %d channels, device version 0x%02x, driver version %s\n", diff --git a/drivers/dma/ioatdma.h b/drivers/dma/ioatdma.h index 5f9881e7..b668234 100644 --- a/drivers/dma/ioatdma.h +++ b/drivers/dma/ioatdma.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved. * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -28,7 +28,7 @@ #include <linux/cache.h> #include <linux/pci_ids.h> -#define IOAT_DMA_VERSION "1.26" +#define IOAT_DMA_VERSION "2.04" enum ioat_interrupt { none = 0, @@ -39,6 +39,8 @@ enum ioat_interrupt { }; #define IOAT_LOW_COMPLETION_MASK 0xffffffc0 +#define IOAT_DMA_DCA_ANY_CPU ~0 + /** * struct ioatdma_device - internal representation of a IOAT device @@ -47,6 +49,9 @@ enum ioat_interrupt { * @dma_pool: for allocating DMA descriptors * @common: embedded struct dma_device * @version: version of ioatdma device + * @irq_mode: which style irq to use + * @msix_entries: irq handlers + * @idx: per channel data */ struct ioatdma_device { @@ -63,23 +68,7 @@ struct ioatdma_device { /** * struct ioat_dma_chan - internal representation of a DMA channel - * @device: - * @reg_base: - * @sw_in_use: - * @completion: - * @completion_low: - * @completion_high: - * @completed_cookie: last cookie seen completed on cleanup - * @cookie: value of last cookie given to client - * @last_completion: - * @xfercap: - * @desc_lock: - * @free_desc: - * @used_desc: - * @resource: - * @device_node: */ - struct ioat_dma_chan { void __iomem *reg_base; @@ -95,6 +84,8 @@ struct ioat_dma_chan { struct list_head used_desc; int pending; + int dmacount; + int desccount; struct ioatdma_device *device; struct dma_chan common; @@ -134,12 +125,13 @@ struct ioat_desc_sw { struct ioatdma_device *ioat_dma_probe(struct pci_dev *pdev, void __iomem *iobase); void ioat_dma_remove(struct ioatdma_device *device); -struct dca_provider *ioat_dca_init(struct pci_dev *pdev, - void __iomem *iobase); +struct dca_provider *ioat_dca_init(struct pci_dev *pdev, void __iomem *iobase); +struct dca_provider *ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); #else #define ioat_dma_probe(pdev, iobase) NULL #define ioat_dma_remove(device) do { } while (0) #define ioat_dca_init(pdev, iobase) NULL +#define ioat2_dca_init(pdev, iobase) NULL #endif #endif /* IOATDMA_H */ diff --git a/drivers/dma/ioatdma_hw.h b/drivers/dma/ioatdma_hw.h index 9e7434e..dd470fa 100644 --- a/drivers/dma/ioatdma_hw.h +++ b/drivers/dma/ioatdma_hw.h @@ -1,5 +1,5 @@ /* - * Copyright(c) 2004 - 2006 Intel Corporation. All rights reserved. + * Copyright(c) 2004 - 2007 Intel Corporation. All rights reserved. 
* * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free @@ -22,12 +22,19 @@ #define _IOAT_HW_H_ /* PCI Configuration Space Values */ -#define IOAT_PCI_VID 0x8086 -#define IOAT_PCI_DID 0x1A38 -#define IOAT_PCI_RID 0x00 -#define IOAT_PCI_SVID 0x8086 -#define IOAT_PCI_SID 0x8086 -#define IOAT_VER_1_2 0x12 /* Version 1.2 */ +#define IOAT_PCI_VID 0x8086 + +/* CB device ID's */ +#define IOAT_PCI_DID_5000 0x1A38 +#define IOAT_PCI_DID_CNB 0x360B +#define IOAT_PCI_DID_SCNB 0x65FF +#define IOAT_PCI_DID_SNB 0x402F + +#define IOAT_PCI_RID 0x00 +#define IOAT_PCI_SVID 0x8086 +#define IOAT_PCI_SID 0x8086 +#define IOAT_VER_1_2 0x12 /* Version 1.2 */ +#define IOAT_VER_2_0 0x20 /* Version 2.0 */ struct ioat_dma_descriptor { uint32_t size; @@ -47,6 +54,16 @@ struct ioat_dma_descriptor { #define IOAT_DMA_DESCRIPTOR_CTL_CP_STS 0x00000008 #define IOAT_DMA_DESCRIPTOR_CTL_FRAME 0x00000010 #define IOAT_DMA_DESCRIPTOR_NUL 0x00000020 -#define IOAT_DMA_DESCRIPTOR_OPCODE 0xFF000000 +#define IOAT_DMA_DESCRIPTOR_CTL_SP_BRK 0x00000040 +#define IOAT_DMA_DESCRIPTOR_CTL_DP_BRK 0x00000080 +#define IOAT_DMA_DESCRIPTOR_CTL_BNDL 0x00000100 +#define IOAT_DMA_DESCRIPTOR_CTL_DCA 0x00000200 +#define IOAT_DMA_DESCRIPTOR_CTL_BUFHINT 0x00000400 + +#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_CONTEXT 0xFF000000 +#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_DMA 0x00000000 + +#define IOAT_DMA_DESCRIPTOR_CTL_CONTEXT_DCA 0x00000001 +#define IOAT_DMA_DESCRIPTOR_CTL_OPCODE_MASK 0xFF000000 #endif diff --git a/drivers/dma/ioatdma_registers.h b/drivers/dma/ioatdma_registers.h index baaab5e..9832d7e 100644 --- a/drivers/dma/ioatdma_registers.h +++ b/drivers/dma/ioatdma_registers.h @@ -42,26 +42,25 @@ #define IOAT_INTRCTRL_MASTER_INT_EN 0x01 /* Master Interrupt Enable */ #define IOAT_INTRCTRL_INT_STATUS 0x02 /* ATTNSTATUS -or- Channel Int */ #define IOAT_INTRCTRL_INT 0x04 /* INT_STATUS -and- MASTER_INT_EN */ -#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */ +#define IOAT_INTRCTRL_MSIX_VECTOR_CONTROL 0x08 /* Enable all MSI-X vectors */ #define IOAT_ATTNSTATUS_OFFSET 0x04 /* Each bit is a channel */ #define IOAT_VER_OFFSET 0x08 /* 8-bit */ #define IOAT_VER_MAJOR_MASK 0xF0 #define IOAT_VER_MINOR_MASK 0x0F -#define GET_IOAT_VER_MAJOR(x) ((x) & IOAT_VER_MAJOR_MASK) +#define GET_IOAT_VER_MAJOR(x) (((x) & IOAT_VER_MAJOR_MASK) >> 4) #define GET_IOAT_VER_MINOR(x) ((x) & IOAT_VER_MINOR_MASK) #define IOAT_PERPORTOFFSET_OFFSET 0x0A /* 16-bit */ #define IOAT_INTRDELAY_OFFSET 0x0C /* 16-bit */ #define IOAT_INTRDELAY_INT_DELAY_MASK 0x3FFF /* Interrupt Delay Time */ -#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalesing Supported */ +#define IOAT_INTRDELAY_COALESE_SUPPORT 0x8000 /* Interrupt Coalescing Supported */ #define IOAT_DEVICE_STATUS_OFFSET 0x0E /* 16-bit */ #define IOAT_DEVICE_STATUS_DEGRADED_MODE 0x0001 - #define IOAT_CHANNEL_MMIO_SIZE 0x80 /* Each Channel MMIO space is this size */ /* DMA Channel Registers */ @@ -74,25 +73,101 @@ #define IOAT_CHANCTRL_ERR_COMPLETION_EN 0x0004 #define IOAT_CHANCTRL_INT_DISABLE 0x0001 -#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA channel compatability */ -#define IOAT_DMA_COMP_V1 0x0001 /* Compatability with DMA version 1 */ - -#define IOAT_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ -#define IOAT_CHANSTS_OFFSET_LOW 0x04 -#define IOAT_CHANSTS_OFFSET_HIGH 0x08 -#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR 0xFFFFFFFFFFFFFFC0UL +#define IOAT_DMA_COMP_OFFSET 0x02 /* 16-bit DMA 
channel compatibility */ +#define IOAT_DMA_COMP_V1 0x0001 /* Compatibility with DMA version 1 */ +#define IOAT_DMA_COMP_V2 0x0002 /* Compatibility with DMA version 2 */ + + +#define IOAT1_CHANSTS_OFFSET 0x04 /* 64-bit Channel Status Register */ +#define IOAT2_CHANSTS_OFFSET 0x08 /* 64-bit Channel Status Register */ +#define IOAT_CHANSTS_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANSTS_OFFSET : IOAT2_CHANSTS_OFFSET) +#define IOAT1_CHANSTS_OFFSET_LOW 0x04 +#define IOAT2_CHANSTS_OFFSET_LOW 0x08 +#define IOAT_CHANSTS_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANSTS_OFFSET_LOW : IOAT2_CHANSTS_OFFSET_LOW) +#define IOAT1_CHANSTS_OFFSET_HIGH 0x08 +#define IOAT2_CHANSTS_OFFSET_HIGH 0x0C +#define IOAT_CHANSTS_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANSTS_OFFSET_HIGH : IOAT2_CHANSTS_OFFSET_HIGH) +#define IOAT_CHANSTS_COMPLETED_DESCRIPTOR_ADDR ~0x3F #define IOAT_CHANSTS_SOFT_ERR 0x0000000000000010 +#define IOAT_CHANSTS_UNAFFILIATED_ERR 0x0000000000000008 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS 0x0000000000000007 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_ACTIVE 0x0 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_DONE 0x1 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_SUSPENDED 0x2 #define IOAT_CHANSTS_DMA_TRANSFER_STATUS_HALTED 0x3 -#define IOAT_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ -#define IOAT_CHAINADDR_OFFSET_LOW 0x0C -#define IOAT_CHAINADDR_OFFSET_HIGH 0x10 -#define IOAT_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */ + +#define IOAT_CHAN_DMACOUNT_OFFSET 0x06 /* 16-bit DMA Count register */ + +#define IOAT_DCACTRL_OFFSET 0x30 /* 32 bit Direct Cache Access Control Register */ +#define IOAT_DCACTRL_CMPL_WRITE_ENABLE 0x10000 +#define IOAT_DCACTRL_TARGET_CPU_MASK 0xFFFF /* APIC ID */ + +/* CB DCA Memory Space Registers */ +#define IOAT_DCAOFFSET_OFFSET 0x14 +/* CB_BAR + IOAT_DCAOFFSET value */ +#define IOAT_DCA_VER_OFFSET 0x00 +#define IOAT_DCA_VER_MAJOR_MASK 0xF0 +#define IOAT_DCA_VER_MINOR_MASK 0x0F + +#define IOAT_DCA_COMP_OFFSET 0x02 +#define IOAT_DCA_COMP_V1 0x1 + +#define IOAT_FSB_CAPABILITY_OFFSET 0x04 +#define IOAT_FSB_CAPABILITY_PREFETCH 0x1 + +#define IOAT_PCI_CAPABILITY_OFFSET 0x06 +#define IOAT_PCI_CAPABILITY_MEMWR 0x1 + +#define IOAT_FSB_CAP_ENABLE_OFFSET 0x08 +#define IOAT_FSB_CAP_ENABLE_PREFETCH 0x1 + +#define IOAT_PCI_CAP_ENABLE_OFFSET 0x0A +#define IOAT_PCI_CAP_ENABLE_MEMWR 0x1 + +#define IOAT_APICID_TAG_MAP_OFFSET 0x0C +#define IOAT_APICID_TAG_MAP_TAG0 0x0000000F +#define IOAT_APICID_TAG_MAP_TAG0_SHIFT 0 +#define IOAT_APICID_TAG_MAP_TAG1 0x000000F0 +#define IOAT_APICID_TAG_MAP_TAG1_SHIFT 4 +#define IOAT_APICID_TAG_MAP_TAG2 0x00000F00 +#define IOAT_APICID_TAG_MAP_TAG2_SHIFT 8 +#define IOAT_APICID_TAG_MAP_TAG3 0x0000F000 +#define IOAT_APICID_TAG_MAP_TAG3_SHIFT 12 +#define IOAT_APICID_TAG_MAP_TAG4 0x000F0000 +#define IOAT_APICID_TAG_MAP_TAG4_SHIFT 16 +#define IOAT_APICID_TAG_CB2_VALID 0x8080808080 + +#define IOAT_DCA_GREQID_OFFSET 0x10 +#define IOAT_DCA_GREQID_SIZE 0x04 +#define IOAT_DCA_GREQID_MASK 0xFFFF +#define IOAT_DCA_GREQID_IGNOREFUN 0x10000000 +#define IOAT_DCA_GREQID_VALID 0x20000000 +#define IOAT_DCA_GREQID_LASTID 0x80000000 + + + +#define IOAT1_CHAINADDR_OFFSET 0x0C /* 64-bit Descriptor Chain Address Register */ +#define IOAT2_CHAINADDR_OFFSET 0x10 /* 64-bit Descriptor Chain Address Register */ +#define IOAT_CHAINADDR_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ + ? 
IOAT1_CHAINADDR_OFFSET : IOAT2_CHAINADDR_OFFSET) +#define IOAT1_CHAINADDR_OFFSET_LOW 0x0C +#define IOAT2_CHAINADDR_OFFSET_LOW 0x10 +#define IOAT_CHAINADDR_OFFSET_LOW(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHAINADDR_OFFSET_LOW : IOAT2_CHAINADDR_OFFSET_LOW) +#define IOAT1_CHAINADDR_OFFSET_HIGH 0x10 +#define IOAT2_CHAINADDR_OFFSET_HIGH 0x14 +#define IOAT_CHAINADDR_OFFSET_HIGH(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHAINADDR_OFFSET_HIGH : IOAT2_CHAINADDR_OFFSET_HIGH) + +#define IOAT1_CHANCMD_OFFSET 0x14 /* 8-bit DMA Channel Command Register */ +#define IOAT2_CHANCMD_OFFSET 0x04 /* 8-bit DMA Channel Command Register */ +#define IOAT_CHANCMD_OFFSET(ver) ((ver) < IOAT_VER_2_0 \ + ? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET) #define IOAT_CHANCMD_RESET 0x20 #define IOAT_CHANCMD_RESUME 0x10 #define IOAT_CHANCMD_ABORT 0x08 @@ -124,6 +199,7 @@ #define IOAT_CHANERR_COMPLETION_ADDR_ERR 0x1000 #define IOAT_CHANERR_INT_CONFIGURATION_ERR 0x2000 #define IOAT_CHANERR_SOFT_ERR 0x4000 +#define IOAT_CHANERR_UNAFFILIATED_ERR 0x8000 #define IOAT_CHANERR_MASK_OFFSET 0x2C /* 32-bit Channel Error Register */ diff --git a/drivers/edac/i5000_edac.c b/drivers/edac/i5000_edac.c index 96f7e63..a1f24c4 100644 --- a/drivers/edac/i5000_edac.c +++ b/drivers/edac/i5000_edac.c @@ -1462,7 +1462,7 @@ MODULE_DEVICE_TABLE(pci, i5000_pci_tbl); * */ static struct pci_driver i5000_driver = { - .name = __stringify(KBUILD_BASENAME), + .name = KBUILD_BASENAME, .probe = i5000_init_one, .remove = __devexit_p(i5000_remove_one), .id_table = i5000_pci_tbl, diff --git a/drivers/ide/Kconfig b/drivers/ide/Kconfig index d1e8df1..e445fe6 100644 --- a/drivers/ide/Kconfig +++ b/drivers/ide/Kconfig @@ -203,10 +203,6 @@ config BLK_DEV_IDECD CD-ROM drive, you can say N to all other CD-ROM options, but be sure to say Y or M to "ISO 9660 CD-ROM file system support". - Note that older versions of LILO (LInux LOader) cannot properly deal - with IDE/ATAPI CD-ROMs, so install LILO 16 or higher, available from - <http://lilo.go.dyndns.org/>. - To compile this driver as a module, choose M here: the module will be called ide-cd. 
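The ioatdma_registers.h hunk above replaces the fixed register offsets with version-keyed macros such as IOAT_CHANCMD_OFFSET(ver), so code paths shared between I/OAT v1.2 and v2.0 (the channel reset in ioat_dma_free_chan_resources, or the null-descriptor start) can pick the right register layout at run time. Below is a minimal stand-alone sketch of that selection pattern, with the offset values copied from the hunk and a throwaway main() added purely for illustration; it is not part of the patch.

	/* Illustrative sketch only; not part of the patch above. */
	#include <stdio.h>

	#define IOAT_VER_1_2		0x12
	#define IOAT_VER_2_0		0x20

	#define IOAT1_CHANCMD_OFFSET	0x14	/* v1.2 layout */
	#define IOAT2_CHANCMD_OFFSET	0x04	/* v2.0 layout */
	#define IOAT_CHANCMD_OFFSET(ver)	((ver) < IOAT_VER_2_0 \
				? IOAT1_CHANCMD_OFFSET : IOAT2_CHANCMD_OFFSET)

	int main(void)
	{
		unsigned char ver[] = { IOAT_VER_1_2, IOAT_VER_2_0 };
		int i;

		/* each device version resolves to its own command-register offset */
		for (i = 0; i < 2; i++)
			printf("device version 0x%02x -> CHANCMD offset 0x%02x\n",
			       ver[i], IOAT_CHANCMD_OFFSET(ver[i]));
		return 0;
	}

Keeping the selection in a macro leaves the per-version offsets next to their definitions in the header instead of scattering version checks through the driver.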
diff --git a/drivers/ide/cris/ide-cris.c b/drivers/ide/cris/ide-cris.c index e196aef..7f5bc2e 100644 --- a/drivers/ide/cris/ide-cris.c +++ b/drivers/ide/cris/ide-cris.c @@ -748,8 +748,7 @@ static void cris_set_dma_mode(ide_drive_t *drive, const u8 speed) hold = ATA_DMA2_HOLD; break; default: - BUG(); - break; + return; } if (speed >= XFER_UDMA_0) diff --git a/drivers/ide/ide-io.c b/drivers/ide/ide-io.c index 7550118..db22d1f 100644 --- a/drivers/ide/ide-io.c +++ b/drivers/ide/ide-io.c @@ -885,7 +885,6 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, return do_rw_taskfile(drive, args); } else if (rq->cmd_type == REQ_TYPE_ATA_TASK) { u8 *args = rq->buffer; - u8 sel; if (!args) goto done; @@ -903,10 +902,7 @@ static ide_startstop_t execute_drive_cmd (ide_drive_t *drive, hwif->OUTB(args[3], IDE_SECTOR_REG); hwif->OUTB(args[4], IDE_LCYL_REG); hwif->OUTB(args[5], IDE_HCYL_REG); - sel = (args[6] & ~0x10); - if (drive->select.b.unit) - sel |= 0x10; - hwif->OUTB(sel, IDE_SELECT_REG); + hwif->OUTB((args[6] & 0xEF)|drive->select.all, IDE_SELECT_REG); ide_cmd(drive, args[0], args[2], &drive_cmd_intr); return ide_started; } else if (rq->cmd_type == REQ_TYPE_ATA_CMD) { diff --git a/drivers/ide/ide-lib.c b/drivers/ide/ide-lib.c index af86433..1609b86 100644 --- a/drivers/ide/ide-lib.c +++ b/drivers/ide/ide-lib.c @@ -514,6 +514,7 @@ static u8 ide_dump_ata_status(ide_drive_t *drive, const char *msg, u8 stat) if (drive->addressing == 1) { __u64 sectors = 0; u32 low = 0, high = 0; + hwif->OUTB(drive->ctl&~0x80, IDE_CONTROL_REG); low = ide_read_24(drive); hwif->OUTB(drive->ctl|0x80, IDE_CONTROL_REG); high = ide_read_24(drive); diff --git a/drivers/ide/pci/cmd64x.c b/drivers/ide/pci/cmd64x.c index ea0143e..51fca44 100644 --- a/drivers/ide/pci/cmd64x.c +++ b/drivers/ide/pci/cmd64x.c @@ -1,5 +1,5 @@ /* - * linux/drivers/ide/pci/cmd64x.c Version 1.50 May 10, 2007 + * linux/drivers/ide/pci/cmd64x.c Version 1.51 Nov 8, 2007 * * cmd64x.c: Enable interrupts at initialization time on Ultra/PCI machines. 
* Due to massive hardware bugs, UltraDMA is only supported @@ -339,7 +339,8 @@ static int cmd648_ide_dma_end (ide_drive_t *drive) u8 mrdmode = inb(hwif->dma_master + 0x01); /* clear the interrupt bit */ - outb(mrdmode | irq_mask, hwif->dma_master + 0x01); + outb((mrdmode & ~(MRDMODE_INTR_CH0 | MRDMODE_INTR_CH1)) | irq_mask, + hwif->dma_master + 0x01); return err; } diff --git a/drivers/ide/pci/cs5530.c b/drivers/ide/pci/cs5530.c index 5994089..5476903 100644 --- a/drivers/ide/pci/cs5530.c +++ b/drivers/ide/pci/cs5530.c @@ -117,8 +117,7 @@ static void cs5530_set_dma_mode(ide_drive_t *drive, const u8 mode) case XFER_MW_DMA_1: timings = 0x00012121; break; case XFER_MW_DMA_2: timings = 0x00002020; break; default: - BUG(); - break; + return; } basereg = CS5530_BASEREG(drive->hwif); reg = inl(basereg + 4); /* get drive0 config register */ diff --git a/drivers/ide/pci/it821x.c b/drivers/ide/pci/it821x.c index 5c99754..99b7d76 100644 --- a/drivers/ide/pci/it821x.c +++ b/drivers/ide/pci/it821x.c @@ -653,8 +653,7 @@ static const struct ide_port_info it821x_chipsets[] __devinitdata = { static int __devinit it821x_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]); - return 0; + return ide_setup_pci_device(dev, &it821x_chipsets[id->driver_data]); } static const struct pci_device_id it821x_pci_tbl[] = { diff --git a/drivers/ide/pci/jmicron.c b/drivers/ide/pci/jmicron.c index bdf64d9..0083eaf 100644 --- a/drivers/ide/pci/jmicron.c +++ b/drivers/ide/pci/jmicron.c @@ -139,8 +139,7 @@ static const struct ide_port_info jmicron_chipset __devinitdata = { static int __devinit jmicron_init_one(struct pci_dev *dev, const struct pci_device_id *id) { - ide_setup_pci_device(dev, &jmicron_chipset); - return 0; + return ide_setup_pci_device(dev, &jmicron_chipset); } /* All JMB PATA controllers have and will continue to have the same diff --git a/drivers/ide/pci/sc1200.c b/drivers/ide/pci/sc1200.c index 0a7b320..707d5ff 100644 --- a/drivers/ide/pci/sc1200.c +++ b/drivers/ide/pci/sc1200.c @@ -186,8 +186,7 @@ static void sc1200_set_dma_mode(ide_drive_t *drive, const u8 mode) } break; default: - BUG(); - break; + return; } if (unit == 0) { /* are we configuring drive0? */ diff --git a/drivers/ide/pci/sis5513.c b/drivers/ide/pci/sis5513.c index 6b7bb53..f6e2ab3 100644 --- a/drivers/ide/pci/sis5513.c +++ b/drivers/ide/pci/sis5513.c @@ -356,7 +356,6 @@ static void sis_set_dma_mode(ide_drive_t *drive, const u8 speed) sis_program_timings(drive, speed); break; default: - BUG(); break; } } diff --git a/drivers/ide/ppc/pmac.c b/drivers/ide/ppc/pmac.c index 816b531..5afdfef 100644 --- a/drivers/ide/ppc/pmac.c +++ b/drivers/ide/ppc/pmac.c @@ -1138,6 +1138,7 @@ pmac_ide_setup_device(pmac_ide_hwif_t *pmif, ide_hwif_t *hwif) hwif->drives[0].autotune = IDE_TUNE_AUTO; hwif->drives[1].autotune = IDE_TUNE_AUTO; hwif->host_flags = IDE_HFLAG_SET_PIO_MODE_KEEP_DMA | + IDE_HFLAG_PIO_NO_DOWNGRADE | IDE_HFLAG_POST_SET_MODE; hwif->pio_mask = ATA_PIO4; hwif->set_pio_mode = pmac_ide_set_pio_mode; diff --git a/drivers/ide/setup-pci.c b/drivers/ide/setup-pci.c index 02d14bf..25fd090 100644 --- a/drivers/ide/setup-pci.c +++ b/drivers/ide/setup-pci.c @@ -7,11 +7,6 @@ * May be copied or modified under the terms of the GNU General Public License */ -/* - * This module provides support for automatic detection and - * configuration of all PCI IDE interfaces present in a system. 
- */ - #include <linux/module.h> #include <linux/types.h> #include <linux/kernel.h> diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c index 9d716fa..3b92a61 100644 --- a/drivers/lguest/lguest_user.c +++ b/drivers/lguest/lguest_user.c @@ -184,7 +184,7 @@ static int initialize(struct file *file, const unsigned long __user *input) free_regs: free_page(lg->regs_page); release_guest: - memset(lg, 0, sizeof(*lg)); + kfree(lg); unlock: mutex_unlock(&lguest_lock); return err; diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c index 1cfc984..a5aad8c 100644 --- a/drivers/md/raid5.c +++ b/drivers/md/raid5.c @@ -688,7 +688,8 @@ ops_run_prexor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) } static struct dma_async_tx_descriptor * -ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, + unsigned long pending) { int disks = sh->disks; int pd_idx = sh->pd_idx, i; @@ -696,7 +697,7 @@ ops_run_biodrain(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) /* check if prexor is active which means only process blocks * that are part of a read-modify-write (Wantprexor) */ - int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); + int prexor = test_bit(STRIPE_OP_PREXOR, &pending); pr_debug("%s: stripe %llu\n", __FUNCTION__, (unsigned long long)sh->sector); @@ -773,7 +774,8 @@ static void ops_complete_write(void *stripe_head_ref) } static void -ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) +ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx, + unsigned long pending) { /* kernel stack size limits the total number of disks */ int disks = sh->disks; @@ -781,7 +783,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) int count = 0, pd_idx = sh->pd_idx, i; struct page *xor_dest; - int prexor = test_bit(STRIPE_OP_PREXOR, &sh->ops.pending); + int prexor = test_bit(STRIPE_OP_PREXOR, &pending); unsigned long flags; dma_async_tx_callback callback; @@ -808,7 +810,7 @@ ops_run_postxor(struct stripe_head *sh, struct dma_async_tx_descriptor *tx) } /* check whether this postxor is part of a write */ - callback = test_bit(STRIPE_OP_BIODRAIN, &sh->ops.pending) ? + callback = test_bit(STRIPE_OP_BIODRAIN, &pending) ? ops_complete_write : ops_complete_postxor; /* 1/ if we prexor'd then the dest is reused as a source @@ -896,12 +898,12 @@ static void raid5_run_ops(struct stripe_head *sh, unsigned long pending) tx = ops_run_prexor(sh, tx); if (test_bit(STRIPE_OP_BIODRAIN, &pending)) { - tx = ops_run_biodrain(sh, tx); + tx = ops_run_biodrain(sh, tx, pending); overlap_clear++; } if (test_bit(STRIPE_OP_POSTXOR, &pending)) - ops_run_postxor(sh, tx); + ops_run_postxor(sh, tx, pending); if (test_bit(STRIPE_OP_CHECK, &pending)) ops_run_check(sh); diff --git a/drivers/misc/ioc4.c b/drivers/misc/ioc4.c index 6a5a05d..05172d2 100644 --- a/drivers/misc/ioc4.c +++ b/drivers/misc/ioc4.c @@ -244,10 +244,11 @@ ioc4_variant(struct ioc4_driver_data *idd) idd->idd_pdev->bus->number == pdev->bus->number && 3 == PCI_SLOT(pdev->devfn)) found = 1; - pci_dev_put(pdev); } while (pdev && !found); - if (NULL != pdev) + if (NULL != pdev) { + pci_dev_put(pdev); return IOC4_VARIANT_IO9; + } /* IO10: Look for a Vitesse VSC 7174 at the same bus and slot 3. 
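The ioc4 change around this point is a PCI reference-counting fix: pci_get_device() returns the next match with its refcount raised and itself drops the reference held on 'from', so calling pci_dev_put() on every loop iteration releases each reference twice. An illustrative sketch with hypothetical names:

#include <linux/pci.h>

/* Walk matching devices and report whether one sits in the given slot.
 * Only the reference still held when the walk stops is put explicitly;
 * intermediate references are released by the next pci_get_device() call. */
static bool example_slot_has_device(int bus, int slot,
				    unsigned int vendor, unsigned int device)
{
	struct pci_dev *pdev = NULL;

	while ((pdev = pci_get_device(vendor, device, pdev)) != NULL) {
		if (pdev->bus->number == bus && PCI_SLOT(pdev->devfn) == slot) {
			pci_dev_put(pdev);	/* done with the match */
			return true;
		}
		/* no pci_dev_put() here; the next pci_get_device() call
		 * drops the reference on this 'pdev' for us */
	}

	return false;
}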
*/ pdev = NULL; @@ -258,10 +259,11 @@ ioc4_variant(struct ioc4_driver_data *idd) idd->idd_pdev->bus->number == pdev->bus->number && 3 == PCI_SLOT(pdev->devfn)) found = 1; - pci_dev_put(pdev); } while (pdev && !found); - if (NULL != pdev) + if (NULL != pdev) { + pci_dev_put(pdev); return IOC4_VARIANT_IO10; + } /* PCI-RT: No SCSI/SATA controller will be present */ return IOC4_VARIANT_PCI_RT; diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c index edd6828..917b7b4 100644 --- a/drivers/net/cris/eth_v10.c +++ b/drivers/net/cris/eth_v10.c @@ -250,6 +250,7 @@ #include <asm/system.h> #include <asm/ethernet.h> #include <asm/cache.h> +#include <asm/arch/io_interface_mux.h> //#define ETHDEBUG #define D(x) @@ -279,6 +280,9 @@ struct net_local { * by this lock as well. */ spinlock_t lock; + + spinlock_t led_lock; /* Protect LED state */ + spinlock_t transceiver_lock; /* Protect transceiver state. */ }; typedef struct etrax_eth_descr @@ -295,8 +299,6 @@ struct transceiver_ops void (*check_duplex)(struct net_device* dev); }; -struct transceiver_ops* transceiver; - /* Duplex settings */ enum duplex { @@ -307,7 +309,7 @@ enum duplex /* Dma descriptors etc. */ -#define MAX_MEDIA_DATA_SIZE 1518 +#define MAX_MEDIA_DATA_SIZE 1522 #define MIN_PACKET_LEN 46 #define ETHER_HEAD_LEN 14 @@ -332,8 +334,8 @@ enum duplex /*Intel LXT972A specific*/ #define MDIO_INT_STATUS_REG_2 0x0011 -#define MDIO_INT_FULL_DUPLEX_IND ( 1 << 9 ) -#define MDIO_INT_SPEED ( 1 << 14 ) +#define MDIO_INT_FULL_DUPLEX_IND (1 << 9) +#define MDIO_INT_SPEED (1 << 14) /* Network flash constants */ #define NET_FLASH_TIME (HZ/50) /* 20 ms */ @@ -344,8 +346,8 @@ enum duplex #define NO_NETWORK_ACTIVITY 0 #define NETWORK_ACTIVITY 1 -#define NBR_OF_RX_DESC 64 -#define NBR_OF_TX_DESC 256 +#define NBR_OF_RX_DESC 32 +#define NBR_OF_TX_DESC 16 /* Large packets are sent directly to upper layers while small packets are */ /* copied (to reduce memory waste). 
The following constant decides the breakpoint */ @@ -367,7 +369,6 @@ enum duplex static etrax_eth_descr *myNextRxDesc; /* Points to the next descriptor to to be processed */ static etrax_eth_descr *myLastRxDesc; /* The last processed descriptor */ -static etrax_eth_descr *myPrevRxDesc; /* The descriptor right before myNextRxDesc */ static etrax_eth_descr RxDescList[NBR_OF_RX_DESC] __attribute__ ((aligned(32))); @@ -377,7 +378,6 @@ static etrax_eth_descr* myNextTxDesc; /* Next descriptor to use */ static etrax_eth_descr TxDescList[NBR_OF_TX_DESC] __attribute__ ((aligned(32))); static unsigned int network_rec_config_shadow = 0; -static unsigned int mdio_phy_addr; /* Transciever address */ static unsigned int network_tr_ctrl_shadow = 0; @@ -411,7 +411,7 @@ static int e100_set_config(struct net_device* dev, struct ifmap* map); static void e100_tx_timeout(struct net_device *dev); static struct net_device_stats *e100_get_stats(struct net_device *dev); static void set_multicast_list(struct net_device *dev); -static void e100_hardware_send_packet(char *buf, int length); +static void e100_hardware_send_packet(struct net_local* np, char *buf, int length); static void update_rx_stats(struct net_device_stats *); static void update_tx_stats(struct net_device_stats *); static int e100_probe_transceiver(struct net_device* dev); @@ -434,7 +434,10 @@ static void e100_clear_network_leds(unsigned long dummy); static void e100_set_network_leds(int active); static const struct ethtool_ops e100_ethtool_ops; - +#if defined(CONFIG_ETRAX_NO_PHY) +static void dummy_check_speed(struct net_device* dev); +static void dummy_check_duplex(struct net_device* dev); +#else static void broadcom_check_speed(struct net_device* dev); static void broadcom_check_duplex(struct net_device* dev); static void tdk_check_speed(struct net_device* dev); @@ -443,16 +446,28 @@ static void intel_check_speed(struct net_device* dev); static void intel_check_duplex(struct net_device* dev); static void generic_check_speed(struct net_device* dev); static void generic_check_duplex(struct net_device* dev); +#endif +#ifdef CONFIG_NET_POLL_CONTROLLER +static void e100_netpoll(struct net_device* dev); +#endif + +static int autoneg_normal = 1; struct transceiver_ops transceivers[] = { +#if defined(CONFIG_ETRAX_NO_PHY) + {0x0000, dummy_check_speed, dummy_check_duplex} /* Dummy */ +#else {0x1018, broadcom_check_speed, broadcom_check_duplex}, /* Broadcom */ {0xC039, tdk_check_speed, tdk_check_duplex}, /* TDK 2120 */ {0x039C, tdk_check_speed, tdk_check_duplex}, /* TDK 2120C */ {0x04de, intel_check_speed, intel_check_duplex}, /* Intel LXT972A*/ {0x0000, generic_check_speed, generic_check_duplex} /* Generic, must be last */ +#endif }; +struct transceiver_ops* transceiver = &transceivers[0]; + #define tx_done(dev) (*R_DMA_CH0_CMD == 0) /* @@ -471,14 +486,22 @@ etrax_ethernet_init(void) int i, err; printk(KERN_INFO - "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 2000-2003 Axis Communications AB\n"); + "ETRAX 100LX 10/100MBit ethernet v2.0 (c) 1998-2007 Axis Communications AB\n"); - dev = alloc_etherdev(sizeof(struct net_local)); - np = dev->priv; + if (cris_request_io_interface(if_eth, cardname)) { + printk(KERN_CRIT "etrax_ethernet_init failed to get IO interface\n"); + return -EBUSY; + } + dev = alloc_etherdev(sizeof(struct net_local)); if (!dev) return -ENOMEM; + np = netdev_priv(dev); + + /* we do our own locking */ + dev->features |= NETIF_F_LLTX; + dev->base_addr = (unsigned int)R_NETWORK_SA_0; /* just to have something to show */ /* now setup our etrax 
specific stuff */ @@ -498,14 +521,22 @@ etrax_ethernet_init(void) dev->do_ioctl = e100_ioctl; dev->set_config = e100_set_config; dev->tx_timeout = e100_tx_timeout; +#ifdef CONFIG_NET_POLL_CONTROLLER + dev->poll_controller = e100_netpoll; +#endif + + spin_lock_init(&np->lock); + spin_lock_init(&np->led_lock); + spin_lock_init(&np->transceiver_lock); /* Initialise the list of Etrax DMA-descriptors */ /* Initialise receive descriptors */ for (i = 0; i < NBR_OF_RX_DESC; i++) { - /* Allocate two extra cachelines to make sure that buffer used by DMA - * does not share cacheline with any other data (to avoid cache bug) + /* Allocate two extra cachelines to make sure that buffer used + * by DMA does not share cacheline with any other data (to + * avoid cache bug) */ RxDescList[i].skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); if (!RxDescList[i].skb) @@ -541,7 +572,6 @@ etrax_ethernet_init(void) myNextRxDesc = &RxDescList[0]; myLastRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; - myPrevRxDesc = &RxDescList[NBR_OF_RX_DESC - 1]; myFirstTxDesc = &TxDescList[0]; myNextTxDesc = &TxDescList[0]; myLastTxDesc = &TxDescList[NBR_OF_TX_DESC - 1]; @@ -562,10 +592,11 @@ etrax_ethernet_init(void) current_speed = 10; current_speed_selection = 0; /* Auto */ speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; - duplex_timer.data = (unsigned long)dev; + speed_timer.data = (unsigned long)dev; speed_timer.function = e100_check_speed; clear_led_timer.function = e100_clear_network_leds; + clear_led_timer.data = (unsigned long)dev; full_duplex = 0; current_duplex = autoneg; @@ -574,7 +605,6 @@ etrax_ethernet_init(void) duplex_timer.function = e100_check_duplex; /* Initialize mii interface */ - np->mii_if.phy_id = mdio_phy_addr; np->mii_if.phy_id_mask = 0x1f; np->mii_if.reg_num_mask = 0x1f; np->mii_if.dev = dev; @@ -585,6 +615,9 @@ etrax_ethernet_init(void) /* unwanted addresses are matched */ *R_NETWORK_GA_0 = 0x00000000; *R_NETWORK_GA_1 = 0x00000000; + + /* Initialize next time the led can flash */ + led_next_time = jiffies; return 0; } @@ -595,9 +628,9 @@ etrax_ethernet_init(void) static int e100_set_mac_address(struct net_device *dev, void *p) { - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); struct sockaddr *addr = p; - int i; + DECLARE_MAC_BUF(mac); spin_lock(&np->lock); /* preemption protection */ @@ -686,6 +719,25 @@ e100_open(struct net_device *dev) goto grace_exit2; } + /* + * Always allocate the DMA channels after the IRQ, + * and clean up on failure. + */ + + if (cris_request_dma(NETWORK_TX_DMA_NBR, + cardname, + DMA_VERBOSE_ON_ERROR, + dma_eth)) { + goto grace_exit3; + } + + if (cris_request_dma(NETWORK_RX_DMA_NBR, + cardname, + DMA_VERBOSE_ON_ERROR, + dma_eth)) { + goto grace_exit4; + } + /* give the HW an idea of what MAC address we want */ *R_NETWORK_SA_0 = dev->dev_addr[0] | (dev->dev_addr[1] << 8) | @@ -700,6 +752,7 @@ e100_open(struct net_device *dev) *R_NETWORK_REC_CONFIG = 0xd; /* broadcast rec, individ. 
rec, ma0 enabled */ #else + SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, max_size, size1522); SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, broadcast, receive); SETS(network_rec_config_shadow, R_NETWORK_REC_CONFIG, ma0, enable); SETF(network_rec_config_shadow, R_NETWORK_REC_CONFIG, duplex, full_duplex); @@ -719,8 +772,7 @@ e100_open(struct net_device *dev) SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, crc, enable); *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; - save_flags(flags); - cli(); + local_irq_save(flags); /* enable the irq's for ethernet DMA */ @@ -752,12 +804,13 @@ e100_open(struct net_device *dev) *R_DMA_CH0_FIRST = 0; *R_DMA_CH0_DESCR = virt_to_phys(myLastTxDesc); + netif_start_queue(dev); - restore_flags(flags); + local_irq_restore(flags); /* Probe for transceiver */ if (e100_probe_transceiver(dev)) - goto grace_exit3; + goto grace_exit5; /* Start duplex/speed timers */ add_timer(&speed_timer); @@ -766,10 +819,14 @@ e100_open(struct net_device *dev) /* We are now ready to accept transmit requeusts from * the queueing layer of the networking. */ - netif_start_queue(dev); + netif_carrier_on(dev); return 0; +grace_exit5: + cris_free_dma(NETWORK_RX_DMA_NBR, cardname); +grace_exit4: + cris_free_dma(NETWORK_TX_DMA_NBR, cardname); grace_exit3: free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); grace_exit2: @@ -780,12 +837,20 @@ grace_exit0: return -EAGAIN; } - +#if defined(CONFIG_ETRAX_NO_PHY) +static void +dummy_check_speed(struct net_device* dev) +{ + current_speed = 100; +} +#else static void generic_check_speed(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE); if ((data & ADVERTISE_100FULL) || (data & ADVERTISE_100HALF)) current_speed = 100; @@ -797,7 +862,10 @@ static void tdk_check_speed(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MDIO_TDK_DIAGNOSTIC_REG); current_speed = (data & MDIO_TDK_DIAGNOSTIC_RATE ? 100 : 10); } @@ -805,7 +873,10 @@ static void broadcom_check_speed(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MDIO_AUX_CTRL_STATUS_REG); current_speed = (data & MDIO_BC_SPEED ? 100 : 10); } @@ -813,46 +884,62 @@ static void intel_check_speed(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MDIO_INT_STATUS_REG_2); current_speed = (data & MDIO_INT_SPEED ? 
100 : 10); } - +#endif static void e100_check_speed(unsigned long priv) { struct net_device* dev = (struct net_device*)priv; + struct net_local *np = netdev_priv(dev); static int led_initiated = 0; unsigned long data; int old_speed = current_speed; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR); + spin_lock(&np->transceiver_lock); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMSR); if (!(data & BMSR_LSTATUS)) { current_speed = 0; } else { transceiver->check_speed(dev); } + spin_lock(&np->led_lock); if ((old_speed != current_speed) || !led_initiated) { led_initiated = 1; e100_set_network_leds(NO_NETWORK_ACTIVITY); + if (current_speed) + netif_carrier_on(dev); + else + netif_carrier_off(dev); } + spin_unlock(&np->led_lock); /* Reinitialize the timer. */ speed_timer.expires = jiffies + NET_LINK_UP_CHECK_INTERVAL; add_timer(&speed_timer); + + spin_unlock(&np->transceiver_lock); } static void e100_negotiate(struct net_device* dev) { - unsigned short data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE); + struct net_local *np = netdev_priv(dev); + unsigned short data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MII_ADVERTISE); /* Discard old speed and duplex settings */ data &= ~(ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_10HALF | ADVERTISE_10FULL); switch (current_speed_selection) { - case 10 : + case 10: if (current_duplex == full) data |= ADVERTISE_10FULL; else if (current_duplex == half) @@ -861,7 +948,7 @@ e100_negotiate(struct net_device* dev) data |= ADVERTISE_10HALF | ADVERTISE_10FULL; break; - case 100 : + case 100: if (current_duplex == full) data |= ADVERTISE_100FULL; else if (current_duplex == half) @@ -870,7 +957,7 @@ e100_negotiate(struct net_device* dev) data |= ADVERTISE_100HALF | ADVERTISE_100FULL; break; - case 0 : /* Auto */ + case 0: /* Auto */ if (current_duplex == full) data |= ADVERTISE_100FULL | ADVERTISE_10FULL; else if (current_duplex == half) @@ -880,35 +967,44 @@ e100_negotiate(struct net_device* dev) ADVERTISE_100HALF | ADVERTISE_100FULL; break; - default : /* assume autoneg speed and duplex */ + default: /* assume autoneg speed and duplex */ data |= ADVERTISE_10HALF | ADVERTISE_10FULL | ADVERTISE_100HALF | ADVERTISE_100FULL; + break; } - e100_set_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE, data); + e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); /* Renegotiate with link partner */ - data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR); + if (autoneg_normal) { + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); data |= BMCR_ANENABLE | BMCR_ANRESTART; - - e100_set_mdio_reg(dev, mdio_phy_addr, MII_BMCR, data); + } + e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); } static void e100_set_speed(struct net_device* dev, unsigned long speed) { + struct net_local *np = netdev_priv(dev); + + spin_lock(&np->transceiver_lock); if (speed != current_speed_selection) { current_speed_selection = speed; e100_negotiate(dev); } + spin_unlock(&np->transceiver_lock); } static void e100_check_duplex(unsigned long priv) { struct net_device *dev = (struct net_device *)priv; - struct net_local *np = (struct net_local *)dev->priv; - int old_duplex = full_duplex; + struct net_local *np = netdev_priv(dev); + int old_duplex; + + spin_lock(&np->transceiver_lock); + old_duplex = full_duplex; transceiver->check_duplex(dev); if (old_duplex != full_duplex) { /* Duplex changed */ @@ -920,13 +1016,22 @@ e100_check_duplex(unsigned long priv) duplex_timer.expires = jiffies + NET_DUPLEX_CHECK_INTERVAL; add_timer(&duplex_timer); 
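The speed-check timer above now also mirrors the polled PHY link state into the carrier flag. A small sketch of that part alone, with a made-up helper name:

#include <linux/netdevice.h>

/* Reflect the polled link state in the net_device carrier flag so the
 * queueing layer and userspace see link transitions. */
static void example_update_carrier(struct net_device *dev, int link_up)
{
	if (link_up && !netif_carrier_ok(dev))
		netif_carrier_on(dev);
	else if (!link_up && netif_carrier_ok(dev))
		netif_carrier_off(dev);
}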
np->mii_if.full_duplex = full_duplex; + spin_unlock(&np->transceiver_lock); } - +#if defined(CONFIG_ETRAX_NO_PHY) +static void +dummy_check_duplex(struct net_device* dev) +{ + full_duplex = 1; +} +#else static void generic_check_duplex(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_ADVERTISE); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE); if ((data & ADVERTISE_10FULL) || (data & ADVERTISE_100FULL)) full_duplex = 1; @@ -938,7 +1043,10 @@ static void tdk_check_duplex(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_TDK_DIAGNOSTIC_REG); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MDIO_TDK_DIAGNOSTIC_REG); full_duplex = (data & MDIO_TDK_DIAGNOSTIC_DPLX) ? 1 : 0; } @@ -946,7 +1054,10 @@ static void broadcom_check_duplex(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_AUX_CTRL_STATUS_REG); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MDIO_AUX_CTRL_STATUS_REG); full_duplex = (data & MDIO_BC_FULL_DUPLEX_IND) ? 1 : 0; } @@ -954,38 +1065,55 @@ static void intel_check_duplex(struct net_device* dev) { unsigned long data; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MDIO_INT_STATUS_REG_2); + struct net_local *np = netdev_priv(dev); + + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, + MDIO_INT_STATUS_REG_2); full_duplex = (data & MDIO_INT_FULL_DUPLEX_IND) ? 1 : 0; } - +#endif static void e100_set_duplex(struct net_device* dev, enum duplex new_duplex) { + struct net_local *np = netdev_priv(dev); + + spin_lock(&np->transceiver_lock); if (new_duplex != current_duplex) { current_duplex = new_duplex; e100_negotiate(dev); } + spin_unlock(&np->transceiver_lock); } static int e100_probe_transceiver(struct net_device* dev) { + int ret = 0; + +#if !defined(CONFIG_ETRAX_NO_PHY) unsigned int phyid_high; unsigned int phyid_low; unsigned int oui; struct transceiver_ops* ops = NULL; + struct net_local *np = netdev_priv(dev); + + spin_lock(&np->transceiver_lock); /* Probe MDIO physical address */ - for (mdio_phy_addr = 0; mdio_phy_addr <= 31; mdio_phy_addr++) { - if (e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMSR) != 0xffff) + for (np->mii_if.phy_id = 0; np->mii_if.phy_id <= 31; + np->mii_if.phy_id++) { + if (e100_get_mdio_reg(dev, + np->mii_if.phy_id, MII_BMSR) != 0xffff) break; } - if (mdio_phy_addr == 32) - return -ENODEV; + if (np->mii_if.phy_id == 32) { + ret = -ENODEV; + goto out; + } /* Get manufacturer */ - phyid_high = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID1); - phyid_low = e100_get_mdio_reg(dev, mdio_phy_addr, MII_PHYSID2); + phyid_high = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID1); + phyid_low = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_PHYSID2); oui = (phyid_high << 6) | (phyid_low >> 10); for (ops = &transceivers[0]; ops->oui; ops++) { @@ -993,8 +1121,10 @@ e100_probe_transceiver(struct net_device* dev) break; } transceiver = ops; - - return 0; +out: + spin_unlock(&np->transceiver_lock); +#endif + return ret; } static int @@ -1088,13 +1218,14 @@ e100_receive_mdio_bit() static void e100_reset_transceiver(struct net_device* dev) { + struct net_local *np = netdev_priv(dev); unsigned short cmd; unsigned short data; int bitCounter; - data = e100_get_mdio_reg(dev, mdio_phy_addr, MII_BMCR); + data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); - cmd = 
(MDIO_START << 14) | (MDIO_WRITE << 12) | (mdio_phy_addr << 7) | (MII_BMCR << 2); + cmd = (MDIO_START << 14) | (MDIO_WRITE << 12) | (np->mii_if.phy_id << 7) | (MII_BMCR << 2); e100_send_mdio_cmd(cmd, 1); @@ -1112,7 +1243,7 @@ e100_reset_transceiver(struct net_device* dev) static void e100_tx_timeout(struct net_device *dev) { - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); unsigned long flags; spin_lock_irqsave(&np->lock, flags); @@ -1134,8 +1265,7 @@ e100_tx_timeout(struct net_device *dev) e100_reset_transceiver(dev); /* and get rid of the packets that never got an interrupt */ - while (myFirstTxDesc != myNextTxDesc) - { + while (myFirstTxDesc != myNextTxDesc) { dev_kfree_skb(myFirstTxDesc->skb); myFirstTxDesc->skb = 0; myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); @@ -1161,7 +1291,7 @@ e100_tx_timeout(struct net_device *dev) static int e100_send_packet(struct sk_buff *skb, struct net_device *dev) { - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); unsigned char *buf = skb->data; unsigned long flags; @@ -1174,7 +1304,7 @@ e100_send_packet(struct sk_buff *skb, struct net_device *dev) dev->trans_start = jiffies; - e100_hardware_send_packet(buf, skb->len); + e100_hardware_send_packet(np, buf, skb->len); myNextTxDesc = phys_to_virt(myNextTxDesc->descr.next); @@ -1197,13 +1327,15 @@ static irqreturn_t e100rxtx_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct net_local *np = (struct net_local *)dev->priv; - unsigned long irqbits = *R_IRQ_MASK2_RD; + struct net_local *np = netdev_priv(dev); + unsigned long irqbits; - /* Disable RX/TX IRQs to avoid reentrancy */ - *R_IRQ_MASK2_CLR = - IO_STATE(R_IRQ_MASK2_CLR, dma0_eop, clr) | - IO_STATE(R_IRQ_MASK2_CLR, dma1_eop, clr); + /* + * Note that both rx and tx interrupts are blocked at this point, + * regardless of which got us here. + */ + + irqbits = *R_IRQ_MASK2_RD; /* Handle received packets */ if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma1_eop, active)) { @@ -1219,7 +1351,7 @@ e100rxtx_interrupt(int irq, void *dev_id) * allocate a new buffer to put a packet in. */ e100_rx(dev); - ((struct net_local *)dev->priv)->stats.rx_packets++; + np->stats.rx_packets++; /* restart/continue on the channel, for safety */ *R_DMA_CH1_CMD = IO_STATE(R_DMA_CH1_CMD, cmd, restart); /* clear dma channel 1 eop/descr irq bits */ @@ -1233,9 +1365,8 @@ e100rxtx_interrupt(int irq, void *dev_id) } /* Report any packets that have been sent */ - while (myFirstTxDesc != phys_to_virt(*R_DMA_CH0_FIRST) && - myFirstTxDesc != myNextTxDesc) - { + while (virt_to_phys(myFirstTxDesc) != *R_DMA_CH0_FIRST && + (netif_queue_stopped(dev) || myFirstTxDesc != myNextTxDesc)) { np->stats.tx_bytes += myFirstTxDesc->skb->len; np->stats.tx_packets++; @@ -1244,19 +1375,15 @@ e100rxtx_interrupt(int irq, void *dev_id) dev_kfree_skb_irq(myFirstTxDesc->skb); myFirstTxDesc->skb = 0; myFirstTxDesc = phys_to_virt(myFirstTxDesc->descr.next); + /* Wake up queue. */ + netif_wake_queue(dev); } if (irqbits & IO_STATE(R_IRQ_MASK2_RD, dma0_eop, active)) { - /* acknowledge the eop interrupt and wake up queue */ + /* acknowledge the eop interrupt. 
*/ *R_DMA_CH0_CLR_INTR = IO_STATE(R_DMA_CH0_CLR_INTR, clr_eop, do); - netif_wake_queue(dev); } - /* Enable RX/TX IRQs again */ - *R_IRQ_MASK2_SET = - IO_STATE(R_IRQ_MASK2_SET, dma0_eop, set) | - IO_STATE(R_IRQ_MASK2_SET, dma1_eop, set); - return IRQ_HANDLED; } @@ -1264,7 +1391,7 @@ static irqreturn_t e100nw_interrupt(int irq, void *dev_id) { struct net_device *dev = (struct net_device *)dev_id; - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); unsigned long irqbits = *R_IRQ_MASK0_RD; /* check for underrun irq */ @@ -1286,7 +1413,6 @@ e100nw_interrupt(int irq, void *dev_id) SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, clr); *R_NETWORK_TR_CTRL = network_tr_ctrl_shadow; SETS(network_tr_ctrl_shadow, R_NETWORK_TR_CTRL, clr_error, nop); - *R_NETWORK_TR_CTRL = IO_STATE(R_NETWORK_TR_CTRL, clr_error, clr); np->stats.tx_errors++; D(printk("ethernet excessive collisions!\n")); } @@ -1299,12 +1425,13 @@ e100_rx(struct net_device *dev) { struct sk_buff *skb; int length = 0; - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); unsigned char *skb_data_ptr; #ifdef ETHDEBUG int i; #endif - + etrax_eth_descr *prevRxDesc; /* The descriptor right before myNextRxDesc */ + spin_lock(&np->led_lock); if (!led_active && time_after(jiffies, led_next_time)) { /* light the network leds depending on the current speed. */ e100_set_network_leds(NETWORK_ACTIVITY); @@ -1314,9 +1441,10 @@ e100_rx(struct net_device *dev) led_active = 1; mod_timer(&clear_led_timer, jiffies + HZ/10); } + spin_unlock(&np->led_lock); length = myNextRxDesc->descr.hw_len - 4; - ((struct net_local *)dev->priv)->stats.rx_bytes += length; + np->stats.rx_bytes += length; #ifdef ETHDEBUG printk("Got a packet of length %d:\n", length); @@ -1336,7 +1464,7 @@ e100_rx(struct net_device *dev) if (!skb) { np->stats.rx_errors++; printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - return; + goto update_nextrxdesc; } skb_put(skb, length - ETHER_HEAD_LEN); /* allocate room for the packet body */ @@ -1354,15 +1482,15 @@ e100_rx(struct net_device *dev) else { /* Large packet, send directly to upper layers and allocate new * memory (aligned to cache line boundary to avoid bug). - * Before sending the skb to upper layers we must make sure that - * skb->data points to the aligned start of the packet. + * Before sending the skb to upper layers we must make sure + * that skb->data points to the aligned start of the packet. 
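The e100_rx() changes here turn the allocation-failure 'return' into a goto so the descriptor is still recycled. A sketch of that drop path, using a made-up descriptor type:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* When no skb is available the packet is dropped and counted, but the
 * descriptor is still cleared and handed back so the receive ring keeps
 * moving; returning early at this point is what used to stall it. */
struct example_rx_desc {
	void		*buf;
	unsigned int	len;
	u32		status;
};

static void example_rx_one(struct net_device *dev,
			   struct net_device_stats *stats,
			   struct example_rx_desc *d)
{
	struct sk_buff *skb = dev_alloc_skb(d->len + 2);

	if (!skb) {
		stats->rx_errors++;
		goto recycle;			/* drop, but recycle the slot */
	}

	memcpy(skb_put(skb, d->len), d->buf, d->len);
	skb->protocol = eth_type_trans(skb, dev);
	netif_rx(skb);

recycle:
	d->status = 0;				/* descriptor back to the ring */
}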
*/ int align; struct sk_buff *new_skb = dev_alloc_skb(MAX_MEDIA_DATA_SIZE + 2 * L1_CACHE_BYTES); if (!new_skb) { np->stats.rx_errors++; printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n", dev->name); - return; + goto update_nextrxdesc; } skb = myNextRxDesc->skb; align = (int)phys_to_virt(myNextRxDesc->descr.buf) - (int)skb->data; @@ -1377,9 +1505,10 @@ e100_rx(struct net_device *dev) /* Send the packet to the upper layers */ netif_rx(skb); + update_nextrxdesc: /* Prepare for next packet */ myNextRxDesc->descr.status = 0; - myPrevRxDesc = myNextRxDesc; + prevRxDesc = myNextRxDesc; myNextRxDesc = phys_to_virt(myNextRxDesc->descr.next); rx_queue_len++; @@ -1387,9 +1516,9 @@ e100_rx(struct net_device *dev) /* Check if descriptors should be returned */ if (rx_queue_len == RX_QUEUE_THRESHOLD) { flush_etrax_cache(); - myPrevRxDesc->descr.ctrl |= d_eol; + prevRxDesc->descr.ctrl |= d_eol; myLastRxDesc->descr.ctrl &= ~d_eol; - myLastRxDesc = myPrevRxDesc; + myLastRxDesc = prevRxDesc; rx_queue_len = 0; } } @@ -1398,7 +1527,7 @@ e100_rx(struct net_device *dev) static int e100_close(struct net_device *dev) { - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); printk(KERN_INFO "Closing %s.\n", dev->name); @@ -1426,6 +1555,9 @@ e100_close(struct net_device *dev) free_irq(NETWORK_DMA_TX_IRQ_NBR, (void *)dev); free_irq(NETWORK_STATUS_IRQ_NBR, (void *)dev); + cris_free_dma(NETWORK_TX_DMA_NBR, cardname); + cris_free_dma(NETWORK_RX_DMA_NBR, cardname); + /* Update the statistics here. */ update_rx_stats(&np->stats); @@ -1443,18 +1575,11 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct mii_ioctl_data *data = if_mii(ifr); struct net_local *np = netdev_priv(dev); + int rc = 0; + int old_autoneg; spin_lock(&np->lock); /* Preempt protection */ switch (cmd) { - case SIOCGMIIPHY: /* Get PHY address */ - data->phy_id = mdio_phy_addr; - break; - case SIOCGMIIREG: /* Read MII register */ - data->val_out = e100_get_mdio_reg(dev, mdio_phy_addr, data->reg_num); - break; - case SIOCSMIIREG: /* Write MII register */ - e100_set_mdio_reg(dev, mdio_phy_addr, data->reg_num, data->val_in); - break; /* The ioctls below should be considered obsolete but are */ /* still present for compatability with old scripts/apps */ case SET_ETH_SPEED_10: /* 10 Mbps */ @@ -1463,60 +1588,47 @@ e100_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) case SET_ETH_SPEED_100: /* 100 Mbps */ e100_set_speed(dev, 100); break; - case SET_ETH_SPEED_AUTO: /* Auto negotiate speed */ + case SET_ETH_SPEED_AUTO: /* Auto-negotiate speed */ e100_set_speed(dev, 0); break; - case SET_ETH_DUPLEX_HALF: /* Half duplex. */ + case SET_ETH_DUPLEX_HALF: /* Half duplex */ e100_set_duplex(dev, half); break; - case SET_ETH_DUPLEX_FULL: /* Full duplex. 
*/ + case SET_ETH_DUPLEX_FULL: /* Full duplex */ e100_set_duplex(dev, full); break; - case SET_ETH_DUPLEX_AUTO: /* Autonegotiate duplex*/ + case SET_ETH_DUPLEX_AUTO: /* Auto-negotiate duplex */ e100_set_duplex(dev, autoneg); break; + case SET_ETH_AUTONEG: + old_autoneg = autoneg_normal; + autoneg_normal = *(int*)data; + if (autoneg_normal != old_autoneg) + e100_negotiate(dev); + break; default: - return -EINVAL; + rc = generic_mii_ioctl(&np->mii_if, if_mii(ifr), + cmd, NULL); + break; } spin_unlock(&np->lock); - return 0; + return rc; } -static int e100_set_settings(struct net_device *dev, - struct ethtool_cmd *ecmd) +static int e100_get_settings(struct net_device *dev, + struct ethtool_cmd *cmd) { - ecmd->supported = SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII | - SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | - SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full; - ecmd->port = PORT_TP; - ecmd->transceiver = XCVR_EXTERNAL; - ecmd->phy_address = mdio_phy_addr; - ecmd->speed = current_speed; - ecmd->duplex = full_duplex ? DUPLEX_FULL : DUPLEX_HALF; - ecmd->advertising = ADVERTISED_TP; + struct net_local *np = netdev_priv(dev); + int err; - if (current_duplex == autoneg && current_speed_selection == 0) - ecmd->advertising |= ADVERTISED_Autoneg; - else { - ecmd->advertising |= - ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; - if (current_speed_selection == 10) - ecmd->advertising &= ~(ADVERTISED_100baseT_Half | - ADVERTISED_100baseT_Full); - else if (current_speed_selection == 100) - ecmd->advertising &= ~(ADVERTISED_10baseT_Half | - ADVERTISED_10baseT_Full); - if (current_duplex == half) - ecmd->advertising &= ~(ADVERTISED_10baseT_Full | - ADVERTISED_100baseT_Full); - else if (current_duplex == full) - ecmd->advertising &= ~(ADVERTISED_10baseT_Half | - ADVERTISED_100baseT_Half); - } + spin_lock_irq(&np->lock); + err = mii_ethtool_gset(&np->mii_if, cmd); + spin_unlock_irq(&np->lock); - ecmd->autoneg = AUTONEG_ENABLE; - return 0; + /* The PHY may support 1000baseT, but the Etrax100 does not. 
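The ioctl rework above drops the open-coded SIOCGMIIPHY/SIOCGMIIREG/SIOCSMIIREG cases in favour of the generic MII helpers. A sketch of that delegation, with an assumed private struct:

#include <linux/mii.h>
#include <linux/netdevice.h>

/* Once a struct mii_if_info is kept populated (phy_id, dev, mdio_read,
 * mdio_write), the standard MII requests can be passed straight to
 * generic_mii_ioctl() instead of being reimplemented per driver. */
struct example_priv {
	struct mii_if_info mii_if;
};

static int example_do_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct example_priv *priv = netdev_priv(dev);

	return generic_mii_ioctl(&priv->mii_if, if_mii(ifr), cmd, NULL);
}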
*/ + cmd->supported &= ~(SUPPORTED_1000baseT_Half + | SUPPORTED_1000baseT_Full); + return err; } static int e100_set_settings(struct net_device *dev, @@ -1560,7 +1672,8 @@ static const struct ethtool_ops e100_ethtool_ops = { static int e100_set_config(struct net_device *dev, struct ifmap *map) { - struct net_local *np = (struct net_local *)dev->priv; + struct net_local *np = netdev_priv(dev); + spin_lock(&np->lock); /* Preempt protection */ switch(map->port) { @@ -1612,7 +1725,6 @@ update_tx_stats(struct net_device_stats *es) es->collisions += IO_EXTRACT(R_TR_COUNTERS, single_col, r) + IO_EXTRACT(R_TR_COUNTERS, multiple_col, r); - es->tx_errors += IO_EXTRACT(R_TR_COUNTERS, deferred, r); } /* @@ -1622,8 +1734,9 @@ update_tx_stats(struct net_device_stats *es) static struct net_device_stats * e100_get_stats(struct net_device *dev) { - struct net_local *lp = (struct net_local *)dev->priv; + struct net_local *lp = netdev_priv(dev); unsigned long flags; + spin_lock_irqsave(&lp->lock, flags); update_rx_stats(&lp->stats); @@ -1643,13 +1756,13 @@ e100_get_stats(struct net_device *dev) static void set_multicast_list(struct net_device *dev) { - struct net_local *lp = (struct net_local *)dev->priv; + struct net_local *lp = netdev_priv(dev); int num_addr = dev->mc_count; unsigned long int lo_bits; unsigned long int hi_bits; + spin_lock(&lp->lock); - if (dev->flags & IFF_PROMISC) - { + if (dev->flags & IFF_PROMISC) { /* promiscuous mode */ lo_bits = 0xfffffffful; hi_bits = 0xfffffffful; @@ -1679,9 +1792,10 @@ set_multicast_list(struct net_device *dev) struct dev_mc_list *dmi = dev->mc_list; int i; char *baddr; + lo_bits = 0x00000000ul; hi_bits = 0x00000000ul; - for (i=0; i<num_addr; i++) { + for (i = 0; i < num_addr; i++) { /* Calculate the hash index for the GA registers */ hash_ix = 0; @@ -1708,8 +1822,7 @@ set_multicast_list(struct net_device *dev) if (hash_ix >= 32) { hi_bits |= (1 << (hash_ix-32)); - } - else { + } else { lo_bits |= (1 << hash_ix); } dmi = dmi->next; @@ -1724,10 +1837,11 @@ set_multicast_list(struct net_device *dev) } void -e100_hardware_send_packet(char *buf, int length) +e100_hardware_send_packet(struct net_local *np, char *buf, int length) { D(printk("e100 send pack, buf 0x%x len %d\n", buf, length)); + spin_lock(&np->led_lock); if (!led_active && time_after(jiffies, led_next_time)) { /* light the network leds depending on the current speed. 
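The led_lock taken in e100_hardware_send_packet() above follows the same idea as in the receive path: LED flash bookkeeping is shared between the tx path, the rx path and the clear-LED timer, so it gets its own small spinlock. A minimal sketch with invented names:

#include <linux/spinlock.h>
#include <linux/jiffies.h>

/* Dedicated lock for LED state rather than piggybacking on the main
 * driver lock; callers run in interrupt or timer context in this style
 * of driver, so a plain spin_lock suffices here. */
struct example_led_state {
	spinlock_t	lock;
	int		active;
	unsigned long	next_time;
};

static void example_led_flash(struct example_led_state *led)
{
	spin_lock(&led->lock);
	if (!led->active && time_after(jiffies, led->next_time)) {
		led->active = 1;
		led->next_time = jiffies + HZ / 50;	/* ~20 ms flash */
		/* ... switch the LED on and arm the clear timer here ... */
	}
	spin_unlock(&led->lock);
}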
*/ e100_set_network_leds(NETWORK_ACTIVITY); @@ -1737,6 +1851,7 @@ e100_hardware_send_packet(char *buf, int length) led_active = 1; mod_timer(&clear_led_timer, jiffies + HZ/10); } + spin_unlock(&np->led_lock); /* configure the tx dma descriptor */ myNextTxDesc->descr.sw_len = length; @@ -1754,6 +1869,11 @@ e100_hardware_send_packet(char *buf, int length) static void e100_clear_network_leds(unsigned long dummy) { + struct net_device *dev = (struct net_device *)dummy; + struct net_local *np = netdev_priv(dev); + + spin_lock(&np->led_lock); + if (led_active && time_after(jiffies, led_next_time)) { e100_set_network_leds(NO_NETWORK_ACTIVITY); @@ -1761,6 +1881,8 @@ e100_clear_network_leds(unsigned long dummy) led_next_time = jiffies + NET_FLASH_PAUSE; led_active = 0; } + + spin_unlock(&np->led_lock); } static void @@ -1781,19 +1903,25 @@ e100_set_network_leds(int active) #else LED_NETWORK_SET(LED_OFF); #endif - } - else if (light_leds) { + } else if (light_leds) { if (current_speed == 10) { LED_NETWORK_SET(LED_ORANGE); } else { LED_NETWORK_SET(LED_GREEN); } - } - else { + } else { LED_NETWORK_SET(LED_OFF); } } +#ifdef CONFIG_NET_POLL_CONTROLLER +static void +e100_netpoll(struct net_device* netdev) +{ + e100rxtx_interrupt(NETWORK_DMA_TX_IRQ_NBR, netdev, NULL); +} +#endif + static int etrax_init_module(void) { diff --git a/drivers/oprofile/cpu_buffer.c b/drivers/oprofile/cpu_buffer.c index a83c3db..c93d3d2 100644 --- a/drivers/oprofile/cpu_buffer.c +++ b/drivers/oprofile/cpu_buffer.c @@ -64,6 +64,8 @@ int alloc_cpu_buffers(void) b->head_pos = 0; b->sample_received = 0; b->sample_lost_overflow = 0; + b->backtrace_aborted = 0; + b->sample_invalid_eip = 0; b->cpu = i; INIT_DELAYED_WORK(&b->work, wq_sync_buffer); } @@ -175,6 +177,11 @@ static int log_sample(struct oprofile_cpu_buffer * cpu_buf, unsigned long pc, cpu_buf->sample_received++; + if (pc == ESCAPE_CODE) { + cpu_buf->sample_invalid_eip++; + return 0; + } + if (nr_available_slots(cpu_buf) < 3) { cpu_buf->sample_lost_overflow++; return 0; diff --git a/drivers/oprofile/cpu_buffer.h b/drivers/oprofile/cpu_buffer.h index 49900d9..c66c025 100644 --- a/drivers/oprofile/cpu_buffer.h +++ b/drivers/oprofile/cpu_buffer.h @@ -42,6 +42,7 @@ struct oprofile_cpu_buffer { unsigned long sample_received; unsigned long sample_lost_overflow; unsigned long backtrace_aborted; + unsigned long sample_invalid_eip; int cpu; struct delayed_work work; } ____cacheline_aligned; diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c index f0acb66..d1f6d77 100644 --- a/drivers/oprofile/oprofile_stats.c +++ b/drivers/oprofile/oprofile_stats.c @@ -26,6 +26,8 @@ void oprofile_reset_stats(void) cpu_buf = &cpu_buffer[i]; cpu_buf->sample_received = 0; cpu_buf->sample_lost_overflow = 0; + cpu_buf->backtrace_aborted = 0; + cpu_buf->sample_invalid_eip = 0; } atomic_set(&oprofile_stats.sample_lost_no_mm, 0); @@ -61,6 +63,8 @@ void oprofile_create_stats_files(struct super_block * sb, struct dentry * root) &cpu_buf->sample_lost_overflow); oprofilefs_create_ro_ulong(sb, cpudir, "backtrace_aborted", &cpu_buf->backtrace_aborted); + oprofilefs_create_ro_ulong(sb, cpudir, "sample_invalid_eip", + &cpu_buf->sample_invalid_eip); } oprofilefs_create_ro_atomic(sb, dir, "sample_lost_no_mm", diff --git a/drivers/rtc/Kconfig b/drivers/rtc/Kconfig index cbde770..e5cdc02 100644 --- a/drivers/rtc/Kconfig +++ b/drivers/rtc/Kconfig @@ -36,7 +36,9 @@ config RTC_HCTOSYS_DEVICE help The RTC device that will be used to (re)initialize the system clock, usually rtc0. 
Initialization is done when the system - starts up, and when it resumes from a low power state. + starts up, and when it resumes from a low power state. This + device should record time in UTC, since the kernel won't do + timezone correction. The driver for this RTC device must be loaded before late_initcall functions run, so it must usually be statically linked. @@ -133,8 +135,8 @@ config RTC_DRV_DS1307 The first seven registers on these chips hold an RTC, and other registers may add features such as NVRAM, a trickle charger for - the RTC/NVRAM backup power, and alarms. This driver may not - expose all those available chip features. + the RTC/NVRAM backup power, and alarms. NVRAM is visible in + sysfs, but other chip features may not be available. This driver can also be built as a module. If so, the module will be called rtc-ds1307. diff --git a/drivers/rtc/hctosys.c b/drivers/rtc/hctosys.c index 1785272..33c0e98 100644 --- a/drivers/rtc/hctosys.c +++ b/drivers/rtc/hctosys.c @@ -47,8 +47,8 @@ static int __init rtc_hctosys(void) do_settimeofday(&tv); dev_info(rtc->dev.parent, - "setting the system clock to " - "%d-%02d-%02d %02d:%02d:%02d (%u)\n", + "setting system clock to " + "%d-%02d-%02d %02d:%02d:%02d UTC (%u)\n", tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, tm.tm_hour, tm.tm_min, tm.tm_sec, (unsigned int) tv.tv_sec); diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c index db6f3f0..bc1c7fe 100644 --- a/drivers/rtc/rtc-ds1307.c +++ b/drivers/rtc/rtc-ds1307.c @@ -89,6 +89,7 @@ enum ds_type { struct ds1307 { u8 reg_addr; + bool has_nvram; u8 regs[8]; enum ds_type type; struct i2c_msg msg[2]; @@ -242,6 +243,87 @@ static const struct rtc_class_ops ds13xx_rtc_ops = { .set_time = ds1307_set_time, }; +/*----------------------------------------------------------------------*/ + +#define NVRAM_SIZE 56 + +static ssize_t +ds1307_nvram_read(struct kobject *kobj, struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct i2c_client *client; + struct ds1307 *ds1307; + struct i2c_msg msg[2]; + int result; + + client = to_i2c_client(container_of(kobj, struct device, kobj)); + ds1307 = i2c_get_clientdata(client); + + if (unlikely(off >= NVRAM_SIZE)) + return 0; + if ((off + count) > NVRAM_SIZE) + count = NVRAM_SIZE - off; + if (unlikely(!count)) + return count; + + msg[0].addr = client->addr; + msg[0].flags = 0; + msg[0].len = 1; + msg[0].buf = buf; + + buf[0] = 8 + off; + + msg[1].addr = client->addr; + msg[1].flags = I2C_M_RD; + msg[1].len = count; + msg[1].buf = buf; + + result = i2c_transfer(to_i2c_adapter(client->dev.parent), msg, 2); + if (result != 2) { + dev_err(&client->dev, "%s error %d\n", "nvram read", result); + return -EIO; + } + return count; +} + +static ssize_t +ds1307_nvram_write(struct kobject *kobj, struct bin_attribute *attr, + char *buf, loff_t off, size_t count) +{ + struct i2c_client *client; + u8 buffer[NVRAM_SIZE + 1]; + int ret; + + client = to_i2c_client(container_of(kobj, struct device, kobj)); + + if (unlikely(off >= NVRAM_SIZE)) + return -EFBIG; + if ((off + count) > NVRAM_SIZE) + count = NVRAM_SIZE - off; + if (unlikely(!count)) + return count; + + buffer[0] = 8 + off; + memcpy(buffer + 1, buf, count); + + ret = i2c_master_send(client, buffer, count + 1); + return (ret < 0) ? 
ret : (ret - 1); +} + +static struct bin_attribute nvram = { + .attr = { + .name = "nvram", + .mode = S_IRUGO | S_IWUSR, + .owner = THIS_MODULE, + }, + + .read = ds1307_nvram_read, + .write = ds1307_nvram_write, + .size = NVRAM_SIZE, +}; + +/*----------------------------------------------------------------------*/ + static struct i2c_driver ds1307_driver; static int __devinit ds1307_probe(struct i2c_client *client) @@ -413,6 +495,14 @@ read_rtc: goto exit_free; } + if (chip->nvram56) { + err = sysfs_create_bin_file(&client->dev.kobj, &nvram); + if (err == 0) { + ds1307->has_nvram = true; + dev_info(&client->dev, "56 bytes nvram\n"); + } + } + return 0; exit_bad: @@ -432,6 +522,9 @@ static int __devexit ds1307_remove(struct i2c_client *client) { struct ds1307 *ds1307 = i2c_get_clientdata(client); + if (ds1307->has_nvram) + sysfs_remove_bin_file(&client->dev.kobj, &nvram); + rtc_device_unregister(ds1307->rtc); kfree(ds1307); return 0; diff --git a/drivers/rtc/rtc-ds1553.c b/drivers/rtc/rtc-ds1553.c index bb53c09..d9e848d 100644 --- a/drivers/rtc/rtc-ds1553.c +++ b/drivers/rtc/rtc-ds1553.c @@ -291,7 +291,7 @@ static ssize_t ds1553_nvram_write(struct kobject *kobj, static struct bin_attribute ds1553_nvram_attr = { .attr = { .name = "nvram", - .mode = S_IRUGO | S_IWUGO, + .mode = S_IRUGO | S_IWUSR, }, .size = RTC_OFFSET, .read = ds1553_nvram_read, diff --git a/drivers/rtc/rtc-ds1742.c b/drivers/rtc/rtc-ds1742.c index c535b78..2e73f0b 100644 --- a/drivers/rtc/rtc-ds1742.c +++ b/drivers/rtc/rtc-ds1742.c @@ -160,10 +160,13 @@ static ssize_t ds1742_nvram_write(struct kobject *kobj, static struct bin_attribute ds1742_nvram_attr = { .attr = { .name = "nvram", - .mode = S_IRUGO | S_IWUGO, + .mode = S_IRUGO | S_IWUSR, }, .read = ds1742_nvram_read, .write = ds1742_nvram_write, + /* REVISIT: size in sysfs won't match actual size... if it's + * not a constant, each RTC should have its own attribute. 
+ */ }; static int __devinit ds1742_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c index 2bad163..cd0bbc0 100644 --- a/drivers/rtc/rtc-m48t59.c +++ b/drivers/rtc/rtc-m48t59.c @@ -353,11 +353,12 @@ static ssize_t m48t59_nvram_write(struct kobject *kobj, static struct bin_attribute m48t59_nvram_attr = { .attr = { .name = "nvram", - .mode = S_IRUGO | S_IWUGO, + .mode = S_IRUGO | S_IWUSR, .owner = THIS_MODULE, }, .read = m48t59_nvram_read, .write = m48t59_nvram_write, + .size = M48T59_NVRAM_SIZE, }; static int __devinit m48t59_rtc_probe(struct platform_device *pdev) diff --git a/drivers/rtc/rtc-stk17ta8.c b/drivers/rtc/rtc-stk17ta8.c index 8288b6b..a265da7 100644 --- a/drivers/rtc/rtc-stk17ta8.c +++ b/drivers/rtc/rtc-stk17ta8.c @@ -291,7 +291,7 @@ static ssize_t stk17ta8_nvram_write(struct kobject *kobj, static struct bin_attribute stk17ta8_nvram_attr = { .attr = { .name = "nvram", - .mode = S_IRUGO | S_IWUGO, + .mode = S_IRUGO | S_IWUSR, .owner = THIS_MODULE, }, .size = RTC_OFFSET, diff --git a/drivers/scsi/aic94xx/aic94xx_sds.c b/drivers/scsi/aic94xx/aic94xx_sds.c index 5b0932f..06509bf 100644 --- a/drivers/scsi/aic94xx/aic94xx_sds.c +++ b/drivers/scsi/aic94xx/aic94xx_sds.c @@ -377,7 +377,7 @@ out: #define FLASH_RESET 0xF0 -#define FLASH_SIZE 0x200000 +#define ASD_FLASH_SIZE 0x200000 #define FLASH_DIR_COOKIE "*** ADAPTEC FLASH DIRECTORY *** " #define FLASH_NEXT_ENTRY_OFFS 0x2000 #define FLASH_MAX_DIR_ENTRIES 32 @@ -609,7 +609,7 @@ static int asd_find_flash_dir(struct asd_ha_struct *asd_ha, struct asd_flash_dir *flash_dir) { u32 v; - for (v = 0; v < FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { + for (v = 0; v < ASD_FLASH_SIZE; v += FLASH_NEXT_ENTRY_OFFS) { asd_read_flash_seg(asd_ha, flash_dir, v, sizeof(FLASH_DIR_COOKIE)-1); if (memcmp(flash_dir->cookie, FLASH_DIR_COOKIE, diff --git a/drivers/serial/8250_pnp.c b/drivers/serial/8250_pnp.c index 926f58a..1de098e 100644 --- a/drivers/serial/8250_pnp.c +++ b/drivers/serial/8250_pnp.c @@ -69,6 +69,8 @@ static const struct pnp_device_id pnp_dev_table[] = { { "CTL3001", 0 }, /* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */ { "CTL3011", 0 }, + /* Davicom ISA 33.6K Modem */ + { "DAV0336", 0 }, /* Creative */ /* Creative Modem Blaster Flash56 DI5601-1 */ { "DMB1032", 0 }, @@ -345,6 +347,11 @@ static const struct pnp_device_id pnp_dev_table[] = { /* Fujitsu Wacom Tablet PC devices */ { "FUJ02E5", 0 }, { "FUJ02E6", 0 }, + /* + * LG C1 EXPRESS DUAL (C1-PB11A3) touch screen (actually a FUJ02E6 in + * disguise) + */ + { "LTS0001", 0 }, /* Rockwell's (PORALiNK) 33600 INT PNP */ { "WCI0003", 0 }, /* Unkown PnP modems */ @@ -432,7 +439,8 @@ serial_pnp_probe(struct pnp_dev *dev, const struct pnp_device_id *dev_id) } memset(&port, 0, sizeof(struct uart_port)); - port.irq = pnp_irq(dev, 0); + if (pnp_irq_valid(dev, 0)) + port.irq = pnp_irq(dev, 0); if (pnp_port_valid(dev, 0)) { port.iobase = pnp_port_start(dev, 0); port.iotype = UPIO_PORT; diff --git a/drivers/serial/atmel_serial.c b/drivers/serial/atmel_serial.c index 4d6b3c5..111da57 100644 --- a/drivers/serial/atmel_serial.c +++ b/drivers/serial/atmel_serial.c @@ -204,8 +204,6 @@ static u_int atmel_get_mctrl(struct uart_port *port) */ static void atmel_stop_tx(struct uart_port *port) { - struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; - UART_PUT_IDR(port, ATMEL_US_TXRDY); } @@ -214,8 +212,6 @@ static void atmel_stop_tx(struct uart_port *port) */ static void atmel_start_tx(struct uart_port *port) { - struct atmel_uart_port 
*atmel_port = (struct atmel_uart_port *) port; - UART_PUT_IER(port, ATMEL_US_TXRDY); } @@ -224,8 +220,6 @@ static void atmel_start_tx(struct uart_port *port) */ static void atmel_stop_rx(struct uart_port *port) { - struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; - UART_PUT_IDR(port, ATMEL_US_RXRDY); } @@ -409,7 +403,6 @@ static irqreturn_t atmel_interrupt(int irq, void *dev_id) */ static int atmel_startup(struct uart_port *port) { - struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; int retval; /* @@ -456,8 +449,6 @@ static int atmel_startup(struct uart_port *port) */ static void atmel_shutdown(struct uart_port *port) { - struct atmel_uart_port *atmel_port = (struct atmel_uart_port *) port; - /* * Disable all interrupts, port and break condition. */ diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c index f523cdf..a4e23cf 100644 --- a/drivers/serial/crisv10.c +++ b/drivers/serial/crisv10.c @@ -1,426 +1,10 @@ -/* $Id: serial.c,v 1.25 2004/09/29 10:33:49 starvik Exp $ - * +/* * Serial port driver for the ETRAX 100LX chip * - * Copyright (C) 1998, 1999, 2000, 2001, 2002, 2003 Axis Communications AB + * Copyright (C) 1998-2007 Axis Communications AB * * Many, many authors. Based once upon a time on serial.c for 16x50. * - * $Log: serial.c,v $ - * Revision 1.25 2004/09/29 10:33:49 starvik - * Resolved a dealock when printing debug from kernel. - * - * Revision 1.24 2004/08/27 23:25:59 johana - * rs_set_termios() must call change_speed() if c_iflag has changed or - * automatic XOFF handling will be enabled and transmitter will stop - * if 0x13 is received. - * - * Revision 1.23 2004/08/24 06:57:13 starvik - * More whitespace cleanup - * - * Revision 1.22 2004/08/24 06:12:20 starvik - * Whitespace cleanup - * - * Revision 1.20 2004/05/24 12:00:20 starvik - * Big merge of stuff from Linux 2.4 (e.g. manual mode for the serial port). - * - * Revision 1.19 2004/05/17 13:12:15 starvik - * Kernel console hook - * Big merge from Linux 2.4 still pending. - * - * Revision 1.18 2003/10/28 07:18:30 starvik - * Compiles with debug info - * - * Revision 1.17 2003/07/04 08:27:37 starvik - * Merge of Linux 2.5.74 - * - * Revision 1.16 2003/06/13 10:05:19 johana - * Help the user to avoid trouble by: - * Forcing mixed mode for status/control lines if not all pins are used. - * - * Revision 1.15 2003/06/13 09:43:01 johana - * Merged in the following changes from os/linux/arch/cris/drivers/serial.c - * + some minor changes to reduce diff. - * - * Revision 1.49 2003/05/30 11:31:54 johana - * Merged in change-branch--serial9bit that adds CMSPAR support for sticky - * parity (mark/space) - * - * Revision 1.48 2003/05/30 11:03:57 johana - * Implemented rs_send_xchar() by disabling the DMA and writing manually. - * Added e100_disable_txdma_channel() and e100_enable_txdma_channel(). - * Fixed rs_throttle() and rs_unthrottle() to properly call rs_send_xchar - * instead of setting info->x_char and check the CRTSCTS flag before - * controlling the rts pin. - * - * Revision 1.14 2003/04/09 08:12:44 pkj - * Corrected typo changes made upstream. 
- * * rs_close() "tx FIFO wait" code moved into right place, bug & -> && fixed - * and make a special case out of port 1 (R_DMA_CHx_STATUS is broken for that) - * * e100_disable_rx/enable_rx just disables/enables the receiver, not RTS - * - * Revision 1.23 2000/01/24 17:46:19 johana - * Wait for flush of DMA/FIFO when closing port. - * - * Revision 1.22 2000/01/20 18:10:23 johana - * Added TIOCMGET ioctl to return modem status. - * Implemented modem status/control that works with the extra signals - * (DTR, DSR, RI,CD) as well. - * 3 different modes supported: - * ser0 on PB (Bundy), ser1 on PB (Lisa) and ser2 on PA (Bundy) - * Fixed DEF_TX value that caused the serial transmitter pin (txd) to go to 0 when - * closing the last filehandle, NASTY!. - * Added break generation, not tested though! - * Use IRQF_SHARED when request_irq() for ser2 and ser3 (shared with) par0 and par1. - * You can't use them at the same time (yet..), but you can hopefully switch - * between ser2/par0, ser3/par1 with the same kernel config. - * Replaced some magic constants with defines - * - * */ static char *serial_version = "$Revision: 1.25 $"; @@ -446,6 +30,7 @@ static char *serial_version = "$Revision: 1.25 $"; #include <asm/io.h> #include <asm/irq.h> +#include <asm/dma.h> #include <asm/system.h> #include <linux/delay.h> @@ -454,8 +39,9 @@ static char *serial_version = "$Revision: 1.25 $"; /* non-arch dependent serial structures are in linux/serial.h */ #include <linux/serial.h> /* while we keep our own stuff (struct e100_serial) in a local .h file */ -#include "serial.h" +#include "crisv10.h" #include <asm/fasttimer.h> +#include <asm/arch/io_interface_mux.h> #ifdef CONFIG_ETRAX_SERIAL_FAST_TIMER #ifndef CONFIG_ETRAX_FAST_TIMER @@ -504,18 +90,6 @@ struct tty_driver *serial_driver; from eLinux */ #define SERIAL_HANDLE_EARLY_ERRORS -/* Defined and used in n_tty.c, but we need it here as well */ -#define TTY_THRESHOLD_THROTTLE 128 - -/* Due to buffersizes and threshold values, our SERIAL_DESCR_BUF_SIZE - * must not be to high or flow control won't work if we leave it to the tty - * layer so we have our own throttling in flush_to_flip - * TTY_FLIPBUF_SIZE=512, - * TTY_THRESHOLD_THROTTLE/UNTHROTTLE=128 - * BUF_SIZE can't be > 128 - */ -#define CRIS_BUF_SIZE 512 - /* Currently 16 descriptors x 128 bytes = 2048 bytes */ #define SERIAL_DESCR_BUF_SIZE 256 @@ -588,13 +162,13 @@ unsigned long timer_data_to_ns(unsigned long timer_data); static void change_speed(struct e100_serial *info); static void rs_throttle(struct tty_struct * tty); static void rs_wait_until_sent(struct tty_struct *tty, int timeout); -static int rs_write(struct tty_struct * tty, int from_user, - const unsigned char *buf, int count); +static int rs_write(struct tty_struct *tty, + const unsigned char *buf, int count); #ifdef CONFIG_ETRAX_RS485 -static int e100_write_rs485(struct tty_struct * tty, int from_user, - const unsigned char *buf, int count); +static int e100_write_rs485(struct tty_struct *tty, + const unsigned char *buf, int count); #endif -static int get_lsr_info(struct e100_serial * info, unsigned int *value); +static int get_lsr_info(struct e100_serial *info, unsigned int *value); #define DEF_BAUD 115200 /* 115.2 kbit/s */ @@ -679,20 +253,39 @@ static struct e100_serial rs_table[] = { .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 2, + .dma_owner = dma_ser0, + .io_if = if_serial_0, #ifdef CONFIG_ETRAX_SERIAL_PORT0 .enabled = 1, #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT .dma_out_enabled = 1, + .dma_out_nbr = SER0_TX_DMA_NBR, + 
.dma_out_irq_nbr = SER0_DMA_TX_IRQ_NBR, + .dma_out_irq_flags = IRQF_DISABLED, + .dma_out_irq_description = "serial 0 dma tr", #else .dma_out_enabled = 0, + .dma_out_nbr = UINT_MAX, + .dma_out_irq_nbr = 0, + .dma_out_irq_flags = 0, + .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN .dma_in_enabled = 1, + .dma_in_nbr = SER0_RX_DMA_NBR, + .dma_in_irq_nbr = SER0_DMA_RX_IRQ_NBR, + .dma_in_irq_flags = IRQF_DISABLED, + .dma_in_irq_description = "serial 0 dma rec", #else - .dma_in_enabled = 0 + .dma_in_enabled = 0, + .dma_in_nbr = UINT_MAX, + .dma_in_irq_nbr = 0, + .dma_in_irq_flags = 0, + .dma_in_irq_description = NULL, #endif #else .enabled = 0, + .io_if_description = NULL, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif @@ -714,20 +307,42 @@ static struct e100_serial rs_table[] = { .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 3, + .dma_owner = dma_ser1, + .io_if = if_serial_1, #ifdef CONFIG_ETRAX_SERIAL_PORT1 .enabled = 1, + .io_if_description = "ser1", #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT .dma_out_enabled = 1, + .dma_out_nbr = SER1_TX_DMA_NBR, + .dma_out_irq_nbr = SER1_DMA_TX_IRQ_NBR, + .dma_out_irq_flags = IRQF_DISABLED, + .dma_out_irq_description = "serial 1 dma tr", #else .dma_out_enabled = 0, + .dma_out_nbr = UINT_MAX, + .dma_out_irq_nbr = 0, + .dma_out_irq_flags = 0, + .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN .dma_in_enabled = 1, + .dma_in_nbr = SER1_RX_DMA_NBR, + .dma_in_irq_nbr = SER1_DMA_RX_IRQ_NBR, + .dma_in_irq_flags = IRQF_DISABLED, + .dma_in_irq_description = "serial 1 dma rec", #else - .dma_in_enabled = 0 + .dma_in_enabled = 0, + .dma_in_enabled = 0, + .dma_in_nbr = UINT_MAX, + .dma_in_irq_nbr = 0, + .dma_in_irq_flags = 0, + .dma_in_irq_description = NULL, #endif #else .enabled = 0, + .io_if_description = NULL, + .dma_in_irq_nbr = 0, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif @@ -748,20 +363,40 @@ static struct e100_serial rs_table[] = { .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 0, + .dma_owner = dma_ser2, + .io_if = if_serial_2, #ifdef CONFIG_ETRAX_SERIAL_PORT2 .enabled = 1, + .io_if_description = "ser2", #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT .dma_out_enabled = 1, + .dma_out_nbr = SER2_TX_DMA_NBR, + .dma_out_irq_nbr = SER2_DMA_TX_IRQ_NBR, + .dma_out_irq_flags = IRQF_DISABLED, + .dma_out_irq_description = "serial 2 dma tr", #else .dma_out_enabled = 0, + .dma_out_nbr = UINT_MAX, + .dma_out_irq_nbr = 0, + .dma_out_irq_flags = 0, + .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN .dma_in_enabled = 1, + .dma_in_nbr = SER2_RX_DMA_NBR, + .dma_in_irq_nbr = SER2_DMA_RX_IRQ_NBR, + .dma_in_irq_flags = IRQF_DISABLED, + .dma_in_irq_description = "serial 2 dma rec", #else - .dma_in_enabled = 0 + .dma_in_enabled = 0, + .dma_in_nbr = UINT_MAX, + .dma_in_irq_nbr = 0, + .dma_in_irq_flags = 0, + .dma_in_irq_description = NULL, #endif #else .enabled = 0, + .io_if_description = NULL, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif @@ -782,20 +417,40 @@ static struct e100_serial rs_table[] = { .rx_ctrl = DEF_RX, .tx_ctrl = DEF_TX, .iseteop = 1, + .dma_owner = dma_ser3, + .io_if = if_serial_3, #ifdef CONFIG_ETRAX_SERIAL_PORT3 .enabled = 1, + .io_if_description = "ser3", #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT .dma_out_enabled = 1, + .dma_out_nbr = SER3_TX_DMA_NBR, + .dma_out_irq_nbr = SER3_DMA_TX_IRQ_NBR, + .dma_out_irq_flags = IRQF_DISABLED, + .dma_out_irq_description = "serial 3 dma tr", #else .dma_out_enabled = 0, + .dma_out_nbr = UINT_MAX, + .dma_out_irq_nbr = 0, + 
.dma_out_irq_flags = 0, + .dma_out_irq_description = NULL, #endif #ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN .dma_in_enabled = 1, + .dma_in_nbr = SER3_RX_DMA_NBR, + .dma_in_irq_nbr = SER3_DMA_RX_IRQ_NBR, + .dma_in_irq_flags = IRQF_DISABLED, + .dma_in_irq_description = "serial 3 dma rec", #else - .dma_in_enabled = 0 + .dma_in_enabled = 0, + .dma_in_nbr = UINT_MAX, + .dma_in_irq_nbr = 0, + .dma_in_irq_flags = 0, + .dma_in_irq_description = NULL #endif #else .enabled = 0, + .io_if_description = NULL, .dma_out_enabled = 0, .dma_in_enabled = 0 #endif @@ -1416,12 +1071,11 @@ e100_dtr(struct e100_serial *info, int set) { unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); *e100_modem_pins[info->line].dtr_shadow &= ~mask; *e100_modem_pins[info->line].dtr_shadow |= (set ? 0 : mask); *e100_modem_pins[info->line].dtr_port = *e100_modem_pins[info->line].dtr_shadow; - restore_flags(flags); + local_irq_restore(flags); } #ifdef SERIAL_DEBUG_IO @@ -1440,12 +1094,11 @@ e100_rts(struct e100_serial *info, int set) { #ifndef CONFIG_SVINTO_SIM unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); info->rx_ctrl &= ~E100_RTS_MASK; info->rx_ctrl |= (set ? 0 : E100_RTS_MASK); /* RTS is active low */ info->port[REG_REC_CTRL] = info->rx_ctrl; - restore_flags(flags); + local_irq_restore(flags); #ifdef SERIAL_DEBUG_IO printk("ser%i rts %i\n", info->line, set); #endif @@ -1463,12 +1116,11 @@ e100_ri_out(struct e100_serial *info, int set) unsigned char mask = e100_modem_pins[info->line].ri_mask; unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); *e100_modem_pins[info->line].ri_shadow &= ~mask; *e100_modem_pins[info->line].ri_shadow |= (set ? 0 : mask); *e100_modem_pins[info->line].ri_port = *e100_modem_pins[info->line].ri_shadow; - restore_flags(flags); + local_irq_restore(flags); } #endif } @@ -1481,12 +1133,11 @@ e100_cd_out(struct e100_serial *info, int set) unsigned char mask = e100_modem_pins[info->line].cd_mask; unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); *e100_modem_pins[info->line].cd_shadow &= ~mask; *e100_modem_pins[info->line].cd_shadow |= (set ? 
0 : mask); *e100_modem_pins[info->line].cd_port = *e100_modem_pins[info->line].cd_shadow; - restore_flags(flags); + local_irq_restore(flags); } #endif } @@ -1560,8 +1211,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info) /* Disable output DMA channel for the serial port in question * ( set to something other then serialX) */ - save_flags(flags); - cli(); + local_irq_save(flags); DFLOW(DEBUG_LOG(info->line, "disable_txdma_channel %i\n", info->line)); if (info->line == 0) { if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma6)) == @@ -1589,7 +1239,7 @@ static void e100_disable_txdma_channel(struct e100_serial *info) } } *R_GEN_CONFIG = genconfig_shadow; - restore_flags(flags); + local_irq_restore(flags); } @@ -1597,8 +1247,7 @@ static void e100_enable_txdma_channel(struct e100_serial *info) { unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); DFLOW(DEBUG_LOG(info->line, "enable_txdma_channel %i\n", info->line)); /* Enable output DMA channel for the serial port in question */ if (info->line == 0) { @@ -1615,7 +1264,7 @@ static void e100_enable_txdma_channel(struct e100_serial *info) genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma4, serial3); } *R_GEN_CONFIG = genconfig_shadow; - restore_flags(flags); + local_irq_restore(flags); } static void e100_disable_rxdma_channel(struct e100_serial *info) @@ -1625,8 +1274,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info) /* Disable input DMA channel for the serial port in question * ( set to something other then serialX) */ - save_flags(flags); - cli(); + local_irq_save(flags); if (info->line == 0) { if ((genconfig_shadow & IO_MASK(R_GEN_CONFIG, dma7)) == IO_STATE(R_GEN_CONFIG, dma7, serial0)) { @@ -1653,7 +1301,7 @@ static void e100_disable_rxdma_channel(struct e100_serial *info) } } *R_GEN_CONFIG = genconfig_shadow; - restore_flags(flags); + local_irq_restore(flags); } @@ -1661,8 +1309,7 @@ static void e100_enable_rxdma_channel(struct e100_serial *info) { unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); /* Enable input DMA channel for the serial port in question */ if (info->line == 0) { genconfig_shadow &= ~IO_MASK(R_GEN_CONFIG, dma7); @@ -1678,7 +1325,7 @@ static void e100_enable_rxdma_channel(struct e100_serial *info) genconfig_shadow |= IO_STATE(R_GEN_CONFIG, dma5, serial3); } *R_GEN_CONFIG = genconfig_shadow; - restore_flags(flags); + local_irq_restore(flags); } #ifdef SERIAL_HANDLE_EARLY_ERRORS @@ -1785,7 +1432,7 @@ e100_enable_rs485(struct tty_struct *tty,struct rs485_control *r) } static int -e100_write_rs485(struct tty_struct *tty, int from_user, +e100_write_rs485(struct tty_struct *tty, const unsigned char *buf, int count) { struct e100_serial * info = (struct e100_serial *)tty->driver_data; @@ -1798,7 +1445,7 @@ e100_write_rs485(struct tty_struct *tty, int from_user, */ info->rs485.enabled = 1; /* rs_write now deals with RS485 if enabled */ - count = rs_write(tty, from_user, buf, count); + count = rs_write(tty, buf, count); info->rs485.enabled = old_enabled; return count; } @@ -1836,7 +1483,7 @@ rs_stop(struct tty_struct *tty) unsigned long flags; unsigned long xoff; - save_flags(flags); cli(); + local_irq_save(flags); DFLOW(DEBUG_LOG(info->line, "XOFF rs_stop xmit %i\n", CIRC_CNT(info->xmit.head, info->xmit.tail,SERIAL_XMIT_SIZE))); @@ -1848,7 +1495,7 @@ rs_stop(struct tty_struct *tty) } *((unsigned long *)&info->port[REG_XOFF]) = xoff; - restore_flags(flags); + local_irq_restore(flags); } } @@ -1860,7 +1507,7 @@ rs_start(struct tty_struct *tty) unsigned 
long flags; unsigned long xoff; - save_flags(flags); cli(); + local_irq_save(flags); DFLOW(DEBUG_LOG(info->line, "XOFF rs_start xmit %i\n", CIRC_CNT(info->xmit.head, info->xmit.tail,SERIAL_XMIT_SIZE))); @@ -1875,7 +1522,7 @@ rs_start(struct tty_struct *tty) info->xmit.head != info->xmit.tail && info->xmit.buf) e100_enable_serial_tx_ready_irq(info); - restore_flags(flags); + local_irq_restore(flags); } } @@ -2055,8 +1702,7 @@ static int serial_fast_timer_expired = 0; static void flush_timeout_function(unsigned long data); #define START_FLUSH_FAST_TIMER_TIME(info, string, usec) {\ unsigned long timer_flags; \ - save_flags(timer_flags); \ - cli(); \ + local_irq_save(timer_flags); \ if (fast_timers[info->line].function == NULL) { \ serial_fast_timer_started++; \ TIMERD(DEBUG_LOG(info->line, "start_timer %i ", info->line)); \ @@ -2070,7 +1716,7 @@ static void flush_timeout_function(unsigned long data); else { \ TIMERD(DEBUG_LOG(info->line, "timer %i already running\n", info->line)); \ } \ - restore_flags(timer_flags); \ + local_irq_restore(timer_flags); \ } #define START_FLUSH_FAST_TIMER(info, string) START_FLUSH_FAST_TIMER_TIME(info, string, info->flush_time_usec) @@ -2099,8 +1745,7 @@ append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer) { unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); if (!info->first_recv_buffer) info->first_recv_buffer = buffer; @@ -2113,7 +1758,7 @@ append_recv_buffer(struct e100_serial *info, struct etrax_recv_buffer *buffer) if (info->recv_cnt > info->max_recv_cnt) info->max_recv_cnt = info->recv_cnt; - restore_flags(flags); + local_irq_restore(flags); } static int @@ -2133,11 +1778,7 @@ add_char_and_flag(struct e100_serial *info, unsigned char data, unsigned char fl info->icount.rx++; } else { struct tty_struct *tty = info->tty; - *tty->flip.char_buf_ptr = data; - *tty->flip.flag_buf_ptr = flag; - tty->flip.flag_buf_ptr++; - tty->flip.char_buf_ptr++; - tty->flip.count++; + tty_insert_flip_char(tty, data, flag); info->icount.rx++; } @@ -2322,7 +1963,6 @@ start_receive(struct e100_serial *info) */ return; #endif - info->tty->flip.count = 0; if (info->uses_dma_in) { /* reset the input dma channel to be sure it works */ @@ -2484,32 +2124,20 @@ static void flush_to_flip_buffer(struct e100_serial *info) { struct tty_struct *tty; struct etrax_recv_buffer *buffer; - unsigned int length; unsigned long flags; - int max_flip_size; - - if (!info->first_recv_buffer) - return; - save_flags(flags); - cli(); + local_irq_save(flags); + tty = info->tty; - if (!(tty = info->tty)) { - restore_flags(flags); + if (!tty) { + local_irq_restore(flags); return; } while ((buffer = info->first_recv_buffer) != NULL) { unsigned int count = buffer->length; - count = tty_buffer_request_room(tty, count); - if (count == 0) /* Throttle ?? 
*/ - break; - - if (count > 1) - tty_insert_flip_strings(tty, buffer->buffer, count - 1); - tty_insert_flip_char(tty, buffer->buffer[count-1], buffer->error); - + tty_insert_flip_string(tty, buffer->buffer, count); info->recv_cnt -= count; if (count == buffer->length) { @@ -2525,18 +2153,9 @@ static void flush_to_flip_buffer(struct e100_serial *info) if (!info->first_recv_buffer) info->last_recv_buffer = NULL; - restore_flags(flags); - - DFLIP( - if (1) { - DEBUG_LOG(info->line, "*** rxtot %i\n", info->icount.rx); - DEBUG_LOG(info->line, "ldisc %lu\n", tty->ldisc.chars_in_buffer(tty)); - DEBUG_LOG(info->line, "room %lu\n", tty->ldisc.receive_room(tty)); - } + local_irq_restore(flags); - ); - - /* this includes a check for low-latency */ + /* This includes a check for low-latency */ tty_flip_buffer_push(tty); } @@ -2679,21 +2298,7 @@ struct e100_serial * handle_ser_rx_interrupt_no_dma(struct e100_serial *info) printk("!NO TTY!\n"); return info; } - if (tty->flip.count >= CRIS_BUF_SIZE - TTY_THRESHOLD_THROTTLE) { - /* check TTY_THROTTLED first so it indicates our state */ - if (!test_and_set_bit(TTY_THROTTLED, &tty->flags)) { - DFLOW(DEBUG_LOG(info->line, "rs_throttle flip.count: %i\n", tty->flip.count)); - rs_throttle(tty); - } - } - if (tty->flip.count >= CRIS_BUF_SIZE) { - DEBUG_LOG(info->line, "force FLIP! %i\n", tty->flip.count); - tty->flip.work.func((void *) tty); - if (tty->flip.count >= CRIS_BUF_SIZE) { - DEBUG_LOG(info->line, "FLIP FULL! %i\n", tty->flip.count); - return info; /* if TTY_DONT_FLIP is set */ - } - } + /* Read data and status at the same time */ data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]); more_data: @@ -2746,27 +2351,26 @@ more_data: DEBUG_LOG(info->line, "EBRK %i\n", info->break_detected_cnt); info->errorcode = ERRCODE_INSERT_BREAK; } else { + unsigned char data = IO_EXTRACT(R_SERIAL0_READ, + data_in, data_read); + char flag = TTY_NORMAL; if (info->errorcode == ERRCODE_INSERT_BREAK) { - info->icount.brk++; - *tty->flip.char_buf_ptr = 0; - *tty->flip.flag_buf_ptr = TTY_BREAK; - tty->flip.flag_buf_ptr++; - tty->flip.char_buf_ptr++; - tty->flip.count++; + struct tty_struct *tty = info->tty; + tty_insert_flip_char(tty, 0, flag); info->icount.rx++; } - *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read); if (data_read & IO_MASK(R_SERIAL0_READ, par_err)) { info->icount.parity++; - *tty->flip.flag_buf_ptr = TTY_PARITY; + flag = TTY_PARITY; } else if (data_read & IO_MASK(R_SERIAL0_READ, overrun)) { info->icount.overrun++; - *tty->flip.flag_buf_ptr = TTY_OVERRUN; + flag = TTY_OVERRUN; } else if (data_read & IO_MASK(R_SERIAL0_READ, framing_err)) { info->icount.frame++; - *tty->flip.flag_buf_ptr = TTY_FRAME; + flag = TTY_FRAME; } + tty_insert_flip_char(tty, data, flag); info->errorcode = 0; } info->break_detected_cnt = 0; @@ -2782,16 +2386,14 @@ more_data: log_int(rdpc(), 0, 0); } ); - *tty->flip.char_buf_ptr = IO_EXTRACT(R_SERIAL0_READ, data_in, data_read); - *tty->flip.flag_buf_ptr = 0; + tty_insert_flip_char(tty, + IO_EXTRACT(R_SERIAL0_READ, data_in, data_read), + TTY_NORMAL); } else { DEBUG_LOG(info->line, "ser_rx int but no data_avail %08lX\n", data_read); } - tty->flip.flag_buf_ptr++; - tty->flip.char_buf_ptr++; - tty->flip.count++; info->icount.rx++; data_read = *((unsigned long *)&info->port[REG_DATA_STATUS32]); if (data_read & IO_MASK(R_SERIAL0_READ, data_avail)) { @@ -2929,7 +2531,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info) if (info->x_char) { unsigned char rstat; DFLOW(DEBUG_LOG(info->line, "tx_int: 
xchar 0x%02X\n", info->x_char)); - save_flags(flags); cli(); + local_irq_save(flags); rstat = info->port[REG_STATUS]; DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat)); @@ -2938,7 +2540,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info) info->x_char = 0; /* We must enable since it is disabled in ser_interrupt */ e100_enable_serial_tx_ready_irq(info); - restore_flags(flags); + local_irq_restore(flags); return; } if (info->uses_dma_out) { @@ -2946,7 +2548,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info) int i; /* We only use normal tx interrupt when sending x_char */ DFLOW(DEBUG_LOG(info->line, "tx_int: xchar sent\n", 0)); - save_flags(flags); cli(); + local_irq_save(flags); rstat = info->port[REG_STATUS]; DFLOW(DEBUG_LOG(info->line, "stat %x\n", rstat)); e100_disable_serial_tx_ready_irq(info); @@ -2959,7 +2561,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info) nop(); *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, continue); - restore_flags(flags); + local_irq_restore(flags); return; } /* Normal char-by-char interrupt */ @@ -2973,7 +2575,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info) } DINTR2(DEBUG_LOG(info->line, "tx_int %c\n", info->xmit.buf[info->xmit.tail])); /* Send a byte, rs485 timing is critical so turn of ints */ - save_flags(flags); cli(); + local_irq_save(flags); info->port[REG_TR_DATA] = info->xmit.buf[info->xmit.tail]; info->xmit.tail = (info->xmit.tail + 1) & (SERIAL_XMIT_SIZE-1); info->icount.tx++; @@ -2997,7 +2599,7 @@ static void handle_ser_tx_interrupt(struct e100_serial *info) /* We must enable since it is disabled in ser_interrupt */ e100_enable_serial_tx_ready_irq(info); } - restore_flags(flags); + local_irq_restore(flags); if (CIRC_CNT(info->xmit.head, info->xmit.tail, @@ -3022,7 +2624,7 @@ ser_interrupt(int irq, void *dev_id) int handled = 0; static volatile unsigned long reentered_ready_mask = 0; - save_flags(flags); cli(); + local_irq_save(flags); irq_mask1_rd = *R_IRQ_MASK1_RD; /* First handle all rx interrupts with ints disabled */ info = rs_table; @@ -3067,7 +2669,7 @@ ser_interrupt(int irq, void *dev_id) /* Unblock the serial interrupt */ *R_VECT_MASK_SET = IO_STATE(R_VECT_MASK_SET, serial, set); - sti(); + local_irq_enable(); ready_mask = (1 << (8+1+2*0)); /* ser0 tr_ready */ info = rs_table; for (i = 0; i < NR_PORTS; i++) { @@ -3080,11 +2682,11 @@ ser_interrupt(int irq, void *dev_id) ready_mask <<= 2; } /* handle_ser_tx_interrupt enables tr_ready interrupts */ - cli(); + local_irq_disable(); /* Handle reentered TX interrupt */ irq_mask1_rd = reentered_ready_mask; } - cli(); + local_irq_disable(); tx_started = 0; } else { unsigned long ready_mask; @@ -3100,7 +2702,7 @@ ser_interrupt(int irq, void *dev_id) } } - restore_flags(flags); + local_irq_restore(flags); return IRQ_RETVAL(handled); } /* ser_interrupt */ #endif @@ -3121,11 +2723,13 @@ ser_interrupt(int irq, void *dev_id) * them using rs_sched_event(), and they get done here. 
*/ static void -do_softint(void *private_) +do_softint(struct work_struct *work) { - struct e100_serial *info = (struct e100_serial *) private_; + struct e100_serial *info; struct tty_struct *tty; + info = container_of(work, struct e100_serial, work); + tty = info->tty; if (!tty) return; @@ -3145,13 +2749,12 @@ startup(struct e100_serial * info) if (!xmit_page) return -ENOMEM; - save_flags(flags); - cli(); + local_irq_save(flags); /* if it was already initialized, skip this */ if (info->flags & ASYNC_INITIALIZED) { - restore_flags(flags); + local_irq_restore(flags); free_page(xmit_page); return 0; } @@ -3277,7 +2880,7 @@ startup(struct e100_serial * info) info->flags |= ASYNC_INITIALIZED; - restore_flags(flags); + local_irq_restore(flags); return 0; } @@ -3328,8 +2931,7 @@ shutdown(struct e100_serial * info) info->irq); #endif - save_flags(flags); - cli(); /* Disable interrupts */ + local_irq_save(flags); if (info->xmit.buf) { free_page((unsigned long)info->xmit.buf); @@ -3353,7 +2955,7 @@ shutdown(struct e100_serial * info) set_bit(TTY_IO_ERROR, &info->tty->flags); info->flags &= ~ASYNC_INITIALIZED; - restore_flags(flags); + local_irq_restore(flags); } @@ -3411,7 +3013,6 @@ change_speed(struct e100_serial *info) DBAUD(printk("using external baudrate: %lu\n", CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8)); info->baud = CONFIG_ETRAX_EXTERN_PB6CLK_FREQ/8; } - } #endif else { @@ -3445,8 +3046,7 @@ change_speed(struct e100_serial *info) #ifndef CONFIG_SVINTO_SIM /* start with default settings and then fill in changes */ - save_flags(flags); - cli(); + local_irq_save(flags); /* 8 bit, no/even parity */ info->rx_ctrl &= ~(IO_MASK(R_SERIAL0_REC_CTRL, rec_bitnr) | IO_MASK(R_SERIAL0_REC_CTRL, rec_par_en) | @@ -3510,7 +3110,7 @@ change_speed(struct e100_serial *info) } *((unsigned long *)&info->port[REG_XOFF]) = xoff; - restore_flags(flags); + local_irq_restore(flags); #endif /* !CONFIG_SVINTO_SIM */ update_char_time(info); @@ -3538,13 +3138,12 @@ rs_flush_chars(struct tty_struct *tty) /* this protection might not exactly be necessary here */ - save_flags(flags); - cli(); + local_irq_save(flags); start_transmit(info); - restore_flags(flags); + local_irq_restore(flags); } -static int rs_raw_write(struct tty_struct * tty, int from_user, +static int rs_raw_write(struct tty_struct *tty, const unsigned char *buf, int count) { int c, ret = 0; @@ -3567,53 +3166,19 @@ static int rs_raw_write(struct tty_struct * tty, int from_user, SIMCOUT(buf, count); return count; #endif - save_flags(flags); + local_save_flags(flags); DFLOW(DEBUG_LOG(info->line, "write count %i ", count)); DFLOW(DEBUG_LOG(info->line, "ldisc %i\n", tty->ldisc.chars_in_buffer(tty))); - /* the cli/restore_flags pairs below are needed because the - * DMA interrupt handler moves the info->xmit values. the memcpy - * needs to be in the critical region unfortunately, because we - * need to read xmit values, memcpy, write xmit values in one - * atomic operation... this could perhaps be avoided by more clever - * design. + /* The local_irq_disable/restore_flags pairs below are needed + * because the DMA interrupt handler moves the info->xmit values. + * the memcpy needs to be in the critical region unfortunately, + * because we need to read xmit values, memcpy, write xmit values + * in one atomic operation... this could perhaps be avoided by + * more clever design. 
*/ - if (from_user) { - mutex_lock(&tmp_buf_mutex); - while (1) { - int c1; - c = CIRC_SPACE_TO_END(info->xmit.head, - info->xmit.tail, - SERIAL_XMIT_SIZE); - if (count < c) - c = count; - if (c <= 0) - break; - - c -= copy_from_user(tmp_buf, buf, c); - if (!c) { - if (!ret) - ret = -EFAULT; - break; - } - cli(); - c1 = CIRC_SPACE_TO_END(info->xmit.head, - info->xmit.tail, - SERIAL_XMIT_SIZE); - if (c1 < c) - c = c1; - memcpy(info->xmit.buf + info->xmit.head, tmp_buf, c); - info->xmit.head = ((info->xmit.head + c) & - (SERIAL_XMIT_SIZE-1)); - restore_flags(flags); - buf += c; - count -= c; - ret += c; - } - mutex_unlock(&tmp_buf_mutex); - } else { - cli(); + local_irq_disable(); while (count) { c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, @@ -3631,8 +3196,7 @@ static int rs_raw_write(struct tty_struct * tty, int from_user, count -= c; ret += c; } - restore_flags(flags); - } + local_irq_restore(flags); /* enable transmitter if not running, unless the tty is stopped * this does not need IRQ protection since if tr_running == 0 @@ -3651,7 +3215,7 @@ static int rs_raw_write(struct tty_struct * tty, int from_user, } /* raw_raw_write() */ static int -rs_write(struct tty_struct * tty, int from_user, +rs_write(struct tty_struct *tty, const unsigned char *buf, int count) { #if defined(CONFIG_ETRAX_RS485) @@ -3678,7 +3242,7 @@ rs_write(struct tty_struct * tty, int from_user, } #endif /* CONFIG_ETRAX_RS485 */ - count = rs_raw_write(tty, from_user, buf, count); + count = rs_raw_write(tty, buf, count); #if defined(CONFIG_ETRAX_RS485) if (info->rs485.enabled) @@ -3746,10 +3310,9 @@ rs_flush_buffer(struct tty_struct *tty) struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned long flags; - save_flags(flags); - cli(); + local_irq_save(flags); info->xmit.head = info->xmit.tail = 0; - restore_flags(flags); + local_irq_restore(flags); tty_wakeup(tty); } @@ -3767,7 +3330,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch) { struct e100_serial *info = (struct e100_serial *)tty->driver_data; unsigned long flags; - save_flags(flags); cli(); + local_irq_save(flags); if (info->uses_dma_out) { /* Put the DMA on hold and disable the channel */ *info->ocmdadr = IO_STATE(R_DMA_CH6_CMD, cmd, hold); @@ -3784,7 +3347,7 @@ static void rs_send_xchar(struct tty_struct *tty, char ch) DFLOW(DEBUG_LOG(info->line, "rs_send_xchar 0x%02X\n", ch)); info->x_char = ch; e100_enable_serial_tx_ready_irq(info); - restore_flags(flags); + local_irq_restore(flags); } /* @@ -3996,21 +3559,61 @@ char *get_control_state_str(int MLines, char *s) } #endif +static void +rs_break(struct tty_struct *tty, int break_state) +{ + struct e100_serial *info = (struct e100_serial *)tty->driver_data; + unsigned long flags; + + if (!info->port) + return; + + local_irq_save(flags); + if (break_state == -1) { + /* Go to manual mode and set the txd pin to 0 */ + /* Clear bit 7 (txd) and 6 (tr_enable) */ + info->tx_ctrl &= 0x3F; + } else { + /* Set bit 7 (txd) and 6 (tr_enable) */ + info->tx_ctrl |= (0x80 | 0x40); + } + info->port[REG_TR_CTRL] = info->tx_ctrl; + local_irq_restore(flags); +} + static int -get_modem_info(struct e100_serial * info, unsigned int *value) +rs_tiocmset(struct tty_struct *tty, struct file *file, + unsigned int set, unsigned int clear) { - unsigned int result; - /* Polarity isn't verified */ -#if 0 /*def SERIAL_DEBUG_IO */ + struct e100_serial *info = (struct e100_serial *)tty->driver_data; - printk("get_modem_info: RTS: %i DTR: %i CD: %i RI: %i DSR: %i CTS: %i\n", - E100_RTS_GET(info), - 
E100_DTR_GET(info), - E100_CD_GET(info), - E100_RI_GET(info), - E100_DSR_GET(info), - E100_CTS_GET(info)); -#endif + if (clear & TIOCM_RTS) + e100_rts(info, 0); + if (clear & TIOCM_DTR) + e100_dtr(info, 0); + /* Handle FEMALE behaviour */ + if (clear & TIOCM_RI) + e100_ri_out(info, 0); + if (clear & TIOCM_CD) + e100_cd_out(info, 0); + + if (set & TIOCM_RTS) + e100_rts(info, 1); + if (set & TIOCM_DTR) + e100_dtr(info, 1); + /* Handle FEMALE behaviour */ + if (set & TIOCM_RI) + e100_ri_out(info, 1); + if (set & TIOCM_CD) + e100_cd_out(info, 1); + return 0; +} + +static int +rs_tiocmget(struct tty_struct *tty, struct file *file) +{ + struct e100_serial *info = (struct e100_serial *)tty->driver_data; + unsigned int result; result = (!E100_RTS_GET(info) ? TIOCM_RTS : 0) @@ -4021,95 +3624,20 @@ get_modem_info(struct e100_serial * info, unsigned int *value) | (!E100_CTS_GET(info) ? TIOCM_CTS : 0); #ifdef SERIAL_DEBUG_IO - printk("e100ser: modem state: %i 0x%08X\n", result, result); + printk(KERN_DEBUG "ser%i: modem state: %i 0x%08X\n", + info->line, result, result); { char s[100]; get_control_state_str(result, s); - printk("state: %s\n", s); + printk(KERN_DEBUG "state: %s\n", s); } #endif - if (copy_to_user(value, &result, sizeof(int))) - return -EFAULT; - return 0; -} + return result; - -static int -set_modem_info(struct e100_serial * info, unsigned int cmd, - unsigned int *value) -{ - unsigned int arg; - - if (copy_from_user(&arg, value, sizeof(int))) - return -EFAULT; - - switch (cmd) { - case TIOCMBIS: - if (arg & TIOCM_RTS) { - e100_rts(info, 1); - } - if (arg & TIOCM_DTR) { - e100_dtr(info, 1); - } - /* Handle FEMALE behaviour */ - if (arg & TIOCM_RI) { - e100_ri_out(info, 1); - } - if (arg & TIOCM_CD) { - e100_cd_out(info, 1); - } - break; - case TIOCMBIC: - if (arg & TIOCM_RTS) { - e100_rts(info, 0); - } - if (arg & TIOCM_DTR) { - e100_dtr(info, 0); - } - /* Handle FEMALE behaviour */ - if (arg & TIOCM_RI) { - e100_ri_out(info, 0); - } - if (arg & TIOCM_CD) { - e100_cd_out(info, 0); - } - break; - case TIOCMSET: - e100_rts(info, arg & TIOCM_RTS); - e100_dtr(info, arg & TIOCM_DTR); - /* Handle FEMALE behaviour */ - e100_ri_out(info, arg & TIOCM_RI); - e100_cd_out(info, arg & TIOCM_CD); - break; - default: - return -EINVAL; - } - return 0; } -static void -rs_break(struct tty_struct *tty, int break_state) -{ - struct e100_serial * info = (struct e100_serial *)tty->driver_data; - unsigned long flags; - - if (!info->port) - return; - - save_flags(flags); - cli(); - if (break_state == -1) { - /* Go to manual mode and set the txd pin to 0 */ - info->tx_ctrl &= 0x3F; /* Clear bit 7 (txd) and 6 (tr_enable) */ - } else { - info->tx_ctrl |= (0x80 | 0x40); /* Set bit 7 (txd) and 6 (tr_enable) */ - } - info->port[REG_TR_CTRL] = info->tx_ctrl; - restore_flags(flags); -} - static int rs_ioctl(struct tty_struct *tty, struct file * file, unsigned int cmd, unsigned long arg) @@ -4124,49 +3652,45 @@ rs_ioctl(struct tty_struct *tty, struct file * file, } switch (cmd) { - case TIOCMGET: - return get_modem_info(info, (unsigned int *) arg); - case TIOCMBIS: - case TIOCMBIC: - case TIOCMSET: - return set_modem_info(info, cmd, (unsigned int *) arg); - case TIOCGSERIAL: - return get_serial_info(info, - (struct serial_struct *) arg); - case TIOCSSERIAL: - return set_serial_info(info, - (struct serial_struct *) arg); - case TIOCSERGETLSR: /* Get line status register */ - return get_lsr_info(info, (unsigned int *) arg); - - case TIOCSERGSTRUCT: - if (copy_to_user((struct e100_serial *) arg, - info, sizeof(struct 
e100_serial))) - return -EFAULT; - return 0; + case TIOCGSERIAL: + return get_serial_info(info, + (struct serial_struct *) arg); + case TIOCSSERIAL: + return set_serial_info(info, + (struct serial_struct *) arg); + case TIOCSERGETLSR: /* Get line status register */ + return get_lsr_info(info, (unsigned int *) arg); + + case TIOCSERGSTRUCT: + if (copy_to_user((struct e100_serial *) arg, + info, sizeof(struct e100_serial))) + return -EFAULT; + return 0; #if defined(CONFIG_ETRAX_RS485) - case TIOCSERSETRS485: - { - struct rs485_control rs485ctrl; - if (copy_from_user(&rs485ctrl, (struct rs485_control*)arg, sizeof(rs485ctrl))) - return -EFAULT; + case TIOCSERSETRS485: + { + struct rs485_control rs485ctrl; + if (copy_from_user(&rs485ctrl, (struct rs485_control *)arg, + sizeof(rs485ctrl))) + return -EFAULT; - return e100_enable_rs485(tty, &rs485ctrl); - } + return e100_enable_rs485(tty, &rs485ctrl); + } - case TIOCSERWRRS485: - { - struct rs485_write rs485wr; - if (copy_from_user(&rs485wr, (struct rs485_write*)arg, sizeof(rs485wr))) - return -EFAULT; + case TIOCSERWRRS485: + { + struct rs485_write rs485wr; + if (copy_from_user(&rs485wr, (struct rs485_write *)arg, + sizeof(rs485wr))) + return -EFAULT; - return e100_write_rs485(tty, 1, rs485wr.outc, rs485wr.outc_size); - } + return e100_write_rs485(tty, rs485wr.outc, rs485wr.outc_size); + } #endif - default: - return -ENOIOCTLCMD; + default: + return -ENOIOCTLCMD; } return 0; } @@ -4191,46 +3715,6 @@ rs_set_termios(struct tty_struct *tty, struct ktermios *old_termios) } -/* In debugport.c - register a console write function that uses the normal - * serial driver - */ -typedef int (*debugport_write_function)(int i, const char *buf, unsigned int len); - -extern debugport_write_function debug_write_function; - -static int rs_debug_write_function(int i, const char *buf, unsigned int len) -{ - int cnt; - int written = 0; - struct tty_struct *tty; - static int recurse_cnt = 0; - - tty = rs_table[i].tty; - if (tty) { - unsigned long flags; - if (recurse_cnt > 5) /* We skip this debug output */ - return 1; - - local_irq_save(flags); - recurse_cnt++; - local_irq_restore(flags); - do { - cnt = rs_write(tty, 0, buf + written, len); - if (cnt >= 0) { - written += cnt; - buf += cnt; - len -= cnt; - } else - len = cnt; - } while(len > 0); - local_irq_save(flags); - recurse_cnt--; - local_irq_restore(flags); - return 1; - } - return 0; -} - /* * ------------------------------------------------------------ * rs_close() @@ -4252,11 +3736,10 @@ rs_close(struct tty_struct *tty, struct file * filp) /* interrupts are disabled for this entire function */ - save_flags(flags); - cli(); + local_irq_save(flags); if (tty_hung_up_p(filp)) { - restore_flags(flags); + local_irq_restore(flags); return; } @@ -4283,7 +3766,7 @@ rs_close(struct tty_struct *tty, struct file * filp) info->count = 0; } if (info->count) { - restore_flags(flags); + local_irq_restore(flags); return; } info->flags |= ASYNC_CLOSING; @@ -4337,7 +3820,7 @@ rs_close(struct tty_struct *tty, struct file * filp) } info->flags &= ~(ASYNC_NORMAL_ACTIVE|ASYNC_CLOSING); wake_up_interruptible(&info->close_wait); - restore_flags(flags); + local_irq_restore(flags); /* port closed */ @@ -4359,6 +3842,28 @@ rs_close(struct tty_struct *tty, struct file * filp) #endif } #endif + + /* + * Release any allocated DMA irq's. 
+ */ + if (info->dma_in_enabled) { + free_irq(info->dma_in_irq_nbr, info); + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description); + info->uses_dma_in = 0; +#ifdef SERIAL_DEBUG_OPEN + printk(KERN_DEBUG "DMA irq '%s' freed\n", + info->dma_in_irq_description); +#endif + } + if (info->dma_out_enabled) { + free_irq(info->dma_out_irq_nbr, info); + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description); + info->uses_dma_out = 0; +#ifdef SERIAL_DEBUG_OPEN + printk(KERN_DEBUG "DMA irq '%s' freed\n", + info->dma_out_irq_description); +#endif + } } /* @@ -4433,8 +3938,8 @@ block_til_ready(struct tty_struct *tty, struct file * filp, */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { - if (info->flags & ASYNC_CLOSING) - interruptible_sleep_on(&info->close_wait); + wait_event_interruptible(info->close_wait, + !(info->flags & ASYNC_CLOSING)); #ifdef SERIAL_DO_RESTART if (info->flags & ASYNC_HUP_NOTIFY) return -EAGAIN; @@ -4472,21 +3977,19 @@ block_til_ready(struct tty_struct *tty, struct file * filp, printk("block_til_ready before block: ttyS%d, count = %d\n", info->line, info->count); #endif - save_flags(flags); - cli(); + local_irq_save(flags); if (!tty_hung_up_p(filp)) { extra_count++; info->count--; } - restore_flags(flags); + local_irq_restore(flags); info->blocked_open++; while (1) { - save_flags(flags); - cli(); + local_irq_save(flags); /* assert RTS and DTR */ e100_rts(info, 1); e100_dtr(info, 1); - restore_flags(flags); + local_irq_restore(flags); set_current_state(TASK_INTERRUPTIBLE); if (tty_hung_up_p(filp) || !(info->flags & ASYNC_INITIALIZED)) { @@ -4528,6 +4031,19 @@ block_til_ready(struct tty_struct *tty, struct file * filp, return 0; } +static void +deinit_port(struct e100_serial *info) +{ + if (info->dma_out_enabled) { + cris_free_dma(info->dma_out_nbr, info->dma_out_irq_description); + free_irq(info->dma_out_irq_nbr, info); + } + if (info->dma_in_enabled) { + cris_free_dma(info->dma_in_nbr, info->dma_in_irq_description); + free_irq(info->dma_in_irq_nbr, info); + } +} + /* * This routine is called whenever a serial port is opened. * It performs the serial-specific initialization for the tty structure. @@ -4538,9 +4054,9 @@ rs_open(struct tty_struct *tty, struct file * filp) struct e100_serial *info; int retval, line; unsigned long page; + int allocated_resources = 0; /* find which port we want to open */ - line = tty->index; if (line < 0 || line >= NR_PORTS) @@ -4580,8 +4096,8 @@ rs_open(struct tty_struct *tty, struct file * filp) */ if (tty_hung_up_p(filp) || (info->flags & ASYNC_CLOSING)) { - if (info->flags & ASYNC_CLOSING) - interruptible_sleep_on(&info->close_wait); + wait_event_interruptible(info->close_wait, + !(info->flags & ASYNC_CLOSING)); #ifdef SERIAL_DO_RESTART return ((info->flags & ASYNC_HUP_NOTIFY) ? -EAGAIN : -ERESTARTSYS); @@ -4591,12 +4107,85 @@ rs_open(struct tty_struct *tty, struct file * filp) } /* + * If DMA is enabled try to allocate the irq's. + */ + if (info->count == 1) { + allocated_resources = 1; + if (info->dma_in_enabled) { + if (request_irq(info->dma_in_irq_nbr, + rec_interrupt, + info->dma_in_irq_flags, + info->dma_in_irq_description, + info)) { + printk(KERN_WARNING "DMA irq '%s' busy; " + "falling back to non-DMA mode\n", + info->dma_in_irq_description); + /* Make sure we never try to use DMA in */ + /* for the port again. 
*/ + info->dma_in_enabled = 0; + } else if (cris_request_dma(info->dma_in_nbr, + info->dma_in_irq_description, + DMA_VERBOSE_ON_ERROR, + info->dma_owner)) { + free_irq(info->dma_in_irq_nbr, info); + printk(KERN_WARNING "DMA '%s' busy; " + "falling back to non-DMA mode\n", + info->dma_in_irq_description); + /* Make sure we never try to use DMA in */ + /* for the port again. */ + info->dma_in_enabled = 0; + } +#ifdef SERIAL_DEBUG_OPEN + else + printk(KERN_DEBUG "DMA irq '%s' allocated\n", + info->dma_in_irq_description); +#endif + } + if (info->dma_out_enabled) { + if (request_irq(info->dma_out_irq_nbr, + tr_interrupt, + info->dma_out_irq_flags, + info->dma_out_irq_description, + info)) { + printk(KERN_WARNING "DMA irq '%s' busy; " + "falling back to non-DMA mode\n", + info->dma_out_irq_description); + /* Make sure we never try to use DMA out */ + /* for the port again. */ + info->dma_out_enabled = 0; + } else if (cris_request_dma(info->dma_out_nbr, + info->dma_out_irq_description, + DMA_VERBOSE_ON_ERROR, + info->dma_owner)) { + free_irq(info->dma_out_irq_nbr, info); + printk(KERN_WARNING "DMA '%s' busy; " + "falling back to non-DMA mode\n", + info->dma_out_irq_description); + /* Make sure we never try to use DMA out */ + /* for the port again. */ + info->dma_out_enabled = 0; + } +#ifdef SERIAL_DEBUG_OPEN + else + printk(KERN_DEBUG "DMA irq '%s' allocated\n", + info->dma_out_irq_description); +#endif + } + } + + /* * Start up the serial port */ retval = startup(info); - if (retval) + if (retval) { + if (allocated_resources) + deinit_port(info); + + /* FIXME Decrease count info->count here too? */ return retval; + } + retval = block_til_ready(tty, filp, info); if (retval) { @@ -4604,6 +4193,9 @@ rs_open(struct tty_struct *tty, struct file * filp) printk("rs_open returning after block_til_ready with %d\n", retval); #endif + if (allocated_resources) + deinit_port(info); + return retval; } @@ -4793,6 +4385,8 @@ static const struct tty_operations rs_ops = { .send_xchar = rs_send_xchar, .wait_until_sent = rs_wait_until_sent, .read_proc = rs_read_proc, + .tiocmget = rs_tiocmget, + .tiocmset = rs_tiocmset }; static int __init @@ -4810,9 +4404,27 @@ rs_init(void) /* Setup the timed flush handler system */ #if !defined(CONFIG_ETRAX_SERIAL_FAST_TIMER) - init_timer(&flush_timer); - flush_timer.function = timed_flush_handler; - mod_timer(&flush_timer, jiffies + CONFIG_ETRAX_SERIAL_RX_TIMEOUT_TICKS); + setup_timer(&flush_timer, timed_flush_handler, 0); + mod_timer(&flush_timer, jiffies + 5); +#endif + +#if defined(CONFIG_ETRAX_RS485) +#if defined(CONFIG_ETRAX_RS485_ON_PA) + if (cris_io_interface_allocate_pins(if_ser0, 'a', rs485_pa_bit, + rs485_pa_bit)) { + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " + "RS485 pin\n"); + return -EBUSY; + } +#endif +#if defined(CONFIG_ETRAX_RS485_ON_PORT_G) + if (cris_io_interface_allocate_pins(if_ser0, 'g', rs485_pa_bit, + rs485_port_g_bit)) { + printk(KERN_CRIT "ETRAX100LX serial: Could not allocate " + "RS485 pin\n"); + return -EBUSY; + } +#endif #endif /* Initialize the tty_driver structure */ @@ -4839,6 +4451,16 @@ rs_init(void) /* do some initializing for the separate ports */ for (i = 0, info = rs_table; i < NR_PORTS; i++,info++) { + if (info->enabled) { + if (cris_request_io_interface(info->io_if, + info->io_if_description)) { + printk(KERN_CRIT "ETRAX100LX async serial: " + "Could not allocate IO pins for " + "%s, port %d\n", + info->io_if_description, i); + info->enabled = 0; + } + } info->uses_dma_in = 0; info->uses_dma_out = 0; info->line = i; @@ 
-4872,7 +4494,7 @@ rs_init(void) info->rs485.delay_rts_before_send = 0; info->rs485.enabled = 0; #endif - INIT_WORK(&info->work, do_softint, info); + INIT_WORK(&info->work, do_softint); if (info->enabled) { printk(KERN_INFO "%s%d at 0x%x is a builtin UART with DMA\n", @@ -4890,64 +4512,17 @@ rs_init(void) #endif #ifndef CONFIG_SVINTO_SIM +#ifndef CONFIG_ETRAX_KGDB /* Not needed in simulator. May only complicate stuff. */ /* hook the irq's for DMA channel 6 and 7, serial output and input, and some more... */ - if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial ", NULL)) - panic("irq8"); - -#ifdef CONFIG_ETRAX_SERIAL_PORT0 -#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA6_OUT - if (request_irq(SER0_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 0 dma tr", NULL)) - panic("irq22"); -#endif -#ifdef CONFIG_ETRAX_SERIAL_PORT0_DMA7_IN - if (request_irq(SER0_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 0 dma rec", NULL)) - panic("irq23"); -#endif -#endif - -#ifdef CONFIG_ETRAX_SERIAL_PORT1 -#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA8_OUT - if (request_irq(SER1_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_DISABLED, "serial 1 dma tr", NULL)) - panic("irq24"); -#endif -#ifdef CONFIG_ETRAX_SERIAL_PORT1_DMA9_IN - if (request_irq(SER1_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_DISABLED, "serial 1 dma rec", NULL)) - panic("irq25"); -#endif -#endif -#ifdef CONFIG_ETRAX_SERIAL_PORT2 - /* DMA Shared with par0 (and SCSI0 and ATA) */ -#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA2_OUT - if (request_irq(SER2_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma tr", NULL)) - panic("irq18"); -#endif -#ifdef CONFIG_ETRAX_SERIAL_PORT2_DMA3_IN - if (request_irq(SER2_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 2 dma rec", NULL)) - panic("irq19"); -#endif -#endif -#ifdef CONFIG_ETRAX_SERIAL_PORT3 - /* DMA Shared with par1 (and SCSI1 and Extern DMA 0) */ -#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA4_OUT - if (request_irq(SER3_DMA_TX_IRQ_NBR, tr_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma tr", NULL)) - panic("irq20"); -#endif -#ifdef CONFIG_ETRAX_SERIAL_PORT3_DMA5_IN - if (request_irq(SER3_DMA_RX_IRQ_NBR, rec_interrupt, IRQF_SHARED | IRQF_DISABLED, "serial 3 dma rec", NULL)) - panic("irq21"); -#endif -#endif + if (request_irq(SERIAL_IRQ_NBR, ser_interrupt, + IRQF_SHARED | IRQF_DISABLED, "serial ", driver)) + panic("%s: Failed to request irq8", __FUNCTION__); -#ifdef CONFIG_ETRAX_SERIAL_FLUSH_DMA_FAST - if (request_irq(TIMER1_IRQ_NBR, timeout_interrupt, IRQF_SHARED | IRQF_DISABLED, - "fast serial dma timeout", NULL)) { - printk(KERN_CRIT "err: timer1 irq\n"); - } #endif #endif /* CONFIG_SVINTO_SIM */ - debug_write_function = rs_debug_write_function; + return 0; } diff --git a/drivers/serial/crisv10.h b/drivers/serial/crisv10.h new file mode 100644 index 0000000..ccd0f32 --- /dev/null +++ b/drivers/serial/crisv10.h @@ -0,0 +1,146 @@ +/* + * serial.h: Arch-dep definitions for the Etrax100 serial driver. + * + * Copyright (C) 1998-2007 Axis Communications AB + */ + +#ifndef _ETRAX_SERIAL_H +#define _ETRAX_SERIAL_H + +#include <linux/circ_buf.h> +#include <asm/termios.h> +#include <asm/dma.h> +#include <asm/arch/io_interface_mux.h> + +/* Software state per channel */ + +#ifdef __KERNEL__ +/* + * This is our internal structure for each serial port's state. + * + * Many fields are paralleled by the structure used by the serial_struct + * structure. 
+ * + * For definitions of the flags field, see tty.h + */ + +#define SERIAL_RECV_DESCRIPTORS 8 + +struct etrax_recv_buffer { + struct etrax_recv_buffer *next; + unsigned short length; + unsigned char error; + unsigned char pad; + + unsigned char buffer[0]; +}; + +struct e100_serial { + int baud; + volatile u8 *port; /* R_SERIALx_CTRL */ + u32 irq; /* bitnr in R_IRQ_MASK2 for dmaX_descr */ + + /* Output registers */ + volatile u8 *oclrintradr; /* adr to R_DMA_CHx_CLR_INTR */ + volatile u32 *ofirstadr; /* adr to R_DMA_CHx_FIRST */ + volatile u8 *ocmdadr; /* adr to R_DMA_CHx_CMD */ + const volatile u8 *ostatusadr; /* adr to R_DMA_CHx_STATUS */ + + /* Input registers */ + volatile u8 *iclrintradr; /* adr to R_DMA_CHx_CLR_INTR */ + volatile u32 *ifirstadr; /* adr to R_DMA_CHx_FIRST */ + volatile u8 *icmdadr; /* adr to R_DMA_CHx_CMD */ + volatile u32 *idescradr; /* adr to R_DMA_CHx_DESCR */ + + int flags; /* defined in tty.h */ + + u8 rx_ctrl; /* shadow for R_SERIALx_REC_CTRL */ + u8 tx_ctrl; /* shadow for R_SERIALx_TR_CTRL */ + u8 iseteop; /* bit number for R_SET_EOP for the input dma */ + int enabled; /* Set to 1 if the port is enabled in HW config */ + + u8 dma_out_enabled; /* Set to 1 if DMA should be used */ + u8 dma_in_enabled; /* Set to 1 if DMA should be used */ + + /* end of fields defined in rs_table[] in .c-file */ + int dma_owner; + unsigned int dma_in_nbr; + unsigned int dma_out_nbr; + unsigned int dma_in_irq_nbr; + unsigned int dma_out_irq_nbr; + unsigned long dma_in_irq_flags; + unsigned long dma_out_irq_flags; + char *dma_in_irq_description; + char *dma_out_irq_description; + + enum cris_io_interface io_if; + char *io_if_description; + + u8 uses_dma_in; /* Set to 1 if DMA is used */ + u8 uses_dma_out; /* Set to 1 if DMA is used */ + u8 forced_eop; /* a fifo eop has been forced */ + int baud_base; /* For special baudrates */ + int custom_divisor; /* For special baudrates */ + struct etrax_dma_descr tr_descr; + struct etrax_dma_descr rec_descr[SERIAL_RECV_DESCRIPTORS]; + int cur_rec_descr; + + volatile int tr_running; /* 1 if output is running */ + + struct tty_struct *tty; + int read_status_mask; + int ignore_status_mask; + int x_char; /* xon/xoff character */ + int close_delay; + unsigned short closing_wait; + unsigned short closing_wait2; + unsigned long event; + unsigned long last_active; + int line; + int type; /* PORT_ETRAX */ + int count; /* # of fd on device */ + int blocked_open; /* # of blocked opens */ + struct circ_buf xmit; + struct etrax_recv_buffer *first_recv_buffer; + struct etrax_recv_buffer *last_recv_buffer; + unsigned int recv_cnt; + unsigned int max_recv_cnt; + + struct work_struct work; + struct async_icount icount; /* error-statistics etc.*/ + struct ktermios normal_termios; + struct ktermios callout_termios; + wait_queue_head_t open_wait; + wait_queue_head_t close_wait; + + unsigned long char_time_usec; /* The time for 1 char, in usecs */ + unsigned long flush_time_usec; /* How often we should flush */ + unsigned long last_tx_active_usec; /* Last tx usec in the jiffies */ + unsigned long last_tx_active; /* Last tx time in jiffies */ + unsigned long last_rx_active_usec; /* Last rx usec in the jiffies */ + unsigned long last_rx_active; /* Last rx time in jiffies */ + + int break_detected_cnt; + int errorcode; + +#ifdef CONFIG_ETRAX_RS485 + struct rs485_control rs485; /* RS-485 support */ +#endif +}; + +/* this PORT is not in the standard serial.h. 
it's not actually used for + * anything since we only have one type of async serial-port anyway in this + * system. + */ + +#define PORT_ETRAX 1 + +/* + * Events are used to schedule things to happen at timer-interrupt + * time, instead of at rs interrupt time. + */ +#define RS_EVENT_WRITE_WAKEUP 0 + +#endif /* __KERNEL__ */ + +#endif /* !_ETRAX_SERIAL_H */ diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c index 89769ce..b31f443 100644 --- a/drivers/spi/spi.c +++ b/drivers/spi/spi.c @@ -457,10 +457,11 @@ done: EXPORT_SYMBOL_GPL(spi_register_master); -static int __unregister(struct device *dev, void *unused) +static int __unregister(struct device *dev, void *master_dev) { /* note: before about 2.6.14-rc1 this would corrupt memory: */ - spi_unregister_device(to_spi_device(dev)); + if (dev != master_dev) + spi_unregister_device(to_spi_device(dev)); return 0; } @@ -478,7 +479,8 @@ void spi_unregister_master(struct spi_master *master) { int dummy; - dummy = device_for_each_child(master->dev.parent, NULL, __unregister); + dummy = device_for_each_child(master->dev.parent, &master->dev, + __unregister); device_unregister(&master->dev); } EXPORT_SYMBOL_GPL(spi_unregister_master); diff --git a/drivers/spi/spi_txx9.c b/drivers/spi/spi_txx9.c index cc5094f..363ac8e 100644 --- a/drivers/spi/spi_txx9.c +++ b/drivers/spi/spi_txx9.c @@ -24,6 +24,7 @@ #include <linux/spi/spi.h> #include <linux/err.h> #include <linux/clk.h> +#include <linux/io.h> #include <asm/gpio.h> @@ -74,7 +75,6 @@ struct txx9spi { struct list_head queue; wait_queue_head_t waitq; void __iomem *membase; - int irq; int baseclk; struct clk *clk; u32 max_speed_hz, min_speed_hz; @@ -350,12 +350,12 @@ static int __init txx9spi_probe(struct platform_device *dev) struct resource *res; int ret = -ENODEV; u32 mcr; + int irq; master = spi_alloc_master(&dev->dev, sizeof(*c)); if (!master) return ret; c = spi_master_get_devdata(master); - c->irq = -1; platform_set_drvdata(dev, master); INIT_WORK(&c->work, txx9spi_work); @@ -381,32 +381,36 @@ static int __init txx9spi_probe(struct platform_device *dev) res = platform_get_resource(dev, IORESOURCE_MEM, 0); if (!res) - goto exit; - c->membase = ioremap(res->start, res->end - res->start + 1); + goto exit_busy; + if (!devm_request_mem_region(&dev->dev, + res->start, res->end - res->start + 1, + "spi_txx9")) + goto exit_busy; + c->membase = devm_ioremap(&dev->dev, + res->start, res->end - res->start + 1); if (!c->membase) - goto exit; + goto exit_busy; /* enter config mode */ mcr = txx9spi_rd(c, TXx9_SPMCR); mcr &= ~(TXx9_SPMCR_OPMODE | TXx9_SPMCR_SPSTP | TXx9_SPMCR_BCLR); txx9spi_wr(c, mcr | TXx9_SPMCR_CONFIG | TXx9_SPMCR_BCLR, TXx9_SPMCR); - c->irq = platform_get_irq(dev, 0); - if (c->irq < 0) - goto exit; - ret = request_irq(c->irq, txx9spi_interrupt, 0, dev->name, c); - if (ret) { - c->irq = -1; + irq = platform_get_irq(dev, 0); + if (irq < 0) + goto exit_busy; + ret = devm_request_irq(&dev->dev, irq, txx9spi_interrupt, 0, + "spi_txx9", c); + if (ret) goto exit; - } c->workqueue = create_singlethread_workqueue(master->dev.parent->bus_id); if (!c->workqueue) - goto exit; + goto exit_busy; c->last_chipselect = -1; dev_info(&dev->dev, "at %#llx, irq %d, %dMHz\n", - (unsigned long long)res->start, c->irq, + (unsigned long long)res->start, irq, (c->baseclk + 500000) / 1000000); master->bus_num = dev->id; @@ -418,13 +422,11 @@ static int __init txx9spi_probe(struct platform_device *dev) if (ret) goto exit; return 0; +exit_busy: + ret = -EBUSY; exit: if (c->workqueue) destroy_workqueue(c->workqueue); - if 
(c->irq >= 0) - free_irq(c->irq, c); - if (c->membase) - iounmap(c->membase); if (c->clk) { clk_disable(c->clk); clk_put(c->clk); @@ -442,8 +444,6 @@ static int __exit txx9spi_remove(struct platform_device *dev) spi_unregister_master(master); platform_set_drvdata(dev, NULL); destroy_workqueue(c->workqueue); - free_irq(c->irq, c); - iounmap(c->membase); clk_disable(c->clk); clk_put(c->clk); spi_master_put(master); diff --git a/drivers/spi/tle62x0.c b/drivers/spi/tle62x0.c index 6da58ca..455991f 100644 --- a/drivers/spi/tle62x0.c +++ b/drivers/spi/tle62x0.c @@ -107,8 +107,11 @@ static ssize_t tle62x0_status_show(struct device *dev, mutex_lock(&st->lock); ret = tle62x0_read(st); - dev_dbg(dev, "tle62x0_read() returned %d\n", ret); + if (ret < 0) { + mutex_unlock(&st->lock); + return ret; + } for (ptr = 0; ptr < (st->nr_gpio * 2)/8; ptr += 1) { fault <<= 8; diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index 6bfdba6..1f7ab15 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1215,20 +1215,18 @@ static int keyspan_chars_in_buffer (struct usb_serial_port *port) static int keyspan_open (struct usb_serial_port *port, struct file *filp) { - struct keyspan_port_private *p_priv; - struct keyspan_serial_private *s_priv; - struct usb_serial *serial = port->serial; + struct keyspan_port_private *p_priv; + struct keyspan_serial_private *s_priv; + struct usb_serial *serial = port->serial; const struct keyspan_device_details *d_details; int i, err; - int baud_rate, device_port; struct urb *urb; - unsigned int cflag; s_priv = usb_get_serial_data(serial); p_priv = usb_get_serial_port_data(port); d_details = p_priv->device_details; - - dbg("%s - port%d.", __FUNCTION__, port->number); + + dbg("%s - port%d.", __FUNCTION__, port->number); /* Set some sane defaults */ p_priv->rts_state = 1; @@ -1249,7 +1247,7 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp) urb->dev = serial->dev; /* make sure endpoint data toggle is synchronized with the device */ - + usb_clear_halt(urb->dev, urb->pipe); if ((err = usb_submit_urb(urb, GFP_KERNEL)) != 0) { @@ -1265,30 +1263,6 @@ static int keyspan_open (struct usb_serial_port *port, struct file *filp) /* usb_settoggle(urb->dev, usb_pipeendpoint(urb->pipe), usb_pipeout(urb->pipe), 0); */ } - /* get the terminal config for the setup message now so we don't - * need to send 2 of them */ - - cflag = port->tty->termios->c_cflag; - device_port = port->number - port->serial->minor; - - /* Baud rate calculation takes baud rate as an integer - so other rates can be generated if desired. */ - baud_rate = tty_get_baud_rate(port->tty); - /* If no match or invalid, leave as default */ - if (baud_rate >= 0 - && d_details->calculate_baud_rate(baud_rate, d_details->baudclk, - NULL, NULL, NULL, device_port) == KEYSPAN_BAUD_RATE_OK) { - p_priv->baud = baud_rate; - } - - /* set CTS/RTS handshake etc. */ - p_priv->cflag = cflag; - p_priv->flow_control = (cflag & CRTSCTS)? 
flow_cts: flow_none; - - keyspan_send_setup(port, 1); - //mdelay(100); - //keyspan_set_termios(port, NULL); - return (0); } diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig index cc4b60f..7d86e9e 100644 --- a/drivers/video/Kconfig +++ b/drivers/video/Kconfig @@ -503,7 +503,7 @@ config FB_VALKYRIE config FB_CT65550 bool "Chips 65550 display support" - depends on (FB = y) && PPC32 + depends on (FB = y) && PPC32 && PCI select FB_CFB_FILLRECT select FB_CFB_COPYAREA select FB_CFB_IMAGEBLIT diff --git a/drivers/video/gbefb.c b/drivers/video/gbefb.c index b9b572b..2e552d5 100644 --- a/drivers/video/gbefb.c +++ b/drivers/video/gbefb.c @@ -183,8 +183,8 @@ static struct fb_videomode default_mode_LCD __initdata = { .vmode = FB_VMODE_NONINTERLACED, }; -struct fb_videomode *default_mode = &default_mode_CRT; -struct fb_var_screeninfo *default_var = &default_var_CRT; +struct fb_videomode *default_mode __initdata = &default_mode_CRT; +struct fb_var_screeninfo *default_var __initdata = &default_var_CRT; static int flat_panel_enabled = 0; diff --git a/drivers/video/geode/lxfb.h b/drivers/video/geode/lxfb.h index 6c227f9..ca13c48 100644 --- a/drivers/video/geode/lxfb.h +++ b/drivers/video/geode/lxfb.h @@ -33,7 +33,7 @@ void lx_set_palette_reg(struct fb_info *, unsigned int, unsigned int, #define MSR_LX_GLD_CONFIG 0x48002001 #define MSR_LX_GLCP_DOTPLL 0x4c000015 -#define MSR_LX_DF_PADSEL 0x48000011 +#define MSR_LX_DF_PADSEL 0x48002011 #define MSR_LX_DC_SPARE 0x80000011 #define MSR_LX_DF_GLCONFIG 0x48002001 diff --git a/drivers/video/ps3fb.c b/drivers/video/ps3fb.c index b3463dd..75836aa 100644 --- a/drivers/video/ps3fb.c +++ b/drivers/video/ps3fb.c @@ -727,7 +727,7 @@ static int ps3fb_blank(int blank, struct fb_info *info) static int ps3fb_get_vblank(struct fb_vblank *vblank) { - memset(vblank, 0, sizeof(&vblank)); + memset(vblank, 0, sizeof(*vblank)); vblank->flags = FB_VBLANK_HAVE_VSYNC; return 0; } diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c index a5333c1..b829dc7 100644 --- a/drivers/video/s1d13xxxfb.c +++ b/drivers/video/s1d13xxxfb.c @@ -540,7 +540,7 @@ s1d13xxxfb_probe(struct platform_device *pdev) int ret = 0; u8 revision; - dbg("probe called: device is %p\n", dev); + dbg("probe called: device is %p\n", pdev); printk(KERN_INFO "Epson S1D13XXX FB Driver\n"); @@ -753,8 +753,11 @@ static struct platform_driver s1d13xxxfb_driver = { static int __init s1d13xxxfb_init(void) { + +#ifndef MODULE if (fb_get_options("s1d13xxxfb", NULL)) return -ENODEV; +#endif return platform_driver_register(&s1d13xxxfb_driver); } diff --git a/drivers/video/sis/sis_main.c b/drivers/video/sis/sis_main.c index bc7d236..37bd24b 100644 --- a/drivers/video/sis/sis_main.c +++ b/drivers/video/sis/sis_main.c @@ -1248,7 +1248,6 @@ sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *in if(found_mode) { ivideo->sisfb_mode_idx = sisfb_validate_mode(ivideo, ivideo->sisfb_mode_idx, ivideo->currentvbflags); - ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]; } else { ivideo->sisfb_mode_idx = -1; } @@ -1260,6 +1259,8 @@ sisfb_do_set_var(struct fb_var_screeninfo *var, int isactive, struct fb_info *in return -EINVAL; } + ivideo->mode_no = sisbios_mode[ivideo->sisfb_mode_idx].mode_no[ivideo->mni]; + if(sisfb_search_refresh_rate(ivideo, ivideo->refresh_rate, ivideo->sisfb_mode_idx) == 0) { ivideo->rate_idx = sisbios_mode[ivideo->sisfb_mode_idx].rate_idx; ivideo->refresh_rate = 60; diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c index 
b983d26..d1d6c0f 100644
--- a/drivers/video/uvesafb.c
+++ b/drivers/video/uvesafb.c
@@ -926,8 +926,10 @@ static int uvesafb_setpalette(struct uvesafb_pal_entry *entries, int count,
 		int start, struct fb_info *info)
 {
 	struct uvesafb_ktask *task;
+#ifdef CONFIG_X86
 	struct uvesafb_par *par = info->par;
 	int i = par->mode_idx;
+#endif
 	int err = 0;
 
 	/*
@@ -1103,11 +1105,11 @@ static int uvesafb_pan_display(struct fb_var_screeninfo *var,
 
 static int uvesafb_blank(int blank, struct fb_info *info)
 {
-	struct uvesafb_par *par = info->par;
 	struct uvesafb_ktask *task;
 	int err = 1;
-
 #ifdef CONFIG_X86
+	struct uvesafb_par *par = info->par;
+
 	if (par->vbe_ib.capabilities & VBE_CAP_VGACOMPAT) {
 		int loop = 10000;
 		u8 seq = 0, crtc17 = 0;
diff --git a/drivers/w1/masters/ds2490.c b/drivers/w1/masters/ds2490.c
index 299e274..b63b5e0 100644
--- a/drivers/w1/masters/ds2490.c
+++ b/drivers/w1/masters/ds2490.c
@@ -233,7 +233,7 @@ static int ds_recv_status_nodump(struct ds_device *dev, struct ds_status *st,
 {
 	int count, err;
 
-	memset(st, 0, sizeof(st));
+	memset(st, 0, sizeof(*st));
 
 	count = 0;
 	err = usb_bulk_msg(dev->udev, usb_rcvbulkpipe(dev->udev, dev->ep[EP_STATUS]), buf, size, &count, 100);
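The ps3fb_get_vblank() and ds_recv_status_nodump() hunks above fix the same classic C slip: passing a pointer (or, in the ps3fb case, the address of a pointer) to sizeof() inside memset(), which clears only the 4 or 8 bytes of the pointer itself instead of the whole structure. A minimal stand-alone sketch of the pattern follows; struct demo_status and the helper names are hypothetical stand-ins for fb_vblank / ds_status, not code from the patch.

#include <string.h>

/* demo_status is a made-up stand-in for struct fb_vblank / struct ds_status */
struct demo_status {
	unsigned int flags;
	unsigned char data[28];
};

void clear_wrong(struct demo_status *st)
{
	/* BUG: sizeof(st) is the size of the pointer (4 or 8 bytes),
	 * so most of *st is left untouched; newer gcc versions warn
	 * about this via -Wsizeof-pointer-memaccess. */
	memset(st, 0, sizeof(st));
}

void clear_right(struct demo_status *st)
{
	/* sizeof(*st) is the size of the pointed-to structure */
	memset(st, 0, sizeof(*st));
}

Both forms are valid C, so the compiler cannot reject the first one; spelling it sizeof(*ptr) (or sizeof(struct demo_status)) is the safe habit the two hunks adopt.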
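The spi_txx9 probe()/remove() rework earlier in the patch is a conversion to managed (devm_*) resources: memory regions, MMIO mappings and IRQs obtained through devm_* helpers are released by the driver core automatically when probe() fails or the device is unbound, which is why the explicit free_irq() and iounmap() calls disappear from both the error path and txx9spi_remove(). The sketch below shows the idiom in isolation; the "foo" driver name, foo_priv and foo_interrupt are hypothetical and not part of the patch.

#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct foo_priv {
	void __iomem *membase;	/* unmapped automatically by devres */
};

static irqreturn_t foo_interrupt(int irq, void *dev_id)
{
	/* a real handler would read/ack status via ((struct foo_priv *)dev_id)->membase */
	return IRQ_HANDLED;
}

static int foo_probe(struct platform_device *pdev)
{
	struct foo_priv *c;
	struct resource *res;
	int irq, ret;

	c = devm_kzalloc(&pdev->dev, sizeof(*c), GFP_KERNEL);
	if (!c)
		return -ENOMEM;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;
	if (!devm_request_mem_region(&pdev->dev, res->start,
				     res->end - res->start + 1, "foo"))
		return -EBUSY;
	c->membase = devm_ioremap(&pdev->dev, res->start,
				  res->end - res->start + 1);
	if (!c->membase)
		return -EBUSY;

	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	ret = devm_request_irq(&pdev->dev, irq, foo_interrupt, 0, "foo", c);
	if (ret)
		return ret;

	platform_set_drvdata(pdev, c);
	/* no unwind labels needed: devres releases everything on failure */
	return 0;
}

static int foo_remove(struct platform_device *pdev)
{
	/* nothing to free by hand; devres tears down the IRQ and mapping */
	return 0;
}

Platform driver registration boilerplate is omitted. Only device-lifetime resources fit this pattern; objects with their own teardown ordering, such as the workqueue and clock in spi_txx9, are still released explicitly in the patch.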