Diffstat (limited to 'drivers/dma/ioat')
 drivers/dma/ioat/dma.c    |  12
 drivers/dma/ioat/dma.h    |  19
 drivers/dma/ioat/dma_v2.c | 186
 drivers/dma/ioat/dma_v2.h |  33
 drivers/dma/ioat/dma_v3.c | 143
 drivers/dma/ioat/pci.c    |   7
 6 files changed, 170 insertions(+), 230 deletions(-)
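
The dma_v2.h hunks below replace the open-coded ioat2_ring_mask() arithmetic with the CIRC_CNT() helper from <linux/circ_buf.h>; head, issued, and tail become free-running indices that are only masked when a ring entry is dereferenced. A minimal sketch of that accounting follows (a userspace illustration: demo_ring and the ring_*() helpers are stand-ins, not driver code).

/*
 * Standalone sketch of the ring accounting this series moves to.  Userspace
 * build: the kernel gets CIRC_CNT() from <linux/circ_buf.h>.  Indices only
 * ever increase; they are reduced modulo the ring size when an entry is
 * dereferenced, as ioat2_get_ring_ent() does with (ioat2_ring_size() - 1).
 */
#include <assert.h>
#include <stdio.h>

#define CIRC_CNT(head, tail, size) (((head) - (tail)) & ((size) - 1))

struct demo_ring {
	unsigned short head;	/* next descriptor to produce (prep_lock) */
	unsigned short issued;	/* last descriptor handed to hardware */
	unsigned short tail;	/* next descriptor to clean up (cleanup_lock) */
	unsigned short alloc_order;
};

static unsigned int ring_size(const struct demo_ring *r)
{
	return 1 << r->alloc_order;
}

/* descriptors in flight with the engine */
static unsigned int ring_active(const struct demo_ring *r)
{
	return CIRC_CNT(r->head, r->tail, ring_size(r));
}

/* descriptors produced but not yet submitted to hardware */
static unsigned int ring_pending(const struct demo_ring *r)
{
	return CIRC_CNT(r->head, r->issued, ring_size(r));
}

/* free slots; active can never exceed size, so no BUG_ON() is needed */
static unsigned int ring_space(const struct demo_ring *r)
{
	return ring_size(r) - ring_active(r);
}

int main(void)
{
	struct demo_ring r = { .alloc_order = 4 };	/* 16 entries */

	r.head = 5;	/* five descriptors prepared...           */
	r.issued = 3;	/* ...three of them written to hardware   */
	r.tail = 1;	/* ...one already completed and reclaimed */

	printf("active %u pending %u space %u\n",
	       ring_active(&r), ring_pending(&r), ring_space(&r));
	assert(ring_active(&r) == 4 && ring_pending(&r) == 2 &&
	       ring_space(&r) == 12);
	return 0;
}
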
diff --git a/drivers/dma/ioat/dma.c b/drivers/dma/ioat/dma.c index 3e5a800..c9213ea 100644 --- a/drivers/dma/ioat/dma.c +++ b/drivers/dma/ioat/dma.c @@ -727,18 +727,18 @@ static void ioat1_timer_event(unsigned long data) } enum dma_status -ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct ioat_chan_common *chan = to_chan_common(c); struct ioatdma_device *device = chan->device; - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) return DMA_SUCCESS; device->cleanup_fn((unsigned long) c); - return ioat_is_complete(c, cookie, done, used); + return ioat_tx_status(c, cookie, txstate); } static void ioat1_dma_start_null_desc(struct ioat_dma_chan *ioat) @@ -858,7 +858,7 @@ int __devinit ioat_dma_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); if (tmo == 0 || - dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) + dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test copy timed out, disabling\n"); err = -ENODEV; @@ -1199,7 +1199,7 @@ int __devinit ioat1_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat1_dma_memcpy_issue_pending; dma->device_alloc_chan_resources = ioat1_dma_alloc_chan_resources; dma->device_free_chan_resources = ioat1_dma_free_chan_resources; - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_tx_status = ioat_dma_tx_status; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma.h b/drivers/dma/ioat/dma.h index 86b97ac..6d3a73b 100644 --- a/drivers/dma/ioat/dma.h +++ b/drivers/dma/ioat/dma.h @@ -96,6 +96,7 @@ struct ioat_chan_common { #define IOAT_COMPLETION_ACK 1 #define IOAT_RESET_PENDING 2 #define IOAT_KOBJ_INIT_FAIL 3 + #define IOAT_RESHAPE_PENDING 4 struct timer_list timer; #define COMPLETION_TIMEOUT msecs_to_jiffies(100) #define IDLE_TIMEOUT msecs_to_jiffies(2000) @@ -142,15 +143,14 @@ static inline struct ioat_dma_chan *to_ioat_chan(struct dma_chan *c) } /** - * ioat_is_complete - poll the status of an ioat transaction + * ioat_tx_status - poll the status of an ioat transaction * @c: channel handle * @cookie: transaction identifier - * @done: if set, updated with last completed transaction - * @used: if set, updated with last used transaction + * @txstate: if set, updated with the transaction state */ static inline enum dma_status -ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct ioat_chan_common *chan = to_chan_common(c); dma_cookie_t last_used; @@ -159,10 +159,7 @@ ioat_is_complete(struct dma_chan *c, dma_cookie_t cookie, last_used = c->cookie; last_complete = chan->completed_cookie; - if (done) - *done = last_complete; - if (used) - *used = last_used; + dma_set_tx_state(txstate, last_complete, last_used, 0); return dma_async_is_complete(cookie, last_complete, last_used); } @@ -338,8 +335,8 @@ struct dca_provider * __devinit ioat_dca_init(struct pci_dev *pdev, unsigned long ioat_get_current_completion(struct ioat_chan_common *chan); void ioat_init_channel(struct ioatdma_device *device, struct ioat_chan_common *chan, int idx); -enum dma_status ioat_is_dma_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used); +enum 
dma_status ioat_dma_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate); void ioat_dma_unmap(struct ioat_chan_common *chan, enum dma_ctrl_flags flags, size_t len, struct ioat_dma_descriptor *hw); bool ioat_cleanup_preamble(struct ioat_chan_common *chan, diff --git a/drivers/dma/ioat/dma_v2.c b/drivers/dma/ioat/dma_v2.c index b5ae56c..3c8b32a 100644 --- a/drivers/dma/ioat/dma_v2.c +++ b/drivers/dma/ioat/dma_v2.c @@ -56,8 +56,6 @@ void __ioat2_issue_pending(struct ioat2_dma_chan *ioat) ioat->dmacount += ioat2_ring_pending(ioat); ioat->issued = ioat->head; - /* make descriptor updates globally visible before notifying channel */ - wmb(); writew(ioat->dmacount, chan->reg_base + IOAT_CHAN_DMACOUNT_OFFSET); dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x count: %#x\n", @@ -69,9 +67,9 @@ void ioat2_issue_pending(struct dma_chan *c) struct ioat2_dma_chan *ioat = to_ioat2_chan(c); if (ioat2_ring_pending(ioat)) { - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&ioat->prep_lock); __ioat2_issue_pending(ioat); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); } } @@ -80,7 +78,7 @@ void ioat2_issue_pending(struct dma_chan *c) * @ioat: ioat2+ channel * * Check if the number of unsubmitted descriptors has exceeded the - * watermark. Called with ring_lock held + * watermark. Called with prep_lock held */ static void ioat2_update_pending(struct ioat2_dma_chan *ioat) { @@ -92,7 +90,6 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) { struct ioat_ring_ent *desc; struct ioat_dma_descriptor *hw; - int idx; if (ioat2_ring_space(ioat) < 1) { dev_err(to_dev(&ioat->base), @@ -102,8 +99,7 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) dev_dbg(to_dev(&ioat->base), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); - idx = ioat2_desc_alloc(ioat, 1); - desc = ioat2_get_ring_ent(ioat, idx); + desc = ioat2_get_ring_ent(ioat, ioat->head); hw = desc->hw; hw->ctl = 0; @@ -117,14 +113,16 @@ static void __ioat2_start_null_desc(struct ioat2_dma_chan *ioat) async_tx_ack(&desc->txd); ioat2_set_chainaddr(ioat, desc->txd.phys); dump_desc_dbg(ioat, desc); + wmb(); + ioat->head += 1; __ioat2_issue_pending(ioat); } static void ioat2_start_null_desc(struct ioat2_dma_chan *ioat) { - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&ioat->prep_lock); __ioat2_start_null_desc(ioat); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); } static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) @@ -134,15 +132,16 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) struct ioat_ring_ent *desc; bool seen_current = false; u16 active; - int i; + int idx = ioat->tail, i; dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); active = ioat2_ring_active(ioat); for (i = 0; i < active && !seen_current; i++) { - prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); - desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + smp_read_barrier_depends(); + prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); + desc = ioat2_get_ring_ent(ioat, idx + i); tx = &desc->txd; dump_desc_dbg(ioat, desc); if (tx->cookie) { @@ -158,11 +157,12 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) if (tx->phys == phys_complete) seen_current = true; } - ioat->tail += i; + smp_mb(); /* finish all descriptor reads before incrementing tail */ + ioat->tail = idx + i; BUG_ON(active && 
!seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; - if (ioat->head == ioat->tail) { + if (active - i == 0) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); @@ -179,24 +179,9 @@ static void ioat2_cleanup(struct ioat2_dma_chan *ioat) struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; - prefetch(chan->completion); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; - - if (!ioat_cleanup_preamble(chan, &phys_complete)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - - if (!spin_trylock_bh(&ioat->ring_lock)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - - __cleanup(ioat, phys_complete); - - spin_unlock_bh(&ioat->ring_lock); + spin_lock_bh(&chan->cleanup_lock); + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); spin_unlock_bh(&chan->cleanup_lock); } @@ -287,12 +272,10 @@ void ioat2_timer_event(unsigned long data) struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; - spin_lock_bh(&chan->cleanup_lock); if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { unsigned long phys_complete; u64 status; - spin_lock_bh(&ioat->ring_lock); status = ioat_chansts(chan); /* when halted due to errors check for channel @@ -311,26 +294,31 @@ void ioat2_timer_event(unsigned long data) * acknowledged a pending completion once, then be more * forceful with a restart */ - if (ioat_cleanup_preamble(chan, &phys_complete)) + spin_lock_bh(&chan->cleanup_lock); + if (ioat_cleanup_preamble(chan, &phys_complete)) { __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + } else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { + spin_lock_bh(&ioat->prep_lock); ioat2_restart_channel(ioat); - else { + spin_unlock_bh(&ioat->prep_lock); + } else { set_bit(IOAT_COMPLETION_ACK, &chan->state); mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); } - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); } else { u16 active; /* if the ring is idle, empty, and oversized try to step * down the size */ - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->prep_lock); active = ioat2_ring_active(ioat); if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) reshape_ring(ioat, ioat->alloc_order-1); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); + spin_unlock_bh(&chan->cleanup_lock); /* keep shrinking until we get back to our minimum * default size @@ -338,7 +326,6 @@ void ioat2_timer_event(unsigned long data) if (ioat->alloc_order > ioat_get_alloc_order()) mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } - spin_unlock_bh(&chan->cleanup_lock); } static int ioat2_reset_hw(struct ioat_chan_common *chan) @@ -392,7 +379,7 @@ int ioat2_enumerate_channels(struct ioatdma_device *device) ioat_init_channel(device, &ioat->base, i); ioat->xfercap_log = xfercap_log; - spin_lock_init(&ioat->ring_lock); + spin_lock_init(&ioat->prep_lock); if (device->reset_hw(&ioat->base)) { i = 0; break; @@ -418,8 +405,17 @@ static dma_cookie_t ioat2_tx_submit_unlock(struct dma_async_tx_descriptor *tx) if (!test_and_set_bit(IOAT_COMPLETION_PENDING, &chan->state)) mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + + /* make descriptor updates visible before advancing ioat->head, + * this is purposefully not smp_wmb() since we are also + * publishing the descriptor updates to a dma 
device + */ + wmb(); + + ioat->head += ioat->produce; + ioat2_update_pending(ioat); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); return cookie; } @@ -531,13 +527,15 @@ int ioat2_alloc_chan_resources(struct dma_chan *c) if (!ring) return -ENOMEM; - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->prep_lock); ioat->ring = ring; ioat->head = 0; ioat->issued = 0; ioat->tail = 0; ioat->alloc_order = order; - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); + spin_unlock_bh(&chan->cleanup_lock); tasklet_enable(&chan->cleanup_task); ioat2_start_null_desc(ioat); @@ -553,7 +551,7 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order) */ struct ioat_chan_common *chan = &ioat->base; struct dma_chan *c = &chan->common; - const u16 curr_size = ioat2_ring_mask(ioat) + 1; + const u16 curr_size = ioat2_ring_size(ioat); const u16 active = ioat2_ring_active(ioat); const u16 new_size = 1 << order; struct ioat_ring_ent **ring; @@ -653,54 +651,61 @@ bool reshape_ring(struct ioat2_dma_chan *ioat, int order) } /** - * ioat2_alloc_and_lock - common descriptor alloc boilerplate for ioat2,3 ops - * @idx: gets starting descriptor index on successful allocation + * ioat2_check_space_lock - verify space and grab ring producer lock * @ioat: ioat2,3 channel (ring) to operate on * @num_descs: allocation length */ -int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs) +int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs) { struct ioat_chan_common *chan = &ioat->base; + bool retry; - spin_lock_bh(&ioat->ring_lock); + retry: + spin_lock_bh(&ioat->prep_lock); /* never allow the last descriptor to be consumed, we need at * least one free at all times to allow for on-the-fly ring * resizing. */ - while (unlikely(ioat2_ring_space(ioat) <= num_descs)) { - if (reshape_ring(ioat, ioat->alloc_order + 1) && - ioat2_ring_space(ioat) > num_descs) - break; - - if (printk_ratelimit()) - dev_dbg(to_dev(chan), - "%s: ring full! num_descs: %d (%x:%x:%x)\n", - __func__, num_descs, ioat->head, ioat->tail, - ioat->issued); - spin_unlock_bh(&ioat->ring_lock); - - /* progress reclaim in the allocation failure case we - * may be called under bh_disabled so we need to trigger - * the timer event directly - */ - spin_lock_bh(&chan->cleanup_lock); - if (jiffies > chan->timer.expires && - timer_pending(&chan->timer)) { - struct ioatdma_device *device = chan->device; - - mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); - spin_unlock_bh(&chan->cleanup_lock); - device->timer_fn((unsigned long) &chan->common); - } else - spin_unlock_bh(&chan->cleanup_lock); - return -ENOMEM; + if (likely(ioat2_ring_space(ioat) > num_descs)) { + dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat->head, ioat->tail, ioat->issued); + ioat->produce = num_descs; + return 0; /* with ioat->prep_lock held */ } + retry = test_and_set_bit(IOAT_RESHAPE_PENDING, &chan->state); + spin_unlock_bh(&ioat->prep_lock); - dev_dbg(to_dev(chan), "%s: num_descs: %d (%x:%x:%x)\n", - __func__, num_descs, ioat->head, ioat->tail, ioat->issued); + /* is another cpu already trying to expand the ring? 
*/ + if (retry) + goto retry; - *idx = ioat2_desc_alloc(ioat, num_descs); - return 0; /* with ioat->ring_lock held */ + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->prep_lock); + retry = reshape_ring(ioat, ioat->alloc_order + 1); + clear_bit(IOAT_RESHAPE_PENDING, &chan->state); + spin_unlock_bh(&ioat->prep_lock); + spin_unlock_bh(&chan->cleanup_lock); + + /* if we were able to expand the ring retry the allocation */ + if (retry) + goto retry; + + if (printk_ratelimit()) + dev_dbg(to_dev(chan), "%s: ring full! num_descs: %d (%x:%x:%x)\n", + __func__, num_descs, ioat->head, ioat->tail, ioat->issued); + + /* progress reclaim in the allocation failure case we may be + * called under bh_disabled so we need to trigger the timer + * event directly + */ + if (jiffies > chan->timer.expires && timer_pending(&chan->timer)) { + struct ioatdma_device *device = chan->device; + + mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); + device->timer_fn((unsigned long) &chan->common); + } + + return -ENOMEM; } struct dma_async_tx_descriptor * @@ -713,14 +718,11 @@ ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, dma_addr_t dst = dma_dest; dma_addr_t src = dma_src; size_t total_len = len; - int num_descs; - u16 idx; - int i; + int num_descs, idx, i; num_descs = ioat2_xferlen_to_descs(ioat, len); - if (likely(num_descs) && - ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) - /* pass */; + if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) + idx = ioat->head; else return NULL; i = 0; @@ -777,7 +779,8 @@ void ioat2_free_chan_resources(struct dma_chan *c) device->cleanup_fn((unsigned long) c); device->reset_hw(chan); - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->prep_lock); descs = ioat2_ring_space(ioat); dev_dbg(to_dev(chan), "freeing %d idle descriptors\n", descs); for (i = 0; i < descs; i++) { @@ -800,7 +803,8 @@ void ioat2_free_chan_resources(struct dma_chan *c) ioat->alloc_order = 0; pci_pool_free(device->completion_pool, chan->completion, chan->completion_dma); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); + spin_unlock_bh(&chan->cleanup_lock); chan->last_completion = 0; chan->completion_dma = 0; @@ -855,7 +859,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *device, int dca) dma->device_issue_pending = ioat2_issue_pending; dma->device_alloc_chan_resources = ioat2_alloc_chan_resources; dma->device_free_chan_resources = ioat2_free_chan_resources; - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_tx_status = ioat_tx_status; err = ioat_probe(device); if (err) diff --git a/drivers/dma/ioat/dma_v2.h b/drivers/dma/ioat/dma_v2.h index ef2871f..a2c413b 100644 --- a/drivers/dma/ioat/dma_v2.h +++ b/drivers/dma/ioat/dma_v2.h @@ -22,6 +22,7 @@ #define IOATDMA_V2_H #include <linux/dmaengine.h> +#include <linux/circ_buf.h> #include "dma.h" #include "hw.h" @@ -49,8 +50,9 @@ extern int ioat_ring_alloc_order; * @tail: cleanup index * @dmacount: identical to 'head' except for occasionally resetting to zero * @alloc_order: log2 of the number of allocated descriptors + * @produce: number of descriptors to produce at submit time * @ring: software ring buffer implementation of hardware ring - * @ring_lock: protects ring attributes + * @prep_lock: serializes descriptor preparation (producers) */ struct ioat2_dma_chan { struct ioat_chan_common base; @@ -60,8 +62,9 @@ struct ioat2_dma_chan { u16 tail; u16 dmacount; u16 alloc_order; + u16 produce; struct ioat_ring_ent **ring; - 
spinlock_t ring_lock; + spinlock_t prep_lock; }; static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) @@ -71,38 +74,26 @@ static inline struct ioat2_dma_chan *to_ioat2_chan(struct dma_chan *c) return container_of(chan, struct ioat2_dma_chan, base); } -static inline u16 ioat2_ring_mask(struct ioat2_dma_chan *ioat) +static inline u16 ioat2_ring_size(struct ioat2_dma_chan *ioat) { - return (1 << ioat->alloc_order) - 1; + return 1 << ioat->alloc_order; } /* count of descriptors in flight with the engine */ static inline u16 ioat2_ring_active(struct ioat2_dma_chan *ioat) { - return (ioat->head - ioat->tail) & ioat2_ring_mask(ioat); + return CIRC_CNT(ioat->head, ioat->tail, ioat2_ring_size(ioat)); } /* count of descriptors pending submission to hardware */ static inline u16 ioat2_ring_pending(struct ioat2_dma_chan *ioat) { - return (ioat->head - ioat->issued) & ioat2_ring_mask(ioat); + return CIRC_CNT(ioat->head, ioat->issued, ioat2_ring_size(ioat)); } static inline u16 ioat2_ring_space(struct ioat2_dma_chan *ioat) { - u16 num_descs = ioat2_ring_mask(ioat) + 1; - u16 active = ioat2_ring_active(ioat); - - BUG_ON(active > num_descs); - - return num_descs - active; -} - -/* assumes caller already checked space */ -static inline u16 ioat2_desc_alloc(struct ioat2_dma_chan *ioat, u16 len) -{ - ioat->head += len; - return ioat->head - len; + return ioat2_ring_size(ioat) - ioat2_ring_active(ioat); } static inline u16 ioat2_xferlen_to_descs(struct ioat2_dma_chan *ioat, size_t len) @@ -151,7 +142,7 @@ struct ioat_ring_ent { static inline struct ioat_ring_ent * ioat2_get_ring_ent(struct ioat2_dma_chan *ioat, u16 idx) { - return ioat->ring[idx & ioat2_ring_mask(ioat)]; + return ioat->ring[idx & (ioat2_ring_size(ioat) - 1)]; } static inline void ioat2_set_chainaddr(struct ioat2_dma_chan *ioat, u64 addr) @@ -168,7 +159,7 @@ int __devinit ioat2_dma_probe(struct ioatdma_device *dev, int dca); int __devinit ioat3_dma_probe(struct ioatdma_device *dev, int dca); struct dca_provider * __devinit ioat2_dca_init(struct pci_dev *pdev, void __iomem *iobase); struct dca_provider * __devinit ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase); -int ioat2_alloc_and_lock(u16 *idx, struct ioat2_dma_chan *ioat, int num_descs); +int ioat2_check_space_lock(struct ioat2_dma_chan *ioat, int num_descs); int ioat2_enumerate_channels(struct ioatdma_device *device); struct dma_async_tx_descriptor * ioat2_dma_prep_memcpy_lock(struct dma_chan *c, dma_addr_t dma_dest, diff --git a/drivers/dma/ioat/dma_v3.c b/drivers/dma/ioat/dma_v3.c index 6740e31..1cdd22e 100644 --- a/drivers/dma/ioat/dma_v3.c +++ b/drivers/dma/ioat/dma_v3.c @@ -260,8 +260,8 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) struct ioat_chan_common *chan = &ioat->base; struct ioat_ring_ent *desc; bool seen_current = false; + int idx = ioat->tail, i; u16 active; - int i; dev_dbg(to_dev(chan), "%s: head: %#x tail: %#x issued: %#x\n", __func__, ioat->head, ioat->tail, ioat->issued); @@ -270,13 +270,14 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) for (i = 0; i < active && !seen_current; i++) { struct dma_async_tx_descriptor *tx; - prefetch(ioat2_get_ring_ent(ioat, ioat->tail + i + 1)); - desc = ioat2_get_ring_ent(ioat, ioat->tail + i); + smp_read_barrier_depends(); + prefetch(ioat2_get_ring_ent(ioat, idx + i + 1)); + desc = ioat2_get_ring_ent(ioat, idx + i); dump_desc_dbg(ioat, desc); tx = &desc->txd; if (tx->cookie) { chan->completed_cookie = tx->cookie; - ioat3_dma_unmap(ioat, 
desc, ioat->tail + i); + ioat3_dma_unmap(ioat, desc, idx + i); tx->cookie = 0; if (tx->callback) { tx->callback(tx->callback_param); @@ -293,69 +294,30 @@ static void __cleanup(struct ioat2_dma_chan *ioat, unsigned long phys_complete) i++; } } - ioat->tail += i; + smp_mb(); /* finish all descriptor reads before incrementing tail */ + ioat->tail = idx + i; BUG_ON(active && !seen_current); /* no active descs have written a completion? */ chan->last_completion = phys_complete; - active = ioat2_ring_active(ioat); - if (active == 0) { + if (active - i == 0) { dev_dbg(to_dev(chan), "%s: cancel completion timeout\n", __func__); clear_bit(IOAT_COMPLETION_PENDING, &chan->state); mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } /* 5 microsecond delay per pending descriptor */ - writew(min((5 * active), IOAT_INTRDELAY_MASK), + writew(min((5 * (active - i)), IOAT_INTRDELAY_MASK), chan->device->reg_base + IOAT_INTRDELAY_OFFSET); } -/* try to cleanup, but yield (via spin_trylock) to incoming submissions - * with the expectation that we will immediately poll again shortly - */ -static void ioat3_cleanup_poll(struct ioat2_dma_chan *ioat) +static void ioat3_cleanup(struct ioat2_dma_chan *ioat) { struct ioat_chan_common *chan = &ioat->base; unsigned long phys_complete; - prefetch(chan->completion); - - if (!spin_trylock_bh(&chan->cleanup_lock)) - return; - - if (!ioat_cleanup_preamble(chan, &phys_complete)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - - if (!spin_trylock_bh(&ioat->ring_lock)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - - __cleanup(ioat, phys_complete); - - spin_unlock_bh(&ioat->ring_lock); - spin_unlock_bh(&chan->cleanup_lock); -} - -/* run cleanup now because we already delayed the interrupt via INTRDELAY */ -static void ioat3_cleanup_sync(struct ioat2_dma_chan *ioat) -{ - struct ioat_chan_common *chan = &ioat->base; - unsigned long phys_complete; - - prefetch(chan->completion); - spin_lock_bh(&chan->cleanup_lock); - if (!ioat_cleanup_preamble(chan, &phys_complete)) { - spin_unlock_bh(&chan->cleanup_lock); - return; - } - spin_lock_bh(&ioat->ring_lock); - - __cleanup(ioat, phys_complete); - - spin_unlock_bh(&ioat->ring_lock); + if (ioat_cleanup_preamble(chan, &phys_complete)) + __cleanup(ioat, phys_complete); spin_unlock_bh(&chan->cleanup_lock); } @@ -363,7 +325,7 @@ static void ioat3_cleanup_event(unsigned long data) { struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); - ioat3_cleanup_sync(ioat); + ioat3_cleanup(ioat); writew(IOAT_CHANCTRL_RUN, ioat->base.reg_base + IOAT_CHANCTRL_OFFSET); } @@ -384,12 +346,10 @@ static void ioat3_timer_event(unsigned long data) struct ioat2_dma_chan *ioat = to_ioat2_chan((void *) data); struct ioat_chan_common *chan = &ioat->base; - spin_lock_bh(&chan->cleanup_lock); if (test_bit(IOAT_COMPLETION_PENDING, &chan->state)) { unsigned long phys_complete; u64 status; - spin_lock_bh(&ioat->ring_lock); status = ioat_chansts(chan); /* when halted due to errors check for channel @@ -408,26 +368,31 @@ static void ioat3_timer_event(unsigned long data) * acknowledged a pending completion once, then be more * forceful with a restart */ + spin_lock_bh(&chan->cleanup_lock); if (ioat_cleanup_preamble(chan, &phys_complete)) __cleanup(ioat, phys_complete); - else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) + else if (test_bit(IOAT_COMPLETION_ACK, &chan->state)) { + spin_lock_bh(&ioat->prep_lock); ioat3_restart_channel(ioat); - else { + spin_unlock_bh(&ioat->prep_lock); + } else { set_bit(IOAT_COMPLETION_ACK, &chan->state); 
mod_timer(&chan->timer, jiffies + COMPLETION_TIMEOUT); } - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&chan->cleanup_lock); } else { u16 active; /* if the ring is idle, empty, and oversized try to step * down the size */ - spin_lock_bh(&ioat->ring_lock); + spin_lock_bh(&chan->cleanup_lock); + spin_lock_bh(&ioat->prep_lock); active = ioat2_ring_active(ioat); if (active == 0 && ioat->alloc_order > ioat_get_alloc_order()) reshape_ring(ioat, ioat->alloc_order-1); - spin_unlock_bh(&ioat->ring_lock); + spin_unlock_bh(&ioat->prep_lock); + spin_unlock_bh(&chan->cleanup_lock); /* keep shrinking until we get back to our minimum * default size @@ -435,21 +400,20 @@ static void ioat3_timer_event(unsigned long data) if (ioat->alloc_order > ioat_get_alloc_order()) mod_timer(&chan->timer, jiffies + IDLE_TIMEOUT); } - spin_unlock_bh(&chan->cleanup_lock); } static enum dma_status -ioat3_is_complete(struct dma_chan *c, dma_cookie_t cookie, - dma_cookie_t *done, dma_cookie_t *used) +ioat3_tx_status(struct dma_chan *c, dma_cookie_t cookie, + struct dma_tx_state *txstate) { struct ioat2_dma_chan *ioat = to_ioat2_chan(c); - if (ioat_is_complete(c, cookie, done, used) == DMA_SUCCESS) + if (ioat_tx_status(c, cookie, txstate) == DMA_SUCCESS) return DMA_SUCCESS; - ioat3_cleanup_poll(ioat); + ioat3_cleanup(ioat); - return ioat_is_complete(c, cookie, done, used); + return ioat_tx_status(c, cookie, txstate); } static struct dma_async_tx_descriptor * @@ -460,15 +424,12 @@ ioat3_prep_memset_lock(struct dma_chan *c, dma_addr_t dest, int value, struct ioat_ring_ent *desc; size_t total_len = len; struct ioat_fill_descriptor *fill; - int num_descs; u64 src_data = (0x0101010101010101ULL) * (value & 0xff); - u16 idx; - int i; + int num_descs, idx, i; num_descs = ioat2_xferlen_to_descs(ioat, len); - if (likely(num_descs) && - ioat2_alloc_and_lock(&idx, ioat, num_descs) == 0) - /* pass */; + if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs) == 0) + idx = ioat->head; else return NULL; i = 0; @@ -513,11 +474,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, struct ioat_xor_descriptor *xor; struct ioat_xor_ext_descriptor *xor_ex = NULL; struct ioat_dma_descriptor *hw; + int num_descs, with_ext, idx, i; u32 offset = 0; - int num_descs; - int with_ext; - int i; - u16 idx; u8 op = result ? IOAT_OP_XOR_VAL : IOAT_OP_XOR; BUG_ON(src_cnt < 2); @@ -537,9 +495,8 @@ __ioat3_prep_xor_lock(struct dma_chan *c, enum sum_check_flags *result, * (legacy) descriptor to ensure all completion writes arrive in * order. */ - if (likely(num_descs) && - ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) - /* pass */; + if (likely(num_descs) && ioat2_check_space_lock(ioat, num_descs+1) == 0) + idx = ioat->head; else return NULL; i = 0; @@ -657,11 +614,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, struct ioat_pq_ext_descriptor *pq_ex = NULL; struct ioat_dma_descriptor *hw; u32 offset = 0; - int num_descs; - int with_ext; - int i, s; - u16 idx; u8 op = result ? IOAT_OP_PQ_VAL : IOAT_OP_PQ; + int i, s, idx, with_ext, num_descs; dev_dbg(to_dev(chan), "%s\n", __func__); /* the engine requires at least two sources (we provide @@ -687,8 +641,8 @@ __ioat3_prep_pq_lock(struct dma_chan *c, enum sum_check_flags *result, * order. 
*/ if (likely(num_descs) && - ioat2_alloc_and_lock(&idx, ioat, num_descs+1) == 0) - /* pass */; + ioat2_check_space_lock(ioat, num_descs+1) == 0) + idx = ioat->head; else return NULL; i = 0; @@ -851,10 +805,9 @@ ioat3_prep_interrupt_lock(struct dma_chan *c, unsigned long flags) struct ioat2_dma_chan *ioat = to_ioat2_chan(c); struct ioat_ring_ent *desc; struct ioat_dma_descriptor *hw; - u16 idx; - if (ioat2_alloc_and_lock(&idx, ioat, 1) == 0) - desc = ioat2_get_ring_ent(ioat, idx); + if (ioat2_check_space_lock(ioat, 1) == 0) + desc = ioat2_get_ring_ent(ioat, ioat->head); else return NULL; @@ -977,7 +930,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test xor timed out\n"); err = -ENODEV; goto free_resources; @@ -1031,7 +984,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test validate timed out\n"); err = -ENODEV; goto free_resources; @@ -1072,7 +1025,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test memset timed out\n"); err = -ENODEV; goto free_resources; @@ -1115,7 +1068,7 @@ static int __devinit ioat_xor_val_self_test(struct ioatdma_device *device) tmo = wait_for_completion_timeout(&cmp, msecs_to_jiffies(3000)); - if (dma->device_is_tx_complete(dma_chan, cookie, NULL, NULL) != DMA_SUCCESS) { + if (dma->device_tx_status(dma_chan, cookie, NULL) != DMA_SUCCESS) { dev_err(dev, "Self-test 2nd validate timed out\n"); err = -ENODEV; goto free_resources; @@ -1222,7 +1175,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (cap & IOAT_CAP_XOR) { is_raid_device = true; dma->max_xor = 8; - dma->xor_align = 2; + dma->xor_align = 6; dma_cap_set(DMA_XOR, dma->cap_mask); dma->device_prep_dma_xor = ioat3_prep_xor; @@ -1233,7 +1186,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (cap & IOAT_CAP_PQ) { is_raid_device = true; dma_set_maxpq(dma, 8, 0); - dma->pq_align = 2; + dma->pq_align = 6; dma_cap_set(DMA_PQ, dma->cap_mask); dma->device_prep_dma_pq = ioat3_prep_pq; @@ -1243,7 +1196,7 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (!(cap & IOAT_CAP_XOR)) { dma->max_xor = 8; - dma->xor_align = 2; + dma->xor_align = 6; dma_cap_set(DMA_XOR, dma->cap_mask); dma->device_prep_dma_xor = ioat3_prep_pqxor; @@ -1259,11 +1212,11 @@ int __devinit ioat3_dma_probe(struct ioatdma_device *device, int dca) if (is_raid_device) { - dma->device_is_tx_complete = ioat3_is_complete; + dma->device_tx_status = ioat3_tx_status; device->cleanup_fn = ioat3_cleanup_event; device->timer_fn = ioat3_timer_event; } else { - dma->device_is_tx_complete = ioat_is_dma_complete; + dma->device_tx_status = ioat_dma_tx_status; device->cleanup_fn = ioat2_cleanup_event; device->timer_fn = ioat2_timer_event; } diff --git a/drivers/dma/ioat/pci.c b/drivers/dma/ioat/pci.c 
index 99ec267..fab37d1 100644
--- a/drivers/dma/ioat/pci.c
+++ b/drivers/dma/ioat/pci.c
@@ -138,15 +138,10 @@ static int __devinit ioat_pci_probe(struct pci_dev *pdev, const struct pci_devic
 	if (err)
 		return err;
 
-	device = devm_kzalloc(dev, sizeof(*device), GFP_KERNEL);
-	if (!device)
-		return -ENOMEM;
-
-	pci_set_master(pdev);
-
 	device = alloc_ioatdma(pdev, iomap[IOAT_MMIO_BAR]);
 	if (!device)
 		return -ENOMEM;
+	pci_set_master(pdev);
 	pci_set_drvdata(pdev, device);
 
 	device->version = readb(device->reg_base + IOAT_VER_OFFSET);
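
The device_is_tx_complete() callbacks above become device_tx_status() and report progress through struct dma_tx_state, filled by dma_set_tx_state(), instead of separate done/used cookie pointers. A minimal sketch of that reporting path (a userspace illustration with simplified stand-ins for the dmaengine types, not the kernel definitions):

/*
 * Sketch of the dma_tx_state reporting that the conversion above switches to.
 * The types below only mirror <linux/dmaengine.h> closely enough to make the
 * example self-contained and buildable outside the kernel.
 */
#include <stdint.h>
#include <stdio.h>

typedef int32_t dma_cookie_t;

enum dma_status { DMA_SUCCESS, DMA_IN_PROGRESS, DMA_ERROR };

struct dma_tx_state {
	dma_cookie_t last;	/* last completed cookie */
	dma_cookie_t used;	/* last issued cookie */
	uint32_t residue;	/* bytes remaining in the transfer (ioat reports 0) */
};

/*
 * Callers such as the ioat self-tests pass a NULL state when they only care
 * about the return value, so the helper must tolerate that.
 */
static void dma_set_tx_state(struct dma_tx_state *st, dma_cookie_t last,
			     dma_cookie_t used, uint32_t residue)
{
	if (st) {
		st->last = last;
		st->used = used;
		st->residue = residue;
	}
}

/* same wrap-aware comparison the driver relies on via dma_async_is_complete() */
static enum dma_status dma_async_is_complete(dma_cookie_t cookie,
					     dma_cookie_t last,
					     dma_cookie_t used)
{
	if (last <= used)
		return (cookie <= last || cookie > used) ? DMA_SUCCESS
							 : DMA_IN_PROGRESS;
	return (cookie <= last && cookie > used) ? DMA_SUCCESS : DMA_IN_PROGRESS;
}

int main(void)
{
	struct dma_tx_state st;

	/* pretend cookies 1..7 were issued and 1..5 have completed */
	dma_set_tx_state(&st, 5, 7, 0);
	printf("cookie 4: %s\n",
	       dma_async_is_complete(4, st.last, st.used) == DMA_SUCCESS ?
	       "complete" : "in progress");
	printf("cookie 7: %s\n",
	       dma_async_is_complete(7, st.last, st.used) == DMA_SUCCESS ?
	       "complete" : "in progress");
	return 0;
}
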