Diffstat (limited to 'drivers/usb/host/ehci-sched.c')
-rw-r--r-- | drivers/usb/host/ehci-sched.c | 339
1 file changed, 240 insertions, 99 deletions
diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 7b5ae71..d4a8ace 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -44,9 +44,10 @@ static int ehci_get_frame (struct usb_hcd *hcd); * @tag: hardware tag for type of this record */ static union ehci_shadow * -periodic_next_shadow (union ehci_shadow *periodic, __le32 tag) +periodic_next_shadow(struct ehci_hcd *ehci, union ehci_shadow *periodic, + __hc32 tag) { - switch (tag) { + switch (hc32_to_cpu(ehci, tag)) { case Q_TYPE_QH: return &periodic->qh->qh_next; case Q_TYPE_FSTN: @@ -62,13 +63,14 @@ periodic_next_shadow (union ehci_shadow *periodic, __le32 tag) /* caller must hold ehci->lock */ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) { - union ehci_shadow *prev_p = &ehci->pshadow [frame]; - __le32 *hw_p = &ehci->periodic [frame]; + union ehci_shadow *prev_p = &ehci->pshadow[frame]; + __hc32 *hw_p = &ehci->periodic[frame]; union ehci_shadow here = *prev_p; /* find predecessor of "ptr"; hw and shadow lists are in sync */ while (here.ptr && here.ptr != ptr) { - prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p)); + prev_p = periodic_next_shadow(ehci, prev_p, + Q_NEXT_TYPE(ehci, *hw_p)); hw_p = here.hw_next; here = *prev_p; } @@ -79,7 +81,8 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) /* update shadow and hardware lists ... the old "next" pointers * from ptr may still be in use, the caller updates them. */ - *prev_p = *periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p)); + *prev_p = *periodic_next_shadow(ehci, &here, + Q_NEXT_TYPE(ehci, *hw_p)); *hw_p = *here.hw_next; } @@ -87,18 +90,19 @@ static void periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr) static unsigned short periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) { - __le32 *hw_p = &ehci->periodic [frame]; + __hc32 *hw_p = &ehci->periodic [frame]; union ehci_shadow *q = &ehci->pshadow [frame]; unsigned usecs = 0; while (q->ptr) { - switch (Q_NEXT_TYPE (*hw_p)) { + switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { case Q_TYPE_QH: /* is it in the S-mask? */ - if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe)) + if (q->qh->hw_info2 & cpu_to_hc32(ehci, 1 << uframe)) usecs += q->qh->usecs; /* ... or C-mask? */ - if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe))) + if (q->qh->hw_info2 & cpu_to_hc32(ehci, + 1 << (8 + uframe))) usecs += q->qh->c_usecs; hw_p = &q->qh->hw_next; q = &q->qh->qh_next; @@ -108,7 +112,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) /* for "save place" FSTNs, count the relevant INTR * bandwidth from the previous frame */ - if (q->fstn->hw_prev != EHCI_LIST_END) { + if (q->fstn->hw_prev != EHCI_LIST_END(ehci)) { ehci_dbg (ehci, "ignoring FSTN cost ...\n"); } hw_p = &q->fstn->hw_next; @@ -121,9 +125,10 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) break; case Q_TYPE_SITD: /* is it in the S-mask? (count SPLIT, DATA) */ - if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) { + if (q->sitd->hw_uframe & cpu_to_hc32(ehci, + 1 << uframe)) { if (q->sitd->hw_fullspeed_ep & - __constant_cpu_to_le32 (1<<31)) + cpu_to_hc32(ehci, 1<<31)) usecs += q->sitd->stream->usecs; else /* worst case for OUT start-split */ usecs += HS_USECS_ISO (188); @@ -131,7 +136,7 @@ periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe) /* ... C-mask? 
(count CSPLIT, DATA) */ if (q->sitd->hw_uframe & - cpu_to_le32 (1 << (8 + uframe))) { + cpu_to_hc32(ehci, 1 << (8 + uframe))) { /* worst case for IN complete-split */ usecs += q->sitd->stream->c_usecs; } @@ -173,9 +178,9 @@ static int same_tt (struct usb_device *dev1, struct usb_device *dev2) * will cause a transfer in "B-frame" uframe 0. "B-frames" lag * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7. */ -static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __le32 mask) +static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) { - unsigned char smask = QH_SMASK & le32_to_cpu(mask); + unsigned char smask = QH_SMASK & hc32_to_cpu(ehci, mask); if (!smask) { ehci_err(ehci, "invalid empty smask!\n"); /* uframe 7 can't have bw so this will indicate failure */ @@ -217,14 +222,14 @@ periodic_tt_usecs ( unsigned short tt_usecs[8] ) { - __le32 *hw_p = &ehci->periodic [frame]; + __hc32 *hw_p = &ehci->periodic [frame]; union ehci_shadow *q = &ehci->pshadow [frame]; unsigned char uf; memset(tt_usecs, 0, 16); while (q->ptr) { - switch (Q_NEXT_TYPE(*hw_p)) { + switch (hc32_to_cpu(ehci, Q_NEXT_TYPE(ehci, *hw_p))) { case Q_TYPE_ITD: hw_p = &q->itd->hw_next; q = &q->itd->itd_next; @@ -247,8 +252,8 @@ periodic_tt_usecs ( continue; // case Q_TYPE_FSTN: default: - ehci_dbg(ehci, - "ignoring periodic frame %d FSTN\n", frame); + ehci_dbg(ehci, "ignoring periodic frame %d FSTN\n", + frame); hw_p = &q->fstn->hw_next; q = &q->fstn->fstn_next; } @@ -368,41 +373,42 @@ static int tt_no_collision ( */ for (; frame < ehci->periodic_size; frame += period) { union ehci_shadow here; - __le32 type; + __hc32 type; here = ehci->pshadow [frame]; - type = Q_NEXT_TYPE (ehci->periodic [frame]); + type = Q_NEXT_TYPE(ehci, ehci->periodic [frame]); while (here.ptr) { - switch (type) { + switch (hc32_to_cpu(ehci, type)) { case Q_TYPE_ITD: - type = Q_NEXT_TYPE (here.itd->hw_next); + type = Q_NEXT_TYPE(ehci, here.itd->hw_next); here = here.itd->itd_next; continue; case Q_TYPE_QH: if (same_tt (dev, here.qh->dev)) { u32 mask; - mask = le32_to_cpu (here.qh->hw_info2); + mask = hc32_to_cpu(ehci, + here.qh->hw_info2); /* "knows" no gap is needed */ mask |= mask >> 8; if (mask & uf_mask) break; } - type = Q_NEXT_TYPE (here.qh->hw_next); + type = Q_NEXT_TYPE(ehci, here.qh->hw_next); here = here.qh->qh_next; continue; case Q_TYPE_SITD: if (same_tt (dev, here.sitd->urb->dev)) { u16 mask; - mask = le32_to_cpu (here.sitd + mask = hc32_to_cpu(ehci, here.sitd ->hw_uframe); /* FIXME assumes no gap for IN! 
*/ mask |= mask >> 8; if (mask & uf_mask) break; } - type = Q_NEXT_TYPE (here.sitd->hw_next); + type = Q_NEXT_TYPE(ehci, here.sitd->hw_next); here = here.sitd->sitd_next; continue; // case Q_TYPE_FSTN: @@ -473,6 +479,109 @@ static int disable_periodic (struct ehci_hcd *ehci) } /*-------------------------------------------------------------------------*/ +#ifdef CONFIG_CPU_FREQ + +static int safe_to_modify_i (struct ehci_hcd *ehci, struct ehci_qh *qh) +{ + int now; /* current (frame * 8) + uframe */ + int prev_start, next_start; /* uframes from/to split start */ + int start_uframe = ffs(le32_to_cpup (&qh->hw_info2) & QH_SMASK); + int end_uframe = fls((le32_to_cpup (&qh->hw_info2) & QH_CMASK) >> 8); + int split_duration = end_uframe - start_uframe; + + now = readl(&ehci->regs->frame_index) % (ehci->periodic_size << 3); + + next_start = ((1024 << 3) + (qh->start << 3) + start_uframe - now) + % (qh->period << 3); + prev_start = (qh->period << 3) - next_start; + + /* + * Make sure there will be at least one uframe when qh is safe. + */ + if ((qh->period << 3) <= (ehci->i_thresh + 2 + split_duration)) + /* never safe */ + return -EINVAL; + + /* + * Wait 1 uframe after transaction should have started, to make + * sure controller has time to write back overlay, so we can + * check QTD_STS_STS to see if transaction is in progress. + */ + if ((next_start > ehci->i_thresh) && (prev_start > 1)) + /* safe to set "i" bit if split isn't in progress */ + return (qh->hw_token & STATUS_BIT(ehci)) ? 0 : 1; + else + return 0; +} + +/* Set inactivate bit for all the split interrupt QHs. */ +static void qh_inactivate_split_intr_qhs (struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + int not_done, safe; + u32 inactivate = INACTIVATE_BIT(ehci); + u32 active = ACTIVE_BIT(ehci); + + do { + not_done = 0; + list_for_each_entry(qh, &ehci->split_intr_qhs, + split_intr_qhs) { + if (qh->hw_info1 & inactivate) + /* already off */ + continue; + /* + * To avoid setting "I" after the start split happens, + * don't set it if the QH might be cached in the + * controller. Some HCs (Broadcom/ServerWorks HT1000) + * will stop in the middle of a split transaction when + * the "I" bit is set. + */ + safe = safe_to_modify_i(ehci, qh); + if (safe == 0) { + not_done = 1; + } else if (safe > 0) { + qh->was_active = qh->hw_token & active; + qh->hw_info1 |= inactivate; + } + } + } while (not_done); + wmb(); +} + +static void qh_reactivate_split_intr_qhs (struct ehci_hcd *ehci) +{ + struct ehci_qh *qh; + u32 token; + int not_done, safe; + u32 inactivate = INACTIVATE_BIT(ehci); + u32 active = ACTIVE_BIT(ehci); + u32 halt = HALT_BIT(ehci); + + do { + not_done = 0; + list_for_each_entry(qh, &ehci->split_intr_qhs, split_intr_qhs) { + if (!(qh->hw_info1 & inactivate)) /* already on */ + continue; + /* + * Don't reactivate if cached, or controller might + * overwrite overlay after we modify it! + */ + safe = safe_to_modify_i(ehci, qh); + if (safe == 0) { + not_done = 1; + } else if (safe > 0) { + /* See EHCI 1.0 section 4.15.2.4. */ + token = qh->hw_token; + qh->hw_token = (token | halt) & ~active; + wmb(); + qh->hw_info1 &= ~inactivate; + wmb(); + qh->hw_token = (token & ~halt) | qh->was_active; + } + } + } while (not_done); +} +#endif /* periodic schedule slots have iso tds (normal or split) first, then a * sparse tree for active interrupt transfers. 
@@ -487,25 +596,36 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) dev_dbg (&qh->dev->dev, "link qh%d-%04x/%p start %d [%d/%d us]\n", - period, le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK), + period, hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs); +#ifdef CONFIG_CPU_FREQ + /* + * If low/full speed interrupt QHs are inactive (because of + * cpufreq changing processor speeds), start QH with I flag set-- + * it will automatically be cleared when cpufreq is done. + */ + if (ehci->cpufreq_changing) + if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) + qh->hw_info1 |= INACTIVATE_BIT(ehci); +#endif + /* high bandwidth, or otherwise every microframe */ if (period == 0) period = 1; for (i = qh->start; i < ehci->periodic_size; i += period) { - union ehci_shadow *prev = &ehci->pshadow [i]; - __le32 *hw_p = &ehci->periodic [i]; + union ehci_shadow *prev = &ehci->pshadow[i]; + __hc32 *hw_p = &ehci->periodic[i]; union ehci_shadow here = *prev; - __le32 type = 0; + __hc32 type = 0; /* skip the iso nodes at list head */ while (here.ptr) { - type = Q_NEXT_TYPE (*hw_p); - if (type == Q_TYPE_QH) + type = Q_NEXT_TYPE(ehci, *hw_p); + if (type == cpu_to_hc32(ehci, Q_TYPE_QH)) break; - prev = periodic_next_shadow (prev, type); + prev = periodic_next_shadow(ehci, prev, type); hw_p = &here.qh->hw_next; here = *prev; } @@ -527,7 +647,7 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) qh->hw_next = *hw_p; wmb (); prev->qh = qh; - *hw_p = QH_NEXT (qh->qh_dma); + *hw_p = QH_NEXT (ehci, qh->qh_dma); } } qh->qh_state = QH_STATE_LINKED; @@ -538,6 +658,12 @@ static int qh_link_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) ? ((qh->usecs + qh->c_usecs) / qh->period) : (qh->usecs * 8); +#ifdef CONFIG_CPU_FREQ + /* add qh to list of low/full speed interrupt QHs, if applicable */ + if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) { + list_add(&qh->split_intr_qhs, &ehci->split_intr_qhs); + } +#endif /* maybe enable periodic schedule processing */ if (!ehci->periodic_sched++) return enable_periodic (ehci); @@ -555,7 +681,14 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) // and this qh is active in the current uframe // (and overlay token SplitXstate is false?) // THEN - // qh->hw_info1 |= __constant_cpu_to_le32 (1 << 7 /* "ignore" */); + // qh->hw_info1 |= __constant_cpu_to_hc32(1 << 7 /* "ignore" */); + +#ifdef CONFIG_CPU_FREQ + /* remove qh from list of low/full speed interrupt QHs */ + if (!(qh->hw_info1 & (cpu_to_le32(1 << 13)))) { + list_del_init(&qh->split_intr_qhs); + } +#endif /* high bandwidth, or otherwise part of every microframe */ if ((period = qh->period) == 0) @@ -572,7 +705,7 @@ static void qh_unlink_periodic (struct ehci_hcd *ehci, struct ehci_qh *qh) dev_dbg (&qh->dev->dev, "unlink qh%d-%04x/%p start %d [%d/%d us]\n", qh->period, - le32_to_cpup (&qh->hw_info2) & (QH_CMASK | QH_SMASK), + hc32_to_cpup(ehci, &qh->hw_info2) & (QH_CMASK | QH_SMASK), qh, qh->start, qh->usecs, qh->c_usecs); /* qh->qh_next still "live" to HC */ @@ -598,7 +731,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) * active high speed queues may need bigger delays... 
*/ if (list_empty (&qh->qtd_list) - || (__constant_cpu_to_le32 (QH_CMASK) + || (cpu_to_hc32(ehci, QH_CMASK) & qh->hw_info2) != 0) wait = 2; else @@ -606,7 +739,7 @@ static void intr_deschedule (struct ehci_hcd *ehci, struct ehci_qh *qh) udelay (wait); qh->qh_state = QH_STATE_IDLE; - qh->hw_next = EHCI_LIST_END; + qh->hw_next = EHCI_LIST_END(ehci); wmb (); } @@ -663,7 +796,7 @@ static int check_intr_schedule ( unsigned frame, unsigned uframe, const struct ehci_qh *qh, - __le32 *c_maskp + __hc32 *c_maskp ) { int retval = -ENOSPC; @@ -695,7 +828,7 @@ static int check_intr_schedule ( retval = 0; - *c_maskp = cpu_to_le32 (mask << 8); + *c_maskp = cpu_to_hc32(ehci, mask << 8); } #else /* Make sure this tt's buffer is also available for CSPLITs. @@ -706,7 +839,7 @@ static int check_intr_schedule ( * one smart pass... */ mask = 0x03 << (uframe + qh->gap_uf); - *c_maskp = cpu_to_le32 (mask << 8); + *c_maskp = cpu_to_hc32(ehci, mask << 8); mask |= 1 << uframe; if (tt_no_collision (ehci, qh->period, qh->dev, frame, mask)) { @@ -726,20 +859,20 @@ done: /* "first fit" scheduling policy used the first time through, * or when the previous schedule slot can't be re-used. */ -static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh) +static int qh_schedule(struct ehci_hcd *ehci, struct ehci_qh *qh) { int status; unsigned uframe; - __le32 c_mask; + __hc32 c_mask; unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */ qh_refresh(ehci, qh); - qh->hw_next = EHCI_LIST_END; + qh->hw_next = EHCI_LIST_END(ehci); frame = qh->start; /* reuse the previous schedule slots, if we can */ if (frame < qh->period) { - uframe = ffs (le32_to_cpup (&qh->hw_info2) & QH_SMASK); + uframe = ffs(hc32_to_cpup(ehci, &qh->hw_info2) & QH_SMASK); status = check_intr_schedule (ehci, frame, --uframe, qh, &c_mask); } else { @@ -775,10 +908,10 @@ static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh) qh->start = frame; /* reset S-frame and (maybe) C-frame masks */ - qh->hw_info2 &= __constant_cpu_to_le32(~(QH_CMASK | QH_SMASK)); + qh->hw_info2 &= cpu_to_hc32(ehci, ~(QH_CMASK | QH_SMASK)); qh->hw_info2 |= qh->period - ? cpu_to_le32 (1 << uframe) - : __constant_cpu_to_le32 (QH_SMASK); + ? cpu_to_hc32(ehci, 1 << uframe) + : cpu_to_hc32(ehci, QH_SMASK); qh->hw_info2 |= c_mask; } else ehci_dbg (ehci, "reused qh %p schedule\n", qh); @@ -808,7 +941,7 @@ static int intr_submit ( spin_lock_irqsave (&ehci->lock, flags); if (unlikely(!test_bit(HCD_FLAG_HW_ACCESSIBLE, - &ehci_to_hcd(ehci)->flags))) { + &ehci_to_hcd(ehci)->flags))) { status = -ESHUTDOWN; goto done; } @@ -898,9 +1031,9 @@ iso_stream_init ( buf1 |= maxp; maxp *= multi; - stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum); - stream->buf1 = cpu_to_le32 (buf1); - stream->buf2 = cpu_to_le32 (multi); + stream->buf0 = cpu_to_hc32(ehci, (epnum << 8) | dev->devnum); + stream->buf1 = cpu_to_hc32(ehci, buf1); + stream->buf2 = cpu_to_hc32(ehci, multi); /* usbfs wants to report the average usecs per frame tied up * when transfers on this endpoint are scheduled ... 
@@ -943,7 +1076,7 @@ iso_stream_init ( bandwidth /= 1 << (interval + 2); /* stream->splits gets created from raw_mask later */ - stream->address = cpu_to_le32 (addr); + stream->address = cpu_to_hc32(ehci, addr); } stream->bandwidth = bandwidth; @@ -1077,7 +1210,8 @@ iso_sched_alloc (unsigned packets, gfp_t mem_flags) } static inline void -itd_sched_init ( +itd_sched_init( + struct ehci_hcd *ehci, struct ehci_iso_sched *iso_sched, struct ehci_iso_stream *stream, struct urb *urb @@ -1107,7 +1241,7 @@ itd_sched_init ( && !(urb->transfer_flags & URB_NO_INTERRUPT)) trans |= EHCI_ITD_IOC; trans |= length << 16; - uframe->transaction = cpu_to_le32 (trans); + uframe->transaction = cpu_to_hc32(ehci, trans); /* might need to cross a buffer page within a uframe */ uframe->bufp = (buf & ~(u64)0x0fff); @@ -1149,7 +1283,7 @@ itd_urb_transaction ( if (unlikely (sched == NULL)) return -ENOMEM; - itd_sched_init (sched, stream, urb); + itd_sched_init(ehci, sched, stream, urb); if (urb->interval < 8) num_itds = 1 + (sched->span + 7) / 8; @@ -1167,7 +1301,7 @@ itd_urb_transaction ( /* prefer previously-allocated itds */ if (likely (!list_empty(&stream->free_list))) { itd = list_entry (stream->free_list.prev, - struct ehci_itd, itd_list); + struct ehci_itd, itd_list); list_del (&itd->itd_list); itd_dma = itd->itd_dma; } else @@ -1294,7 +1428,7 @@ sitd_slot_ok ( uframe += period_uframes; } while (uframe < mod); - stream->splits = cpu_to_le32(stream->raw_mask << (uframe & 7)); + stream->splits = cpu_to_hc32(ehci, stream->raw_mask << (uframe & 7)); return 1; } @@ -1415,12 +1549,13 @@ ready: /*-------------------------------------------------------------------------*/ static inline void -itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd) +itd_init(struct ehci_hcd *ehci, struct ehci_iso_stream *stream, + struct ehci_itd *itd) { int i; /* it's been recently zeroed */ - itd->hw_next = EHCI_LIST_END; + itd->hw_next = EHCI_LIST_END(ehci); itd->hw_bufp [0] = stream->buf0; itd->hw_bufp [1] = stream->buf1; itd->hw_bufp [2] = stream->buf2; @@ -1432,7 +1567,8 @@ itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd) } static inline void -itd_patch ( +itd_patch( + struct ehci_hcd *ehci, struct ehci_itd *itd, struct ehci_iso_sched *iso_sched, unsigned index, @@ -1447,17 +1583,18 @@ itd_patch ( uframe &= 0x07; itd->index [uframe] = index; - itd->hw_transaction [uframe] = uf->transaction; - itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12); - itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0); - itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32)); + itd->hw_transaction[uframe] = uf->transaction; + itd->hw_transaction[uframe] |= cpu_to_hc32(ehci, pg << 12); + itd->hw_bufp[pg] |= cpu_to_hc32(ehci, uf->bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(uf->bufp >> 32)); /* iso_frame_desc[].offset must be strictly increasing */ if (unlikely (uf->cross)) { u64 bufp = uf->bufp + 4096; + itd->pg = ++pg; - itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0); - itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32)); + itd->hw_bufp[pg] |= cpu_to_hc32(ehci, bufp & ~(u32)0); + itd->hw_bufp_hi[pg] |= cpu_to_hc32(ehci, (u32)(bufp >> 32)); } } @@ -1470,7 +1607,7 @@ itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd) ehci->pshadow [frame].itd = itd; itd->frame = frame; wmb (); - ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD; + ehci->periodic[frame] = cpu_to_hc32(ehci, itd->itd_dma | Q_TYPE_ITD); } /* fit urb's itds into the selected schedule slot; 
activate as needed */ @@ -1515,14 +1652,14 @@ itd_link_urb ( list_move_tail (&itd->itd_list, &stream->td_list); itd->stream = iso_stream_get (stream); itd->urb = usb_get_urb (urb); - itd_init (stream, itd); + itd_init (ehci, stream, itd); } uframe = next_uframe & 0x07; frame = next_uframe >> 3; itd->usecs [uframe] = stream->usecs; - itd_patch (itd, iso_sched, packet, uframe); + itd_patch(ehci, itd, iso_sched, packet, uframe); next_uframe += stream->interval; stream->depth += stream->interval; @@ -1570,7 +1707,7 @@ itd_complete ( urb_index = itd->index[uframe]; desc = &urb->iso_frame_desc [urb_index]; - t = le32_to_cpup (&itd->hw_transaction [uframe]); + t = hc32_to_cpup(ehci, &itd->hw_transaction [uframe]); itd->hw_transaction [uframe] = 0; stream->depth -= stream->interval; @@ -1700,7 +1837,8 @@ done: */ static inline void -sitd_sched_init ( +sitd_sched_init( + struct ehci_hcd *ehci, struct ehci_iso_sched *iso_sched, struct ehci_iso_stream *stream, struct urb *urb @@ -1729,7 +1867,7 @@ sitd_sched_init ( && !(urb->transfer_flags & URB_NO_INTERRUPT)) trans |= SITD_IOC; trans |= length << 16; - packet->transaction = cpu_to_le32 (trans); + packet->transaction = cpu_to_hc32(ehci, trans); /* might need to cross a buffer page within a td */ packet->bufp = buf; @@ -1765,7 +1903,7 @@ sitd_urb_transaction ( if (iso_sched == NULL) return -ENOMEM; - sitd_sched_init (iso_sched, stream, urb); + sitd_sched_init(ehci, iso_sched, stream, urb); /* allocate/init sITDs */ spin_lock_irqsave (&ehci->lock, flags); @@ -1817,7 +1955,8 @@ sitd_urb_transaction ( /*-------------------------------------------------------------------------*/ static inline void -sitd_patch ( +sitd_patch( + struct ehci_hcd *ehci, struct ehci_iso_stream *stream, struct ehci_sitd *sitd, struct ehci_iso_sched *iso_sched, @@ -1827,20 +1966,20 @@ sitd_patch ( struct ehci_iso_packet *uf = &iso_sched->packet [index]; u64 bufp = uf->bufp; - sitd->hw_next = EHCI_LIST_END; + sitd->hw_next = EHCI_LIST_END(ehci); sitd->hw_fullspeed_ep = stream->address; sitd->hw_uframe = stream->splits; sitd->hw_results = uf->transaction; - sitd->hw_backpointer = EHCI_LIST_END; + sitd->hw_backpointer = EHCI_LIST_END(ehci); bufp = uf->bufp; - sitd->hw_buf [0] = cpu_to_le32 (bufp); - sitd->hw_buf_hi [0] = cpu_to_le32 (bufp >> 32); + sitd->hw_buf[0] = cpu_to_hc32(ehci, bufp); + sitd->hw_buf_hi[0] = cpu_to_hc32(ehci, bufp >> 32); - sitd->hw_buf [1] = cpu_to_le32 (uf->buf1); + sitd->hw_buf[1] = cpu_to_hc32(ehci, uf->buf1); if (uf->cross) bufp += 4096; - sitd->hw_buf_hi [1] = cpu_to_le32 (bufp >> 32); + sitd->hw_buf_hi[1] = cpu_to_hc32(ehci, bufp >> 32); sitd->index = index; } @@ -1853,7 +1992,7 @@ sitd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_sitd *sitd) ehci->pshadow [frame].sitd = sitd; sitd->frame = frame; wmb (); - ehci->periodic [frame] = cpu_to_le32 (sitd->sitd_dma) | Q_TYPE_SITD; + ehci->periodic[frame] = cpu_to_hc32(ehci, sitd->sitd_dma | Q_TYPE_SITD); } /* fit urb's sitds into the selected schedule slot; activate as needed */ @@ -1881,7 +2020,7 @@ sitd_link_urb ( urb->dev->devpath, stream->bEndpointAddress & 0x0f, (stream->bEndpointAddress & USB_DIR_IN) ? 
"in" : "out", (next_uframe >> 3) % ehci->periodic_size, - stream->interval, le32_to_cpu (stream->splits)); + stream->interval, hc32_to_cpu(ehci, stream->splits)); stream->start = jiffies; } ehci_to_hcd(ehci)->self.bandwidth_isoc_reqs++; @@ -1902,7 +2041,7 @@ sitd_link_urb ( sitd->stream = iso_stream_get (stream); sitd->urb = usb_get_urb (urb); - sitd_patch (stream, sitd, sched, packet); + sitd_patch(ehci, stream, sitd, sched, packet); sitd_link (ehci, (next_uframe >> 3) % ehci->periodic_size, sitd); @@ -1940,7 +2079,7 @@ sitd_complete ( urb_index = sitd->index; desc = &urb->iso_frame_desc [urb_index]; - t = le32_to_cpup (&sitd->hw_results); + t = hc32_to_cpup(ehci, &sitd->hw_results); /* report transfer status */ if (t & SITD_ERRS) { @@ -2095,7 +2234,7 @@ scan_periodic (struct ehci_hcd *ehci) for (;;) { union ehci_shadow q, *q_p; - __le32 type, *hw_p; + __hc32 type, *hw_p; unsigned uframes; /* don't scan past the live uframe */ @@ -2113,7 +2252,7 @@ restart: q_p = &ehci->pshadow [frame]; hw_p = &ehci->periodic [frame]; q.ptr = q_p->ptr; - type = Q_NEXT_TYPE (*hw_p); + type = Q_NEXT_TYPE(ehci, *hw_p); modified = 0; while (q.ptr != NULL) { @@ -2122,11 +2261,11 @@ restart: int live; live = HC_IS_RUNNING (ehci_to_hcd(ehci)->state); - switch (type) { + switch (hc32_to_cpu(ehci, type)) { case Q_TYPE_QH: /* handle any completions */ temp.qh = qh_get (q.qh); - type = Q_NEXT_TYPE (q.qh->hw_next); + type = Q_NEXT_TYPE(ehci, q.qh->hw_next); q = q.qh->qh_next; modified = qh_completions (ehci, temp.qh); if (unlikely (list_empty (&temp.qh->qtd_list))) @@ -2137,10 +2276,10 @@ restart: /* for "save place" FSTNs, look at QH entries * in the previous frame for completions. */ - if (q.fstn->hw_prev != EHCI_LIST_END) { + if (q.fstn->hw_prev != EHCI_LIST_END(ehci)) { dbg ("ignoring completions from FSTNs"); } - type = Q_NEXT_TYPE (q.fstn->hw_next); + type = Q_NEXT_TYPE(ehci, q.fstn->hw_next); q = q.fstn->fstn_next; break; case Q_TYPE_ITD: @@ -2148,11 +2287,12 @@ restart: rmb (); for (uf = live ? uframes : 8; uf < 8; uf++) { if (0 == (q.itd->hw_transaction [uf] - & ITD_ACTIVE)) + & ITD_ACTIVE(ehci))) continue; q_p = &q.itd->itd_next; hw_p = &q.itd->hw_next; - type = Q_NEXT_TYPE (q.itd->hw_next); + type = Q_NEXT_TYPE(ehci, + q.itd->hw_next); q = *q_p; break; } @@ -2164,23 +2304,24 @@ restart: */ *q_p = q.itd->itd_next; *hw_p = q.itd->hw_next; - type = Q_NEXT_TYPE (q.itd->hw_next); + type = Q_NEXT_TYPE(ehci, q.itd->hw_next); wmb(); modified = itd_complete (ehci, q.itd); q = *q_p; break; case Q_TYPE_SITD: - if ((q.sitd->hw_results & SITD_ACTIVE) + if ((q.sitd->hw_results & SITD_ACTIVE(ehci)) && live) { q_p = &q.sitd->sitd_next; hw_p = &q.sitd->hw_next; - type = Q_NEXT_TYPE (q.sitd->hw_next); + type = Q_NEXT_TYPE(ehci, + q.sitd->hw_next); q = *q_p; break; } *q_p = q.sitd->sitd_next; *hw_p = q.sitd->hw_next; - type = Q_NEXT_TYPE (q.sitd->hw_next); + type = Q_NEXT_TYPE(ehci, q.sitd->hw_next); wmb(); modified = sitd_complete (ehci, q.sitd); q = *q_p; |