-rw-r--r--  Documentation/memory-barriers.txt | 348
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/alpha/Kconfig | 2
-rw-r--r--  arch/arm/mach-ep93xx/ts72xx.c | 8
-rw-r--r--  arch/arm/mach-imx/irq.c | 2
-rw-r--r--  arch/arm/mach-integrator/integrator_cp.c | 5
-rw-r--r--  arch/arm/mach-pxa/spitz.c | 1
-rw-r--r--  arch/arm/mach-sa1100/neponset.c | 8
-rw-r--r--  arch/arm/mach-versatile/core.c | 5
-rw-r--r--  arch/i386/kernel/acpi/earlyquirk.c | 23
-rw-r--r--  arch/i386/kernel/setup.c | 11
-rw-r--r--  arch/powerpc/kernel/prom_init.c | 10
-rw-r--r--  arch/powerpc/kernel/signal_32.c | 11
-rw-r--r--  arch/powerpc/kernel/signal_64.c | 2
-rw-r--r--  arch/powerpc/mm/hash_native_64.c | 4
-rw-r--r--  arch/powerpc/platforms/cell/setup.c | 11
-rw-r--r--  arch/powerpc/platforms/pseries/setup.c | 8
-rw-r--r--  arch/sparc/kernel/smp.c | 11
-rw-r--r--  arch/sparc64/kernel/pci_sun4v.c | 124
-rw-r--r--  arch/sparc64/kernel/smp.c | 35
-rw-r--r--  arch/sparc64/kernel/sparc64_ksyms.c | 1
-rw-r--r--  arch/sparc64/kernel/traps.c | 11
-rw-r--r--  arch/x86_64/kernel/io_apic.c | 30
-rw-r--r--  block/as-iosched.c | 13
-rw-r--r--  block/cfq-iosched.c | 21
-rw-r--r--  block/deadline-iosched.c | 13
-rw-r--r--  block/elevator.c | 55
-rw-r--r--  block/noop-iosched.c | 7
-rw-r--r--  drivers/acpi/processor_perflib.c | 5
-rw-r--r--  drivers/cdrom/cdrom.c | 6
-rw-r--r--  drivers/char/Makefile | 2
-rw-r--r--  drivers/char/n_tty.c | 4
-rw-r--r--  drivers/message/fusion/mptspi.c | 2
-rw-r--r--  drivers/message/i2o/exec-osm.c | 72
-rw-r--r--  drivers/message/i2o/iop.c | 4
-rw-r--r--  drivers/net/e1000/e1000_ethtool.c | 5
-rw-r--r--  drivers/net/e1000/e1000_main.c | 8
-rw-r--r--  drivers/net/sky2.c | 53
-rw-r--r--  drivers/net/tg3.c | 144
-rw-r--r--  drivers/net/tg3.h | 3
-rw-r--r--  drivers/net/wireless/bcm43xx/bcm43xx_dma.c | 31
-rw-r--r--  drivers/pci/pci-driver.c | 13
-rw-r--r--  drivers/pci/pci.c | 18
-rw-r--r--  drivers/scsi/sata_mv.c | 3
-rw-r--r--  drivers/usb/host/ohci-pxa27x.c | 3
-rw-r--r--  drivers/video/console/fbcon.c | 2
-rw-r--r--  fs/bio.c | 5
-rw-r--r--  fs/debugfs/inode.c | 3
-rw-r--r--  fs/locks.c | 2
-rw-r--r--  include/asm-arm/arch-pxa/ohci.h | 2
-rw-r--r--  include/asm-powerpc/cputable.h | 2
-rw-r--r--  include/asm-s390/futex.h | 15
-rw-r--r--  include/linux/elevator.h | 2
-rw-r--r--  include/linux/i2o.h | 5
-rw-r--r--  include/linux/mempolicy.h | 1
-rw-r--r--  include/linux/pci-acpi.h | 2
-rw-r--r--  kernel/exit.c | 8
-rw-r--r--  kernel/posix-cpu-timers.c | 48
-rw-r--r--  mm/shmem.c | 2
-rw-r--r--  mm/vmscan.c | 2
-rw-r--r--  net/dccp/ackvec.c | 1
-rw-r--r--  net/ipv4/ip_forward.c | 1
-rw-r--r--  net/ipv4/tcp_input.c | 4
63 files changed, 856 insertions, 409 deletions
diff --git a/Documentation/memory-barriers.txt b/Documentation/memory-barriers.txt
index c61d8b8..4710845 100644
--- a/Documentation/memory-barriers.txt
+++ b/Documentation/memory-barriers.txt
@@ -19,6 +19,7 @@ Contents:
- Control dependencies.
- SMP barrier pairing.
- Examples of memory barrier sequences.
+ - Read memory barriers vs load speculation.
(*) Explicit kernel barriers.
@@ -248,7 +249,7 @@ And there are a number of things that _must_ or _must_not_ be assumed:
we may get either of:
STORE *A = X; Y = LOAD *A;
- STORE *A = Y;
+ STORE *A = Y = X;
=========================
@@ -344,9 +345,12 @@ Memory barriers come in four basic varieties:
(4) General memory barriers.
- A general memory barrier is a combination of both a read memory barrier
- and a write memory barrier. It is a partial ordering over both loads and
- stores.
+ A general memory barrier gives a guarantee that all the LOAD and STORE
+ operations specified before the barrier will appear to happen before all
+ the LOAD and STORE operations specified after the barrier with respect to
+ the other components of the system.
+
+ A general memory barrier is a partial ordering over both loads and stores.
General memory barriers imply both read and write memory barriers, and so
can substitute for either.
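
For illustration (not part of this patch), a minimal C sketch of a general
barrier in use; smp_mb() is the kernel's general barrier primitive, and the
variables are hypothetical:

	int a, b, x;

	void cpu1(void)
	{
		a = 1;		/* store before the barrier */
		smp_mb();	/* neither loads nor stores may appear to cross */
		x = b;		/* load after the barrier */
	}

With respect to the other components of the system, the store to a appears
to happen before the load from b.
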
@@ -546,9 +550,9 @@ write barrier, though, again, a general barrier is viable:
=============== ===============
a = 1;
<write barrier>
- b = 2; x = a;
+ b = 2; x = b;
<read barrier>
- y = b;
+ y = a;
Or:
@@ -563,6 +567,18 @@ Or:
Basically, the read barrier always has to be there, even though it can be of
the "weaker" type.
+[!] Note that the stores before the write barrier would normally be expected to
+match the loads after the read barrier or data dependency barrier, and vice
+versa:
+
+ CPU 1 CPU 2
+ =============== ===============
+ a = 1; }---- --->{ v = c
+ b = 2; } \ / { w = d
+ <write barrier> \ <read barrier>
+ c = 3; } / \ { x = a;
+ d = 4; }---- --->{ y = b;
+
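
The pairing in the note above might be rendered as a C sketch (hedged, not
part of this patch; smp_wmb() and smp_rmb() are the kernel primitives, the
variables are hypothetical):

	int a, b, c, d;

	void cpu1(void)
	{
		a = 1;
		b = 2;
		smp_wmb();	/* pairs with the smp_rmb() in cpu2() */
		c = 3;
		d = 4;
	}

	void cpu2(void)
	{
		int v, w, x, y;

		v = c;
		w = d;
		smp_rmb();	/* pairs with the smp_wmb() in cpu1() */
		x = a;
		y = b;
	}
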
EXAMPLES OF MEMORY BARRIER SEQUENCES
------------------------------------
@@ -600,8 +616,8 @@ STORE B, STORE C } all occuring before the unordered set of { STORE D, STORE E
| | +------+
+-------+ : :
|
- | Sequence in which stores committed to memory system
- | by CPU 1
+ | Sequence in which stores are committed to the
+ | memory system by CPU 1
V
@@ -683,14 +699,12 @@ then the following will occur:
| : : | |
| : : | CPU 2 |
| +-------+ | |
- \ | X->9 |------>| |
- \ +-------+ | |
- ----->| B->2 | | |
- +-------+ | |
- Makes sure all effects ---> ddddddddddddddddd | |
- prior to the store of C +-------+ | |
- are perceptible to | B->2 |------>| |
- successive loads +-------+ | |
+ | | X->9 |------>| |
+ | +-------+ | |
+ Makes sure all effects ---> \ ddddddddddddddddd | |
+ prior to the store of C \ +-------+ | |
+ are perceptible to ----->| B->2 |------>| |
+ subsequent loads +-------+ | |
: : +-------+
@@ -699,73 +713,239 @@ following sequence of events:
CPU 1 CPU 2
======================= =======================
+ { A = 0, B = 9 }
STORE A=1
- STORE B=2
- STORE C=3
<write barrier>
- STORE D=4
- STORE E=5
- LOAD A
+ STORE B=2
LOAD B
- LOAD C
- LOAD D
- LOAD E
+ LOAD A
Without intervention, CPU 2 may then choose to perceive the events on CPU 1 in
some effectively random order, despite the write barrier issued by CPU 1:
- +-------+ : :
- | | +------+
- | |------>| C=3 | }
- | | : +------+ }
- | | : | A=1 | }
- | | : +------+ }
- | CPU 1 | : | B=2 | }---
- | | +------+ } \
- | | wwwwwwwwwwwww} \
- | | +------+ } \ : : +-------+
- | | : | E=5 | } \ +-------+ | |
- | | : +------+ } \ { | C->3 |------>| |
- | |------>| D=4 | } \ { +-------+ : | |
- | | +------+ \ { | E->5 | : | |
- +-------+ : : \ { +-------+ : | |
- Transfer -->{ | A->1 | : | CPU 2 |
- from CPU 1 { +-------+ : | |
- to CPU 2 { | D->4 | : | |
- { +-------+ : | |
- { | B->2 |------>| |
- +-------+ | |
- : : +-------+
-
-
-If, however, a read barrier were to be placed between the load of C and the
-load of D on CPU 2, then the partial ordering imposed by CPU 1 will be
-perceived correctly by CPU 2.
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | | A->0 |------>| |
+ | +-------+ | |
+ | : : +-------+
+ \ : :
+ \ +-------+
+ ---->| A->1 |
+ +-------+
+ : :
- +-------+ : :
- | | +------+
- | |------>| C=3 | }
- | | : +------+ }
- | | : | A=1 | }---
- | | : +------+ } \
- | CPU 1 | : | B=2 | } \
- | | +------+ \
- | | wwwwwwwwwwwwwwww \
- | | +------+ \ : : +-------+
- | | : | E=5 | } \ +-------+ | |
- | | : +------+ }--- \ { | C->3 |------>| |
- | |------>| D=4 | } \ \ { +-------+ : | |
- | | +------+ \ -->{ | B->2 | : | |
- +-------+ : : \ { +-------+ : | |
- \ { | A->1 | : | CPU 2 |
- \ +-------+ | |
- At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
- barrier causes all effects \ +-------+ | |
- prior to the storage of C \ { | E->5 | : | |
- to be perceptible to CPU 2 -->{ +-------+ : | |
- { | D->4 |------>| |
- +-------+ | |
- : : +-------+
+
+If, however, a read barrier were to be placed between the load of E and the
+load of A on CPU 2:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ { A = 0, B = 9 }
+ STORE A=1
+ <write barrier>
+ STORE B=2
+ LOAD B
+ <read barrier>
+ LOAD A
+
+then the partial ordering imposed by CPU 1 will be perceived correctly by CPU
+2:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ | : : | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of B ---->| A->1 |------>| |
+ to be perceptible to CPU 2 +-------+ | |
+ : : +-------+
+
+
+To illustrate this more completely, consider what could happen if the code
+contained a load of A either side of the read barrier:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ { A = 0, B = 9 }
+ STORE A=1
+ <write barrier>
+ STORE B=2
+ LOAD B
+ LOAD A [first load of A]
+ <read barrier>
+ LOAD A [second load of A]
+
+Even though the two loads of A both occur after the load of B, they may both
+come up with different values:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ | : : | |
+ | +-------+ | |
+ | | A->0 |------>| 1st |
+ | +-------+ | |
+ At this point the read ----> \ rrrrrrrrrrrrrrrrr | |
+ barrier causes all effects \ +-------+ | |
+ prior to the storage of B ---->| A->1 |------>| 2nd |
+ to be perceptible to CPU 2 +-------+ | |
+ : : +-------+
+
+
+But it may be that the update to A from CPU 1 becomes perceptible to CPU 2
+before the read barrier completes anyway:
+
+ +-------+ : : : :
+ | | +------+ +-------+
+ | |------>| A=1 |------ --->| A->0 |
+ | | +------+ \ +-------+
+ | CPU 1 | wwwwwwwwwwwwwwww \ --->| B->9 |
+ | | +------+ | +-------+
+ | |------>| B=2 |--- | : :
+ | | +------+ \ | : : +-------+
+ +-------+ : : \ | +-------+ | |
+ ---------->| B->2 |------>| |
+ | +-------+ | CPU 2 |
+ | : : | |
+ \ : : | |
+ \ +-------+ | |
+ ---->| A->1 |------>| 1st |
+ +-------+ | |
+ rrrrrrrrrrrrrrrrr | |
+ +-------+ | |
+ | A->1 |------>| 2nd |
+ +-------+ | |
+ : : +-------+
+
+
+The guarantee is that the second load will always come up with A == 1 if the
+load of B came up with B == 2. No such guarantee exists for the first load of
+A; that may come up with either A == 0 or A == 1.
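+
+As a hedged C sketch of this guarantee (the variables are hypothetical, the
+barrier primitives as above):
+
+	int A = 0, B = 9;
+
+	void cpu1(void)
+	{
+		A = 1;
+		smp_wmb();
+		B = 2;
+	}
+
+	void cpu2(void)
+	{
+		int b, a1, a2;
+
+		b  = B;
+		a1 = A;		/* may read 0 or 1, even if b == 2 */
+		smp_rmb();
+		a2 = A;		/* must read 1 if b == 2 */
+	}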
+
+
+READ MEMORY BARRIERS VS LOAD SPECULATION
+----------------------------------------
+
+Many CPUs speculate with loads: that is, they see that they will need to load
+an item from memory, and they find a time when they're not using the bus for
+any other loads, and so do the load in advance - even though they haven't
+actually got to that point in the instruction execution flow yet. This permits
+the actual load instruction to potentially complete immediately because the
+CPU already has the value to hand.
+
+It may turn out that the CPU didn't actually need the value - perhaps because a
+branch circumvented the load - in which case it can discard the value or just
+cache it for later use.
+
+Consider:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ LOAD B
+ DIVIDE } Divide instructions generally
+ DIVIDE } take a long time to perform
+ LOAD A
+
+Which might appear as this:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ Once the divisions are complete --> : : ~-->| |
+ the CPU can then perform the : : | |
+ LOAD with immediate effect : : +-------+
+
+
+Placing a read barrier or a data dependency barrier just before the second
+load:
+
+ CPU 1 CPU 2
+ ======================= =======================
+ LOAD B
+ DIVIDE
+ DIVIDE
+ <read barrier>
+ LOAD A
+
+will force any value speculatively obtained to be reconsidered to an extent
+dependent on the type of barrier used. If there was no change made to the
+speculated memory location, then the speculated value will just be used:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ : : ~ | |
+ rrrrrrrrrrrrrrrr~ | |
+ : : ~ | |
+ : : ~-->| |
+ : : | |
+ : : +-------+
+
+
+but if there was an update or an invalidation from another CPU pending, then
+the speculation will be cancelled and the value reloaded:
+
+ : : +-------+
+ +-------+ | |
+ --->| B->2 |------>| |
+ +-------+ | CPU 2 |
+ : :DIVIDE | |
+ +-------+ | |
+ The CPU being busy doing a ---> --->| A->0 |~~~~ | |
+ division speculates on the +-------+ ~ | |
+ LOAD of A : : ~ | |
+ : :DIVIDE | |
+ : : ~ | |
+ : : ~ | |
+ rrrrrrrrrrrrrrrrr | |
+ +-------+ | |
+ The speculation is discarded ---> --->| A->1 |------>| |
+ and an updated value is +-------+ | |
+ retrieved : : +-------+
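+
+A sketch of the practical consequence (hedged, not part of this patch; the
+flag/data pair and use() are hypothetical):
+
+	int data;
+	int ready;
+
+	void producer(void)
+	{
+		data = 42;
+		smp_wmb();	/* publish data before raising the flag */
+		ready = 1;
+	}
+
+	void consumer(void)
+	{
+		while (!ready)
+			cpu_relax();
+		smp_rmb();	/* cancel any stale speculated load of data */
+		use(data);	/* hypothetical consumer; sees 42 */
+	}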
========================
@@ -901,7 +1081,7 @@ IMPLICIT KERNEL MEMORY BARRIERS
===============================
Some of the other functions in the linux kernel imply memory barriers, amongst
-which are locking, scheduling and memory allocation functions.
+which are locking and scheduling functions.
This specification is a _minimum_ guarantee; any particular architecture may
provide more substantial guarantees, but these may not be relied upon outside
@@ -966,6 +1146,20 @@ equivalent to a full barrier, but a LOCK followed by an UNLOCK is not.
barriers is that the effects of instructions outside of a critical section may
seep into the inside of the critical section.
+A LOCK followed by an UNLOCK may not be assumed to be a full memory barrier
+because it is possible for an access preceding the LOCK to happen after the
+LOCK, and an access following the UNLOCK to happen before the UNLOCK, and the
+two accesses can themselves then cross:
+
+ *A = a;
+ LOCK
+ UNLOCK
+ *B = b;
+
+may occur as:
+
+ LOCK, STORE *B, STORE *A, UNLOCK
+
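+A hedged C sketch of this reordering, using a spinlock (the lock and the
+pointers are hypothetical, assumed initialised elsewhere):
+
+	spinlock_t lock;
+	int *A, *B;
+
+	void example(int a, int b)
+	{
+		*A = a;
+		spin_lock(&lock);	/* LOCK */
+		spin_unlock(&lock);	/* UNLOCK */
+		*B = b;
+
+		/* Other CPUs may observe the store to *B before the store
+		 * to *A; an explicit smp_mb() would be needed to forbid
+		 * this.
+		 */
+	}
+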
Locks and semaphores may not provide any guarantee of ordering on UP compiled
systems, and so cannot be counted on in such a situation to actually achieve
anything at all - especially with respect to I/O accesses - unless combined
@@ -1016,8 +1210,6 @@ Other functions that imply barriers:
(*) schedule() and similar imply full memory barriers.
- (*) Memory allocation and release functions imply full memory barriers.
-
=================================
INTER-CPU LOCKING BARRIER EFFECTS
diff --git a/Makefile b/Makefile
index a3a7baa..1700d3f 100644
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 17
-EXTRAVERSION =-rc6
+EXTRAVERSION =
NAME=Crazed Snow-Weasel
# *DOCUMENTATION*
diff --git a/arch/alpha/Kconfig b/arch/alpha/Kconfig
index 8290b69..213c785 100644
--- a/arch/alpha/Kconfig
+++ b/arch/alpha/Kconfig
@@ -453,7 +453,7 @@ config ALPHA_IRONGATE
config GENERIC_HWEIGHT
bool
- default y if !ALPHA_EV6 && !ALPHA_EV67
+ default y if !ALPHA_EV67
config ALPHA_AVANTI
bool
diff --git a/arch/arm/mach-ep93xx/ts72xx.c b/arch/arm/mach-ep93xx/ts72xx.c
index 9be01b0c..e24566b 100644
--- a/arch/arm/mach-ep93xx/ts72xx.c
+++ b/arch/arm/mach-ep93xx/ts72xx.c
@@ -111,21 +111,21 @@ static void __init ts72xx_map_io(void)
}
}
-static unsigned char ts72xx_rtc_readb(unsigned long addr)
+static unsigned char ts72xx_rtc_readbyte(unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
return __raw_readb(TS72XX_RTC_DATA_VIRT_BASE);
}
-static void ts72xx_rtc_writeb(unsigned char value, unsigned long addr)
+static void ts72xx_rtc_writebyte(unsigned char value, unsigned long addr)
{
__raw_writeb(addr, TS72XX_RTC_INDEX_VIRT_BASE);
__raw_writeb(value, TS72XX_RTC_DATA_VIRT_BASE);
}
static struct m48t86_ops ts72xx_rtc_ops = {
- .readb = ts72xx_rtc_readb,
- .writeb = ts72xx_rtc_writeb,
+ .readbyte = ts72xx_rtc_readbyte,
+ .writebyte = ts72xx_rtc_writebyte,
};
static struct platform_device ts72xx_rtc_device = {
diff --git a/arch/arm/mach-imx/irq.c b/arch/arm/mach-imx/irq.c
index eeb8a6d..a5de5f1 100644
--- a/arch/arm/mach-imx/irq.c
+++ b/arch/arm/mach-imx/irq.c
@@ -127,7 +127,7 @@ static void
imx_gpio_ack_irq(unsigned int irq)
{
DEBUG_IRQ("%s: irq %d\n", __FUNCTION__, irq);
- ISR(IRQ_TO_REG(irq)) |= 1 << ((irq - IRQ_GPIOA(0)) % 32);
+ ISR(IRQ_TO_REG(irq)) = 1 << ((irq - IRQ_GPIOA(0)) % 32);
}
static void
diff --git a/arch/arm/mach-integrator/integrator_cp.c b/arch/arm/mach-integrator/integrator_cp.c
index a0724f2..9f55f5a 100644
--- a/arch/arm/mach-integrator/integrator_cp.c
+++ b/arch/arm/mach-integrator/integrator_cp.c
@@ -232,8 +232,6 @@ static void __init intcp_init_irq(void)
for (i = IRQ_PIC_START; i <= IRQ_PIC_END; i++) {
if (i == 11)
i = 22;
- if (i == IRQ_CP_CPPLDINT)
- i++;
if (i == 29)
break;
set_irq_chip(i, &pic_chip);
@@ -259,8 +257,7 @@ static void __init intcp_init_irq(void)
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
- set_irq_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
- pic_unmask_irq(IRQ_CP_CPPLDINT);
+ set_irq_chained_handler(IRQ_CP_CPPLDINT, sic_handle_irq);
}
/*
diff --git a/arch/arm/mach-pxa/spitz.c b/arch/arm/mach-pxa/spitz.c
index 19b372d..44bcb80 100644
--- a/arch/arm/mach-pxa/spitz.c
+++ b/arch/arm/mach-pxa/spitz.c
@@ -371,6 +371,7 @@ static int spitz_ohci_init(struct device *dev)
static struct pxaohci_platform_data spitz_ohci_platform_data = {
.port_mode = PMM_NPS_MODE,
.init = spitz_ohci_init,
+ .power_budget = 150,
};
diff --git a/arch/arm/mach-sa1100/neponset.c b/arch/arm/mach-sa1100/neponset.c
index 9e02bc3..af6d277 100644
--- a/arch/arm/mach-sa1100/neponset.c
+++ b/arch/arm/mach-sa1100/neponset.c
@@ -59,6 +59,14 @@ neponset_irq_handler(unsigned int irq, struct irqdesc *desc, struct pt_regs *reg
if (irr & (IRR_ETHERNET | IRR_USAR)) {
desc->chip->mask(irq);
+ /*
+ * Ack the interrupt now to prevent re-entering
+ * this neponset handler. Again, this is safe
+ * since we'll check the IRR register prior to
+ * leaving.
+ */
+ desc->chip->ack(irq);
+
if (irr & IRR_ETHERNET) {
d = irq_desc + IRQ_NEPONSET_SMC9196;
desc_handle_irq(IRQ_NEPONSET_SMC9196, d, regs);
diff --git a/arch/arm/mach-versatile/core.c b/arch/arm/mach-versatile/core.c
index 799697d..cebd48a 100644
--- a/arch/arm/mach-versatile/core.c
+++ b/arch/arm/mach-versatile/core.c
@@ -112,10 +112,9 @@ void __init versatile_init_irq(void)
{
unsigned int i;
- vic_init(VA_VIC_BASE, IRQ_VIC_START, ~(1 << 31));
+ vic_init(VA_VIC_BASE, IRQ_VIC_START, ~0);
- set_irq_handler(IRQ_VICSOURCE31, sic_handle_irq);
- enable_irq(IRQ_VICSOURCE31);
+ set_irq_chained_handler(IRQ_VICSOURCE31, sic_handle_irq);
/* Do second interrupt controller */
writel(~0, VA_SIC_BASE + SIC_IRQ_ENABLE_CLEAR);
diff --git a/arch/i386/kernel/acpi/earlyquirk.c b/arch/i386/kernel/acpi/earlyquirk.c
index 2e3b643..1649a17 100644
--- a/arch/i386/kernel/acpi/earlyquirk.c
+++ b/arch/i386/kernel/acpi/earlyquirk.c
@@ -5,17 +5,34 @@
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/pci.h>
+#include <linux/acpi.h>
+
#include <asm/pci-direct.h>
#include <asm/acpi.h>
#include <asm/apic.h>
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+ nvidia_hpet_detected = 1;
+ return 0;
+}
+#endif
+
static int __init check_bridge(int vendor, int device)
{
#ifdef CONFIG_ACPI
- /* According to Nvidia all timer overrides are bogus. Just ignore
- them all. */
+ /* According to Nvidia all timer overrides are bogus unless HPET
+ is enabled. */
if (vendor == PCI_VENDOR_ID_NVIDIA) {
- acpi_skip_timer_override = 1;
+ nvidia_hpet_detected = 0;
+ acpi_table_parse(ACPI_HPET, nvidia_hpet_check);
+ if (nvidia_hpet_detected == 0) {
+ acpi_skip_timer_override = 1;
+ }
}
#endif
if (vendor == PCI_VENDOR_ID_ATI && timer_over_8254 == 1) {
diff --git a/arch/i386/kernel/setup.c b/arch/i386/kernel/setup.c
index 846e163..dd6b0e3 100644
--- a/arch/i386/kernel/setup.c
+++ b/arch/i386/kernel/setup.c
@@ -1547,15 +1547,18 @@ void __init setup_arch(char **cmdline_p)
if (efi_enabled)
efi_map_memmap();
-#ifdef CONFIG_X86_IO_APIC
- check_acpi_pci(); /* Checks more than just ACPI actually */
-#endif
-
#ifdef CONFIG_ACPI
/*
* Parse the ACPI tables for possible boot-time SMP configuration.
*/
acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_X86_IO_APIC
+ check_acpi_pci(); /* Checks more than just ACPI actually */
+#endif
+
+#ifdef CONFIG_ACPI
acpi_boot_init();
#if defined(CONFIG_SMP) && defined(CONFIG_X86_PC)
diff --git a/arch/powerpc/kernel/prom_init.c b/arch/powerpc/kernel/prom_init.c
index 41e9ab4..f70bd09 100644
--- a/arch/powerpc/kernel/prom_init.c
+++ b/arch/powerpc/kernel/prom_init.c
@@ -822,6 +822,7 @@ static void __init prom_send_capabilities(void)
/* try calling the ibm,client-architecture-support method */
if (call_prom_ret("call-method", 3, 2, &ret,
ADDR("ibm,client-architecture-support"),
+ root,
ADDR(ibm_architecture_vec)) == 0) {
/* the call exists... */
if (ret)
@@ -1622,6 +1623,15 @@ static int __init prom_find_machine_type(void)
if (strstr(p, RELOC("Power Macintosh")) ||
strstr(p, RELOC("MacRISC")))
return PLATFORM_POWERMAC;
+#ifdef CONFIG_PPC64
+ /* We must make sure we don't detect the IBM Cell
+ * blades as pSeries due to some firmware issues,
+ * so we do it here.
+ */
+ if (strstr(p, RELOC("IBM,CBEA")) ||
+ strstr(p, RELOC("IBM,CPBW-1.0")))
+ return PLATFORM_GENERIC;
+#endif /* CONFIG_PPC64 */
i += sl + 1;
}
}
diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
index 01e3c08..8fdeca2 100644
--- a/arch/powerpc/kernel/signal_32.c
+++ b/arch/powerpc/kernel/signal_32.c
@@ -803,10 +803,13 @@ static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int
if (__get_user(cmcp, &ucp->uc_regs))
return -EFAULT;
mcp = (struct mcontext __user *)(u64)cmcp;
+ /* no need to check access_ok(mcp), since mcp < 4GB */
}
#else
if (__get_user(mcp, &ucp->uc_regs))
return -EFAULT;
+ if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
+ return -EFAULT;
#endif
restore_sigmask(&set);
if (restore_user_regs(regs, mcp, sig))
@@ -908,13 +911,14 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
{
struct sig_dbg_op op;
int i;
+ unsigned char tmp;
unsigned long new_msr = regs->msr;
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
unsigned long new_dbcr0 = current->thread.dbcr0;
#endif
for (i=0; i<ndbg; i++) {
- if (__copy_from_user(&op, dbg, sizeof(op)))
+ if (copy_from_user(&op, dbg + i, sizeof(op)))
return -EFAULT;
switch (op.dbg_type) {
case SIG_DBG_SINGLE_STEPPING:
@@ -959,6 +963,11 @@ int sys_debug_setcontext(struct ucontext __user *ctx,
current->thread.dbcr0 = new_dbcr0;
#endif
+ if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
+ || __get_user(tmp, (u8 __user *) ctx)
+ || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
+ return -EFAULT;
+
/*
* If we get a fault copying the context into the kernel's
* image of the user's registers, we can't just return -EFAULT
diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
index 27f65b9..c2db642 100644
--- a/arch/powerpc/kernel/signal_64.c
+++ b/arch/powerpc/kernel/signal_64.c
@@ -182,6 +182,8 @@ static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
if (err)
return err;
+ if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
+ return -EFAULT;
/* Copy 33 vec registers (vr0..31 and vscr) from the stack */
if (v_regs != 0 && (msr & MSR_VEC) != 0)
err |= __copy_from_user(current->thread.vr, v_regs,
diff --git a/arch/powerpc/mm/hash_native_64.c b/arch/powerpc/mm/hash_native_64.c
index 33654d1..994856e 100644
--- a/arch/powerpc/mm/hash_native_64.c
+++ b/arch/powerpc/mm/hash_native_64.c
@@ -52,7 +52,7 @@ static inline void __tlbie(unsigned long va, unsigned int psize)
default:
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
- va |= (0x7f >> (8 - penc)) << 12;
+ va |= penc << 12;
asm volatile("tlbie %0,1" : : "r" (va) : "memory");
break;
}
@@ -74,7 +74,7 @@ static inline void __tlbiel(unsigned long va, unsigned int psize)
default:
penc = mmu_psize_defs[psize].penc;
va &= ~((1ul << mmu_psize_defs[psize].shift) - 1);
- va |= (0x7f >> (8 - penc)) << 12;
+ va |= penc << 12;
asm volatile(".long 0x7c000224 | (%0 << 11) | (1 << 21)"
: : "r"(va) : "memory");
break;
diff --git a/arch/powerpc/platforms/cell/setup.c b/arch/powerpc/platforms/cell/setup.c
index 6574b22..fd3e560 100644
--- a/arch/powerpc/platforms/cell/setup.c
+++ b/arch/powerpc/platforms/cell/setup.c
@@ -125,14 +125,13 @@ static void __init cell_init_early(void)
static int __init cell_probe(void)
{
- /* XXX This is temporary, the Cell maintainer will come up with
- * more appropriate detection logic
- */
unsigned long root = of_get_flat_dt_root();
- if (!of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
- return 0;
- return 1;
+ if (of_flat_dt_is_compatible(root, "IBM,CBEA") ||
+ of_flat_dt_is_compatible(root, "IBM,CPBW-1.0"))
+ return 1;
+
+ return 0;
}
/*
diff --git a/arch/powerpc/platforms/pseries/setup.c b/arch/powerpc/platforms/pseries/setup.c
index 5f79f01..3ba8783 100644
--- a/arch/powerpc/platforms/pseries/setup.c
+++ b/arch/powerpc/platforms/pseries/setup.c
@@ -389,6 +389,7 @@ static int __init pSeries_probe_hypertas(unsigned long node,
static int __init pSeries_probe(void)
{
+ unsigned long root = of_get_flat_dt_root();
char *dtype = of_get_flat_dt_prop(of_get_flat_dt_root(),
"device_type", NULL);
if (dtype == NULL)
@@ -396,6 +397,13 @@ static int __init pSeries_probe(void)
if (strcmp(dtype, "chrp"))
return 0;
+ /* Cell blades firmware claims to be chrp while it's not. Until this
+ * is fixed, we need to avoid those here.
+ */
+ if (of_flat_dt_is_compatible(root, "IBM,CPBW-1.0") ||
+ of_flat_dt_is_compatible(root, "IBM,CBEA"))
+ return 0;
+
DBG("pSeries detected, looking for LPAR capability...\n");
/* Now try to figure out if we are running on LPAR */
diff --git a/arch/sparc/kernel/smp.c b/arch/sparc/kernel/smp.c
index a93f5da..40b42c8 100644
--- a/arch/sparc/kernel/smp.c
+++ b/arch/sparc/kernel/smp.c
@@ -69,6 +69,17 @@ void __init smp_store_cpu_info(int id)
"clock-frequency", 0);
cpu_data(id).prom_node = cpu_node;
cpu_data(id).mid = cpu_get_hwmid(cpu_node);
+
+ /* this is required to tune the scheduler correctly */
+ /* is it possible to have CPUs with different cache sizes? */
+ if (id == boot_cpu_id) {
+ int cache_line, cache_nlines;
+ cache_line = 0x20;
+ cache_line = prom_getintdefault(cpu_node, "ecache-line-size", cache_line);
+ cache_nlines = 0x8000;
+ cache_nlines = prom_getintdefault(cpu_node, "ecache-nlines", cache_nlines);
+ max_cache_size = cache_line * cache_nlines;
+ }
if (cpu_data(id).mid < 0)
panic("No MID found for CPU%d at node 0x%08d", id, cpu_node);
}
diff --git a/arch/sparc64/kernel/pci_sun4v.c b/arch/sparc64/kernel/pci_sun4v.c
index 2b7a1f3..0c08952 100644
--- a/arch/sparc64/kernel/pci_sun4v.c
+++ b/arch/sparc64/kernel/pci_sun4v.c
@@ -599,18 +599,128 @@ struct pci_iommu_ops pci_sun4v_iommu_ops = {
/* SUN4V PCI configuration space accessors. */
-static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+struct pdev_entry {
+ struct pdev_entry *next;
+ u32 devhandle;
+ unsigned int bus;
+ unsigned int device;
+ unsigned int func;
+};
+
+#define PDEV_HTAB_SIZE 16
+#define PDEV_HTAB_MASK (PDEV_HTAB_SIZE - 1)
+static struct pdev_entry *pdev_htab[PDEV_HTAB_SIZE];
+
+static inline unsigned int pdev_hashfn(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
{
- if (bus == pbm->pci_first_busno) {
- if (device == 0 && func == 0)
- return 0;
- return 1;
+ unsigned int val;
+
+ val = (devhandle ^ (devhandle >> 4));
+ val ^= bus;
+ val ^= device;
+ val ^= func;
+
+ return val & PDEV_HTAB_MASK;
+}
+
+static int pdev_htab_add(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+ struct pdev_entry *p = kmalloc(sizeof(*p), GFP_KERNEL);
+ struct pdev_entry **slot;
+
+ if (!p)
+ return -ENOMEM;
+
+ slot = &pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+ p->next = *slot;
+ *slot = p;
+
+ p->devhandle = devhandle;
+ p->bus = bus;
+ p->device = device;
+ p->func = func;
+
+ return 0;
+}
+
+/* Recursively descend into the OBP device tree, rooted at toplevel_node,
+ * looking for a PCI device matching bus and devfn.
+ */
+static int obp_find(struct linux_prom_pci_registers *pregs, int toplevel_node, unsigned int bus, unsigned int devfn)
+{
+ toplevel_node = prom_getchild(toplevel_node);
+
+ while (toplevel_node != 0) {
+ int ret = obp_find(pregs, toplevel_node, bus, devfn);
+
+ if (ret != 0)
+ return ret;
+
+ ret = prom_getproperty(toplevel_node, "reg", (char *) pregs,
+ sizeof(*pregs) * PROMREG_MAX);
+ if (ret == 0 || ret == -1)
+ goto next_sibling;
+
+ if (((pregs[0].phys_hi >> 16) & 0xff) == bus &&
+ ((pregs[0].phys_hi >> 8) & 0xff) == devfn)
+ break;
+
+ next_sibling:
+ toplevel_node = prom_getsibling(toplevel_node);
+ }
+
+ return toplevel_node;
+}
+
+static int pdev_htab_populate(struct pci_pbm_info *pbm)
+{
+ struct linux_prom_pci_registers pr[PROMREG_MAX];
+ u32 devhandle = pbm->devhandle;
+ unsigned int bus;
+
+ for (bus = pbm->pci_first_busno; bus <= pbm->pci_last_busno; bus++) {
+ unsigned int devfn;
+
+ for (devfn = 0; devfn < 256; devfn++) {
+ unsigned int device = PCI_SLOT(devfn);
+ unsigned int func = PCI_FUNC(devfn);
+
+ if (obp_find(pr, pbm->prom_node, bus, devfn)) {
+ int err = pdev_htab_add(devhandle, bus,
+ device, func);
+ if (err)
+ return err;
+ }
+ }
+ }
+
+ return 0;
+}
+
+static struct pdev_entry *pdev_find(u32 devhandle, unsigned int bus, unsigned int device, unsigned int func)
+{
+ struct pdev_entry *p;
+
+ p = pdev_htab[pdev_hashfn(devhandle, bus, device, func)];
+ while (p) {
+ if (p->devhandle == devhandle &&
+ p->bus == bus &&
+ p->device == device &&
+ p->func == func)
+ break;
+
+ p = p->next;
}
+ return p;
+}
+
+static inline int pci_sun4v_out_of_range(struct pci_pbm_info *pbm, unsigned int bus, unsigned int device, unsigned int func)
+{
if (bus < pbm->pci_first_busno ||
bus > pbm->pci_last_busno)
return 1;
- return 0;
+ return pdev_find(pbm->devhandle, bus, device, func) == NULL;
}
static int pci_sun4v_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
@@ -1063,6 +1173,8 @@ static void pci_sun4v_pbm_init(struct pci_controller_info *p, int prom_node, u32
pci_sun4v_get_bus_range(pbm);
pci_sun4v_iommu_init(pbm);
+
+ pdev_htab_populate(pbm);
}
void sun4v_pci_init(int node, char *model_name)
diff --git a/arch/sparc64/kernel/smp.c b/arch/sparc64/kernel/smp.c
index 4e8cd79..f03d52d 100644
--- a/arch/sparc64/kernel/smp.c
+++ b/arch/sparc64/kernel/smp.c
@@ -1287,6 +1287,40 @@ int setup_profiling_timer(unsigned int multiplier)
return 0;
}
+static void __init smp_tune_scheduling(void)
+{
+ int instance, node;
+ unsigned int def, smallest = ~0U;
+
+ def = ((tlb_type == hypervisor) ?
+ (3 * 1024 * 1024) :
+ (4 * 1024 * 1024));
+
+ instance = 0;
+ while (!cpu_find_by_instance(instance, &node, NULL)) {
+ unsigned int val;
+
+ val = prom_getintdefault(node, "ecache-size", def);
+ if (val < smallest)
+ smallest = val;
+
+ instance++;
+ }
+
+ /* Any value less than 256K is nonsense. */
+ if (smallest < (256U * 1024U))
+ smallest = 256 * 1024;
+
+ max_cache_size = smallest;
+
+ if (smallest < 1U * 1024U * 1024U)
+ printk(KERN_INFO "Using max_cache_size of %uKB\n",
+ smallest / 1024U);
+ else
+ printk(KERN_INFO "Using max_cache_size of %uMB\n",
+ smallest / 1024U / 1024U);
+}
+
/* Constrain the number of cpus to max_cpus. */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
@@ -1322,6 +1356,7 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
}
smp_store_cpu_info(boot_cpu_id);
+ smp_tune_scheduling();
}
/* Set this up early so that things like the scheduler can init
diff --git a/arch/sparc64/kernel/sparc64_ksyms.c b/arch/sparc64/kernel/sparc64_ksyms.c
index 62d8a99..38e569f 100644
--- a/arch/sparc64/kernel/sparc64_ksyms.c
+++ b/arch/sparc64/kernel/sparc64_ksyms.c
@@ -297,7 +297,6 @@ EXPORT_SYMBOL(svr4_getcontext);
EXPORT_SYMBOL(svr4_setcontext);
EXPORT_SYMBOL(compat_sys_ioctl);
EXPORT_SYMBOL(sparc32_open);
-EXPORT_SYMBOL(sys_close);
#endif
/* Special internal versions of library functions. */
diff --git a/arch/sparc64/kernel/traps.c b/arch/sparc64/kernel/traps.c
index 2793a5d..563db52 100644
--- a/arch/sparc64/kernel/traps.c
+++ b/arch/sparc64/kernel/traps.c
@@ -1797,7 +1797,9 @@ static const char *sun4v_err_type_to_str(u32 type)
};
}
-static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
+extern void __show_regs(struct pt_regs * regs);
+
+static void sun4v_log_error(struct pt_regs *regs, struct sun4v_error_entry *ent, int cpu, const char *pfx, atomic_t *ocnt)
{
int cnt;
@@ -1830,6 +1832,8 @@ static void sun4v_log_error(struct sun4v_error_entry *ent, int cpu, const char *
pfx,
ent->err_raddr, ent->err_size, ent->err_cpu);
+ __show_regs(regs);
+
if ((cnt = atomic_read(ocnt)) != 0) {
atomic_set(ocnt, 0);
wmb();
@@ -1862,7 +1866,7 @@ void sun4v_resum_error(struct pt_regs *regs, unsigned long offset)
put_cpu();
- sun4v_log_error(&local_copy, cpu,
+ sun4v_log_error(regs, &local_copy, cpu,
KERN_ERR "RESUMABLE ERROR",
&sun4v_resum_oflow_cnt);
}
@@ -1910,7 +1914,7 @@ void sun4v_nonresum_error(struct pt_regs *regs, unsigned long offset)
}
#endif
- sun4v_log_error(&local_copy, cpu,
+ sun4v_log_error(regs, &local_copy, cpu,
KERN_EMERG "NON-RESUMABLE ERROR",
&sun4v_nonresum_oflow_cnt);
@@ -2200,7 +2204,6 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
void die_if_kernel(char *str, struct pt_regs *regs)
{
static int die_counter;
- extern void __show_regs(struct pt_regs * regs);
extern void smp_report_regs(void);
int count = 0;
diff --git a/arch/x86_64/kernel/io_apic.c b/arch/x86_64/kernel/io_apic.c
index 0de3ea9..9cc7031 100644
--- a/arch/x86_64/kernel/io_apic.c
+++ b/arch/x86_64/kernel/io_apic.c
@@ -271,6 +271,18 @@ __setup("enable_8254_timer", setup_enable_8254_timer);
#include <linux/pci_ids.h>
#include <linux/pci.h>
+
+#ifdef CONFIG_ACPI
+
+static int nvidia_hpet_detected __initdata;
+
+static int __init nvidia_hpet_check(unsigned long phys, unsigned long size)
+{
+ nvidia_hpet_detected = 1;
+ return 0;
+}
+#endif
+
/* Temporary Hack. Nvidia and VIA boards currently only work with IO-APIC
off. Check for an Nvidia or VIA PCI bridge and turn it off.
Use pci direct infrastructure because this runs before the PCI subsystem.
@@ -317,11 +329,19 @@ void __init check_ioapic(void)
return;
case PCI_VENDOR_ID_NVIDIA:
#ifdef CONFIG_ACPI
- /* All timer overrides on Nvidia
- seem to be wrong. Skip them. */
- acpi_skip_timer_override = 1;
- printk(KERN_INFO
- "Nvidia board detected. Ignoring ACPI timer override.\n");
+ /*
+ * All timer overrides on Nvidia are
+ * wrong unless HPET is enabled.
+ */
+ nvidia_hpet_detected = 0;
+ acpi_table_parse(ACPI_HPET,
+ nvidia_hpet_check);
+ if (nvidia_hpet_detected == 0) {
+ acpi_skip_timer_override = 1;
+ printk(KERN_INFO "Nvidia board "
+ "detected. Ignoring ACPI "
+ "timer override.\n");
+ }
#endif
/* RED-PEN skip them on mptables too? */
return;
diff --git a/block/as-iosched.c b/block/as-iosched.c
index e25a5d7..a7caf35 100644
--- a/block/as-iosched.c
+++ b/block/as-iosched.c
@@ -1648,17 +1648,17 @@ static void as_exit_queue(elevator_t *e)
* initialize elevator private data (as_data), and alloc a arq for
* each request on the free lists
*/
-static int as_init_queue(request_queue_t *q, elevator_t *e)
+static void *as_init_queue(request_queue_t *q, elevator_t *e)
{
struct as_data *ad;
int i;
if (!arq_pool)
- return -ENOMEM;
+ return NULL;
ad = kmalloc_node(sizeof(*ad), GFP_KERNEL, q->node);
if (!ad)
- return -ENOMEM;
+ return NULL;
memset(ad, 0, sizeof(*ad));
ad->q = q; /* Identify what queue the data belongs to */
@@ -1667,7 +1667,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
GFP_KERNEL, q->node);
if (!ad->hash) {
kfree(ad);
- return -ENOMEM;
+ return NULL;
}
ad->arq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -1675,7 +1675,7 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
if (!ad->arq_pool) {
kfree(ad->hash);
kfree(ad);
- return -ENOMEM;
+ return NULL;
}
/* anticipatory scheduling helpers */
@@ -1696,14 +1696,13 @@ static int as_init_queue(request_queue_t *q, elevator_t *e)
ad->antic_expire = default_antic_expire;
ad->batch_expire[REQ_SYNC] = default_read_batch_expire;
ad->batch_expire[REQ_ASYNC] = default_write_batch_expire;
- e->elevator_data = ad;
ad->current_batch_expires = jiffies + ad->batch_expire[REQ_SYNC];
ad->write_batch_count = ad->batch_expire[REQ_ASYNC] / 10;
if (ad->write_batch_count < 2)
ad->write_batch_count = 2;
- return 0;
+ return ad;
}
/*
diff --git a/block/cfq-iosched.c b/block/cfq-iosched.c
index 8e9d848..052b174 100644
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -1323,17 +1323,12 @@ cfq_alloc_io_context(struct cfq_data *cfqd, gfp_t gfp_mask)
struct cfq_io_context *cic = kmem_cache_alloc(cfq_ioc_pool, gfp_mask);
if (cic) {
- RB_CLEAR(&cic->rb_node);
- cic->key = NULL;
- cic->cfqq[ASYNC] = NULL;
- cic->cfqq[SYNC] = NULL;
+ memset(cic, 0, sizeof(*cic));
+ RB_CLEAR_COLOR(&cic->rb_node);
cic->last_end_request = jiffies;
- cic->ttime_total = 0;
- cic->ttime_samples = 0;
- cic->ttime_mean = 0;
+ INIT_LIST_HEAD(&cic->queue_list);
cic->dtor = cfq_free_io_context;
cic->exit = cfq_exit_io_context;
- INIT_LIST_HEAD(&cic->queue_list);
atomic_inc(&ioc_count);
}
@@ -2251,14 +2246,14 @@ static void cfq_exit_queue(elevator_t *e)
kfree(cfqd);
}
-static int cfq_init_queue(request_queue_t *q, elevator_t *e)
+static void *cfq_init_queue(request_queue_t *q, elevator_t *e)
{
struct cfq_data *cfqd;
int i;
cfqd = kmalloc(sizeof(*cfqd), GFP_KERNEL);
if (!cfqd)
- return -ENOMEM;
+ return NULL;
memset(cfqd, 0, sizeof(*cfqd));
@@ -2288,8 +2283,6 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
for (i = 0; i < CFQ_QHASH_ENTRIES; i++)
INIT_HLIST_HEAD(&cfqd->cfq_hash[i]);
- e->elevator_data = cfqd;
-
cfqd->queue = q;
cfqd->max_queued = q->nr_requests / 4;
@@ -2316,14 +2309,14 @@ static int cfq_init_queue(request_queue_t *q, elevator_t *e)
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
- return 0;
+ return cfqd;
out_crqpool:
kfree(cfqd->cfq_hash);
out_cfqhash:
kfree(cfqd->crq_hash);
out_crqhash:
kfree(cfqd);
- return -ENOMEM;
+ return NULL;
}
static void cfq_slab_kill(void)
diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c
index 399fa1e..3bd0415 100644
--- a/block/deadline-iosched.c
+++ b/block/deadline-iosched.c
@@ -613,24 +613,24 @@ static void deadline_exit_queue(elevator_t *e)
* initialize elevator private data (deadline_data), and alloc a drq for
* each request on the free lists
*/
-static int deadline_init_queue(request_queue_t *q, elevator_t *e)
+static void *deadline_init_queue(request_queue_t *q, elevator_t *e)
{
struct deadline_data *dd;
int i;
if (!drq_pool)
- return -ENOMEM;
+ return NULL;
dd = kmalloc_node(sizeof(*dd), GFP_KERNEL, q->node);
if (!dd)
- return -ENOMEM;
+ return NULL;
memset(dd, 0, sizeof(*dd));
dd->hash = kmalloc_node(sizeof(struct list_head)*DL_HASH_ENTRIES,
GFP_KERNEL, q->node);
if (!dd->hash) {
kfree(dd);
- return -ENOMEM;
+ return NULL;
}
dd->drq_pool = mempool_create_node(BLKDEV_MIN_RQ, mempool_alloc_slab,
@@ -638,7 +638,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
if (!dd->drq_pool) {
kfree(dd->hash);
kfree(dd);
- return -ENOMEM;
+ return NULL;
}
for (i = 0; i < DL_HASH_ENTRIES; i++)
@@ -653,8 +653,7 @@ static int deadline_init_queue(request_queue_t *q, elevator_t *e)
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->fifo_batch = fifo_batch;
- e->elevator_data = dd;
- return 0;
+ return dd;
}
static void deadline_put_request(request_queue_t *q, struct request *rq)
diff --git a/block/elevator.c b/block/elevator.c
index 8768a36..a0afdd3 100644
--- a/block/elevator.c
+++ b/block/elevator.c
@@ -121,16 +121,16 @@ static struct elevator_type *elevator_get(const char *name)
return e;
}
-static int elevator_attach(request_queue_t *q, struct elevator_queue *eq)
+static void *elevator_init_queue(request_queue_t *q, struct elevator_queue *eq)
{
- int ret = 0;
+ return eq->ops->elevator_init_fn(q, eq);
+}
+static void elevator_attach(request_queue_t *q, struct elevator_queue *eq,
+ void *data)
+{
q->elevator = eq;
-
- if (eq->ops->elevator_init_fn)
- ret = eq->ops->elevator_init_fn(q, eq);
-
- return ret;
+ eq->elevator_data = data;
}
static char chosen_elevator[16];
@@ -181,6 +181,7 @@ int elevator_init(request_queue_t *q, char *name)
struct elevator_type *e = NULL;
struct elevator_queue *eq;
int ret = 0;
+ void *data;
INIT_LIST_HEAD(&q->queue_head);
q->last_merge = NULL;
@@ -202,10 +203,13 @@ int elevator_init(request_queue_t *q, char *name)
if (!eq)
return -ENOMEM;
- ret = elevator_attach(q, eq);
- if (ret)
+ data = elevator_init_queue(q, eq);
+ if (!data) {
kobject_put(&eq->kobj);
+ return -ENOMEM;
+ }
+ elevator_attach(q, eq, data);
return ret;
}
@@ -722,13 +726,16 @@ int elv_register_queue(struct request_queue *q)
return error;
}
+static void __elv_unregister_queue(elevator_t *e)
+{
+ kobject_uevent(&e->kobj, KOBJ_REMOVE);
+ kobject_del(&e->kobj);
+}
+
void elv_unregister_queue(struct request_queue *q)
{
- if (q) {
- elevator_t *e = q->elevator;
- kobject_uevent(&e->kobj, KOBJ_REMOVE);
- kobject_del(&e->kobj);
- }
+ if (q)
+ __elv_unregister_queue(q->elevator);
}
int elv_register(struct elevator_type *e)
@@ -780,6 +787,7 @@ EXPORT_SYMBOL_GPL(elv_unregister);
static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
{
elevator_t *old_elevator, *e;
+ void *data;
/*
* Allocate new elevator
@@ -788,6 +796,12 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
if (!e)
return 0;
+ data = elevator_init_queue(q, e);
+ if (!data) {
+ kobject_put(&e->kobj);
+ return 0;
+ }
+
/*
* Turn on BYPASS and drain all requests w/ elevator private data
*/
@@ -806,19 +820,19 @@ static int elevator_switch(request_queue_t *q, struct elevator_type *new_e)
elv_drain_elevator(q);
}
- spin_unlock_irq(q->queue_lock);
-
/*
- * unregister old elevator data
+ * Remember old elevator.
*/
- elv_unregister_queue(q);
old_elevator = q->elevator;
/*
* attach and start new elevator
*/
- if (elevator_attach(q, e))
- goto fail;
+ elevator_attach(q, e, data);
+
+ spin_unlock_irq(q->queue_lock);
+
+ __elv_unregister_queue(old_elevator);
if (elv_register_queue(q))
goto fail_register;
@@ -837,7 +851,6 @@ fail_register:
*/
elevator_exit(e);
e = NULL;
-fail:
q->elevator = old_elevator;
elv_register_queue(q);
clear_bit(QUEUE_FLAG_ELVSWITCH, &q->queue_flags);
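
The elevator_init_fn contract that the scheduler hunks in this commit
converge on can be summarised with a hedged sketch (example_data and
example_init_queue are hypothetical; request_queue_t and elevator_t are the
types used in this tree):

	static void *example_init_queue(request_queue_t *q, elevator_t *e)
	{
		struct example_data *d;

		d = kmalloc_node(sizeof(*d), GFP_KERNEL, q->node);
		if (!d)
			return NULL;	/* elevator core maps NULL to -ENOMEM */

		/* ... initialise d ... */

		return d;	/* core stores this in e->elevator_data */
	}

The scheduler no longer writes e->elevator_data itself; elevator_attach()
stores the returned pointer, which lets elevator_switch() fully initialise
the new scheduler before tearing down the old one.
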
diff --git a/block/noop-iosched.c b/block/noop-iosched.c
index f370e4a..56a7c62 100644
--- a/block/noop-iosched.c
+++ b/block/noop-iosched.c
@@ -65,16 +65,15 @@ noop_latter_request(request_queue_t *q, struct request *rq)
return list_entry(rq->queuelist.next, struct request, queuelist);
}
-static int noop_init_queue(request_queue_t *q, elevator_t *e)
+static void *noop_init_queue(request_queue_t *q, elevator_t *e)
{
struct noop_data *nd;
nd = kmalloc(sizeof(*nd), GFP_KERNEL);
if (!nd)
- return -ENOMEM;
+ return NULL;
INIT_LIST_HEAD(&nd->queue);
- e->elevator_data = nd;
- return 0;
+ return nd;
}
static void noop_exit_queue(elevator_t *e)
diff --git a/drivers/acpi/processor_perflib.c b/drivers/acpi/processor_perflib.c
index abbdb37..f36db22 100644
--- a/drivers/acpi/processor_perflib.c
+++ b/drivers/acpi/processor_perflib.c
@@ -577,6 +577,8 @@ acpi_processor_register_performance(struct acpi_processor_performance
return_VALUE(-EBUSY);
}
+ WARN_ON(!performance);
+
pr->performance = performance;
if (acpi_processor_get_performance_info(pr)) {
@@ -609,7 +611,8 @@ acpi_processor_unregister_performance(struct acpi_processor_performance
return_VOID;
}
- kfree(pr->performance->states);
+ if (pr->performance)
+ kfree(pr->performance->states);
pr->performance = NULL;
acpi_cpufreq_remove_file(pr);
diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
index a59876a..3170eaa 100644
--- a/drivers/cdrom/cdrom.c
+++ b/drivers/cdrom/cdrom.c
@@ -1009,9 +1009,9 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
if (fp->f_mode & FMODE_WRITE) {
ret = -EROFS;
if (cdrom_open_write(cdi))
- goto err;
+ goto err_release;
if (!CDROM_CAN(CDC_RAM))
- goto err;
+ goto err_release;
ret = 0;
cdi->media_written = 0;
}
@@ -1026,6 +1026,8 @@ int cdrom_open(struct cdrom_device_info *cdi, struct inode *ip, struct file *fp)
not be mounting, but opening with O_NONBLOCK */
check_disk_change(ip->i_bdev);
return 0;
+err_release:
+ cdi->ops->release(cdi);
err:
cdi->use_count--;
return ret;
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index f5b01c6..fb919bf 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -41,9 +41,9 @@ obj-$(CONFIG_N_HDLC) += n_hdlc.o
obj-$(CONFIG_AMIGA_BUILTIN_SERIAL) += amiserial.o
obj-$(CONFIG_SX) += sx.o generic_serial.o
obj-$(CONFIG_RIO) += rio/ generic_serial.o
-obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_HVC_CONSOLE) += hvc_vio.o hvsi.o
obj-$(CONFIG_HVC_RTAS) += hvc_rtas.o
+obj-$(CONFIG_HVC_DRIVER) += hvc_console.o
obj-$(CONFIG_RAW_DRIVER) += raw.o
obj-$(CONFIG_SGI_SNSC) += snsc.o snsc_event.o
obj-$(CONFIG_MMTIMER) += mmtimer.o
diff --git a/drivers/char/n_tty.c b/drivers/char/n_tty.c
index ede365d..b9371d5 100644
--- a/drivers/char/n_tty.c
+++ b/drivers/char/n_tty.c
@@ -1384,8 +1384,10 @@ do_it_again:
* longer than TTY_THRESHOLD_UNTHROTTLE in canonical mode,
* we won't get any more characters.
*/
- if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE)
+ if (n_tty_chars_in_buffer(tty) <= TTY_THRESHOLD_UNTHROTTLE) {
+ n_tty_set_room(tty);
check_unthrottle(tty);
+ }
if (b - buf >= minimum)
break;
diff --git a/drivers/message/fusion/mptspi.c b/drivers/message/fusion/mptspi.c
index f2a4d38..3201de0 100644
--- a/drivers/message/fusion/mptspi.c
+++ b/drivers/message/fusion/mptspi.c
@@ -831,6 +831,7 @@ mptspi_ioc_reset(MPT_ADAPTER *ioc, int reset_phase)
return rc;
}
+#ifdef CONFIG_PM
/*
* spi module resume handler
*/
@@ -846,6 +847,7 @@ mptspi_resume(struct pci_dev *pdev)
return rc;
}
+#endif
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
/*=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=*/
diff --git a/drivers/message/i2o/exec-osm.c b/drivers/message/i2o/exec-osm.c
index 5ea133c..7bd4d85 100644
--- a/drivers/message/i2o/exec-osm.c
+++ b/drivers/message/i2o/exec-osm.c
@@ -55,6 +55,7 @@ struct i2o_exec_wait {
u32 m; /* message id */
struct i2o_message *msg; /* pointer to the reply message */
struct list_head list; /* node in global wait list */
+ spinlock_t lock; /* lock before modifying */
};
/* Work struct needed to handle LCT NOTIFY replies */
@@ -87,6 +88,7 @@ static struct i2o_exec_wait *i2o_exec_wait_alloc(void)
return NULL;
INIT_LIST_HEAD(&wait->list);
+ spin_lock_init(&wait->lock);
return wait;
};
@@ -125,6 +127,7 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
DECLARE_WAIT_QUEUE_HEAD(wq);
struct i2o_exec_wait *wait;
static u32 tcntxt = 0x80000000;
+ long flags;
int rc = 0;
wait = i2o_exec_wait_alloc();
@@ -146,33 +149,28 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
wait->tcntxt = tcntxt++;
msg->u.s.tcntxt = cpu_to_le32(wait->tcntxt);
+ wait->wq = &wq;
+ /*
+ * we add elements to the head, because if an entry in the list will
+ * never be removed, we have to iterate over it every time
+ */
+ list_add(&wait->list, &i2o_exec_wait_list);
+
/*
* Post the message to the controller. At some point later it will
* return. If we time out before it returns then complete will be zero.
*/
i2o_msg_post(c, msg);
- if (!wait->complete) {
- wait->wq = &wq;
- /*
- * we add elements add the head, because if a entry in the list
- * will never be removed, we have to iterate over it every time
- */
- list_add(&wait->list, &i2o_exec_wait_list);
-
- wait_event_interruptible_timeout(wq, wait->complete,
- timeout * HZ);
+ wait_event_interruptible_timeout(wq, wait->complete, timeout * HZ);
- wait->wq = NULL;
- }
+ spin_lock_irqsave(&wait->lock, flags);
- barrier();
+ wait->wq = NULL;
- if (wait->complete) {
+ if (wait->complete)
rc = le32_to_cpu(wait->msg->body[0]) >> 24;
- i2o_flush_reply(c, wait->m);
- i2o_exec_wait_free(wait);
- } else {
+ else {
/*
* We cannot remove it now. This is important. When it does
* terminate (which it must do if the controller has not
@@ -186,6 +184,13 @@ int i2o_msg_post_wait_mem(struct i2o_controller *c, struct i2o_message *msg,
rc = -ETIMEDOUT;
}
+ spin_unlock_irqrestore(&wait->lock, flags);
+
+ if (rc != -ETIMEDOUT) {
+ i2o_flush_reply(c, wait->m);
+ i2o_exec_wait_free(wait);
+ }
+
return rc;
};
@@ -213,7 +218,6 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
{
struct i2o_exec_wait *wait, *tmp;
unsigned long flags;
- static spinlock_t lock = SPIN_LOCK_UNLOCKED;
int rc = 1;
/*
@@ -223,23 +227,24 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
* already expired. Not much we can do about that except log it for
* debug purposes, increase timeout, and recompile.
*/
- spin_lock_irqsave(&lock, flags);
list_for_each_entry_safe(wait, tmp, &i2o_exec_wait_list, list) {
if (wait->tcntxt == context) {
- list_del(&wait->list);
+ spin_lock_irqsave(&wait->lock, flags);
- spin_unlock_irqrestore(&lock, flags);
+ list_del(&wait->list);
wait->m = m;
wait->msg = msg;
wait->complete = 1;
- barrier();
-
- if (wait->wq) {
- wake_up_interruptible(wait->wq);
+ if (wait->wq)
rc = 0;
- } else {
+ else
+ rc = -1;
+
+ spin_unlock_irqrestore(&wait->lock, flags);
+
+ if (rc) {
struct device *dev;
dev = &c->pdev->dev;
@@ -248,15 +253,13 @@ static int i2o_msg_post_wait_complete(struct i2o_controller *c, u32 m,
c->name);
i2o_dma_free(dev, &wait->dma);
i2o_exec_wait_free(wait);
- rc = -1;
- }
+ } else
+ wake_up_interruptible(wait->wq);
return rc;
}
}
- spin_unlock_irqrestore(&lock, flags);
-
osm_warn("%s: Bogus reply in POST WAIT (tr-context: %08x)!\n", c->name,
context);
@@ -322,14 +325,9 @@ static DEVICE_ATTR(product_id, S_IRUGO, i2o_exec_show_product_id, NULL);
static int i2o_exec_probe(struct device *dev)
{
struct i2o_device *i2o_dev = to_i2o_device(dev);
- struct i2o_controller *c = i2o_dev->iop;
i2o_event_register(i2o_dev, &i2o_exec_driver, 0, 0xffffffff);
- c->exec = i2o_dev;
-
- i2o_exec_lct_notify(c, c->lct->change_ind + 1);
-
device_create_file(dev, &dev_attr_vendor_id);
device_create_file(dev, &dev_attr_product_id);
@@ -523,6 +521,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
struct device *dev;
struct i2o_message *msg;
+ down(&c->lct_lock);
+
dev = &c->pdev->dev;
if (i2o_dma_realloc
@@ -545,6 +545,8 @@ static int i2o_exec_lct_notify(struct i2o_controller *c, u32 change_ind)
i2o_msg_post(c, msg);
+ up(&c->lct_lock);
+
return 0;
};
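
The race this rework closes can be sketched as follows (hedged; the struct
is a simplified stand-in for i2o_exec_wait, and free_wait() is a
hypothetical helper — exactly one side frees the entry, decided under the
per-entry lock):

	struct wait_entry {
		spinlock_t lock;
		wait_queue_head_t *wq;
		int complete;
	};

	/* waiter / timeout path: returns nonzero if the reply arrived */
	int waiter_done(struct wait_entry *w)
	{
		unsigned long flags;
		int done;

		spin_lock_irqsave(&w->lock, flags);
		w->wq = NULL;			/* mark the waiter as gone */
		done = w->complete;
		spin_unlock_irqrestore(&w->lock, flags);

		if (done)
			free_wait(w);		/* reply seen: waiter cleans up */
		return done;
	}

	/* reply / completion path */
	void completer(struct wait_entry *w)
	{
		unsigned long flags;
		int waiter_gone = 0;

		spin_lock_irqsave(&w->lock, flags);
		w->complete = 1;
		if (w->wq)
			wake_up_interruptible(w->wq);	/* waiter cleans up */
		else
			waiter_gone = 1;
		spin_unlock_irqrestore(&w->lock, flags);

		if (waiter_gone)
			free_wait(w);		/* timed out: completer cleans up */
	}
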
diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
index 4921674..febbdd4 100644
--- a/drivers/message/i2o/iop.c
+++ b/drivers/message/i2o/iop.c
@@ -804,8 +804,6 @@ void i2o_iop_remove(struct i2o_controller *c)
/* Ask the IOP to switch to RESET state */
i2o_iop_reset(c);
-
- put_device(&c->device);
}
/**
@@ -1059,7 +1057,7 @@ struct i2o_controller *i2o_iop_alloc(void)
snprintf(poolname, sizeof(poolname), "i2o_%s_msg_inpool", c->name);
if (i2o_pool_alloc
- (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4,
+ (&c->in_msg, poolname, I2O_INBOUND_MSG_FRAME_SIZE * 4 + sizeof(u32),
I2O_MSG_INPOOL_MIN)) {
kfree(c);
return ERR_PTR(-ENOMEM);
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index ecccca3..d1c705b 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -870,13 +870,16 @@ e1000_intr_test(struct e1000_adapter *adapter, uint64_t *data)
*data = 0;
/* Hook up test interrupt handler just for this test */
- if (!request_irq(irq, &e1000_test_intr, 0, netdev->name, netdev)) {
+ if (!request_irq(irq, &e1000_test_intr, SA_PROBEIRQ, netdev->name,
+ netdev)) {
shared_int = FALSE;
} else if (request_irq(irq, &e1000_test_intr, SA_SHIRQ,
netdev->name, netdev)){
*data = 1;
return -1;
}
+ DPRINTK(PROBE,INFO, "testing %s interrupt\n",
+ (shared_int ? "shared" : "unshared"));
/* Disable all the interrupts */
E1000_WRITE_REG(&adapter->hw, IMC, 0xFFFFFFFF);
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index ed15fca..97e71a4 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3519,7 +3519,7 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
buffer_info = &rx_ring->buffer_info[i];
while (rx_desc->status & E1000_RXD_STAT_DD) {
- struct sk_buff *skb, *next_skb;
+ struct sk_buff *skb;
u8 status;
#ifdef CONFIG_E1000_NAPI
if (*work_done >= work_to_do)
@@ -3537,8 +3537,6 @@ e1000_clean_rx_irq(struct e1000_adapter *adapter,
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
- next_skb = next_buffer->skb;
- prefetch(next_skb->data - NET_IP_ALIGN);
cleaned = TRUE;
cleaned_count++;
@@ -3668,7 +3666,7 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
struct e1000_buffer *buffer_info, *next_buffer;
struct e1000_ps_page *ps_page;
struct e1000_ps_page_dma *ps_page_dma;
- struct sk_buff *skb, *next_skb;
+ struct sk_buff *skb;
unsigned int i, j;
uint32_t length, staterr;
int cleaned_count = 0;
@@ -3697,8 +3695,6 @@ e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
prefetch(next_rxd);
next_buffer = &rx_ring->buffer_info[i];
- next_skb = next_buffer->skb;
- prefetch(next_skb->data - NET_IP_ALIGN);
cleaned = TRUE;
cleaned_count++;
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 9591096..fba1e4d 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -187,12 +187,11 @@ static u16 gm_phy_read(struct sky2_hw *hw, unsigned port, u16 reg)
return v;
}
-static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
+static void sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
{
u16 power_control;
u32 reg1;
int vaux;
- int ret = 0;
pr_debug("sky2_set_power_state %d\n", state);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_ON);
@@ -275,12 +274,10 @@ static int sky2_set_power_state(struct sky2_hw *hw, pci_power_t state)
break;
default:
printk(KERN_ERR PFX "Unknown power state %d\n", state);
- ret = -1;
}
sky2_pci_write16(hw, hw->pm_cap + PCI_PM_CTRL, power_control);
sky2_write8(hw, B2_TST_CTRL1, TST_CFG_WRITE_OFF);
- return ret;
}
static void sky2_phy_reset(struct sky2_hw *hw, unsigned port)
@@ -2164,6 +2161,13 @@ static void sky2_descriptor_error(struct sky2_hw *hw, unsigned port,
/* If idle then force a fake soft NAPI poll once a second
* to work around cases where sharing an edge triggered interrupt.
*/
+static inline void sky2_idle_start(struct sky2_hw *hw)
+{
+ if (idle_timeout > 0)
+ mod_timer(&hw->idle_timer,
+ jiffies + msecs_to_jiffies(idle_timeout));
+}
+
static void sky2_idle(unsigned long arg)
{
struct sky2_hw *hw = (struct sky2_hw *) arg;
@@ -2183,6 +2187,9 @@ static int sky2_poll(struct net_device *dev0, int *budget)
int work_done = 0;
u32 status = sky2_read32(hw, B0_Y2_SP_EISR);
+ if (!~status)
+ goto out;
+
if (status & Y2_IS_HW_ERR)
sky2_hw_intr(hw);
@@ -2219,7 +2226,7 @@ static int sky2_poll(struct net_device *dev0, int *budget)
if (sky2_more_work(hw))
return 1;
-
+out:
netif_rx_complete(dev0);
sky2_read32(hw, B0_Y2_SP_LISR);
@@ -2248,8 +2255,10 @@ static irqreturn_t sky2_intr(int irq, void *dev_id, struct pt_regs *regs)
static void sky2_netpoll(struct net_device *dev)
{
struct sky2_port *sky2 = netdev_priv(dev);
+ struct net_device *dev0 = sky2->hw->dev[0];
- sky2_intr(sky2->hw->pdev->irq, sky2->hw, NULL);
+ if (netif_running(dev) && __netif_rx_schedule_prep(dev0))
+ __netif_rx_schedule(dev0);
}
#endif
@@ -3350,9 +3359,7 @@ static int __devinit sky2_probe(struct pci_dev *pdev,
sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
setup_timer(&hw->idle_timer, sky2_idle, (unsigned long) hw);
- if (idle_timeout > 0)
- mod_timer(&hw->idle_timer,
- jiffies + msecs_to_jiffies(idle_timeout));
+ sky2_idle_start(hw);
pci_set_drvdata(pdev, hw);
@@ -3425,8 +3432,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
{
struct sky2_hw *hw = pci_get_drvdata(pdev);
int i;
+ pci_power_t pstate = pci_choose_state(pdev, state);
+
+ if (!(pstate == PCI_D3hot || pstate == PCI_D3cold))
+ return -EINVAL;
+
+ del_timer_sync(&hw->idle_timer);
- for (i = 0; i < 2; i++) {
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
if (dev) {
@@ -3435,10 +3448,14 @@ static int sky2_suspend(struct pci_dev *pdev, pm_message_t state)
sky2_down(dev);
netif_device_detach(dev);
+ netif_poll_disable(dev);
}
}
- return sky2_set_power_state(hw, pci_choose_state(pdev, state));
+ sky2_write32(hw, B0_IMSK, 0);
+ pci_save_state(pdev);
+ sky2_set_power_state(hw, pstate);
+ return 0;
}
static int sky2_resume(struct pci_dev *pdev)
@@ -3448,27 +3465,31 @@ static int sky2_resume(struct pci_dev *pdev)
pci_restore_state(pdev);
pci_enable_wake(pdev, PCI_D0, 0);
- err = sky2_set_power_state(hw, PCI_D0);
- if (err)
- goto out;
+ sky2_set_power_state(hw, PCI_D0);
err = sky2_reset(hw);
if (err)
goto out;
- for (i = 0; i < 2; i++) {
+ sky2_write32(hw, B0_IMSK, Y2_IS_BASE);
+
+ for (i = 0; i < hw->ports; i++) {
struct net_device *dev = hw->dev[i];
if (dev && netif_running(dev)) {
netif_device_attach(dev);
+ netif_poll_enable(dev);
+
err = sky2_up(dev);
if (err) {
printk(KERN_ERR PFX "%s: could not up: %d\n",
dev->name, err);
dev_close(dev);
- break;
+ goto out;
}
}
}
+
+ sky2_idle_start(hw);
out:
return err;
}
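
The new sky2_idle_start() helper above gives the fake-poll watchdog a single arming point: probe and resume arm it, and suspend cancels it with del_timer_sync() before powering the chip down. The handler body is elided from this hunk; a kernel-flavored sketch of what it plausibly does (kick a NAPI poll on the master port, then rearm), using only calls that appear elsewhere in this patch:

/* Illustrative only -- the real sky2_idle() body is not shown above. */
static void sky2_idle(unsigned long arg)
{
	struct sky2_hw *hw = (struct sky2_hw *) arg;
	struct net_device *dev0 = hw->dev[0];

	/* Fake an interrupt so a missed shared edge-triggered IRQ
	 * cannot stall the port forever: schedule NAPI if idle. */
	if (__netif_rx_schedule_prep(dev0))
		__netif_rx_schedule(dev0);

	sky2_idle_start(hw);	/* rearm for the next idle_timeout period */
}
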
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index 49ad60b..862c226 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -69,8 +69,8 @@
#define DRV_MODULE_NAME "tg3"
#define PFX DRV_MODULE_NAME ": "
-#define DRV_MODULE_VERSION "3.58"
-#define DRV_MODULE_RELDATE "May 22, 2006"
+#define DRV_MODULE_VERSION "3.59"
+#define DRV_MODULE_RELDATE "June 8, 2006"
#define TG3_DEF_MAC_MODE 0
#define TG3_DEF_RX_MODE 0
@@ -4485,9 +4485,8 @@ static void tg3_disable_nvram_access(struct tg3 *tp)
/* tp->lock is held. */
static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
{
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
- tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
- NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
+ tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
+ NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
switch (kind) {
@@ -4568,13 +4567,12 @@ static int tg3_chip_reset(struct tg3 *tp)
void (*write_op)(struct tg3 *, u32, u32);
int i;
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
- tg3_nvram_lock(tp);
- /* No matching tg3_nvram_unlock() after this because
- * chip reset below will undo the nvram lock.
- */
- tp->nvram_lock_cnt = 0;
- }
+ tg3_nvram_lock(tp);
+
+ /* No matching tg3_nvram_unlock() after this because
+ * chip reset below will undo the nvram lock.
+ */
+ tp->nvram_lock_cnt = 0;
if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
@@ -4727,20 +4725,25 @@ static int tg3_chip_reset(struct tg3 *tp)
tw32_f(MAC_MODE, 0);
udelay(40);
- if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
- /* Wait for firmware initialization to complete. */
- for (i = 0; i < 100000; i++) {
- tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
- if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
- break;
- udelay(10);
- }
- if (i >= 100000) {
- printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
- "firmware will not restart magic=%08x\n",
- tp->dev->name, val);
- return -ENODEV;
- }
+ /* Wait for firmware initialization to complete. */
+ for (i = 0; i < 100000; i++) {
+ tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
+ if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
+ break;
+ udelay(10);
+ }
+
+ /* Chip might not be fitted with firmware. Some Sun onboard
+ * parts are configured like that. So don't signal the timeout
+ * of the above loop as an error, but do report the lack of
+ * running firmware once.
+ */
+ if (i >= 100000 &&
+ !(tp->tg3_flags2 & TG3_FLG2_NO_FWARE_REPORTED)) {
+ tp->tg3_flags2 |= TG3_FLG2_NO_FWARE_REPORTED;
+
+ printk(KERN_INFO PFX "%s: No firmware running.\n",
+ tp->dev->name);
}
if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
@@ -9075,9 +9078,6 @@ static void __devinit tg3_nvram_init(struct tg3 *tp)
{
int j;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
- return;
-
tw32_f(GRC_EEPROM_ADDR,
(EEPROM_ADDR_FSM_RESET |
(EEPROM_DEFAULT_CLOCK_PERIOD <<
@@ -9210,11 +9210,6 @@ static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
{
int ret;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
- return -EINVAL;
- }
-
if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
return tg3_nvram_read_using_eeprom(tp, offset, val);
@@ -9447,11 +9442,6 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
{
int ret;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
- return -EINVAL;
- }
-
if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
~GRC_LCLCTRL_GPIO_OUTPUT1);
@@ -9578,15 +9568,19 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
tp->misc_host_ctrl);
+ /* The memory arbiter has to be enabled in order for SRAM accesses
+ * to succeed. Normally on powerup the tg3 chip firmware will make
+ * sure it is enabled, but other entities such as system netboot
+ * code might disable it.
+ */
+ val = tr32(MEMARB_MODE);
+ tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
+
tp->phy_id = PHY_ID_INVALID;
tp->led_ctrl = LED_CTRL_MODE_PHY_1;
- /* Do not even try poking around in here on Sun parts. */
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- /* All SUN chips are built-in LOMs. */
- tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
- return;
- }
+ /* Assume an onboard device by default. */
+ tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
if (val == NIC_SRAM_DATA_SIG_MAGIC) {
@@ -9686,6 +9680,8 @@ static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP)
tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
+ else
+ tp->tg3_flags &= ~TG3_FLAG_EEPROM_WRITE_PROT;
if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
@@ -9834,16 +9830,8 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
int i;
u32 magic;
- if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
- /* Sun decided not to put the necessary bits in the
- * NVRAM of their onboard tg3 parts :(
- */
- strcpy(tp->board_part_number, "Sun 570X");
- return;
- }
-
if (tg3_nvram_read_swab(tp, 0x0, &magic))
- return;
+ goto out_not_found;
if (magic == TG3_EEPROM_MAGIC) {
for (i = 0; i < 256; i += 4) {
@@ -9874,6 +9862,9 @@ static void __devinit tg3_read_partno(struct tg3 *tp)
break;
msleep(1);
}
+ if (!(tmp16 & 0x8000))
+ goto out_not_found;
+
pci_read_config_dword(tp->pdev, vpd_cap + PCI_VPD_DATA,
&tmp);
tmp = cpu_to_le32(tmp);
@@ -9965,37 +9956,6 @@ static void __devinit tg3_read_fw_ver(struct tg3 *tp)
}
}
-#ifdef CONFIG_SPARC64
-static int __devinit tg3_is_sun_570X(struct tg3 *tp)
-{
- struct pci_dev *pdev = tp->pdev;
- struct pcidev_cookie *pcp = pdev->sysdata;
-
- if (pcp != NULL) {
- int node = pcp->prom_node;
- u32 venid;
- int err;
-
- err = prom_getproperty(node, "subsystem-vendor-id",
- (char *) &venid, sizeof(venid));
- if (err == 0 || err == -1)
- return 0;
- if (venid == PCI_VENDOR_ID_SUN)
- return 1;
-
- /* TG3 chips onboard the SunBlade-2500 don't have the
- * subsystem-vendor-id set to PCI_VENDOR_ID_SUN but they
- * are distinguishable from non-Sun variants by being
- * named "network" by the firmware. Non-Sun cards will
- * show up as being named "ethernet".
- */
- if (!strcmp(pcp->prom_name, "network"))
- return 1;
- }
- return 0;
-}
-#endif
-
static int __devinit tg3_get_invariants(struct tg3 *tp)
{
static struct pci_device_id write_reorder_chipsets[] = {
@@ -10012,11 +9972,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
u16 pci_cmd;
int err;
-#ifdef CONFIG_SPARC64
- if (tg3_is_sun_570X(tp))
- tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
-#endif
-
/* Force memory write invalidate off. If we leave it on,
* then on 5700_BX chips we have to enable a workaround.
* The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
@@ -10312,8 +10267,7 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
if (tp->write32 == tg3_write_indirect_reg32 ||
((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
(GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) ||
- (tp->tg3_flags2 & TG3_FLG2_SUN_570X))
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
tp->tg3_flags |= TG3_FLAG_SRAM_USE_CONFIG;
/* Get eeprom hw config before calling tg3_set_power_state().
@@ -10594,8 +10548,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
#endif
mac_offset = 0x7c;
- if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
- !(tp->tg3_flags & TG3_FLG2_SUN_570X)) ||
+ if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
(tp->tg3_flags2 & TG3_FLG2_5780_CLASS)) {
if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
mac_offset = 0xcc;
@@ -10622,8 +10575,7 @@ static int __devinit tg3_get_device_address(struct tg3 *tp)
}
if (!addr_ok) {
/* Next, try NVRAM. */
- if (!(tp->tg3_flags & TG3_FLG2_SUN_570X) &&
- !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
+ if (!tg3_nvram_read(tp, mac_offset + 0, &hi) &&
!tg3_nvram_read(tp, mac_offset + 4, &lo)) {
dev->dev_addr[0] = ((hi >> 16) & 0xff);
dev->dev_addr[1] = ((hi >> 24) & 0xff);
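
Taken together, the tg3 hunks above drop the TG3_FLG2_SUN_570X special case in favor of behavior that is safe on all boards: always take the NVRAM lock, always poll the firmware mailbox, and treat missing firmware as a one-time notice rather than a fatal -ENODEV. A standalone sketch of that poll-and-latch pattern (read_fw_mbox(), udelay() and the magic value here are stand-ins, not the driver's symbols):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint32_t read_fw_mbox(void) { return 0; }   /* hardware stub */
static void udelay(int usec) { (void) usec; }      /* timing stub */

static bool no_fw_reported;	/* latch: print the notice only once */

static void wait_for_firmware(uint32_t magic)
{
	int i;

	/* Bounded poll: 100000 * 10us = 1s, mirroring the patch. */
	for (i = 0; i < 100000; i++) {
		if (read_fw_mbox() == magic)
			return;
		udelay(10);
	}

	/* Not an error: some boards (e.g. Sun LOMs) ship without
	 * firmware.  Report once, stay quiet on later resets. */
	if (!no_fw_reported) {
		no_fw_reported = true;
		printf("tg3: no firmware running\n");
	}
}

int main(void)
{
	wait_for_firmware(0x1);		/* placeholder magic */
	wait_for_firmware(0x1);		/* second reset: silent */
	return 0;
}
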
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index 0e29b88..ff0faab 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -2184,7 +2184,7 @@ struct tg3 {
#define TG3_FLAG_INIT_COMPLETE 0x80000000
u32 tg3_flags2;
#define TG3_FLG2_RESTART_TIMER 0x00000001
-#define TG3_FLG2_SUN_570X 0x00000002
+/* 0x00000002 available */
#define TG3_FLG2_NO_ETH_WIRE_SPEED 0x00000004
#define TG3_FLG2_IS_5788 0x00000008
#define TG3_FLG2_MAX_RXPEND_64 0x00000010
@@ -2216,6 +2216,7 @@ struct tg3 {
#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2)
#define TG3_FLG2_1SHOT_MSI 0x10000000
#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
+#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
u32 split_mode_max_reqs;
#define SPLIT_MODE_5704_MAX_REQ 3
diff --git a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
index bbecba0..d0318e5 100644
--- a/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
+++ b/drivers/net/wireless/bcm43xx/bcm43xx_dma.c
@@ -624,25 +624,28 @@ err_destroy_tx0:
static u16 generate_cookie(struct bcm43xx_dmaring *ring,
int slot)
{
- u16 cookie = 0x0000;
+ u16 cookie = 0xF000;
/* Use the upper 4 bits of the cookie as
* DMA controller ID and store the slot number
- * in the lower 12 bits
+ * in the lower 12 bits.
+ * Note that the cookie must never be 0, as this
+ * is a special value used in the RX path.
*/
switch (ring->mmio_base) {
default:
assert(0);
case BCM43xx_MMIO_DMA1_BASE:
+ cookie = 0xA000;
break;
case BCM43xx_MMIO_DMA2_BASE:
- cookie = 0x1000;
+ cookie = 0xB000;
break;
case BCM43xx_MMIO_DMA3_BASE:
- cookie = 0x2000;
+ cookie = 0xC000;
break;
case BCM43xx_MMIO_DMA4_BASE:
- cookie = 0x3000;
+ cookie = 0xD000;
break;
}
assert(((u16)slot & 0xF000) == 0x0000);
@@ -660,16 +663,16 @@ struct bcm43xx_dmaring * parse_cookie(struct bcm43xx_private *bcm,
struct bcm43xx_dmaring *ring = NULL;
switch (cookie & 0xF000) {
- case 0x0000:
+ case 0xA000:
ring = dma->tx_ring0;
break;
- case 0x1000:
+ case 0xB000:
ring = dma->tx_ring1;
break;
- case 0x2000:
+ case 0xC000:
ring = dma->tx_ring2;
break;
- case 0x3000:
+ case 0xD000:
ring = dma->tx_ring3;
break;
default:
@@ -839,8 +842,18 @@ static void dma_rx(struct bcm43xx_dmaring *ring,
/* We received an xmit status. */
struct bcm43xx_hwxmitstatus *hw = (struct bcm43xx_hwxmitstatus *)skb->data;
struct bcm43xx_xmitstatus stat;
+ int i = 0;
stat.cookie = le16_to_cpu(hw->cookie);
+ while (stat.cookie == 0) {
+ if (unlikely(++i >= 10000)) {
+ assert(0);
+ break;
+ }
+ udelay(2);
+ barrier();
+ stat.cookie = le16_to_cpu(hw->cookie);
+ }
stat.flags = hw->flags;
stat.cnt1 = hw->cnt1;
stat.cnt2 = hw->cnt2;
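
The cookie rework above has two parts: generate_cookie()/parse_cookie() move the controller ids from 0x0-0x3 to 0xA-0xD so that a valid cookie can never be 0x0000, and dma_rx() now busy-waits briefly when it reads a zero cookie, since that unambiguously means the status write has not landed yet. The bit-packing itself reduces to this runnable sketch (make_cookie() and friends are illustrative, not driver code):

#include <assert.h>
#include <stdint.h>

static uint16_t make_cookie(unsigned ring, unsigned slot)
{
	assert(ring <= 3 && slot < 0x1000);
	/* Upper 4 bits: controller id 0xA..0xD; lower 12 bits: slot. */
	return (uint16_t) (((0xA + ring) << 12) | slot);
}

static unsigned cookie_ring(uint16_t cookie) { return (cookie >> 12) - 0xA; }
static unsigned cookie_slot(uint16_t cookie) { return cookie & 0x0FFF; }

int main(void)
{
	uint16_t c = make_cookie(2, 5);		/* tx_ring2, slot 5 */

	assert(c == 0xC005);			/* never 0x0000 by construction */
	assert(cookie_ring(c) == 2 && cookie_slot(c) == 5);
	return 0;
}
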
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 1456759..10e1a90 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -285,9 +285,9 @@ static int pci_device_suspend(struct device * dev, pm_message_t state)
* Default resume method for devices that have no driver provided resume,
* or not even a driver at all.
*/
-static void pci_default_resume(struct pci_dev *pci_dev)
+static int pci_default_resume(struct pci_dev *pci_dev)
{
- int retval;
+ int retval = 0;
/* restore the PCI config space */
pci_restore_state(pci_dev);
@@ -297,18 +297,21 @@ static void pci_default_resume(struct pci_dev *pci_dev)
/* if the device was busmaster before the suspend, make it busmaster again */
if (pci_dev->is_busmaster)
pci_set_master(pci_dev);
+
+ return retval;
}
static int pci_device_resume(struct device * dev)
{
+ int error;
struct pci_dev * pci_dev = to_pci_dev(dev);
struct pci_driver * drv = pci_dev->driver;
if (drv && drv->resume)
- drv->resume(pci_dev);
+ error = drv->resume(pci_dev);
else
- pci_default_resume(pci_dev);
- return 0;
+ error = pci_default_resume(pci_dev);
+ return error;
}
static void pci_device_shutdown(struct device *dev)
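
The pci-driver.c hunk is pure plumbing: both resume paths already produced a status, but pci_device_resume() returned a hard-coded 0. Reduced to its shape (names reused from the driver core, bodies illustrative):

#include <stddef.h>

struct pci_dev;					/* opaque for this sketch */

struct pci_driver {
	int (*resume)(struct pci_dev *);
};

static int pci_default_resume(struct pci_dev *dev)
{
	/* restore config space, reassert busmastering, ... */
	return 0;
}

static int pci_device_resume(struct pci_dev *dev, struct pci_driver *drv)
{
	/* Prefer the driver hook, fall back to the default -- and in
	 * either case propagate the status instead of returning 0. */
	return (drv && drv->resume) ? drv->resume(dev) : pci_default_resume(dev);
}
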
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 2329f94..1228627 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -461,9 +461,23 @@ int
pci_restore_state(struct pci_dev *dev)
{
int i;
+ u32 val;
- for (i = 0; i < 16; i++)
- pci_write_config_dword(dev,i * 4, dev->saved_config_space[i]);
+ /*
+ * The Base Address register should be programmed before the command
+ * register(s)
+ */
+ for (i = 15; i >= 0; i--) {
+ pci_read_config_dword(dev, i * 4, &val);
+ if (val != dev->saved_config_space[i]) {
+ printk(KERN_DEBUG "PM: Writing back config space on "
+ "device %s at offset %x (was %x, writing %x)\n",
+ pci_name(dev), i,
+ val, (int)dev->saved_config_space[i]);
+ pci_write_config_dword(dev, i * 4,
+ dev->saved_config_space[i]);
+ }
+ }
pci_restore_msi_state(dev);
pci_restore_msix_state(dev);
return 0;
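
Two things changed in pci_restore_state(): the walk over the 16 saved dwords now runs backwards, and unchanged dwords are skipped. The order matters because in a type 0 header the command register sits at offset 0x04 (dword 1) while the BARs occupy 0x10-0x24 (dwords 4-9), so counting down programs the BARs before decoding is re-enabled. In outline (cfg_read()/cfg_write() stand in for the config accessors):

#include <stdint.h>
#include <stdio.h>

static uint32_t cfg[16];			/* fake config space */
static uint32_t cfg_read(int dword)              { return cfg[dword]; }
static void     cfg_write(int dword, uint32_t v) { cfg[dword] = v; }

static void restore_config_space(const uint32_t saved[16])
{
	int i;

	/* Highest offset first: BARs (dwords 4-9) are rewritten before
	 * the command register (dword 1) re-enables decoding. */
	for (i = 15; i >= 0; i--) {
		if (cfg_read(i) != saved[i]) {
			printf("writing back dword %d\n", i);
			cfg_write(i, saved[i]);
		}
	}
}

int main(void)
{
	/* Illustrative values: a command word and one memory BAR. */
	uint32_t saved[16] = { [1] = 0x0006, [4] = 0xfebf0000 };

	restore_config_space(saved);
	return 0;
}
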
diff --git a/drivers/scsi/sata_mv.c b/drivers/scsi/sata_mv.c
index 9b8bca1..f16f92a 100644
--- a/drivers/scsi/sata_mv.c
+++ b/drivers/scsi/sata_mv.c
@@ -2035,6 +2035,7 @@ static void mv_phy_reset(struct ata_port *ap)
static void mv_eng_timeout(struct ata_port *ap)
{
struct ata_queued_cmd *qc;
+ unsigned long flags;
printk(KERN_ERR "ata%u: Entering mv_eng_timeout\n",ap->id);
DPRINTK("All regs @ start of eng_timeout\n");
@@ -2046,8 +2047,10 @@ static void mv_eng_timeout(struct ata_port *ap)
ap->host_set->mmio_base, ap, qc, qc->scsicmd,
&qc->scsicmd->cmnd);
+ spin_lock_irqsave(&ap->host_set->lock, flags);
mv_err_intr(ap, 0);
mv_stop_and_reset(ap);
+ spin_unlock_irqrestore(&ap->host_set->lock, flags);
WARN_ON(!(qc->flags & ATA_QCFLAG_ACTIVE));
if (qc->flags & ATA_QCFLAG_ACTIVE) {
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index acde886..fafe7c1 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -185,6 +185,9 @@ int usb_hcd_pxa27x_probe (const struct hc_driver *driver, struct platform_device
/* Select Power Management Mode */
pxa27x_ohci_select_pmm(inf->port_mode);
+ if (inf->power_budget)
+ hcd->power_budget = inf->power_budget;
+
ohci_hcd_init(hcd_to_ohci(hcd));
retval = usb_add_hcd(hcd, pdev->resource[1].start, SA_INTERRUPT);
diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c
index 953eb8c..47ba1a7 100644
--- a/drivers/video/console/fbcon.c
+++ b/drivers/video/console/fbcon.c
@@ -1745,7 +1745,7 @@ static int fbcon_scroll(struct vc_data *vc, int t, int b, int dir,
fbcon_redraw_move(vc, p, 0, t, count);
ypan_up_redraw(vc, t, count);
if (vc->vc_rows - b > 0)
- fbcon_redraw_move(vc, p, b - count,
+ fbcon_redraw_move(vc, p, b,
vc->vc_rows - b, b);
} else
fbcon_redraw_move(vc, p, t + count, b - t - count, t);
diff --git a/fs/bio.c b/fs/bio.c
index 098c12b..6a0b9ad 100644
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -654,9 +654,10 @@ static struct bio *__bio_map_user_iov(request_queue_t *q,
write_to_vm, 0, &pages[cur_page], NULL);
up_read(&current->mm->mmap_sem);
- if (ret < local_nr_pages)
+ if (ret < local_nr_pages) {
+ ret = -EFAULT;
goto out_unmap;
-
+ }
offset = uaddr & ~PAGE_MASK;
for (j = cur_page; j < page_limit; j++) {
diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
index 85d166c..b55b4ea 100644
--- a/fs/debugfs/inode.c
+++ b/fs/debugfs/inode.c
@@ -67,12 +67,13 @@ static struct inode *debugfs_get_inode(struct super_block *sb, int mode, dev_t d
static int debugfs_mknod(struct inode *dir, struct dentry *dentry,
int mode, dev_t dev)
{
- struct inode *inode = debugfs_get_inode(dir->i_sb, mode, dev);
+ struct inode *inode;
int error = -EPERM;
if (dentry->d_inode)
return -EEXIST;
+ inode = debugfs_get_inode(dir->i_sb, mode, dev);
if (inode) {
d_instantiate(dentry, inode);
dget(dentry);
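
The debugfs change is a small leak fix: debugfs_get_inode() used to run before the -EEXIST check, and the fresh inode was simply dropped on that early return. The safe ordering, as a sketch (mknod_like() is illustrative):

#include <errno.h>
#include <stdlib.h>

static int mknod_like(int already_exists)
{
	void *inode;

	if (already_exists)
		return -EEXIST;		/* nothing allocated yet, nothing leaks */

	inode = malloc(64);		/* stand-in for debugfs_get_inode() */
	if (!inode)
		return -EPERM;		/* the function's existing default */

	/* ... d_instantiate()/dget() would take ownership here ... */
	free(inode);			/* freed only to keep the sketch clean */
	return 0;
}
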
diff --git a/fs/locks.c b/fs/locks.c
index 6f99c0a..ab61a8b 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -755,6 +755,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
if (request->fl_type == F_UNLCK)
goto out;
+ error = -ENOMEM;
new_fl = locks_alloc_lock();
if (new_fl == NULL)
goto out;
@@ -781,6 +782,7 @@ static int flock_lock_file(struct file *filp, struct file_lock *request)
locks_copy_lock(new_fl, request);
locks_insert_lock(&inode->i_flock, new_fl);
new_fl = NULL;
+ error = 0;
out:
unlock_kernel();
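
The flock_lock_file() fix is the usual goto-out discipline: preload 'error' with the failure the next step can produce, and only clear it once the lock is actually installed; before this change an allocation failure fell through to 'out' still carrying a stale status. In outline (flock_like() is illustrative):

#include <errno.h>
#include <stdlib.h>

static int flock_like(int unlock_only)
{
	int error = 0;
	void *new_fl;

	if (unlock_only)
		goto out;		/* error is deliberately still 0 */

	error = -ENOMEM;		/* what a failed allocation must report */
	new_fl = malloc(32);		/* stand-in for locks_alloc_lock() */
	if (new_fl == NULL)
		goto out;

	/* ... conflict checks may 'goto out' with their own codes ... */

	free(new_fl);			/* the list takes ownership in real code */
	error = 0;			/* success only once the lock is in place */
out:
	return error;
}
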
diff --git a/include/asm-arm/arch-pxa/ohci.h b/include/asm-arm/arch-pxa/ohci.h
index 7da8956..e848a47 100644
--- a/include/asm-arm/arch-pxa/ohci.h
+++ b/include/asm-arm/arch-pxa/ohci.h
@@ -11,6 +11,8 @@ struct pxaohci_platform_data {
#define PMM_NPS_MODE 1
#define PMM_GLOBAL_MODE 2
#define PMM_PERPORT_MODE 3
+
+ int power_budget;
};
extern void pxa_set_ohci_info(struct pxaohci_platform_data *info);
diff --git a/include/asm-powerpc/cputable.h b/include/asm-powerpc/cputable.h
index 9fcf016..f6265c2 100644
--- a/include/asm-powerpc/cputable.h
+++ b/include/asm-powerpc/cputable.h
@@ -329,7 +329,7 @@ extern void do_cpu_ftr_fixups(unsigned long offset);
#define CPU_FTRS_CELL (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2 | \
CPU_FTR_ALTIVEC_COMP | CPU_FTR_MMCRA | CPU_FTR_SMT | \
- CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO)
+ CPU_FTR_CTRL | CPU_FTR_PAUSE_ZERO | CPU_FTR_CI_LARGE_PAGE)
#define CPU_FTRS_COMPATIBLE (CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | \
CPU_FTR_HPTE_TABLE | CPU_FTR_PPCAS_ARCH_V2)
#endif
diff --git a/include/asm-s390/futex.h b/include/asm-s390/futex.h
index 40c25e1..1802775 100644
--- a/include/asm-s390/futex.h
+++ b/include/asm-s390/futex.h
@@ -11,23 +11,24 @@
#define __futex_atomic_fixup \
".section __ex_table,\"a\"\n" \
" .align 4\n" \
- " .long 0b,2b,1b,2b\n" \
+ " .long 0b,4b,2b,4b,3b,4b\n" \
".previous"
#else /* __s390x__ */
#define __futex_atomic_fixup \
".section __ex_table,\"a\"\n" \
" .align 8\n" \
- " .quad 0b,2b,1b,2b\n" \
+ " .quad 0b,4b,2b,4b,3b,4b\n" \
".previous"
#endif /* __s390x__ */
#define __futex_atomic_op(insn, ret, oldval, newval, uaddr, oparg) \
- asm volatile(" l %1,0(%6)\n" \
- "0: " insn \
- " cs %1,%2,0(%6)\n" \
- "1: jl 0b\n" \
+ asm volatile(" sacf 256\n" \
+ "0: l %1,0(%6)\n" \
+ "1: " insn \
+ "2: cs %1,%2,0(%6)\n" \
+ "3: jl 1b\n" \
" lhi %0,0\n" \
- "2:\n" \
+ "4: sacf 0\n" \
__futex_atomic_fixup \
: "=d" (ret), "=&d" (oldval), "=&d" (newval), \
"=m" (*uaddr) \
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index ad133fc..1713ace 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -21,7 +21,7 @@ typedef void (elevator_put_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_activate_req_fn) (request_queue_t *, struct request *);
typedef void (elevator_deactivate_req_fn) (request_queue_t *, struct request *);
-typedef int (elevator_init_fn) (request_queue_t *, elevator_t *);
+typedef void *(elevator_init_fn) (request_queue_t *, elevator_t *);
typedef void (elevator_exit_fn) (elevator_t *);
struct elevator_ops
diff --git a/include/linux/i2o.h b/include/linux/i2o.h
index dd7d627..c115e9e 100644
--- a/include/linux/i2o.h
+++ b/include/linux/i2o.h
@@ -1114,8 +1114,11 @@ static inline struct i2o_message *i2o_msg_get(struct i2o_controller *c)
mmsg->mfa = readl(c->in_port);
if (unlikely(mmsg->mfa >= c->in_queue.len)) {
+ u32 mfa = mmsg->mfa;
+
mempool_free(mmsg, c->in_msg.mempool);
- if(mmsg->mfa == I2O_QUEUE_EMPTY)
+
+ if (mfa == I2O_QUEUE_EMPTY)
return ERR_PTR(-EBUSY);
return ERR_PTR(-EFAULT);
}
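
The i2o_msg_get() hunk fixes a use-after-free: mmsg->mfa was examined after mempool_free() had already returned the message to the pool. The fix is simply to snapshot the field first; the shape of the pattern (QUEUE_EMPTY and classify_bad_mfa() are stand-ins):

#include <errno.h>
#include <stdint.h>
#include <stdlib.h>

#define QUEUE_EMPTY 0xffffffffu		/* stand-in for I2O_QUEUE_EMPTY */

struct msg { uint32_t mfa; };

static int classify_bad_mfa(struct msg *mmsg)
{
	uint32_t mfa = mmsg->mfa;	/* snapshot before freeing */

	free(mmsg);			/* stand-in for mempool_free() */

	/* Reading mmsg->mfa past this point would be the original bug. */
	return mfa == QUEUE_EMPTY ? -EBUSY : -EFAULT;
}
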
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 6a7621b..f5fdca1 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -36,6 +36,7 @@
#include <linux/nodemask.h>
struct vm_area_struct;
+struct mm_struct;
#ifdef CONFIG_NUMA
diff --git a/include/linux/pci-acpi.h b/include/linux/pci-acpi.h
index 4877e35..936ef82 100644
--- a/include/linux/pci-acpi.h
+++ b/include/linux/pci-acpi.h
@@ -50,7 +50,7 @@
extern acpi_status pci_osc_control_set(acpi_handle handle, u32 flags);
extern acpi_status pci_osc_support_set(u32 flags);
#else
-#if !defined(acpi_status)
+#if !defined(AE_ERROR)
typedef u32 acpi_status;
#define AE_ERROR (acpi_status) (0x0001)
#endif
diff --git a/kernel/exit.c b/kernel/exit.c
index e95b932..e06d0c1 100644
--- a/kernel/exit.c
+++ b/kernel/exit.c
@@ -881,14 +881,6 @@ fastcall NORET_TYPE void do_exit(long code)
tsk->flags |= PF_EXITING;
- /*
- * Make sure we don't try to process any timer firings
- * while we are already exiting.
- */
- tsk->it_virt_expires = cputime_zero;
- tsk->it_prof_expires = cputime_zero;
- tsk->it_sched_expires = 0;
-
if (unlikely(in_atomic()))
printk(KERN_INFO "note: %s[%d] exited with preempt_count %d\n",
current->comm, current->pid,
diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
index 520f6c5..d38d9ec 100644
--- a/kernel/posix-cpu-timers.c
+++ b/kernel/posix-cpu-timers.c
@@ -555,9 +555,6 @@ static void arm_timer(struct k_itimer *timer, union cpu_time_count now)
struct cpu_timer_list *next;
unsigned long i;
- if (CPUCLOCK_PERTHREAD(timer->it_clock) && (p->flags & PF_EXITING))
- return;
-
head = (CPUCLOCK_PERTHREAD(timer->it_clock) ?
p->cpu_timers : p->signal->cpu_timers);
head += CPUCLOCK_WHICH(timer->it_clock);
@@ -1173,6 +1170,9 @@ static void check_process_timers(struct task_struct *tsk,
}
t = tsk;
do {
+ if (unlikely(t->flags & PF_EXITING))
+ continue;
+
ticks = cputime_add(cputime_add(t->utime, t->stime),
prof_left);
if (!cputime_eq(prof_expires, cputime_zero) &&
@@ -1193,11 +1193,7 @@ static void check_process_timers(struct task_struct *tsk,
t->it_sched_expires > sched)) {
t->it_sched_expires = sched;
}
-
- do {
- t = next_thread(t);
- } while (unlikely(t->flags & PF_EXITING));
- } while (t != tsk);
+ } while ((t = next_thread(t)) != tsk);
}
}
@@ -1289,30 +1285,30 @@ void run_posix_cpu_timers(struct task_struct *tsk)
#undef UNEXPIRED
- BUG_ON(tsk->exit_state);
-
/*
* Double-check with locks held.
*/
read_lock(&tasklist_lock);
- spin_lock(&tsk->sighand->siglock);
+ if (likely(tsk->signal != NULL)) {
+ spin_lock(&tsk->sighand->siglock);
- /*
- * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
- * all the timers that are firing, and put them on the firing list.
- */
- check_thread_timers(tsk, &firing);
- check_process_timers(tsk, &firing);
+ /*
+ * Here we take off tsk->cpu_timers[N] and tsk->signal->cpu_timers[N]
+ * all the timers that are firing, and put them on the firing list.
+ */
+ check_thread_timers(tsk, &firing);
+ check_process_timers(tsk, &firing);
- /*
- * We must release these locks before taking any timer's lock.
- * There is a potential race with timer deletion here, as the
- * siglock now protects our private firing list. We have set
- * the firing flag in each timer, so that a deletion attempt
- * that gets the timer lock before we do will give it up and
- * spin until we've taken care of that timer below.
- */
- spin_unlock(&tsk->sighand->siglock);
+ /*
+ * We must release these locks before taking any timer's lock.
+ * There is a potential race with timer deletion here, as the
+ * siglock now protects our private firing list. We have set
+ * the firing flag in each timer, so that a deletion attempt
+ * that gets the timer lock before we do will give it up and
+ * spin until we've taken care of that timer below.
+ */
+ spin_unlock(&tsk->sighand->siglock);
+ }
read_unlock(&tasklist_lock);
/*
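
Two details in the posix-cpu-timers hunks deserve a note. Exiting threads are now skipped inside the loop body, which is safe because a continue in a do/while jumps to the controlling expression, so next_thread() still advances the cursor; and run_posix_cpu_timers() now tolerates a reaped task (tsk->signal == NULL) instead of BUG_ON(tsk->exit_state). The continue semantics, checked standalone:

#include <assert.h>

int main(void)
{
	int visited = 0, t = 0;

	do {
		if (t == 2)
			continue;	/* skip, but ++t below still runs */
		visited++;
	} while (++t != 5);

	assert(visited == 4);		/* t == 2 was skipped, loop advanced */
	return 0;
}
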
diff --git a/mm/shmem.c b/mm/shmem.c
index 4c5e68e..1e43c8a 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1780,6 +1780,7 @@ static int shmem_rmdir(struct inode *dir, struct dentry *dentry)
if (!simple_empty(dentry))
return -ENOTEMPTY;
+ dentry->d_inode->i_nlink--;
dir->i_nlink--;
return shmem_unlink(dir, dentry);
}
@@ -2102,6 +2103,7 @@ static int shmem_fill_super(struct super_block *sb,
sb->s_blocksize_bits = PAGE_CACHE_SHIFT;
sb->s_magic = TMPFS_MAGIC;
sb->s_op = &shmem_ops;
+ sb->s_time_gran = 1;
inode = shmem_get_inode(sb, S_IFDIR | mode, 0);
if (!inode)
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 4649a63..440a733 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1061,7 +1061,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, unsigned long nr_pages,
loop_again:
total_scanned = 0;
nr_reclaimed = 0;
- sc.may_writepage = !laptop_mode,
+ sc.may_writepage = !laptop_mode;
sc.nr_mapped = read_page_state(nr_mapped);
inc_page_state(pageoutrun);
diff --git a/net/dccp/ackvec.c b/net/dccp/ackvec.c
index b5981e5..8c211c5 100644
--- a/net/dccp/ackvec.c
+++ b/net/dccp/ackvec.c
@@ -452,6 +452,7 @@ found:
(unsigned long long)
avr->dccpavr_ack_ackno);
dccp_ackvec_throw_record(av, avr);
+ break;
}
/*
* If it wasn't received, continue scanning... we might
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index 0923add..9f0bb52 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -116,6 +116,7 @@ sr_failed:
too_many_hops:
/* Tell the sender its packet died... */
+ IP_INC_STATS_BH(IPSTATS_MIB_INHDRERRORS);
icmp_send(skb, ICMP_TIME_EXCEEDED, ICMP_EXC_TTL, 0);
drop:
kfree_skb(skb);
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 4a538bc..b5521a9 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -1649,7 +1649,7 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
* Hence, we can detect timed out packets during fast
* retransmit without falling to slow start.
*/
- if (tcp_head_timedout(sk, tp)) {
+ if (!IsReno(tp) && tcp_head_timedout(sk, tp)) {
struct sk_buff *skb;
skb = tp->scoreboard_skb_hint ? tp->scoreboard_skb_hint
@@ -1662,8 +1662,6 @@ static void tcp_update_scoreboard(struct sock *sk, struct tcp_sock *tp)
if (!(TCP_SKB_CB(skb)->sacked&TCPCB_TAGBITS)) {
TCP_SKB_CB(skb)->sacked |= TCPCB_LOST;
tp->lost_out += tcp_skb_pcount(skb);
- if (IsReno(tp))
- tcp_remove_reno_sacks(sk, tp, tcp_skb_pcount(skb) + 1);
/* clear xmit_retrans hint */
if (tp->retransmit_skb_hint &&