-rw-r--r--  Documentation/stable_kernel_rules.txt | 19
-rw-r--r--  Documentation/trace/postprocess/trace-vmscan-postprocess.pl | 8
-rw-r--r--  MAINTAINERS | 2
-rw-r--r--  Makefile | 2
-rw-r--r--  arch/arm/configs/mxs_defconfig | 1
-rw-r--r--  arch/arm/include/asm/cacheflush.h | 8
-rw-r--r--  arch/arm/mach-omap2/opp.c | 3
-rw-r--r--  arch/arm/mm/tlb-v7.S | 12
-rw-r--r--  arch/ia64/include/asm/atomic.h | 4
-rw-r--r--  arch/ia64/kernel/irq_ia64.c | 1
-rw-r--r--  arch/m68k/include/asm/entry_mm.h | 4
-rw-r--r--  arch/m68k/kernel/sys_m68k.c | 8
-rw-r--r--  arch/mips/include/asm/thread_info.h | 4
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S | 3
-rw-r--r--  arch/powerpc/include/asm/cputime.h | 6
-rw-r--r--  arch/powerpc/include/asm/reg.h | 3
-rw-r--r--  arch/powerpc/kernel/ftrace.c | 12
-rw-r--r--  arch/powerpc/kernel/time.c | 10
-rw-r--r--  arch/s390/kernel/compat_linux.c | 2
-rw-r--r--  arch/x86/include/asm/processor.h | 2
-rw-r--r--  arch/x86/kernel/alternative.c | 2
-rw-r--r--  arch/x86/kernel/amd_nb.c | 8
-rw-r--r--  arch/x86/kernel/cpu/amd.c | 2
-rw-r--r--  arch/x86/kernel/cpu/common.c | 5
-rw-r--r--  arch/x86/kernel/cpu/intel.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce.c | 2
-rw-r--r--  arch/x86/kernel/cpu/mcheck/mce_amd.c | 5
-rw-r--r--  arch/x86/kernel/cpu/proc.c | 4
-rw-r--r--  arch/x86/kernel/microcode_core.c | 31
-rw-r--r--  arch/x86/xen/p2m.c | 36
-rw-r--r--  drivers/acpi/ac.c | 4
-rw-r--r--  drivers/base/memory.c | 58
-rw-r--r--  drivers/char/mspec.c | 2
-rw-r--r--  drivers/char/random.c | 374
-rw-r--r--  drivers/edac/i7core_edac.c | 2
-rw-r--r--  drivers/firmware/dmi_scan.c | 3
-rw-r--r--  drivers/firmware/pcdp.c | 4
-rw-r--r--  drivers/gpu/drm/i915/intel_ringbuffer.c | 7
-rw-r--r--  drivers/gpu/drm/radeon/atombios_dp.c | 10
-rw-r--r--  drivers/gpu/drm/radeon/evergreen.c | 57
-rw-r--r--  drivers/gpu/drm/radeon/radeon_asic.h | 8
-rw-r--r--  drivers/gpu/drm/radeon/radeon_connectors.c | 35
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cursor.c | 8
-rw-r--r--  drivers/gpu/drm/radeon/rv515.c | 13
-rw-r--r--  drivers/hwmon/coretemp.c | 7
-rw-r--r--  drivers/infiniband/ulp/srp/ib_srp.c | 87
-rw-r--r--  drivers/input/tablet/wacom_wac.c | 2
-rw-r--r--  drivers/md/dm-raid1.c | 2
-rw-r--r--  drivers/md/dm-region-hash.c | 5
-rw-r--r--  drivers/mfd/ab3100-core.c | 5
-rw-r--r--  drivers/mfd/ab3550-core.c | 2
-rw-r--r--  drivers/mfd/ezx-pcap.c | 2
-rw-r--r--  drivers/mfd/wm831x-otp.c | 8
-rw-r--r--  drivers/mmc/host/sdhci-pci.c | 1
-rw-r--r--  drivers/net/bnx2.c | 6
-rw-r--r--  drivers/net/caif/caif_serial.c | 3
-rw-r--r--  drivers/net/e1000e/82571.c | 4
-rw-r--r--  drivers/net/tun.c | 6
-rw-r--r--  drivers/net/usb/kaweth.c | 2
-rw-r--r--  drivers/net/wireless/rt2x00/rt2800usb.c | 1
-rw-r--r--  drivers/net/wireless/rt2x00/rt61pci.c | 3
-rw-r--r--  drivers/rtc/rtc-wm831x.c | 24
-rw-r--r--  drivers/scsi/libsas/sas_expander.c | 47
-rw-r--r--  drivers/scsi/scsi_error.c | 14
-rw-r--r--  drivers/scsi/scsi_lib.c | 11
-rw-r--r--  drivers/scsi/scsi_scan.c | 3
-rw-r--r--  drivers/scsi/scsi_sysfs.c | 41
-rw-r--r--  drivers/usb/core/devio.c | 10
-rw-r--r--  drivers/usb/core/hub.c | 9
-rw-r--r--  drivers/usb/early/ehci-dbgp.c | 2
-rw-r--r--  drivers/usb/gadget/u_ether.c | 12
-rw-r--r--  drivers/usb/host/pci-quirks.c | 7
-rw-r--r--  drivers/usb/host/pci-quirks.h | 1
-rw-r--r--  drivers/usb/host/xhci-pci.c | 10
-rw-r--r--  drivers/usb/host/xhci.c | 8
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio.c | 1
-rw-r--r--  drivers/usb/serial/ftdi_sio_ids.h | 7
-rw-r--r--  drivers/usb/serial/mos7840.c | 9
-rw-r--r--  drivers/usb/serial/option.c | 281
-rw-r--r--  fs/btrfs/async-thread.c | 9
-rw-r--r--  fs/btrfs/disk-io.c | 5
-rw-r--r--  fs/cifs/readdir.c | 7
-rw-r--r--  fs/ext4/balloc.c | 3
-rw-r--r--  fs/ext4/bitmap.c | 8
-rw-r--r--  fs/ext4/ext4.h | 2
-rw-r--r--  fs/ext4/ialloc.c | 3
-rw-r--r--  fs/ext4/inode.c | 9
-rw-r--r--  fs/ext4/super.c | 1
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/hugetlbfs/inode.c | 3
-rw-r--r--  fs/locks.c | 6
-rw-r--r--  fs/nfs/file.c | 7
-rw-r--r--  fs/nfs/internal.h | 2
-rw-r--r--  fs/nfs/write.c | 4
-rw-r--r--  fs/nfsd/nfs4xdr.c | 2
-rw-r--r--  fs/nilfs2/ioctl.c | 4
-rw-r--r--  fs/nilfs2/super.c | 3
-rw-r--r--  fs/nilfs2/the_nilfs.c | 1
-rw-r--r--  fs/nilfs2/the_nilfs.h | 2
-rw-r--r--  fs/ubifs/sb.c | 8
-rw-r--r--  include/linux/cpu.h | 5
-rw-r--r--  include/linux/cpuset.h | 47
-rw-r--r--  include/linux/fs.h | 11
-rw-r--r--  include/linux/init_task.h | 8
-rw-r--r--  include/linux/irqdesc.h | 1
-rw-r--r--  include/linux/memcontrol.h | 3
-rw-r--r--  include/linux/mfd/ezx-pcap.h | 1
-rw-r--r--  include/linux/migrate.h | 23
-rw-r--r--  include/linux/mmzone.h | 14
-rw-r--r--  include/linux/random.h | 19
-rw-r--r--  include/linux/sched.h | 2
-rw-r--r--  include/linux/swap.h | 7
-rw-r--r--  include/linux/usb.h | 21
-rw-r--r--  include/trace/events/random.h | 134
-rw-r--r--  include/trace/events/vmscan.h | 85
-rw-r--r--  kernel/cpuset.c | 63
-rw-r--r--  kernel/fork.c | 3
-rw-r--r--  kernel/futex.c | 17
-rw-r--r--  kernel/irq/handle.c | 7
-rw-r--r--  kernel/irq/manage.c | 17
-rw-r--r--  kernel/power/hibernate.c | 6
-rw-r--r--  kernel/power/suspend.c | 3
-rw-r--r--  kernel/time/ntp.c | 8
-rw-r--r--  kernel/workqueue.c | 38
-rw-r--r--  mm/compaction.c | 26
-rw-r--r--  mm/filemap.c | 11
-rw-r--r--  mm/hugetlb.c | 38
-rw-r--r--  mm/memcontrol.c | 3
-rw-r--r--  mm/memory-failure.c | 6
-rw-r--r--  mm/memory_hotplug.c | 2
-rw-r--r--  mm/mempolicy.c | 30
-rw-r--r--  mm/migrate.c | 240
-rw-r--r--  mm/mmu_notifier.c | 45
-rw-r--r--  mm/page_alloc.c | 118
-rw-r--r--  mm/slab.c | 13
-rw-r--r--  mm/slub.c | 40
-rw-r--r--  mm/vmscan.c | 296
-rw-r--r--  mm/vmstat.c | 2
-rw-r--r--  net/caif/caif_dev.c | 2
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/core/rtnetlink.c | 9
-rw-r--r--  net/ipv4/cipso_ipv4.c | 6
-rw-r--r--  net/ipv4/tcp.c | 5
-rw-r--r--  net/ipv4/tcp_input.c | 4
-rw-r--r--  net/mac80211/mesh.c | 1
-rw-r--r--  net/sched/sch_sfb.c | 2
-rw-r--r--  net/sctp/input.c | 7
-rw-r--r--  net/sctp/socket.c | 12
-rw-r--r--  net/sunrpc/rpcb_clnt.c | 4
-rw-r--r--  net/sunrpc/sched.c | 2
-rw-r--r--  net/sunrpc/xprtrdma/transport.c | 3
-rw-r--r--  net/sunrpc/xprtsock.c | 10
-rw-r--r--  net/wanrouter/wanmain.c | 51
-rw-r--r--  net/wireless/core.c | 5
-rw-r--r--  net/wireless/core.h | 1
-rw-r--r--  net/wireless/util.c | 5
-rw-r--r--  sound/drivers/mpu401/mpu401_uart.c | 1
-rw-r--r--  sound/pci/hda/patch_realtek.c | 1
-rw-r--r--  sound/soc/codecs/wm8962.c | 3
-rw-r--r--  sound/soc/codecs/wm8994.c | 2
-rw-r--r--  sound/usb/clock.c | 3
162 files changed, 2041 insertions(+), 1045 deletions(-)
diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
index e1f856b..22bf11b 100644
--- a/Documentation/stable_kernel_rules.txt
+++ b/Documentation/stable_kernel_rules.txt
@@ -1,4 +1,4 @@
-Everything you ever wanted to know about Linux 2.6 -stable releases.
+Everything you ever wanted to know about Linux -stable releases.
Rules on what kind of patches are accepted, and which ones are not, into the
"-stable" tree:
@@ -41,10 +41,10 @@ Procedure for submitting patches to the -stable tree:
cherry-picked than this can be specified in the following format in
the sign-off area:
- Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
- Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
- Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
- Cc: <stable@vger.kernel.org> # .32.x
+ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
+ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
+ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
+ Cc: <stable@vger.kernel.org> # 3.3.x
Signed-off-by: Ingo Molnar <mingo@elte.hu>
The tag sequence has the meaning of:
@@ -78,6 +78,15 @@ Review cycle:
security kernel team, and not go through the normal review cycle.
Contact the kernel security team for more details on this procedure.
+Trees:
+
+ - The queues of patches, for both completed versions and in-progress
+ versions, can be found at:
+ http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
+ - The finalized and tagged releases of all stable kernels can be found
+ in separate branches per version at:
+ http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
+
Review committee:
diff --git a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
index 12cecc8..4a37c47 100644
--- a/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
+++ b/Documentation/trace/postprocess/trace-vmscan-postprocess.pl
@@ -379,10 +379,10 @@ EVENT_PROCESS:
# To closer match vmstat scanning statistics, only count isolate_both
# and isolate_inactive as scanning. isolate_active is rotation
- # isolate_inactive == 0
- # isolate_active == 1
- # isolate_both == 2
- if ($isolate_mode != 1) {
+ # isolate_inactive == 1
+ # isolate_active == 2
+ # isolate_both == 3
+ if ($isolate_mode != 2) {
$perprocesspid{$process_pid}->{HIGH_NR_SCANNED} += $nr_scanned;
}
$perprocesspid{$process_pid}->{HIGH_NR_CONTIG_DIRTY} += $nr_contig_dirty;
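The renumbering follows the kernel-side move of the isolate mode from a zero-based enum to the one-based values now emitted by the mm_vmscan_lru_isolate tracepoint; the check above skips mode 2 (isolate_active), which is rotation rather than scanning. A minimal C sketch of the constants the script assumes (a reconstruction for illustration, not the literal kernel header):

    enum isolate_mode {
            ISOLATE_INACTIVE = 1,   /* counted as scanning by the script */
            ISOLATE_ACTIVE   = 2,   /* rotation; excluded from HIGH_NR_SCANNED */
            ISOLATE_BOTH     = 3,   /* counted as scanning by the script */
    };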
diff --git a/MAINTAINERS b/MAINTAINERS
index b0209fb..74c8e4b 100644
--- a/MAINTAINERS
+++ b/MAINTAINERS
@@ -5247,7 +5247,7 @@ F: Documentation/blockdev/ramdisk.txt
F: drivers/block/brd.c
RANDOM NUMBER DRIVER
-M: Matt Mackall <mpm@selenic.com>
+M: "Theodore Ts'o" <tytso@mit.edu>
S: Maintained
F: drivers/char/random.c
diff --git a/Makefile b/Makefile
index 5fdfaa8..d863aa0 100644
--- a/Makefile
+++ b/Makefile
@@ -1,6 +1,6 @@
VERSION = 3
PATCHLEVEL = 0
-SUBLEVEL = 38
+SUBLEVEL = 42
EXTRAVERSION =
NAME = Sneaky Weasel
diff --git a/arch/arm/configs/mxs_defconfig b/arch/arm/configs/mxs_defconfig
index 2bf2243..166d6aa 100644
--- a/arch/arm/configs/mxs_defconfig
+++ b/arch/arm/configs/mxs_defconfig
@@ -29,7 +29,6 @@ CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
CONFIG_PREEMPT_VOLUNTARY=y
CONFIG_AEABI=y
-CONFIG_DEFAULT_MMAP_MIN_ADDR=65536
CONFIG_AUTO_ZRELADDR=y
CONFIG_FPE_NWFPE=y
CONFIG_NET=y
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 1252a26..42dec04 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -215,7 +215,9 @@ static inline void vivt_flush_cache_mm(struct mm_struct *mm)
static inline void
vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm)))
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm)))
__cpuc_flush_user_range(start & PAGE_MASK, PAGE_ALIGN(end),
vma->vm_flags);
}
@@ -223,7 +225,9 @@ vivt_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned
static inline void
vivt_flush_cache_page(struct vm_area_struct *vma, unsigned long user_addr, unsigned long pfn)
{
- if (cpumask_test_cpu(smp_processor_id(), mm_cpumask(vma->vm_mm))) {
+ struct mm_struct *mm = vma->vm_mm;
+
+ if (!mm || cpumask_test_cpu(smp_processor_id(), mm_cpumask(mm))) {
unsigned long addr = user_addr & PAGE_MASK;
__cpuc_flush_user_range(addr, addr + PAGE_SIZE, vma->vm_flags);
}
diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
index ab8b35b..0627494 100644
--- a/arch/arm/mach-omap2/opp.c
+++ b/arch/arm/mach-omap2/opp.c
@@ -53,7 +53,7 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
omap_table_init = 1;
/* Lets now register with OPP library */
- for (i = 0; i < opp_def_size; i++) {
+ for (i = 0; i < opp_def_size; i++, opp_def++) {
struct omap_hwmod *oh;
struct device *dev;
@@ -86,7 +86,6 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
__func__, opp_def->freq,
opp_def->hwmod_name, i, r);
}
- opp_def++;
}
return 0;
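Folding opp_def++ into the for header fixes iteration when the body reaches the next iteration via continue: a trailing increment at the bottom of the body is skipped in that case, so the loop keeps re-testing the same entry. A self-contained C sketch of the fixed pattern (the entry type and field are hypothetical):

    #include <stdio.h>

    struct entry { int valid; };

    int main(void)
    {
            struct entry list[4] = { {1}, {0}, {1}, {1} };
            struct entry *p = list;
            int i, seen = 0;

            /* Fixed form: the advance lives in the loop header, so the
             * 'continue' below cannot skip it. With a trailing "p++;"
             * at the end of the body instead, an invalid entry would
             * pin p forever and be re-tested on every iteration. */
            for (i = 0; i < 4; i++, p++) {
                    if (!p->valid)
                            continue;
                    seen++;
            }
            printf("%d valid entries\n", seen);     /* prints 3 */
            return 0;
    }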
diff --git a/arch/arm/mm/tlb-v7.S b/arch/arm/mm/tlb-v7.S
index 53cd5b4..875634a 100644
--- a/arch/arm/mm/tlb-v7.S
+++ b/arch/arm/mm/tlb-v7.S
@@ -38,11 +38,19 @@ ENTRY(v7wbi_flush_user_tlb_range)
dsb
mov r0, r0, lsr #PAGE_SHIFT @ align address
mov r1, r1, lsr #PAGE_SHIFT
+#ifdef CONFIG_ARM_ERRATA_720789
+ mov r3, #0
+#else
asid r3, r3 @ mask ASID
+#endif
orr r0, r3, r0, lsl #PAGE_SHIFT @ Create initial MVA
mov r1, r1, lsl #PAGE_SHIFT
1:
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
+#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
@@ -70,7 +78,11 @@ ENTRY(v7wbi_flush_kern_tlb_range)
mov r0, r0, lsl #PAGE_SHIFT
mov r1, r1, lsl #PAGE_SHIFT
1:
+#ifdef CONFIG_ARM_ERRATA_720789
+ ALT_SMP(mcr p15, 0, r0, c8, c3, 3) @ TLB invalidate U MVA all ASID (shareable)
+#else
ALT_SMP(mcr p15, 0, r0, c8, c3, 1) @ TLB invalidate U MVA (shareable)
+#endif
ALT_UP(mcr p15, 0, r0, c8, c7, 1) @ TLB invalidate U MVA
add r0, r0, #PAGE_SZ
cmp r0, r1
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 4468814..6fcc9a0 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -18,8 +18,8 @@
#include <asm/system.h>
-#define ATOMIC_INIT(i) ((atomic_t) { (i) })
-#define ATOMIC64_INIT(i) ((atomic64_t) { (i) })
+#define ATOMIC_INIT(i) { (i) }
+#define ATOMIC64_INIT(i) { (i) }
#define atomic_read(v) (*(volatile int *)&(v)->counter)
#define atomic64_read(v) (*(volatile long *)&(v)->counter)
diff --git a/arch/ia64/kernel/irq_ia64.c b/arch/ia64/kernel/irq_ia64.c
index 782c3a35..3540c5e 100644
--- a/arch/ia64/kernel/irq_ia64.c
+++ b/arch/ia64/kernel/irq_ia64.c
@@ -23,7 +23,6 @@
#include <linux/ioport.h>
#include <linux/kernel_stat.h>
#include <linux/ptrace.h>
-#include <linux/random.h> /* for rand_initialize_irq() */
#include <linux/signal.h>
#include <linux/smp.h>
#include <linux/threads.h>
diff --git a/arch/m68k/include/asm/entry_mm.h b/arch/m68k/include/asm/entry_mm.h
index 73b8c8f..bdace4b 100644
--- a/arch/m68k/include/asm/entry_mm.h
+++ b/arch/m68k/include/asm/entry_mm.h
@@ -35,8 +35,8 @@
/* the following macro is used when enabling interrupts */
#if defined(MACH_ATARI_ONLY)
- /* block out HSYNC on the atari */
-#define ALLOWINT (~0x400)
+ /* block out HSYNC = ipl 2 on the atari */
+#define ALLOWINT (~0x500)
#define MAX_NOINT_IPL 3
#else
/* portable version */
diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
index 8623f8d..9a5932e 100644
--- a/arch/m68k/kernel/sys_m68k.c
+++ b/arch/m68k/kernel/sys_m68k.c
@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
goto bad_access;
}
- mem_value = *mem;
+ /*
+ * No need to check for EFAULT; we know that the page is
+ * present and writable.
+ */
+ __get_user(mem_value, mem);
if (mem_value == oldval)
- *mem = newval;
+ __put_user(newval, mem);
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
index 97f8bf6..adda036 100644
--- a/arch/mips/include/asm/thread_info.h
+++ b/arch/mips/include/asm/thread_info.h
@@ -60,6 +60,8 @@ struct thread_info {
register struct thread_info *__current_thread_info __asm__("$28");
#define current_thread_info() __current_thread_info
+#endif /* !__ASSEMBLY__ */
+
/* thread information allocation */
#if defined(CONFIG_PAGE_SIZE_4KB) && defined(CONFIG_32BIT)
#define THREAD_SIZE_ORDER (1)
@@ -97,8 +99,6 @@ register struct thread_info *__current_thread_info __asm__("$28");
#define free_thread_info(info) kfree(info)
-#endif /* !__ASSEMBLY__ */
-
#define PREEMPT_ACTIVE 0x10000000
/*
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index a81176f..be281c6 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -1,5 +1,6 @@
#include <asm/asm-offsets.h>
#include <asm/page.h>
+#include <asm/thread_info.h>
#include <asm-generic/vmlinux.lds.h>
#undef mips
@@ -73,7 +74,7 @@ SECTIONS
.data : { /* Data */
. = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
- INIT_TASK_DATA(PAGE_SIZE)
+ INIT_TASK_DATA(THREAD_SIZE)
NOSAVE_DATA
CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
diff --git a/arch/powerpc/include/asm/cputime.h b/arch/powerpc/include/asm/cputime.h
index 1cf20bd..33a3580 100644
--- a/arch/powerpc/include/asm/cputime.h
+++ b/arch/powerpc/include/asm/cputime.h
@@ -126,11 +126,11 @@ static inline u64 cputime64_to_jiffies64(const cputime_t ct)
/*
* Convert cputime <-> microseconds
*/
-extern u64 __cputime_msec_factor;
+extern u64 __cputime_usec_factor;
static inline unsigned long cputime_to_usecs(const cputime_t ct)
{
- return mulhdu(ct, __cputime_msec_factor) * USEC_PER_MSEC;
+ return mulhdu(ct, __cputime_usec_factor);
}
static inline cputime_t usecs_to_cputime(const unsigned long us)
@@ -143,7 +143,7 @@ static inline cputime_t usecs_to_cputime(const unsigned long us)
sec = us / 1000000;
if (ct) {
ct *= tb_ticks_per_sec;
- do_div(ct, 1000);
+ do_div(ct, 1000000);
}
if (sec)
ct += (cputime_t) sec * tb_ticks_per_sec;
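The conversion works because __cputime_usec_factor is a 0.64 fixed-point fraction, 2^64 * 1000000 / tb_ticks_per_sec, so mulhdu() (the high 64 bits of a 64x64 multiply) turns timebase ticks directly into microseconds; the old code used a milliseconds factor, and usecs_to_cputime divided by 1000 where 1000000 was needed. A userspace sketch of the same arithmetic, assuming a compiler with __int128 and a hypothetical timebase frequency:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t tb_ticks_per_sec = 512000000;  /* hypothetical timebase */
            /* 0.64 fixed-point factor: 2^64 * 1000000 / tb_ticks_per_sec */
            uint64_t usec_factor =
                    (uint64_t)(((unsigned __int128)1000000 << 64) / tb_ticks_per_sec);
            uint64_t ticks = tb_ticks_per_sec;      /* one second of ticks */

            /* mulhdu(ticks, factor): high 64 bits of the 128-bit product */
            uint64_t usecs =
                    (uint64_t)(((unsigned __int128)ticks * usec_factor) >> 64);
            printf("%llu usecs\n", (unsigned long long)usecs);  /* ~1000000 */
            return 0;
    }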
diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
index c5cae0d..764e99c 100644
--- a/arch/powerpc/include/asm/reg.h
+++ b/arch/powerpc/include/asm/reg.h
@@ -1000,7 +1000,8 @@
/* Macros for setting and retrieving special purpose registers */
#ifndef __ASSEMBLY__
#define mfmsr() ({unsigned long rval; \
- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
+ asm volatile("mfmsr %0" : "=r" (rval) : \
+ : "memory"); rval;})
#ifdef CONFIG_PPC_BOOK3S_64
#define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
: : "r" (v) : "memory")
diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
index bf99cfa..6324008 100644
--- a/arch/powerpc/kernel/ftrace.c
+++ b/arch/powerpc/kernel/ftrace.c
@@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
/*
* On PPC32 the trampoline looks like:
- * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
- * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
- * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
+ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
+ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
+ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
* 0x4e, 0x80, 0x04, 0x20 bctr
*/
@@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
pr_devel(" %08x %08x ", jmp[0], jmp[1]);
/* verify that this is what we expect it to be */
- if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
- ((jmp[1] & 0xffff0000) != 0x396b0000) ||
- (jmp[2] != 0x7d6903a6) ||
+ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
+ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
+ (jmp[2] != 0x7d8903a6) ||
(jmp[3] != 0x4e800420)) {
printk(KERN_ERR "Not a trampoline\n");
return -EINVAL;
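The new magic numbers follow from the PowerPC D-form encoding, in which the destination register occupies bits 21-25; moving the trampoline from r11 to r12 turns lis from 0x3d60xxxx into 0x3d80xxxx and addi from 0x396bxxxx into 0x398cxxxx. A small C check of that arithmetic (standard D-form encodings assumed; the helper names are invented here):

    #include <stdint.h>
    #include <stdio.h>

    /* D-form: primary opcode | RT << 21 | RA << 16 | 16-bit immediate */
    static uint32_t ppc_lis(unsigned rt)
    {
            return 0x3c000000 | rt << 21;           /* addis rt,0,imm */
    }

    static uint32_t ppc_addi(unsigned rt, unsigned ra)
    {
            return 0x38000000 | rt << 21 | ra << 16;
    }

    int main(void)
    {
            /* prints 3d600000 3d800000 396b0000 398c0000, matching the
             * old and new masks verified above */
            printf("%08x %08x %08x %08x\n",
                   (unsigned)ppc_lis(11), (unsigned)ppc_lis(12),
                   (unsigned)ppc_addi(11, 11), (unsigned)ppc_addi(12, 12));
            return 0;
    }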
diff --git a/arch/powerpc/kernel/time.c b/arch/powerpc/kernel/time.c
index 2de304a..1becd7b 100644
--- a/arch/powerpc/kernel/time.c
+++ b/arch/powerpc/kernel/time.c
@@ -168,13 +168,13 @@ EXPORT_SYMBOL_GPL(ppc_tb_freq);
#ifdef CONFIG_VIRT_CPU_ACCOUNTING
/*
* Factors for converting from cputime_t (timebase ticks) to
- * jiffies, milliseconds, seconds, and clock_t (1/USER_HZ seconds).
+ * jiffies, microseconds, seconds, and clock_t (1/USER_HZ seconds).
* These are all stored as 0.64 fixed-point binary fractions.
*/
u64 __cputime_jiffies_factor;
EXPORT_SYMBOL(__cputime_jiffies_factor);
-u64 __cputime_msec_factor;
-EXPORT_SYMBOL(__cputime_msec_factor);
+u64 __cputime_usec_factor;
+EXPORT_SYMBOL(__cputime_usec_factor);
u64 __cputime_sec_factor;
EXPORT_SYMBOL(__cputime_sec_factor);
u64 __cputime_clockt_factor;
@@ -192,8 +192,8 @@ static void calc_cputime_factors(void)
div128_by_32(HZ, 0, tb_ticks_per_sec, &res);
__cputime_jiffies_factor = res.result_low;
- div128_by_32(1000, 0, tb_ticks_per_sec, &res);
- __cputime_msec_factor = res.result_low;
+ div128_by_32(1000000, 0, tb_ticks_per_sec, &res);
+ __cputime_usec_factor = res.result_low;
div128_by_32(1, 0, tb_ticks_per_sec, &res);
__cputime_sec_factor = res.result_low;
div128_by_32(USER_HZ, 0, tb_ticks_per_sec, &res);
diff --git a/arch/s390/kernel/compat_linux.c b/arch/s390/kernel/compat_linux.c
index 53acaa8..f98af03 100644
--- a/arch/s390/kernel/compat_linux.c
+++ b/arch/s390/kernel/compat_linux.c
@@ -631,7 +631,6 @@ asmlinkage unsigned long old32_mmap(struct mmap_arg_struct_emu31 __user *arg)
return -EFAULT;
if (a.offset & ~PAGE_MASK)
return -EINVAL;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd,
a.offset >> PAGE_SHIFT);
}
@@ -642,7 +641,6 @@ asmlinkage long sys32_mmap2(struct mmap_arg_struct_emu31 __user *arg)
if (copy_from_user(&a, arg, sizeof(a)))
return -EFAULT;
- a.addr = (unsigned long) compat_ptr(a.addr);
return sys_mmap_pgoff(a.addr, a.len, a.prot, a.flags, a.fd, a.offset);
}
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 5d9c61d..e5f7248 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -99,7 +99,6 @@ struct cpuinfo_x86 {
u16 apicid;
u16 initial_apicid;
u16 x86_clflush_size;
-#ifdef CONFIG_SMP
/* number of cores as seen by the OS: */
u16 booted_cores;
/* Physical processor id: */
@@ -110,7 +109,6 @@ struct cpuinfo_x86 {
u8 compute_unit_id;
/* Index into per_cpu list: */
u16 cpu_index;
-#endif
} __attribute__((__aligned__(SMP_CACHE_BYTES)));
#define X86_VENDOR_INTEL 0
diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
index a81f2d5..dfabea4 100644
--- a/arch/x86/kernel/alternative.c
+++ b/arch/x86/kernel/alternative.c
@@ -220,7 +220,7 @@ void __init arch_init_ideal_nops(void)
ideal_nops = intel_nops;
#endif
}
-
+ break;
default:
#ifdef CONFIG_X86_64
ideal_nops = k8_nops;
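The one-liner restores the break separating the Intel case from the default arm; without it, control falls through and the just-chosen intel_nops is immediately overwritten with k8_nops. A self-contained sketch of the fallthrough bug (values are hypothetical):

    #include <stdio.h>

    int main(void)
    {
            int vendor = 1;         /* hypothetical: 1 = Intel-like */
            const char *nops = NULL;

            switch (vendor) {
            case 1:
                    nops = "intel";
                    break;  /* without this break (the statement the
                             * patch adds), control falls into 'default'
                             * and the selection becomes "k8" */
            default:
                    nops = "k8";
            }
            printf("selected: %s\n", nops); /* prints "intel" */
            return 0;
    }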
diff --git a/arch/x86/kernel/amd_nb.c b/arch/x86/kernel/amd_nb.c
index bae1efe..be16854 100644
--- a/arch/x86/kernel/amd_nb.c
+++ b/arch/x86/kernel/amd_nb.c
@@ -154,16 +154,14 @@ int amd_get_subcaches(int cpu)
{
struct pci_dev *link = node_to_amd_nb(amd_get_nb_id(cpu))->link;
unsigned int mask;
- int cuid = 0;
+ int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
return 0;
pci_read_config_dword(link, 0x1d4, &mask);
-#ifdef CONFIG_SMP
cuid = cpu_data(cpu).compute_unit_id;
-#endif
return (mask >> (4 * cuid)) & 0xf;
}
@@ -172,7 +170,7 @@ int amd_set_subcaches(int cpu, int mask)
static unsigned int reset, ban;
struct amd_northbridge *nb = node_to_amd_nb(amd_get_nb_id(cpu));
unsigned int reg;
- int cuid = 0;
+ int cuid;
if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
return -EINVAL;
@@ -190,9 +188,7 @@ int amd_set_subcaches(int cpu, int mask)
pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
}
-#ifdef CONFIG_SMP
cuid = cpu_data(cpu).compute_unit_id;
-#endif
mask <<= 4 * cuid;
mask |= (0xf ^ (1 << cuid)) << 26;
diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
index b13ed39..8115040 100644
--- a/arch/x86/kernel/cpu/amd.c
+++ b/arch/x86/kernel/cpu/amd.c
@@ -146,7 +146,6 @@ static void __cpuinit init_amd_k6(struct cpuinfo_x86 *c)
static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
@@ -190,7 +189,6 @@ static void __cpuinit amd_k7_smp_check(struct cpuinfo_x86 *c)
valid_k7:
;
-#endif
}
static void __cpuinit init_amd_k7(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
index 22a073d..0cb2883 100644
--- a/arch/x86/kernel/cpu/common.c
+++ b/arch/x86/kernel/cpu/common.c
@@ -675,9 +675,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
if (this_cpu->c_early_init)
this_cpu->c_early_init(c);
-#ifdef CONFIG_SMP
c->cpu_index = 0;
-#endif
filter_cpuid_features(c, false);
setup_smep(c);
@@ -760,10 +758,7 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
c->apicid = c->initial_apicid;
# endif
#endif
-
-#ifdef CONFIG_X86_HT
c->phys_proc_id = c->initial_apicid;
-#endif
}
setup_smep(c);
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index ed6086e..e0dc000 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -179,7 +179,6 @@ static void __cpuinit trap_init_f00f_bug(void)
static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
{
-#ifdef CONFIG_SMP
/* calling is from identify_secondary_cpu() ? */
if (!c->cpu_index)
return;
@@ -196,7 +195,6 @@ static void __cpuinit intel_smp_check(struct cpuinfo_x86 *c)
WARN_ONCE(1, "WARNING: SMP operation may be unreliable"
"with B stepping processors.\n");
}
-#endif
}
static void __cpuinit intel_workarounds(struct cpuinfo_x86 *c)
diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
index ff1ae9b..942bda2 100644
--- a/arch/x86/kernel/cpu/mcheck/mce.c
+++ b/arch/x86/kernel/cpu/mcheck/mce.c
@@ -122,9 +122,7 @@ void mce_setup(struct mce *m)
m->time = get_seconds();
m->cpuvendor = boot_cpu_data.x86_vendor;
m->cpuid = cpuid_eax(1);
-#ifdef CONFIG_SMP
m->socketid = cpu_data(m->extcpu).phys_proc_id;
-#endif
m->apicid = cpu_data(m->extcpu).initial_apicid;
rdmsrl(MSR_IA32_MCG_CAP, m->mcgcap);
}
diff --git a/arch/x86/kernel/cpu/mcheck/mce_amd.c b/arch/x86/kernel/cpu/mcheck/mce_amd.c
index dc4fb77..b97aa72 100644
--- a/arch/x86/kernel/cpu/mcheck/mce_amd.c
+++ b/arch/x86/kernel/cpu/mcheck/mce_amd.c
@@ -65,11 +65,9 @@ struct threshold_bank {
};
static DEFINE_PER_CPU(struct threshold_bank * [NR_BANKS], threshold_banks);
-#ifdef CONFIG_SMP
static unsigned char shared_bank[NR_BANKS] = {
0, 0, 0, 0, 1
};
-#endif
static DEFINE_PER_CPU(unsigned char, bank_map); /* see which banks are on */
@@ -227,10 +225,9 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
if (!block)
per_cpu(bank_map, cpu) |= (1 << bank);
-#ifdef CONFIG_SMP
+
if (shared_bank[bank] && c->cpu_core_id)
break;
-#endif
memset(&b, 0, sizeof(b));
b.cpu = cpu;
diff --git a/arch/x86/kernel/cpu/proc.c b/arch/x86/kernel/cpu/proc.c
index 62ac8cb..72c365a 100644
--- a/arch/x86/kernel/cpu/proc.c
+++ b/arch/x86/kernel/cpu/proc.c
@@ -64,12 +64,10 @@ static void show_cpuinfo_misc(struct seq_file *m, struct cpuinfo_x86 *c)
static int show_cpuinfo(struct seq_file *m, void *v)
{
struct cpuinfo_x86 *c = v;
- unsigned int cpu = 0;
+ unsigned int cpu;
int i;
-#ifdef CONFIG_SMP
cpu = c->cpu_index;
-#endif
seq_printf(m, "processor\t: %u\n"
"vendor_id\t: %s\n"
"cpu family\t: %d\n"
diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
index f924280..c4e2465 100644
--- a/arch/x86/kernel/microcode_core.c
+++ b/arch/x86/kernel/microcode_core.c
@@ -297,20 +297,31 @@ static ssize_t reload_store(struct sys_device *dev,
const char *buf, size_t size)
{
unsigned long val;
- int cpu = dev->id;
- int ret = 0;
- char *end;
+ int cpu;
+ ssize_t ret = 0, tmp_ret;
- val = simple_strtoul(buf, &end, 0);
- if (end == buf)
+ /* allow reload only from the BSP */
+ if (boot_cpu_data.cpu_index != dev->id)
return -EINVAL;
- if (val == 1) {
- get_online_cpus();
- if (cpu_online(cpu))
- ret = reload_for_cpu(cpu);
- put_online_cpus();
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+ if (val != 1)
+ return size;
+
+ get_online_cpus();
+ for_each_online_cpu(cpu) {
+ tmp_ret = reload_for_cpu(cpu);
+ if (tmp_ret != 0)
+ pr_warn("Error reloading microcode on CPU %d\n", cpu);
+
+ /* save retval of the first encountered reload error */
+ if (!ret)
+ ret = tmp_ret;
}
+ put_online_cpus();
if (!ret)
ret = size;
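Besides restricting reload to the boot CPU and looping over all online CPUs, the patch swaps the open-coded simple_strtoul()/end-pointer test for kstrtoul(), which rejects trailing garbage and overflow by itself and returns 0 or a negative errno. A userspace analogue of those stricter semantics, sketched with strtoul (the kernel helper also tolerates a single trailing newline, mirrored here):

    #include <errno.h>
    #include <stdlib.h>
    #include <string.h>

    /* Rough userspace analogue of kstrtoul(): accept only a complete,
     * in-range number (optionally newline-terminated), else return a
     * negative errno. */
    static int parse_ulong_strict(const char *s, int base, unsigned long *res)
    {
            char *end;

            errno = 0;
            *res = strtoul(s, &end, base);
            if (errno == ERANGE)
                    return -ERANGE;
            if (end == s || (*end != '\0' && strcmp(end, "\n") != 0))
                    return -EINVAL; /* no digits, or trailing junk */
            return 0;
    }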
diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c
index 58efeb9..2f7847d 100644
--- a/arch/x86/xen/p2m.c
+++ b/arch/x86/xen/p2m.c
@@ -683,6 +683,7 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
+ int ret = 0;
pfn = page_to_pfn(page);
if (!PageHighMem(page)) {
@@ -706,6 +707,24 @@ int m2p_add_override(unsigned long mfn, struct page *page, bool clear_pte)
list_add(&page->lru, &m2p_overrides[mfn_hash(mfn)]);
spin_unlock_irqrestore(&m2p_override_lock, flags);
+ /* p2m(m2p(mfn)) == mfn: the mfn is already present somewhere in
+ * this domain. Set the FOREIGN_FRAME_BIT in the p2m for the other
+ * pfn so that the following mfn_to_pfn(mfn) calls will return the
+ * pfn from the m2p_override (the backend pfn) instead.
+ * We need to do this because the pages shared by the frontend
+ * (xen-blkfront) can be already locked (lock_page, called by
+ * do_read_cache_page); when the userspace backend tries to use them
+ * with direct_IO, mfn_to_pfn returns the pfn of the frontend, so
+ * do_blockdev_direct_IO is going to try to lock the same pages
+ * again resulting in a deadlock.
+ * As a side effect get_user_pages_fast might not be safe on the
+ * frontend pages while they are being shared with the backend,
+ * because mfn_to_pfn (that ends up being called by GUPF) will
+ * return the backend pfn rather than the frontend pfn. */
+ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+ if (ret == 0 && get_phys_to_machine(pfn) == mfn)
+ set_phys_to_machine(pfn, FOREIGN_FRAME(mfn));
+
return 0;
}
EXPORT_SYMBOL_GPL(m2p_add_override);
@@ -717,6 +736,7 @@ int m2p_remove_override(struct page *page, bool clear_pte)
unsigned long uninitialized_var(address);
unsigned level;
pte_t *ptep = NULL;
+ int ret = 0;
pfn = page_to_pfn(page);
mfn = get_phys_to_machine(pfn);
@@ -743,6 +763,22 @@ int m2p_remove_override(struct page *page, bool clear_pte)
/* No tlb flush necessary because the caller already
* left the pte unmapped. */
+ /* p2m(m2p(mfn)) == FOREIGN_FRAME(mfn): the mfn is already present
+ * somewhere in this domain, even before being added to the
+ * m2p_override (see comment above in m2p_add_override).
+ * If there are no other entries in the m2p_override corresponding
+ * to this mfn, then remove the FOREIGN_FRAME_BIT from the p2m for
+ * the original pfn (the one shared by the frontend): the backend
+ * cannot do any IO on this page anymore because it has been
+ * unshared. Removing the FOREIGN_FRAME_BIT from the p2m entry of
+ * the original pfn causes mfn_to_pfn(mfn) to return the frontend
+ * pfn again. */
+ mfn &= ~FOREIGN_FRAME_BIT;
+ ret = __get_user(pfn, &machine_to_phys_mapping[mfn]);
+ if (ret == 0 && get_phys_to_machine(pfn) == FOREIGN_FRAME(mfn) &&
+ m2p_find_override(mfn) == NULL)
+ set_phys_to_machine(pfn, mfn);
+
return 0;
}
EXPORT_SYMBOL_GPL(m2p_remove_override);
diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
index 58c3f74..9582050 100644
--- a/drivers/acpi/ac.c
+++ b/drivers/acpi/ac.c
@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
ac->charger.properties = ac_props;
ac->charger.num_properties = ARRAY_SIZE(ac_props);
ac->charger.get_property = get_ac_property;
- power_supply_register(&ac->device->dev, &ac->charger);
+ result = power_supply_register(&ac->device->dev, &ac->charger);
+ if (result)
+ goto end;
printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
acpi_device_name(device), acpi_device_bid(device),
diff --git a/drivers/base/memory.c b/drivers/base/memory.c
index 45d7c8f..5fb6aae 100644
--- a/drivers/base/memory.c
+++ b/drivers/base/memory.c
@@ -224,13 +224,48 @@ int memory_isolate_notify(unsigned long val, void *v)
}
/*
+ * The probe routines leave the pages reserved, just as the bootmem code does.
+ * Make sure they're still that way.
+ */
+static bool pages_correctly_reserved(unsigned long start_pfn,
+ unsigned long nr_pages)
+{
+ int i, j;
+ struct page *page;
+ unsigned long pfn = start_pfn;
+
+ /*
+ * memmap between sections is not contiguous except with
+ * SPARSEMEM_VMEMMAP. We lookup the page once per section
+ * and assume memmap is contiguous within each section
+ */
+ for (i = 0; i < sections_per_block; i++, pfn += PAGES_PER_SECTION) {
+ if (WARN_ON_ONCE(!pfn_valid(pfn)))
+ return false;
+ page = pfn_to_page(pfn);
+
+ for (j = 0; j < PAGES_PER_SECTION; j++) {
+ if (PageReserved(page + j))
+ continue;
+
+ printk(KERN_WARNING "section number %ld page number %d "
+ "not reserved, was it already online?\n",
+ pfn_to_section_nr(pfn), j);
+
+ return false;
+ }
+ }
+
+ return true;
+}
+
+/*
* MEMORY_HOTPLUG depends on SPARSEMEM in mm/Kconfig, so it is
* OK to have direct references to sparsemem variables in here.
*/
static int
memory_block_action(unsigned long phys_index, unsigned long action)
{
- int i;
unsigned long start_pfn, start_paddr;
unsigned long nr_pages = PAGES_PER_SECTION * sections_per_block;
struct page *first_page;
@@ -238,26 +273,13 @@ memory_block_action(unsigned long phys_index, unsigned long action)
first_page = pfn_to_page(phys_index << PFN_SECTION_SHIFT);
- /*
- * The probe routines leave the pages reserved, just
- * as the bootmem code does. Make sure they're still
- * that way.
- */
- if (action == MEM_ONLINE) {
- for (i = 0; i < nr_pages; i++) {
- if (PageReserved(first_page+i))
- continue;
-
- printk(KERN_WARNING "section number %ld page number %d "
- "not reserved, was it already online?\n",
- phys_index, i);
- return -EBUSY;
- }
- }
-
switch (action) {
case MEM_ONLINE:
start_pfn = page_to_pfn(first_page);
+
+ if (!pages_correctly_reserved(start_pfn, nr_pages))
+ return -EBUSY;
+
ret = online_pages(start_pfn, nr_pages);
break;
case MEM_OFFLINE:
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 25d139c..579051c 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -284,7 +284,7 @@ mspec_mmap(struct file *file, struct vm_area_struct *vma,
vdata->flags = flags;
vdata->type = type;
spin_lock_init(&vdata->lock);
- vdata->refcnt = ATOMIC_INIT(1);
+ atomic_set(&vdata->refcnt, 1);
vma->vm_private_data = vdata;
vma->vm_flags |= (VM_IO | VM_RESERVED | VM_PFNMAP | VM_DONTEXPAND);
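This is the runtime counterpart of the ia64 ATOMIC_INIT change earlier in this series: once ATOMIC_INIT(i) expands to the plain initializer list { (i) } instead of a compound literal, it is only legal in definitions, and assignments like the one removed here must go through atomic_set(). A short kernel-context sketch of the two valid forms (generic atomic_t API assumed; names are illustrative):

    #include <linux/atomic.h>

    static atomic_t refcnt = ATOMIC_INIT(1);        /* OK: static initializer */

    static void reset_refcnt(atomic_t *r)
    {
            /* "*r = ATOMIC_INIT(1);" no longer compiles: { 1 } is an
             * initializer list, not an expression. Use the accessor. */
            atomic_set(r, 1);
    }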
diff --git a/drivers/char/random.c b/drivers/char/random.c
index c35a785..fceac95 100644
--- a/drivers/char/random.c
+++ b/drivers/char/random.c
@@ -125,21 +125,26 @@
* The current exported interfaces for gathering environmental noise
* from the devices are:
*
+ * void add_device_randomness(const void *buf, unsigned int size);
* void add_input_randomness(unsigned int type, unsigned int code,
* unsigned int value);
- * void add_interrupt_randomness(int irq);
+ * void add_interrupt_randomness(int irq, int irq_flags);
* void add_disk_randomness(struct gendisk *disk);
*
+ * add_device_randomness() is for adding data to the random pool that
+ * is likely to differ between two devices (or possibly even per boot).
+ * This would be things like MAC addresses or serial numbers, or the
+ * read-out of the RTC. This does *not* add any actual entropy to the
+ * pool, but it initializes the pool to different values for devices
+ * that might otherwise be identical and have very little entropy
+ * available to them (particularly common in the embedded world).
+ *
* add_input_randomness() uses the input layer interrupt timing, as well as
* the event type information from the hardware.
*
- * add_interrupt_randomness() uses the inter-interrupt timing as random
- * inputs to the entropy pool. Note that not all interrupts are good
- * sources of randomness! For example, the timer interrupts is not a
- * good choice, because the periodicity of the interrupts is too
- * regular, and hence predictable to an attacker. Network Interface
- * Controller interrupts are a better measure, since the timing of the
- * NIC interrupts are more unpredictable.
+ * add_interrupt_randomness() uses the interrupt timing as random
+ * inputs to the entropy pool. Using the cycle counters and the irq source
+ * as inputs, it feeds the randomness roughly once a second.
*
* add_disk_randomness() uses what amounts to the seek time of block
* layer request events, on a per-disk_devt basis, as input to the
@@ -248,6 +253,8 @@
#include <linux/percpu.h>
#include <linux/cryptohash.h>
#include <linux/fips.h>
+#include <linux/ptrace.h>
+#include <linux/kmemcheck.h>
#ifdef CONFIG_GENERIC_HARDIRQS
# include <linux/irq.h>
@@ -256,8 +263,12 @@
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/irq.h>
+#include <asm/irq_regs.h>
#include <asm/io.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/random.h>
+
/*
* Configuration information
*/
@@ -266,6 +277,8 @@
#define SEC_XFER_SIZE 512
#define EXTRACT_SIZE 10
+#define LONGS(x) (((x) + sizeof(unsigned long) - 1)/sizeof(unsigned long))
+
/*
* The minimum number of bits of entropy before we wake up a read on
* /dev/random. Should be enough to do a significant reseed.
@@ -420,8 +433,10 @@ struct entropy_store {
/* read-write data: */
spinlock_t lock;
unsigned add_ptr;
+ unsigned input_rotate;
int entropy_count;
- int input_rotate;
+ int entropy_total;
+ unsigned int initialized:1;
__u8 last_data[EXTRACT_SIZE];
};
@@ -454,6 +469,10 @@ static struct entropy_store nonblocking_pool = {
.pool = nonblocking_pool_data
};
+static __u32 const twist_table[8] = {
+ 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
+ 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
+
/*
* This function adds bytes into the entropy "pool". It does not
* update the entropy estimate. The caller should call
@@ -464,29 +483,24 @@ static struct entropy_store nonblocking_pool = {
* it's cheap to do so and helps slightly in the expected case where
* the entropy is concentrated in the low-order bits.
*/
-static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
- int nbytes, __u8 out[64])
+static void _mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
{
- static __u32 const twist_table[8] = {
- 0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
- 0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };
unsigned long i, j, tap1, tap2, tap3, tap4, tap5;
int input_rotate;
int wordmask = r->poolinfo->poolwords - 1;
const char *bytes = in;
__u32 w;
- unsigned long flags;
- /* Taps are constant, so we can load them without holding r->lock. */
tap1 = r->poolinfo->tap1;
tap2 = r->poolinfo->tap2;
tap3 = r->poolinfo->tap3;
tap4 = r->poolinfo->tap4;
tap5 = r->poolinfo->tap5;
- spin_lock_irqsave(&r->lock, flags);
- input_rotate = r->input_rotate;
- i = r->add_ptr;
+ smp_rmb();
+ input_rotate = ACCESS_ONCE(r->input_rotate);
+ i = ACCESS_ONCE(r->add_ptr);
/* mix one byte at a time to simplify size handling and churn faster */
while (nbytes--) {
@@ -513,19 +527,61 @@ static void mix_pool_bytes_extract(struct entropy_store *r, const void *in,
input_rotate += i ? 7 : 14;
}
- r->input_rotate = input_rotate;
- r->add_ptr = i;
+ ACCESS_ONCE(r->input_rotate) = input_rotate;
+ ACCESS_ONCE(r->add_ptr) = i;
+ smp_wmb();
if (out)
for (j = 0; j < 16; j++)
((__u32 *)out)[j] = r->pool[(i - j) & wordmask];
+}
+static void __mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ trace_mix_pool_bytes_nolock(r->name, nbytes, _RET_IP_);
+ _mix_pool_bytes(r, in, nbytes, out);
+}
+
+static void mix_pool_bytes(struct entropy_store *r, const void *in,
+ int nbytes, __u8 out[64])
+{
+ unsigned long flags;
+
+ trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
+ spin_lock_irqsave(&r->lock, flags);
+ _mix_pool_bytes(r, in, nbytes, out);
spin_unlock_irqrestore(&r->lock, flags);
}
-static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
+struct fast_pool {
+ __u32 pool[4];
+ unsigned long last;
+ unsigned short count;
+ unsigned char rotate;
+ unsigned char last_timer_intr;
+};
+
+/*
+ * This is a fast mixing routine used by the interrupt randomness
+ * collector. It's hardcoded for a 128-bit pool and assumes that any
+ * locks that might be needed are taken by the caller.
+ */
+static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
{
- mix_pool_bytes_extract(r, in, bytes, NULL);
+ const char *bytes = in;
+ __u32 w;
+ unsigned i = f->count;
+ unsigned input_rotate = f->rotate;
+
+ while (nbytes--) {
+ w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
+ f->pool[(i + 1) & 3];
+ f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
+ input_rotate += (i++ & 3) ? 7 : 14;
+ }
+ f->count = i;
+ f->rotate = input_rotate;
}
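fast_mix() is deliberately cheap and lock-free: one rotate, two XOR taps into a four-word pool, and the same CRC-style twist table the main pool uses. A standalone userspace port (constants and logic copied from the patch, field widths simplified; the sample input stands in for the cycles/irq/IP words that add_interrupt_randomness() feeds in later in this patch):

    #include <stdint.h>
    #include <stdio.h>

    static const uint32_t twist_table[8] = {
            0x00000000, 0x3b6e20c8, 0x76dc4190, 0x4db26158,
            0xedb88320, 0xd6d6a3e8, 0x9b64c2b0, 0xa00ae278 };

    struct fast_pool { uint32_t pool[4]; unsigned count, rotate; };

    static uint32_t rol32(uint32_t w, unsigned s)
    {
            return s ? (w << s) | (w >> (32 - s)) : w;
    }

    static void fast_mix(struct fast_pool *f, const void *in, int nbytes)
    {
            const unsigned char *bytes = in;
            unsigned i = f->count, input_rotate = f->rotate;
            uint32_t w;

            while (nbytes--) {
                    w = rol32(*bytes++, input_rotate & 31) ^ f->pool[i & 3] ^
                        f->pool[(i + 1) & 3];
                    f->pool[i & 3] = (w >> 3) ^ twist_table[w & 7];
                    input_rotate += (i++ & 3) ? 7 : 14;
            }
            f->count = i;
            f->rotate = input_rotate;
    }

    int main(void)
    {
            struct fast_pool f = { { 0 } };
            uint32_t sample[4] = { 0xdeadbeef, 16, 0x12345678, 0 };

            fast_mix(&f, sample, sizeof(sample));
            printf("%08x %08x %08x %08x\n", (unsigned)f.pool[0],
                   (unsigned)f.pool[1], (unsigned)f.pool[2],
                   (unsigned)f.pool[3]);
            return 0;
    }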
/*
@@ -533,30 +589,38 @@ static void mix_pool_bytes(struct entropy_store *r, const void *in, int bytes)
*/
static void credit_entropy_bits(struct entropy_store *r, int nbits)
{
- unsigned long flags;
- int entropy_count;
+ int entropy_count, orig;
if (!nbits)
return;
- spin_lock_irqsave(&r->lock, flags);
-
DEBUG_ENT("added %d entropy credits to %s\n", nbits, r->name);
- entropy_count = r->entropy_count;
+retry:
+ entropy_count = orig = ACCESS_ONCE(r->entropy_count);
entropy_count += nbits;
+
if (entropy_count < 0) {
DEBUG_ENT("negative entropy/overflow\n");
entropy_count = 0;
} else if (entropy_count > r->poolinfo->POOLBITS)
entropy_count = r->poolinfo->POOLBITS;
- r->entropy_count = entropy_count;
+ if (cmpxchg(&r->entropy_count, orig, entropy_count) != orig)
+ goto retry;
+
+ if (!r->initialized && nbits > 0) {
+ r->entropy_total += nbits;
+ if (r->entropy_total > 128)
+ r->initialized = 1;
+ }
+
+ trace_credit_entropy_bits(r->name, nbits, entropy_count,
+ r->entropy_total, _RET_IP_);
/* should we wake readers? */
if (r == &input_pool && entropy_count >= random_read_wakeup_thresh) {
wake_up_interruptible(&random_read_wait);
kill_fasync(&fasync, SIGIO, POLL_IN);
}
- spin_unlock_irqrestore(&r->lock, flags);
}
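The rewritten credit_entropy_bits() drops the pool spinlock in favor of a snapshot/recompute/cmpxchg retry loop: if another CPU updates entropy_count between the read and the compare-exchange, the clamping math is simply redone on a fresh value. A minimal userspace sketch of the same pattern in C11 atomics (POOL_BITS is a hypothetical stand-in for r->poolinfo->POOLBITS):

    #include <stdatomic.h>

    #define POOL_BITS 4096          /* hypothetical stand-in for POOLBITS */

    static _Atomic int entropy_count;

    static void credit_bits(int nbits)
    {
            int orig, new;

            do {
                    orig = atomic_load(&entropy_count);     /* fresh snapshot */
                    new = orig + nbits;
                    if (new < 0)
                            new = 0;                /* negative/overflow clamp */
                    else if (new > POOL_BITS)
                            new = POOL_BITS;
                    /* lost a race? recompute from the current value */
            } while (!atomic_compare_exchange_weak(&entropy_count, &orig, new));
    }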
/*********************************************************************
@@ -572,42 +636,24 @@ struct timer_rand_state {
unsigned dont_count_entropy:1;
};
-#ifndef CONFIG_GENERIC_HARDIRQS
-
-static struct timer_rand_state *irq_timer_state[NR_IRQS];
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- return irq_timer_state[irq];
-}
-
-static void set_timer_rand_state(unsigned int irq,
- struct timer_rand_state *state)
-{
- irq_timer_state[irq] = state;
-}
-
-#else
-
-static struct timer_rand_state *get_timer_rand_state(unsigned int irq)
-{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
-
- return desc->timer_rand_state;
-}
-
-static void set_timer_rand_state(unsigned int irq,
- struct timer_rand_state *state)
+/*
+ * Add device- or boot-specific data to the input and nonblocking
+ * pools to help initialize them to unique values.
+ *
+ * None of this adds any entropy, it is meant to avoid the
+ * problem of the nonblocking pool having similar initial state
+ * across largely identical devices.
+ */
+void add_device_randomness(const void *buf, unsigned int size)
{
- struct irq_desc *desc;
-
- desc = irq_to_desc(irq);
+ unsigned long time = get_cycles() ^ jiffies;
- desc->timer_rand_state = state;
+ mix_pool_bytes(&input_pool, buf, size, NULL);
+ mix_pool_bytes(&input_pool, &time, sizeof(time), NULL);
+ mix_pool_bytes(&nonblocking_pool, buf, size, NULL);
+ mix_pool_bytes(&nonblocking_pool, &time, sizeof(time), NULL);
}
-#endif
+EXPORT_SYMBOL(add_device_randomness);
static struct timer_rand_state input_timer_state;
@@ -624,8 +670,8 @@ static struct timer_rand_state input_timer_state;
static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
{
struct {
- cycles_t cycles;
long jiffies;
+ unsigned cycles;
unsigned num;
} sample;
long delta, delta2, delta3;
@@ -639,7 +685,7 @@ static void add_timer_randomness(struct timer_rand_state *state, unsigned num)
sample.jiffies = jiffies;
sample.cycles = get_cycles();
sample.num = num;
- mix_pool_bytes(&input_pool, &sample, sizeof(sample));
+ mix_pool_bytes(&input_pool, &sample, sizeof(sample), NULL);
/*
* Calculate number of bits of randomness we probably added.
@@ -696,17 +742,48 @@ void add_input_randomness(unsigned int type, unsigned int code,
}
EXPORT_SYMBOL_GPL(add_input_randomness);
-void add_interrupt_randomness(int irq)
+static DEFINE_PER_CPU(struct fast_pool, irq_randomness);
+
+void add_interrupt_randomness(int irq, int irq_flags)
{
- struct timer_rand_state *state;
+ struct entropy_store *r;
+ struct fast_pool *fast_pool = &__get_cpu_var(irq_randomness);
+ struct pt_regs *regs = get_irq_regs();
+ unsigned long now = jiffies;
+ __u32 input[4], cycles = get_cycles();
+
+ input[0] = cycles ^ jiffies;
+ input[1] = irq;
+ if (regs) {
+ __u64 ip = instruction_pointer(regs);
+ input[2] = ip;
+ input[3] = ip >> 32;
+ }
- state = get_timer_rand_state(irq);
+ fast_mix(fast_pool, input, sizeof(input));
- if (state == NULL)
+ if ((fast_pool->count & 1023) &&
+ !time_after(now, fast_pool->last + HZ))
return;
- DEBUG_ENT("irq event %d\n", irq);
- add_timer_randomness(state, 0x100 + irq);
+ fast_pool->last = now;
+
+ r = nonblocking_pool.initialized ? &input_pool : &nonblocking_pool;
+ __mix_pool_bytes(r, &fast_pool->pool, sizeof(fast_pool->pool), NULL);
+ /*
+ * If we don't have a valid cycle counter, and we see
+ * back-to-back timer interrupts, then skip giving credit for
+ * any entropy.
+ */
+ if (cycles == 0) {
+ if (irq_flags & __IRQF_TIMER) {
+ if (fast_pool->last_timer_intr)
+ return;
+ fast_pool->last_timer_intr = 1;
+ } else
+ fast_pool->last_timer_intr = 0;
+ }
+ credit_entropy_bits(r, 1);
}
#ifdef CONFIG_BLOCK
@@ -738,7 +815,7 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
*/
static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
{
- __u32 tmp[OUTPUT_POOL_WORDS];
+ __u32 tmp[OUTPUT_POOL_WORDS];
if (r->pull && r->entropy_count < nbytes * 8 &&
r->entropy_count < r->poolinfo->POOLBITS) {
@@ -757,7 +834,7 @@ static void xfer_secondary_pool(struct entropy_store *r, size_t nbytes)
bytes = extract_entropy(r->pull, tmp, bytes,
random_read_wakeup_thresh / 8, rsvd);
- mix_pool_bytes(r, tmp, bytes);
+ mix_pool_bytes(r, tmp, bytes, NULL);
credit_entropy_bits(r, bytes*8);
}
}
@@ -816,13 +893,19 @@ static size_t account(struct entropy_store *r, size_t nbytes, int min,
static void extract_buf(struct entropy_store *r, __u8 *out)
{
int i;
- __u32 hash[5], workspace[SHA_WORKSPACE_WORDS];
+ union {
+ __u32 w[5];
+ unsigned long l[LONGS(EXTRACT_SIZE)];
+ } hash;
+ __u32 workspace[SHA_WORKSPACE_WORDS];
__u8 extract[64];
+ unsigned long flags;
/* Generate a hash across the pool, 16 words (512 bits) at a time */
- sha_init(hash);
+ sha_init(hash.w);
+ spin_lock_irqsave(&r->lock, flags);
for (i = 0; i < r->poolinfo->poolwords; i += 16)
- sha_transform(hash, (__u8 *)(r->pool + i), workspace);
+ sha_transform(hash.w, (__u8 *)(r->pool + i), workspace);
/*
* We mix the hash back into the pool to prevent backtracking
@@ -833,13 +916,14 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* brute-forcing the feedback as hard as brute-forcing the
* hash.
*/
- mix_pool_bytes_extract(r, hash, sizeof(hash), extract);
+ __mix_pool_bytes(r, hash.w, sizeof(hash.w), extract);
+ spin_unlock_irqrestore(&r->lock, flags);
/*
* To avoid duplicates, we atomically extract a portion of the
* pool while mixing, and hash one final time.
*/
- sha_transform(hash, extract, workspace);
+ sha_transform(hash.w, extract, workspace);
memset(extract, 0, sizeof(extract));
memset(workspace, 0, sizeof(workspace));
@@ -848,20 +932,32 @@ static void extract_buf(struct entropy_store *r, __u8 *out)
* pattern, we fold it in half. Thus, we always feed back
* twice as much data as we output.
*/
- hash[0] ^= hash[3];
- hash[1] ^= hash[4];
- hash[2] ^= rol32(hash[2], 16);
- memcpy(out, hash, EXTRACT_SIZE);
- memset(hash, 0, sizeof(hash));
+ hash.w[0] ^= hash.w[3];
+ hash.w[1] ^= hash.w[4];
+ hash.w[2] ^= rol32(hash.w[2], 16);
+
+ /*
+ * If we have an architectural hardware random number
+ * generator, mix that in, too.
+ */
+ for (i = 0; i < LONGS(EXTRACT_SIZE); i++) {
+ unsigned long v;
+ if (!arch_get_random_long(&v))
+ break;
+ hash.l[i] ^= v;
+ }
+
+ memcpy(out, &hash, EXTRACT_SIZE);
+ memset(&hash, 0, sizeof(hash));
}
static ssize_t extract_entropy(struct entropy_store *r, void *buf,
- size_t nbytes, int min, int reserved)
+ size_t nbytes, int min, int reserved)
{
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
- unsigned long flags;
+ trace_extract_entropy(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, min, reserved);
@@ -869,6 +965,8 @@ static ssize_t extract_entropy(struct entropy_store *r, void *buf,
extract_buf(r, tmp);
if (fips_enabled) {
+ unsigned long flags;
+
spin_lock_irqsave(&r->lock, flags);
if (!memcmp(tmp, r->last_data, EXTRACT_SIZE))
panic("Hardware RNG duplicated output!\n");
@@ -894,6 +992,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
ssize_t ret = 0, i;
__u8 tmp[EXTRACT_SIZE];
+ trace_extract_entropy_user(r->name, nbytes, r->entropy_count, _RET_IP_);
xfer_secondary_pool(r, nbytes);
nbytes = account(r, nbytes, 0, 0);
@@ -927,8 +1026,9 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
/*
* This function is the exported kernel interface. It returns some
- * number of good random numbers, suitable for seeding TCP sequence
- * numbers, etc.
+ * number of good random numbers, suitable for key generation, seeding
+ * TCP sequence numbers, etc. It does not use the hw random number
+ * generator, if available; use get_random_bytes_arch() for that.
*/
void get_random_bytes(void *buf, int nbytes)
{
@@ -937,6 +1037,39 @@ void get_random_bytes(void *buf, int nbytes)
EXPORT_SYMBOL(get_random_bytes);
/*
+ * This function will use the architecture-specific hardware random
+ * number generator if it is available. The arch-specific hw RNG will
+ * almost certainly be faster than what we can do in software, but it
+ * is impossible to verify that it is implemented securely (as
+ * opposed to, say, the AES encryption of a sequence number using a
+ * key known by the NSA). So it's useful if we need the speed, but
+ * only if we're willing to trust the hardware manufacturer not to
+ * have put in a back door.
+ */
+void get_random_bytes_arch(void *buf, int nbytes)
+{
+ char *p = buf;
+
+ trace_get_random_bytes(nbytes, _RET_IP_);
+ while (nbytes) {
+ unsigned long v;
+ int chunk = min(nbytes, (int)sizeof(unsigned long));
+
+ if (!arch_get_random_long(&v))
+ break;
+
+ memcpy(p, &v, chunk);
+ p += chunk;
+ nbytes -= chunk;
+ }
+
+ if (nbytes)
+ extract_entropy(&nonblocking_pool, p, nbytes, 0, 0);
+}
+EXPORT_SYMBOL(get_random_bytes_arch);
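A usage sketch (kernel context; the caller and buffer are hypothetical): the helper fills as much of the buffer as arch_get_random_long() will provide and falls back to the software pool for the remainder, so the buffer is fully initialized either way.

    u8 seed[32];

    get_random_bytes_arch(seed, sizeof(seed));
    /* seed[] is now fully written: leading bytes from the hardware
     * RNG when available, the rest drawn from the nonblocking pool
     * via extract_entropy(). */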
+
+
+/*
* init_std_data - initialize pool with system data
*
* @r: pool to initialize
@@ -947,18 +1080,31 @@ EXPORT_SYMBOL(get_random_bytes);
*/
static void init_std_data(struct entropy_store *r)
{
- ktime_t now;
- unsigned long flags;
+ int i;
+ ktime_t now = ktime_get_real();
+ unsigned long rv;
- spin_lock_irqsave(&r->lock, flags);
r->entropy_count = 0;
- spin_unlock_irqrestore(&r->lock, flags);
-
- now = ktime_get_real();
- mix_pool_bytes(r, &now, sizeof(now));
- mix_pool_bytes(r, utsname(), sizeof(*(utsname())));
+ r->entropy_total = 0;
+ mix_pool_bytes(r, &now, sizeof(now), NULL);
+ for (i = r->poolinfo->POOLBYTES; i > 0; i -= sizeof(rv)) {
+ if (!arch_get_random_long(&rv))
+ break;
+ mix_pool_bytes(r, &rv, sizeof(rv), NULL);
+ }
+ mix_pool_bytes(r, utsname(), sizeof(*(utsname())), NULL);
}
+/*
+ * Note that setup_arch() may call add_device_randomness()
+ * long before we get here. This allows seeding of the pools
+ * with some platform dependent data very early in the boot
+ * process. But it limits our options here. We must use
+ * statically allocated structures that already have all
+ * initializations complete at compile time. We should also
+ * take care not to overwrite the precious per platform data
+ * we were given.
+ */
static int rand_initialize(void)
{
init_std_data(&input_pool);
@@ -968,24 +1114,6 @@ static int rand_initialize(void)
}
module_init(rand_initialize);
-void rand_initialize_irq(int irq)
-{
- struct timer_rand_state *state;
-
- state = get_timer_rand_state(irq);
-
- if (state)
- return;
-
- /*
- * If kzalloc returns null, we just won't use that entropy
- * source.
- */
- state = kzalloc(sizeof(struct timer_rand_state), GFP_KERNEL);
- if (state)
- set_timer_rand_state(irq, state);
-}
-
#ifdef CONFIG_BLOCK
void rand_initialize_disk(struct gendisk *disk)
{
@@ -1093,7 +1221,7 @@ write_pool(struct entropy_store *r, const char __user *buffer, size_t count)
count -= bytes;
p += bytes;
- mix_pool_bytes(r, buf, bytes);
+ mix_pool_bytes(r, buf, bytes, NULL);
cond_resched();
}
@@ -1236,10 +1364,15 @@ static int proc_do_uuid(ctl_table *table, int write,
uuid = table->data;
if (!uuid) {
uuid = tmp_uuid;
- uuid[8] = 0;
- }
- if (uuid[8] == 0)
generate_random_uuid(uuid);
+ } else {
+ static DEFINE_SPINLOCK(bootid_spinlock);
+
+ spin_lock(&bootid_spinlock);
+ if (!uuid[8])
+ generate_random_uuid(uuid);
+ spin_unlock(&bootid_spinlock);
+ }
sprintf(buf, "%pU", uuid);
@@ -1318,9 +1451,14 @@ late_initcall(random_int_secret_init);
DEFINE_PER_CPU(__u32 [MD5_DIGEST_WORDS], get_random_int_hash);
unsigned int get_random_int(void)
{
- __u32 *hash = get_cpu_var(get_random_int_hash);
+ __u32 *hash;
unsigned int ret;
+ if (arch_get_random_int(&ret))
+ return ret;
+
+ hash = get_cpu_var(get_random_int_hash);
+
hash[0] += current->pid + jiffies + get_cycles();
md5_transform(hash, random_int_secret);
ret = hash[0];
diff --git a/drivers/edac/i7core_edac.c b/drivers/edac/i7core_edac.c
index f6cf448..240966b 100644
--- a/drivers/edac/i7core_edac.c
+++ b/drivers/edac/i7core_edac.c
@@ -1842,11 +1842,9 @@ static int i7core_mce_check_error(void *priv, struct mce *mce)
if (mce->bank != 8)
return 0;
-#ifdef CONFIG_SMP
/* Only handle if it is the right mc controller */
if (cpu_data(mce->cpu).phys_proc_id != pvt->i7core_dev->socket)
return 0;
-#endif
smp_rmb();
if ((pvt->mce_out + 1) % MCE_LOG_LEN == pvt->mce_in) {
diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
index bcb1126..02a52d1 100644
--- a/drivers/firmware/dmi_scan.c
+++ b/drivers/firmware/dmi_scan.c
@@ -6,6 +6,7 @@
#include <linux/dmi.h>
#include <linux/efi.h>
#include <linux/bootmem.h>
+#include <linux/random.h>
#include <asm/dmi.h>
/*
@@ -111,6 +112,8 @@ static int __init dmi_walk_early(void (*decode)(const struct dmi_header *,
dmi_table(buf, dmi_len, dmi_num, decode, NULL);
+ add_device_randomness(buf, dmi_len);
+
dmi_iounmap(buf, dmi_len);
return 0;
}
diff --git a/drivers/firmware/pcdp.c b/drivers/firmware/pcdp.c
index 51e0e2d..a330492 100644
--- a/drivers/firmware/pcdp.c
+++ b/drivers/firmware/pcdp.c
@@ -95,7 +95,7 @@ efi_setup_pcdp_console(char *cmdline)
if (efi.hcdp == EFI_INVALID_TABLE_ADDR)
return -ENODEV;
- pcdp = ioremap(efi.hcdp, 4096);
+ pcdp = early_ioremap(efi.hcdp, 4096);
printk(KERN_INFO "PCDP: v%d at 0x%lx\n", pcdp->rev, efi.hcdp);
if (strstr(cmdline, "console=hcdp")) {
@@ -131,6 +131,6 @@ efi_setup_pcdp_console(char *cmdline)
}
out:
- iounmap(pcdp);
+ early_iounmap(pcdp, 4096);
return rc;
}
diff --git a/drivers/gpu/drm/i915/intel_ringbuffer.c b/drivers/gpu/drm/i915/intel_ringbuffer.c
index 2d6039b..3bd85f7 100644
--- a/drivers/gpu/drm/i915/intel_ringbuffer.c
+++ b/drivers/gpu/drm/i915/intel_ringbuffer.c
@@ -150,8 +150,6 @@ static int init_ring_common(struct intel_ring_buffer *ring)
I915_WRITE_HEAD(ring, 0);
ring->write_tail(ring, 0);
- /* Initialize the ring. */
- I915_WRITE_START(ring, obj->gtt_offset);
head = I915_READ_HEAD(ring) & HEAD_ADDR;
/* G45 ring initialization fails to reset head to zero */
@@ -177,6 +175,11 @@ static int init_ring_common(struct intel_ring_buffer *ring)
}
}
+ /* Initialize the ring. This must happen _after_ we've cleared the ring
+ * registers with the above sequence (the readback of the HEAD registers
+ * also enforces ordering), otherwise the hw might lose the new ring
+ * register values. */
+ I915_WRITE_START(ring, obj->gtt_offset);
I915_WRITE_CTL(ring,
((ring->size - PAGE_SIZE) & RING_NR_PAGES)
| RING_REPORT_64K | RING_VALID);
diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
index 3b77ad6..efc2b21 100644
--- a/drivers/gpu/drm/radeon/atombios_dp.c
+++ b/drivers/gpu/drm/radeon/atombios_dp.c
@@ -22,6 +22,7 @@
*
* Authors: Dave Airlie
* Alex Deucher
+ * Jerome Glisse
*/
#include "drmP.h"
#include "radeon_drm.h"
@@ -624,7 +625,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
link_status, DP_LINK_STATUS_SIZE, 100);
if (ret <= 0) {
- DRM_ERROR("displayport link status failed\n");
return false;
}
@@ -797,8 +797,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
else
mdelay(dp_info->rd_interval * 4);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
break;
+ }
if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
clock_recovery = true;
@@ -860,8 +862,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
else
mdelay(dp_info->rd_interval * 4);
- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
+ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
+ DRM_ERROR("displayport link status failed\n");
break;
+ }
if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
channel_eq = true;
diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c
index 8846bad..314e217 100644
--- a/drivers/gpu/drm/radeon/evergreen.c
+++ b/drivers/gpu/drm/radeon/evergreen.c
@@ -1019,24 +1019,8 @@ void evergreen_agp_enable(struct radeon_device *rdev)
void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *save)
{
- save->vga_control[0] = RREG32(D1VGA_CONTROL);
- save->vga_control[1] = RREG32(D2VGA_CONTROL);
save->vga_render_control = RREG32(VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(VGA_HDP_CONTROL);
- save->crtc_control[0] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET);
- save->crtc_control[1] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET);
- if (rdev->num_crtc >= 4) {
- save->vga_control[2] = RREG32(EVERGREEN_D3VGA_CONTROL);
- save->vga_control[3] = RREG32(EVERGREEN_D4VGA_CONTROL);
- save->crtc_control[2] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET);
- save->crtc_control[3] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET);
- }
- if (rdev->num_crtc >= 6) {
- save->vga_control[4] = RREG32(EVERGREEN_D5VGA_CONTROL);
- save->vga_control[5] = RREG32(EVERGREEN_D6VGA_CONTROL);
- save->crtc_control[4] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET);
- save->crtc_control[5] = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET);
- }
/* Stop all video */
WREG32(VGA_RENDER_CONTROL, 0);
@@ -1147,47 +1131,6 @@ void evergreen_mc_resume(struct radeon_device *rdev, struct evergreen_mc_save *s
/* Unlock host access */
WREG32(VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- /* Restore video state */
- WREG32(D1VGA_CONTROL, save->vga_control[0]);
- WREG32(D2VGA_CONTROL, save->vga_control[1]);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_D3VGA_CONTROL, save->vga_control[2]);
- WREG32(EVERGREEN_D4VGA_CONTROL, save->vga_control[3]);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_D5VGA_CONTROL, save->vga_control[4]);
- WREG32(EVERGREEN_D6VGA_CONTROL, save->vga_control[5]);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 1);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 1);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 1);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 1);
- }
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET, save->crtc_control[0]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET, save->crtc_control[1]);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET, save->crtc_control[2]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET, save->crtc_control[3]);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET, save->crtc_control[4]);
- WREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET, save->crtc_control[5]);
- }
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC0_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC1_REGISTER_OFFSET, 0);
- if (rdev->num_crtc >= 4) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC2_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC3_REGISTER_OFFSET, 0);
- }
- if (rdev->num_crtc >= 6) {
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC4_REGISTER_OFFSET, 0);
- WREG32(EVERGREEN_CRTC_UPDATE_LOCK + EVERGREEN_CRTC5_REGISTER_OFFSET, 0);
- }
WREG32(VGA_RENDER_CONTROL, save->vga_render_control);
}
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 3dedaa0..4d81e96 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -253,13 +253,10 @@ void rs690_line_buffer_adjust(struct radeon_device *rdev,
* rv515
*/
struct rv515_mc_save {
- u32 d1vga_control;
- u32 d2vga_control;
u32 vga_render_control;
u32 vga_hdp_control;
- u32 d1crtc_control;
- u32 d2crtc_control;
};
+
int rv515_init(struct radeon_device *rdev);
void rv515_fini(struct radeon_device *rdev);
uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
@@ -387,11 +384,10 @@ void r700_cp_fini(struct radeon_device *rdev);
* evergreen
*/
struct evergreen_mc_save {
- u32 vga_control[6];
u32 vga_render_control;
u32 vga_hdp_control;
- u32 crtc_control[6];
};
+
void evergreen_pcie_gart_tlb_flush(struct radeon_device *rdev);
int evergreen_init(struct radeon_device *rdev);
void evergreen_fini(struct radeon_device *rdev);
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index 1f6a0f5..f1a1e8a 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -66,14 +66,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
/* just deal with DP (not eDP) here. */
if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
- int saved_dpms = connector->dpms;
-
- /* Only turn off the display it it's physically disconnected */
- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
- else if (radeon_dp_needs_link_train(radeon_connector))
- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
- connector->dpms = saved_dpms;
+ struct radeon_connector_atom_dig *dig_connector =
+ radeon_connector->con_priv;
+
+ /* if existing sink type was not DP no need to retrain */
+ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
+ return;
+
+ /* first get sink type as it may be reset after (un)plug */
+ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
+ /* don't do anything if sink is not display port, i.e.,
+ * passive dp->(dvi|hdmi) adaptor
+ */
+ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
+ int saved_dpms = connector->dpms;
+ /* Only turn off the display if it's physically disconnected */
+ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+ } else if (radeon_dp_needs_link_train(radeon_connector)) {
+ /* set it to OFF so that drm_helper_connector_dpms()
+ * won't return immediately since the current state
+ * is ON at this point.
+ */
+ connector->dpms = DRM_MODE_DPMS_OFF;
+ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+ }
+ connector->dpms = saved_dpms;
+ }
}
}
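
The DPMS_OFF trick described in the comment above works around an early return in the DRM helper, which bails out when asked for the state it believes is already current. Roughly, paraphrased from drm_crtc_helper.c (check the source for the exact form):

    void drm_helper_connector_dpms(struct drm_connector *connector, int mode)
    {
            if (mode == connector->dpms)
                    return;         /* why the caller fakes DPMS_OFF first */
            /* ... otherwise program the encoders and crtcs ... */
    }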
diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
index 3fb2226..72f749d 100644
--- a/drivers/gpu/drm/radeon/radeon_cursor.c
+++ b/drivers/gpu/drm/radeon/radeon_cursor.c
@@ -257,8 +257,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
if (!(cursor_end & 0x7f))
w--;
}
- if (w <= 0)
+ if (w <= 0) {
w = 1;
+ cursor_end = x - xorigin + w;
+ if (!(cursor_end & 0x7f)) {
+ x--;
+ WARN_ON_ONCE(x < 0);
+ }
+ }
}
}
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index 6613ee9..d5f45b4 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -281,12 +281,8 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
{
- save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
- save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
- save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
- save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
/* Stop all video */
WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
@@ -311,15 +307,6 @@ void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
/* Unlock host access */
WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
mdelay(1);
- /* Restore video state */
- WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
- WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
- WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
- WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
- WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
- WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
}
diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
index 252defd..87fd034 100644
--- a/drivers/hwmon/coretemp.c
+++ b/drivers/hwmon/coretemp.c
@@ -47,16 +47,15 @@
#define MAX_ATTRS 5 /* Maximum no of per-core attrs */
#define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO)
-#ifdef CONFIG_SMP
#define TO_PHYS_ID(cpu) cpu_data(cpu).phys_proc_id
#define TO_CORE_ID(cpu) cpu_data(cpu).cpu_core_id
+#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
+
+#ifdef CONFIG_SMP
#define for_each_sibling(i, cpu) for_each_cpu(i, cpu_sibling_mask(cpu))
#else
-#define TO_PHYS_ID(cpu) (cpu)
-#define TO_CORE_ID(cpu) (cpu)
#define for_each_sibling(i, cpu) for (i = 0; false; )
#endif
-#define TO_ATTR_NO(cpu) (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
/*
* Per-Core Temperature Data
diff --git a/drivers/infiniband/ulp/srp/ib_srp.c b/drivers/infiniband/ulp/srp/ib_srp.c
index 7d5109b..e763e30 100644
--- a/drivers/infiniband/ulp/srp/ib_srp.c
+++ b/drivers/infiniband/ulp/srp/ib_srp.c
@@ -568,24 +568,62 @@ static void srp_unmap_data(struct scsi_cmnd *scmnd,
scmnd->sc_data_direction);
}
-static void srp_remove_req(struct srp_target_port *target,
- struct srp_request *req, s32 req_lim_delta)
+/**
+ * srp_claim_req - Take ownership of the scmnd associated with a request.
+ * @target: SRP target port.
+ * @req: SRP request.
+ * @scmnd: If NULL, take ownership of @req->scmnd. If not NULL, only take
+ * ownership of @req->scmnd if it equals @scmnd.
+ *
+ * Return value:
+ * Either NULL or a pointer to the SCSI command the caller became owner of.
+ */
+static struct scsi_cmnd *srp_claim_req(struct srp_target_port *target,
+ struct srp_request *req,
+ struct scsi_cmnd *scmnd)
{
unsigned long flags;
- srp_unmap_data(req->scmnd, target, req);
+ spin_lock_irqsave(&target->lock, flags);
+ if (!scmnd) {
+ scmnd = req->scmnd;
+ req->scmnd = NULL;
+ } else if (req->scmnd == scmnd) {
+ req->scmnd = NULL;
+ } else {
+ scmnd = NULL;
+ }
+ spin_unlock_irqrestore(&target->lock, flags);
+
+ return scmnd;
+}
+
+/**
+ * srp_free_req() - Unmap data and add request to the free request list.
+ */
+static void srp_free_req(struct srp_target_port *target,
+ struct srp_request *req, struct scsi_cmnd *scmnd,
+ s32 req_lim_delta)
+{
+ unsigned long flags;
+
+ srp_unmap_data(scmnd, target, req);
+
spin_lock_irqsave(&target->lock, flags);
target->req_lim += req_lim_delta;
- req->scmnd = NULL;
list_add_tail(&req->list, &target->free_reqs);
spin_unlock_irqrestore(&target->lock, flags);
}
static void srp_reset_req(struct srp_target_port *target, struct srp_request *req)
{
- req->scmnd->result = DID_RESET << 16;
- req->scmnd->scsi_done(req->scmnd);
- srp_remove_req(target, req, 0);
+ struct scsi_cmnd *scmnd = srp_claim_req(target, req, NULL);
+
+ if (scmnd) {
+ scmnd->result = DID_RESET << 16;
+ scmnd->scsi_done(scmnd);
+ srp_free_req(target, req, scmnd, 0);
+ }
}
static int srp_reconnect_target(struct srp_target_port *target)
@@ -1055,11 +1093,18 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
complete(&target->tsk_mgmt_done);
} else {
req = &target->req_ring[rsp->tag];
- scmnd = req->scmnd;
- if (!scmnd)
+ scmnd = srp_claim_req(target, req, NULL);
+ if (!scmnd) {
shost_printk(KERN_ERR, target->scsi_host,
"Null scmnd for RSP w/tag %016llx\n",
(unsigned long long) rsp->tag);
+
+ spin_lock_irqsave(&target->lock, flags);
+ target->req_lim += be32_to_cpu(rsp->req_lim_delta);
+ spin_unlock_irqrestore(&target->lock, flags);
+
+ return;
+ }
scmnd->result = rsp->status;
if (rsp->flags & SRP_RSP_FLAG_SNSVALID) {
@@ -1074,7 +1119,9 @@ static void srp_process_rsp(struct srp_target_port *target, struct srp_rsp *rsp)
else if (rsp->flags & (SRP_RSP_FLAG_DIOVER | SRP_RSP_FLAG_DIUNDER))
scsi_set_resid(scmnd, be32_to_cpu(rsp->data_in_res_cnt));
- srp_remove_req(target, req, be32_to_cpu(rsp->req_lim_delta));
+ srp_free_req(target, req, scmnd,
+ be32_to_cpu(rsp->req_lim_delta));
+
scmnd->host_scribble = NULL;
scmnd->scsi_done(scmnd);
}
@@ -1613,25 +1660,17 @@ static int srp_abort(struct scsi_cmnd *scmnd)
{
struct srp_target_port *target = host_to_target(scmnd->device->host);
struct srp_request *req = (struct srp_request *) scmnd->host_scribble;
- int ret = SUCCESS;
shost_printk(KERN_ERR, target->scsi_host, "SRP abort called\n");
- if (!req || target->qp_in_error)
- return FAILED;
- if (srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
- SRP_TSK_ABORT_TASK))
+ if (!req || target->qp_in_error || !srp_claim_req(target, req, scmnd))
return FAILED;
+ srp_send_tsk_mgmt(target, req->index, scmnd->device->lun,
+ SRP_TSK_ABORT_TASK);
+ srp_free_req(target, req, scmnd, 0);
+ scmnd->result = DID_ABORT << 16;
- if (req->scmnd) {
- if (!target->tsk_mgmt_status) {
- srp_remove_req(target, req, 0);
- scmnd->result = DID_ABORT << 16;
- } else
- ret = FAILED;
- }
-
- return ret;
+ return SUCCESS;
}
static int srp_reset_device(struct scsi_cmnd *scmnd)
diff --git a/drivers/input/tablet/wacom_wac.c b/drivers/input/tablet/wacom_wac.c
index 08ba5ad..a28ebf0 100644
--- a/drivers/input/tablet/wacom_wac.c
+++ b/drivers/input/tablet/wacom_wac.c
@@ -242,7 +242,7 @@ static int wacom_graphire_irq(struct wacom_wac *wacom)
input_report_abs(input, ABS_X, le16_to_cpup((__le16 *)&data[2]));
input_report_abs(input, ABS_Y, le16_to_cpup((__le16 *)&data[4]));
if (wacom->tool[0] != BTN_TOOL_MOUSE) {
- input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x01) << 8));
+ input_report_abs(input, ABS_PRESSURE, data[6] | ((data[7] & 0x03) << 8));
input_report_key(input, BTN_TOUCH, data[1] & 0x01);
input_report_key(input, BTN_STYLUS, data[1] & 0x02);
input_report_key(input, BTN_STYLUS2, data[1] & 0x04);
diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
index 9bfd057..42ef54f 100644
--- a/drivers/md/dm-raid1.c
+++ b/drivers/md/dm-raid1.c
@@ -1210,7 +1210,7 @@ static int mirror_end_io(struct dm_target *ti, struct bio *bio,
* We need to dec pending if this was a write.
*/
if (rw == WRITE) {
- if (!(bio->bi_rw & REQ_FLUSH))
+ if (!(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD)))
dm_rh_dec(ms->rh, map_context->ll);
return error;
}
diff --git a/drivers/md/dm-region-hash.c b/drivers/md/dm-region-hash.c
index 7771ed2..69732e0 100644
--- a/drivers/md/dm-region-hash.c
+++ b/drivers/md/dm-region-hash.c
@@ -404,6 +404,9 @@ void dm_rh_mark_nosync(struct dm_region_hash *rh, struct bio *bio)
return;
}
+ if (bio->bi_rw & REQ_DISCARD)
+ return;
+
/* We must inform the log that the sync count has changed. */
log->type->set_region_sync(log, region, 0);
@@ -524,7 +527,7 @@ void dm_rh_inc_pending(struct dm_region_hash *rh, struct bio_list *bios)
struct bio *bio;
for (bio = bios->head; bio; bio = bio->bi_next) {
- if (bio->bi_rw & REQ_FLUSH)
+ if (bio->bi_rw & (REQ_FLUSH | REQ_DISCARD))
continue;
rh_inc(rh, dm_rh_bio_to_region(rh, bio));
}
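
Taken together, the dm-raid1 and dm-region-hash hunks keep the region pending counts balanced: a bio skipped by dm_rh_inc_pending() must also be skipped on the decrement side in mirror_end_io(), and REQ_DISCARD now joins REQ_FLUSH in both predicates. The shared invariant could be written as a helper (hypothetical, not in the patch):

    /* Hypothetical helper illustrating the invariant; not in the patch. */
    static inline bool dm_rh_bio_is_counted(struct bio *bio)
    {
            /* flushes and discards bypass region-hash accounting */
            return !(bio->bi_rw & (REQ_FLUSH | REQ_DISCARD));
    }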
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index a20e1c4..ccd81b1 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -408,8 +408,6 @@ static irqreturn_t ab3100_irq_handler(int irq, void *data)
u32 fatevent;
int err;
- add_interrupt_randomness(irq);
-
err = ab3100_get_register_page_interruptible(ab3100, AB3100_EVENTA1,
event_regs, 3);
if (err)
@@ -938,9 +936,6 @@ static int __devinit ab3100_probe(struct i2c_client *client,
err = request_threaded_irq(client->irq, NULL, ab3100_irq_handler,
IRQF_ONESHOT, "ab3100-core", ab3100);
- /* This real unpredictable IRQ is of course sampled for entropy */
- rand_initialize_irq(client->irq);
-
if (err)
goto exit_no_irq;
diff --git a/drivers/mfd/ab3550-core.c b/drivers/mfd/ab3550-core.c
index 3d7dce6..d69dc4b 100644
--- a/drivers/mfd/ab3550-core.c
+++ b/drivers/mfd/ab3550-core.c
@@ -1309,8 +1309,6 @@ static int __init ab3550_probe(struct i2c_client *client,
err = request_threaded_irq(client->irq, NULL, ab3550_irq_handler,
IRQF_ONESHOT, "ab3550-core", ab);
- /* This real unpredictable IRQ is of course sampled for entropy */
- rand_initialize_irq(client->irq);
if (err)
goto exit_no_irq;
diff --git a/drivers/mfd/ezx-pcap.c b/drivers/mfd/ezx-pcap.c
index 43a76c4..db662e2 100644
--- a/drivers/mfd/ezx-pcap.c
+++ b/drivers/mfd/ezx-pcap.c
@@ -202,7 +202,7 @@ static void pcap_isr_work(struct work_struct *work)
}
local_irq_enable();
ezx_pcap_write(pcap, PCAP_REG_MSR, pcap->msr);
- } while (gpio_get_value(irq_to_gpio(pcap->spi->irq)));
+ } while (gpio_get_value(pdata->gpio));
}
static void pcap_irq_handler(unsigned int irq, struct irq_desc *desc)
diff --git a/drivers/mfd/wm831x-otp.c b/drivers/mfd/wm831x-otp.c
index f742745..b90f3e0 100644
--- a/drivers/mfd/wm831x-otp.c
+++ b/drivers/mfd/wm831x-otp.c
@@ -18,6 +18,7 @@
#include <linux/bcd.h>
#include <linux/delay.h>
#include <linux/mfd/core.h>
+#include <linux/random.h>
#include <linux/mfd/wm831x/core.h>
#include <linux/mfd/wm831x/otp.h>
@@ -66,6 +67,7 @@ static DEVICE_ATTR(unique_id, 0444, wm831x_unique_id_show, NULL);
int wm831x_otp_init(struct wm831x *wm831x)
{
+ char uuid[WM831X_UNIQUE_ID_LEN];
int ret;
ret = device_create_file(wm831x->dev, &dev_attr_unique_id);
@@ -73,6 +75,12 @@ int wm831x_otp_init(struct wm831x *wm831x)
dev_err(wm831x->dev, "Unique ID attribute not created: %d\n",
ret);
+ ret = wm831x_unique_id_read(wm831x, uuid);
+ if (ret == 0)
+ add_device_randomness(uuid, sizeof(uuid));
+ else
+ dev_err(wm831x->dev, "Failed to read UUID: %d\n", ret);
+
return ret;
}
diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
index 936bbca..d3b3115 100644
--- a/drivers/mmc/host/sdhci-pci.c
+++ b/drivers/mmc/host/sdhci-pci.c
@@ -140,6 +140,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
static const struct sdhci_pci_fixes sdhci_cafe = {
.quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
SDHCI_QUIRK_NO_BUSY_IRQ |
+ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
};
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 74580bb..c9b123c 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -5310,7 +5310,7 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
int k, last;
if (skb == NULL) {
- j++;
+ j = NEXT_TX_BD(j);
continue;
}
@@ -5322,8 +5322,8 @@ bnx2_free_tx_skbs(struct bnx2 *bp)
tx_buf->skb = NULL;
last = tx_buf->nr_frags;
- j++;
- for (k = 0; k < last; k++, j++) {
+ j = NEXT_TX_BD(j);
+ for (k = 0; k < last; k++, j = NEXT_TX_BD(j)) {
tx_buf = &txr->tx_buf_ring[TX_RING_IDX(j)];
dma_unmap_page(&bp->pdev->dev,
dma_unmap_addr(tx_buf, mapping),
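
The plain j++ being replaced can land on the chain descriptor that terminates each page of the ring; NEXT_TX_BD() steps over it. For reference, the macro in bnx2.h is approximately (quoted from memory, verify against the header):

    /* From drivers/net/bnx2.h (approximate): the last slot of each
     * descriptor page is a chain entry, so skip it. */
    #define NEXT_TX_BD(x) (((x) & (MAX_TX_DESC_CNT - 1)) ==     \
            (MAX_TX_DESC_CNT - 1)) ? (x) + 2 : (x) + 1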
diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
index 3df0c0f..82b1802 100644
--- a/drivers/net/caif/caif_serial.c
+++ b/drivers/net/caif/caif_serial.c
@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
sprintf(name, "cf%s", tty->name);
dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
+ if (!dev)
+ return -ENOMEM;
+
ser = netdev_priv(dev);
ser->tty = tty_kref_get(tty);
ser->dev = dev;
diff --git a/drivers/net/e1000e/82571.c b/drivers/net/e1000e/82571.c
index 5278e84..0d0ee55 100644
--- a/drivers/net/e1000e/82571.c
+++ b/drivers/net/e1000e/82571.c
@@ -1602,10 +1602,8 @@ static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
* auto-negotiation in the TXCW register and disable
* forced link in the Device Control register in an
* attempt to auto-negotiate with our link partner.
- * If the partner code word is null, stop forcing
- * and restart auto negotiation.
*/
- if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW)) {
+ if (rxcw & E1000_RXCW_C) {
/* Enable autoneg, and unforce link up */
ew32(TXCW, mac->txcw);
ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 76b8650..3cc22b9 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -1245,10 +1245,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
}
#endif
- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
+ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
if (copy_from_user(&ifr, argp, ifreq_len))
return -EFAULT;
-
+ } else {
+ memset(&ifr, 0, sizeof(ifr));
+ }
if (cmd == TUNGETFEATURES) {
/* Currently this just means: "what IFF flags are valid?".
* This is needed because we never checked for invalid flags on
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index ad0298f..3362449 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -1308,7 +1308,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
int retv;
int length = 0; /* shut up GCC */
- urb = usb_alloc_urb(0, GFP_NOIO);
+ urb = usb_alloc_urb(0, GFP_ATOMIC);
if (!urb)
return -ENOMEM;
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 0781fb0..39ebf9f 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -829,6 +829,7 @@ static struct usb_device_id rt2800usb_device_table[] = {
{ USB_DEVICE(0x0411, 0x015d) },
{ USB_DEVICE(0x0411, 0x016f) },
{ USB_DEVICE(0x0411, 0x01a2) },
+ { USB_DEVICE(0x0411, 0x01ee) },
/* Corega */
{ USB_DEVICE(0x07aa, 0x002f) },
{ USB_DEVICE(0x07aa, 0x003c) },
diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c
index 9d35ec1..9e5fd45 100644
--- a/drivers/net/wireless/rt2x00/rt61pci.c
+++ b/drivers/net/wireless/rt2x00/rt61pci.c
@@ -2254,8 +2254,7 @@ static void rt61pci_txdone(struct rt2x00_dev *rt2x00dev)
static void rt61pci_wakeup(struct rt2x00_dev *rt2x00dev)
{
- struct ieee80211_conf conf = { .flags = 0 };
- struct rt2x00lib_conf libconf = { .conf = &conf };
+ struct rt2x00lib_conf libconf = { .conf = &rt2x00dev->hw->conf };
rt61pci_config(rt2x00dev, &libconf, IEEE80211_CONF_CHANGE_PS);
}
diff --git a/drivers/rtc/rtc-wm831x.c b/drivers/rtc/rtc-wm831x.c
index bdc909b..f3c2110 100644
--- a/drivers/rtc/rtc-wm831x.c
+++ b/drivers/rtc/rtc-wm831x.c
@@ -24,7 +24,7 @@
#include <linux/mfd/wm831x/core.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
-
+#include <linux/random.h>
/*
* R16416 (0x4020) - RTC Write Counter
@@ -96,6 +96,26 @@ struct wm831x_rtc {
unsigned int alarm_enabled:1;
};
+static void wm831x_rtc_add_randomness(struct wm831x *wm831x)
+{
+ int ret;
+ u16 reg;
+
+ /*
+ * The write counter contains a pseudo-random number which is
+ * regenerated every time we set the RTC so it should be a
+ * useful per-system source of entropy.
+ */
+ ret = wm831x_reg_read(wm831x, WM831X_RTC_WRITE_COUNTER);
+ if (ret >= 0) {
+ reg = ret;
+ add_device_randomness(&reg, sizeof(reg));
+ } else {
+ dev_warn(wm831x->dev, "Failed to read RTC write counter: %d\n",
+ ret);
+ }
+}
+
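
This hunk, the wm831x-otp hunk above, and the usb hub hunk below all feed the same helper. Its contract, per drivers/char/random.c in this series, is to mix device-specific data plus a timestamp into the pools without crediting any entropy, which is why predictable-but-unique values such as this counter are safe inputs:

    /* Declared in <linux/random.h>; mixes buf into the entropy pools
     * without crediting entropy. */
    void add_device_randomness(const void *buf, unsigned int size);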
/*
* Read current time and date in RTC
*/
@@ -449,6 +469,8 @@ static int wm831x_rtc_probe(struct platform_device *pdev)
alm_irq, ret);
}
+ wm831x_rtc_add_randomness(wm831x);
+
return 0;
err:
diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
index e68fac6..d2f9576 100644
--- a/drivers/scsi/libsas/sas_expander.c
+++ b/drivers/scsi/libsas/sas_expander.c
@@ -770,7 +770,7 @@ static struct domain_device *sas_ex_discover_end_dev(
}
/* See if this phy is part of a wide port */
-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
{
struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
int i;
@@ -786,11 +786,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
sas_port_add_phy(ephy->port, phy->phy);
phy->port = ephy->port;
phy->phy_state = PHY_DEVICE_DISCOVERED;
- return 0;
+ return true;
}
}
- return -ENODEV;
+ return false;
}
static struct domain_device *sas_ex_discover_expander(
@@ -928,8 +928,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
return res;
}
- res = sas_ex_join_wide_port(dev, phy_id);
- if (!res) {
+ if (sas_ex_join_wide_port(dev, phy_id)) {
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
return res;
@@ -974,8 +973,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
SAS_ADDR(child->sas_addr)) {
ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
- res = sas_ex_join_wide_port(dev, i);
- if (!res)
+ if (sas_ex_join_wide_port(dev, i))
SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
@@ -1838,32 +1836,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
{
struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
struct domain_device *child;
- bool found = false;
- int res, i;
+ int res;
SAS_DPRINTK("ex %016llx phy%d new device attached\n",
SAS_ADDR(dev->sas_addr), phy_id);
res = sas_ex_phy_discover(dev, phy_id);
if (res)
- goto out;
- /* to support the wide port inserted */
- for (i = 0; i < dev->ex_dev.num_phys; i++) {
- struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
- if (i == phy_id)
- continue;
- if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
- SAS_ADDR(ex_phy->attached_sas_addr)) {
- found = true;
- break;
- }
- }
- if (found) {
- sas_ex_join_wide_port(dev, phy_id);
+ return res;
+
+ if (sas_ex_join_wide_port(dev, phy_id))
return 0;
- }
+
res = sas_ex_discover_devices(dev, phy_id);
- if (!res)
- goto out;
+ if (res)
+ return res;
list_for_each_entry(child, &dev->ex_dev.children, siblings) {
if (SAS_ADDR(child->sas_addr) ==
SAS_ADDR(ex_phy->attached_sas_addr)) {
@@ -1873,7 +1859,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
break;
}
}
-out:
return res;
}
@@ -1972,9 +1957,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
struct domain_device *dev = NULL;
res = sas_find_bcast_dev(port_dev, &dev);
- if (res)
- goto out;
- if (dev) {
+ while (res == 0 && dev) {
struct expander_device *ex = &dev->ex_dev;
int i = 0, phy_id;
@@ -1986,8 +1969,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
res = sas_rediscover(dev, phy_id);
i = phy_id + 1;
} while (i < ex->num_phys);
+
+ dev = NULL;
+ res = sas_find_bcast_dev(port_dev, &dev);
}
-out:
return res;
}
diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
index a4b9cdb..7f1afde 100644
--- a/drivers/scsi/scsi_error.c
+++ b/drivers/scsi/scsi_error.c
@@ -1665,6 +1665,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
* requests are started.
*/
scsi_run_host_queues(shost);
+
+ /*
+ * if eh is active and host_eh_scheduled is pending we need to re-run
+ * recovery. we do this check after scsi_run_host_queues() to allow
+ * everything pent up since the last eh run a chance to make forward
+ * progress before we sync again. Either we'll immediately re-run
+ * recovery or scsi_device_unbusy() will wake us again when these
+ * pending commands complete.
+ */
+ spin_lock_irqsave(shost->host_lock, flags);
+ if (shost->host_eh_scheduled)
+ if (scsi_host_set_state(shost, SHOST_RECOVERY))
+ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
+ spin_unlock_irqrestore(shost->host_lock, flags);
}
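
The nested ifs read more easily once you know scsi_host_set_state() returns 0 on a legal transition and -EINVAL otherwise; equivalently (paraphrase, not in the patch):

    if (shost->host_eh_scheduled) {
            if (scsi_host_set_state(shost, SHOST_RECOVERY) != 0)
                    /* RECOVERY is illegal from the current state, so
                     * CANCEL_RECOVERY must be legal; warn if not. */
                    WARN_ON(scsi_host_set_state(shost,
                                                SHOST_CANCEL_RECOVERY));
    }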
/**
diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
index 99fc45b..dd454c4 100644
--- a/drivers/scsi/scsi_lib.c
+++ b/drivers/scsi/scsi_lib.c
@@ -481,15 +481,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
*/
static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
{
+ struct scsi_device *sdev = cmd->device;
struct request *req = cmd->request;
unsigned long flags;
+ /*
+ * We need to hold a reference on the device to avoid the queue being
+ * killed after the unlock and before scsi_run_queue is invoked which
+ * may happen because scsi_unprep_request() puts the command which
+ * releases its reference on the device.
+ */
+ get_device(&sdev->sdev_gendev);
+
spin_lock_irqsave(q->queue_lock, flags);
scsi_unprep_request(req);
blk_requeue_request(q, req);
spin_unlock_irqrestore(q->queue_lock, flags);
scsi_run_queue(q);
+
+ put_device(&sdev->sdev_gendev);
}
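
The comment describes a standard pinning pattern: take an extra reference before a window in which the object's usual reference may be dropped, then release it once the object is no longer touched. In sketch form, not from the patch:

    get_device(&sdev->sdev_gendev);     /* pin sdev across the window      */
    /* ... scsi_unprep_request() may put the command, which may drop
     *     the last ordinary reference on sdev and kill its queue ...      */
    scsi_run_queue(q);                  /* still safe, we hold a reference */
    put_device(&sdev->sdev_gendev);     /* drop the pin                    */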
void scsi_next_command(struct scsi_cmnd *cmd)
diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
index 6e7ea4a..a48b59c 100644
--- a/drivers/scsi/scsi_scan.c
+++ b/drivers/scsi/scsi_scan.c
@@ -1710,6 +1710,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
{
struct scsi_device *sdev;
shost_for_each_device(sdev, shost) {
+ /* target removed before the device could be added */
+ if (sdev->sdev_state == SDEV_DEL)
+ continue;
if (!scsi_host_scan_allowed(shost) ||
scsi_sysfs_add_sdev(sdev) != 0)
__scsi_remove_device(sdev);
diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
index e0bd3f7..de21547 100644
--- a/drivers/scsi/scsi_sysfs.c
+++ b/drivers/scsi/scsi_sysfs.c
@@ -962,7 +962,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
struct scsi_device *sdev;
spin_lock_irqsave(shost->host_lock, flags);
- starget->reap_ref++;
restart:
list_for_each_entry(sdev, &shost->__devices, siblings) {
if (sdev->channel != starget->channel ||
@@ -976,14 +975,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
goto restart;
}
spin_unlock_irqrestore(shost->host_lock, flags);
- scsi_target_reap(starget);
-}
-
-static int __remove_child (struct device * dev, void * data)
-{
- if (scsi_is_target_device(dev))
- __scsi_remove_target(to_scsi_target(dev));
- return 0;
}
/**
@@ -996,14 +987,34 @@ static int __remove_child (struct device * dev, void * data)
*/
void scsi_remove_target(struct device *dev)
{
- if (scsi_is_target_device(dev)) {
- __scsi_remove_target(to_scsi_target(dev));
- return;
+ struct Scsi_Host *shost = dev_to_shost(dev->parent);
+ struct scsi_target *starget, *found;
+ unsigned long flags;
+
+ restart:
+ found = NULL;
+ spin_lock_irqsave(shost->host_lock, flags);
+ list_for_each_entry(starget, &shost->__targets, siblings) {
+ if (starget->state == STARGET_DEL)
+ continue;
+ if (starget->dev.parent == dev || &starget->dev == dev) {
+ found = starget;
+ found->reap_ref++;
+ break;
+ }
}
+ spin_unlock_irqrestore(shost->host_lock, flags);
- get_device(dev);
- device_for_each_child(dev, NULL, __remove_child);
- put_device(dev);
+ if (found) {
+ __scsi_remove_target(found);
+ scsi_target_reap(found);
+ /* in the case where @dev has multiple starget children,
+ * continue removing.
+ *
+ * FIXME: does such a case exist?
+ */
+ goto restart;
+ }
}
EXPORT_SYMBOL(scsi_remove_target);
diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
index ca3c303..4d1f996 100644
--- a/drivers/usb/core/devio.c
+++ b/drivers/usb/core/devio.c
@@ -1557,10 +1557,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
void __user *addr = as->userurb;
unsigned int i;
- if (as->userbuffer && urb->actual_length)
- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
- urb->actual_length))
+ if (as->userbuffer && urb->actual_length) {
+ if (urb->number_of_packets > 0) /* Isochronous */
+ i = urb->transfer_buffer_length;
+ else /* Non-Isoc */
+ i = urb->actual_length;
+ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
return -EFAULT;
+ }
if (put_user(as->status, &userurb->status))
return -EFAULT;
if (put_user(urb->actual_length, &userurb->actual_length))
diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
index 34bb059..3c0aa02 100644
--- a/drivers/usb/core/hub.c
+++ b/drivers/usb/core/hub.c
@@ -24,6 +24,7 @@
#include <linux/kthread.h>
#include <linux/mutex.h>
#include <linux/freezer.h>
+#include <linux/random.h>
#include <asm/uaccess.h>
#include <asm/byteorder.h>
@@ -1902,6 +1903,14 @@ int usb_new_device(struct usb_device *udev)
/* Tell the world! */
announce_device(udev);
+ if (udev->serial)
+ add_device_randomness(udev->serial, strlen(udev->serial));
+ if (udev->product)
+ add_device_randomness(udev->product, strlen(udev->product));
+ if (udev->manufacturer)
+ add_device_randomness(udev->manufacturer,
+ strlen(udev->manufacturer));
+
device_enable_async_suspend(&udev->dev);
/* Register the device. The device driver is responsible
* for configuring the device and invoking the add-device
diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
index 1fc8f12..347bb05 100644
--- a/drivers/usb/early/ehci-dbgp.c
+++ b/drivers/usb/early/ehci-dbgp.c
@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
writel(FLAG_CF, &ehci_regs->configured_flag);
/* Wait until the controller is no longer halted */
- loop = 10;
+ loop = 1000;
do {
status = readl(&ehci_regs->status);
if (!(status & STS_HALT))
diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
index 4690980..f30c307 100644
--- a/drivers/usb/gadget/u_ether.c
+++ b/drivers/usb/gadget/u_ether.c
@@ -846,12 +846,6 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
SET_ETHTOOL_OPS(net, &ops);
- /* two kinds of host-initiated state changes:
- * - iff DATA transfer is active, carrier is "on"
- * - tx queueing enabled if open *and* carrier is "on"
- */
- netif_carrier_off(net);
-
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
SET_NETDEV_DEVTYPE(net, &gadget_type);
@@ -865,6 +859,12 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
INFO(dev, "HOST MAC %pM\n", dev->host_mac);
the_dev = dev;
+
+ /* two kinds of host-initiated state changes:
+ * - iff DATA transfer is active, carrier is "on"
+ * - tx queueing enabled if open *and* carrier is "on"
+ */
+ netif_carrier_off(net);
}
return status;
diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c
index 9ab094c..efba0188 100644
--- a/drivers/usb/host/pci-quirks.c
+++ b/drivers/usb/host/pci-quirks.c
@@ -798,6 +798,13 @@ void usb_enable_xhci_ports(struct pci_dev *xhci_pdev)
}
EXPORT_SYMBOL_GPL(usb_enable_xhci_ports);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev)
+{
+ pci_write_config_dword(xhci_pdev, USB_INTEL_USB3_PSSEN, 0x0);
+ pci_write_config_dword(xhci_pdev, USB_INTEL_XUSB2PR, 0x0);
+}
+EXPORT_SYMBOL_GPL(usb_disable_xhci_ports);
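
A gloss on the two registers, based on the Intel port-switchover scheme and offered as an assumption rather than taken from the patch: USB_INTEL_USB3_PSSEN is the per-port SuperSpeed enable mask and USB_INTEL_XUSB2PR the USB2 port-routing mask, so zeroing both hands every switchable port back to the EHCI controller. A caller would guard it with the existing predicate:

    /* Hypothetical call site; both helpers are declared in pci-quirks.h. */
    if (usb_is_intel_switchable_xhci(pdev))
            usb_disable_xhci_ports(pdev);   /* route ports back to EHCI */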
+
/**
* PCI Quirks for xHCI.
*
diff --git a/drivers/usb/host/pci-quirks.h b/drivers/usb/host/pci-quirks.h
index b1002a8..ef004a5 100644
--- a/drivers/usb/host/pci-quirks.h
+++ b/drivers/usb/host/pci-quirks.h
@@ -10,6 +10,7 @@ void usb_amd_quirk_pll_disable(void);
void usb_amd_quirk_pll_enable(void);
bool usb_is_intel_switchable_xhci(struct pci_dev *pdev);
void usb_enable_xhci_ports(struct pci_dev *xhci_pdev);
+void usb_disable_xhci_ports(struct pci_dev *xhci_pdev);
#else
static inline void usb_amd_quirk_pll_disable(void) {}
static inline void usb_amd_quirk_pll_enable(void) {}
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index 39e230f..b047458 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -140,11 +140,21 @@ static int xhci_pci_setup(struct usb_hcd *hcd)
xhci->quirks |= XHCI_SPURIOUS_SUCCESS;
xhci->quirks |= XHCI_EP_LIMIT_QUIRK;
xhci->limit_active_eps = 64;
+ /*
+ * PPT desktop boards DH77EB and DH77DF will power back on after
+ * a few seconds of being shut down. The fix for this is to
+ * switch the ports from xHCI to EHCI on shutdown. We can't use
+ * DMI information to find those particular boards (since each
+ * vendor will change the board name), so we have to key off all
+ * PPT chipsets.
+ */
+ xhci->quirks |= XHCI_SPURIOUS_REBOOT;
}
if (pdev->vendor == PCI_VENDOR_ID_ETRON &&
pdev->device == PCI_DEVICE_ID_ASROCK_P67) {
xhci->quirks |= XHCI_RESET_ON_RESUME;
xhci_dbg(xhci, "QUIRK: Resetting on resume\n");
+ xhci->quirks |= XHCI_TRUST_TX_LENGTH;
}
if (pdev->vendor == PCI_VENDOR_ID_VIA)
xhci->quirks |= XHCI_RESET_ON_RESUME;
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 16ec4d3..1f9602a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -163,7 +163,7 @@ int xhci_reset(struct xhci_hcd *xhci)
xhci_writel(xhci, command, &xhci->op_regs->command);
ret = handshake(xhci, &xhci->op_regs->command,
- CMD_RESET, 0, 250 * 1000);
+ CMD_RESET, 0, 10 * 1000 * 1000);
if (ret)
return ret;
@@ -172,7 +172,8 @@ int xhci_reset(struct xhci_hcd *xhci)
* xHCI cannot write to any doorbells or operational registers other
* than status until the "Controller Not Ready" flag is cleared.
*/
- return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
+ return handshake(xhci, &xhci->op_regs->status,
+ STS_CNR, 0, 10 * 1000 * 1000);
}
/*
@@ -593,6 +594,9 @@ void xhci_shutdown(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ if (xhci->quirks & XHCI_SPURIOUS_REBOOT)
+ usb_disable_xhci_ports(to_pci_dev(hcd->self.controller));
+
spin_lock_irq(&xhci->lock);
xhci_halt(xhci);
spin_unlock_irq(&xhci->lock);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 25c3e26..d88644d 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1316,6 +1316,7 @@ struct xhci_hcd {
#define XHCI_RESET_ON_RESUME (1 << 7)
#define XHCI_AMD_0x96_HOST (1 << 9)
#define XHCI_TRUST_TX_LENGTH (1 << 10)
+#define XHCI_SPURIOUS_REBOOT (1 << 13)
unsigned int num_active_eps;
unsigned int limit_active_eps;
/* There are two roothubs to keep track of bus suspend info for */
diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c
index 86fc8fc..535a307 100644
--- a/drivers/usb/serial/ftdi_sio.c
+++ b/drivers/usb/serial/ftdi_sio.c
@@ -809,6 +809,7 @@ static struct usb_device_id id_table_combined [] = {
{ USB_DEVICE(LARSENBRUSGAARD_VID, LB_ALTITRACK_PID) },
{ USB_DEVICE(GN_OTOMETRICS_VID, AURICAL_USB_PID) },
{ USB_DEVICE(PI_VID, PI_E861_PID) },
+ { USB_DEVICE(KONDO_VID, KONDO_USB_SERIAL_PID) },
{ USB_DEVICE(BAYER_VID, BAYER_CONTOUR_CABLE_PID) },
{ USB_DEVICE(FTDI_VID, MARVELL_OPENRD_PID),
.driver_info = (kernel_ulong_t)&ftdi_jtag_quirk },
diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h
index d27d7d7..54b4258 100644
--- a/drivers/usb/serial/ftdi_sio_ids.h
+++ b/drivers/usb/serial/ftdi_sio_ids.h
@@ -795,6 +795,13 @@
#define PI_E861_PID 0x1008 /* E-861 piezo controller USB connection */
/*
+ * Kondo Kagaku Co.Ltd.
+ * http://www.kondo-robot.com/EN
+ */
+#define KONDO_VID 0x165c
+#define KONDO_USB_SERIAL_PID 0x0002
+
+/*
* Bayer Ascensia Contour blood glucose meter USB-converter cable.
* http://winglucofacts.com/cables/
*/
diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c
index fdbeb6a..fd2463e 100644
--- a/drivers/usb/serial/mos7840.c
+++ b/drivers/usb/serial/mos7840.c
@@ -1191,9 +1191,12 @@ static int mos7840_chars_in_buffer(struct tty_struct *tty)
}
spin_lock_irqsave(&mos7840_port->pool_lock, flags);
- for (i = 0; i < NUM_URBS; ++i)
- if (mos7840_port->busy[i])
- chars += URB_TRANSFER_BUFFER_SIZE;
+ for (i = 0; i < NUM_URBS; ++i) {
+ if (mos7840_port->busy[i]) {
+ struct urb *urb = mos7840_port->write_urb_pool[i];
+ chars += urb->transfer_buffer_length;
+ }
+ }
spin_unlock_irqrestore(&mos7840_port->pool_lock, flags);
dbg("%s - returns %d", __func__, chars);
return chars;
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index d232623..d8851d0 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -80,85 +80,9 @@ static void option_instat_callback(struct urb *urb);
#define OPTION_PRODUCT_GTM380_MODEM 0x7201
#define HUAWEI_VENDOR_ID 0x12D1
-#define HUAWEI_PRODUCT_E600 0x1001
-#define HUAWEI_PRODUCT_E220 0x1003
-#define HUAWEI_PRODUCT_E220BIS 0x1004
-#define HUAWEI_PRODUCT_E1401 0x1401
-#define HUAWEI_PRODUCT_E1402 0x1402
-#define HUAWEI_PRODUCT_E1403 0x1403
-#define HUAWEI_PRODUCT_E1404 0x1404
-#define HUAWEI_PRODUCT_E1405 0x1405
-#define HUAWEI_PRODUCT_E1406 0x1406
-#define HUAWEI_PRODUCT_E1407 0x1407
-#define HUAWEI_PRODUCT_E1408 0x1408
-#define HUAWEI_PRODUCT_E1409 0x1409
-#define HUAWEI_PRODUCT_E140A 0x140A
-#define HUAWEI_PRODUCT_E140B 0x140B
-#define HUAWEI_PRODUCT_E140C 0x140C
-#define HUAWEI_PRODUCT_E140D 0x140D
-#define HUAWEI_PRODUCT_E140E 0x140E
-#define HUAWEI_PRODUCT_E140F 0x140F
-#define HUAWEI_PRODUCT_E1410 0x1410
-#define HUAWEI_PRODUCT_E1411 0x1411
-#define HUAWEI_PRODUCT_E1412 0x1412
-#define HUAWEI_PRODUCT_E1413 0x1413
-#define HUAWEI_PRODUCT_E1414 0x1414
-#define HUAWEI_PRODUCT_E1415 0x1415
-#define HUAWEI_PRODUCT_E1416 0x1416
-#define HUAWEI_PRODUCT_E1417 0x1417
-#define HUAWEI_PRODUCT_E1418 0x1418
-#define HUAWEI_PRODUCT_E1419 0x1419
-#define HUAWEI_PRODUCT_E141A 0x141A
-#define HUAWEI_PRODUCT_E141B 0x141B
-#define HUAWEI_PRODUCT_E141C 0x141C
-#define HUAWEI_PRODUCT_E141D 0x141D
-#define HUAWEI_PRODUCT_E141E 0x141E
-#define HUAWEI_PRODUCT_E141F 0x141F
-#define HUAWEI_PRODUCT_E1420 0x1420
-#define HUAWEI_PRODUCT_E1421 0x1421
-#define HUAWEI_PRODUCT_E1422 0x1422
-#define HUAWEI_PRODUCT_E1423 0x1423
-#define HUAWEI_PRODUCT_E1424 0x1424
-#define HUAWEI_PRODUCT_E1425 0x1425
-#define HUAWEI_PRODUCT_E1426 0x1426
-#define HUAWEI_PRODUCT_E1427 0x1427
-#define HUAWEI_PRODUCT_E1428 0x1428
-#define HUAWEI_PRODUCT_E1429 0x1429
-#define HUAWEI_PRODUCT_E142A 0x142A
-#define HUAWEI_PRODUCT_E142B 0x142B
-#define HUAWEI_PRODUCT_E142C 0x142C
-#define HUAWEI_PRODUCT_E142D 0x142D
-#define HUAWEI_PRODUCT_E142E 0x142E
-#define HUAWEI_PRODUCT_E142F 0x142F
-#define HUAWEI_PRODUCT_E1430 0x1430
-#define HUAWEI_PRODUCT_E1431 0x1431
-#define HUAWEI_PRODUCT_E1432 0x1432
-#define HUAWEI_PRODUCT_E1433 0x1433
-#define HUAWEI_PRODUCT_E1434 0x1434
-#define HUAWEI_PRODUCT_E1435 0x1435
-#define HUAWEI_PRODUCT_E1436 0x1436
-#define HUAWEI_PRODUCT_E1437 0x1437
-#define HUAWEI_PRODUCT_E1438 0x1438
-#define HUAWEI_PRODUCT_E1439 0x1439
-#define HUAWEI_PRODUCT_E143A 0x143A
-#define HUAWEI_PRODUCT_E143B 0x143B
-#define HUAWEI_PRODUCT_E143C 0x143C
-#define HUAWEI_PRODUCT_E143D 0x143D
-#define HUAWEI_PRODUCT_E143E 0x143E
-#define HUAWEI_PRODUCT_E143F 0x143F
#define HUAWEI_PRODUCT_K4505 0x1464
#define HUAWEI_PRODUCT_K3765 0x1465
-#define HUAWEI_PRODUCT_E14AC 0x14AC
-#define HUAWEI_PRODUCT_K3806 0x14AE
#define HUAWEI_PRODUCT_K4605 0x14C6
-#define HUAWEI_PRODUCT_K5005 0x14C8
-#define HUAWEI_PRODUCT_K3770 0x14C9
-#define HUAWEI_PRODUCT_K3771 0x14CA
-#define HUAWEI_PRODUCT_K4510 0x14CB
-#define HUAWEI_PRODUCT_K4511 0x14CC
-#define HUAWEI_PRODUCT_ETS1220 0x1803
-#define HUAWEI_PRODUCT_E353 0x1506
-#define HUAWEI_PRODUCT_E173S 0x1C05
#define QUANTA_VENDOR_ID 0x0408
#define QUANTA_PRODUCT_Q101 0xEA02
@@ -615,104 +539,123 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLX) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GKE) },
{ USB_DEVICE(QUANTA_VENDOR_ID, QUANTA_PRODUCT_GLE) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E600, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E220BIS, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1401, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1402, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1403, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1404, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1405, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1406, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1407, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1408, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1409, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E140F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1410, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1411, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1412, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1413, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1414, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1415, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1416, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1417, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1418, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1419, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E141F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1420, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1421, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1422, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1423, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1424, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1425, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1426, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1427, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1428, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1429, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E142F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1430, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1431, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1432, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1433, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1434, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1435, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1436, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1437, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1438, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1439, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143A, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143B, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143C, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143D, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143E, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E143F, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173S, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_ETS1220, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E14AC, 0xff, 0xff, 0xff) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3806, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff),
.driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K5005, 0xff, 0x01, 0x33) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3770, 0xff, 0x02, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3771, 0xff, 0x02, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4510, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x31) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4511, 0xff, 0x01, 0x32) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x01) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x02) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x03) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x10) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x12) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x01, 0x13) },
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x01) }, /* E398 3G Modem */
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x02) }, /* E398 3G PC UI Interface */
- { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E353, 0xff, 0x02, 0x03) }, /* E398 3G Application Interface */
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x01, 0x7C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x01) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x02) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x03) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x04) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x05) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x06) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x0F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x10) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x12) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x13) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x14) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x15) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x17) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x18) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x19) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x1C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x31) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x32) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x33) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x34) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x35) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x36) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x3F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x48) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x49) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x4C) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x61) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x62) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x63) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x64) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x65) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x66) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6D) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6E) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x6F) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x78) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x79) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7A) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7B) },
+ { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0x02, 0x7C) },
+
+
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V640) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V620) },
{ USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_V740) },
@@ -939,6 +882,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1018, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf3_blacklist },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) },
{ USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) },
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 7ec1409..8006a28 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -212,10 +212,17 @@ static noinline int run_ordered_completions(struct btrfs_workers *workers,
work->ordered_func(work);
- /* now take the lock again and call the freeing code */
+ /* now take the lock again and drop our item from the list */
spin_lock(&workers->order_lock);
list_del(&work->order_list);
+ spin_unlock(&workers->order_lock);
+
+ /*
+ * we don't want to call the ordered free functions
+ * with the lock held though
+ */
work->ordered_free(work);
+ spin_lock(&workers->order_lock);
}
spin_unlock(&workers->order_lock);
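
The async-thread change above is the usual drop-call-retake pattern: remove the item under the spinlock, release the lock before invoking the callback (which may sleep or take other locks), then retake the lock before touching the list again. A minimal sketch of the pattern, with illustrative names (lock, queue, struct item):

	spin_lock(&lock);
	while (!list_empty(&queue)) {
		struct item *it = list_first_entry(&queue, struct item, list);

		list_del(&it->list);
		spin_unlock(&lock);	/* never run callbacks under a spinlock */
		it->free(it);		/* may sleep or take other locks */
		spin_lock(&lock);	/* retake before looking at the list again */
	}
	spin_unlock(&lock);
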
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 1ac8db5d..57106a9 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -801,7 +801,8 @@ static int btree_submit_bio_hook(struct inode *inode, int rw, struct bio *bio,
#ifdef CONFIG_MIGRATION
static int btree_migratepage(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
{
/*
* we can't safely write a btree page from here,
@@ -816,7 +817,7 @@ static int btree_migratepage(struct address_space *mapping,
if (page_has_private(page) &&
!try_to_release_page(page, GFP_KERNEL))
return -EAGAIN;
- return migrate_page(mapping, newpage, page);
+ return migrate_page(mapping, newpage, page, mode);
}
#endif
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 6751e74..c71032b 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -85,9 +85,12 @@ cifs_readdir_lookup(struct dentry *parent, struct qstr *name,
dentry = d_lookup(parent, name);
if (dentry) {
- /* FIXME: check for inode number changes? */
- if (dentry->d_inode != NULL)
+ inode = dentry->d_inode;
+ /* update inode in place if i_ino didn't change */
+ if (inode && CIFS_I(inode)->uniqueid == fattr->cf_uniqueid) {
+ cifs_fattr_to_inode(inode, fattr);
return dentry;
+ }
d_drop(dentry);
dput(dentry);
}
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 264f694..ebe95f5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -514,7 +514,8 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
if (bitmap_bh == NULL)
continue;
- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
+ x = ext4_count_free(bitmap_bh->b_data,
+ EXT4_BLOCKS_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
i, ext4_free_blks_count(sb, gdp), x);
bitmap_count += x;
diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
index fa3af81..012faaa 100644
--- a/fs/ext4/bitmap.c
+++ b/fs/ext4/bitmap.c
@@ -15,15 +15,13 @@
static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
+unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
{
unsigned int i, sum = 0;
- if (!map)
- return 0;
for (i = 0; i < numchars; i++)
- sum += nibblemap[map->b_data[i] & 0xf] +
- nibblemap[(map->b_data[i] >> 4) & 0xf];
+ sum += nibblemap[bitmap[i] & 0xf] +
+ nibblemap[(bitmap[i] >> 4) & 0xf];
return sum;
}
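
ext4_count_free() counts free (zero) bits one byte at a time: nibblemap[n] holds the number of zero bits in the 4-bit value n (4 for 0x0 down to 0 for 0xf), so each byte contributes the counts of its low and high nibbles. The same technique as a standalone sketch:

	/* number of zero bits in each possible nibble value */
	static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1,
					3, 2, 2, 1, 2, 1, 1, 0};

	static unsigned int count_zero_bits(const unsigned char *buf,
					    unsigned int len)
	{
		unsigned int i, sum = 0;

		for (i = 0; i < len; i++)
			sum += nibblemap[buf[i] & 0xf] +
			       nibblemap[(buf[i] >> 4) & 0xf];
		return sum;
	}
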
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 1a34c1c..e0113aa 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -1713,7 +1713,7 @@ struct mmpd_data {
# define NORET_AND noreturn,
/* bitmap.c */
-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
+extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
/* balloc.c */
extern unsigned int ext4_block_group(struct super_block *sb,
diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
index 412469b..29272de 100644
--- a/fs/ext4/ialloc.c
+++ b/fs/ext4/ialloc.c
@@ -1193,7 +1193,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
if (!bitmap_bh)
continue;
- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
+ x = ext4_count_free(bitmap_bh->b_data,
+ EXT4_INODES_PER_GROUP(sb) / 8);
printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
(unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
bitmap_count += x;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index c1e6a72..18fee6d 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -1134,6 +1134,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
used = ei->i_reserved_data_blocks;
}
+ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
+ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
+ "with only %d reserved metadata blocks\n", __func__,
+ inode->i_ino, ei->i_allocated_meta_blocks,
+ ei->i_reserved_meta_blocks);
+ WARN_ON(1);
+ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
+ }
+
/* Update per-inode reservations */
ei->i_reserved_data_blocks -= used;
ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
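
The new check above is defensive accounting: if i_allocated_meta_blocks ever exceeds i_reserved_meta_blocks, the subtraction below would underflow, so the code reports the inconsistency and clamps the counter before proceeding. The general warn-and-clamp shape, with illustrative names:

	if (unlikely(allocated > reserved)) {
		WARN_ON(1);		/* accounting bug: report it ... */
		allocated = reserved;	/* ... but keep the counters sane */
	}
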
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 113b107..489d406 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -860,6 +860,7 @@ static struct inode *ext4_alloc_inode(struct super_block *sb)
ei->i_reserved_meta_blocks = 0;
ei->i_allocated_meta_blocks = 0;
ei->i_da_metadata_calc_len = 0;
+ ei->i_da_metadata_calc_last_lblock = 0;
spin_lock_init(&(ei->i_block_reservation_lock));
#ifdef CONFIG_QUOTA
ei->i_reserved_quota = 0;
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index 82a6646..79fca8d 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -1710,7 +1710,7 @@ static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
size_t n;
u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
- for (n = 0; n < count; n++) {
+ for (n = 0; n < count; n++, iov++) {
if (iov->iov_len > (size_t) max)
return -ENOMEM;
max -= iov->iov_len;
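
The fuse bug fixed above is easy to miss: the loop bound used n, but the body dereferenced iov without ever advancing it, so only iov[0] was validated count times. Advancing the pointer in the loop header fixes it; the equivalent indexed form makes the intent obvious:

	for (n = 0; n < count; n++) {
		if (iov[n].iov_len > (size_t) max)
			return -ENOMEM;
		max -= iov[n].iov_len;
	}
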
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 8b0c875..6327a06 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -568,7 +568,8 @@ static int hugetlbfs_set_page_dirty(struct page *page)
}
static int hugetlbfs_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
{
int rc;
diff --git a/fs/locks.c b/fs/locks.c
index b286539..35388d5 100644
--- a/fs/locks.c
+++ b/fs/locks.c
@@ -315,7 +315,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
return 0;
}
-static int assign_type(struct file_lock *fl, int type)
+static int assign_type(struct file_lock *fl, long type)
{
switch (type) {
case F_RDLCK:
@@ -452,7 +452,7 @@ static const struct lock_manager_operations lease_manager_ops = {
/*
* Initialize a lease, use the default lock manager operations
*/
-static int lease_init(struct file *filp, int type, struct file_lock *fl)
+static int lease_init(struct file *filp, long type, struct file_lock *fl)
{
if (assign_type(fl, type) != 0)
return -EINVAL;
@@ -470,7 +470,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
}
/* Allocate a file_lock initialised to this type of lease */
-static struct file_lock *lease_alloc(struct file *filp, int type)
+static struct file_lock *lease_alloc(struct file *filp, long type)
{
struct file_lock *fl = locks_alloc_lock();
int error = -ENOMEM;
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index dd2f130..6c6e2c4 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -493,8 +493,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
- /* Only do I/O if gfp is a superset of GFP_KERNEL */
- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
+ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
+ * doing this memory reclaim for a fs-related allocation.
+ */
+ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
+ !(current->flags & PF_FSTRANS)) {
int how = FLUSH_SYNC;
/* Don't let kswapd deadlock waiting for OOM RPC calls */
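
PF_FSTRANS marks a task that is allocating memory on behalf of a filesystem transaction; releasepage now refuses to start commit I/O in that context so reclaim cannot recurse back into the fs. A sketch of how a caller flags such a region (save/restore written out explicitly; do_fs_internal_work and the exact helper used vary by subsystem):

	unsigned long pflags = current->flags;

	current->flags |= PF_FSTRANS;
	/* allocation or reclaim that must not re-enter the filesystem */
	do_fs_internal_work();
	current->flags = (current->flags & ~PF_FSTRANS) |
			 (pflags & PF_FSTRANS);
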
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 2a55347..4f10d81 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -315,7 +315,7 @@ void nfs_commit_release_pages(struct nfs_write_data *data);
#ifdef CONFIG_MIGRATION
extern int nfs_migrate_page(struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *, enum migrate_mode);
#else
#define nfs_migrate_page NULL
#endif
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index f2f80c0..58bb999 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -1662,7 +1662,7 @@ out_error:
#ifdef CONFIG_MIGRATION
int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
- struct page *page)
+ struct page *page, enum migrate_mode mode)
{
/*
* If PagePrivate is set, then the page is currently associated with
@@ -1677,7 +1677,7 @@ int nfs_migrate_page(struct address_space *mapping, struct page *newpage,
nfs_fscache_release_page(page, GFP_KERNEL);
- return migrate_page(mapping, newpage, page);
+ return migrate_page(mapping, newpage, page, mode);
}
#endif
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index 6c74097..f91d589 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -2010,7 +2010,7 @@ out_acl:
if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
if ((buflen -= 4) < 0)
goto out_resource;
- WRITE32(1);
+ WRITE32(0);
}
if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
if ((buflen -= 4) < 0)
diff --git a/fs/nilfs2/ioctl.c b/fs/nilfs2/ioctl.c
index 3e65427..0d1c9bd 100644
--- a/fs/nilfs2/ioctl.c
+++ b/fs/nilfs2/ioctl.c
@@ -182,7 +182,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
if (copy_from_user(&cpmode, argp, sizeof(cpmode)))
goto out;
- down_read(&inode->i_sb->s_umount);
+ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
nilfs_transaction_begin(inode->i_sb, &ti, 0);
ret = nilfs_cpfile_change_cpmode(
@@ -192,7 +192,7 @@ static int nilfs_ioctl_change_cpmode(struct inode *inode, struct file *filp,
else
nilfs_transaction_commit(inode->i_sb); /* never fails */
- up_read(&inode->i_sb->s_umount);
+ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
out:
mnt_drop_write(filp->f_path.mnt);
return ret;
diff --git a/fs/nilfs2/super.c b/fs/nilfs2/super.c
index 8351c44..97bfbdd 100644
--- a/fs/nilfs2/super.c
+++ b/fs/nilfs2/super.c
@@ -951,6 +951,8 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
struct nilfs_root *root;
int ret;
+ mutex_lock(&nilfs->ns_snapshot_mount_mutex);
+
down_read(&nilfs->ns_segctor_sem);
ret = nilfs_cpfile_is_snapshot(nilfs->ns_cpfile, cno);
up_read(&nilfs->ns_segctor_sem);
@@ -975,6 +977,7 @@ static int nilfs_attach_snapshot(struct super_block *s, __u64 cno,
ret = nilfs_get_root_dentry(s, root, root_dentry);
nilfs_put_root(root);
out:
+ mutex_unlock(&nilfs->ns_snapshot_mount_mutex);
return ret;
}
diff --git a/fs/nilfs2/the_nilfs.c b/fs/nilfs2/the_nilfs.c
index 35a8970..1c98f53 100644
--- a/fs/nilfs2/the_nilfs.c
+++ b/fs/nilfs2/the_nilfs.c
@@ -76,6 +76,7 @@ struct the_nilfs *alloc_nilfs(struct block_device *bdev)
nilfs->ns_bdev = bdev;
atomic_set(&nilfs->ns_ndirtyblks, 0);
init_rwsem(&nilfs->ns_sem);
+ mutex_init(&nilfs->ns_snapshot_mount_mutex);
INIT_LIST_HEAD(&nilfs->ns_dirty_files);
INIT_LIST_HEAD(&nilfs->ns_gc_inodes);
spin_lock_init(&nilfs->ns_inode_lock);
diff --git a/fs/nilfs2/the_nilfs.h b/fs/nilfs2/the_nilfs.h
index 9992b11..de7435f 100644
--- a/fs/nilfs2/the_nilfs.h
+++ b/fs/nilfs2/the_nilfs.h
@@ -47,6 +47,7 @@ enum {
* @ns_flags: flags
* @ns_bdev: block device
* @ns_sem: semaphore for shared states
+ * @ns_snapshot_mount_mutex: mutex to protect snapshot mounts
* @ns_sbh: buffer heads of on-disk super blocks
* @ns_sbp: pointers to super block data
* @ns_sbwtime: previous write time of super block
@@ -99,6 +100,7 @@ struct the_nilfs {
struct block_device *ns_bdev;
struct rw_semaphore ns_sem;
+ struct mutex ns_snapshot_mount_mutex;
/*
* used for
diff --git a/fs/ubifs/sb.c b/fs/ubifs/sb.c
index c606f01..1250016 100644
--- a/fs/ubifs/sb.c
+++ b/fs/ubifs/sb.c
@@ -715,8 +715,12 @@ static int fixup_free_space(struct ubifs_info *c)
lnum = ubifs_next_log_lnum(c, lnum);
}
- /* Fixup the current log head */
- err = fixup_leb(c, c->lhead_lnum, c->lhead_offs);
+ /*
+ * Fixup the log head, which contains only a CS node at the
+ * beginning.
+ */
+ err = fixup_leb(c, c->lhead_lnum,
+ ALIGN(UBIFS_CS_NODE_SZ, c->min_io_size));
if (err)
goto out;
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 97f1ca7..111797a 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -66,8 +66,9 @@ enum {
/* migration should happen before other stuff but after perf */
CPU_PRI_PERF = 20,
CPU_PRI_MIGRATION = 10,
- /* prepare workqueues for other notifiers */
- CPU_PRI_WORKQUEUE = 5,
+ /* bring up workqueues before normal notifiers and down after */
+ CPU_PRI_WORKQUEUE_UP = 5,
+ CPU_PRI_WORKQUEUE_DOWN = -5,
};
#ifdef CONFIG_SMP
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index e9eaec5..7a7e5fd 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -89,42 +89,33 @@ extern void rebuild_sched_domains(void);
extern void cpuset_print_task_mems_allowed(struct task_struct *p);
/*
- * reading current mems_allowed and mempolicy in the fastpath must protected
- * by get_mems_allowed()
+ * get_mems_allowed is required when making decisions involving mems_allowed
+ * such as during page allocation. mems_allowed can be updated in parallel
+ * and, depending on the new value, an operation can fail, potentially causing
+ * process failure. A retry loop with get_mems_allowed and put_mems_allowed
+ * prevents these artificial failures.
*/
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
{
- current->mems_allowed_change_disable++;
-
- /*
- * ensure that reading mems_allowed and mempolicy happens after the
- * update of ->mems_allowed_change_disable.
- *
- * the write-side task finds ->mems_allowed_change_disable is not 0,
- * and knows the read-side task is reading mems_allowed or mempolicy,
- * so it will clear old bits lazily.
- */
- smp_mb();
+ return read_seqcount_begin(&current->mems_allowed_seq);
}
-static inline void put_mems_allowed(void)
+/*
+ * If this returns false, the operation that took place after get_mems_allowed
+ * may have failed. It is up to the caller to retry the operation if
+ * appropriate.
+ */
+static inline bool put_mems_allowed(unsigned int seq)
{
- /*
- * ensure that reading mems_allowed and mempolicy before reducing
- * mems_allowed_change_disable.
- *
- * the write-side task will know that the read-side task is still
- * reading mems_allowed or mempolicy, don't clears old bits in the
- * nodemask.
- */
- smp_mb();
- --ACCESS_ONCE(current->mems_allowed_change_disable);
+ return !read_seqcount_retry(&current->mems_allowed_seq, seq);
}
static inline void set_mems_allowed(nodemask_t nodemask)
{
task_lock(current);
+ write_seqcount_begin(&current->mems_allowed_seq);
current->mems_allowed = nodemask;
+ write_seqcount_end(&current->mems_allowed_seq);
task_unlock(current);
}
@@ -234,12 +225,14 @@ static inline void set_mems_allowed(nodemask_t nodemask)
{
}
-static inline void get_mems_allowed(void)
+static inline unsigned int get_mems_allowed(void)
{
+ return 0;
}
-static inline void put_mems_allowed(void)
+static inline bool put_mems_allowed(unsigned int seq)
{
+ return true;
}
#endif /* !CONFIG_CPUSETS */
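
With the seqcount conversion, readers of mems_allowed become a lock-free retry loop: sample a cookie, perform the allocation, and retry only when the attempt failed and put_mems_allowed() reports that the nodemask changed underneath. The calling pattern the later mm/ hunks adopt, in sketch form (do_allocation stands in for any allocation that reads mems_allowed):

	struct page *page;
	unsigned int cookie;

	do {
		cookie = get_mems_allowed();
		page = do_allocation();
	} while (!put_mems_allowed(cookie) && !page);
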
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 96b1035..212ea7b 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -523,6 +523,7 @@ enum positive_aop_returns {
struct page;
struct address_space;
struct writeback_control;
+enum migrate_mode;
struct iov_iter {
const struct iovec *iov;
@@ -607,9 +608,12 @@ struct address_space_operations {
loff_t offset, unsigned long nr_segs);
int (*get_xip_mem)(struct address_space *, pgoff_t, int,
void **, unsigned long *);
- /* migrate the contents of a page to the specified target */
+ /*
+ * migrate the contents of a page to the specified target. If the
+ * mode is MIGRATE_ASYNC, it must not block.
+ */
int (*migratepage) (struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *, enum migrate_mode);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, read_descriptor_t *,
unsigned long);
@@ -2478,7 +2482,8 @@ extern int generic_check_addressable(unsigned, u64);
#ifdef CONFIG_MIGRATION
extern int buffer_migrate_page(struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *,
+ enum migrate_mode);
#else
#define buffer_migrate_page NULL
#endif
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 580f70c..5e41a8e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -30,6 +30,13 @@ extern struct fs_struct init_fs;
#define INIT_THREADGROUP_FORK_LOCK(sig)
#endif
+#ifdef CONFIG_CPUSETS
+#define INIT_CPUSET_SEQ \
+ .mems_allowed_seq = SEQCNT_ZERO,
+#else
+#define INIT_CPUSET_SEQ
+#endif
+
#define INIT_SIGNALS(sig) { \
.nr_threads = 1, \
.wait_chldexit = __WAIT_QUEUE_HEAD_INITIALIZER(sig.wait_chldexit),\
@@ -193,6 +200,7 @@ extern struct cred init_cred;
INIT_FTRACE_GRAPH \
INIT_TRACE_RECURSION \
INIT_TASK_RCU_PREEMPT(tsk) \
+ INIT_CPUSET_SEQ \
}
diff --git a/include/linux/irqdesc.h b/include/linux/irqdesc.h
index 2d921b3..d0a3100 100644
--- a/include/linux/irqdesc.h
+++ b/include/linux/irqdesc.h
@@ -38,7 +38,6 @@ struct timer_rand_state;
*/
struct irq_desc {
struct irq_data irq_data;
- struct timer_rand_state *timer_rand_state;
unsigned int __percpu *kstat_irqs;
irq_flow_handler_t handle_irq;
#ifdef CONFIG_IRQ_PREFLOW_FASTEOI
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index 313a00e..4a8da84 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,7 +35,8 @@ enum mem_cgroup_page_stat_item {
extern unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
struct list_head *dst,
unsigned long *scanned, int order,
- int mode, struct zone *z,
+ isolate_mode_t mode,
+ struct zone *z,
struct mem_cgroup *mem_cont,
int active, int file);
diff --git a/include/linux/mfd/ezx-pcap.h b/include/linux/mfd/ezx-pcap.h
index 40c37216..32a1b5c 100644
--- a/include/linux/mfd/ezx-pcap.h
+++ b/include/linux/mfd/ezx-pcap.h
@@ -16,6 +16,7 @@ struct pcap_subdev {
struct pcap_platform_data {
unsigned int irq_base;
unsigned int config;
+ int gpio;
void (*init) (void *); /* board specific init */
int num_subdevs;
struct pcap_subdev *subdevs;
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index e39aeec..eaf8674 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -6,18 +6,31 @@
typedef struct page *new_page_t(struct page *, unsigned long private, int **);
+/*
+ * MIGRATE_ASYNC means never block
+ * MIGRATE_SYNC_LIGHT in the current implementation means to allow blocking
+ * on most operations but not ->writepage, as the potential stall time
+ * is too significant
+ * MIGRATE_SYNC will block when migrating pages
+ */
+enum migrate_mode {
+ MIGRATE_ASYNC,
+ MIGRATE_SYNC_LIGHT,
+ MIGRATE_SYNC,
+};
+
#ifdef CONFIG_MIGRATION
#define PAGE_MIGRATION 1
extern void putback_lru_pages(struct list_head *l);
extern int migrate_page(struct address_space *,
- struct page *, struct page *);
+ struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync);
+ enum migrate_mode mode);
extern int migrate_huge_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync);
+ enum migrate_mode mode);
extern int fail_migrate_page(struct address_space *,
struct page *, struct page *);
@@ -36,10 +49,10 @@ extern int migrate_huge_page_move_mapping(struct address_space *mapping,
static inline void putback_lru_pages(struct list_head *l) {}
static inline int migrate_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync) { return -ENOSYS; }
+ enum migrate_mode mode) { return -ENOSYS; }
static inline int migrate_huge_pages(struct list_head *l, new_page_t x,
unsigned long private, bool offlining,
- bool sync) { return -ENOSYS; }
+ enum migrate_mode mode) { return -ENOSYS; }
static inline int migrate_prep(void) { return -ENOSYS; }
static inline int migrate_prep_local(void) { return -ENOSYS; }
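
Callers that previously passed a bool sync now express intent as one of the three modes; compaction, for instance, maps its sync flag as shown later in this diff. In sketch form (alloc_fn, private, and sync are placeholders):

	err = migrate_pages(&pagelist, alloc_fn, private, false,
			    sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
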
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index aa2d80b..b32f3f9 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -158,6 +158,20 @@ static inline int is_unevictable_lru(enum lru_list l)
return (l == LRU_UNEVICTABLE);
}
+/* Isolate inactive pages */
+#define ISOLATE_INACTIVE ((__force isolate_mode_t)0x1)
+/* Isolate active pages */
+#define ISOLATE_ACTIVE ((__force isolate_mode_t)0x2)
+/* Isolate clean file */
+#define ISOLATE_CLEAN ((__force isolate_mode_t)0x4)
+/* Isolate unmapped file */
+#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x8)
+/* Isolate for asynchronous migration */
+#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x10)
+
+/* LRU Isolation modes. */
+typedef unsigned __bitwise__ isolate_mode_t;
+
enum zone_watermarks {
WMARK_MIN,
WMARK_LOW,
diff --git a/include/linux/random.h b/include/linux/random.h
index d13059f..ac621ce 100644
--- a/include/linux/random.h
+++ b/include/linux/random.h
@@ -48,13 +48,13 @@ struct rnd_state {
#ifdef __KERNEL__
-extern void rand_initialize_irq(int irq);
-
+extern void add_device_randomness(const void *, unsigned int);
extern void add_input_randomness(unsigned int type, unsigned int code,
unsigned int value);
-extern void add_interrupt_randomness(int irq);
+extern void add_interrupt_randomness(int irq, int irq_flags);
extern void get_random_bytes(void *buf, int nbytes);
+extern void get_random_bytes_arch(void *buf, int nbytes);
void generate_random_uuid(unsigned char uuid_out[16]);
#ifndef MODULE
@@ -91,6 +91,19 @@ static inline void prandom32_seed(struct rnd_state *state, u64 seed)
state->s3 = __seed(i, 15);
}
+#ifdef CONFIG_ARCH_RANDOM
+# include <asm/archrandom.h>
+#else
+static inline int arch_get_random_long(unsigned long *v)
+{
+ return 0;
+}
+static inline int arch_get_random_int(unsigned int *v)
+{
+ return 0;
+}
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_RANDOM_H */
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 35895e3..189b442 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1484,7 +1484,7 @@ struct task_struct {
#endif
#ifdef CONFIG_CPUSETS
nodemask_t mems_allowed; /* Protected by alloc_lock */
- int mems_allowed_change_disable;
+ seqcount_t mems_allowed_seq; /* Sequence number to catch updates */
int cpuset_mem_spread_rotor;
int cpuset_slab_spread_rotor;
#endif
diff --git a/include/linux/swap.h b/include/linux/swap.h
index a273468..e73799d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -243,11 +243,6 @@ static inline void lru_cache_add_file(struct page *page)
__lru_cache_add(page, LRU_INACTIVE_FILE);
}
-/* LRU Isolation modes. */
-#define ISOLATE_INACTIVE 0 /* Isolate inactive pages. */
-#define ISOLATE_ACTIVE 1 /* Isolate active pages. */
-#define ISOLATE_BOTH 2 /* Isolate both active and inactive pages. */
-
/* linux/mm/vmscan.c */
extern unsigned long try_to_free_pages(struct zonelist *zonelist, int order,
gfp_t gfp_mask, nodemask_t *mask);
@@ -259,7 +254,7 @@ extern unsigned long mem_cgroup_shrink_node_zone(struct mem_cgroup *mem,
unsigned int swappiness,
struct zone *zone,
unsigned long *nr_scanned);
-extern int __isolate_lru_page(struct page *page, int mode, int file);
+extern int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file);
extern unsigned long shrink_all_memory(unsigned long nr_pages);
extern int vm_swappiness;
extern int remove_mapping(struct address_space *mapping, struct page *page);
diff --git a/include/linux/usb.h b/include/linux/usb.h
index b08e04c..6cd1576 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -757,6 +757,27 @@ static inline int usb_make_path(struct usb_device *dev, char *buf, size_t size)
.bInterfaceSubClass = (sc), \
.bInterfaceProtocol = (pr)
+/**
+ * USB_VENDOR_AND_INTERFACE_INFO - describe a specific usb vendor with a class of usb interfaces
+ * @vend: the 16 bit USB Vendor ID
+ * @cl: bInterfaceClass value
+ * @sc: bInterfaceSubClass value
+ * @pr: bInterfaceProtocol value
+ *
+ * This macro is used to create a struct usb_device_id that matches a
+ * specific vendor with a specific class of interfaces.
+ *
+ * This is especially useful when explicitly matching devices that have
+ * vendor specific bDeviceClass values, but standards-compliant interfaces.
+ */
+#define USB_VENDOR_AND_INTERFACE_INFO(vend, cl, sc, pr) \
+ .match_flags = USB_DEVICE_ID_MATCH_INT_INFO \
+ | USB_DEVICE_ID_MATCH_VENDOR, \
+ .idVendor = (vend), \
+ .bInterfaceClass = (cl), \
+ .bInterfaceSubClass = (sc), \
+ .bInterfaceProtocol = (pr)
+
/* ----------------------------------------------------------------------- */
/* Stuff for dynamic usb ids */
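
The option driver hunks earlier in this diff rely on this macro to collapse hundreds of Huawei product IDs into per-protocol matches. A minimal device table using it might look like this (the vendor and protocol values are illustrative):

	static const struct usb_device_id example_ids[] = {
		/* any vendor-specific interface ff/01/02 on this vendor */
		{ USB_VENDOR_AND_INTERFACE_INFO(0x12d1, 0xff, 0x01, 0x02) },
		{ }	/* terminating entry */
	};
	MODULE_DEVICE_TABLE(usb, example_ids);
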
diff --git a/include/trace/events/random.h b/include/trace/events/random.h
new file mode 100644
index 0000000..422df19
--- /dev/null
+++ b/include/trace/events/random.h
@@ -0,0 +1,134 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM random
+
+#if !defined(_TRACE_RANDOM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_RANDOM_H
+
+#include <linux/writeback.h>
+#include <linux/tracepoint.h>
+
+DECLARE_EVENT_CLASS(random__mix_pool_bytes,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, bytes )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->bytes = bytes;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: bytes %d caller %pF",
+ __entry->pool_name, __entry->bytes, (void *)__entry->IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP)
+);
+
+DEFINE_EVENT(random__mix_pool_bytes, mix_pool_bytes_nolock,
+ TP_PROTO(const char *pool_name, int bytes, unsigned long IP),
+
+ TP_ARGS(pool_name, bytes, IP)
+);
+
+TRACE_EVENT(credit_entropy_bits,
+ TP_PROTO(const char *pool_name, int bits, int entropy_count,
+ int entropy_total, unsigned long IP),
+
+ TP_ARGS(pool_name, bits, entropy_count, entropy_total, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, bits )
+ __field( int, entropy_count )
+ __field( int, entropy_total )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->bits = bits;
+ __entry->entropy_count = entropy_count;
+ __entry->entropy_total = entropy_total;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: bits %d entropy_count %d entropy_total %d "
+ "caller %pF", __entry->pool_name, __entry->bits,
+ __entry->entropy_count, __entry->entropy_total,
+ (void *)__entry->IP)
+);
+
+TRACE_EVENT(get_random_bytes,
+ TP_PROTO(int nbytes, unsigned long IP),
+
+ TP_ARGS(nbytes, IP),
+
+ TP_STRUCT__entry(
+ __field( int, nbytes )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->nbytes = nbytes;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("nbytes %d caller %pF", __entry->nbytes, (void *)__entry->IP)
+);
+
+DECLARE_EVENT_CLASS(random__extract_entropy,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP),
+
+ TP_STRUCT__entry(
+ __field( const char *, pool_name )
+ __field( int, nbytes )
+ __field( int, entropy_count )
+ __field(unsigned long, IP )
+ ),
+
+ TP_fast_assign(
+ __entry->pool_name = pool_name;
+ __entry->nbytes = nbytes;
+ __entry->entropy_count = entropy_count;
+ __entry->IP = IP;
+ ),
+
+ TP_printk("%s pool: nbytes %d entropy_count %d caller %pF",
+ __entry->pool_name, __entry->nbytes, __entry->entropy_count,
+ (void *)__entry->IP)
+);
+
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+DEFINE_EVENT(random__extract_entropy, extract_entropy_user,
+ TP_PROTO(const char *pool_name, int nbytes, int entropy_count,
+ unsigned long IP),
+
+ TP_ARGS(pool_name, nbytes, entropy_count, IP)
+);
+
+
+
+#endif /* _TRACE_RANDOM_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
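
Events declared through DECLARE_EVENT_CLASS/DEFINE_EVENT become callable as trace_<name>(); the reworked random driver invokes them along these lines (a sketch, assuming a pool pointer r with a name field and the usual _RET_IP_ convention for the caller address):

	trace_mix_pool_bytes(r->name, nbytes, _RET_IP_);
	trace_credit_entropy_bits(r->name, nbits, entropy_count,
				  entropy_total, _RET_IP_);
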
diff --git a/include/trace/events/vmscan.h b/include/trace/events/vmscan.h
index b2c33bd..edc4b3d 100644
--- a/include/trace/events/vmscan.h
+++ b/include/trace/events/vmscan.h
@@ -179,6 +179,83 @@ DEFINE_EVENT(mm_vmscan_direct_reclaim_end_template, mm_vmscan_memcg_softlimit_re
TP_ARGS(nr_reclaimed)
);
+TRACE_EVENT(mm_shrink_slab_start,
+ TP_PROTO(struct shrinker *shr, struct shrink_control *sc,
+ long nr_objects_to_shrink, unsigned long pgs_scanned,
+ unsigned long lru_pgs, unsigned long cache_items,
+ unsigned long long delta, unsigned long total_scan),
+
+ TP_ARGS(shr, sc, nr_objects_to_shrink, pgs_scanned, lru_pgs,
+ cache_items, delta, total_scan),
+
+ TP_STRUCT__entry(
+ __field(struct shrinker *, shr)
+ __field(void *, shrink)
+ __field(long, nr_objects_to_shrink)
+ __field(gfp_t, gfp_flags)
+ __field(unsigned long, pgs_scanned)
+ __field(unsigned long, lru_pgs)
+ __field(unsigned long, cache_items)
+ __field(unsigned long long, delta)
+ __field(unsigned long, total_scan)
+ ),
+
+ TP_fast_assign(
+ __entry->shr = shr;
+ __entry->shrink = shr->shrink;
+ __entry->nr_objects_to_shrink = nr_objects_to_shrink;
+ __entry->gfp_flags = sc->gfp_mask;
+ __entry->pgs_scanned = pgs_scanned;
+ __entry->lru_pgs = lru_pgs;
+ __entry->cache_items = cache_items;
+ __entry->delta = delta;
+ __entry->total_scan = total_scan;
+ ),
+
+ TP_printk("%pF %p: objects to shrink %ld gfp_flags %s pgs_scanned %ld lru_pgs %ld cache items %ld delta %lld total_scan %ld",
+ __entry->shrink,
+ __entry->shr,
+ __entry->nr_objects_to_shrink,
+ show_gfp_flags(__entry->gfp_flags),
+ __entry->pgs_scanned,
+ __entry->lru_pgs,
+ __entry->cache_items,
+ __entry->delta,
+ __entry->total_scan)
+);
+
+TRACE_EVENT(mm_shrink_slab_end,
+ TP_PROTO(struct shrinker *shr, int shrinker_retval,
+ long unused_scan_cnt, long new_scan_cnt),
+
+ TP_ARGS(shr, shrinker_retval, unused_scan_cnt, new_scan_cnt),
+
+ TP_STRUCT__entry(
+ __field(struct shrinker *, shr)
+ __field(void *, shrink)
+ __field(long, unused_scan)
+ __field(long, new_scan)
+ __field(int, retval)
+ __field(long, total_scan)
+ ),
+
+ TP_fast_assign(
+ __entry->shr = shr;
+ __entry->shrink = shr->shrink;
+ __entry->unused_scan = unused_scan_cnt;
+ __entry->new_scan = new_scan_cnt;
+ __entry->retval = shrinker_retval;
+ __entry->total_scan = new_scan_cnt - unused_scan_cnt;
+ ),
+
+ TP_printk("%pF %p: unused scan count %ld new scan count %ld total_scan %ld last shrinker return val %d",
+ __entry->shrink,
+ __entry->shr,
+ __entry->unused_scan,
+ __entry->new_scan,
+ __entry->total_scan,
+ __entry->retval)
+);
DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
@@ -189,7 +266,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- int isolate_mode),
+ isolate_mode_t isolate_mode),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode),
@@ -201,7 +278,7 @@ DECLARE_EVENT_CLASS(mm_vmscan_lru_isolate_template,
__field(unsigned long, nr_lumpy_taken)
__field(unsigned long, nr_lumpy_dirty)
__field(unsigned long, nr_lumpy_failed)
- __field(int, isolate_mode)
+ __field(isolate_mode_t, isolate_mode)
),
TP_fast_assign(
@@ -235,7 +312,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_lru_isolate,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- int isolate_mode),
+ isolate_mode_t isolate_mode),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
@@ -250,7 +327,7 @@ DEFINE_EVENT(mm_vmscan_lru_isolate_template, mm_vmscan_memcg_isolate,
unsigned long nr_lumpy_taken,
unsigned long nr_lumpy_dirty,
unsigned long nr_lumpy_failed,
- int isolate_mode),
+ isolate_mode_t isolate_mode),
TP_ARGS(order, nr_requested, nr_scanned, nr_taken, nr_lumpy_taken, nr_lumpy_dirty, nr_lumpy_failed, isolate_mode)
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 9c9b754..b2e84bd 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -123,6 +123,19 @@ static inline struct cpuset *task_cs(struct task_struct *task)
struct cpuset, css);
}
+#ifdef CONFIG_NUMA
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return task->mempolicy;
+}
+#else
+static inline bool task_has_mempolicy(struct task_struct *task)
+{
+ return false;
+}
+#endif
+
+
/* bits in struct cpuset flags field */
typedef enum {
CS_CPU_EXCLUSIVE,
@@ -949,7 +962,8 @@ static void cpuset_migrate_mm(struct mm_struct *mm, const nodemask_t *from,
static void cpuset_change_task_nodemask(struct task_struct *tsk,
nodemask_t *newmems)
{
-repeat:
+ bool need_loop;
+
/*
* Allow tasks that have access to memory reserves because they have
* been OOM killed to get memory anywhere.
@@ -960,46 +974,27 @@ repeat:
return;
task_lock(tsk);
- nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
- mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
-
-
/*
- * ensure checking ->mems_allowed_change_disable after setting all new
- * allowed nodes.
- *
- * the read-side task can see an nodemask with new allowed nodes and
- * old allowed nodes. and if it allocates page when cpuset clears newly
- * disallowed ones continuous, it can see the new allowed bits.
- *
- * And if setting all new allowed nodes is after the checking, setting
- * all new allowed nodes and clearing newly disallowed ones will be done
- * continuous, and the read-side task may find no node to alloc page.
+ * Determine if a loop is necessary if another thread is doing
+ * get_mems_allowed(). If at least one node remains unchanged and
+ * tsk does not have a mempolicy, then an empty nodemask will not be
+ * possible when mems_allowed is larger than a word.
*/
- smp_mb();
+ need_loop = task_has_mempolicy(tsk) ||
+ !nodes_intersects(*newmems, tsk->mems_allowed);
- /*
- * Allocation of memory is very fast, we needn't sleep when waiting
- * for the read-side.
- */
- while (ACCESS_ONCE(tsk->mems_allowed_change_disable)) {
- task_unlock(tsk);
- if (!task_curr(tsk))
- yield();
- goto repeat;
- }
+ if (need_loop)
+ write_seqcount_begin(&tsk->mems_allowed_seq);
- /*
- * ensure checking ->mems_allowed_change_disable before clearing all new
- * disallowed nodes.
- *
- * if clearing newly disallowed bits before the checking, the read-side
- * task may find no node to alloc page.
- */
- smp_mb();
+ nodes_or(tsk->mems_allowed, tsk->mems_allowed, *newmems);
+ mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP1);
mpol_rebind_task(tsk, newmems, MPOL_REBIND_STEP2);
tsk->mems_allowed = *newmems;
+
+ if (need_loop)
+ write_seqcount_end(&tsk->mems_allowed_seq);
+
task_unlock(tsk);
}
diff --git a/kernel/fork.c b/kernel/fork.c
index fa6030d..158ca4f 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1001,6 +1001,9 @@ static int copy_signal(unsigned long clone_flags, struct task_struct *tsk)
#ifdef CONFIG_CGROUPS
init_rwsem(&sig->threadgroup_fork_lock);
#endif
+#ifdef CONFIG_CPUSETS
+ seqcount_init(&tsk->mems_allowed_seq);
+#endif
sig->oom_adj = current->signal->oom_adj;
sig->oom_score_adj = current->signal->oom_score_adj;
diff --git a/kernel/futex.c b/kernel/futex.c
index b2d51a7..e45efe7 100644
--- a/kernel/futex.c
+++ b/kernel/futex.c
@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
* @uaddr2: the pi futex we will take prior to returning to user-space
*
* The caller will wait on uaddr and will be requeued by futex_requeue() to
- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
- * complete the acquisition of the rt_mutex prior to returning to userspace.
- * This ensures the rt_mutex maintains an owner when it has waiters; without
- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
- * need to.
+ * uaddr2, which must be PI aware and distinct from uaddr. Normal wakeup will wake
+ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
+ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
+ * without one, the pi logic would not know which task to boost/deboost, if
+ * there was a need to.
*
* We call schedule in futex_wait_queue_me() when we enqueue and return there
* via the following:
@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
struct futex_q q = futex_q_init;
int res, ret;
+ if (uaddr == uaddr2)
+ return -EINVAL;
+
if (!bitset)
return -EINVAL;
@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* signal. futex_unlock_pi() will not destroy the lock_ptr nor
* the pi_state.
*/
- WARN_ON(!&q.pi_state);
+ WARN_ON(!q.pi_state);
pi_mutex = &q.pi_state->pi_mutex;
ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
debug_rt_mutex_free_waiter(&rt_waiter);
@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
* fault, unlock the rt_mutex and return the fault to userspace.
*/
if (ret == -EFAULT) {
- if (rt_mutex_owner(pi_mutex) == current)
+ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
rt_mutex_unlock(pi_mutex);
} else if (ret == -EINTR) {
/*
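
The WARN_ON change above fixes a check that could never fire: &q.pi_state is the address of a struct member, which is always non-NULL, so negating it is always false. Testing the pointer value itself is what was intended:

	WARN_ON(!&q.pi_state);	/* address of a member: never NULL, never warns */
	WARN_ON(!q.pi_state);	/* pointer value: warns when it is NULL */
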
diff --git a/kernel/irq/handle.c b/kernel/irq/handle.c
index 470d08c..10e0772 100644
--- a/kernel/irq/handle.c
+++ b/kernel/irq/handle.c
@@ -117,7 +117,7 @@ irqreturn_t
handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
{
irqreturn_t retval = IRQ_NONE;
- unsigned int random = 0, irq = desc->irq_data.irq;
+ unsigned int flags = 0, irq = desc->irq_data.irq;
do {
irqreturn_t res;
@@ -145,7 +145,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
/* Fall through to add to randomness */
case IRQ_HANDLED:
- random |= action->flags;
+ flags |= action->flags;
break;
default:
@@ -156,8 +156,7 @@ handle_irq_event_percpu(struct irq_desc *desc, struct irqaction *action)
action = action->next;
} while (action);
- if (random & IRQF_SAMPLE_RANDOM)
- add_interrupt_randomness(irq);
+ add_interrupt_randomness(irq, flags);
if (!noirqdebug)
note_interrupt(irq, desc, retval);
diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c
index df8136f..fa4a70e 100644
--- a/kernel/irq/manage.c
+++ b/kernel/irq/manage.c
@@ -886,22 +886,6 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new)
if (desc->irq_data.chip == &no_irq_chip)
return -ENOSYS;
- /*
- * Some drivers like serial.c use request_irq() heavily,
- * so we have to be careful not to interfere with a
- * running system.
- */
- if (new->flags & IRQF_SAMPLE_RANDOM) {
- /*
- * This function might sleep, we want to call it first,
- * outside of the atomic block.
- * Yes, this might clear the entropy pool if the wrong
- * driver is attempted to be loaded, without actually
- * installing a new handler, but is this really a problem,
- * only the sysadmin is able to do this.
- */
- rand_initialize_irq(irq);
- }
/*
* Check whether the interrupt nests into another interrupt
@@ -1325,7 +1309,6 @@ EXPORT_SYMBOL(free_irq);
* Flags:
*
* IRQF_SHARED Interrupt is shared
- * IRQF_SAMPLE_RANDOM The interrupt can be used for entropy
* IRQF_TRIGGER_* Specify active edge(s) or level
*
*/
diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
index 8884c27..32f1590 100644
--- a/kernel/power/hibernate.c
+++ b/kernel/power/hibernate.c
@@ -344,6 +344,7 @@ int hibernation_snapshot(int platform_mode)
goto Complete_devices;
suspend_console();
+ ftrace_stop();
pm_restrict_gfp_mask();
error = dpm_suspend(PMSG_FREEZE);
if (error)
@@ -369,6 +370,7 @@ int hibernation_snapshot(int platform_mode)
if (error || !in_suspend)
pm_restore_gfp_mask();
+ ftrace_start();
resume_console();
Complete_devices:
@@ -471,6 +473,7 @@ int hibernation_restore(int platform_mode)
pm_prepare_console();
suspend_console();
+ ftrace_stop();
pm_restrict_gfp_mask();
error = dpm_suspend_start(PMSG_QUIESCE);
if (!error) {
@@ -478,6 +481,7 @@ int hibernation_restore(int platform_mode)
dpm_resume_end(PMSG_RECOVER);
}
pm_restore_gfp_mask();
+ ftrace_start();
resume_console();
pm_restore_console();
return error;
@@ -504,6 +508,7 @@ int hibernation_platform_enter(void)
entering_platform_hibernation = true;
suspend_console();
+ ftrace_stop();
error = dpm_suspend_start(PMSG_HIBERNATE);
if (error) {
if (hibernation_ops->recover)
@@ -547,6 +552,7 @@ int hibernation_platform_enter(void)
Resume_devices:
entering_platform_hibernation = false;
dpm_resume_end(PMSG_RESTORE);
+ ftrace_start();
resume_console();
Close:
diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
index a8bf2e6..44c78d5 100644
--- a/kernel/power/suspend.c
+++ b/kernel/power/suspend.c
@@ -23,6 +23,7 @@
#include <linux/slab.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
+#include <linux/ftrace.h>
#include <trace/events/power.h>
#include "power.h"
@@ -213,6 +214,7 @@ int suspend_devices_and_enter(suspend_state_t state)
goto Close;
}
suspend_console();
+ ftrace_stop();
suspend_test_start();
error = dpm_suspend_start(PMSG_SUSPEND);
if (error) {
@@ -229,6 +231,7 @@ int suspend_devices_and_enter(suspend_state_t state)
suspend_test_start();
dpm_resume_end(PMSG_RESUME);
suspend_test_finish("resume devices");
+ ftrace_start();
resume_console();
Close:
if (suspend_ops->end)
diff --git a/kernel/time/ntp.c b/kernel/time/ntp.c
index f1eb182..61fc450 100644
--- a/kernel/time/ntp.c
+++ b/kernel/time/ntp.c
@@ -375,7 +375,9 @@ int second_overflow(unsigned long secs)
time_state = TIME_DEL;
break;
case TIME_INS:
- if (secs % 86400 == 0) {
+ if (!(time_status & STA_INS))
+ time_state = TIME_OK;
+ else if (secs % 86400 == 0) {
leap = -1;
time_state = TIME_OOP;
time_tai++;
@@ -384,7 +386,9 @@ int second_overflow(unsigned long secs)
}
break;
case TIME_DEL:
- if ((secs + 1) % 86400 == 0) {
+ if (!(time_status & STA_DEL))
+ time_state = TIME_OK;
+ else if ((secs + 1) % 86400 == 0) {
leap = 1;
time_tai--;
time_state = TIME_WAIT;
diff --git a/kernel/workqueue.c b/kernel/workqueue.c
index ee1845b..e88c924 100644
--- a/kernel/workqueue.c
+++ b/kernel/workqueue.c
@@ -3561,6 +3561,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
return notifier_from_errno(0);
}
+/*
+ * Workqueues should be brought up before normal priority CPU notifiers.
+ * This will be registered as a high priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_UP_PREPARE:
+ case CPU_UP_CANCELED:
+ case CPU_DOWN_FAILED:
+ case CPU_ONLINE:
+ return workqueue_cpu_callback(nfb, action, hcpu);
+ }
+ return NOTIFY_OK;
+}
+
+/*
+ * Workqueues should be brought down after normal priority CPU notifiers.
+ * This will be registered as a low priority CPU notifier.
+ */
+static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
+ unsigned long action,
+ void *hcpu)
+{
+ switch (action & ~CPU_TASKS_FROZEN) {
+ case CPU_DOWN_PREPARE:
+ case CPU_DYING:
+ case CPU_POST_DEAD:
+ return workqueue_cpu_callback(nfb, action, hcpu);
+ }
+ return NOTIFY_OK;
+}
+
#ifdef CONFIG_SMP
struct work_for_cpu {
@@ -3754,7 +3789,8 @@ static int __init init_workqueues(void)
unsigned int cpu;
int i;
- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
+ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
+ cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
/* initialize gcwqs */
for_each_gcwq_cpu(cpu) {
diff --git a/mm/compaction.c b/mm/compaction.c
index 9cec4f2..88d55f4 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -35,10 +35,6 @@ struct compact_control {
unsigned long migrate_pfn; /* isolate_migratepages search base */
bool sync; /* Synchronous migration */
- /* Account for isolated anon and file pages */
- unsigned long nr_anon;
- unsigned long nr_file;
-
unsigned int order; /* order a direct compactor needs */
int migratetype; /* MOVABLE, RECLAIMABLE etc */
struct zone *zone;
@@ -233,17 +229,13 @@ static void isolate_freepages(struct zone *zone,
static void acct_isolated(struct zone *zone, struct compact_control *cc)
{
struct page *page;
- unsigned int count[NR_LRU_LISTS] = { 0, };
+ unsigned int count[2] = { 0, };
- list_for_each_entry(page, &cc->migratepages, lru) {
- int lru = page_lru_base_type(page);
- count[lru]++;
- }
+ list_for_each_entry(page, &cc->migratepages, lru)
+ count[!!page_is_file_cache(page)]++;
- cc->nr_anon = count[LRU_ACTIVE_ANON] + count[LRU_INACTIVE_ANON];
- cc->nr_file = count[LRU_ACTIVE_FILE] + count[LRU_INACTIVE_FILE];
- __mod_zone_page_state(zone, NR_ISOLATED_ANON, cc->nr_anon);
- __mod_zone_page_state(zone, NR_ISOLATED_FILE, cc->nr_file);
+ __mod_zone_page_state(zone, NR_ISOLATED_ANON, count[0]);
+ __mod_zone_page_state(zone, NR_ISOLATED_FILE, count[1]);
}
/* Similar to reclaim, but different enough that they don't share logic */
@@ -279,6 +271,7 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
unsigned long last_pageblock_nr = 0, pageblock_nr;
unsigned long nr_scanned = 0, nr_isolated = 0;
struct list_head *migratelist = &cc->migratepages;
+ isolate_mode_t mode = ISOLATE_ACTIVE|ISOLATE_INACTIVE;
/* Do not scan outside zone boundaries */
low_pfn = max(cc->migrate_pfn, zone->zone_start_pfn);
@@ -393,8 +386,11 @@ static isolate_migrate_t isolate_migratepages(struct zone *zone,
continue;
}
+ if (!cc->sync)
+ mode |= ISOLATE_ASYNC_MIGRATE;
+
/* Try isolate the page */
- if (__isolate_lru_page(page, ISOLATE_BOTH, 0) != 0)
+ if (__isolate_lru_page(page, mode, 0) != 0)
continue;
VM_BUG_ON(PageTransCompound(page));
@@ -596,7 +592,7 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
nr_migrate = cc->nr_migratepages;
err = migrate_pages(&cc->migratepages, compaction_alloc,
(unsigned long)cc, false,
- cc->sync);
+ cc->sync ? MIGRATE_SYNC_LIGHT : MIGRATE_ASYNC);
update_nr_listpages(cc);
nr_remaining = cc->nr_migratepages;
diff --git a/mm/filemap.c b/mm/filemap.c
index b7d8603..10481eb 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -516,10 +516,13 @@ struct page *__page_cache_alloc(gfp_t gfp)
struct page *page;
if (cpuset_do_page_mem_spread()) {
- get_mems_allowed();
- n = cpuset_mem_spread_node();
- page = alloc_pages_exact_node(n, gfp, 0);
- put_mems_allowed();
+ unsigned int cpuset_mems_cookie;
+ do {
+ cpuset_mems_cookie = get_mems_allowed();
+ n = cpuset_mem_spread_node();
+ page = alloc_pages_exact_node(n, gfp, 0);
+ } while (!put_mems_allowed(cpuset_mems_cookie) && !page);
+
return page;
}
return alloc_pages(gfp, 0);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 05f8fd4..037f077 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -460,8 +460,10 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
struct zonelist *zonelist;
struct zone *zone;
struct zoneref *z;
+ unsigned int cpuset_mems_cookie;
- get_mems_allowed();
+retry_cpuset:
+ cpuset_mems_cookie = get_mems_allowed();
zonelist = huge_zonelist(vma, address,
htlb_alloc_mask, &mpol, &nodemask);
/*
@@ -488,10 +490,15 @@ static struct page *dequeue_huge_page_vma(struct hstate *h,
}
}
}
-err:
+
mpol_cond_put(mpol);
- put_mems_allowed();
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
return page;
+
+err:
+ mpol_cond_put(mpol);
+ return NULL;
}
static void update_and_free_page(struct hstate *h, struct page *page)
@@ -2294,6 +2301,22 @@ void unmap_hugepage_range(struct vm_area_struct *vma, unsigned long start,
{
mutex_lock(&vma->vm_file->f_mapping->i_mmap_mutex);
__unmap_hugepage_range(vma, start, end, ref_page);
+ /*
+ * Clear this flag so that x86's huge_pmd_share page_table_shareable
+ * test will fail on a vma being torn down, and not grab a page table
+ * on its way out. We're lucky that the flag has such an appropriate
+ * name, and can in fact be safely cleared here. We could clear it
+ * before the __unmap_hugepage_range above, but all that's necessary
+ * is to clear it before releasing the i_mmap_mutex below.
+ *
+ * This works because in the contexts this is called, the VMA is
+ * going to be destroyed. It is not vulnerable to madvise(DONTNEED)
+ * because madvise is not supported on hugetlbfs. The same applies
+ * for direct IO. unmap_hugepage_range() is only being called just
+ * before free_pgtables() so clearing VM_MAYSHARE will not cause
+ * surprises later.
+ */
+ vma->vm_flags &= ~VM_MAYSHARE;
mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
@@ -2846,9 +2869,14 @@ void hugetlb_change_protection(struct vm_area_struct *vma,
}
}
spin_unlock(&mm->page_table_lock);
- mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
-
+ /*
+ * Must flush TLB before releasing i_mmap_mutex: x86's huge_pmd_unshare
+ * may have cleared our pud entry and done put_page on the page table:
+ * once we release i_mmap_mutex, another task can do the final put_page
+ * and that page table be reused and filled with junk.
+ */
flush_tlb_range(vma, start, end);
+ mutex_unlock(&vma->vm_file->f_mapping->i_mmap_mutex);
}
int hugetlb_reserve_pages(struct inode *inode,
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index ffb99b4..57cdf5a 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -1251,7 +1251,8 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
struct list_head *dst,
unsigned long *scanned, int order,
- int mode, struct zone *z,
+ isolate_mode_t mode,
+ struct zone *z,
struct mem_cgroup *mem_cont,
int active, int file)
{
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 740c4f5..2f49dcf 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1334,8 +1334,8 @@ static int soft_offline_huge_page(struct page *page, int flags)
/* Keep page count to indicate a given hugepage is isolated. */
list_add(&hpage->lru, &pagelist);
- ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, 0,
- true);
+ ret = migrate_huge_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL, false,
+ MIGRATE_SYNC);
if (ret) {
struct page *page1, *page2;
list_for_each_entry_safe(page1, page2, &pagelist, lru)
@@ -1464,7 +1464,7 @@ int soft_offline_page(struct page *page, int flags)
page_is_file_cache(page));
list_add(&page->lru, &pagelist);
ret = migrate_pages(&pagelist, new_page, MPOL_MF_MOVE_ALL,
- 0, true);
+ false, MIGRATE_SYNC);
if (ret) {
putback_lru_pages(&pagelist);
pr_info("soft offline: %#lx: migration failed %d, type %lx\n",
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c46887b..ae5a3f2 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -747,7 +747,7 @@ do_migrate_range(unsigned long start_pfn, unsigned long end_pfn)
}
/* this function returns # of failed pages */
ret = migrate_pages(&source, hotremove_migrate_alloc, 0,
- true, true);
+ true, MIGRATE_SYNC);
if (ret)
putback_lru_pages(&source);
}
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 3dac2d1..cff919f 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -926,7 +926,7 @@ static int migrate_to_node(struct mm_struct *mm, int source, int dest,
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_node_page, dest,
- false, true);
+ false, MIGRATE_SYNC);
if (err)
putback_lru_pages(&pagelist);
}
@@ -1810,18 +1810,24 @@ struct page *
alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, int node)
{
- struct mempolicy *pol = get_vma_policy(current, vma, addr);
+ struct mempolicy *pol;
struct zonelist *zl;
struct page *page;
+ unsigned int cpuset_mems_cookie;
+
+retry_cpuset:
+ pol = get_vma_policy(current, vma, addr);
+ cpuset_mems_cookie = get_mems_allowed();
- get_mems_allowed();
if (unlikely(pol->mode == MPOL_INTERLEAVE)) {
unsigned nid;
nid = interleave_nid(pol, vma, addr, PAGE_SHIFT + order);
mpol_cond_put(pol);
page = alloc_page_interleave(gfp, order, nid);
- put_mems_allowed();
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
+
return page;
}
zl = policy_zonelist(gfp, pol, node);
@@ -1832,7 +1838,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
struct page *page = __alloc_pages_nodemask(gfp, order,
zl, policy_nodemask(gfp, pol));
__mpol_put(pol);
- put_mems_allowed();
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
return page;
}
/*
@@ -1840,7 +1847,8 @@ alloc_pages_vma(gfp_t gfp, int order, struct vm_area_struct *vma,
*/
page = __alloc_pages_nodemask(gfp, order, zl,
policy_nodemask(gfp, pol));
- put_mems_allowed();
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
return page;
}
@@ -1867,11 +1875,14 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
{
struct mempolicy *pol = current->mempolicy;
struct page *page;
+ unsigned int cpuset_mems_cookie;
if (!pol || in_interrupt() || (gfp & __GFP_THISNODE))
pol = &default_policy;
- get_mems_allowed();
+retry_cpuset:
+ cpuset_mems_cookie = get_mems_allowed();
+
/*
* No reference counting needed for current->mempolicy
* nor system default_policy
@@ -1882,7 +1893,10 @@ struct page *alloc_pages_current(gfp_t gfp, unsigned order)
page = __alloc_pages_nodemask(gfp, order,
policy_zonelist(gfp, pol, numa_node_id()),
policy_nodemask(gfp, pol));
- put_mems_allowed();
+
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
+
return page;
}
EXPORT_SYMBOL(alloc_pages_current);
diff --git a/mm/migrate.c b/mm/migrate.c
index 14d0a6a..480714b 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -220,6 +220,56 @@ out:
pte_unmap_unlock(ptep, ptl);
}
+#ifdef CONFIG_BLOCK
+/* Returns true if all buffers are successfully locked */
+static bool buffer_migrate_lock_buffers(struct buffer_head *head,
+ enum migrate_mode mode)
+{
+ struct buffer_head *bh = head;
+
+ /* Simple case, sync compaction */
+ if (mode != MIGRATE_ASYNC) {
+ do {
+ get_bh(bh);
+ lock_buffer(bh);
+ bh = bh->b_this_page;
+
+ } while (bh != head);
+
+ return true;
+ }
+
+ /* async case, we cannot block on lock_buffer so use trylock_buffer */
+ do {
+ get_bh(bh);
+ if (!trylock_buffer(bh)) {
+ /*
+ * We failed to lock the buffer and cannot stall in
+ * async migration. Release the taken locks
+ */
+ struct buffer_head *failed_bh = bh;
+ put_bh(failed_bh);
+ bh = head;
+ while (bh != failed_bh) {
+ unlock_buffer(bh);
+ put_bh(bh);
+ bh = bh->b_this_page;
+ }
+ return false;
+ }
+
+ bh = bh->b_this_page;
+ } while (bh != head);
+ return true;
+}
+#else
+static inline bool buffer_migrate_lock_buffers(struct buffer_head *head,
+ enum migrate_mode mode)
+{
+ return true;
+}
+#endif /* CONFIG_BLOCK */
+
/*
* Replace the page in the mapping.
*
@@ -229,7 +279,8 @@ out:
* 3 for pages with a mapping and PagePrivate/PagePrivate2 set.
*/
static int migrate_page_move_mapping(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page,
+ struct buffer_head *head, enum migrate_mode mode)
{
int expected_count;
void **pslot;
@@ -259,6 +310,20 @@ static int migrate_page_move_mapping(struct address_space *mapping,
}
/*
+ * In the async migration case of moving a page with buffers, lock the
+ * buffers using trylock before the mapping is moved. If the mapping
+ * were moved and we then failed to lock the buffers, we could not
+ * move the mapping back due to an elevated page count, and we would
+ * have to block waiting on other references to be dropped.
+ */
+ if (mode == MIGRATE_ASYNC && head &&
+ !buffer_migrate_lock_buffers(head, mode)) {
+ page_unfreeze_refs(page, expected_count);
+ spin_unlock_irq(&mapping->tree_lock);
+ return -EAGAIN;
+ }
+
+ /*
* Now we know that no one else is looking at the page.
*/
get_page(newpage); /* add cache reference */
@@ -415,13 +480,14 @@ EXPORT_SYMBOL(fail_migrate_page);
* Pages are locked upon entry and exit.
*/
int migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
{
int rc;
BUG_ON(PageWriteback(page)); /* Writeback must be complete */
- rc = migrate_page_move_mapping(mapping, newpage, page);
+ rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode);
if (rc)
return rc;
@@ -438,28 +504,28 @@ EXPORT_SYMBOL(migrate_page);
* exist.
*/
int buffer_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page, enum migrate_mode mode)
{
struct buffer_head *bh, *head;
int rc;
if (!page_has_buffers(page))
- return migrate_page(mapping, newpage, page);
+ return migrate_page(mapping, newpage, page, mode);
head = page_buffers(page);
- rc = migrate_page_move_mapping(mapping, newpage, page);
+ rc = migrate_page_move_mapping(mapping, newpage, page, head, mode);
if (rc)
return rc;
- bh = head;
- do {
- get_bh(bh);
- lock_buffer(bh);
- bh = bh->b_this_page;
-
- } while (bh != head);
+ /*
+ * In the async case, migrate_page_move_mapping locked the buffers
+ * with an IRQ-safe spinlock held. In the sync case, the buffers
+ * need to be locked now
+ */
+ if (mode != MIGRATE_ASYNC)
+ BUG_ON(!buffer_migrate_lock_buffers(head, mode));
ClearPagePrivate(page);
set_page_private(newpage, page_private(page));
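
Note: buffer_migrate_lock_buffers() above is an instance of the standard trylock-unwind idiom: on the first trylock failure, rewalk the list from the head up to the object that failed, dropping exactly the references and locks already taken. Stripped of the buffer-head details, the shape is (trylock_objs(), trylock_obj() and unlock_obj() are placeholders, not kernel APIs):

    /* Try to lock objs[0..n-1]; on failure, release objs[0..i-1]. */
    static bool trylock_objs(int n)
    {
            int i, j;

            for (i = 0; i < n; i++) {
                    if (!trylock_obj(i)) {
                            for (j = 0; j < i; j++)
                                    unlock_obj(j);  /* unwind what we took */
                            return false;
                    }
            }
            return true;
    }
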
@@ -536,10 +602,14 @@ static int writeout(struct address_space *mapping, struct page *page)
* Default handling if a filesystem does not provide a migration function.
*/
static int fallback_migrate_page(struct address_space *mapping,
- struct page *newpage, struct page *page)
+ struct page *newpage, struct page *page, enum migrate_mode mode)
{
- if (PageDirty(page))
+ if (PageDirty(page)) {
+ /* Only writeback pages in full synchronous migration */
+ if (mode != MIGRATE_SYNC)
+ return -EBUSY;
return writeout(mapping, page);
+ }
/*
* Buffers may be managed in a filesystem specific way.
@@ -549,7 +619,7 @@ static int fallback_migrate_page(struct address_space *mapping,
!try_to_release_page(page, GFP_KERNEL))
return -EAGAIN;
- return migrate_page(mapping, newpage, page);
+ return migrate_page(mapping, newpage, page, mode);
}
/*
@@ -564,7 +634,7 @@ static int fallback_migrate_page(struct address_space *mapping,
* == 0 - success
*/
static int move_to_new_page(struct page *newpage, struct page *page,
- int remap_swapcache, bool sync)
+ int remap_swapcache, enum migrate_mode mode)
{
struct address_space *mapping;
int rc;
@@ -585,29 +655,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
mapping = page_mapping(page);
if (!mapping)
- rc = migrate_page(mapping, newpage, page);
- else {
+ rc = migrate_page(mapping, newpage, page, mode);
+ else if (mapping->a_ops->migratepage)
/*
- * Do not writeback pages if !sync and migratepage is
- * not pointing to migrate_page() which is nonblocking
- * (swapcache/tmpfs uses migratepage = migrate_page).
+ * Most pages have a mapping and most filesystems provide a
+ * migratepage callback. Anonymous pages are part of swap
+ * space which also has its own migratepage callback. This
+ * is the most common path for page migration.
*/
- if (PageDirty(page) && !sync &&
- mapping->a_ops->migratepage != migrate_page)
- rc = -EBUSY;
- else if (mapping->a_ops->migratepage)
- /*
- * Most pages have a mapping and most filesystems
- * should provide a migration function. Anonymous
- * pages are part of swap space which also has its
- * own migration function. This is the most common
- * path for page migration.
- */
- rc = mapping->a_ops->migratepage(mapping,
- newpage, page);
- else
- rc = fallback_migrate_page(mapping, newpage, page);
- }
+ rc = mapping->a_ops->migratepage(mapping,
+ newpage, page, mode);
+ else
+ rc = fallback_migrate_page(mapping, newpage, page, mode);
if (rc) {
newpage->mapping = NULL;
@@ -621,38 +680,18 @@ static int move_to_new_page(struct page *newpage, struct page *page,
return rc;
}
-/*
- * Obtain the lock on page, remove all ptes and migrate the page
- * to the newly allocated page in newpage.
- */
-static int unmap_and_move(new_page_t get_new_page, unsigned long private,
- struct page *page, int force, bool offlining, bool sync)
+static int __unmap_and_move(struct page *page, struct page *newpage,
+ int force, bool offlining, enum migrate_mode mode)
{
- int rc = 0;
- int *result = NULL;
- struct page *newpage = get_new_page(page, private, &result);
+ int rc = -EAGAIN;
int remap_swapcache = 1;
int charge = 0;
struct mem_cgroup *mem;
struct anon_vma *anon_vma = NULL;
- if (!newpage)
- return -ENOMEM;
-
- if (page_count(page) == 1) {
- /* page was freed from under us. So we are done. */
- goto move_newpage;
- }
- if (unlikely(PageTransHuge(page)))
- if (unlikely(split_huge_page(page)))
- goto move_newpage;
-
- /* prepare cgroup just returns 0 or -ENOMEM */
- rc = -EAGAIN;
-
if (!trylock_page(page)) {
- if (!force || !sync)
- goto move_newpage;
+ if (!force || mode == MIGRATE_ASYNC)
+ goto out;
/*
* It's not safe for direct compaction to call lock_page.
@@ -668,7 +707,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
* altogether.
*/
if (current->flags & PF_MEMALLOC)
- goto move_newpage;
+ goto out;
lock_page(page);
}
@@ -697,10 +736,12 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
if (PageWriteback(page)) {
/*
- * For !sync, there is no point retrying as the retry loop
- * is expected to be too short for PageWriteback to be cleared
+ * Only in the case of a full synchronous migration is it
+ * necessary to wait for PageWriteback. In the async case,
+ * the retry loop is too short and in the sync-light case,
+ * the overhead of stalling is too much
*/
- if (!sync) {
+ if (mode != MIGRATE_SYNC) {
rc = -EBUSY;
goto uncharge;
}
@@ -771,7 +812,7 @@ static int unmap_and_move(new_page_t get_new_page, unsigned long private,
skip_unmap:
if (!page_mapped(page))
- rc = move_to_new_page(newpage, page, remap_swapcache, sync);
+ rc = move_to_new_page(newpage, page, remap_swapcache, mode);
if (rc && remap_swapcache)
remove_migration_ptes(page, page);
@@ -785,27 +826,53 @@ uncharge:
mem_cgroup_end_migration(mem, page, newpage, rc == 0);
unlock:
unlock_page(page);
+out:
+ return rc;
+}
-move_newpage:
+/*
+ * Obtain the lock on page, remove all ptes and migrate the page
+ * to the newly allocated page in newpage.
+ */
+static int unmap_and_move(new_page_t get_new_page, unsigned long private,
+ struct page *page, int force, bool offlining,
+ enum migrate_mode mode)
+{
+ int rc = 0;
+ int *result = NULL;
+ struct page *newpage = get_new_page(page, private, &result);
+
+ if (!newpage)
+ return -ENOMEM;
+
+ if (page_count(page) == 1) {
+ /* page was freed from under us. So we are done. */
+ goto out;
+ }
+
+ if (unlikely(PageTransHuge(page)))
+ if (unlikely(split_huge_page(page)))
+ goto out;
+
+ rc = __unmap_and_move(page, newpage, force, offlining, mode);
+out:
if (rc != -EAGAIN) {
- /*
- * A page that has been migrated has all references
- * removed and will be freed. A page that has not been
- * migrated will have kepts its references and be
- * restored.
- */
- list_del(&page->lru);
+ /*
+ * A page that has been migrated has all references
+ * removed and will be freed. A page that has not been
+ * migrated will have kept its references and be
+ * restored.
+ */
+ list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
putback_lru_page(page);
}
-
/*
* Move the new page to the LRU. If migration was not successful
* then this will free the page.
*/
putback_lru_page(newpage);
-
if (result) {
if (rc)
*result = rc;
@@ -835,7 +902,8 @@ move_newpage:
*/
static int unmap_and_move_huge_page(new_page_t get_new_page,
unsigned long private, struct page *hpage,
- int force, bool offlining, bool sync)
+ int force, bool offlining,
+ enum migrate_mode mode)
{
int rc = 0;
int *result = NULL;
@@ -848,7 +916,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
rc = -EAGAIN;
if (!trylock_page(hpage)) {
- if (!force || !sync)
+ if (!force || mode != MIGRATE_SYNC)
goto out;
lock_page(hpage);
}
@@ -859,7 +927,7 @@ static int unmap_and_move_huge_page(new_page_t get_new_page,
try_to_unmap(hpage, TTU_MIGRATION|TTU_IGNORE_MLOCK|TTU_IGNORE_ACCESS);
if (!page_mapped(hpage))
- rc = move_to_new_page(new_hpage, hpage, 1, sync);
+ rc = move_to_new_page(new_hpage, hpage, 1, mode);
if (rc)
remove_migration_ptes(hpage, hpage);
@@ -902,7 +970,7 @@ out:
*/
int migrate_pages(struct list_head *from,
new_page_t get_new_page, unsigned long private, bool offlining,
- bool sync)
+ enum migrate_mode mode)
{
int retry = 1;
int nr_failed = 0;
@@ -923,7 +991,7 @@ int migrate_pages(struct list_head *from,
rc = unmap_and_move(get_new_page, private,
page, pass > 2, offlining,
- sync);
+ mode);
switch(rc) {
case -ENOMEM:
@@ -953,7 +1021,7 @@ out:
int migrate_huge_pages(struct list_head *from,
new_page_t get_new_page, unsigned long private, bool offlining,
- bool sync)
+ enum migrate_mode mode)
{
int retry = 1;
int nr_failed = 0;
@@ -970,7 +1038,7 @@ int migrate_huge_pages(struct list_head *from,
rc = unmap_and_move_huge_page(get_new_page,
private, page, pass > 2, offlining,
- sync);
+ mode);
switch(rc) {
case -ENOMEM:
@@ -1099,7 +1167,7 @@ set_status:
err = 0;
if (!list_empty(&pagelist)) {
err = migrate_pages(&pagelist, new_page_node,
- (unsigned long)pm, 0, true);
+ (unsigned long)pm, 0, MIGRATE_SYNC);
if (err)
putback_lru_pages(&pagelist);
}
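
Note: with the enum threaded through, each migrate_pages() caller now states its blocking policy explicitly rather than passing an opaque boolean. The converted call shape, as used by the hotplug and mempolicy sites above (new_page_fn stands for whatever new_page_t allocator the caller supplies):

    err = migrate_pages(&pagelist, new_page_fn, private,
                        false,          /* offlining */
                        MIGRATE_SYNC);  /* willing to block on locks, writeback */
    if (err)
            putback_lru_pages(&pagelist);
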
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index 8d032de..71c7811 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -33,6 +33,24 @@
void __mmu_notifier_release(struct mm_struct *mm)
{
struct mmu_notifier *mn;
+ struct hlist_node *n;
+
+ /*
+ * RCU here will block mmu_notifier_unregister until
+ * ->release returns.
+ */
+ rcu_read_lock();
+ hlist_for_each_entry_rcu(mn, n, &mm->mmu_notifier_mm->list, hlist)
+ /*
+ * if ->release runs before mmu_notifier_unregister it
+ * must be handled as it's the only way for the driver
+ * to flush all existing sptes and stop the driver
+ * from establishing any more sptes before all the
+ * pages in the mm are freed.
+ */
+ if (mn->ops->release)
+ mn->ops->release(mn, mm);
+ rcu_read_unlock();
spin_lock(&mm->mmu_notifier_mm->lock);
while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) {
@@ -46,23 +64,6 @@ void __mmu_notifier_release(struct mm_struct *mm)
* mmu_notifier_unregister to return.
*/
hlist_del_init_rcu(&mn->hlist);
- /*
- * RCU here will block mmu_notifier_unregister until
- * ->release returns.
- */
- rcu_read_lock();
- spin_unlock(&mm->mmu_notifier_mm->lock);
- /*
- * if ->release runs before mmu_notifier_unregister it
- * must be handled as it's the only way for the driver
- * to flush all existing sptes and stop the driver
- * from establishing any more sptes before all the
- * pages in the mm are freed.
- */
- if (mn->ops->release)
- mn->ops->release(mn, mm);
- rcu_read_unlock();
- spin_lock(&mm->mmu_notifier_mm->lock);
}
spin_unlock(&mm->mmu_notifier_mm->lock);
@@ -284,16 +285,13 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
{
BUG_ON(atomic_read(&mm->mm_count) <= 0);
- spin_lock(&mm->mmu_notifier_mm->lock);
if (!hlist_unhashed(&mn->hlist)) {
- hlist_del_rcu(&mn->hlist);
-
/*
* RCU here will force exit_mmap to wait ->release to finish
* before freeing the pages.
*/
rcu_read_lock();
- spin_unlock(&mm->mmu_notifier_mm->lock);
+
/*
* exit_mmap will block in mmu_notifier_release to
* guarantee ->release is called before freeing the
@@ -302,8 +300,11 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm)
if (mn->ops->release)
mn->ops->release(mn, mm);
rcu_read_unlock();
- } else
+
+ spin_lock(&mm->mmu_notifier_mm->lock);
+ hlist_del_rcu(&mn->hlist);
spin_unlock(&mm->mmu_notifier_mm->lock);
+ }
/*
* Wait any running method to finish, of course including
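
Note: the reordering above changes when a driver's ->release callback runs relative to unhashing, so it is worth recalling the driver side of the contract. A minimal sketch of a notifier that supplies ->release, using the standard mmu_notifier API (the my_* names are illustrative):

    #include <linux/mmu_notifier.h>

    static void my_release(struct mmu_notifier *mn, struct mm_struct *mm)
    {
            /* flush all secondary ptes (sptes); after this returns, the
             * driver must not establish any new ones for this mm */
    }

    static const struct mmu_notifier_ops my_ops = {
            .release = my_release,
    };

    static struct mmu_notifier my_mn = { .ops = &my_ops };

    /* registered against an mm with mmu_notifier_register(&my_mn, mm) */
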
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index e2f474d..87b0a3f 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -1912,14 +1912,20 @@ static struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress,
- bool sync_migration)
+ int migratetype, bool sync_migration,
+ bool *deferred_compaction,
+ unsigned long *did_some_progress)
{
struct page *page;
- if (!order || compaction_deferred(preferred_zone))
+ if (!order)
return NULL;
+ if (compaction_deferred(preferred_zone)) {
+ *deferred_compaction = true;
+ return NULL;
+ }
+
current->flags |= PF_MEMALLOC;
*did_some_progress = try_to_compact_pages(zonelist, order, gfp_mask,
nodemask, sync_migration);
@@ -1947,7 +1953,13 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
* but not enough to satisfy watermarks.
*/
count_vm_event(COMPACTFAIL);
- defer_compaction(preferred_zone);
+
+ /*
+ * As async compaction considers a subset of pageblocks, only
+ * defer if the failure was a sync compaction failure.
+ */
+ if (sync_migration)
+ defer_compaction(preferred_zone);
cond_resched();
}
@@ -1959,8 +1971,9 @@ static inline struct page *
__alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
struct zonelist *zonelist, enum zone_type high_zoneidx,
nodemask_t *nodemask, int alloc_flags, struct zone *preferred_zone,
- int migratetype, unsigned long *did_some_progress,
- bool sync_migration)
+ int migratetype, bool sync_migration,
+ bool *deferred_compaction,
+ unsigned long *did_some_progress)
{
return NULL;
}
@@ -2110,6 +2123,7 @@ __alloc_pages_slowpath(gfp_t gfp_mask, unsigned int order,
unsigned long pages_reclaimed = 0;
unsigned long did_some_progress;
bool sync_migration = false;
+ bool deferred_compaction = false;
/*
* In the slowpath, we sanity check order to avoid ever trying to
@@ -2190,12 +2204,22 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
- migratetype, &did_some_progress,
- sync_migration);
+ migratetype, sync_migration,
+ &deferred_compaction,
+ &did_some_progress);
if (page)
goto got_pg;
sync_migration = true;
+ /*
+ * If compaction is deferred for high-order allocations, it is because
+ * sync compaction recently failed. If this is the case and the caller
+ * has requested the system not be heavily disrupted, fail the
+ * allocation now instead of entering direct reclaim
+ */
+ if (deferred_compaction && (gfp_mask & __GFP_NO_KSWAPD))
+ goto nopage;
+
/* Try direct reclaim and then allocating */
page = __alloc_pages_direct_reclaim(gfp_mask, order,
zonelist, high_zoneidx,
@@ -2266,8 +2290,9 @@ rebalance:
zonelist, high_zoneidx,
nodemask,
alloc_flags, preferred_zone,
- migratetype, &did_some_progress,
- sync_migration);
+ migratetype, sync_migration,
+ &deferred_compaction,
+ &did_some_progress);
if (page)
goto got_pg;
}
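
Note: __GFP_NO_KSWAPD is the marker for callers that would rather fail a speculative high-order allocation than disrupt the system; with the change above, such a caller now bails out before direct reclaim when sync compaction was recently deferred. A hedged sketch of such a request (transparent hugepage allocations are the motivating user; the exact THP gfp mask varies by version, so this is illustrative only):

    /* speculative, costly-order allocation: fail fast, do not stall */
    struct page *page = alloc_pages(GFP_HIGHUSER_MOVABLE | __GFP_NOWARN |
                                    __GFP_NO_KSWAPD, HPAGE_PMD_ORDER);
    if (!page)
            ;       /* fall back to base pages rather than push reclaim */
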
@@ -2291,8 +2316,9 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
{
enum zone_type high_zoneidx = gfp_zone(gfp_mask);
struct zone *preferred_zone;
- struct page *page;
+ struct page *page = NULL;
int migratetype = allocflags_to_migratetype(gfp_mask);
+ unsigned int cpuset_mems_cookie;
gfp_mask &= gfp_allowed_mask;
@@ -2311,15 +2337,15 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
if (unlikely(!zonelist->_zonerefs->zone))
return NULL;
- get_mems_allowed();
+retry_cpuset:
+ cpuset_mems_cookie = get_mems_allowed();
+
/* The preferred zone is used for statistics later */
first_zones_zonelist(zonelist, high_zoneidx,
nodemask ? : &cpuset_current_mems_allowed,
&preferred_zone);
- if (!preferred_zone) {
- put_mems_allowed();
- return NULL;
- }
+ if (!preferred_zone)
+ goto out;
/* First allocation attempt */
page = get_page_from_freelist(gfp_mask|__GFP_HARDWALL, nodemask, order,
@@ -2329,9 +2355,19 @@ __alloc_pages_nodemask(gfp_t gfp_mask, unsigned int order,
page = __alloc_pages_slowpath(gfp_mask, order,
zonelist, high_zoneidx, nodemask,
preferred_zone, migratetype);
- put_mems_allowed();
trace_mm_page_alloc(page, order, gfp_mask, migratetype);
+
+out:
+ /*
+ * When updating a task's mems_allowed, it is possible to race with
+ * parallel threads in such a way that an allocation can fail while
+ * the mask is being updated. If a page allocation is about to fail,
+ * check if the cpuset changed during allocation and if so, retry.
+ */
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !page))
+ goto retry_cpuset;
+
return page;
}
EXPORT_SYMBOL(__alloc_pages_nodemask);
@@ -2555,13 +2591,15 @@ void si_meminfo_node(struct sysinfo *val, int nid)
bool skip_free_areas_node(unsigned int flags, int nid)
{
bool ret = false;
+ unsigned int cpuset_mems_cookie;
if (!(flags & SHOW_MEM_FILTER_NODES))
goto out;
- get_mems_allowed();
- ret = !node_isset(nid, cpuset_current_mems_allowed);
- put_mems_allowed();
+ do {
+ cpuset_mems_cookie = get_mems_allowed();
+ ret = !node_isset(nid, cpuset_current_mems_allowed);
+ } while (!put_mems_allowed(cpuset_mems_cookie));
out:
return ret;
}
@@ -3441,25 +3479,33 @@ static void setup_zone_migrate_reserve(struct zone *zone)
if (page_to_nid(page) != zone_to_nid(zone))
continue;
- /* Blocks with reserved pages will never free, skip them. */
- block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
- if (pageblock_is_reserved(pfn, block_end_pfn))
- continue;
-
block_migratetype = get_pageblock_migratetype(page);
- /* If this block is reserved, account for it */
- if (reserve > 0 && block_migratetype == MIGRATE_RESERVE) {
- reserve--;
- continue;
- }
+ /* Only test what is necessary when the reserves are not met */
+ if (reserve > 0) {
+ /*
+ * Blocks with reserved pages will never free, skip
+ * them.
+ */
+ block_end_pfn = min(pfn + pageblock_nr_pages, end_pfn);
+ if (pageblock_is_reserved(pfn, block_end_pfn))
+ continue;
- /* Suitable for reserving if this block is movable */
- if (reserve > 0 && block_migratetype == MIGRATE_MOVABLE) {
- set_pageblock_migratetype(page, MIGRATE_RESERVE);
- move_freepages_block(zone, page, MIGRATE_RESERVE);
- reserve--;
- continue;
+ /* If this block is reserved, account for it */
+ if (block_migratetype == MIGRATE_RESERVE) {
+ reserve--;
+ continue;
+ }
+
+ /* Suitable for reserving if this block is movable */
+ if (block_migratetype == MIGRATE_MOVABLE) {
+ set_pageblock_migratetype(page,
+ MIGRATE_RESERVE);
+ move_freepages_block(zone, page,
+ MIGRATE_RESERVE);
+ reserve--;
+ continue;
+ }
}
/*
diff --git a/mm/slab.c b/mm/slab.c
index d96e223..a67f812 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -3218,12 +3218,10 @@ static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
if (in_interrupt() || (flags & __GFP_THISNODE))
return NULL;
nid_alloc = nid_here = numa_mem_id();
- get_mems_allowed();
if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
nid_alloc = cpuset_slab_spread_node();
else if (current->mempolicy)
nid_alloc = slab_node(current->mempolicy);
- put_mems_allowed();
if (nid_alloc != nid_here)
return ____cache_alloc_node(cachep, flags, nid_alloc);
return NULL;
@@ -3246,14 +3244,17 @@ static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
enum zone_type high_zoneidx = gfp_zone(flags);
void *obj = NULL;
int nid;
+ unsigned int cpuset_mems_cookie;
if (flags & __GFP_THISNODE)
return NULL;
- get_mems_allowed();
- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
+retry_cpuset:
+ cpuset_mems_cookie = get_mems_allowed();
+ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+
retry:
/*
* Look through allowed nodes for objects available
@@ -3306,7 +3307,9 @@ retry:
}
}
}
- put_mems_allowed();
+
+ if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
+ goto retry_cpuset;
return obj;
}
diff --git a/mm/slub.c b/mm/slub.c
index 10ab233..ae6e80e 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1457,6 +1457,7 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
struct zone *zone;
enum zone_type high_zoneidx = gfp_zone(flags);
struct page *page;
+ unsigned int cpuset_mems_cookie;
/*
* The defrag ratio allows a configuration of the tradeoffs between
@@ -1480,23 +1481,32 @@ static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
get_cycles() % 1024 > s->remote_node_defrag_ratio)
return NULL;
- get_mems_allowed();
- zonelist = node_zonelist(slab_node(current->mempolicy), flags);
- for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
- struct kmem_cache_node *n;
-
- n = get_node(s, zone_to_nid(zone));
-
- if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
- n->nr_partial > s->min_partial) {
- page = get_partial_node(n);
- if (page) {
- put_mems_allowed();
- return page;
+ do {
+ cpuset_mems_cookie = get_mems_allowed();
+ zonelist = node_zonelist(slab_node(current->mempolicy), flags);
+ for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
+ struct kmem_cache_node *n;
+
+ n = get_node(s, zone_to_nid(zone));
+
+ if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
+ n->nr_partial > s->min_partial) {
+ page = get_partial_node(n);
+ if (page) {
+ /*
+ * Return the object even if
+ * put_mems_allowed indicated that
+ * the cpuset mems_allowed was
+ * updated in parallel. It's a
+ * harmless race between the alloc
+ * and the cpuset update.
+ */
+ put_mems_allowed(cpuset_mems_cookie);
+ return page;
+ }
}
}
- }
- put_mems_allowed();
+ } while (!put_mems_allowed(cpuset_mems_cookie));
#endif
return NULL;
}
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 1b0ed36..5326f98 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -248,35 +248,66 @@ unsigned long shrink_slab(struct shrink_control *shrink,
list_for_each_entry(shrinker, &shrinker_list, list) {
unsigned long long delta;
- unsigned long total_scan;
- unsigned long max_pass;
+ long total_scan;
+ long max_pass;
+ int shrink_ret = 0;
+ long nr;
+ long new_nr;
max_pass = do_shrinker_shrink(shrinker, shrink, 0);
+ if (max_pass <= 0)
+ continue;
+
+ /*
+ * copy the current shrinker scan count into a local variable
+ * and zero it so that other concurrent shrinker invocations
+ * don't also do this scanning work.
+ */
+ do {
+ nr = shrinker->nr;
+ } while (cmpxchg(&shrinker->nr, nr, 0) != nr);
+
+ total_scan = nr;
delta = (4 * nr_pages_scanned) / shrinker->seeks;
delta *= max_pass;
do_div(delta, lru_pages + 1);
- shrinker->nr += delta;
- if (shrinker->nr < 0) {
+ total_scan += delta;
+ if (total_scan < 0) {
printk(KERN_ERR "shrink_slab: %pF negative objects to "
"delete nr=%ld\n",
- shrinker->shrink, shrinker->nr);
- shrinker->nr = max_pass;
+ shrinker->shrink, total_scan);
+ total_scan = max_pass;
}
/*
+ * We need to avoid excessive windup on filesystem shrinkers
+ * due to large numbers of GFP_NOFS allocations causing the
+ * shrinkers to return -1 all the time. This results in a large
+ * nr being built up so when a shrink that can do some work
+ * comes along it empties the entire cache due to nr >>>
+ * max_pass. This is bad for sustaining a working set in
+ * memory.
+ *
+ * Hence only allow the shrinker to scan the entire cache when
+ * a large delta change is calculated directly.
+ */
+ if (delta < max_pass / 4)
+ total_scan = min(total_scan, max_pass / 2);
+
+ /*
* Avoid risking looping forever due to too large nr value:
* never try to free more than twice the estimate number of
* freeable entries.
*/
- if (shrinker->nr > max_pass * 2)
- shrinker->nr = max_pass * 2;
+ if (total_scan > max_pass * 2)
+ total_scan = max_pass * 2;
- total_scan = shrinker->nr;
- shrinker->nr = 0;
+ trace_mm_shrink_slab_start(shrinker, shrink, nr,
+ nr_pages_scanned, lru_pages,
+ max_pass, delta, total_scan);
while (total_scan >= SHRINK_BATCH) {
long this_scan = SHRINK_BATCH;
- int shrink_ret;
int nr_before;
nr_before = do_shrinker_shrink(shrinker, shrink, 0);
@@ -292,7 +323,19 @@ unsigned long shrink_slab(struct shrink_control *shrink,
cond_resched();
}
- shrinker->nr += total_scan;
+ /*
+ * move the unused scan count back into the shrinker in a
+ * manner that handles concurrent updates. If we exhausted the
+ * scan, there is no need to do an update.
+ */
+ do {
+ nr = shrinker->nr;
+ new_nr = total_scan + nr;
+ if (total_scan <= 0)
+ break;
+ } while (cmpxchg(&shrinker->nr, nr, new_nr) != nr);
+
+ trace_mm_shrink_slab_end(shrinker, shrink_ret, nr, new_nr);
}
up_read(&shrinker_rwsem);
out:
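
Note: the rework above turns shrinker->nr into a lock-free pool of deferred work: each caller atomically claims the whole pending count, scans against a private copy, and merges any unused remainder back, so concurrent invocations never scan the same deferred work twice. The skeleton in isolation (counter stands in for shrinker->nr):

    long nr, new_nr, total_scan;

    /* claim: atomically read the pending count and zero it */
    do {
            nr = counter;
    } while (cmpxchg(&counter, nr, 0) != nr);
    total_scan = nr;

    /* ... do batches of work, decrementing total_scan ... */

    /* return the unused remainder, tolerating concurrent adders */
    do {
            nr = counter;
            new_nr = total_scan + nr;
    } while (total_scan > 0 && cmpxchg(&counter, nr, new_nr) != nr);
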
@@ -683,7 +726,13 @@ static enum page_references page_check_references(struct page *page,
*/
SetPageReferenced(page);
- if (referenced_page)
+ if (referenced_page || referenced_ptes > 1)
+ return PAGEREF_ACTIVATE;
+
+ /*
+ * Activate file-backed executable pages after first usage.
+ */
+ if (vm_flags & VM_EXEC)
return PAGEREF_ACTIVATE;
return PAGEREF_KEEP;
@@ -972,23 +1021,27 @@ keep_lumpy:
*
* returns 0 on success, -ve errno on failure.
*/
-int __isolate_lru_page(struct page *page, int mode, int file)
+int __isolate_lru_page(struct page *page, isolate_mode_t mode, int file)
{
+ bool all_lru_mode;
int ret = -EINVAL;
/* Only take pages on the LRU. */
if (!PageLRU(page))
return ret;
+ all_lru_mode = (mode & (ISOLATE_ACTIVE|ISOLATE_INACTIVE)) ==
+ (ISOLATE_ACTIVE|ISOLATE_INACTIVE);
+
/*
* When checking the active state, we need to be sure we are
* dealing with comparible boolean values. Take the logical not
* of each.
*/
- if (mode != ISOLATE_BOTH && (!PageActive(page) != !mode))
+ if (!all_lru_mode && !PageActive(page) != !(mode & ISOLATE_ACTIVE))
return ret;
- if (mode != ISOLATE_BOTH && page_is_file_cache(page) != file)
+ if (!all_lru_mode && !!page_is_file_cache(page) != file)
return ret;
/*
@@ -1001,6 +1054,43 @@ int __isolate_lru_page(struct page *page, int mode, int file)
ret = -EBUSY;
+ /*
+ * To minimise LRU disruption, the caller can indicate that it only
+ * wants to isolate pages it will be able to operate on without
+ * blocking - clean pages for the most part.
+ *
+ * ISOLATE_CLEAN means that only clean pages should be isolated. This
+ * is used by reclaim when it cannot write to backing storage
+ *
+ * ISOLATE_ASYNC_MIGRATE is used to indicate that it only wants pages
+ * that can be migrated without blocking
+ */
+ if (mode & (ISOLATE_CLEAN|ISOLATE_ASYNC_MIGRATE)) {
+ /* All the caller can do on PageWriteback is block */
+ if (PageWriteback(page))
+ return ret;
+
+ if (PageDirty(page)) {
+ struct address_space *mapping;
+
+ /* ISOLATE_CLEAN means only clean pages */
+ if (mode & ISOLATE_CLEAN)
+ return ret;
+
+ /*
+ * Only pages without mappings, or with a
+ * ->migratepage callback, can be migrated
+ * without blocking
+ */
+ mapping = page_mapping(page);
+ if (mapping && !mapping->a_ops->migratepage)
+ return ret;
+ }
+ }
+
+ if ((mode & ISOLATE_UNMAPPED) && page_mapped(page))
+ return ret;
+
if (likely(get_page_unless_zero(page))) {
/*
* Be careful not to clear PageLRU until after we're
@@ -1036,7 +1126,8 @@ int __isolate_lru_page(struct page *page, int mode, int file)
*/
static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
struct list_head *src, struct list_head *dst,
- unsigned long *scanned, int order, int mode, int file)
+ unsigned long *scanned, int order, isolate_mode_t mode,
+ int file)
{
unsigned long nr_taken = 0;
unsigned long nr_lumpy_taken = 0;
@@ -1111,7 +1202,7 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
* anon page which don't already have a swap slot is
* pointless.
*/
- if (nr_swap_pages <= 0 && PageAnon(cursor_page) &&
+ if (nr_swap_pages <= 0 && PageSwapBacked(cursor_page) &&
!PageSwapCache(cursor_page))
break;
@@ -1161,8 +1252,8 @@ static unsigned long isolate_lru_pages(unsigned long nr_to_scan,
static unsigned long isolate_pages_global(unsigned long nr,
struct list_head *dst,
unsigned long *scanned, int order,
- int mode, struct zone *z,
- int active, int file)
+ isolate_mode_t mode,
+ struct zone *z, int active, int file)
{
int lru = LRU_BASE;
if (active)
@@ -1408,6 +1499,7 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
unsigned long nr_taken;
unsigned long nr_anon;
unsigned long nr_file;
+ isolate_mode_t reclaim_mode = ISOLATE_INACTIVE;
while (unlikely(too_many_isolated(zone, file, sc))) {
congestion_wait(BLK_RW_ASYNC, HZ/10);
@@ -1418,15 +1510,21 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
}
set_reclaim_mode(priority, sc, false);
+ if (sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM)
+ reclaim_mode |= ISOLATE_ACTIVE;
+
lru_add_drain();
+
+ if (!sc->may_unmap)
+ reclaim_mode |= ISOLATE_UNMAPPED;
+ if (!sc->may_writepage)
+ reclaim_mode |= ISOLATE_CLEAN;
+
spin_lock_irq(&zone->lru_lock);
if (scanning_global_lru(sc)) {
- nr_taken = isolate_pages_global(nr_to_scan,
- &page_list, &nr_scanned, sc->order,
- sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
- ISOLATE_BOTH : ISOLATE_INACTIVE,
- zone, 0, file);
+ nr_taken = isolate_pages_global(nr_to_scan, &page_list,
+ &nr_scanned, sc->order, reclaim_mode, zone, 0, file);
zone->pages_scanned += nr_scanned;
if (current_is_kswapd())
__count_zone_vm_events(PGSCAN_KSWAPD, zone,
@@ -1435,12 +1533,9 @@ shrink_inactive_list(unsigned long nr_to_scan, struct zone *zone,
__count_zone_vm_events(PGSCAN_DIRECT, zone,
nr_scanned);
} else {
- nr_taken = mem_cgroup_isolate_pages(nr_to_scan,
- &page_list, &nr_scanned, sc->order,
- sc->reclaim_mode & RECLAIM_MODE_LUMPYRECLAIM ?
- ISOLATE_BOTH : ISOLATE_INACTIVE,
- zone, sc->mem_cgroup,
- 0, file);
+ nr_taken = mem_cgroup_isolate_pages(nr_to_scan, &page_list,
+ &nr_scanned, sc->order, reclaim_mode, zone,
+ sc->mem_cgroup, 0, file);
/*
* mem_cgroup_isolate_pages() keeps track of
* scanned pages on its own.
@@ -1542,19 +1637,26 @@ static void shrink_active_list(unsigned long nr_pages, struct zone *zone,
struct page *page;
struct zone_reclaim_stat *reclaim_stat = get_reclaim_stat(zone, sc);
unsigned long nr_rotated = 0;
+ isolate_mode_t reclaim_mode = ISOLATE_ACTIVE;
lru_add_drain();
+
+ if (!sc->may_unmap)
+ reclaim_mode |= ISOLATE_UNMAPPED;
+ if (!sc->may_writepage)
+ reclaim_mode |= ISOLATE_CLEAN;
+
spin_lock_irq(&zone->lru_lock);
if (scanning_global_lru(sc)) {
nr_taken = isolate_pages_global(nr_pages, &l_hold,
&pgscanned, sc->order,
- ISOLATE_ACTIVE, zone,
+ reclaim_mode, zone,
1, file);
zone->pages_scanned += pgscanned;
} else {
nr_taken = mem_cgroup_isolate_pages(nr_pages, &l_hold,
&pgscanned, sc->order,
- ISOLATE_ACTIVE, zone,
+ reclaim_mode, zone,
sc->mem_cgroup, 1, file);
/*
* mem_cgroup_isolate_pages() keeps track of
@@ -1747,23 +1849,16 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
u64 fraction[2], denominator;
enum lru_list l;
int noswap = 0;
- int force_scan = 0;
+ bool force_scan = false;
unsigned long nr_force_scan[2];
-
- anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
- file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
-
- if (((anon + file) >> priority) < SWAP_CLUSTER_MAX) {
- /* kswapd does zone balancing and need to scan this zone */
- if (scanning_global_lru(sc) && current_is_kswapd())
- force_scan = 1;
- /* memcg may have small limit and need to avoid priority drop */
- if (!scanning_global_lru(sc))
- force_scan = 1;
- }
+ /* kswapd does zone balancing and needs to scan this zone */
+ if (scanning_global_lru(sc) && current_is_kswapd() &&
+ zone->all_unreclaimable)
+ force_scan = true;
+ /* memcg may have small limit and needs to avoid priority drop */
+ if (!scanning_global_lru(sc))
+ force_scan = true;
/* If we have no swap space, do not bother scanning anon pages. */
if (!sc->may_swap || (nr_swap_pages <= 0)) {
@@ -1776,6 +1871,11 @@ static void get_scan_count(struct zone *zone, struct scan_control *sc,
goto out;
}
+ anon = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_ANON) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
+ file = zone_nr_lru_pages(zone, sc, LRU_ACTIVE_FILE) +
+ zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+
if (scanning_global_lru(sc)) {
free = zone_page_state(zone, NR_FREE_PAGES);
/* If we have very few page cache pages,
@@ -1912,8 +2012,9 @@ static inline bool should_continue_reclaim(struct zone *zone,
* inactive lists are large enough, continue reclaiming
*/
pages_for_compaction = (2UL << sc->order);
- inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON) +
- zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+ inactive_lru_pages = zone_nr_lru_pages(zone, sc, LRU_INACTIVE_FILE);
+ if (nr_swap_pages > 0)
+ inactive_lru_pages += zone_nr_lru_pages(zone, sc, LRU_INACTIVE_ANON);
if (sc->nr_reclaimed < pages_for_compaction &&
inactive_lru_pages > pages_for_compaction)
return true;
@@ -1985,6 +2086,42 @@ restart:
throttle_vm_writeout(sc->gfp_mask);
}
+/* Returns true if compaction should go ahead for a high-order request */
+static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
+{
+ unsigned long balance_gap, watermark;
+ bool watermark_ok;
+
+ /* Do not consider compaction for orders reclaim is meant to satisfy */
+ if (sc->order <= PAGE_ALLOC_COSTLY_ORDER)
+ return false;
+
+ /*
+ * Compaction takes time to run and there are potentially other
+ * callers using the pages just freed. Continue reclaiming until
+ * there is a buffer of free pages available to give compaction
+ * a reasonable chance of completing and allocating the page
+ */
+ balance_gap = min(low_wmark_pages(zone),
+ (zone->present_pages + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) /
+ KSWAPD_ZONE_BALANCE_GAP_RATIO);
+ watermark = high_wmark_pages(zone) + balance_gap + (2UL << sc->order);
+ watermark_ok = zone_watermark_ok_safe(zone, 0, watermark, 0, 0);
+
+ /*
+ * If compaction is deferred, reclaim up to a point where
+ * compaction will have a chance of success when re-enabled
+ */
+ if (compaction_deferred(zone))
+ return watermark_ok;
+
+ /* If compaction is not ready to start, keep reclaiming */
+ if (!compaction_suitable(zone, sc->order))
+ return false;
+
+ return watermark_ok;
+}
+
/*
* This is the direct reclaim path, for page-allocating processes. We only
* try to reclaim pages from zones which will satisfy the caller's allocation
@@ -2000,14 +2137,20 @@ restart:
*
* If a zone is deemed to be full of pinned pages then just give it a light
* scan then give up on it.
+ *
+ * This function returns true if a zone is being reclaimed for a costly
+ * high-order allocation and compaction is ready to begin. This indicates to
+ * the caller that it should consider retrying the allocation instead of
+ * further reclaim.
*/
-static void shrink_zones(int priority, struct zonelist *zonelist,
+static bool shrink_zones(int priority, struct zonelist *zonelist,
struct scan_control *sc)
{
struct zoneref *z;
struct zone *zone;
unsigned long nr_soft_reclaimed;
unsigned long nr_soft_scanned;
+ bool aborted_reclaim = false;
for_each_zone_zonelist_nodemask(zone, z, zonelist,
gfp_zone(sc->gfp_mask), sc->nodemask) {
@@ -2022,6 +2165,21 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
continue;
if (zone->all_unreclaimable && priority != DEF_PRIORITY)
continue; /* Let kswapd poll it */
+ if (COMPACTION_BUILD) {
+ /*
+ * If we already have plenty of memory free for
+ * compaction in this zone, don't free any more.
+ * Even though compaction is invoked for any
+ * non-zero order, only frequent costly order
+ * reclamation is disruptive enough to become a
+ * noticeable problem, like transparent huge page
+ * allocations.
+ */
+ if (compaction_ready(zone, sc)) {
+ aborted_reclaim = true;
+ continue;
+ }
+ }
/*
* This steals pages from memory cgroups over softlimit
* and returns the number of reclaimed pages and
@@ -2039,6 +2197,8 @@ static void shrink_zones(int priority, struct zonelist *zonelist,
shrink_zone(priority, zone, sc);
}
+
+ return aborted_reclaim;
}
static bool zone_reclaimable(struct zone *zone)
@@ -2092,8 +2252,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
struct zoneref *z;
struct zone *zone;
unsigned long writeback_threshold;
+ bool aborted_reclaim;
- get_mems_allowed();
delayacct_freepages_start();
if (scanning_global_lru(sc))
@@ -2103,7 +2263,8 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
sc->nr_scanned = 0;
if (!priority)
disable_swap_token(sc->mem_cgroup);
- shrink_zones(priority, zonelist, sc);
+ aborted_reclaim = shrink_zones(priority, zonelist, sc);
+
/*
* Don't shrink slabs when reclaiming memory from
* over limit cgroups
@@ -2155,7 +2316,6 @@ static unsigned long do_try_to_free_pages(struct zonelist *zonelist,
out:
delayacct_freepages_end();
- put_mems_allowed();
if (sc->nr_reclaimed)
return sc->nr_reclaimed;
@@ -2168,6 +2328,10 @@ out:
if (oom_killer_disabled)
return 0;
+ /* Aborted reclaim to try compaction? don't OOM, then */
+ if (aborted_reclaim)
+ return 1;
+
/* top priority shrink_zones still had more to do? don't OOM, then */
if (scanning_global_lru(sc) && !all_unreclaimable(zonelist, sc))
return 1;
@@ -2459,6 +2623,9 @@ loop_again:
high_wmark_pages(zone), 0, 0)) {
end_zone = i;
break;
+ } else {
+ /* If balanced, clear the congested flag */
+ zone_clear_flag(zone, ZONE_CONGESTED);
}
}
if (i < 0)
@@ -2695,7 +2862,10 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
* them before going back to sleep.
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- schedule();
+
+ if (!kthread_should_stop())
+ schedule();
+
set_pgdat_percpu_threshold(pgdat, calculate_pressure_threshold);
} else {
if (remaining)
@@ -2722,7 +2892,9 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
static int kswapd(void *p)
{
unsigned long order, new_order;
+ unsigned balanced_order;
int classzone_idx, new_classzone_idx;
+ int balanced_classzone_idx;
pg_data_t *pgdat = (pg_data_t*)p;
struct task_struct *tsk = current;
@@ -2753,7 +2925,9 @@ static int kswapd(void *p)
set_freezable();
order = new_order = 0;
+ balanced_order = 0;
classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
+ balanced_classzone_idx = classzone_idx;
for ( ; ; ) {
int ret;
@@ -2762,7 +2936,8 @@ static int kswapd(void *p)
* new request of a similar or harder type will succeed soon
* so consider going to sleep on the basis we reclaimed at
*/
- if (classzone_idx >= new_classzone_idx && order == new_order) {
+ if (balanced_classzone_idx >= new_classzone_idx &&
+ balanced_order == new_order) {
new_order = pgdat->kswapd_max_order;
new_classzone_idx = pgdat->classzone_idx;
pgdat->kswapd_max_order = 0;
@@ -2777,9 +2952,12 @@ static int kswapd(void *p)
order = new_order;
classzone_idx = new_classzone_idx;
} else {
- kswapd_try_to_sleep(pgdat, order, classzone_idx);
+ kswapd_try_to_sleep(pgdat, balanced_order,
+ balanced_classzone_idx);
order = pgdat->kswapd_max_order;
classzone_idx = pgdat->classzone_idx;
+ new_order = order;
+ new_classzone_idx = classzone_idx;
pgdat->kswapd_max_order = 0;
pgdat->classzone_idx = pgdat->nr_zones - 1;
}
@@ -2794,7 +2972,9 @@ static int kswapd(void *p)
*/
if (!ret) {
trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
- order = balance_pgdat(pgdat, order, &classzone_idx);
+ balanced_classzone_idx = classzone_idx;
+ balanced_order = balance_pgdat(pgdat, order,
+ &balanced_classzone_idx);
}
}
return 0;
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 20c18b7..6559013 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -78,7 +78,7 @@ void vm_events_fold_cpu(int cpu)
*
* vm_stat contains the global counters
*/
-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
+atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
EXPORT_SYMBOL(vm_stat);
#ifdef CONFIG_SMP
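
Note: __cacheline_aligned_in_smp (from <linux/cache.h>) is a no-op on UP builds; on SMP it aligns the object to a cache-line boundary so these hot, frequently-written counters do not false-share a line with whatever the linker would otherwise place next to them. Usage shape (hot_counter is illustrative):

    #include <linux/cache.h>

    /* hot write-mostly global: give it its own cache line on SMP */
    atomic_long_t hot_counter __cacheline_aligned_in_smp;
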
diff --git a/net/caif/caif_dev.c b/net/caif/caif_dev.c
index 5ba4366..804e50f 100644
--- a/net/caif/caif_dev.c
+++ b/net/caif/caif_dev.c
@@ -424,9 +424,9 @@ static int __init caif_device_init(void)
static void __exit caif_device_exit(void)
{
- unregister_pernet_subsys(&caif_net_ops);
unregister_netdevice_notifier(&caif_device_notifier);
dev_remove_pack(&caif_packet_type);
+ unregister_pernet_subsys(&caif_net_ops);
}
module_init(caif_device_init);
diff --git a/net/core/dev.c b/net/core/dev.c
index a71eafc..8235b81 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -1163,6 +1163,7 @@ static int __dev_open(struct net_device *dev)
net_dmaengine_get();
dev_set_rx_mode(dev);
dev_activate(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
return ret;
@@ -4730,6 +4731,7 @@ int dev_set_mac_address(struct net_device *dev, struct sockaddr *sa)
err = ops->ndo_set_mac_address(dev, sa);
if (!err)
call_netdevice_notifiers(NETDEV_CHANGEADDR, dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
return err;
}
EXPORT_SYMBOL(dev_set_mac_address);
@@ -5507,6 +5509,7 @@ int register_netdevice(struct net_device *dev)
dev_init_scheduler(dev);
dev_hold(dev);
list_netdevice(dev);
+ add_device_randomness(dev->dev_addr, dev->addr_len);
/* Notify protocols, that a new device appeared. */
ret = call_netdevice_notifiers(NETDEV_REGISTER, dev);
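
Note: all three dev.c call sites feed the (possibly hardware-unique) MAC address into the entropy input pool at device bring-up. add_device_randomness() mixes the bytes in without crediting any entropy, so it can only help; its shape in this series is:

    /* from <linux/random.h>: */
    void add_device_randomness(const void *buf, unsigned int size);

    add_device_randomness(dev->dev_addr, dev->addr_len);
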
diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
index abd936d..ac49ad5 100644
--- a/net/core/rtnetlink.c
+++ b/net/core/rtnetlink.c
@@ -647,6 +647,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
}
}
+static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
+{
+ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
+ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
+}
+
static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
const struct ifinfomsg *ifm)
{
@@ -655,7 +661,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
/* bugwards compatibility: ifi_change == 0 is treated as ~0 */
if (ifm->ifi_change)
flags = (flags & ifm->ifi_change) |
- (dev->flags & ~ifm->ifi_change);
+ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
return flags;
}
@@ -1298,6 +1304,7 @@ static int do_setlink(struct net_device *dev, struct ifinfomsg *ifm,
goto errout;
send_addr_notify = 1;
modified = 1;
+ add_device_randomness(dev->dev_addr, dev->addr_len);
}
if (tb[IFLA_MTU]) {
diff --git a/net/ipv4/cipso_ipv4.c b/net/ipv4/cipso_ipv4.c
index 2b3c23c..062876b 100644
--- a/net/ipv4/cipso_ipv4.c
+++ b/net/ipv4/cipso_ipv4.c
@@ -1725,8 +1725,10 @@ int cipso_v4_validate(const struct sk_buff *skb, unsigned char **option)
case CIPSO_V4_TAG_LOCAL:
/* This is a non-standard tag that we only allow for
* local connections, so if the incoming interface is
- * not the loopback device drop the packet. */
- if (!(skb->dev->flags & IFF_LOOPBACK)) {
+ * not the loopback device drop the packet. Further,
+ * there is no legitimate reason for setting this from
+ * userspace so reject it if skb is NULL. */
+ if (skb == NULL || !(skb->dev->flags & IFF_LOOPBACK)) {
err_offset = opt_iter;
goto validate_return_locked;
}
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index 58a944f..31741cf 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2409,7 +2409,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
/* Cap the max timeout in ms TCP will retry/retrans
* before giving up and aborting (ETIMEDOUT) a connection.
*/
- icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ if (val < 0)
+ err = -EINVAL;
+ else
+ icsk->icsk_user_timeout = msecs_to_jiffies(val);
break;
default:
err = -ENOPROTOOPT;
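
Note: the rejected case is a negative millisecond count, which previously passed through msecs_to_jiffies() and wrapped into an enormous timeout. From userspace the valid call looks like this (a sketch; fd is assumed to be a TCP socket, and older libc headers may lack TCP_USER_TIMEOUT):

    #include <stdio.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    unsigned int ms = 30000;        /* abort after 30s of unacked data */
    if (setsockopt(fd, IPPROTO_TCP, TCP_USER_TIMEOUT, &ms, sizeof(ms)))
            perror("setsockopt");   /* a negative value now gets EINVAL */
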
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index 6e33b79c..b76aa2d 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -5340,7 +5340,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
if (tp->copied_seq == tp->rcv_nxt &&
len - tcp_header_len <= tp->ucopy.len) {
#ifdef CONFIG_NET_DMA
- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+ if (tp->ucopy.task == current &&
+ sock_owned_by_user(sk) &&
+ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
copied_early = 1;
eaten = 1;
}
diff --git a/net/mac80211/mesh.c b/net/mac80211/mesh.c
index 29e9980..370aa94 100644
--- a/net/mac80211/mesh.c
+++ b/net/mac80211/mesh.c
@@ -490,6 +490,7 @@ void ieee80211_stop_mesh(struct ieee80211_sub_if_data *sdata)
del_timer_sync(&sdata->u.mesh.housekeeping_timer);
del_timer_sync(&sdata->u.mesh.mesh_path_root_timer);
+ del_timer_sync(&sdata->u.mesh.mesh_path_timer);
/*
* If the timer fired while we waited for it, it will have
* requeued the work. Now the work will be running again
diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c
index 47ee29f..e85b2487 100644
--- a/net/sched/sch_sfb.c
+++ b/net/sched/sch_sfb.c
@@ -556,6 +556,8 @@ static int sfb_dump(struct Qdisc *sch, struct sk_buff *skb)
sch->qstats.backlog = q->qdisc->qstats.backlog;
opts = nla_nest_start(skb, TCA_OPTIONS);
+ if (opts == NULL)
+ goto nla_put_failure;
NLA_PUT(skb, TCA_SFB_PARMS, sizeof(opt), &opt);
return nla_nest_end(skb, opts);
diff --git a/net/sctp/input.c b/net/sctp/input.c
index 741ed16..cd9eded 100644
--- a/net/sctp/input.c
+++ b/net/sctp/input.c
@@ -737,15 +737,12 @@ static void __sctp_unhash_endpoint(struct sctp_endpoint *ep)
epb = &ep->base;
- if (hlist_unhashed(&epb->node))
- return;
-
epb->hashent = sctp_ep_hashfn(epb->bind_addr.port);
head = &sctp_ep_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- __hlist_del(&epb->node);
+ hlist_del_init(&epb->node);
sctp_write_unlock(&head->lock);
}
@@ -826,7 +823,7 @@ static void __sctp_unhash_established(struct sctp_association *asoc)
head = &sctp_assoc_hashtable[epb->hashent];
sctp_write_lock(&head->lock);
- __hlist_del(&epb->node);
+ hlist_del_init(&epb->node);
sctp_write_unlock(&head->lock);
}
diff --git a/net/sctp/socket.c b/net/sctp/socket.c
index 4434853..b70a3ee 100644
--- a/net/sctp/socket.c
+++ b/net/sctp/socket.c
@@ -1160,8 +1160,14 @@ out_free:
SCTP_DEBUG_PRINTK("About to exit __sctp_connect() free asoc: %p"
" kaddrs: %p err: %d\n",
asoc, kaddrs, err);
- if (asoc)
+ if (asoc) {
+ /* sctp_primitive_ASSOCIATE may have added this association
+ * to the hash table; try to unhash it just in case. It's a noop
+ * if it wasn't hashed, so we're safe.
+ */
+ sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ }
return err;
}
@@ -1871,8 +1877,10 @@ SCTP_STATIC int sctp_sendmsg(struct kiocb *iocb, struct sock *sk,
goto out_unlock;
out_free:
- if (new_asoc)
+ if (new_asoc) {
+ sctp_unhash_established(asoc);
sctp_association_free(asoc);
+ }
out_unlock:
sctp_release_sock(sk);
diff --git a/net/sunrpc/rpcb_clnt.c b/net/sunrpc/rpcb_clnt.c
index e45d2fb..bf0a7f6 100644
--- a/net/sunrpc/rpcb_clnt.c
+++ b/net/sunrpc/rpcb_clnt.c
@@ -193,7 +193,7 @@ static int rpcb_create_local_unix(void)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create AF_LOCAL rpcbind "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
goto out;
}
@@ -242,7 +242,7 @@ static int rpcb_create_local_net(void)
if (IS_ERR(clnt)) {
dprintk("RPC: failed to create local rpcbind "
"client (errno %ld).\n", PTR_ERR(clnt));
- result = -PTR_ERR(clnt);
+ result = PTR_ERR(clnt);
goto out;
}
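
Note: both hunks fix the classic PTR_ERR double negation: ERR_PTR() packs a negative errno into a pointer and PTR_ERR() unpacks it still negative, so negating the result turns the error into a bogus positive value. The contract in miniature:

    #include <linux/err.h>

    void *p = ERR_PTR(-ENOMEM);     /* encode a negative errno */
    if (IS_ERR(p)) {
            long err = PTR_ERR(p);  /* err == -ENOMEM, already negative */
            /* correct:   result = PTR_ERR(p);
             * incorrect: result = -PTR_ERR(p);  (what was removed above) */
    }
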
diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
index b6bb225..c57f97f 100644
--- a/net/sunrpc/sched.c
+++ b/net/sunrpc/sched.c
@@ -713,7 +713,9 @@ void rpc_execute(struct rpc_task *task)
static void rpc_async_schedule(struct work_struct *work)
{
+ current->flags |= PF_FSTRANS;
__rpc_execute(container_of(work, struct rpc_task, u.tk_work));
+ current->flags &= ~PF_FSTRANS;
}
/**
diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
index 0867070..d0b5210 100644
--- a/net/sunrpc/xprtrdma/transport.c
+++ b/net/sunrpc/xprtrdma/transport.c
@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
int rc = 0;
if (!xprt->shutdown) {
+ current->flags |= PF_FSTRANS;
xprt_clear_connected(xprt);
dprintk("RPC: %s: %sconnect\n", __func__,
@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
out:
xprt_wake_pending_tasks(xprt, rc);
-
out_clear:
dprintk("RPC: %s: exit\n", __func__);
xprt_clear_connecting(xprt);
+ current->flags &= ~PF_FSTRANS;
}
/*
diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
index ea75079..554111f 100644
--- a/net/sunrpc/xprtsock.c
+++ b/net/sunrpc/xprtsock.c
@@ -1882,6 +1882,8 @@ static void xs_local_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
status = __sock_create(xprt->xprt_net, AF_LOCAL,
SOCK_STREAM, 0, &sock, 1);
@@ -1915,6 +1917,7 @@ static void xs_local_setup_socket(struct work_struct *work)
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
@@ -1957,6 +1960,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
/* Start by resetting any existing state */
xs_reset_transport(transport);
sock = xs_create_sock(xprt, transport,
@@ -1975,6 +1980,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
/*
@@ -2100,6 +2106,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
if (xprt->shutdown)
goto out;
+ current->flags |= PF_FSTRANS;
+
if (!sock) {
clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
sock = xs_create_sock(xprt, transport,
@@ -2149,6 +2157,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
case -EINPROGRESS:
case -EALREADY:
xprt_clear_connecting(xprt);
+ current->flags &= ~PF_FSTRANS;
return;
case -EINVAL:
/* Happens, for instance, if the user specified a link
@@ -2161,6 +2170,7 @@ out_eagain:
out:
xprt_clear_connecting(xprt);
xprt_wake_pending_tasks(xprt, status);
+ current->flags &= ~PF_FSTRANS;
}
/**
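
Note: every sunrpc hunk above applies the same bracket: mark the workqueue worker as inside a filesystem transaction for the duration of the connect/setup work, so that any memory reclaim it triggers cannot recurse into a filesystem that may itself be waiting on this RPC. The skeleton (note the early-return path in xs_tcp_setup_socket must clear the flag too, as the hunk above does):

    current->flags |= PF_FSTRANS;   /* reclaim must not re-enter the fs */
    /* ... transport setup that may allocate memory ... */
    current->flags &= ~PF_FSTRANS;

Later kernels replace this open-coded flag twiddling with the memalloc_nofs_save()/memalloc_nofs_restore() pair, which nests safely.
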
diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
index 788a12c..2ab7850 100644
--- a/net/wanrouter/wanmain.c
+++ b/net/wanrouter/wanmain.c
@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
* successfully, add it to the interface list.
*/
- if (dev->name == NULL) {
- err = -EINVAL;
- } else {
+#ifdef WANDEBUG
+ printk(KERN_INFO "%s: registering interface %s...\n",
+ wanrouter_modname, dev->name);
+#endif
- #ifdef WANDEBUG
- printk(KERN_INFO "%s: registering interface %s...\n",
- wanrouter_modname, dev->name);
- #endif
-
- err = register_netdev(dev);
- if (!err) {
- struct net_device *slave = NULL;
- unsigned long smp_flags=0;
-
- lock_adapter_irq(&wandev->lock, &smp_flags);
-
- if (wandev->dev == NULL) {
- wandev->dev = dev;
- } else {
- for (slave=wandev->dev;
- DEV_TO_SLAVE(slave);
- slave = DEV_TO_SLAVE(slave))
- DEV_TO_SLAVE(slave) = dev;
- }
- ++wandev->ndev;
-
- unlock_adapter_irq(&wandev->lock, &smp_flags);
- err = 0; /* done !!! */
- goto out;
+ err = register_netdev(dev);
+ if (!err) {
+ struct net_device *slave = NULL;
+ unsigned long smp_flags=0;
+
+ lock_adapter_irq(&wandev->lock, &smp_flags);
+
+ if (wandev->dev == NULL) {
+ wandev->dev = dev;
+ } else {
+ for (slave=wandev->dev;
+ DEV_TO_SLAVE(slave);
+ slave = DEV_TO_SLAVE(slave))
+ DEV_TO_SLAVE(slave) = dev;
}
+ ++wandev->ndev;
+
+ unlock_adapter_irq(&wandev->lock, &smp_flags);
+ err = 0; /* done !!! */
+ goto out;
}
if (wandev->del_if)
wandev->del_if(wandev, dev);
diff --git a/net/wireless/core.c b/net/wireless/core.c
index 880dbe2..498c760 100644
--- a/net/wireless/core.c
+++ b/net/wireless/core.c
@@ -959,6 +959,11 @@ static int cfg80211_netdev_notifier_call(struct notifier_block * nb,
*/
synchronize_rcu();
INIT_LIST_HEAD(&wdev->list);
+ /*
+ * Ensure that all events have been processed and
+ * freed.
+ */
+ cfg80211_process_wdev_events(wdev);
break;
case NETDEV_PRE_UP:
if (!(wdev->wiphy->interface_modes & BIT(wdev->iftype)))
diff --git a/net/wireless/core.h b/net/wireless/core.h
index a570ff9..8351645 100644
--- a/net/wireless/core.h
+++ b/net/wireless/core.h
@@ -426,6 +426,7 @@ int cfg80211_change_iface(struct cfg80211_registered_device *rdev,
struct net_device *dev, enum nl80211_iftype ntype,
u32 *flags, struct vif_params *params);
void cfg80211_process_rdev_events(struct cfg80211_registered_device *rdev);
+void cfg80211_process_wdev_events(struct wireless_dev *wdev);
int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
struct wireless_dev *wdev,
diff --git a/net/wireless/util.c b/net/wireless/util.c
index bbcb58e..18e22be 100644
--- a/net/wireless/util.c
+++ b/net/wireless/util.c
@@ -719,7 +719,7 @@ void cfg80211_upload_connect_keys(struct wireless_dev *wdev)
wdev->connect_keys = NULL;
}
-static void cfg80211_process_wdev_events(struct wireless_dev *wdev)
+void cfg80211_process_wdev_events(struct wireless_dev *wdev)
{
struct cfg80211_event *ev;
unsigned long flags;
@@ -975,6 +975,9 @@ int cfg80211_can_change_interface(struct cfg80211_registered_device *rdev,
}
mutex_unlock(&rdev->devlist_mtx);
+ if (total == 1)
+ return 0;
+
for (i = 0; i < rdev->wiphy.n_iface_combinations; i++) {
const struct ieee80211_iface_combination *c;
struct ieee80211_iface_limit *limits;
diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
index 2af0999..74f5a3d 100644
--- a/sound/drivers/mpu401/mpu401_uart.c
+++ b/sound/drivers/mpu401/mpu401_uart.c
@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
spin_lock_init(&mpu->output_lock);
spin_lock_init(&mpu->timer_lock);
mpu->hardware = hardware;
+ mpu->irq = -1;
if (! (info_flags & MPU401_INFO_INTEGRATED)) {
int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
mpu->res = request_region(port, res_size, "MPU401 UART");
diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
index baa7a49..8d288a7 100644
--- a/sound/pci/hda/patch_realtek.c
+++ b/sound/pci/hda/patch_realtek.c
@@ -20133,6 +20133,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
{ .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
{ .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
{ .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
+ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
{ .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
.patch = patch_alc861 },
{ .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
index c850e3d..f16f587 100644
--- a/sound/soc/codecs/wm8962.c
+++ b/sound/soc/codecs/wm8962.c
@@ -2890,6 +2890,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
/* VMID 2*250k */
snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
WM8962_VMID_SEL_MASK, 0x100);
+
+ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
+ msleep(100);
break;
case SND_SOC_BIAS_OFF:
diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
index 2194912..1f7616d 100644
--- a/sound/soc/codecs/wm8994.c
+++ b/sound/soc/codecs/wm8994.c
@@ -2127,7 +2127,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- bclk_rate = params_rate(params) * 2;
+ bclk_rate = params_rate(params) * 4;
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
bclk_rate *= 16;
diff --git a/sound/usb/clock.c b/sound/usb/clock.c
index 075195e..f0ff776 100644
--- a/sound/usb/clock.c
+++ b/sound/usb/clock.c
@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
return 0;
/* If a clock source can't tell us whether it's valid, we assume it is */
- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
+ if (!uac2_control_is_readable(cs_desc->bmControls,
+ UAC2_CS_CONTROL_CLOCK_VALID - 1))
return 1;
err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,