author     KalimochoAz <calimochoazucarado@gmail.com>   2012-10-31 22:54:11 +0100
committer  KalimochoAz <calimochoazucarado@gmail.com>   2012-10-31 22:54:11 +0100
commit     cb65a3eb390b7cc995c4aa6b7bff32a4696242e6 (patch)
tree       9a8a12cc439b49e5fc60283c5f9008e8beb47cec
parent     2e8a7ef975522bc50b765f07ff297b09709515f0 (diff)
parent     27d0858dbcf199838b8c50a3e94d397bf326d986 (diff)
Merge kernel.org linux-3.0.y into HEAD
99 files changed, 1198 insertions(+), 391 deletions(-)
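To review this merge from a local checkout rather than through the web view, a minimal sketch with stock git commands follows. The clone URL is an assumption (substitute any kernel_samsung_crespo mirror); the commit and parent hashes are taken from the header above:

    # Clone URL is an assumption; any kernel_samsung_crespo mirror works.
    git clone https://github.com/CyanogenMod/android_kernel_samsung_crespo.git
    cd android_kernel_samsung_crespo

    # Commit message and metadata for the merge.
    git log -1 cb65a3eb390b7cc995c4aa6b7bff32a4696242e6

    # What the merge brought in, relative to the first parent
    # (reproduces the file/insertion/deletion stat above).
    git diff --stat 2e8a7ef975522bc50b765f07ff297b09709515f0 \
                    cb65a3eb390b7cc995c4aa6b7bff32a4696242e6

    # Full patch text for line-by-line review.
    git diff 2e8a7ef975522bc50b765f07ff297b09709515f0 \
             cb65a3eb390b7cc995c4aa6b7bff32a4696242e6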
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt index aa47be7..397ee05 100644 --- a/Documentation/kernel-parameters.txt +++ b/Documentation/kernel-parameters.txt @@ -1764,6 +1764,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted. noresidual [PPC] Don't use residual data on PReP machines. + nordrand [X86] Disable the direct use of the RDRAND + instruction even if it is supported by the + processor. RDRAND is still available to user + space applications. + noresume [SWSUSP] Disables resume and restores original swap space. @@ -1,6 +1,6 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 46 +SUBLEVEL = 50 EXTRAVERSION = NAME = Sneaky Weasel diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig index 310434b..ed96fa0 100644 --- a/arch/arm/Kconfig +++ b/arch/arm/Kconfig @@ -1262,6 +1262,16 @@ config PL310_ERRATA_769419 on systems with an outer cache, the store buffer is drained explicitly. +config ARM_ERRATA_775420 + bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock" + depends on CPU_V7 + help + This option enables the workaround for the 775420 Cortex-A9 (r2p2, + r2p6,r2p8,r2p10,r3p0) erratum. In case a date cache maintenance + operation aborts with MMU exception, it might cause the processor + to deadlock. This workaround puts DSB before executing ISB if + an abort may occur on cache maintenance. + endmenu source "arch/arm/common/Kconfig" diff --git a/arch/arm/include/asm/vfpmacros.h b/arch/arm/include/asm/vfpmacros.h index 3d5fc41..bf53047 100644 --- a/arch/arm/include/asm/vfpmacros.h +++ b/arch/arm/include/asm/vfpmacros.h @@ -28,7 +28,7 @@ ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] tst \tmp, #HWCAP_VFPv3D16 - ldceq p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} + ldceql p11, cr0, [\base],#32*4 @ FLDMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 @@ -52,7 +52,7 @@ ldr \tmp, =elf_hwcap @ may not have MVFR regs ldr \tmp, [\tmp, #0] tst \tmp, #HWCAP_VFPv3D16 - stceq p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} + stceql p11, cr0, [\base],#32*4 @ FSTMIAD \base!, {d16-d31} addne \base, \base, #32*4 @ step over unused register space #else VFPFMRX \tmp, MVFR0 @ Media and VFP Feature Register 0 diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c index 84953bb..181baa2 100644 --- a/arch/arm/kernel/smp.c +++ b/arch/arm/kernel/smp.c @@ -278,18 +278,24 @@ static void __cpuinit smp_store_cpu_info(unsigned int cpuid) asmlinkage void __cpuinit secondary_start_kernel(void) { struct mm_struct *mm = &init_mm; - unsigned int cpu = smp_processor_id(); + unsigned int cpu; + + /* + * The identity mapping is uncached (strongly ordered), so + * switch away from it before attempting any exclusive accesses. + */ + cpu_switch_mm(mm->pgd, mm); + enter_lazy_tlb(mm, current); + local_flush_tlb_all(); /* * All kernel threads share the same mm context; grab a * reference and switch to it. 
*/ + cpu = smp_processor_id(); atomic_inc(&mm->mm_count); current->active_mm = mm; cpumask_set_cpu(cpu, mm_cpumask(mm)); - cpu_switch_mm(mm->pgd, mm); - enter_lazy_tlb(mm, current); - local_flush_tlb_all(); printk("CPU%u: Booted secondary processor\n", cpu); diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c index 7227755..871a818 100644 --- a/arch/arm/mach-at91/at91rm9200_devices.c +++ b/arch/arm/mach-at91/at91rm9200_devices.c @@ -454,7 +454,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91rm9200_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c index 39f81f4..89a8414 100644 --- a/arch/arm/mach-at91/at91sam9260_devices.c +++ b/arch/arm/mach-at91/at91sam9260_devices.c @@ -459,7 +459,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9260_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c index 5004bf0..5d43cf4 100644 --- a/arch/arm/mach-at91/at91sam9261_devices.c +++ b/arch/arm/mach-at91/at91sam9261_devices.c @@ -276,7 +276,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9261_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c index a050f41..2bbd163 100644 --- a/arch/arm/mach-at91/at91sam9263_devices.c +++ b/arch/arm/mach-at91/at91sam9263_devices.c @@ -534,7 +534,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9263_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mach-at91/at91sam9rl_devices.c b/arch/arm/mach-at91/at91sam9rl_devices.c index aacb19d..659870e 100644 --- a/arch/arm/mach-at91/at91sam9rl_devices.c +++ b/arch/arm/mach-at91/at91sam9rl_devices.c @@ -319,7 +319,7 @@ static struct i2c_gpio_platform_data pdata = { static struct platform_device at91sam9rl_twi_device = { .name = "i2c-gpio", - .id = -1, + .id = 0, .dev.platform_data = &pdata, }; diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S index 1ed1fd3..428b243 100644 --- a/arch/arm/mm/cache-v7.S +++ b/arch/arm/mm/cache-v7.S @@ -211,6 +211,9 @@ ENTRY(v7_coherent_user_range) * isn't mapped, just try the next page. */ 9001: +#ifdef CONFIG_ARM_ERRATA_775420 + dsb +#endif mov r12, r12, lsr #12 mov r12, r12, lsl #12 add r12, r12, #4096 diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c index f4546e9..23817a6 100644 --- a/arch/mips/kernel/kgdb.c +++ b/arch/mips/kernel/kgdb.c @@ -283,6 +283,15 @@ static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd, struct pt_regs *regs = args->regs; int trap = (regs->cp0_cause & 0x7c) >> 2; +#ifdef CONFIG_KPROBES + /* + * Return immediately if the kprobes fault notifier has set + * DIE_PAGE_FAULT. + */ + if (cmd == DIE_PAGE_FAULT) + return NOTIFY_DONE; +#endif /* CONFIG_KPROBES */ + /* Userspace events, ignore. 
*/ if (user_mode(regs)) return NOTIFY_DONE; diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c index 6860d40..904ed63 100644 --- a/arch/sparc/kernel/perf_event.c +++ b/arch/sparc/kernel/perf_event.c @@ -513,11 +513,13 @@ static u64 nop_for_index(int idx) static inline void sparc_pmu_enable_event(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc, int idx) { - u64 val, mask = mask_for_index(idx); + u64 enc, val, mask = mask_for_index(idx); + + enc = perf_event_get_enc(cpuc->events[idx]); val = cpuc->pcr; val &= ~mask; - val |= hwc->config; + val |= event_encoding(enc, idx); cpuc->pcr = val; pcr_ops->write(cpuc->pcr); @@ -1380,8 +1382,6 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] + STACK_BIAS; do { struct sparc_stackf *usf, sf; @@ -1402,8 +1402,6 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, { unsigned long ufp; - perf_callchain_store(entry, regs->tpc); - ufp = regs->u_regs[UREG_I6] & 0xffffffffUL; do { struct sparc_stackf32 *usf, sf; @@ -1422,6 +1420,11 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry, void perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) { + perf_callchain_store(entry, regs->tpc); + + if (!current->mm) + return; + flushw_user(); if (test_thread_flag(TIF_32BIT)) perf_callchain_user_32(entry, regs); diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c index 908b47a..10c9b36 100644 --- a/arch/sparc/kernel/sys_sparc_64.c +++ b/arch/sparc/kernel/sys_sparc_64.c @@ -519,12 +519,12 @@ SYSCALL_DEFINE1(sparc64_personality, unsigned long, personality) { int ret; - if (current->personality == PER_LINUX32 && - personality == PER_LINUX) - personality = PER_LINUX32; + if (personality(current->personality) == PER_LINUX32 && + personality(personality) == PER_LINUX) + personality |= PER_LINUX32; ret = sys_personality(personality); - if (ret == PER_LINUX32) - ret = PER_LINUX; + if (personality(ret) == PER_LINUX32) + ret &= ~PER_LINUX32; return ret; } diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 1d7e274..7f5f65d 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -212,24 +212,20 @@ linux_sparc_syscall: 3: stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] ret_sys_call: ldx [%sp + PTREGS_OFF + PT_V9_TSTATE], %g3 - ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc sra %o0, 0, %o0 mov %ulo(TSTATE_XCARRY | TSTATE_ICARRY), %g2 sllx %g2, 32, %g2 - /* Check if force_successful_syscall_return() - * was invoked. - */ - ldub [%g6 + TI_SYS_NOERROR], %l2 - brnz,a,pn %l2, 80f - stb %g0, [%g6 + TI_SYS_NOERROR] - cmp %o0, -ERESTART_RESTARTBLOCK bgeu,pn %xcc, 1f - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 -80: + andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc + +2: + stb %g0, [%g6 + TI_SYS_NOERROR] /* System call success, clear Carry condition code. */ andn %g3, %g2, %g3 +3: stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] bne,pn %icc, linux_syscall_trace2 add %l1, 0x4, %l2 ! npc = npc+4 @@ -238,20 +234,20 @@ ret_sys_call: stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] 1: + /* Check if force_successful_syscall_return() + * was invoked. + */ + ldub [%g6 + TI_SYS_NOERROR], %l2 + brnz,pn %l2, 2b + ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! 
pc = npc /* System call failure, set Carry condition code. * Also, get abs(errno) to return to the process. */ - andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %l6 sub %g0, %o0, %o0 - or %g3, %g2, %g3 stx %o0, [%sp + PTREGS_OFF + PT_V9_I0] - stx %g3, [%sp + PTREGS_OFF + PT_V9_TSTATE] - bne,pn %icc, linux_syscall_trace2 - add %l1, 0x4, %l2 ! npc = npc+4 - stx %l1, [%sp + PTREGS_OFF + PT_V9_TPC] + ba,pt %xcc, 3b + or %g3, %g2, %g3 - b,pt %xcc, rtrap - stx %l2, [%sp + PTREGS_OFF + PT_V9_TNPC] linux_syscall_trace2: call syscall_trace_leave add %sp, PTREGS_OFF, %o0 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 8e073d8..6ff4d78 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -2118,6 +2118,9 @@ EXPORT_SYMBOL(_PAGE_CACHE); #ifdef CONFIG_SPARSEMEM_VMEMMAP unsigned long vmemmap_table[VMEMMAP_SIZE]; +static long __meminitdata addr_start, addr_end; +static int __meminitdata node_start; + int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) { unsigned long vstart = (unsigned long) start; @@ -2148,15 +2151,30 @@ int __meminit vmemmap_populate(struct page *start, unsigned long nr, int node) *vmem_pp = pte_base | __pa(block); - printk(KERN_INFO "[%p-%p] page_structs=%lu " - "node=%d entry=%lu/%lu\n", start, block, nr, - node, - addr >> VMEMMAP_CHUNK_SHIFT, - VMEMMAP_SIZE); + /* check to see if we have contiguous blocks */ + if (addr_end != addr || node_start != node) { + if (addr_start) + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = addr; + node_start = node; + } + addr_end = addr + VMEMMAP_CHUNK; } } return 0; } + +void __meminit vmemmap_populate_print_last(void) +{ + if (addr_start) { + printk(KERN_DEBUG " [%lx-%lx] on node %d\n", + addr_start, addr_end-1, node_start); + addr_start = 0; + addr_end = 0; + node_start = 0; + } +} #endif /* CONFIG_SPARSEMEM_VMEMMAP */ static void prot_init_common(unsigned long page_none, diff --git a/arch/tile/Makefile b/arch/tile/Makefile index 17acce7..04c637c 100644 --- a/arch/tile/Makefile +++ b/arch/tile/Makefile @@ -26,6 +26,10 @@ $(error Set TILERA_ROOT or CROSS_COMPILE when building $(ARCH) on $(HOST_ARCH)) endif endif +# The tile compiler may emit .eh_frame information for backtracing. +# In kernel modules, this causes load failures due to unsupported relocations. +KBUILD_CFLAGS += -fno-asynchronous-unwind-tables + ifneq ($(CONFIG_DEBUG_EXTRA_FLAGS),"") KBUILD_CFLAGS += $(CONFIG_DEBUG_EXTRA_FLAGS) endif diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index 37357a5..a0e9bda 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1451,6 +1451,15 @@ config ARCH_USES_PG_UNCACHED def_bool y depends on X86_PAT +config ARCH_RANDOM + def_bool y + prompt "x86 architectural random number generator" if EXPERT + ---help--- + Enable the x86 architectural RDRAND instruction + (Intel Bull Mountain technology) to generate random numbers. + If supported, this is a high bandwidth, cryptographically + secure hardware random number generator. + config EFI bool "EFI runtime service support" depends on ACPI diff --git a/arch/x86/include/asm/archrandom.h b/arch/x86/include/asm/archrandom.h new file mode 100644 index 0000000..0d9ec77 --- /dev/null +++ b/arch/x86/include/asm/archrandom.h @@ -0,0 +1,75 @@ +/* + * This file is part of the Linux kernel. + * + * Copyright (c) 2011, Intel Corporation + * Authors: Fenghua Yu <fenghua.yu@intel.com>, + * H. 
Peter Anvin <hpa@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. + * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#ifndef ASM_X86_ARCHRANDOM_H +#define ASM_X86_ARCHRANDOM_H + +#include <asm/processor.h> +#include <asm/cpufeature.h> +#include <asm/alternative.h> +#include <asm/nops.h> + +#define RDRAND_RETRY_LOOPS 10 + +#define RDRAND_INT ".byte 0x0f,0xc7,0xf0" +#ifdef CONFIG_X86_64 +# define RDRAND_LONG ".byte 0x48,0x0f,0xc7,0xf0" +#else +# define RDRAND_LONG RDRAND_INT +#endif + +#ifdef CONFIG_ARCH_RANDOM + +#define GET_RANDOM(name, type, rdrand, nop) \ +static inline int name(type *v) \ +{ \ + int ok; \ + alternative_io("movl $0, %0\n\t" \ + nop, \ + "\n1: " rdrand "\n\t" \ + "jc 2f\n\t" \ + "decl %0\n\t" \ + "jnz 1b\n\t" \ + "2:", \ + X86_FEATURE_RDRAND, \ + ASM_OUTPUT2("=r" (ok), "=a" (*v)), \ + "0" (RDRAND_RETRY_LOOPS)); \ + return ok; \ +} + +#ifdef CONFIG_X86_64 + +GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP5); +GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP4); + +#else + +GET_RANDOM(arch_get_random_long, unsigned long, RDRAND_LONG, ASM_NOP3); +GET_RANDOM(arch_get_random_int, unsigned int, RDRAND_INT, ASM_NOP3); + +#endif /* CONFIG_X86_64 */ + +#endif /* CONFIG_ARCH_RANDOM */ + +extern void x86_init_rdrand(struct cpuinfo_x86 *c); + +#endif /* ASM_X86_ARCHRANDOM_H */ diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile index 6042981..0e3a82a 100644 --- a/arch/x86/kernel/cpu/Makefile +++ b/arch/x86/kernel/cpu/Makefile @@ -15,6 +15,7 @@ CFLAGS_common.o := $(nostackp) obj-y := intel_cacheinfo.o scattered.o topology.o obj-y += proc.o capflags.o powerflags.o common.o obj-y += vmware.o hypervisor.o sched.o mshyperv.o +obj-y += rdrand.o obj-$(CONFIG_X86_32) += bugs.o obj-$(CONFIG_X86_64) += bugs_64.o diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c index 0cb2883..1579ab9 100644 --- a/arch/x86/kernel/cpu/common.c +++ b/arch/x86/kernel/cpu/common.c @@ -15,6 +15,7 @@ #include <asm/stackprotector.h> #include <asm/perf_event.h> #include <asm/mmu_context.h> +#include <asm/archrandom.h> #include <asm/hypervisor.h> #include <asm/processor.h> #include <asm/sections.h> @@ -852,6 +853,7 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c) #endif init_hypervisor(c); + x86_init_rdrand(c); /* * Clear/Set all flags overriden by options, need do it diff --git a/arch/x86/kernel/cpu/rdrand.c b/arch/x86/kernel/cpu/rdrand.c new file mode 100644 index 0000000..feca286 --- /dev/null +++ b/arch/x86/kernel/cpu/rdrand.c @@ -0,0 +1,73 @@ +/* + * This file is part of the Linux kernel. + * + * Copyright (c) 2011, Intel Corporation + * Authors: Fenghua Yu <fenghua.yu@intel.com>, + * H. Peter Anvin <hpa@linux.intel.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms and conditions of the GNU General Public License, + * version 2, as published by the Free Software Foundation. 
+ * + * This program is distributed in the hope it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details. + * + * You should have received a copy of the GNU General Public License along with + * this program; if not, write to the Free Software Foundation, Inc., + * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. + * + */ + +#include <asm/processor.h> +#include <asm/archrandom.h> +#include <asm/sections.h> + +static int __init x86_rdrand_setup(char *s) +{ + setup_clear_cpu_cap(X86_FEATURE_RDRAND); + return 1; +} +__setup("nordrand", x86_rdrand_setup); + +/* We can't use arch_get_random_long() here since alternatives haven't run */ +static inline int rdrand_long(unsigned long *v) +{ + int ok; + asm volatile("1: " RDRAND_LONG "\n\t" + "jc 2f\n\t" + "decl %0\n\t" + "jnz 1b\n\t" + "2:" + : "=r" (ok), "=a" (*v) + : "0" (RDRAND_RETRY_LOOPS)); + return ok; +} + +/* + * Force a reseed cycle; we are architecturally guaranteed a reseed + * after no more than 512 128-bit chunks of random data. This also + * acts as a test of the CPU capability. + */ +#define RESEED_LOOP ((512*128)/sizeof(unsigned long)) + +void __cpuinit x86_init_rdrand(struct cpuinfo_x86 *c) +{ +#ifdef CONFIG_ARCH_RANDOM + unsigned long tmp; + int i, count, ok; + + if (!cpu_has(c, X86_FEATURE_RDRAND)) + return; /* Nothing to do */ + + for (count = i = 0; i < RESEED_LOOP; i++) { + ok = rdrand_long(&tmp); + if (ok) + count++; + } + + if (count != RESEED_LOOP) + clear_cpu_cap(c, X86_FEATURE_RDRAND); +#endif +} diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S index edb3d46..268b40d 100644 --- a/arch/x86/kernel/entry_32.S +++ b/arch/x86/kernel/entry_32.S @@ -1029,7 +1029,7 @@ ENTRY(xen_sysenter_target) ENTRY(xen_hypervisor_callback) CFI_STARTPROC - pushl_cfi $0 + pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL TRACE_IRQS_OFF @@ -1071,14 +1071,16 @@ ENTRY(xen_failsafe_callback) 2: mov 8(%esp),%es 3: mov 12(%esp),%fs 4: mov 16(%esp),%gs + /* EAX == 0 => Category 1 (Bad segment) + EAX != 0 => Category 2 (Bad IRET) */ testl %eax,%eax popl_cfi %eax lea 16(%esp),%esp CFI_ADJUST_CFA_OFFSET -16 jz 5f addl $16,%esp - jmp iret_exc # EAX != 0 => Category 2 (Bad IRET) -5: pushl_cfi $0 # EAX == 0 => Category 1 (Bad segment) + jmp iret_exc +5: pushl_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL jmp ret_from_exception CFI_ENDPROC diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S index 8a445a0..dd4dba4 100644 --- a/arch/x86/kernel/entry_64.S +++ b/arch/x86/kernel/entry_64.S @@ -1308,7 +1308,7 @@ ENTRY(xen_failsafe_callback) CFI_RESTORE r11 addq $0x30,%rsp CFI_ADJUST_CFA_OFFSET -0x30 - pushq_cfi $0 + pushq_cfi $-1 /* orig_ax = -1 => not a system call */ SAVE_ALL jmp error_exit CFI_ENDPROC diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c index afaf384..af19a61 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -937,8 +937,21 @@ void __init setup_arch(char **cmdline_p) #ifdef CONFIG_X86_64 if (max_pfn > max_low_pfn) { - max_pfn_mapped = init_memory_mapping(1UL<<32, - max_pfn<<PAGE_SHIFT); + int i; + for (i = 0; i < e820.nr_map; i++) { + struct e820entry *ei = &e820.map[i]; + + if (ei->addr + ei->size <= 1UL << 32) + continue; + + if (ei->type == E820_RESERVED) + continue; + + max_pfn_mapped = init_memory_mapping( + ei->addr < 1UL << 32 ? 
1UL << 32 : ei->addr, + ei->addr + ei->size); + } + /* can we preseve max_low_pfn ?*/ max_low_pfn = max_pfn; } diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index 87488b9..c22c423 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -28,36 +28,50 @@ int direct_gbpages #endif ; -static void __init find_early_table_space(unsigned long end, int use_pse, - int use_gbpages) +struct map_range { + unsigned long start; + unsigned long end; + unsigned page_size_mask; +}; + +/* + * First calculate space needed for kernel direct mapping page tables to cover + * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB + * pages. Then find enough contiguous space for those page tables. + */ +static void __init find_early_table_space(struct map_range *mr, int nr_range) { - unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; + int i; + unsigned long puds = 0, pmds = 0, ptes = 0, tables; + unsigned long start = 0, good_end; phys_addr_t base; - puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; - tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); + for (i = 0; i < nr_range; i++) { + unsigned long range, extra; - if (use_gbpages) { - unsigned long extra; + range = mr[i].end - mr[i].start; + puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; - extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); - pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; - } else - pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; - - tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); + if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) { + extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT); + pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT; + } else { + pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT; + } - if (use_pse) { - unsigned long extra; - - extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); + if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) { + extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT); #ifdef CONFIG_X86_32 - extra += PMD_SIZE; + extra += PMD_SIZE; #endif - ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; - } else - ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; + ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; + } else { + ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT; + } + } + tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); + tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); #ifdef CONFIG_X86_32 @@ -74,8 +88,9 @@ static void __init find_early_table_space(unsigned long end, int use_pse, pgt_buf_end = pgt_buf_start; pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); - printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", - end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); + printk(KERN_DEBUG "kernel direct mapping tables up to %#lx @ [mem %#010lx-%#010lx]\n", + mr[nr_range - 1].end - 1, pgt_buf_start << PAGE_SHIFT, + (pgt_buf_top << PAGE_SHIFT) - 1); } void __init native_pagetable_reserve(u64 start, u64 end) @@ -83,12 +98,6 @@ void __init native_pagetable_reserve(u64 start, u64 end) memblock_x86_reserve_range(start, end, "PGTABLE"); } -struct map_range { - unsigned long start; - unsigned long end; - unsigned page_size_mask; -}; - #ifdef CONFIG_X86_32 #define NR_RANGE_MR 3 #else /* CONFIG_X86_64 */ @@ -260,7 +269,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * nodes are discovered. 
*/ if (!after_bootmem) - find_early_table_space(end, use_pse, use_gbpages); + find_early_table_space(mr, nr_range); for (i = 0; i < nr_range; i++) ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c index 68894fd..a00c588 100644 --- a/arch/x86/oprofile/nmi_int.c +++ b/arch/x86/oprofile/nmi_int.c @@ -55,7 +55,7 @@ u64 op_x86_get_ctrl(struct op_x86_model_spec const *model, val |= counter_config->extra; event &= model->event_mask ? model->event_mask : 0xFF; val |= event & 0xFF; - val |= (event & 0x0F00) << 24; + val |= (u64)(event & 0x0F00) << 24; return val; } diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 8385d1d..9f808af 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -803,7 +803,16 @@ static void xen_write_cr4(unsigned long cr4) native_write_cr4(cr4); } - +#ifdef CONFIG_X86_64 +static inline unsigned long xen_read_cr8(void) +{ + return 0; +} +static inline void xen_write_cr8(unsigned long val) +{ + BUG_ON(val); +} +#endif static int xen_write_msr_safe(unsigned int msr, unsigned low, unsigned high) { int ret; @@ -968,6 +977,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_cr4_safe = native_read_cr4_safe, .write_cr4 = xen_write_cr4, +#ifdef CONFIG_X86_64 + .read_cr8 = xen_read_cr8, + .write_cr8 = xen_write_cr8, +#endif + .wbinvd = native_wbinvd, .read_msr = native_read_msr_safe, @@ -975,6 +989,8 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = { .read_tsc = native_read_tsc, .read_pmc = native_read_pmc, + .read_tscp = native_read_tscp, + .iret = xen_iret, .irq_enable_sysexit = xen_sysexit, #ifdef CONFIG_X86_64 diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index b19a18d..d2519b2 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -71,9 +71,6 @@ enum ec_command { #define ACPI_EC_UDELAY_GLK 1000 /* Wait 1ms max. to get global lock */ #define ACPI_EC_MSI_UDELAY 550 /* Wait 550us for MSI EC */ -#define ACPI_EC_STORM_THRESHOLD 8 /* number of false interrupts - per one transaction */ - enum { EC_FLAGS_QUERY_PENDING, /* Query is pending */ EC_FLAGS_GPE_STORM, /* GPE storm detected */ @@ -87,6 +84,15 @@ static unsigned int ec_delay __read_mostly = ACPI_EC_DELAY; module_param(ec_delay, uint, 0644); MODULE_PARM_DESC(ec_delay, "Timeout(ms) waited until an EC command completes"); +/* + * If the number of false interrupts per one transaction exceeds + * this threshold, will think there is a GPE storm happened and + * will disable the GPE for normal transaction. + */ +static unsigned int ec_storm_threshold __read_mostly = 8; +module_param(ec_storm_threshold, uint, 0644); +MODULE_PARM_DESC(ec_storm_threshold, "Maxim false GPE numbers not considered as GPE storm"); + /* If we find an EC via the ECDT, we need to keep a ptr to its context */ /* External interfaces use first EC only, so remember */ typedef int (*acpi_ec_query_func) (void *data); @@ -319,7 +325,7 @@ static int acpi_ec_transaction(struct acpi_ec *ec, struct transaction *t) msleep(1); /* It is safe to enable the GPE outside of the transaction. 
*/ acpi_enable_gpe(NULL, ec->gpe); - } else if (t->irq_count > ACPI_EC_STORM_THRESHOLD) { + } else if (t->irq_count > ec_storm_threshold) { pr_info(PREFIX "GPE storm detected, " "transactions will use polling mode\n"); set_bit(EC_FLAGS_GPE_STORM, &ec->flags); @@ -914,6 +920,17 @@ static int ec_flag_msi(const struct dmi_system_id *id) return 0; } +/* + * Clevo M720 notebook actually works ok with IRQ mode, if we lifted + * the GPE storm threshold back to 20 + */ +static int ec_enlarge_storm_threshold(const struct dmi_system_id *id) +{ + pr_debug("Setting the EC GPE storm threshold to 20\n"); + ec_storm_threshold = 20; + return 0; +} + static struct dmi_system_id __initdata ec_dmi_table[] = { { ec_skip_dsdt_scan, "Compal JFL92", { @@ -945,10 +962,13 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { { ec_validate_ecdt, "ASUS hardware", { DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer Inc.") }, NULL}, + { + ec_enlarge_storm_threshold, "CLEVO hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), + DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, {}, }; - int __init acpi_ec_ecdt_probe(void) { acpi_status status; diff --git a/drivers/bcma/main.c b/drivers/bcma/main.c index be52344..a9cb238 100644 --- a/drivers/bcma/main.c +++ b/drivers/bcma/main.c @@ -110,9 +110,10 @@ static int bcma_register_cores(struct bcma_bus *bus) static void bcma_unregister_cores(struct bcma_bus *bus) { - struct bcma_device *core; + struct bcma_device *core, *tmp; - list_for_each_entry(core, &bus->cores, list) { + list_for_each_entry_safe(core, tmp, &bus->cores, list) { + list_del(&core->list); if (core->dev_registered) device_unregister(&core->dev); } diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c index b85ee76..65b9d6f 100644 --- a/drivers/char/tpm/tpm.c +++ b/drivers/char/tpm/tpm.c @@ -1019,17 +1019,20 @@ ssize_t tpm_write(struct file *file, const char __user *buf, size_t size, loff_t *off) { struct tpm_chip *chip = file->private_data; - size_t in_size = size, out_size; + size_t in_size = size; + ssize_t out_size; /* cannot perform a write until the read has cleared - either via tpm_read or a user_read_timer timeout */ - while (atomic_read(&chip->data_pending) != 0) - msleep(TPM_TIMEOUT); - - mutex_lock(&chip->buffer_mutex); + either via tpm_read or a user_read_timer timeout. + This also prevents splitted buffered writes from blocking here. + */ + if (atomic_read(&chip->data_pending) != 0) + return -EBUSY; if (in_size > TPM_BUFSIZE) - in_size = TPM_BUFSIZE; + return -E2BIG; + + mutex_lock(&chip->buffer_mutex); if (copy_from_user (chip->data_buffer, (void __user *) buf, in_size)) { @@ -1039,6 +1042,10 @@ ssize_t tpm_write(struct file *file, const char __user *buf, /* atomic tpm command send and result receive */ out_size = tpm_transmit(chip, chip->data_buffer, TPM_BUFSIZE); + if (out_size < 0) { + mutex_unlock(&chip->buffer_mutex); + return out_size; + } atomic_set(&chip->data_pending, out_size); mutex_unlock(&chip->buffer_mutex); diff --git a/drivers/cpufreq/powernow-k8.c b/drivers/cpufreq/powernow-k8.c index b7fe343..f6cd315 100644 --- a/drivers/cpufreq/powernow-k8.c +++ b/drivers/cpufreq/powernow-k8.c @@ -1216,14 +1216,7 @@ static int powernowk8_target(struct cpufreq_policy *pol, struct powernowk8_target_arg pta = { .pol = pol, .targfreq = targfreq, .relation = relation }; - /* - * Must run on @pol->cpu. cpufreq core is responsible for ensuring - * that we're bound to the current CPU and pol->cpu stays online. 
- */ - if (smp_processor_id() == pol->cpu) - return powernowk8_target_fn(&pta); - else - return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); + return work_on_cpu(pol->cpu, powernowk8_target_fn, &pta); } /* Driver entry point to verify the policy and range of frequencies */ diff --git a/drivers/edac/amd64_edac.c b/drivers/edac/amd64_edac.c index 9a8bebc..feb2d10 100644 --- a/drivers/edac/amd64_edac.c +++ b/drivers/edac/amd64_edac.c @@ -161,8 +161,11 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) * memory controller and apply to register. Search for the first * bandwidth entry that is greater or equal than the setting requested * and program that. If at last entry, turn off DRAM scrubbing. + * + * If no suitable bandwidth is found, turn off DRAM scrubbing entirely + * by falling back to the last element in scrubrates[]. */ - for (i = 0; i < ARRAY_SIZE(scrubrates); i++) { + for (i = 0; i < ARRAY_SIZE(scrubrates) - 1; i++) { /* * skip scrub rates which aren't recommended * (see F10 BKDG, F3x58) @@ -172,12 +175,6 @@ static int __amd64_set_scrub_rate(struct pci_dev *ctl, u32 new_bw, u32 min_rate) if (scrubrates[i].bandwidth <= new_bw) break; - - /* - * if no suitable bandwidth found, turn off DRAM scrubbing - * entirely by falling back to the last element in the - * scrubrates array. - */ } scrubval = scrubrates[i].scrubval; diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c index 4799393..b97d4f0 100644 --- a/drivers/firewire/core-cdev.c +++ b/drivers/firewire/core-cdev.c @@ -471,8 +471,8 @@ static int ioctl_get_info(struct client *client, union ioctl_arg *arg) client->bus_reset_closure = a->bus_reset_closure; if (a->bus_reset != 0) { fill_bus_reset_event(&bus_reset, client); - ret = copy_to_user(u64_to_uptr(a->bus_reset), - &bus_reset, sizeof(bus_reset)); + /* unaligned size of bus_reset is 36 bytes */ + ret = copy_to_user(u64_to_uptr(a->bus_reset), &bus_reset, 36); } if (ret == 0 && list_empty(&client->link)) list_add_tail(&client->link, &client->device->client_list); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 387b2b3..557e007 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -3113,6 +3113,11 @@ #define TRANS_6BPC (2<<5) #define TRANS_12BPC (3<<5) +#define _TRANSA_CHICKEN2 0xf0064 +#define _TRANSB_CHICKEN2 0xf1064 +#define TRANS_CHICKEN2(pipe) _PIPE(pipe, _TRANSA_CHICKEN2, _TRANSB_CHICKEN2) +#define TRANS_AUTOTRAIN_GEN_STALL_DIS (1<<31) + #define SOUTH_CHICKEN2 0xc2004 #define DPLS_EDP_PPS_FIX_DIS (1<<0) diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c index 4b8e235..36d76989 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c @@ -7584,6 +7584,7 @@ static void ibx_init_clock_gating(struct drm_device *dev) static void cpt_init_clock_gating(struct drm_device *dev) { struct drm_i915_private *dev_priv = dev->dev_private; + int pipe; /* * On Ibex Peak and Cougar Point, we need to disable clock @@ -7593,6 +7594,9 @@ static void cpt_init_clock_gating(struct drm_device *dev) I915_WRITE(SOUTH_DSPCLK_GATE_D, PCH_DPLSUNIT_CLOCK_GATE_DISABLE); I915_WRITE(SOUTH_CHICKEN2, I915_READ(SOUTH_CHICKEN2) | DPLS_EDP_PPS_FIX_DIS); + /* Without this, mode sets may fail silently on FDI */ + for_each_pipe(pipe) + I915_WRITE(TRANS_CHICKEN2(pipe), TRANS_AUTOTRAIN_GEN_STALL_DIS); } static void ironlake_teardown_rc6(struct drm_device *dev) diff --git a/drivers/gpu/drm/i915/intel_lvds.c 
b/drivers/gpu/drm/i915/intel_lvds.c index ff85a91..7adba11 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -751,6 +751,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_BOARD_NAME, "MS-7469"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "ZOTAC ZBOXSD-ID12/ID13", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ZOTAC"), + DMI_MATCH(DMI_BOARD_NAME, "ZBOXSD-ID12/ID13"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c index 2f46e0c..3ad3cc6 100644 --- a/drivers/gpu/drm/radeon/radeon_legacy_encoders.c +++ b/drivers/gpu/drm/radeon/radeon_legacy_encoders.c @@ -973,11 +973,7 @@ static void radeon_legacy_tmds_ext_mode_set(struct drm_encoder *encoder, static void radeon_ext_tmds_enc_destroy(struct drm_encoder *encoder) { struct radeon_encoder *radeon_encoder = to_radeon_encoder(encoder); - struct radeon_encoder_ext_tmds *tmds = radeon_encoder->enc_priv; - if (tmds) { - if (tmds->i2c_bus) - radeon_i2c_destroy(tmds->i2c_bus); - } + /* don't destroy the i2c bus record here, this will be done in radeon_i2c_fini */ kfree(radeon_encoder->enc_priv); drm_encoder_cleanup(encoder); kfree(radeon_encoder); diff --git a/drivers/media/video/au0828/au0828-video.c b/drivers/media/video/au0828/au0828-video.c index c03eb29..9945aaf 100644 --- a/drivers/media/video/au0828/au0828-video.c +++ b/drivers/media/video/au0828/au0828-video.c @@ -1697,14 +1697,18 @@ static int vidioc_streamoff(struct file *file, void *priv, (AUVI_INPUT(i).audio_setup)(dev, 0); } - videobuf_streamoff(&fh->vb_vidq); - res_free(fh, AU0828_RESOURCE_VIDEO); + if (res_check(fh, AU0828_RESOURCE_VIDEO)) { + videobuf_streamoff(&fh->vb_vidq); + res_free(fh, AU0828_RESOURCE_VIDEO); + } } else if (fh->type == V4L2_BUF_TYPE_VBI_CAPTURE) { dev->vbi_timeout_running = 0; del_timer_sync(&dev->vbi_timeout); - videobuf_streamoff(&fh->vb_vbiq); - res_free(fh, AU0828_RESOURCE_VBI); + if (res_check(fh, AU0828_RESOURCE_VBI)) { + videobuf_streamoff(&fh->vb_vbiq); + res_free(fh, AU0828_RESOURCE_VBI); + } } return 0; diff --git a/drivers/net/skge.c b/drivers/net/skge.c index f4be5c7..b446e7e 100644 --- a/drivers/net/skge.c +++ b/drivers/net/skge.c @@ -4097,6 +4097,13 @@ static struct dmi_system_id skge_32bit_dma_boards[] = { DMI_MATCH(DMI_BOARD_NAME, "nForce"), }, }, + { + .ident = "ASUS P5NSLI", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "ASUSTeK Computer INC."), + DMI_MATCH(DMI_BOARD_NAME, "P5NSLI") + }, + }, {} }; diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index 85931ca..10a99e4 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -13689,8 +13689,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) */ tg3_flag_set(tp, 4G_DMA_BNDRY_BUG); - if (tg3_flag(tp, 5755_PLUS)) - tg3_flag_set(tp, SHORT_DMA_BUG); + if (tg3_flag(tp, 5755_PLUS) || + GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) + tg3_flag_set(tp, SHORT_DMA_BUG); else tg3_flag_set(tp, 40BIT_DMA_LIMIT_BUG); diff --git a/drivers/pcmcia/pxa2xx_sharpsl.c b/drivers/pcmcia/pxa2xx_sharpsl.c index 81af2b3..097fa00 100644 --- a/drivers/pcmcia/pxa2xx_sharpsl.c +++ b/drivers/pcmcia/pxa2xx_sharpsl.c @@ -222,7 +222,7 @@ static void sharpsl_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt) sharpsl_pcmcia_init_reset(skt); } -static struct pcmcia_low_level sharpsl_pcmcia_ops __initdata = { +static struct pcmcia_low_level sharpsl_pcmcia_ops = { .owner = THIS_MODULE, .hw_init = sharpsl_pcmcia_hw_init, .hw_shutdown = 
sharpsl_pcmcia_hw_shutdown, diff --git a/drivers/rtc/rtc-imxdi.c b/drivers/rtc/rtc-imxdi.c index 2dd3c01..d007609 100644 --- a/drivers/rtc/rtc-imxdi.c +++ b/drivers/rtc/rtc-imxdi.c @@ -391,6 +391,8 @@ static int dryice_rtc_probe(struct platform_device *pdev) if (imxdi->ioaddr == NULL) return -ENOMEM; + spin_lock_init(&imxdi->irq_lock); + imxdi->irq = platform_get_irq(pdev, 0); if (imxdi->irq < 0) return imxdi->irq; diff --git a/drivers/staging/comedi/drivers/amplc_pc236.c b/drivers/staging/comedi/drivers/amplc_pc236.c index 48246cd..b4311bf 100644 --- a/drivers/staging/comedi/drivers/amplc_pc236.c +++ b/drivers/staging/comedi/drivers/amplc_pc236.c @@ -470,7 +470,7 @@ static int pc236_detach(struct comedi_device *dev) { printk(KERN_DEBUG "comedi%d: %s: detach\n", dev->minor, PC236_DRIVER_NAME); - if (devpriv) + if (dev->iobase) pc236_intr_disable(dev); if (dev->irq) diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index de80171..b107339 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -760,10 +760,6 @@ static const __u32 acm_tty_speed[] = { 2500000, 3000000, 3500000, 4000000 }; -static const __u8 acm_tty_size[] = { - 5, 6, 7, 8 -}; - static void acm_tty_set_termios(struct tty_struct *tty, struct ktermios *termios_old) { @@ -780,7 +776,21 @@ static void acm_tty_set_termios(struct tty_struct *tty, newline.bParityType = termios->c_cflag & PARENB ? (termios->c_cflag & PARODD ? 1 : 2) + (termios->c_cflag & CMSPAR ? 2 : 0) : 0; - newline.bDataBits = acm_tty_size[(termios->c_cflag & CSIZE) >> 4]; + switch (termios->c_cflag & CSIZE) { + case CS5: + newline.bDataBits = 5; + break; + case CS6: + newline.bDataBits = 6; + break; + case CS7: + newline.bDataBits = 7; + break; + case CS8: + default: + newline.bDataBits = 8; + break; + } /* FIXME: Needs to clear unsupported bits in the termios */ acm->clocal = ((termios->c_cflag & CLOCAL) != 0); @@ -1172,7 +1182,7 @@ made_compressed_probe: if (usb_endpoint_xfer_int(epwrite)) usb_fill_int_urb(snd->urb, usb_dev, - usb_sndbulkpipe(usb_dev, epwrite->bEndpointAddress), + usb_sndintpipe(usb_dev, epwrite->bEndpointAddress), NULL, acm->writesize, acm_write_bulk, snd, epwrite->bInterval); else usb_fill_bulk_urb(snd->urb, usb_dev, @@ -1496,6 +1506,9 @@ static const struct usb_device_id acm_ids[] = { Maybe we should define a new quirk for this. 
*/ }, + { USB_DEVICE(0x0572, 0x1340), /* Conexant CX93010-2x UCMxx */ + .driver_info = NO_UNION_NORMAL, + }, { USB_DEVICE(0x1bbb, 0x0003), /* Alcatel OT-I650 */ .driver_info = NO_UNION_NORMAL, /* reports zero length descriptor */ }, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 3c0aa02..b4688fa 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -482,13 +482,16 @@ static void hub_tt_work(struct work_struct *work) int limit = 100; spin_lock_irqsave (&hub->tt.lock, flags); - while (--limit && !list_empty (&hub->tt.clear_list)) { + while (!list_empty(&hub->tt.clear_list)) { struct list_head *next; struct usb_tt_clear *clear; struct usb_device *hdev = hub->hdev; const struct hc_driver *drv; int status; + if (!hub->quiescing && --limit < 0) + break; + next = hub->tt.clear_list.next; clear = list_entry (next, struct usb_tt_clear, clear_list); list_del (&clear->clear_list); @@ -952,7 +955,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type) if (hub->has_indicators) cancel_delayed_work_sync(&hub->leds); if (hub->tt.hub) - cancel_work_sync(&hub->tt.clear_work); + flush_work_sync(&hub->tt.clear_work); } /* caller has locked the hub device */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 3b82c81..3f623fb 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -543,7 +543,14 @@ static const struct dmi_system_id __devinitconst ehci_dmi_nohandoff_table[] = { /* Pegatron Lucid (Ordissimo AIRIS) */ .matches = { DMI_MATCH(DMI_BOARD_NAME, "M11JB"), - DMI_MATCH(DMI_BIOS_VERSION, "Lucid-GE-133"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), + }, + }, + { + /* Pegatron Lucid (Ordissimo) */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Ordissimo"), + DMI_MATCH(DMI_BIOS_VERSION, "Lucid-"), }, }, { } diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index b455f4c..a44f2d4 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -1505,6 +1505,7 @@ void xhci_free_command(struct xhci_hcd *xhci, void xhci_mem_cleanup(struct xhci_hcd *xhci) { struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller); + struct xhci_cd *cur_cd, *next_cd; int size; int i; @@ -1525,6 +1526,11 @@ void xhci_mem_cleanup(struct xhci_hcd *xhci) xhci_ring_free(xhci, xhci->cmd_ring); xhci->cmd_ring = NULL; xhci_dbg(xhci, "Freed command ring\n"); + list_for_each_entry_safe(cur_cd, next_cd, + &xhci->cancel_cmd_list, cancel_cmd_list) { + list_del(&cur_cd->cancel_cmd_list); + kfree(cur_cd); + } for (i = 1; i < MAX_HC_SLOTS; ++i) xhci_free_virt_device(xhci, i); @@ -2014,6 +2020,7 @@ int xhci_mem_init(struct xhci_hcd *xhci, gfp_t flags) xhci->cmd_ring = xhci_ring_alloc(xhci, 1, true, false, flags); if (!xhci->cmd_ring) goto fail; + INIT_LIST_HEAD(&xhci->cancel_cmd_list); xhci_dbg(xhci, "Allocated command ring at %p\n", xhci->cmd_ring); xhci_dbg(xhci, "First segment DMA is 0x%llx\n", (unsigned long long)xhci->cmd_ring->first_seg->dma); diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 152daca..1a38281 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -311,12 +311,123 @@ static int room_on_ring(struct xhci_hcd *xhci, struct xhci_ring *ring, /* Ring the host controller doorbell after placing a command on the ring */ void xhci_ring_cmd_db(struct xhci_hcd *xhci) { + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) + return; + xhci_dbg(xhci, "// Ding dong!\n"); xhci_writel(xhci, DB_VALUE_HOST, &xhci->dba->doorbell[0]); /* 
Flush PCI posted writes */ xhci_readl(xhci, &xhci->dba->doorbell[0]); } +static int xhci_abort_cmd_ring(struct xhci_hcd *xhci) +{ + u64 temp_64; + int ret; + + xhci_dbg(xhci, "Abort command ring\n"); + + if (!(xhci->cmd_ring_state & CMD_RING_STATE_RUNNING)) { + xhci_dbg(xhci, "The command ring isn't running, " + "Have the command ring been stopped?\n"); + return 0; + } + + temp_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring); + if (!(temp_64 & CMD_RING_RUNNING)) { + xhci_dbg(xhci, "Command ring had been stopped\n"); + return 0; + } + xhci->cmd_ring_state = CMD_RING_STATE_ABORTED; + xhci_write_64(xhci, temp_64 | CMD_RING_ABORT, + &xhci->op_regs->cmd_ring); + + /* Section 4.6.1.2 of xHCI 1.0 spec says software should + * time the completion od all xHCI commands, including + * the Command Abort operation. If software doesn't see + * CRR negated in a timely manner (e.g. longer than 5 + * seconds), then it should assume that the there are + * larger problems with the xHC and assert HCRST. + */ + ret = handshake(xhci, &xhci->op_regs->cmd_ring, + CMD_RING_RUNNING, 0, 5 * 1000 * 1000); + if (ret < 0) { + xhci_err(xhci, "Stopped the command ring failed, " + "maybe the host is dead\n"); + xhci->xhc_state |= XHCI_STATE_DYING; + xhci_quiesce(xhci); + xhci_halt(xhci); + return -ESHUTDOWN; + } + + return 0; +} + +static int xhci_queue_cd(struct xhci_hcd *xhci, + struct xhci_command *command, + union xhci_trb *cmd_trb) +{ + struct xhci_cd *cd; + cd = kzalloc(sizeof(struct xhci_cd), GFP_ATOMIC); + if (!cd) + return -ENOMEM; + INIT_LIST_HEAD(&cd->cancel_cmd_list); + + cd->command = command; + cd->cmd_trb = cmd_trb; + list_add_tail(&cd->cancel_cmd_list, &xhci->cancel_cmd_list); + + return 0; +} + +/* + * Cancel the command which has issue. + * + * Some commands may hang due to waiting for acknowledgement from + * usb device. It is outside of the xHC's ability to control and + * will cause the command ring is blocked. When it occurs software + * should intervene to recover the command ring. + * See Section 4.6.1.1 and 4.6.1.2 + */ +int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, + union xhci_trb *cmd_trb) +{ + int retval = 0; + unsigned long flags; + + spin_lock_irqsave(&xhci->lock, flags); + + if (xhci->xhc_state & XHCI_STATE_DYING) { + xhci_warn(xhci, "Abort the command ring," + " but the xHCI is dead.\n"); + retval = -ESHUTDOWN; + goto fail; + } + + /* queue the cmd desriptor to cancel_cmd_list */ + retval = xhci_queue_cd(xhci, command, cmd_trb); + if (retval) { + xhci_warn(xhci, "Queuing command descriptor failed.\n"); + goto fail; + } + + /* abort command ring */ + retval = xhci_abort_cmd_ring(xhci); + if (retval) { + xhci_err(xhci, "Abort command ring failed\n"); + if (unlikely(retval == -ESHUTDOWN)) { + spin_unlock_irqrestore(&xhci->lock, flags); + usb_hc_died(xhci_to_hcd(xhci)->primary_hcd); + xhci_dbg(xhci, "xHCI host controller is dead.\n"); + return retval; + } + } + +fail: + spin_unlock_irqrestore(&xhci->lock, flags); + return retval; +} + void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, @@ -1046,6 +1157,20 @@ static void handle_reset_ep_completion(struct xhci_hcd *xhci, } } +/* Complete the command and detele it from the devcie's command queue. 
+ */ +static void xhci_complete_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, + struct xhci_command *command, u32 status) +{ + command->status = status; + list_del(&command->cmd_list); + if (command->completion) + complete(command->completion); + else + xhci_free_command(xhci, command); +} + + /* Check to see if a command in the device's command queue matches this one. * Signal the completion or free the command, and return 1. Return 0 if the * completed command isn't at the head of the command list. @@ -1064,15 +1189,155 @@ static int handle_cmd_in_cmd_wait_list(struct xhci_hcd *xhci, if (xhci->cmd_ring->dequeue != command->command_trb) return 0; - command->status = GET_COMP_CODE(le32_to_cpu(event->status)); - list_del(&command->cmd_list); - if (command->completion) - complete(command->completion); - else - xhci_free_command(xhci, command); + xhci_complete_cmd_in_cmd_wait_list(xhci, command, + GET_COMP_CODE(le32_to_cpu(event->status))); return 1; } +/* + * Finding the command trb need to be cancelled and modifying it to + * NO OP command. And if the command is in device's command wait + * list, finishing and freeing it. + * + * If we can't find the command trb, we think it had already been + * executed. + */ +static void xhci_cmd_to_noop(struct xhci_hcd *xhci, struct xhci_cd *cur_cd) +{ + struct xhci_segment *cur_seg; + union xhci_trb *cmd_trb; + u32 cycle_state; + + if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) + return; + + /* find the current segment of command ring */ + cur_seg = find_trb_seg(xhci->cmd_ring->first_seg, + xhci->cmd_ring->dequeue, &cycle_state); + + if (!cur_seg) { + xhci_warn(xhci, "Command ring mismatch, dequeue = %p %llx (dma)\n", + xhci->cmd_ring->dequeue, + (unsigned long long) + xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg, + xhci->cmd_ring->dequeue)); + xhci_debug_ring(xhci, xhci->cmd_ring); + xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring); + return; + } + + /* find the command trb matched by cd from command ring */ + for (cmd_trb = xhci->cmd_ring->dequeue; + cmd_trb != xhci->cmd_ring->enqueue; + next_trb(xhci, xhci->cmd_ring, &cur_seg, &cmd_trb)) { + /* If the trb is link trb, continue */ + if (TRB_TYPE_LINK_LE32(cmd_trb->generic.field[3])) + continue; + + if (cur_cd->cmd_trb == cmd_trb) { + + /* If the command in device's command list, we should + * finish it and free the command structure. + */ + if (cur_cd->command) + xhci_complete_cmd_in_cmd_wait_list(xhci, + cur_cd->command, COMP_CMD_STOP); + + /* get cycle state from the origin command trb */ + cycle_state = le32_to_cpu(cmd_trb->generic.field[3]) + & TRB_CYCLE; + + /* modify the command trb to NO OP command */ + cmd_trb->generic.field[0] = 0; + cmd_trb->generic.field[1] = 0; + cmd_trb->generic.field[2] = 0; + cmd_trb->generic.field[3] = cpu_to_le32( + TRB_TYPE(TRB_CMD_NOOP) | cycle_state); + break; + } + } +} + +static void xhci_cancel_cmd_in_cd_list(struct xhci_hcd *xhci) +{ + struct xhci_cd *cur_cd, *next_cd; + + if (list_empty(&xhci->cancel_cmd_list)) + return; + + list_for_each_entry_safe(cur_cd, next_cd, + &xhci->cancel_cmd_list, cancel_cmd_list) { + xhci_cmd_to_noop(xhci, cur_cd); + list_del(&cur_cd->cancel_cmd_list); + kfree(cur_cd); + } +} + +/* + * traversing the cancel_cmd_list. If the command descriptor according + * to cmd_trb is found, the function free it and return 1, otherwise + * return 0. 
+ */ +static int xhci_search_cmd_trb_in_cd_list(struct xhci_hcd *xhci, + union xhci_trb *cmd_trb) +{ + struct xhci_cd *cur_cd, *next_cd; + + if (list_empty(&xhci->cancel_cmd_list)) + return 0; + + list_for_each_entry_safe(cur_cd, next_cd, + &xhci->cancel_cmd_list, cancel_cmd_list) { + if (cur_cd->cmd_trb == cmd_trb) { + if (cur_cd->command) + xhci_complete_cmd_in_cmd_wait_list(xhci, + cur_cd->command, COMP_CMD_STOP); + list_del(&cur_cd->cancel_cmd_list); + kfree(cur_cd); + return 1; + } + } + + return 0; +} + +/* + * If the cmd_trb_comp_code is COMP_CMD_ABORT, we just check whether the + * trb pointed by the command ring dequeue pointer is the trb we want to + * cancel or not. And if the cmd_trb_comp_code is COMP_CMD_STOP, we will + * traverse the cancel_cmd_list to trun the all of the commands according + * to command descriptor to NO-OP trb. + */ +static int handle_stopped_cmd_ring(struct xhci_hcd *xhci, + int cmd_trb_comp_code) +{ + int cur_trb_is_good = 0; + + /* Searching the cmd trb pointed by the command ring dequeue + * pointer in command descriptor list. If it is found, free it. + */ + cur_trb_is_good = xhci_search_cmd_trb_in_cd_list(xhci, + xhci->cmd_ring->dequeue); + + if (cmd_trb_comp_code == COMP_CMD_ABORT) + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + else if (cmd_trb_comp_code == COMP_CMD_STOP) { + /* traversing the cancel_cmd_list and canceling + * the command according to command descriptor + */ + xhci_cancel_cmd_in_cd_list(xhci); + + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; + /* + * ring command ring doorbell again to restart the + * command ring + */ + if (xhci->cmd_ring->dequeue != xhci->cmd_ring->enqueue) + xhci_ring_cmd_db(xhci); + } + return cur_trb_is_good; +} + static void handle_cmd_completion(struct xhci_hcd *xhci, struct xhci_event_cmd *event) { @@ -1098,6 +1363,22 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, xhci->error_bitmask |= 1 << 5; return; } + + if ((GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_ABORT) || + (GET_COMP_CODE(le32_to_cpu(event->status)) == COMP_CMD_STOP)) { + /* If the return value is 0, we think the trb pointed by + * command ring dequeue pointer is a good trb. The good + * trb means we don't want to cancel the trb, but it have + * been stopped by host. So we should handle it normally. + * Otherwise, driver should invoke inc_deq() and return. + */ + if (handle_stopped_cmd_ring(xhci, + GET_COMP_CODE(le32_to_cpu(event->status)))) { + inc_deq(xhci, xhci->cmd_ring, false); + return; + } + } + switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) & TRB_TYPE_BITMASK) { case TRB_TYPE(TRB_ENABLE_SLOT): diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index c39ab20..4864b25 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -51,7 +51,7 @@ MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB"); * handshake done). There are two failure modes: "usec" have passed (major * hardware flakeout), or the register reads as all-ones (hardware removed). 
*/ -static int handshake(struct xhci_hcd *xhci, void __iomem *ptr, +int handshake(struct xhci_hcd *xhci, void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; @@ -104,8 +104,10 @@ int xhci_halt(struct xhci_hcd *xhci) ret = handshake(xhci, &xhci->op_regs->status, STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC); - if (!ret) + if (!ret) { xhci->xhc_state |= XHCI_STATE_HALTED; + xhci->cmd_ring_state = CMD_RING_STATE_STOPPED; + } return ret; } @@ -390,6 +392,7 @@ static int xhci_run_finished(struct xhci_hcd *xhci) return -ENODEV; } xhci->shared_hcd->state = HC_STATE_RUNNING; + xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; if (xhci->quirks & XHCI_NEC_HOST) xhci_ring_cmd_db(xhci); @@ -1775,6 +1778,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, struct completion *cmd_completion; u32 *cmd_status; struct xhci_virt_device *virt_dev; + union xhci_trb *cmd_trb; spin_lock_irqsave(&xhci->lock, flags); virt_dev = xhci->devs[udev->slot_id]; @@ -1817,6 +1821,7 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, } init_completion(cmd_completion); + cmd_trb = xhci->cmd_ring->dequeue; if (!ctx_change) ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma, udev->slot_id, must_succeed); @@ -1838,14 +1843,17 @@ static int xhci_configure_endpoint(struct xhci_hcd *xhci, /* Wait for the configure endpoint command to complete */ timeleft = wait_for_completion_interruptible_timeout( cmd_completion, - USB_CTRL_SET_TIMEOUT); + XHCI_CMD_DEFAULT_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for %s command\n", timeleft == 0 ? "Timeout" : "Signal", ctx_change == 0 ? "configure endpoint" : "evaluate context"); - /* FIXME cancel the configure endpoint command */ + /* cancel the configure endpoint command */ + ret = xhci_cancel_cmd(xhci, command, cmd_trb); + if (ret < 0) + return ret; return -ETIME; } @@ -2778,8 +2786,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) unsigned long flags; int timeleft; int ret; + union xhci_trb *cmd_trb; spin_lock_irqsave(&xhci->lock, flags); + cmd_trb = xhci->cmd_ring->dequeue; ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0); if (ret) { spin_unlock_irqrestore(&xhci->lock, flags); @@ -2791,12 +2801,12 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) /* XXX: how much time for xHC slot assignment? */ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, - USB_CTRL_SET_TIMEOUT); + XHCI_CMD_DEFAULT_TIMEOUT); if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for a slot\n", timeleft == 0 ? "Timeout" : "Signal"); - /* FIXME cancel the enable slot request */ - return 0; + /* cancel the enable slot request */ + return xhci_cancel_cmd(xhci, NULL, cmd_trb); } if (!xhci->slot_id) { @@ -2857,6 +2867,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) struct xhci_slot_ctx *slot_ctx; struct xhci_input_control_ctx *ctrl_ctx; u64 temp_64; + union xhci_trb *cmd_trb; if (!udev->slot_id) { xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id); @@ -2895,6 +2906,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2); spin_lock_irqsave(&xhci->lock, flags); + cmd_trb = xhci->cmd_ring->dequeue; ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma, udev->slot_id); if (ret) { @@ -2907,7 +2919,7 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) /* ctrl tx can take up to 5 sec; XXX: need more time for xHC? 
*/ timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev, - USB_CTRL_SET_TIMEOUT); + XHCI_CMD_DEFAULT_TIMEOUT); /* FIXME: From section 4.3.4: "Software shall be responsible for timing * the SetAddress() "recovery interval" required by USB and aborting the * command on a timeout. @@ -2915,7 +2927,10 @@ int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev) if (timeleft <= 0) { xhci_warn(xhci, "%s while waiting for a slot\n", timeleft == 0 ? "Timeout" : "Signal"); - /* FIXME cancel the address device command */ + /* cancel the address device command */ + ret = xhci_cancel_cmd(xhci, NULL, cmd_trb); + if (ret < 0) + return ret; return -ETIME; } diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index 21482df..1d72895 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1070,6 +1070,9 @@ union xhci_trb { #define TRB_MFINDEX_WRAP 39 /* TRB IDs 40-47 reserved, 48-63 is vendor-defined */ +#define TRB_TYPE_LINK_LE32(x) (((x) & cpu_to_le32(TRB_TYPE_BITMASK)) == \ + cpu_to_le32(TRB_TYPE(TRB_LINK))) + /* Nec vendor-specific command completion event. */ #define TRB_NEC_CMD_COMP 48 /* Get NEC firmware revision. */ @@ -1111,6 +1114,16 @@ struct xhci_td { union xhci_trb *last_trb; }; +/* xHCI command default timeout value */ +#define XHCI_CMD_DEFAULT_TIMEOUT (5 * HZ) + +/* command descriptor */ +struct xhci_cd { + struct list_head cancel_cmd_list; + struct xhci_command *command; + union xhci_trb *cmd_trb; +}; + struct xhci_dequeue_state { struct xhci_segment *new_deq_seg; union xhci_trb *new_deq_ptr; @@ -1252,6 +1265,11 @@ struct xhci_hcd { /* data structures */ struct xhci_device_context_array *dcbaa; struct xhci_ring *cmd_ring; + unsigned int cmd_ring_state; +#define CMD_RING_STATE_RUNNING (1 << 0) +#define CMD_RING_STATE_ABORTED (1 << 1) +#define CMD_RING_STATE_STOPPED (1 << 2) + struct list_head cancel_cmd_list; unsigned int cmd_ring_reserved_trbs; struct xhci_ring *event_ring; struct xhci_erst erst; @@ -1486,6 +1504,8 @@ void xhci_unregister_pci(void); #endif /* xHCI host controller glue */ +int handshake(struct xhci_hcd *xhci, void __iomem *ptr, + u32 mask, u32 done, int usec); void xhci_quiesce(struct xhci_hcd *xhci); int xhci_halt(struct xhci_hcd *xhci); int xhci_reset(struct xhci_hcd *xhci); @@ -1568,6 +1588,8 @@ void xhci_queue_config_ep_quirk(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, struct xhci_dequeue_state *deq_state); void xhci_stop_endpoint_command_watchdog(unsigned long arg); +int xhci_cancel_cmd(struct xhci_hcd *xhci, struct xhci_command *command, + union xhci_trb *cmd_trb); void xhci_ring_ep_doorbell(struct xhci_hcd *xhci, unsigned int slot_id, unsigned int ep_index, unsigned int stream_id); diff --git a/drivers/usb/serial/mos7840.c b/drivers/usb/serial/mos7840.c index fd2463e..2d34dfd 100644 --- a/drivers/usb/serial/mos7840.c +++ b/drivers/usb/serial/mos7840.c @@ -235,12 +235,10 @@ struct moschip_port { int port_num; /*Actual port number in the device(1,2,etc) */ struct urb *write_urb; /* write URB for this port */ struct urb *read_urb; /* read URB for this port */ - struct urb *int_urb; __u8 shadowLCR; /* last LCR value received */ __u8 shadowMCR; /* last MCR value received */ char open; char open_ports; - char zombie; wait_queue_head_t wait_chase; /* for handling sleeping while waiting for chase to finish */ wait_queue_head_t delta_msr_wait; /* for handling sleeping while waiting for msr change to happen */ int delta_msr_cond; @@ -505,7 +503,6 @@ static void mos7840_control_callback(struct urb *urb) 
unsigned char *data; struct moschip_port *mos7840_port; __u8 regval = 0x0; - int result = 0; int status = urb->status; mos7840_port = urb->context; @@ -524,7 +521,7 @@ static void mos7840_control_callback(struct urb *urb) default: dbg("%s - nonzero urb status received: %d", __func__, status); - goto exit; + return; } dbg("%s urb buffer size is %d", __func__, urb->actual_length); @@ -537,17 +534,6 @@ static void mos7840_control_callback(struct urb *urb) mos7840_handle_new_msr(mos7840_port, regval); else if (mos7840_port->MsrLsr == 1) mos7840_handle_new_lsr(mos7840_port, regval); - -exit: - spin_lock(&mos7840_port->pool_lock); - if (!mos7840_port->zombie) - result = usb_submit_urb(mos7840_port->int_urb, GFP_ATOMIC); - spin_unlock(&mos7840_port->pool_lock); - if (result) { - dev_err(&urb->dev->dev, - "%s - Error %d submitting interrupt urb\n", - __func__, result); - } } static int mos7840_get_reg(struct moschip_port *mcs, __u16 Wval, __u16 reg, @@ -655,14 +641,7 @@ static void mos7840_interrupt_callback(struct urb *urb) wreg = MODEM_STATUS_REGISTER; break; } - spin_lock(&mos7840_port->pool_lock); - if (!mos7840_port->zombie) { - rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data); - } else { - spin_unlock(&mos7840_port->pool_lock); - return; - } - spin_unlock(&mos7840_port->pool_lock); + rv = mos7840_get_reg(mos7840_port, wval, wreg, &Data); } } } @@ -2595,7 +2574,6 @@ error: kfree(mos7840_port->ctrl_buf); usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port); - serial->port[i] = NULL; } return status; } @@ -2626,9 +2604,6 @@ static void mos7840_disconnect(struct usb_serial *serial) mos7840_port = mos7840_get_port_private(serial->port[i]); dbg ("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { - spin_lock_irqsave(&mos7840_port->pool_lock, flags); - mos7840_port->zombie = 1; - spin_unlock_irqrestore(&mos7840_port->pool_lock, flags); usb_kill_urb(mos7840_port->control_urb); } } @@ -2662,6 +2637,7 @@ static void mos7840_release(struct usb_serial *serial) mos7840_port = mos7840_get_port_private(serial->port[i]); dbg("mos7840_port %d = %p", i, mos7840_port); if (mos7840_port) { + usb_free_urb(mos7840_port->control_urb); kfree(mos7840_port->ctrl_buf); kfree(mos7840_port->dr); kfree(mos7840_port); diff --git a/drivers/usb/serial/opticon.c b/drivers/usb/serial/opticon.c index 96423f3..5d274b3 100644 --- a/drivers/usb/serial/opticon.c +++ b/drivers/usb/serial/opticon.c @@ -160,7 +160,11 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, { struct usb_serial *serial = port->serial; int retval; - u8 buffer[2]; + u8 *buffer; + + buffer = kzalloc(1, GFP_KERNEL); + if (!buffer) + return -ENOMEM; buffer[0] = val; /* Send the message to the vendor control endpoint @@ -169,6 +173,7 @@ static int send_control_msg(struct usb_serial_port *port, u8 requesttype, requesttype, USB_DIR_OUT|USB_TYPE_VENDOR|USB_RECIP_INTERFACE, 0, 0, buffer, 1, 0); + kfree(buffer); return retval; } @@ -292,7 +297,7 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, if (!dr) { dev_err(&port->dev, "out of memory\n"); count = -ENOMEM; - goto error; + goto error_no_dr; } dr->bRequestType = USB_TYPE_VENDOR | USB_RECIP_INTERFACE | USB_DIR_OUT; @@ -322,6 +327,8 @@ static int opticon_write(struct tty_struct *tty, struct usb_serial_port *port, return count; error: + kfree(dr); +error_no_dr: usb_free_urb(urb); error_no_urb: kfree(buffer); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 4e0c118..c334670 100644 --- 
a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -503,11 +503,19 @@ static const struct option_blacklist_info net_intf5_blacklist = { .reserved = BIT(5), }; +static const struct option_blacklist_info net_intf6_blacklist = { + .reserved = BIT(6), +}; + static const struct option_blacklist_info zte_mf626_blacklist = { .sendsetup = BIT(0) | BIT(1), .reserved = BIT(4), }; +static const struct option_blacklist_info zte_1255_blacklist = { + .reserved = BIT(3) | BIT(4), +}; + static const struct usb_device_id option_ids[] = { { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_COLT) }, { USB_DEVICE(OPTION_VENDOR_ID, OPTION_PRODUCT_RICOLA) }, @@ -853,13 +861,19 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0113, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0117, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0118, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0121, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0122, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0123, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0124, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0125, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf6_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0126, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0128, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0142, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, @@ -872,7 +886,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, @@ -880,9 +895,22 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 
0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0191, 0xff, 0xff, 0xff), /* ZTE EuFi890 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0199, 0xff, 0xff, 0xff), /* ZTE MF820S */ + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0257, 0xff, 0xff, 0xff), /* ZTE MF821 */ + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1021, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1057, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1058, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1059, 0xff, 0xff, 0xff) }, @@ -998,18 +1026,24 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1169, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1170, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1244, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1245, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1246, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1247, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1248, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1249, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1250, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1251, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1252, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1253, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1254, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1255, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&zte_1255_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1256, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1257, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1258, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1259, 0xff, 0xff, 0xff) }, @@ -1054,8 +1088,16 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff), /* ZTE MF91 */ + .driver_info = (kernel_ulong_t)&net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, @@ -1067,15 +1109,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf5_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, diff --git a/drivers/usb/serial/sierra.c b/drivers/usb/serial/sierra.c index d547638..a159ad0 100644 --- a/drivers/usb/serial/sierra.c +++ 
b/drivers/usb/serial/sierra.c @@ -171,7 +171,6 @@ static int sierra_probe(struct usb_serial *serial, { int result = 0; struct usb_device *udev; - struct sierra_intf_private *data; u8 ifnum; udev = serial->dev; @@ -199,11 +198,6 @@ static int sierra_probe(struct usb_serial *serial, return -ENODEV; } - data = serial->private = kzalloc(sizeof(struct sierra_intf_private), GFP_KERNEL); - if (!data) - return -ENOMEM; - spin_lock_init(&data->susp_lock); - return result; } @@ -915,6 +909,7 @@ static void sierra_dtr_rts(struct usb_serial_port *port, int on) static int sierra_startup(struct usb_serial *serial) { struct usb_serial_port *port; + struct sierra_intf_private *intfdata; struct sierra_port_private *portdata; struct sierra_iface_info *himemoryp = NULL; int i; @@ -922,6 +917,14 @@ static int sierra_startup(struct usb_serial *serial) dev_dbg(&serial->dev->dev, "%s\n", __func__); + intfdata = kzalloc(sizeof(*intfdata), GFP_KERNEL); + if (!intfdata) + return -ENOMEM; + + spin_lock_init(&intfdata->susp_lock); + + usb_set_serial_data(serial, intfdata); + /* Set Device mode to D0 */ sierra_set_power_state(serial->dev, 0x0000); @@ -937,7 +940,7 @@ static int sierra_startup(struct usb_serial *serial) dev_dbg(&port->dev, "%s: kmalloc for " "sierra_port_private (%d) failed!\n", __func__, i); - return -ENOMEM; + goto err; } spin_lock_init(&portdata->lock); init_usb_anchor(&portdata->active); @@ -974,6 +977,14 @@ static int sierra_startup(struct usb_serial *serial) } return 0; +err: + for (--i; i >= 0; --i) { + portdata = usb_get_serial_port_data(serial->port[i]); + kfree(portdata); + } + kfree(intfdata); + + return -ENOMEM; } static void sierra_release(struct usb_serial *serial) @@ -993,6 +1004,7 @@ static void sierra_release(struct usb_serial *serial) continue; kfree(portdata); } + kfree(serial->private); } #ifdef CONFIG_PM diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 5b073bc..59d646d 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -576,6 +576,7 @@ no_firmware: "%s: please contact support@connecttech.com\n", serial->type->description); kfree(result); + kfree(command); return -ENODEV; no_command_private: diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 591f57f..fa8a1b2 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -1004,6 +1004,12 @@ UNUSUAL_DEV( 0x07cf, 0x1001, 0x1000, 0x9999, USB_SC_8070, USB_PR_CB, NULL, US_FL_NEED_OVERRIDE | US_FL_FIX_INQUIRY ), +/* Submitted by Oleksandr Chumachenko <ledest@gmail.com> */ +UNUSUAL_DEV( 0x07cf, 0x1167, 0x0100, 0x0100, + "Casio", + "EX-N1 DigitalCamera", + USB_SC_8070, USB_PR_DEVICE, NULL, 0), + /* Submitted by Hartmut Wahl <hwahl@hwahl.de>*/ UNUSUAL_DEV( 0x0839, 0x000a, 0x0001, 0x0001, "Samsung", diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c index e224a92..f274826 100644 --- a/drivers/vhost/net.c +++ b/drivers/vhost/net.c @@ -305,7 +305,8 @@ static void handle_rx(struct vhost_net *net) .hdr.gso_type = VIRTIO_NET_HDR_GSO_NONE }; size_t total_len = 0; - int err, headcount, mergeable; + int err, mergeable; + s16 headcount; size_t vhost_hlen, sock_hlen; size_t vhost_len, sock_len; /* TODO: check that we are running from vhost_worker? 
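/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * sierra_startup() error path above uses the classic partial-allocation
 * unwind -- on failure at port i, free ports [0, i) in reverse order
 * before failing the whole call. A self-contained model:
 */
#include <stdio.h>
#include <stdlib.h>

#define NPORTS 4

int main(void)
{
	void *portdata[NPORTS];
	int i;

	for (i = 0; i < NPORTS; i++) {
		/* simulate kzalloc() failing on the third port */
		portdata[i] = (i == 2) ? NULL : malloc(16);
		if (!portdata[i])
			goto err;
	}
	puts("all ports allocated");
	for (i = 0; i < NPORTS; i++)
		free(portdata[i]);
	return 0;
err:
	for (--i; i >= 0; --i)	/* same loop shape as the patch */
		free(portdata[i]);
	puts("unwound partial allocation, would return -ENOMEM");
	return 1;
}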
*/ diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c index 415e9b2..6a7725a 100644 --- a/drivers/video/udlfb.c +++ b/drivers/video/udlfb.c @@ -613,7 +613,7 @@ static ssize_t dlfb_ops_write(struct fb_info *info, const char __user *buf, result = fb_sys_write(info, buf, count, ppos); if (result > 0) { - int start = max((int)(offset / info->fix.line_length) - 1, 0); + int start = max((int)(offset / info->fix.line_length), 0); int lines = min((u32)((result / info->fix.line_length) + 1), (u32)info->var.yres); diff --git a/drivers/video/via/via_clock.c b/drivers/video/via/via_clock.c index af8f26b..db1e392 100644 --- a/drivers/video/via/via_clock.c +++ b/drivers/video/via/via_clock.c @@ -25,6 +25,7 @@ #include <linux/kernel.h> #include <linux/via-core.h> +#include <asm/olpc.h> #include "via_clock.h" #include "global.h" #include "debug.h" @@ -289,6 +290,10 @@ static void dummy_set_pll(struct via_pll_config config) printk(KERN_INFO "Using undocumented set PLL.\n%s", via_slap); } +static void noop_set_clock_state(u8 state) +{ +} + void via_clock_init(struct via_clock *clock, int gfx_chip) { switch (gfx_chip) { @@ -346,4 +351,18 @@ void via_clock_init(struct via_clock *clock, int gfx_chip) break; } + + if (machine_is_olpc()) { + /* The OLPC XO-1.5 cannot suspend/resume reliably if the + * IGA1/IGA2 clocks are set as on or off (memory rot + * occasionally happens during suspend under such + * configurations). + * + * The only known stable scenario is to leave these bits as-is; + * in their default states they are documented to enable the + * clock only when it is needed. + */ + clock->set_primary_clock_state = noop_set_clock_state; + clock->set_secondary_clock_state = noop_set_clock_state; + } } diff --git a/fs/autofs4/root.c b/fs/autofs4/root.c index f55ae23..790fa63 100644 --- a/fs/autofs4/root.c +++ b/fs/autofs4/root.c @@ -392,10 +392,12 @@ static struct vfsmount *autofs4_d_automount(struct path *path) ino->flags |= AUTOFS_INF_PENDING; spin_unlock(&sbi->fs_lock); status = autofs4_mount_wait(dentry); - if (status) - return ERR_PTR(status); spin_lock(&sbi->fs_lock); ino->flags &= ~AUTOFS_INF_PENDING; + if (status) { + spin_unlock(&sbi->fs_lock); + return ERR_PTR(status); + } } done: if (!(ino->flags & AUTOFS_INF_EXPIRING)) { diff --git a/fs/ceph/export.c b/fs/ceph/export.c index f67b687..a080779 100644 --- a/fs/ceph/export.c +++ b/fs/ceph/export.c @@ -84,7 +84,7 @@ static int ceph_encode_fh(struct dentry *dentry, u32 *rawfh, int *max_len, * FIXME: we should try harder by querying the mds for the ino.
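/*
 * Editor's note (illustrative sketch, not part of the patch): the ceph,
 * gfs2, isofs and xfs hunks that follow all add the same guard -- an NFS
 * file handle's fh_len is counted in 32-bit words, so a handle must carry
 * at least sizeof(struct)/4 words before its fields may be read. A
 * minimal model with a stand-in handle type:
 */
#include <stdio.h>
#include <stdint.h>

struct fh_model {	/* stand-in for struct ceph_nfs_fh */
	uint64_t ino;
};

static int fh_len_ok(int fh_len)
{
	return fh_len >= (int)(sizeof(struct fh_model) / 4);
}

int main(void)
{
	printf("fh_len=1: %s\n", fh_len_ok(1) ? "accept" : "ESTALE");
	printf("fh_len=2: %s\n", fh_len_ok(2) ? "accept" : "ESTALE");
	return 0;
}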
*/ static struct dentry *__fh_to_dentry(struct super_block *sb, - struct ceph_nfs_fh *fh) + struct ceph_nfs_fh *fh, int fh_len) { struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc; struct inode *inode; @@ -92,6 +92,9 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, struct ceph_vino vino; int err; + if (fh_len < sizeof(*fh) / 4) + return ERR_PTR(-ESTALE); + dout("__fh_to_dentry %llx\n", fh->ino); vino.ino = fh->ino; vino.snap = CEPH_NOSNAP; @@ -136,7 +139,7 @@ static struct dentry *__fh_to_dentry(struct super_block *sb, * convert connectable fh to dentry */ static struct dentry *__cfh_to_dentry(struct super_block *sb, - struct ceph_nfs_confh *cfh) + struct ceph_nfs_confh *cfh, int fh_len) { struct ceph_mds_client *mdsc = ceph_sb_to_client(sb)->mdsc; struct inode *inode; @@ -144,6 +147,9 @@ static struct dentry *__cfh_to_dentry(struct super_block *sb, struct ceph_vino vino; int err; + if (fh_len < sizeof(*cfh) / 4) + return ERR_PTR(-ESTALE); + dout("__cfh_to_dentry %llx (%llx/%x)\n", cfh->ino, cfh->parent_ino, cfh->parent_name_hash); @@ -193,9 +199,11 @@ static struct dentry *ceph_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { if (fh_type == 1) - return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw); + return __fh_to_dentry(sb, (struct ceph_nfs_fh *)fid->raw, + fh_len); else - return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw); + return __cfh_to_dentry(sb, (struct ceph_nfs_confh *)fid->raw, + fh_len); } /* @@ -216,6 +224,8 @@ static struct dentry *ceph_fh_to_parent(struct super_block *sb, if (fh_type == 1) return ERR_PTR(-ESTALE); + if (fh_len < sizeof(*cfh) / 4) + return ERR_PTR(-ESTALE); pr_debug("fh_to_parent %llx/%d\n", cfh->parent_ino, cfh->parent_name_hash); diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c index 61abb63..3deb58d 100644 --- a/fs/compat_ioctl.c +++ b/fs/compat_ioctl.c @@ -208,6 +208,8 @@ static int do_video_set_spu_palette(unsigned int fd, unsigned int cmd, err = get_user(palp, &up->palette); err |= get_user(length, &up->length); + if (err) + return -EFAULT; up_native = compat_alloc_user_space(sizeof(struct video_spu_palette)); err = put_user(compat_ptr(palp), &up_native->palette); diff --git a/fs/gfs2/export.c b/fs/gfs2/export.c index fe9945f..5235d6e 100644 --- a/fs/gfs2/export.c +++ b/fs/gfs2/export.c @@ -167,6 +167,8 @@ static struct dentry *gfs2_fh_to_dentry(struct super_block *sb, struct fid *fid, case GFS2_SMALL_FH_SIZE: case GFS2_LARGE_FH_SIZE: case GFS2_OLD_FH_SIZE: + if (fh_len < GFS2_SMALL_FH_SIZE) + return NULL; this.no_formal_ino = ((u64)be32_to_cpu(fh[0])) << 32; this.no_formal_ino |= be32_to_cpu(fh[1]); this.no_addr = ((u64)be32_to_cpu(fh[2])) << 32; @@ -186,6 +188,8 @@ static struct dentry *gfs2_fh_to_parent(struct super_block *sb, struct fid *fid, switch (fh_type) { case GFS2_LARGE_FH_SIZE: case GFS2_OLD_FH_SIZE: + if (fh_len < GFS2_LARGE_FH_SIZE) + return NULL; parent.no_formal_ino = ((u64)be32_to_cpu(fh[4])) << 32; parent.no_formal_ino |= be32_to_cpu(fh[5]); parent.no_addr = ((u64)be32_to_cpu(fh[6])) << 32; diff --git a/fs/isofs/export.c b/fs/isofs/export.c index dd4687f..516eb21 100644 --- a/fs/isofs/export.c +++ b/fs/isofs/export.c @@ -179,7 +179,7 @@ static struct dentry *isofs_fh_to_parent(struct super_block *sb, { struct isofs_fid *ifid = (struct isofs_fid *)fid; - if (fh_type != 2) + if (fh_len < 2 || fh_type != 2) return NULL; return isofs_export_iget(sb, diff --git a/fs/jbd/commit.c b/fs/jbd/commit.c index 72ffa97..dcd23f8 100644 --- a/fs/jbd/commit.c +++ 
b/fs/jbd/commit.c @@ -85,7 +85,12 @@ nope: static void release_data_buffer(struct buffer_head *bh) { if (buffer_freed(bh)) { + WARN_ON_ONCE(buffer_dirty(bh)); clear_buffer_freed(bh); + clear_buffer_mapped(bh); + clear_buffer_new(bh); + clear_buffer_req(bh); + bh->b_bdev = NULL; release_buffer_page(bh); } else put_bh(bh); @@ -840,17 +845,35 @@ restart_loop: * there's no point in keeping a checkpoint record for * it. */ - /* A buffer which has been freed while still being * journaled by a previous transaction may end up still * being dirty here, but we want to avoid writing back * that buffer in the future after the "add to orphan" * operation has been committed. That's not only a performance * gain, it also stops aliasing problems if the buffer is * left behind for writeback and gets reallocated for another * use in a different page. */ - if (buffer_freed(bh) && !jh->b_next_transaction) { - clear_buffer_freed(bh); - clear_buffer_jbddirty(bh); + /* + * A buffer which has been freed while still being journaled by + * a previous transaction. + */ + if (buffer_freed(bh)) { + /* + * If the running transaction is the one containing + * "add to orphan" operation (b_next_transaction != + * NULL), we have to wait for that transaction to + * commit before we can really get rid of the buffer. + * So just clear b_modified to not confuse transaction + * credit accounting and refile the buffer to + * BJ_Forget of the running transaction. If the just + * committed transaction contains "add to orphan" + * operation, we can completely invalidate the buffer + * now. We are rather thorough in that, since the + * buffer may still be accessible when blocksize < + * pagesize and it is attached to the last partial + * page. + */ + jh->b_modified = 0; + if (!jh->b_next_transaction) { + clear_buffer_freed(bh); + clear_buffer_jbddirty(bh); + clear_buffer_mapped(bh); + clear_buffer_new(bh); + clear_buffer_req(bh); + bh->b_bdev = NULL; + } } if (buffer_jbddirty(bh)) { diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c index f7ee81a..b0161a6 100644 --- a/fs/jbd/transaction.c +++ b/fs/jbd/transaction.c @@ -1837,15 +1837,16 @@ static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) * We're outside-transaction here. Either or both of j_running_transaction * and j_committing_transaction may be NULL. */ -static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) +static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh, + int partial_page) { transaction_t *transaction; struct journal_head *jh; int may_free = 1; - int ret; BUFFER_TRACE(bh, "entry"); +retry: /* * It is safe to proceed here without the j_list_lock because the * buffers cannot be stolen by try_to_free_buffers as long as we are @@ -1873,10 +1874,18 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) * clear the buffer dirty bit at the latest at the moment when the * transaction marking the buffer as freed in the filesystem * structures is committed because from that moment on the - * buffer can be reallocated and used by a different page. + * block can be reallocated and used by a different page. * Since the block hasn't been freed yet but the inode has * already been added to orphan list, it is safe for us to add * the buffer to BJ_Forget list of the newest transaction.
+ * + * Also we have to clear buffer_mapped flag of a truncated buffer + * because the buffer_head may be attached to the page straddling + * i_size (can happen only when blocksize < pagesize) and thus the + * buffer_head can be reused when the file is extended again. So we end + * up keeping around invalidated buffers attached to transactions' + * BJ_Forget list just to stop checkpointing code from cleaning up + * the transaction this buffer was modified in. */ transaction = jh->b_transaction; if (transaction == NULL) { @@ -1903,13 +1912,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) * committed, the buffer won't be needed any * longer. */ JBUFFER_TRACE(jh, "checkpointed: add to BJ_Forget"); - ret = __dispose_buffer(jh, + may_free = __dispose_buffer(jh, journal->j_running_transaction); - journal_put_journal_head(jh); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - spin_unlock(&journal->j_state_lock); - return ret; + goto zap_buffer; } else { /* There is no currently-running transaction. So the * orphan record which we wrote for this file must have @@ -1917,13 +1922,9 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) * the committing transaction, if it exists. */ if (journal->j_committing_transaction) { JBUFFER_TRACE(jh, "give to committing trans"); - ret = __dispose_buffer(jh, + may_free = __dispose_buffer(jh, journal->j_committing_transaction); - journal_put_journal_head(jh); - spin_unlock(&journal->j_list_lock); - jbd_unlock_bh_state(bh); - spin_unlock(&journal->j_state_lock); - return ret; + goto zap_buffer; } else { /* The orphan record's transaction has * committed. We can cleanse this buffer */ @@ -1944,10 +1945,24 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) } /* * The buffer is committing, we simply cannot touch - * it. So we just set j_next_transaction to the - * running transaction (if there is one) and mark - * buffer as freed so that commit code knows it should - * clear dirty bits when it is done with the buffer. + * it. If the page is straddling i_size we have to wait + * for commit and try again. + */ + if (partial_page) { + tid_t tid = journal->j_committing_transaction->t_tid; + + journal_put_journal_head(jh); + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + spin_unlock(&journal->j_state_lock); + log_wait_commit(journal, tid); + goto retry; + } + /* + * OK, buffer won't be reachable after truncate. We just set + * j_next_transaction to the running transaction (if there is + * one) and mark buffer as freed so that commit code knows it + * should clear dirty bits when it is done with the buffer. */ set_buffer_freed(bh); if (journal->j_running_transaction && buffer_jbddirty(bh)) @@ -1970,6 +1985,14 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) } zap_buffer: + /* + * This is tricky. Although the buffer is truncated, it may be reused + * if blocksize < pagesize and it is attached to the page straddling + * EOF. Since the buffer might have been added to BJ_Forget list of the + * running transaction, journal_get_write_access() won't clear + * b_modified and credit accounting gets confused. So clear b_modified + * here. 
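/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * journal_unmap_buffer() change above adds a retry loop -- when the
 * buffer belongs to the committing transaction and the page straddles
 * i_size, the locks are dropped, log_wait_commit() sleeps until that
 * commit finishes, and the whole unmap is retried from scratch. A stub
 * model of that control flow:
 */
#include <stdio.h>

enum owner { OWNER_NONE, OWNER_COMMITTING };

int main(void)
{
	/* pretend the buffer stays with a committing transaction twice */
	enum owner sequence[] = { OWNER_COMMITTING, OWNER_COMMITTING,
				  OWNER_NONE };
	int attempt = 0;

	for (;;) {	/* plays the role of the "retry:" label */
		if (sequence[attempt] == OWNER_COMMITTING) {
			printf("attempt %d: committing -> wait, retry\n",
			       attempt++);
			/* log_wait_commit(journal, tid) would sleep here */
			continue;
		}
		printf("attempt %d: buffer safe to zap\n", attempt);
		break;
	}
	return 0;
}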
*/ + jh->b_modified = 0; journal_put_journal_head(jh); zap_buffer_no_jh: spin_unlock(&journal->j_list_lock); @@ -2018,7 +2041,8 @@ void journal_invalidatepage(journal_t *journal, if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); - may_free &= journal_unmap_buffer(journal, bh); + may_free &= journal_unmap_buffer(journal, bh, + offset > 0); unlock_buffer(bh); } curr_off = next_off; diff --git a/fs/lockd/clntxdr.c b/fs/lockd/clntxdr.c index 36057ce..6e2a2d5 100644 --- a/fs/lockd/clntxdr.c +++ b/fs/lockd/clntxdr.c @@ -223,7 +223,7 @@ static void encode_nlm_stat(struct xdr_stream *xdr, { __be32 *p; - BUG_ON(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); + WARN_ON_ONCE(be32_to_cpu(stat) > NLM_LCK_DENIED_GRACE_PERIOD); p = xdr_reserve_space(xdr, 4); *p = stat; } diff --git a/fs/lockd/svcproc.c b/fs/lockd/svcproc.c index d27aab1..d413af3 100644 --- a/fs/lockd/svcproc.c +++ b/fs/lockd/svcproc.c @@ -67,7 +67,8 @@ nlmsvc_retrieve_args(struct svc_rqst *rqstp, struct nlm_args *argp, /* Obtain file pointer. Not used by FREE_ALL call. */ if (filp != NULL) { - if ((error = nlm_lookup_file(rqstp, &file, &lock->fh)) != 0) + error = cast_status(nlm_lookup_file(rqstp, &file, &lock->fh)); + if (error != 0) goto no_locks; *filp = file; diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c index 4fd5bb3..0363aa4 100644 --- a/fs/reiserfs/inode.c +++ b/fs/reiserfs/inode.c @@ -1568,8 +1568,10 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, reiserfs_warning(sb, "reiserfs-13077", "nfsd/reiserfs, fhtype=%d, len=%d - odd", fh_type, fh_len); - fh_type = 5; + fh_type = fh_len; } + if (fh_len < 2) + return NULL; return reiserfs_get_dentry(sb, fid->raw[0], fid->raw[1], (fh_type == 3 || fh_type >= 5) ? fid->raw[2] : 0); @@ -1578,6 +1580,8 @@ struct dentry *reiserfs_fh_to_dentry(struct super_block *sb, struct fid *fid, struct dentry *reiserfs_fh_to_parent(struct super_block *sb, struct fid *fid, int fh_len, int fh_type) { + if (fh_type > fh_len) + fh_type = fh_len; if (fh_type < 4) return NULL; diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index ea9120a..567b3db 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -404,20 +404,18 @@ int __sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) /** * sysfs_pathname - return full path to sysfs dirent * @sd: sysfs_dirent whose path we want - * @path: caller allocated buffer + * @path: caller allocated buffer of size PATH_MAX * * Gives the name "/" to the sysfs_root entry; any path returned * is relative to wherever sysfs is mounted. - * - * XXX: does no error checking on @path size */ static char *sysfs_pathname(struct sysfs_dirent *sd, char *path) { if (sd->s_parent) { sysfs_pathname(sd->s_parent, path); - strcat(path, "/"); + strlcat(path, "/", PATH_MAX); } - strcat(path, sd->s_name); + strlcat(path, sd->s_name, PATH_MAX); return path; } @@ -450,9 +448,11 @@ int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd) char *path = kzalloc(PATH_MAX, GFP_KERNEL); WARN(1, KERN_WARNING "sysfs: cannot create duplicate filename '%s'\n", - (path == NULL) ? sd->s_name : - strcat(strcat(sysfs_pathname(acxt->parent_sd, path), "/"), - sd->s_name)); + (path == NULL) ? 
sd->s_name + : (sysfs_pathname(acxt->parent_sd, path), + strlcat(path, "/", PATH_MAX), + strlcat(path, sd->s_name, PATH_MAX), + path)); kfree(path); } diff --git a/fs/udf/super.c b/fs/udf/super.c index a8e867a..b0c7b53 100644 --- a/fs/udf/super.c +++ b/fs/udf/super.c @@ -1316,6 +1316,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, udf_error(sb, __func__, "error loading logical volume descriptor: " "Partition table too long (%u > %lu)\n", table_len, sb->s_blocksize - sizeof(*lvd)); + ret = 1; goto out_bh; } @@ -1360,8 +1361,10 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block, UDF_ID_SPARABLE, strlen(UDF_ID_SPARABLE))) { if (udf_load_sparable_map(sb, map, - (struct sparablePartitionMap *)gpm) < 0) + (struct sparablePartitionMap *)gpm) < 0) { + ret = 1; goto out_bh; + } } else if (!strncmp(upm2->partIdent.ident, UDF_ID_METADATA, strlen(UDF_ID_METADATA))) { diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c index fed3f3c..844b22b 100644 --- a/fs/xfs/linux-2.6/xfs_export.c +++ b/fs/xfs/linux-2.6/xfs_export.c @@ -195,6 +195,9 @@ xfs_fs_fh_to_parent(struct super_block *sb, struct fid *fid, struct xfs_fid64 *fid64 = (struct xfs_fid64 *)fid; struct inode *inode = NULL; + if (fh_len < xfs_fileid_length(fileid_type)) + return NULL; + switch (fileid_type) { case FILEID_INO32_GEN_PARENT: inode = xfs_nfs_get_inode(sb, fid->i32.parent_ino, diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 481f856..15b62bb 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -1361,7 +1361,7 @@ static inline void ip_vs_notrack(struct sk_buff *skb) struct nf_conn *ct = nf_ct_get(skb, &ctinfo); if (!ct || !nf_ct_is_untracked(ct)) { - nf_reset(skb); + nf_conntrack_put(skb->nfct); skb->nfct = &nf_ct_untracked_get()->ct_general; skb->nfctinfo = IP_CT_NEW; nf_conntrack_get(skb->nfct); diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h index 4283508..3a0feb1 100644 --- a/include/net/netfilter/nf_conntrack_ecache.h +++ b/include/net/netfilter/nf_conntrack_ecache.h @@ -18,6 +18,7 @@ struct nf_conntrack_ecache { u16 ctmask; /* bitmask of ct events to be delivered */ u16 expmask; /* bitmask of expect events to be delivered */ u32 pid; /* netlink pid of destroyer */ + struct timer_list timeout; }; static inline struct nf_conntrack_ecache * diff --git a/kernel/cgroup.c b/kernel/cgroup.c index 5083a09..739553e 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -1803,9 +1803,8 @@ static int cgroup_task_migrate(struct cgroup *cgrp, struct cgroup *oldcgrp, * trading it for newcg is protected by cgroup_mutex, we're safe to drop * it here; it will be freed under RCU. 
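/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * sysfs_pathname() fix above replaces strcat() with strlcat() so a deep
 * hierarchy can no longer overrun the PATH_MAX buffer. glibc lacks
 * strlcat, so a minimal local equivalent is included; the tiny buffer
 * size is deliberate, to make the truncation visible:
 */
#include <stdio.h>
#include <string.h>

#define BUF_MAX 16	/* stand-in for PATH_MAX */

static size_t local_strlcat(char *dst, const char *src, size_t size)
{
	size_t dlen = strlen(dst);
	size_t i;

	if (dlen >= size)
		return size + strlen(src);
	for (i = 0; src[i] && dlen + i < size - 1; i++)
		dst[dlen + i] = src[i];
	dst[dlen + i] = '\0';
	return dlen + strlen(src);
}

int main(void)
{
	char path[BUF_MAX] = "";
	const char *parts[] = { "devices", "platform", "serial8250" };
	int i;

	for (i = 0; i < 3; i++) {
		local_strlcat(path, "/", sizeof(path));
		local_strlcat(path, parts[i], sizeof(path));
	}
	printf("%s\n", path);	/* truncated, never overflowed */
	return 0;
}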
*/ - put_css_set(oldcg); - set_bit(CGRP_RELEASABLE, &oldcgrp->flags); + put_css_set(oldcg); return 0; } diff --git a/kernel/module.c b/kernel/module.c index b9d0667..a8bd215 100644 --- a/kernel/module.c +++ b/kernel/module.c @@ -2605,6 +2605,10 @@ static int check_module_license_and_versions(struct module *mod) if (strcmp(mod->name, "driverloader") == 0) add_taint_module(mod, TAINT_PROPRIETARY_MODULE); + /* lve claims to be GPL but upstream won't provide source */ + if (strcmp(mod->name, "lve") == 0) + add_taint_module(mod, TAINT_PROPRIETARY_MODULE); + #ifdef CONFIG_MODVERSIONS if ((mod->num_syms && !mod->crcs) || (mod->num_gpl_syms && !mod->gpl_crcs) diff --git a/kernel/sys.c b/kernel/sys.c index dd29555..84e353b 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -1133,15 +1133,16 @@ DECLARE_RWSEM(uts_sem); * Work around broken programs that cannot handle "Linux 3.0". * Instead we map 3.x to 2.6.40+x, so e.g. 3.0 would be 2.6.40 */ -static int override_release(char __user *release, int len) +static int override_release(char __user *release, size_t len) { int ret = 0; - char buf[65]; if (current->personality & UNAME26) { - char *rest = UTS_RELEASE; + const char *rest = UTS_RELEASE; + char buf[65] = { 0 }; int ndots = 0; unsigned v; + size_t copy; while (*rest) { if (*rest == '.' && ++ndots >= 3) @@ -1151,8 +1152,9 @@ static int override_release(char __user *release, int len) rest++; } v = ((LINUX_VERSION_CODE >> 8) & 0xff) + 40; - snprintf(buf, len, "2.6.%u%s", v, rest); - ret = copy_to_user(release, buf, len); + copy = clamp_t(size_t, len, 1, sizeof(buf)); + copy = scnprintf(buf, copy, "2.6.%u%s", v, rest); + ret = copy_to_user(release, buf, copy + 1); } return ret; } diff --git a/kernel/timer.c b/kernel/timer.c index 8cff361..27982d9 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -63,6 +63,7 @@ EXPORT_SYMBOL(jiffies_64); #define TVR_SIZE (1 << TVR_BITS) #define TVN_MASK (TVN_SIZE - 1) #define TVR_MASK (TVR_SIZE - 1) +#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1)) struct tvec { struct list_head vec[TVN_SIZE]; @@ -356,11 +357,12 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer) vec = base->tv1.vec + (base->timer_jiffies & TVR_MASK); } else { int i; - /* If the timeout is larger than 0xffffffff on 64-bit - * architectures then we use the maximum timeout: + /* If the timeout is larger than MAX_TVAL (on 64-bit + * architectures or with CONFIG_BASE_SMALL=1) then we + * use the maximum timeout. 
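/*
 * Editor's note (illustrative sketch, not part of the patch): the
 * timer.c hunk around this point replaces the hard-coded 0xffffffff
 * clamp with MAX_TVAL, the largest interval the timer wheel can actually
 * index. The values below assume CONFIG_BASE_SMALL=1 (TVR_BITS=6,
 * TVN_BITS=4), where the old constant was far too large:
 */
#include <stdio.h>

#define TVR_BITS 6
#define TVN_BITS 4
#define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4 * TVN_BITS)) - 1))

int main(void)
{
	unsigned long idx = 0x7fffffffUL;	/* oversized timeout request */

	if (idx > MAX_TVAL)
		idx = MAX_TVAL;		/* clamp into the wheel's range */
	printf("MAX_TVAL = %lu jiffies, clamped idx = %lu\n", MAX_TVAL, idx);
	return 0;
}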
*/ - if (idx > 0xffffffffUL) { - idx = 0xffffffffUL; + if (idx > MAX_TVAL) { + idx = MAX_TVAL; expires = idx + base->timer_jiffies; } i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK; diff --git a/lib/genalloc.c b/lib/genalloc.c index 577ddf8..cbb294c 100644 --- a/lib/genalloc.c +++ b/lib/genalloc.c @@ -57,7 +57,7 @@ int gen_pool_add_virt(struct gen_pool *pool, unsigned long virt, phys_addr_t phy struct gen_pool_chunk *chunk; int nbits = size >> pool->min_alloc_order; int nbytes = sizeof(struct gen_pool_chunk) + - (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE; + BITS_TO_LONGS(nbits) * sizeof(long); chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid); if (unlikely(chunk == NULL)) @@ -2348,12 +2348,14 @@ static struct dentry *shmem_fh_to_dentry(struct super_block *sb, { struct inode *inode; struct dentry *dentry = NULL; - u64 inum = fid->raw[2]; - inum = (inum << 32) | fid->raw[1]; + u64 inum; if (fh_len < 3) return NULL; + inum = fid->raw[2]; + inum = (inum << 32) | fid->raw[1]; + inode = ilookup5(sb, (unsigned long)(inum + fid->raw[0]), shmem_match, fid->raw); if (inode) { diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 96bb0a3..eb8857a 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -1313,8 +1313,6 @@ int neigh_resolve_output(struct sk_buff *skb) if (!dst) goto discard; - __skb_pull(skb, skb_network_offset(skb)); - if (!neigh_event_send(neigh, skb)) { int err; struct net_device *dev = neigh->dev; @@ -1326,6 +1324,7 @@ int neigh_resolve_output(struct sk_buff *skb) neigh_hh_init(neigh, dst, dst->ops->protocol); do { + __skb_pull(skb, skb_network_offset(skb)); seq = read_seqbegin(&neigh->ha_lock); err = dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); @@ -1358,9 +1357,8 @@ int neigh_connected_output(struct sk_buff *skb) struct net_device *dev = neigh->dev; unsigned int seq; - __skb_pull(skb, skb_network_offset(skb)); - do { + __skb_pull(skb, skb_network_offset(skb)); seq = read_seqbegin(&neigh->ha_lock); err = dev_hard_header(skb, dev, ntohs(skb->protocol), neigh->ha, NULL, skb->len); diff --git a/net/core/pktgen.c b/net/core/pktgen.c index c0e0f76..01890e1 100644 --- a/net/core/pktgen.c +++ b/net/core/pktgen.c @@ -2932,7 +2932,7 @@ static struct sk_buff *fill_packet_ipv6(struct net_device *odev, sizeof(struct ipv6hdr) - sizeof(struct udphdr) - pkt_dev->pkt_overhead; - if (datalen < sizeof(struct pktgen_hdr)) { + if (datalen < 0 || datalen < sizeof(struct pktgen_hdr)) { datalen = sizeof(struct pktgen_hdr); if (net_ratelimit()) pr_info("increased datalen to %d\n", datalen); diff --git a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c index de9da21..d7d63f4 100644 --- a/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c +++ b/net/ipv4/netfilter/nf_conntrack_l3proto_ipv4.c @@ -84,6 +84,14 @@ static int ipv4_get_l4proto(const struct sk_buff *skb, unsigned int nhoff, *dataoff = nhoff + (iph->ihl << 2); *protonum = iph->protocol; + /* Check bogus IP headers */ + if (*dataoff > skb->len) { + pr_debug("nf_conntrack_ipv4: bogus IPv4 packet: " + "nhoff %u, ihl %u, skblen %u\n", + nhoff, iph->ihl << 2, skb->len); + return -NF_ACCEPT; + } + return NF_ACCEPT; } diff --git a/net/ipv4/netfilter/nf_nat_sip.c b/net/ipv4/netfilter/nf_nat_sip.c index e40cf78..cd6881e 100644 --- a/net/ipv4/netfilter/nf_nat_sip.c +++ b/net/ipv4/netfilter/nf_nat_sip.c @@ -148,7 +148,7 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, if (ct_sip_parse_header_uri(ct, *dptr, NULL, *datalen, hdr, 
NULL, &matchoff, &matchlen, &addr, &port) > 0) { - unsigned int matchend, poff, plen, buflen, n; + unsigned int olen, matchend, poff, plen, buflen, n; char buffer[sizeof("nnn.nnn.nnn.nnn:nnnnn")]; /* We're only interested in headers related to this @@ -163,11 +163,12 @@ static unsigned int ip_nat_sip(struct sk_buff *skb, unsigned int dataoff, goto next; } + olen = *datalen; if (!map_addr(skb, dataoff, dptr, datalen, matchoff, matchlen, &addr, port)) return NF_DROP; - matchend = matchoff + matchlen; + matchend = matchoff + matchlen + *datalen - olen; /* The maddr= parameter (RFC 2361) specifies where to send * the reply. */ @@ -501,7 +502,10 @@ static unsigned int ip_nat_sdp_media(struct sk_buff *skb, unsigned int dataoff, ret = nf_ct_expect_related(rtcp_exp); if (ret == 0) break; - else if (ret != -EBUSY) { + else if (ret == -EBUSY) { + nf_ct_unexpect_related(rtp_exp); + continue; + } else if (ret < 0) { nf_ct_unexpect_related(rtp_exp); port = 0; break; diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c index 53a5af6..d645c6f 100644 --- a/net/ipv4/tcp_ipv4.c +++ b/net/ipv4/tcp_ipv4.c @@ -651,10 +651,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) arg.csumoffset = offsetof(struct tcphdr, check) / 2; arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; /* When socket is gone, all binding information is lost. - * routing might fail in this case. using iif for oif to - * make sure we can deliver it + * routing might fail in this case. No choice here, if we choose to force + * input interface, we will misroute in case of asymmetric route. */ - arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); + if (sk) + arg.bound_dev_if = sk->sk_bound_dev_if; net = dev_net(skb_dst(skb)->dev); ip_send_reply(net->ipv4.tcp_sock, skb, ip_hdr(skb)->saddr, diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index 848f963..a6d5850 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -1060,7 +1060,8 @@ static void tcp_v6_send_response(struct sk_buff *skb, u32 seq, u32 ack, u32 win, __tcp_v6_send_check(buff, &fl6.saddr, &fl6.daddr); fl6.flowi6_proto = IPPROTO_TCP; - fl6.flowi6_oif = inet6_iif(skb); + if (ipv6_addr_type(&fl6.daddr) & IPV6_ADDR_LINKLOCAL) + fl6.flowi6_oif = inet6_iif(skb); fl6.fl6_dport = t1->dest; fl6.fl6_sport = t1->source; security_skb_classify_flow(skb, flowi6_to_flowi(&fl6)); diff --git a/net/mac80211/wpa.c b/net/mac80211/wpa.c index aa1c40a..d9e03cf 100644 --- a/net/mac80211/wpa.c +++ b/net/mac80211/wpa.c @@ -109,7 +109,8 @@ ieee80211_rx_h_michael_mic_verify(struct ieee80211_rx_data *rx) if (status->flag & RX_FLAG_MMIC_ERROR) goto mic_fail; - if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key) + if (!(status->flag & RX_FLAG_IV_STRIPPED) && rx->key && + rx->key->conf.cipher == WLAN_CIPHER_SUITE_TKIP) goto update_iv; return RX_CONTINUE; diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 9528ea0..d75eb39 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1520,11 +1520,12 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, { struct net_device *dev = ptr; struct net *net = dev_net(dev); + struct netns_ipvs *ipvs = net_ipvs(net); struct ip_vs_service *svc; struct ip_vs_dest *dest; unsigned int idx; - if (event != NETDEV_UNREGISTER) + if (event != NETDEV_UNREGISTER || !ipvs) return NOTIFY_DONE; IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); EnterFunction(2); @@ -1550,7 +1551,7 @@ static int ip_vs_dst_event(struct notifier_block 
*this, unsigned long event, } } - list_for_each_entry(dest, &net_ipvs(net)->dest_trash, n_list) { + list_for_each_entry(dest, &ipvs->dest_trash, n_list) { __ip_vs_dev_reset(dest, dev); } mutex_unlock(&__ip_vs_mutex); diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c index f7af8b8..dff164e 100644 --- a/net/netfilter/nf_conntrack_core.c +++ b/net/netfilter/nf_conntrack_core.c @@ -247,12 +247,15 @@ static void death_by_event(unsigned long ul_conntrack) { struct nf_conn *ct = (void *)ul_conntrack; struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); if (nf_conntrack_event(IPCT_DESTROY, ct) < 0) { /* bad luck, let's retry again */ - ct->timeout.expires = jiffies + + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); return; } /* we've got the event delivered, now it's dying */ @@ -266,6 +269,9 @@ static void death_by_event(unsigned long ul_conntrack) void nf_ct_insert_dying_list(struct nf_conn *ct) { struct net *net = nf_ct_net(ct); + struct nf_conntrack_ecache *ecache = nf_ct_ecache_find(ct); + + BUG_ON(ecache == NULL); /* add this conntrack to the dying list */ spin_lock_bh(&nf_conntrack_lock); @@ -273,10 +279,10 @@ void nf_ct_insert_dying_list(struct nf_conn *ct) &net->ct.dying); spin_unlock_bh(&nf_conntrack_lock); /* set a new timer to retry event delivery */ - setup_timer(&ct->timeout, death_by_event, (unsigned long)ct); - ct->timeout.expires = jiffies + + setup_timer(&ecache->timeout, death_by_event, (unsigned long)ct); + ecache->timeout.expires = jiffies + (random32() % net->ct.sysctl_events_retry_timeout); - add_timer(&ct->timeout); + add_timer(&ecache->timeout); } EXPORT_SYMBOL_GPL(nf_ct_insert_dying_list); diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c index cd1e8e0..a3dffab 100644 --- a/net/netfilter/nf_conntrack_expect.c +++ b/net/netfilter/nf_conntrack_expect.c @@ -364,23 +364,6 @@ static void evict_oldest_expect(struct nf_conn *master, } } -static inline int refresh_timer(struct nf_conntrack_expect *i) -{ - struct nf_conn_help *master_help = nfct_help(i->master); - const struct nf_conntrack_expect_policy *p; - - if (!del_timer(&i->timeout)) - return 0; - - p = &rcu_dereference_protected( - master_help->helper, - lockdep_is_held(&nf_conntrack_lock) - )->expect_policy[i->class]; - i->timeout.expires = jiffies + p->timeout * HZ; - add_timer(&i->timeout); - return 1; -} - static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) { const struct nf_conntrack_expect_policy *p; @@ -388,7 +371,7 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) struct nf_conn *master = expect->master; struct nf_conn_help *master_help = nfct_help(master); struct net *net = nf_ct_exp_net(expect); - struct hlist_node *n; + struct hlist_node *n, *next; unsigned int h; int ret = 1; @@ -399,12 +382,12 @@ static inline int __nf_ct_expect_check(struct nf_conntrack_expect *expect) goto out; } h = nf_ct_expect_dst_hash(&expect->tuple); - hlist_for_each_entry(i, n, &net->ct.expect_hash[h], hnode) { + hlist_for_each_entry_safe(i, n, next, &net->ct.expect_hash[h], hnode) { if (expect_matches(i, expect)) { - /* Refresh timer: if it's dying, ignore.. 
*/ - if (refresh_timer(i)) { - ret = 0; - goto out; + if (del_timer(&i->timeout)) { + nf_ct_unlink_expect(i); + nf_ct_expect_put(i); + break; } } else if (expect_clash(i, expect)) { ret = -EBUSY; diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c index 9228ee0..6092b0c 100644 --- a/net/netfilter/xt_hashlimit.c +++ b/net/netfilter/xt_hashlimit.c @@ -392,8 +392,7 @@ static void htable_put(struct xt_hashlimit_htable *hinfo) #define CREDITS_PER_JIFFY POW2_BELOW32(MAX_CPJ) /* Precision saver. */ -static inline u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -403,7 +402,7 @@ user2credits(u_int32_t user) return (user * HZ * CREDITS_PER_JIFFY) / XT_HASHLIMIT_SCALE; } -static inline void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) +static void rateinfo_recalc(struct dsthash_ent *dh, unsigned long now) { dh->rateinfo.credit += (now - dh->rateinfo.prev) * CREDITS_PER_JIFFY; if (dh->rateinfo.credit > dh->rateinfo.credit_cap) @@ -534,8 +533,7 @@ hashlimit_mt(const struct sk_buff *skb, struct xt_action_param *par) dh->rateinfo.prev = jiffies; dh->rateinfo.credit = user2credits(hinfo->cfg.avg * hinfo->cfg.burst); - dh->rateinfo.credit_cap = user2credits(hinfo->cfg.avg * - hinfo->cfg.burst); + dh->rateinfo.credit_cap = dh->rateinfo.credit; dh->rateinfo.cost = user2credits(hinfo->cfg.avg); } else { /* update expiration timeout */ diff --git a/net/netfilter/xt_limit.c b/net/netfilter/xt_limit.c index 32b7a57..a4c1e45 100644 --- a/net/netfilter/xt_limit.c +++ b/net/netfilter/xt_limit.c @@ -88,8 +88,7 @@ limit_mt(const struct sk_buff *skb, struct xt_action_param *par) } /* Precision saver. */ -static u_int32_t -user2credits(u_int32_t user) +static u32 user2credits(u32 user) { /* If multiplying would overflow... */ if (user > 0xFFFFFFFF / (HZ*CREDITS_PER_JIFFY)) @@ -118,12 +117,12 @@ static int limit_mt_check(const struct xt_mtchk_param *par) /* For SMP, we only want to use one set of state. */ r->master = priv; + /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * + 128. */ + priv->prev = jiffies; + priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ if (r->cost == 0) { - /* User avg in seconds * XT_LIMIT_SCALE: convert to jiffies * - 128. */ - priv->prev = jiffies; - priv->credit = user2credits(r->avg * r->burst); /* Credits full. */ - r->credit_cap = user2credits(r->avg * r->burst); /* Credits full. */ + r->credit_cap = priv->credit; /* Credits full. 
*/ r->cost = user2credits(r->avg); } return 0; diff --git a/net/rds/send.c b/net/rds/send.c index c803341..f6bdfb0 100644 --- a/net/rds/send.c +++ b/net/rds/send.c @@ -1121,7 +1121,7 @@ rds_send_pong(struct rds_connection *conn, __be16 dport) rds_stats_inc(s_send_pong); if (!test_bit(RDS_LL_SEND_FULL, &conn->c_flags)) - rds_send_xmit(conn); + queue_delayed_work(rds_wq, &conn->c_send_w, 0); rds_message_put(rm); return 0; diff --git a/net/sunrpc/cache.c b/net/sunrpc/cache.c index 4530a91..237a2ee 100644 --- a/net/sunrpc/cache.c +++ b/net/sunrpc/cache.c @@ -1404,11 +1404,11 @@ static ssize_t read_flush(struct file *file, char __user *buf, size_t count, loff_t *ppos, struct cache_detail *cd) { - char tbuf[20]; + char tbuf[22]; unsigned long p = *ppos; size_t len; - sprintf(tbuf, "%lu\n", convert_to_wallclock(cd->flush_time)); + snprintf(tbuf, sizeof(tbuf), "%lu\n", convert_to_wallclock(cd->flush_time)); len = strlen(tbuf); if (p >= len) return 0; diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c index 554111f..2202a14 100644 --- a/net/sunrpc/xprtsock.c +++ b/net/sunrpc/xprtsock.c @@ -243,7 +243,6 @@ struct sock_xprt { void (*old_data_ready)(struct sock *, int); void (*old_state_change)(struct sock *); void (*old_write_space)(struct sock *); - void (*old_error_report)(struct sock *); }; /* @@ -726,10 +725,10 @@ static int xs_tcp_send_request(struct rpc_task *task) dprintk("RPC: sendmsg returned unrecognized error %d\n", -status); case -ECONNRESET: - case -EPIPE: xs_tcp_shutdown(xprt); case -ECONNREFUSED: case -ENOTCONN: + case -EPIPE: clear_bit(SOCK_ASYNC_NOSPACE, &transport->sock->flags); } @@ -768,7 +767,6 @@ static void xs_save_old_callbacks(struct sock_xprt *transport, struct sock *sk) transport->old_data_ready = sk->sk_data_ready; transport->old_state_change = sk->sk_state_change; transport->old_write_space = sk->sk_write_space; - transport->old_error_report = sk->sk_error_report; } static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *sk) @@ -776,7 +774,6 @@ static void xs_restore_old_callbacks(struct sock_xprt *transport, struct sock *s sk->sk_data_ready = transport->old_data_ready; sk->sk_state_change = transport->old_state_change; sk->sk_write_space = transport->old_write_space; - sk->sk_error_report = transport->old_error_report; } static void xs_reset_transport(struct sock_xprt *transport) @@ -1015,6 +1012,16 @@ static void xs_udp_data_ready(struct sock *sk, int len) read_unlock_bh(&sk->sk_callback_lock); } +/* + * Helper function to force a TCP close if the server is sending + * junk and/or it has put us in CLOSE_WAIT + */ +static void xs_tcp_force_close(struct rpc_xprt *xprt) +{ + set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); + xprt_force_disconnect(xprt); +} + static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_reader *desc) { struct sock_xprt *transport = container_of(xprt, struct sock_xprt, xprt); @@ -1041,7 +1048,7 @@ static inline void xs_tcp_read_fraghdr(struct rpc_xprt *xprt, struct xdr_skb_rea /* Sanity check of the record length */ if (unlikely(transport->tcp_reclen < 8)) { dprintk("RPC: invalid TCP record fragment length\n"); - xprt_force_disconnect(xprt); + xs_tcp_force_close(xprt); return; } dprintk("RPC: reading TCP record fragment of length %d\n", @@ -1122,7 +1129,7 @@ static inline void xs_tcp_read_calldir(struct sock_xprt *transport, break; default: dprintk("RPC: invalid request message type\n"); - xprt_force_disconnect(&transport->xprt); + xs_tcp_force_close(&transport->xprt); } 
xs_tcp_check_fraghdr(transport); } @@ -1442,12 +1449,19 @@ static void xs_tcp_cancel_linger_timeout(struct rpc_xprt *xprt) xprt_clear_connecting(xprt); } -static void xs_sock_mark_closed(struct rpc_xprt *xprt) +static void xs_sock_reset_connection_flags(struct rpc_xprt *xprt) { smp_mb__before_clear_bit(); + clear_bit(XPRT_CONNECTION_ABORT, &xprt->state); + clear_bit(XPRT_CONNECTION_CLOSE, &xprt->state); clear_bit(XPRT_CLOSE_WAIT, &xprt->state); clear_bit(XPRT_CLOSING, &xprt->state); smp_mb__after_clear_bit(); +} + +static void xs_sock_mark_closed(struct rpc_xprt *xprt) +{ + xs_sock_reset_connection_flags(xprt); /* Mark transport as closed and wake up all pending tasks */ xprt_disconnect_done(xprt); } @@ -1502,8 +1516,9 @@ static void xs_tcp_state_change(struct sock *sk) break; case TCP_CLOSE_WAIT: /* The server initiated a shutdown of the socket */ - xprt_force_disconnect(xprt); xprt->connect_cookie++; + clear_bit(XPRT_CONNECTED, &xprt->state); + xs_tcp_force_close(xprt); case TCP_CLOSING: /* * If the server closed down the connection, make sure that @@ -1527,25 +1542,6 @@ static void xs_tcp_state_change(struct sock *sk) read_unlock_bh(&sk->sk_callback_lock); } -/** - * xs_error_report - callback mainly for catching socket errors - * @sk: socket - */ -static void xs_error_report(struct sock *sk) -{ - struct rpc_xprt *xprt; - - read_lock_bh(&sk->sk_callback_lock); - if (!(xprt = xprt_from_sock(sk))) - goto out; - dprintk("RPC: %s client %p...\n" - "RPC: error %d\n", - __func__, xprt, sk->sk_err); - xprt_wake_pending_tasks(xprt, -EAGAIN); -out: - read_unlock_bh(&sk->sk_callback_lock); -} - static void xs_write_space(struct sock *sk) { struct socket *sock; @@ -1845,7 +1841,6 @@ static int xs_local_finish_connecting(struct rpc_xprt *xprt, sk->sk_user_data = xprt; sk->sk_data_ready = xs_local_data_ready; sk->sk_write_space = xs_udp_write_space; - sk->sk_error_report = xs_error_report; sk->sk_allocation = GFP_ATOMIC; xprt_clear_connected(xprt); @@ -1934,7 +1929,6 @@ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_user_data = xprt; sk->sk_data_ready = xs_udp_data_ready; sk->sk_write_space = xs_udp_write_space; - sk->sk_error_report = xs_error_report; sk->sk_no_check = UDP_CSUM_NORCV; sk->sk_allocation = GFP_ATOMIC; @@ -2002,10 +1996,8 @@ static void xs_abort_connection(struct sock_xprt *transport) any.sa_family = AF_UNSPEC; result = kernel_connect(transport->sock, &any, sizeof(any), 0); if (!result) - xs_sock_mark_closed(&transport->xprt); - else - dprintk("RPC: AF_UNSPEC connect return code %d\n", - result); + xs_sock_reset_connection_flags(&transport->xprt); + dprintk("RPC: AF_UNSPEC connect return code %d\n", result); } static void xs_tcp_reuse_connection(struct sock_xprt *transport) @@ -2050,7 +2042,6 @@ static int xs_tcp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock) sk->sk_data_ready = xs_tcp_data_ready; sk->sk_state_change = xs_tcp_state_change; sk->sk_write_space = xs_tcp_write_space; - sk->sk_error_report = xs_error_report; sk->sk_allocation = GFP_ATOMIC; /* socket options */ @@ -2146,8 +2137,7 @@ static void xs_tcp_setup_socket(struct work_struct *work) /* We're probably in TIME_WAIT. 
Get rid of existing socket, * and retry */ - set_bit(XPRT_CONNECTION_CLOSE, &xprt->state); - xprt_force_disconnect(xprt); + xs_tcp_force_close(xprt); break; case -ECONNREFUSED: case -ECONNRESET: diff --git a/sound/pci/ac97/ac97_codec.c b/sound/pci/ac97/ac97_codec.c index 7f4d619..11ccc23 100644 --- a/sound/pci/ac97/ac97_codec.c +++ b/sound/pci/ac97/ac97_codec.c @@ -1271,6 +1271,8 @@ static int snd_ac97_cvol_new(struct snd_card *card, char *name, int reg, unsigne tmp.index = ac97->num; kctl = snd_ctl_new1(&tmp, ac97); } + if (!kctl) + return -ENOMEM; if (reg >= AC97_PHONE && reg <= AC97_PCM) set_tlv_db_scale(kctl, db_scale_5bit_12db_max); else diff --git a/sound/pci/emu10k1/emu10k1_main.c b/sound/pci/emu10k1/emu10k1_main.c index 15f0161..0800bcc 100644 --- a/sound/pci/emu10k1/emu10k1_main.c +++ b/sound/pci/emu10k1/emu10k1_main.c @@ -1415,6 +1415,15 @@ static struct snd_emu_chip_details emu_chip_details[] = { .ca0108_chip = 1, .spk71 = 1, .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 new revision */ + /* Tested by Maxim Kachur <mcdebugger@duganet.ru> 17th Oct 2012. */ + /* This is MAEM8986, 0202 is MAEM8980 */ + {.vendor = 0x1102, .device = 0x0008, .subsystem = 0x40071102, + .driver = "Audigy2", .name = "E-mu 1010 PCIe [MAEM8986]", + .id = "EMU1010", + .emu10k2_chip = 1, + .ca0108_chip = 1, + .spk71 = 1, + .emu_model = EMU_MODEL_EMU1010B}, /* EMU 1010 PCIe */ /* Tested by James@superbug.co.uk 8th July 2005. */ /* This is MAEM8810, 0202 is MAEM8820 */ {.vendor = 0x1102, .device = 0x0004, .subsystem = 0x40011102, diff --git a/usr/gen_init_cpio.c b/usr/gen_init_cpio.c index af0f22f..aca6edc 100644 --- a/usr/gen_init_cpio.c +++ b/usr/gen_init_cpio.c @@ -303,7 +303,7 @@ static int cpio_mkfile(const char *name, const char *location, int retval; int rc = -1; int namesize; - int i; + unsigned int i; mode |= S_IFREG; @@ -381,25 +381,28 @@ error: static char *cpio_replace_env(char *new_location) { - char expanded[PATH_MAX + 1]; - char env_var[PATH_MAX + 1]; - char *start; - char *end; - - for (start = NULL; (start = strstr(new_location, "${")); ) { - end = strchr(start, '}'); - if (start < end) { - *env_var = *expanded = '\0'; - strncat(env_var, start + 2, end - start - 2); - strncat(expanded, new_location, start - new_location); - strncat(expanded, getenv(env_var), PATH_MAX); - strncat(expanded, end + 1, PATH_MAX); - strncpy(new_location, expanded, PATH_MAX); - } else - break; - } - - return new_location; + char expanded[PATH_MAX + 1]; + char env_var[PATH_MAX + 1]; + char *start; + char *end; + + for (start = NULL; (start = strstr(new_location, "${")); ) { + end = strchr(start, '}'); + if (start < end) { + *env_var = *expanded = '\0'; + strncat(env_var, start + 2, end - start - 2); + strncat(expanded, new_location, start - new_location); + strncat(expanded, getenv(env_var), + PATH_MAX - strlen(expanded)); + strncat(expanded, end + 1, + PATH_MAX - strlen(expanded)); + strncpy(new_location, expanded, PATH_MAX); + new_location[PATH_MAX] = 0; + } else + break; + } + + return new_location; }
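
The xt_hashlimit and xt_limit hunks above share two ideas: user2credits() picks its operation order to dodge 32-bit overflow (divide first for large user rates, multiply first for small ones to keep precision), and the credit cap is now copied from the freshly computed credit instead of being recomputed through a second user2credits() call. A minimal userspace sketch of that precision saver; HZ and CREDITS_PER_JIFFY are illustrative stand-in values here, not the kernel's (the kernel derives CREDITS_PER_JIFFY via POW2_BELOW32):

/*
 * Userspace sketch of the user2credits() "precision saver" pattern.
 * HZ and CREDITS_PER_JIFFY are assumed stand-in values.
 */
#include <stdint.h>
#include <stdio.h>

#define HZ                100u   /* assumed tick rate */
#define CREDITS_PER_JIFFY 128u   /* assumed credit scale */
#define XT_LIMIT_SCALE    10000u /* user rates arrive pre-scaled by this */

static uint32_t user2credits(uint32_t user)
{
	/* If multiplying first would overflow 32 bits, divide first
	 * and accept the coarser rounding. */
	if (user > 0xFFFFFFFFu / (HZ * CREDITS_PER_JIFFY))
		return (user / XT_LIMIT_SCALE) * HZ * CREDITS_PER_JIFFY;
	/* Otherwise multiply first to preserve low-order precision. */
	return (user * HZ * CREDITS_PER_JIFFY) / XT_LIMIT_SCALE;
}

int main(void)
{
	uint32_t credit = user2credits(50u);   /* small rate: multiply-first */
	uint32_t cap    = credit;              /* copy, don't recompute */

	printf("credit=%u cap=%u big=%u\n",
	       credit, cap, user2credits(1000000u)); /* divide-first path */
	return 0;
}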
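In the net/sunrpc/cache.c hunk, tbuf grows from 20 to 22 bytes because a 64-bit unsigned long can print as 20 decimal digits, and the newline plus terminating NUL need two more; switching sprintf() to snprintf() makes that bound explicit even if the sizing math ever slips. A quick check of the worst case:

/* Worst-case sizing behind the tbuf[20] -> tbuf[22] change:
 * ULONG_MAX on 64-bit is 20 digits, plus '\n', plus the NUL. */
#include <limits.h>
#include <stdio.h>

int main(void)
{
	char tbuf[22];
	int n = snprintf(tbuf, sizeof(tbuf), "%lu\n", ULONG_MAX);

	/* n is 21 on 64-bit: 20 digits + newline; the NUL is not counted. */
	printf("needed %d bytes plus NUL\n", n);
	return 0;
}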
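The net/sunrpc/xprtsock.c changes are largely a consolidation: every path that used to open-code "set XPRT_CONNECTION_CLOSE, then xprt_force_disconnect()" now goes through xs_tcp_force_close(), and the socket-reset path clears the ABORT/CLOSE/CLOSING bits through one xs_sock_reset_connection_flags() helper, so no call site can miss a flag. The shape of that refactor, with simplified stand-in types and bit constants rather than the kernel's:

/* Stand-in sketch of the xs_tcp_force_close()/reset-flags split;
 * the struct and flag values are simplified, not the kernel's. */
#include <stdio.h>

struct xprt {
	unsigned long state;
};

enum {
	XPRT_CONNECTION_ABORT = 1 << 0,
	XPRT_CONNECTION_CLOSE = 1 << 1,
};

static void xprt_force_disconnect(struct xprt *x)
{
	printf("disconnect, state=%#lx\n", x->state);
}

/* One helper marks the transport for close *and* disconnects, so the
 * CLOSE_WAIT and bad-record paths cannot forget either half. */
static void xs_tcp_force_close(struct xprt *x)
{
	x->state |= XPRT_CONNECTION_CLOSE;
	xprt_force_disconnect(x);
}

/* And one helper clears every connection flag before socket reuse. */
static void xs_sock_reset_connection_flags(struct xprt *x)
{
	x->state &= ~(XPRT_CONNECTION_ABORT | XPRT_CONNECTION_CLOSE);
}

int main(void)
{
	struct xprt x = { 0 };

	xs_tcp_force_close(&x);
	xs_sock_reset_connection_flags(&x);
	return 0;
}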
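Finally, the usr/gen_init_cpio.c hunk bounds each strncat() by the space actually left in the destination (PATH_MAX - strlen(expanded)) and NUL-terminates after strncpy(), which does not guarantee termination on truncation. A self-contained version of that ${VAR} expansion follows; the NULL guard on getenv() is extra hardening added in this sketch, not part of the patch itself:

/* Self-contained version of the bounded ${VAR} expansion from the
 * gen_init_cpio hunk. The getenv() NULL guard is an addition here;
 * the patched code assumes the variable exists in the environment. */
#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#ifndef PATH_MAX
#define PATH_MAX 4096
#endif

static char *replace_env(char *new_location)
{
	char expanded[PATH_MAX + 1];
	char env_var[PATH_MAX + 1];
	char *start;
	char *end;

	for (start = NULL; (start = strstr(new_location, "${")); ) {
		end = strchr(start, '}');
		if (start < end) {
			const char *val;

			*env_var = *expanded = '\0';
			/* Variable name between "${" and "}". */
			strncat(env_var, start + 2, end - start - 2);
			/* Prefix before "${". */
			strncat(expanded, new_location, start - new_location);
			val = getenv(env_var);
			/* Bound every append by the space that is left. */
			strncat(expanded, val ? val : "",
				PATH_MAX - strlen(expanded));
			strncat(expanded, end + 1,
				PATH_MAX - strlen(expanded));
			strncpy(new_location, expanded, PATH_MAX);
			/* strncpy() may not terminate on truncation. */
			new_location[PATH_MAX] = '\0';
		} else
			break;	/* no closing "}": leave the rest as-is */
	}
	return new_location;
}

int main(void)
{
	char loc[PATH_MAX + 1] = "${HOME}/initramfs/init";

	puts(replace_env(loc));
	return 0;
}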