author | Ziyann <jaraidaniel@gmail.com> | 2014-11-26 11:37:03 +0100
---|---|---
committer | Ziyann <jaraidaniel@gmail.com> | 2014-11-26 11:37:03 +0100
commit | e90611725bc4dcc8946e74c38bb2c58fdf6bf98f (patch) |
tree | a6a2441a53406a47f3a4b4a5103c0a067aaab445 |
parent | da82942bbd6a40cf7ddd3edc52352610905137ce (diff) |
parent | 5dba9ddd98cbc7ad319d687887981a0ea0062c75 (diff) |
Merge remote-tracking branch 'linux/linux-3.0.y' into stable-newpvr
Conflicts:
arch/arm/include/asm/hardware/cache-l2x0.h
504 files changed, 3935 insertions(+), 1973 deletions(-)
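
The header and message above describe a stable merge with a single conflicted file. As a rough reconstruction only — not a record of the commands actually run — the sketch below assumes the upstream stable tree is configured as a remote named `linux` (inferred from the `linux/linux-3.0.y` branch spelling) and that work happens on the `stable-newpvr` branch named in the subject line.

```sh
# Hypothetical reconstruction of the merge recorded above -- the remote
# name "linux" and the checkout state are assumptions, not facts from
# this page.
git checkout stable-newpvr
git fetch linux
git merge linux/linux-3.0.y
# The merge stops on the conflict listed in the message:
#   arch/arm/include/asm/hardware/cache-l2x0.h
# Resolve it by hand, then stage it and record the two-parent commit
# shown in the header table:
git add arch/arm/include/asm/hardware/cache-l2x0.h
git commit
# Stat the merge against its first parent (the previous stable-newpvr
# tip), which is the view the summary line above typically reflects:
git diff --stat HEAD^1 HEAD
```

The per-file diff that follows is likewise the first-parent view of the merge, so it shows only the net changes the stable branch brought in.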
diff --git a/MAINTAINERS b/MAINTAINERS index 105683a..e3763b0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -6040,7 +6040,8 @@ F: arch/alpha/kernel/srm_env.c STABLE BRANCH M: Greg Kroah-Hartman <greg@kroah.com> L: stable@vger.kernel.org -S: Maintained +S: Supported +F: Documentation/stable_kernel_rules.txt STAGING SUBSYSTEM M: Greg Kroah-Hartman <gregkh@suse.de> @@ -1,8 +1,8 @@ VERSION = 3 PATCHLEVEL = 0 -SUBLEVEL = 72 +SUBLEVEL = 101 EXTRAVERSION = -NAME = Sneaky Weasel +NAME = Sodden Ben Lomond # *DOCUMENTATION* # To see a list of typical targets execute "make help" diff --git a/arch/alpha/kernel/sys_nautilus.c b/arch/alpha/kernel/sys_nautilus.c index 99c0f46..dc616b3 100644 --- a/arch/alpha/kernel/sys_nautilus.c +++ b/arch/alpha/kernel/sys_nautilus.c @@ -189,6 +189,10 @@ nautilus_machine_check(unsigned long vector, unsigned long la_ptr) extern void free_reserved_mem(void *, void *); extern void pcibios_claim_one_bus(struct pci_bus *); +static struct resource irongate_io = { + .name = "Irongate PCI IO", + .flags = IORESOURCE_IO, +}; static struct resource irongate_mem = { .name = "Irongate PCI MEM", .flags = IORESOURCE_MEM, @@ -210,6 +214,7 @@ nautilus_init_pci(void) irongate = pci_get_bus_and_slot(0, 0); bus->self = irongate; + bus->resource[0] = &irongate_io; bus->resource[1] = &irongate_mem; pci_bus_size_bridges(bus); diff --git a/arch/arm/include/asm/hardware/cache-l2x0.h b/arch/arm/include/asm/hardware/cache-l2x0.h index 5dc6db1..2871580 100644 --- a/arch/arm/include/asm/hardware/cache-l2x0.h +++ b/arch/arm/include/asm/hardware/cache-l2x0.h @@ -45,10 +45,19 @@ #define L2X0_CLEAN_INV_LINE_PA 0x7F0 #define L2X0_CLEAN_INV_LINE_IDX 0x7F8 #define L2X0_CLEAN_INV_WAY 0x7FC + #define L2X0_LOCKDOWN_WAY_D0 0x900 #define L2X0_LOCKDOWN_WAY_D1 0x908 #define L2X0_LOCKDOWN_WAY_I0 0x904 #define L2X0_LOCKDOWN_WAY_I1 0x90C +/* + * The lockdown registers repeat 8 times for L310, the L210 has only one + * D and one I lockdown register at 0x0900 and 0x0904. + */ +#define L2X0_LOCKDOWN_WAY_D_BASE 0x900 +#define L2X0_LOCKDOWN_WAY_I_BASE 0x904 +#define L2X0_LOCKDOWN_STRIDE 0x08 + #define L2X0_TEST_OPERATION 0xF00 #define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_TAG 0xF30 diff --git a/arch/arm/kernel/perf_event.c b/arch/arm/kernel/perf_event.c index 2b5b142..75373a9 100644 --- a/arch/arm/kernel/perf_event.c +++ b/arch/arm/kernel/perf_event.c @@ -741,6 +741,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs) struct frame_tail __user *tail; + perf_callchain_store(entry, regs->ARM_pc); tail = (struct frame_tail __user *)regs->ARM_fp - 1; while ((entry->nr < PERF_MAX_STACK_DEPTH) && diff --git a/arch/arm/mach-kirkwood/ts219-setup.c b/arch/arm/mach-kirkwood/ts219-setup.c index 68f32f2..eb1a7ba 100644 --- a/arch/arm/mach-kirkwood/ts219-setup.c +++ b/arch/arm/mach-kirkwood/ts219-setup.c @@ -124,7 +124,7 @@ static void __init qnap_ts219_init(void) static int __init ts219_pci_init(void) { if (machine_is_ts219()) - kirkwood_pcie_init(KW_PCIE0); + kirkwood_pcie_init(KW_PCIE1 | KW_PCIE0); return 0; } diff --git a/arch/arm/mach-omap2/board-rx51-peripherals.c b/arch/arm/mach-omap2/board-rx51-peripherals.c index c565971..9a1e1f7 100644 --- a/arch/arm/mach-omap2/board-rx51-peripherals.c +++ b/arch/arm/mach-omap2/board-rx51-peripherals.c @@ -56,11 +56,11 @@ #define RX51_USB_TRANSCEIVER_RST_GPIO 67 -/* list all spi devices here */ +/* List all SPI devices here. Note that the list/probe order seems to matter! 
*/ enum { RX51_SPI_WL1251, - RX51_SPI_MIPID, /* LCD panel */ RX51_SPI_TSC2005, /* Touch Controller */ + RX51_SPI_MIPID, /* LCD panel */ }; static struct wl12xx_platform_data wl1251_pdata; diff --git a/arch/arm/mach-versatile/pci.c b/arch/arm/mach-versatile/pci.c index 13c7e5f..3f47259 100644 --- a/arch/arm/mach-versatile/pci.c +++ b/arch/arm/mach-versatile/pci.c @@ -43,9 +43,9 @@ #define PCI_IMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x0) #define PCI_IMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x4) #define PCI_IMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x8) -#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x10) -#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14) -#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18) +#define PCI_SMAP0 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x14) +#define PCI_SMAP1 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x18) +#define PCI_SMAP2 __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0x1c) #define PCI_SELFID __IO_ADDRESS(VERSATILE_PCI_CORE_BASE+0xc) #define DEVICE_ID_OFFSET 0x00 diff --git a/arch/arm/mm/cache-feroceon-l2.c b/arch/arm/mm/cache-feroceon-l2.c index e0b0e7a..09f8851 100644 --- a/arch/arm/mm/cache-feroceon-l2.c +++ b/arch/arm/mm/cache-feroceon-l2.c @@ -342,6 +342,7 @@ void __init feroceon_l2_init(int __l2_wt_override) outer_cache.inv_range = feroceon_l2_inv_range; outer_cache.clean_range = feroceon_l2_clean_range; outer_cache.flush_range = feroceon_l2_flush_range; + outer_cache.inv_all = l2_inv_all; enable_l2(); diff --git a/arch/arm/mm/cache-l2x0.c b/arch/arm/mm/cache-l2x0.c index 6f6c1a6..11d1349 100644 --- a/arch/arm/mm/cache-l2x0.c +++ b/arch/arm/mm/cache-l2x0.c @@ -320,6 +320,25 @@ static void l2x0_disable(void) spin_unlock_irqrestore(&l2x0_lock, flags); } +static void __init l2x0_unlock(__u32 cache_id) +{ + int lockregs; + int i; + + if (cache_id == L2X0_CACHE_ID_PART_L310) + lockregs = 8; + else + /* L210 and unknown types */ + lockregs = 1; + + for (i = 0; i < lockregs; i++) { + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE + + i * L2X0_LOCKDOWN_STRIDE); + writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE + + i * L2X0_LOCKDOWN_STRIDE); + } +} + void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) { __u32 aux; @@ -370,6 +389,8 @@ void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask) * accessing the below registers will fault. 
*/ if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) { + /* Make sure that I&D is not locked down when starting */ + l2x0_unlock(cache_id); /* l2x0 controller is disabled */ writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL); diff --git a/arch/arm/plat-orion/common.c b/arch/arm/plat-orion/common.c index 11dce87..214b002 100644 --- a/arch/arm/plat-orion/common.c +++ b/arch/arm/plat-orion/common.c @@ -343,7 +343,7 @@ static struct resource orion_ge10_shared_resources[] = { static struct platform_device orion_ge10_shared = { .name = MV643XX_ETH_SHARED_NAME, - .id = 1, + .id = 2, .dev = { .platform_data = &orion_ge10_shared_data, }, @@ -358,8 +358,8 @@ static struct resource orion_ge10_resources[] = { static struct platform_device orion_ge10 = { .name = MV643XX_ETH_NAME, - .id = 1, - .num_resources = 2, + .id = 2, + .num_resources = 1, .resource = orion_ge10_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), @@ -397,7 +397,7 @@ static struct resource orion_ge11_shared_resources[] = { static struct platform_device orion_ge11_shared = { .name = MV643XX_ETH_SHARED_NAME, - .id = 1, + .id = 3, .dev = { .platform_data = &orion_ge11_shared_data, }, @@ -412,8 +412,8 @@ static struct resource orion_ge11_resources[] = { static struct platform_device orion_ge11 = { .name = MV643XX_ETH_NAME, - .id = 1, - .num_resources = 2, + .id = 3, + .num_resources = 1, .resource = orion_ge11_resources, .dev = { .coherent_dma_mask = DMA_BIT_MASK(32), diff --git a/arch/avr32/kernel/module.c b/arch/avr32/kernel/module.c index a727f54..9c266ab 100644 --- a/arch/avr32/kernel/module.c +++ b/arch/avr32/kernel/module.c @@ -271,7 +271,7 @@ int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab, break; case R_AVR32_GOT18SW: if ((relocation & 0xfffe0003) != 0 - && (relocation & 0xfffc0003) != 0xffff0000) + && (relocation & 0xfffc0000) != 0xfffc0000) return reloc_overflow(module, "R_AVR32_GOT18SW", relocation); relocation >>= 2; diff --git a/arch/cris/kernel/vmlinux.lds.S b/arch/cris/kernel/vmlinux.lds.S index a6990cb..a68b983 100644 --- a/arch/cris/kernel/vmlinux.lds.S +++ b/arch/cris/kernel/vmlinux.lds.S @@ -52,6 +52,7 @@ SECTIONS EXCEPTION_TABLE(4) + _sdata = .; RODATA . = ALIGN (4); diff --git a/arch/ia64/include/asm/futex.h b/arch/ia64/include/asm/futex.h index 21ab376..1bd14d5 100644 --- a/arch/ia64/include/asm/futex.h +++ b/arch/ia64/include/asm/futex.h @@ -107,16 +107,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -EFAULT; { - register unsigned long r8 __asm ("r8"); + register unsigned long r8 __asm ("r8") = 0; unsigned long prev; __asm__ __volatile__( " mf;; \n" - " mov %0=r0 \n" " mov ar.ccv=%4;; \n" "[1:] cmpxchg4.acq %1=[%2],%3,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. 
\n" "[2:]" - : "=r" (r8), "=r" (prev) + : "+r" (r8), "=&r" (prev) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); diff --git a/arch/ia64/include/asm/mca.h b/arch/ia64/include/asm/mca.h index 43f96ab..8c70961 100644 --- a/arch/ia64/include/asm/mca.h +++ b/arch/ia64/include/asm/mca.h @@ -143,6 +143,7 @@ extern unsigned long __per_cpu_mca[NR_CPUS]; extern int cpe_vector; extern int ia64_cpe_irq; extern void ia64_mca_init(void); +extern void ia64_mca_irq_init(void); extern void ia64_mca_cpu_init(void *); extern void ia64_os_mca_dispatch(void); extern void ia64_os_mca_dispatch_end(void); diff --git a/arch/ia64/kernel/irq.c b/arch/ia64/kernel/irq.c index ad69606..f2c41828 100644 --- a/arch/ia64/kernel/irq.c +++ b/arch/ia64/kernel/irq.c @@ -23,6 +23,8 @@ #include <linux/interrupt.h> #include <linux/kernel_stat.h> +#include <asm/mca.h> + /* * 'what should we do if we get a hw irq event on an illegal vector'. * each architecture has to answer this themselves. @@ -83,6 +85,12 @@ bool is_affinity_mask_valid(const struct cpumask *cpumask) #endif /* CONFIG_SMP */ +int __init arch_early_irq_init(void) +{ + ia64_mca_irq_init(); + return 0; +} + #ifdef CONFIG_HOTPLUG_CPU unsigned int vectors_in_migration[NR_IRQS]; diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c index 84fb405..9b97303 100644 --- a/arch/ia64/kernel/mca.c +++ b/arch/ia64/kernel/mca.c @@ -2071,22 +2071,16 @@ ia64_mca_init(void) printk(KERN_INFO "MCA related initialization done\n"); } + /* - * ia64_mca_late_init - * - * Opportunity to setup things that require initialization later - * than ia64_mca_init. Setup a timer to poll for CPEs if the - * platform doesn't support an interrupt driven mechanism. - * - * Inputs : None - * Outputs : Status + * These pieces cannot be done in ia64_mca_init() because it is called before + * early_irq_init() which would wipe out our percpu irq registrations. But we + * cannot leave them until ia64_mca_late_init() because by then all the other + * processors have been brought online and have set their own CMC vectors to + * point at a non-existant action. Called from arch_early_irq_init(). */ -static int __init -ia64_mca_late_init(void) +void __init ia64_mca_irq_init(void) { - if (!mca_init) - return 0; - /* * Configure the CMCI/P vector and handler. Interrupts for CMC are * per-processor, so AP CMC interrupts are setup in smp_callin() (smpboot.c). @@ -2105,6 +2099,23 @@ ia64_mca_late_init(void) /* Setup the CPEI/P handler */ register_percpu_irq(IA64_CPEP_VECTOR, &mca_cpep_irqaction); #endif +} + +/* + * ia64_mca_late_init + * + * Opportunity to setup things that require initialization later + * than ia64_mca_init. Setup a timer to poll for CPEs if the + * platform doesn't support an interrupt driven mechanism. 
+ * + * Inputs : None + * Outputs : Status + */ +static int __init +ia64_mca_late_init(void) +{ + if (!mca_init) + return 0; register_hotcpu_notifier(&mca_cpu_notifier); diff --git a/arch/ia64/kvm/vtlb.c b/arch/ia64/kvm/vtlb.c index 4332f7e..a7869f8 100644 --- a/arch/ia64/kvm/vtlb.c +++ b/arch/ia64/kvm/vtlb.c @@ -256,7 +256,7 @@ u64 guest_vhpt_lookup(u64 iha, u64 *pte) "srlz.d;;" "ssm psr.i;;" "srlz.d;;" - : "=r"(ret) : "r"(iha), "r"(pte):"memory"); + : "=&r"(ret) : "r"(iha), "r"(pte) : "memory"); return ret; } diff --git a/arch/m32r/boot/compressed/Makefile b/arch/m32r/boot/compressed/Makefile index 177716b..01729c2 100644 --- a/arch/m32r/boot/compressed/Makefile +++ b/arch/m32r/boot/compressed/Makefile @@ -43,9 +43,9 @@ endif OBJCOPYFLAGS += -R .empty_zero_page -suffix_$(CONFIG_KERNEL_GZIP) = gz -suffix_$(CONFIG_KERNEL_BZIP2) = bz2 -suffix_$(CONFIG_KERNEL_LZMA) = lzma +suffix-$(CONFIG_KERNEL_GZIP) = gz +suffix-$(CONFIG_KERNEL_BZIP2) = bz2 +suffix-$(CONFIG_KERNEL_LZMA) = lzma $(obj)/piggy.o: $(obj)/vmlinux.scr $(obj)/vmlinux.bin.$(suffix-y) FORCE $(call if_changed,ld) diff --git a/arch/m32r/boot/compressed/misc.c b/arch/m32r/boot/compressed/misc.c index 370d608..28a0952 100644 --- a/arch/m32r/boot/compressed/misc.c +++ b/arch/m32r/boot/compressed/misc.c @@ -28,7 +28,7 @@ static unsigned long free_mem_ptr; static unsigned long free_mem_end_ptr; #ifdef CONFIG_KERNEL_BZIP2 -static void *memset(void *s, int c, size_t n) +void *memset(void *s, int c, size_t n) { char *ss = s; @@ -39,6 +39,16 @@ static void *memset(void *s, int c, size_t n) #endif #ifdef CONFIG_KERNEL_GZIP +void *memcpy(void *dest, const void *src, size_t n) +{ + char *d = dest; + const char *s = src; + while (n--) + *d++ = *s++; + + return dest; +} + #define BOOT_HEAP_SIZE 0x10000 #include "../../../../lib/decompress_inflate.c" #endif diff --git a/arch/m68k/emu/natfeat.c b/arch/m68k/emu/natfeat.c index 2291a7d..fa277ae 100644 --- a/arch/m68k/emu/natfeat.c +++ b/arch/m68k/emu/natfeat.c @@ -18,9 +18,11 @@ #include <asm/machdep.h> #include <asm/natfeat.h> +extern long nf_get_id2(const char *feature_name); + asm("\n" -" .global nf_get_id,nf_call\n" -"nf_get_id:\n" +" .global nf_get_id2,nf_call\n" +"nf_get_id2:\n" " .short 0x7300\n" " rts\n" "nf_call:\n" @@ -29,12 +31,25 @@ asm("\n" "1: moveq.l #0,%d0\n" " rts\n" " .section __ex_table,\"a\"\n" -" .long nf_get_id,1b\n" +" .long nf_get_id2,1b\n" " .long nf_call,1b\n" " .previous"); -EXPORT_SYMBOL_GPL(nf_get_id); EXPORT_SYMBOL_GPL(nf_call); +long nf_get_id(const char *feature_name) +{ + /* feature_name may be in vmalloc()ed memory, so make a copy */ + char name_copy[32]; + size_t n; + + n = strlcpy(name_copy, feature_name, sizeof(name_copy)); + if (n >= sizeof(name_copy)) + return 0; + + return nf_get_id2(name_copy); +} +EXPORT_SYMBOL_GPL(nf_get_id); + void nfprint(const char *fmt, ...) 
{ static char buf[256]; diff --git a/arch/m68k/include/asm/div64.h b/arch/m68k/include/asm/div64.h index edb6614..7558032 100644 --- a/arch/m68k/include/asm/div64.h +++ b/arch/m68k/include/asm/div64.h @@ -13,16 +13,17 @@ unsigned long long n64; \ } __n; \ unsigned long __rem, __upper; \ + unsigned long __base = (base); \ \ __n.n64 = (n); \ if ((__upper = __n.n32[0])) { \ asm ("divul.l %2,%1:%0" \ - : "=d" (__n.n32[0]), "=d" (__upper) \ - : "d" (base), "0" (__n.n32[0])); \ + : "=d" (__n.n32[0]), "=d" (__upper) \ + : "d" (__base), "0" (__n.n32[0])); \ } \ asm ("divu.l %2,%1:%0" \ - : "=d" (__n.n32[1]), "=d" (__rem) \ - : "d" (base), "1" (__upper), "0" (__n.n32[1])); \ + : "=d" (__n.n32[1]), "=d" (__rem) \ + : "d" (__base), "1" (__upper), "0" (__n.n32[1])); \ (n) = __n.n64; \ __rem; \ }) diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c index 8b58bf0..0acc27b 100644 --- a/arch/parisc/kernel/traps.c +++ b/arch/parisc/kernel/traps.c @@ -811,14 +811,14 @@ void notrace handle_interruption(int code, struct pt_regs *regs) else { /* - * The kernel should never fault on its own address space. + * The kernel should never fault on its own address space, + * unless pagefault_disable() was called before. */ - if (fault_space == 0) + if (fault_space == 0 && !in_atomic()) { pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC); parisc_terminate("Kernel Fault", regs, code, fault_address); - } } diff --git a/arch/powerpc/Makefile b/arch/powerpc/Makefile index b7212b6..f1b5251 100644 --- a/arch/powerpc/Makefile +++ b/arch/powerpc/Makefile @@ -67,7 +67,7 @@ LDFLAGS_vmlinux-yy := -Bstatic LDFLAGS_vmlinux-$(CONFIG_PPC64)$(CONFIG_RELOCATABLE) := -pie LDFLAGS_vmlinux := $(LDFLAGS_vmlinux-yy) -CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=none -mcall-aixdesc +CFLAGS-$(CONFIG_PPC64) := -mminimal-toc -mtraceback=no -mcall-aixdesc CFLAGS-$(CONFIG_PPC32) := -ffixed-r2 -mmultiple KBUILD_CPPFLAGS += -Iarch/$(ARCH) KBUILD_AFLAGS += -Iarch/$(ARCH) diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index 0192a4e..80de64b 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -87,10 +87,9 @@ struct exception_table_entry; void sort_ex_table(struct exception_table_entry *start, struct exception_table_entry *finish); -#ifdef CONFIG_MODVERSIONS +#if defined(CONFIG_MODVERSIONS) && defined(CONFIG_PPC64) #define ARCH_RELOCATES_KCRCTAB - -extern const unsigned long reloc_start[]; +#define reloc_start PHYSICAL_START #endif #endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_MODULE_H */ diff --git a/arch/powerpc/kernel/align.c b/arch/powerpc/kernel/align.c index 8184ee9..3fcbae0 100644 --- a/arch/powerpc/kernel/align.c +++ b/arch/powerpc/kernel/align.c @@ -764,6 +764,16 @@ int fix_alignment(struct pt_regs *regs) nb = aligninfo[instr].len; flags = aligninfo[instr].flags; + /* ldbrx/stdbrx overlap lfs/stfs in the DSISR unfortunately */ + if (IS_XFORM(instruction) && ((instruction >> 1) & 0x3ff) == 532) { + nb = 8; + flags = LD+SW; + } else if (IS_XFORM(instruction) && + ((instruction >> 1) & 0x3ff) == 660) { + nb = 8; + flags = ST+SW; + } + /* Byteswap little endian loads and stores */ swiz = 0; if (regs->msr & MSR_LE) { diff --git a/arch/powerpc/kernel/head_64.S b/arch/powerpc/kernel/head_64.S index e8befef..a5031c3 100644 --- a/arch/powerpc/kernel/head_64.S +++ b/arch/powerpc/kernel/head_64.S @@ -492,6 +492,7 @@ _GLOBAL(copy_and_flush) sync addi r5,r5,8 addi r6,r6,8 + isync blr .align 8 diff --git a/arch/powerpc/kernel/iommu.c 
b/arch/powerpc/kernel/iommu.c index 961bb03..795e807 100644 --- a/arch/powerpc/kernel/iommu.c +++ b/arch/powerpc/kernel/iommu.c @@ -495,7 +495,7 @@ struct iommu_table *iommu_init_table(struct iommu_table *tbl, int nid) /* number of bytes needed for the bitmap */ sz = (tbl->it_size + 7) >> 3; - page = alloc_pages_node(nid, GFP_ATOMIC, get_order(sz)); + page = alloc_pages_node(nid, GFP_KERNEL, get_order(sz)); if (!page) panic("iommu_init_table: Can't allocate %ld bytes\n", sz); tbl->it_map = page_address(page); diff --git a/arch/powerpc/kernel/vio.c b/arch/powerpc/kernel/vio.c index 1b695fd..c9f2ac8 100644 --- a/arch/powerpc/kernel/vio.c +++ b/arch/powerpc/kernel/vio.c @@ -1345,11 +1345,15 @@ static ssize_t modalias_show(struct device *dev, struct device_attribute *attr, const char *cp; dn = dev->of_node; - if (!dn) - return -ENODEV; + if (!dn) { + strcat(buf, "\n"); + return strlen(buf); + } cp = of_get_property(dn, "compatible", NULL); - if (!cp) - return -ENODEV; + if (!cp) { + strcat(buf, "\n"); + return strlen(buf); + } return sprintf(buf, "vio:T%sS%s\n", vio_dev->type, cp); } diff --git a/arch/powerpc/kernel/vmlinux.lds.S b/arch/powerpc/kernel/vmlinux.lds.S index 920276c..3e8fe4b 100644 --- a/arch/powerpc/kernel/vmlinux.lds.S +++ b/arch/powerpc/kernel/vmlinux.lds.S @@ -38,9 +38,6 @@ jiffies = jiffies_64 + 4; #endif SECTIONS { - . = 0; - reloc_start = .; - . = KERNELBASE; /* diff --git a/arch/powerpc/lib/checksum_64.S b/arch/powerpc/lib/checksum_64.S index 18245af..afa2eba 100644 --- a/arch/powerpc/lib/checksum_64.S +++ b/arch/powerpc/lib/checksum_64.S @@ -272,8 +272,8 @@ _GLOBAL(csum_partial_copy_generic) rldicl. r6,r3,64-1,64-2 /* r6 = (r3 & 0x3) >> 1 */ beq .Lcopy_aligned - li r7,4 - sub r6,r7,r6 + li r9,4 + sub r6,r9,r6 mtctr r6 1: diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index 2c1ae7a..97042c6 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -221,7 +221,7 @@ int __node_distance(int a, int b) int distance = LOCAL_DISTANCE; if (!form1_affinity) - return distance; + return ((a == b) ? LOCAL_DISTANCE : REMOTE_DISTANCE); for (i = 0; i < distance_ref_points_depth; i++) { if (distance_lookup_table[a][i] == distance_lookup_table[b][i]) diff --git a/arch/powerpc/platforms/cell/spufs/inode.c b/arch/powerpc/platforms/cell/spufs/inode.c index 856e9c3..6786f9d 100644 --- a/arch/powerpc/platforms/cell/spufs/inode.c +++ b/arch/powerpc/platforms/cell/spufs/inode.c @@ -100,6 +100,7 @@ spufs_new_inode(struct super_block *sb, int mode) if (!inode) goto out; + inode->i_ino = get_next_ino(); inode->i_mode = mode; inode->i_uid = current_fsuid(); inode->i_gid = current_fsgid(); diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 81e30d9..2e0b2a7 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -377,7 +377,13 @@ static long pSeries_lpar_hpte_remove(unsigned long hpte_group) (0x1UL << 4), &dummy1, &dummy2); if (lpar_rc == H_SUCCESS) return i; - BUG_ON(lpar_rc != H_NOT_FOUND); + + /* + * The test for adjunct partition is performed before the + * ANDCOND test. H_RESOURCE may be returned, so we need to + * check for that as well. 
+ */ + BUG_ON(lpar_rc != H_NOT_FOUND && lpar_rc != H_RESOURCE); slot_offset++; slot_offset &= 0x7; diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 801fbe1..4e15253 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -67,6 +67,10 @@ static inline int is_zero_pfn(unsigned long pfn) #define my_zero_pfn(addr) page_to_pfn(ZERO_PAGE(addr)) +/* TODO: s390 cannot support io_remap_pfn_range... */ +#define io_remap_pfn_range(vma, vaddr, pfn, size, prot) \ + remap_pfn_range(vma, vaddr, pfn, size, prot) + #endif /* !__ASSEMBLY__ */ /* diff --git a/arch/s390/kvm/kvm-s390.c b/arch/s390/kvm/kvm-s390.c index f9804b7..1e88eef 100644 --- a/arch/s390/kvm/kvm-s390.c +++ b/arch/s390/kvm/kvm-s390.c @@ -445,6 +445,8 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu, static void __vcpu_run(struct kvm_vcpu *vcpu) { + int rc; + memcpy(&vcpu->arch.sie_block->gg14, &vcpu->arch.guest_gprs[14], 16); if (need_resched()) @@ -455,21 +457,24 @@ static void __vcpu_run(struct kvm_vcpu *vcpu) kvm_s390_deliver_pending_interrupts(vcpu); + VCPU_EVENT(vcpu, 6, "entering sie flags %x", + atomic_read(&vcpu->arch.sie_block->cpuflags)); + vcpu->arch.sie_block->icptcode = 0; local_irq_disable(); kvm_guest_enter(); local_irq_enable(); - VCPU_EVENT(vcpu, 6, "entering sie flags %x", - atomic_read(&vcpu->arch.sie_block->cpuflags)); - if (sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs)) { + rc = sie64a(vcpu->arch.sie_block, vcpu->arch.guest_gprs); + local_irq_disable(); + kvm_guest_exit(); + local_irq_enable(); + + if (rc) { VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction"); kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } VCPU_EVENT(vcpu, 6, "exit sie icptcode %d", vcpu->arch.sie_block->icptcode); - local_irq_disable(); - kvm_guest_exit(); - local_irq_enable(); memcpy(&vcpu->arch.guest_gprs[14], &vcpu->arch.sie_block->gg14, 16); } diff --git a/arch/sparc/Kconfig b/arch/sparc/Kconfig index 9e70257..bc31e5e 100644 --- a/arch/sparc/Kconfig +++ b/arch/sparc/Kconfig @@ -31,6 +31,7 @@ config SPARC config SPARC32 def_bool !64BIT + select GENERIC_ATOMIC64 config SPARC64 def_bool 64BIT diff --git a/arch/sparc/include/asm/atomic_32.h b/arch/sparc/include/asm/atomic_32.h index 7ae128b..98f223a 100644 --- a/arch/sparc/include/asm/atomic_32.h +++ b/arch/sparc/include/asm/atomic_32.h @@ -15,6 +15,8 @@ #ifdef __KERNEL__ +#include <asm-generic/atomic64.h> + #include <asm/system.h> #define ATOMIC_INIT(i) { (i) } diff --git a/arch/sparc/include/asm/pgtable_64.h b/arch/sparc/include/asm/pgtable_64.h index 9822628..ba63d08 100644 --- a/arch/sparc/include/asm/pgtable_64.h +++ b/arch/sparc/include/asm/pgtable_64.h @@ -774,6 +774,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma, return remap_pfn_range(vma, from, phys_base >> PAGE_SHIFT, size, prot); } +#include <asm/tlbflush.h> #include <asm-generic/pgtable.h> /* We provide our own get_unmapped_area to cope with VA holes and diff --git a/arch/sparc/include/asm/system_64.h b/arch/sparc/include/asm/system_64.h index 10bcabc..f856c7f 100644 --- a/arch/sparc/include/asm/system_64.h +++ b/arch/sparc/include/asm/system_64.h @@ -140,8 +140,7 @@ do { \ * and 2 stores in this critical code path. -DaveM */ #define switch_to(prev, next, last) \ -do { flush_tlb_pending(); \ - save_and_clear_fpu(); \ +do { save_and_clear_fpu(); \ /* If you are tempted to conditionalize the following */ \ /* so that ASI is only written if it changes, think again. 
*/ \ __asm__ __volatile__("wr %%g0, %0, %%asi" \ diff --git a/arch/sparc/include/asm/tlbflush_64.h b/arch/sparc/include/asm/tlbflush_64.h index 2ef4634..f0d6a97 100644 --- a/arch/sparc/include/asm/tlbflush_64.h +++ b/arch/sparc/include/asm/tlbflush_64.h @@ -11,24 +11,40 @@ struct tlb_batch { struct mm_struct *mm; unsigned long tlb_nr; + unsigned long active; unsigned long vaddrs[TLB_BATCH_NR]; }; extern void flush_tsb_kernel_range(unsigned long start, unsigned long end); extern void flush_tsb_user(struct tlb_batch *tb); +extern void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr); /* TLB flush operations. */ -extern void flush_tlb_pending(void); +static inline void flush_tlb_mm(struct mm_struct *mm) +{ +} + +static inline void flush_tlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ +} + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ +} + +#define __HAVE_ARCH_ENTER_LAZY_MMU_MODE -#define flush_tlb_range(vma,start,end) \ - do { (void)(start); flush_tlb_pending(); } while (0) -#define flush_tlb_page(vma,addr) flush_tlb_pending() -#define flush_tlb_mm(mm) flush_tlb_pending() +extern void flush_tlb_pending(void); +extern void arch_enter_lazy_mmu_mode(void); +extern void arch_leave_lazy_mmu_mode(void); +#define arch_flush_lazy_mmu_mode() do {} while (0) /* Local cpu only. */ extern void __flush_tlb_all(void); - +extern void __flush_tlb_page(unsigned long context, unsigned long vaddr); extern void __flush_tlb_kernel_range(unsigned long start, unsigned long end); #ifndef CONFIG_SMP @@ -38,15 +54,24 @@ do { flush_tsb_kernel_range(start,end); \ __flush_tlb_kernel_range(start,end); \ } while (0) +static inline void global_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + __flush_tlb_page(CTX_HWBITS(mm->context), vaddr); +} + #else /* CONFIG_SMP */ extern void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr); #define flush_tlb_kernel_range(start, end) \ do { flush_tsb_kernel_range(start,end); \ smp_flush_tlb_kernel_range(start, end); \ } while (0) +#define global_flush_tlb_page(mm, vaddr) \ + smp_flush_tlb_page(mm, vaddr) + #endif /* ! CONFIG_SMP */ #endif /* _SPARC64_TLBFLUSH_H */ diff --git a/arch/sparc/kernel/asm-offsets.c b/arch/sparc/kernel/asm-offsets.c index 68f7e11..ce48203 100644 --- a/arch/sparc/kernel/asm-offsets.c +++ b/arch/sparc/kernel/asm-offsets.c @@ -34,6 +34,8 @@ int foo(void) DEFINE(AOFF_task_thread, offsetof(struct task_struct, thread)); BLANK(); DEFINE(AOFF_mm_context, offsetof(struct mm_struct, context)); + BLANK(); + DEFINE(VMA_VM_MM, offsetof(struct vm_area_struct, vm_mm)); /* DEFINE(NUM_USER_SEGMENTS, TASK_SIZE>>28); */ return 0; diff --git a/arch/sparc/kernel/entry.S b/arch/sparc/kernel/entry.S index f445e98..cfabc3d 100644 --- a/arch/sparc/kernel/entry.S +++ b/arch/sparc/kernel/entry.S @@ -1177,7 +1177,7 @@ sys_sigreturn: nop call syscall_trace - nop + mov 1, %o1 1: /* We don't want to muck with user registers like a diff --git a/arch/sparc/kernel/ktlb.S b/arch/sparc/kernel/ktlb.S index 79f3103..7c00735 100644 --- a/arch/sparc/kernel/ktlb.S +++ b/arch/sparc/kernel/ktlb.S @@ -25,11 +25,10 @@ kvmap_itlb: */ kvmap_itlb_4v: -kvmap_itlb_nonlinear: /* Catch kernel NULL pointer calls. 
*/ sethi %hi(PAGE_SIZE), %g5 cmp %g4, %g5 - bleu,pn %xcc, kvmap_dtlb_longpath + blu,pn %xcc, kvmap_itlb_longpath nop KERN_TSB_LOOKUP_TL1(%g4, %g6, %g5, %g1, %g2, %g3, kvmap_itlb_load) diff --git a/arch/sparc/kernel/smp_64.c b/arch/sparc/kernel/smp_64.c index 99cb172..e82c18e 100644 --- a/arch/sparc/kernel/smp_64.c +++ b/arch/sparc/kernel/smp_64.c @@ -856,7 +856,7 @@ void smp_tsb_sync(struct mm_struct *mm) } extern unsigned long xcall_flush_tlb_mm; -extern unsigned long xcall_flush_tlb_pending; +extern unsigned long xcall_flush_tlb_page; extern unsigned long xcall_flush_tlb_kernel_range; extern unsigned long xcall_fetch_glob_regs; extern unsigned long xcall_receive_signal; @@ -1070,23 +1070,56 @@ local_flush_and_out: put_cpu(); } +struct tlb_pending_info { + unsigned long ctx; + unsigned long nr; + unsigned long *vaddrs; +}; + +static void tlb_pending_func(void *info) +{ + struct tlb_pending_info *t = info; + + __flush_tlb_pending(t->ctx, t->nr, t->vaddrs); +} + void smp_flush_tlb_pending(struct mm_struct *mm, unsigned long nr, unsigned long *vaddrs) { u32 ctx = CTX_HWBITS(mm->context); + struct tlb_pending_info info; int cpu = get_cpu(); + info.ctx = ctx; + info.nr = nr; + info.vaddrs = vaddrs; + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); else - smp_cross_call_masked(&xcall_flush_tlb_pending, - ctx, nr, (unsigned long) vaddrs, - mm_cpumask(mm)); + smp_call_function_many(mm_cpumask(mm), tlb_pending_func, + &info, 1); __flush_tlb_pending(ctx, nr, vaddrs); put_cpu(); } +void smp_flush_tlb_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long context = CTX_HWBITS(mm->context); + int cpu = get_cpu(); + + if (mm == current->mm && atomic_read(&mm->mm_users) == 1) + cpumask_copy(mm_cpumask(mm), cpumask_of(cpu)); + else + smp_cross_call_masked(&xcall_flush_tlb_page, + context, vaddr, 0, + mm_cpumask(mm)); + __flush_tlb_page(context, vaddr); + + put_cpu(); +} + void smp_flush_tlb_kernel_range(unsigned long start, unsigned long end) { start &= PAGE_MASK; diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S index 7f5f65d..817187d 100644 --- a/arch/sparc/kernel/syscalls.S +++ b/arch/sparc/kernel/syscalls.S @@ -147,7 +147,7 @@ linux_syscall_trace32: srl %i4, 0, %o4 srl %i1, 0, %o1 srl %i2, 0, %o2 - ba,pt %xcc, 2f + ba,pt %xcc, 5f srl %i3, 0, %o3 linux_syscall_trace: @@ -177,13 +177,13 @@ linux_sparc_syscall32: srl %i1, 0, %o1 ! IEU0 Group ldx [%g6 + TI_FLAGS], %l0 ! Load - srl %i5, 0, %o5 ! IEU1 + srl %i3, 0, %o3 ! IEU0 srl %i2, 0, %o2 ! IEU0 Group andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0 bne,pn %icc, linux_syscall_trace32 ! CTI mov %i0, %l5 ! IEU1 - call %l7 ! CTI Group brk forced - srl %i3, 0, %o3 ! IEU0 +5: call %l7 ! CTI Group brk forced + srl %i5, 0, %o5 ! IEU1 ba,a,pt %xcc, 3f /* Linux native system calls enter here... 
*/ diff --git a/arch/sparc/kernel/trampoline_64.S b/arch/sparc/kernel/trampoline_64.S index da1b781..8fa84a3 100644 --- a/arch/sparc/kernel/trampoline_64.S +++ b/arch/sparc/kernel/trampoline_64.S @@ -131,7 +131,6 @@ startup_continue: clr %l5 sethi %hi(num_kernel_image_mappings), %l6 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 - add %l6, 1, %l6 mov 15, %l7 BRANCH_IF_ANY_CHEETAH(g1,g5,2f) @@ -224,7 +223,6 @@ niagara_lock_tlb: clr %l5 sethi %hi(num_kernel_image_mappings), %l6 lduw [%l6 + %lo(num_kernel_image_mappings)], %l6 - add %l6, 1, %l6 1: mov HV_FAST_MMU_MAP_PERM_ADDR, %o5 diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile index a3fc437..4961516 100644 --- a/arch/sparc/lib/Makefile +++ b/arch/sparc/lib/Makefile @@ -40,7 +40,7 @@ lib-$(CONFIG_SPARC64) += copy_in_user.o user_fixup.o memmove.o lib-$(CONFIG_SPARC64) += mcount.o ipcsum.o xor.o hweight.o ffs.o obj-y += iomap.o -obj-$(CONFIG_SPARC32) += atomic32.o +obj-$(CONFIG_SPARC32) += atomic32.o ucmpdi2.o obj-y += ksyms.o obj-$(CONFIG_SPARC64) += PeeCeeI.o obj-y += usercopy.o diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c index 1b30bb3..fbb8005 100644 --- a/arch/sparc/lib/ksyms.c +++ b/arch/sparc/lib/ksyms.c @@ -131,15 +131,6 @@ EXPORT_SYMBOL(___copy_from_user); EXPORT_SYMBOL(___copy_in_user); EXPORT_SYMBOL(__clear_user); -/* RW semaphores */ -EXPORT_SYMBOL(__down_read); -EXPORT_SYMBOL(__down_read_trylock); -EXPORT_SYMBOL(__down_write); -EXPORT_SYMBOL(__down_write_trylock); -EXPORT_SYMBOL(__up_read); -EXPORT_SYMBOL(__up_write); -EXPORT_SYMBOL(__downgrade_write); - /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); EXPORT_SYMBOL(atomic_add_ret); diff --git a/arch/sparc/lib/ucmpdi2.c b/arch/sparc/lib/ucmpdi2.c new file mode 100644 index 0000000..1e06ed5 --- /dev/null +++ b/arch/sparc/lib/ucmpdi2.c @@ -0,0 +1,19 @@ +#include <linux/module.h> +#include "libgcc.h" + +word_type __ucmpdi2(unsigned long long a, unsigned long long b) +{ + const DWunion au = {.ll = a}; + const DWunion bu = {.ll = b}; + + if ((unsigned int) au.s.high < (unsigned int) bu.s.high) + return 0; + else if ((unsigned int) au.s.high > (unsigned int) bu.s.high) + return 2; + if ((unsigned int) au.s.low < (unsigned int) bu.s.low) + return 0; + else if ((unsigned int) au.s.low > (unsigned int) bu.s.low) + return 2; + return 1; +} +EXPORT_SYMBOL(__ucmpdi2); diff --git a/arch/sparc/mm/hypersparc.S b/arch/sparc/mm/hypersparc.S index 44aad32..969f964 100644 --- a/arch/sparc/mm/hypersparc.S +++ b/arch/sparc/mm/hypersparc.S @@ -74,7 +74,7 @@ hypersparc_flush_cache_mm_out: /* The things we do for performance... */ hypersparc_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #ifndef CONFIG_SMP ld [%o0 + AOFF_mm_context], %g1 cmp %g1, -1 @@ -163,7 +163,7 @@ hypersparc_flush_cache_range_out: */ /* Verified, my ass... 
*/ hypersparc_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %g2 #ifndef CONFIG_SMP cmp %g2, -1 @@ -284,7 +284,7 @@ hypersparc_flush_tlb_mm_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -307,7 +307,7 @@ hypersparc_flush_tlb_range_out: sta %g5, [%g1] ASI_M_MMUREGS hypersparc_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c index 6ff4d78..b4989f9 100644 --- a/arch/sparc/mm/init_64.c +++ b/arch/sparc/mm/init_64.c @@ -1071,7 +1071,14 @@ static int __init grab_mblocks(struct mdesc_handle *md) m->size = *val; val = mdesc_get_property(md, node, "address-congruence-offset", NULL); - m->offset = *val; + + /* The address-congruence-offset property is optional. + * Explicity zero it be identifty this. + */ + if (val) + m->offset = *val; + else + m->offset = 0UL; numadbg("MBLOCK[%d]: base[%llx] size[%llx] offset[%llx]\n", count - 1, m->base, m->size, m->offset); diff --git a/arch/sparc/mm/swift.S b/arch/sparc/mm/swift.S index c801c39..5d2b88d 100644 --- a/arch/sparc/mm/swift.S +++ b/arch/sparc/mm/swift.S @@ -105,7 +105,7 @@ swift_flush_cache_mm_out: .globl swift_flush_cache_range swift_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 sub %o2, %o1, %o2 sethi %hi(4096), %o3 cmp %o2, %o3 @@ -116,7 +116,7 @@ swift_flush_cache_range: .globl swift_flush_cache_page swift_flush_cache_page: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 70: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -219,7 +219,7 @@ swift_flush_sig_insns: .globl swift_flush_tlb_range .globl swift_flush_tlb_all swift_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 swift_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -233,7 +233,7 @@ swift_flush_tlb_all_out: .globl swift_flush_tlb_page swift_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/tlb.c b/arch/sparc/mm/tlb.c index b1f279c..072f553 100644 --- a/arch/sparc/mm/tlb.c +++ b/arch/sparc/mm/tlb.c @@ -24,11 +24,17 @@ static DEFINE_PER_CPU(struct tlb_batch, tlb_batch); void flush_tlb_pending(void) { struct tlb_batch *tb = &get_cpu_var(tlb_batch); + struct mm_struct *mm = tb->mm; - if (tb->tlb_nr) { - flush_tsb_user(tb); + if (!tb->tlb_nr) + goto out; - if (CTX_VALID(tb->mm->context)) { + flush_tsb_user(tb); + + if (CTX_VALID(mm->context)) { + if (tb->tlb_nr == 1) { + global_flush_tlb_page(mm, tb->vaddrs[0]); + } else { #ifdef CONFIG_SMP smp_flush_tlb_pending(tb->mm, tb->tlb_nr, &tb->vaddrs[0]); @@ -37,12 +43,30 @@ void flush_tlb_pending(void) tb->tlb_nr, &tb->vaddrs[0]); #endif } - tb->tlb_nr = 0; } + tb->tlb_nr = 0; + +out: put_cpu_var(tlb_batch); } +void arch_enter_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + tb->active = 1; +} + +void arch_leave_lazy_mmu_mode(void) +{ + struct tlb_batch *tb = &__get_cpu_var(tlb_batch); + + if (tb->tlb_nr) + flush_tlb_pending(); + tb->active = 0; +} + void 
tlb_batch_add(struct mm_struct *mm, unsigned long vaddr, pte_t *ptep, pte_t orig, int fullmm) { @@ -90,6 +114,12 @@ no_cache_flush: nr = 0; } + if (!tb->active) { + flush_tsb_user_page(mm, vaddr); + global_flush_tlb_page(mm, vaddr); + goto out; + } + if (nr == 0) tb->mm = mm; @@ -98,5 +128,6 @@ no_cache_flush: if (nr >= TLB_BATCH_NR) flush_tlb_pending(); +out: put_cpu_var(tlb_batch); } diff --git a/arch/sparc/mm/tsb.c b/arch/sparc/mm/tsb.c index a5f51b2..cb16ff3 100644 --- a/arch/sparc/mm/tsb.c +++ b/arch/sparc/mm/tsb.c @@ -8,11 +8,10 @@ #include <linux/slab.h> #include <asm/system.h> #include <asm/page.h> -#include <asm/tlbflush.h> -#include <asm/tlb.h> -#include <asm/mmu_context.h> #include <asm/pgtable.h> +#include <asm/mmu_context.h> #include <asm/tsb.h> +#include <asm/tlb.h> #include <asm/oplib.h> extern struct tsb swapper_tsb[KERNEL_TSB_NENTRIES]; @@ -47,23 +46,27 @@ void flush_tsb_kernel_range(unsigned long start, unsigned long end) } } -static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, - unsigned long tsb, unsigned long nentries) +static void __flush_tsb_one_entry(unsigned long tsb, unsigned long v, + unsigned long hash_shift, + unsigned long nentries) { - unsigned long i; + unsigned long tag, ent, hash; - for (i = 0; i < tb->tlb_nr; i++) { - unsigned long v = tb->vaddrs[i]; - unsigned long tag, ent, hash; + v &= ~0x1UL; + hash = tsb_hash(v, hash_shift, nentries); + ent = tsb + (hash * sizeof(struct tsb)); + tag = (v >> 22UL); - v &= ~0x1UL; + tsb_flush(ent, tag); +} - hash = tsb_hash(v, hash_shift, nentries); - ent = tsb + (hash * sizeof(struct tsb)); - tag = (v >> 22UL); +static void __flush_tsb_one(struct tlb_batch *tb, unsigned long hash_shift, + unsigned long tsb, unsigned long nentries) +{ + unsigned long i; - tsb_flush(ent, tag); - } + for (i = 0; i < tb->tlb_nr; i++) + __flush_tsb_one_entry(tsb, tb->vaddrs[i], hash_shift, nentries); } void flush_tsb_user(struct tlb_batch *tb) @@ -91,6 +94,30 @@ void flush_tsb_user(struct tlb_batch *tb) spin_unlock_irqrestore(&mm->context.lock, flags); } +void flush_tsb_user_page(struct mm_struct *mm, unsigned long vaddr) +{ + unsigned long nentries, base, flags; + + spin_lock_irqsave(&mm->context.lock, flags); + + base = (unsigned long) mm->context.tsb_block[MM_TSB_BASE].tsb; + nentries = mm->context.tsb_block[MM_TSB_BASE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, PAGE_SHIFT, nentries); + +#if defined(CONFIG_HUGETLB_PAGE) || defined(CONFIG_TRANSPARENT_HUGEPAGE) + if (mm->context.tsb_block[MM_TSB_HUGE].tsb) { + base = (unsigned long) mm->context.tsb_block[MM_TSB_HUGE].tsb; + nentries = mm->context.tsb_block[MM_TSB_HUGE].tsb_nentries; + if (tlb_type == cheetah_plus || tlb_type == hypervisor) + base = __pa(base); + __flush_tsb_one_entry(base, vaddr, HPAGE_SHIFT, nentries); + } +#endif + spin_unlock_irqrestore(&mm->context.lock, flags); +} + #if defined(CONFIG_SPARC64_PAGE_SIZE_8KB) #define HV_PGSZ_IDX_BASE HV_PGSZ_IDX_8K #define HV_PGSZ_MASK_BASE HV_PGSZ_MASK_8K diff --git a/arch/sparc/mm/tsunami.S b/arch/sparc/mm/tsunami.S index 4e55e8f..bf10a34 100644 --- a/arch/sparc/mm/tsunami.S +++ b/arch/sparc/mm/tsunami.S @@ -24,7 +24,7 @@ /* Sliiick... */ tsunami_flush_cache_page: tsunami_flush_cache_range: - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_cache_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -46,7 +46,7 @@ tsunami_flush_sig_insns: /* More slick stuff... 
*/ tsunami_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 tsunami_flush_tlb_mm: ld [%o0 + AOFF_mm_context], %g2 cmp %g2, -1 @@ -65,7 +65,7 @@ tsunami_flush_tlb_out: /* This one can be done in a fine grained manner... */ tsunami_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 andn %o1, (PAGE_SIZE - 1), %o1 diff --git a/arch/sparc/mm/ultra.S b/arch/sparc/mm/ultra.S index 874162a..dd10caa 100644 --- a/arch/sparc/mm/ultra.S +++ b/arch/sparc/mm/ultra.S @@ -53,6 +53,33 @@ __flush_tlb_mm: /* 18 insns */ nop .align 32 + .globl __flush_tlb_page +__flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, %pstate + mov SECONDARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + andn %o1, 1, %o3 + be,pn %icc, 1f + or %o3, 0x10, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + retl + wrpr %g7, 0x0, %pstate + nop + nop + nop + nop + + .align 32 .globl __flush_tlb_pending __flush_tlb_pending: /* 26 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ @@ -203,6 +230,31 @@ __cheetah_flush_tlb_mm: /* 19 insns */ retl wrpr %g7, 0x0, %pstate +__cheetah_flush_tlb_page: /* 22 insns */ + /* %o0 = context, %o1 = vaddr */ + rdpr %pstate, %g7 + andn %g7, PSTATE_IE, %g2 + wrpr %g2, 0x0, %pstate + wrpr %g0, 1, %tl + mov PRIMARY_CONTEXT, %o4 + ldxa [%o4] ASI_DMMU, %g2 + srlx %g2, CTX_PGSZ1_NUC_SHIFT, %o3 + sllx %o3, CTX_PGSZ1_NUC_SHIFT, %o3 + or %o0, %o3, %o0 /* Preserve nucleus page size fields */ + stxa %o0, [%o4] ASI_DMMU + andcc %o1, 1, %g0 + be,pn %icc, 1f + andn %o1, 1, %o3 + stxa %g0, [%o3] ASI_IMMU_DEMAP +1: stxa %g0, [%o3] ASI_DMMU_DEMAP + membar #Sync + stxa %g2, [%o4] ASI_DMMU + sethi %hi(KERNBASE), %o4 + flush %o4 + wrpr %g0, 0, %tl + retl + wrpr %g7, 0x0, %pstate + __cheetah_flush_tlb_pending: /* 27 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ rdpr %pstate, %g7 @@ -269,6 +321,20 @@ __hypervisor_flush_tlb_mm: /* 10 insns */ retl nop +__hypervisor_flush_tlb_page: /* 11 insns */ + /* %o0 = context, %o1 = vaddr */ + mov %o0, %g2 + mov %o1, %o0 /* ARG0: vaddr + IMMU-bit */ + mov %g2, %o1 /* ARG1: mmu context */ + mov HV_MMU_ALL, %o2 /* ARG2: flags */ + srlx %o0, PAGE_SHIFT, %o0 + sllx %o0, PAGE_SHIFT, %o0 + ta HV_MMU_UNMAP_ADDR_TRAP + brnz,pn %o0, __hypervisor_tlb_tl0_error + mov HV_MMU_UNMAP_ADDR_TRAP, %o1 + retl + nop + __hypervisor_flush_tlb_pending: /* 16 insns */ /* %o0 = context, %o1 = nr, %o2 = vaddrs[] */ sllx %o1, 3, %g1 @@ -339,6 +405,13 @@ cheetah_patch_cachetlbops: call tlb_patch_one mov 19, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__cheetah_flush_tlb_page), %o1 + or %o1, %lo(__cheetah_flush_tlb_page), %o1 + call tlb_patch_one + mov 22, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__cheetah_flush_tlb_pending), %o1 @@ -397,10 +470,9 @@ xcall_flush_tlb_mm: /* 21 insns */ nop nop - .globl xcall_flush_tlb_pending -xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=context, %g1=nr, %g7=vaddrs[] */ - sllx %g1, 3, %g1 + .globl xcall_flush_tlb_page +xcall_flush_tlb_page: /* 17 insns */ + /* %g5=context, %g1=vaddr */ mov PRIMARY_CONTEXT, %g4 ldxa [%g4] ASI_DMMU, %g2 srlx %g2, CTX_PGSZ1_NUC_SHIFT, %g4 @@ -408,20 +480,16 @@ 
xcall_flush_tlb_pending: /* 21 insns */ or %g5, %g4, %g5 mov PRIMARY_CONTEXT, %g4 stxa %g5, [%g4] ASI_DMMU -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %g5 - andcc %g5, 0x1, %g0 + andcc %g1, 0x1, %g0 be,pn %icc, 2f - - andn %g5, 0x1, %g5 + andn %g1, 0x1, %g5 stxa %g0, [%g5] ASI_IMMU_DEMAP 2: stxa %g0, [%g5] ASI_DMMU_DEMAP membar #Sync - brnz,pt %g1, 1b - nop stxa %g2, [%g4] ASI_DMMU retry nop + nop .globl xcall_flush_tlb_kernel_range xcall_flush_tlb_kernel_range: /* 25 insns */ @@ -596,15 +664,13 @@ __hypervisor_xcall_flush_tlb_mm: /* 21 insns */ membar #Sync retry - .globl __hypervisor_xcall_flush_tlb_pending -__hypervisor_xcall_flush_tlb_pending: /* 21 insns */ - /* %g5=ctx, %g1=nr, %g7=vaddrs[], %g2,%g3,%g4,g6=scratch */ - sllx %g1, 3, %g1 + .globl __hypervisor_xcall_flush_tlb_page +__hypervisor_xcall_flush_tlb_page: /* 17 insns */ + /* %g5=ctx, %g1=vaddr */ mov %o0, %g2 mov %o1, %g3 mov %o2, %g4 -1: sub %g1, (1 << 3), %g1 - ldx [%g7 + %g1], %o0 /* ARG0: virtual address */ + mov %g1, %o0 /* ARG0: virtual address */ mov %g5, %o1 /* ARG1: mmu context */ mov HV_MMU_ALL, %o2 /* ARG2: flags */ srlx %o0, PAGE_SHIFT, %o0 @@ -613,8 +679,6 @@ __hypervisor_xcall_flush_tlb_pending: /* 21 insns */ mov HV_MMU_UNMAP_ADDR_TRAP, %g6 brnz,a,pn %o0, __hypervisor_tlb_xcall_error mov %o0, %g5 - brnz,pt %g1, 1b - nop mov %g2, %o0 mov %g3, %o1 mov %g4, %o2 @@ -697,6 +761,13 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 10, %o2 + sethi %hi(__flush_tlb_page), %o0 + or %o0, %lo(__flush_tlb_page), %o0 + sethi %hi(__hypervisor_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_flush_tlb_page), %o1 + call tlb_patch_one + mov 11, %o2 + sethi %hi(__flush_tlb_pending), %o0 or %o0, %lo(__flush_tlb_pending), %o0 sethi %hi(__hypervisor_flush_tlb_pending), %o1 @@ -728,12 +799,12 @@ hypervisor_patch_cachetlbops: call tlb_patch_one mov 21, %o2 - sethi %hi(xcall_flush_tlb_pending), %o0 - or %o0, %lo(xcall_flush_tlb_pending), %o0 - sethi %hi(__hypervisor_xcall_flush_tlb_pending), %o1 - or %o1, %lo(__hypervisor_xcall_flush_tlb_pending), %o1 + sethi %hi(xcall_flush_tlb_page), %o0 + or %o0, %lo(xcall_flush_tlb_page), %o0 + sethi %hi(__hypervisor_xcall_flush_tlb_page), %o1 + or %o1, %lo(__hypervisor_xcall_flush_tlb_page), %o1 call tlb_patch_one - mov 21, %o2 + mov 17, %o2 sethi %hi(xcall_flush_tlb_kernel_range), %o0 or %o0, %lo(xcall_flush_tlb_kernel_range), %o0 diff --git a/arch/sparc/mm/viking.S b/arch/sparc/mm/viking.S index 6dfcc13..a516372 100644 --- a/arch/sparc/mm/viking.S +++ b/arch/sparc/mm/viking.S @@ -109,7 +109,7 @@ viking_mxcc_flush_page: viking_flush_cache_page: viking_flush_cache_range: #ifndef CONFIG_SMP - ld [%o0 + 0x0], %o0 /* XXX vma->vm_mm, GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 #endif viking_flush_cache_mm: #ifndef CONFIG_SMP @@ -149,7 +149,7 @@ viking_flush_tlb_mm: #endif viking_flush_tlb_range: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -174,7 +174,7 @@ viking_flush_tlb_range: #endif viking_flush_tlb_page: - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 mov SRMMU_CTX_REG, %g1 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 @@ -240,7 +240,7 @@ sun4dsmp_flush_tlb_range: tst %g5 bne 3f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 sethi %hi(~((1 << SRMMU_PGDIR_SHIFT) - 1)), %o4 @@ -266,7 +266,7 @@ 
sun4dsmp_flush_tlb_page: tst %g5 bne 2f mov SRMMU_CTX_REG, %g1 - ld [%o0 + 0x00], %o0 /* XXX vma->vm_mm GROSS XXX */ + ld [%o0 + VMA_VM_MM], %o0 ld [%o0 + AOFF_mm_context], %o3 lda [%g1] ASI_M_MMUREGS, %g5 and %o1, PAGE_MASK, %o1 diff --git a/arch/tile/include/asm/percpu.h b/arch/tile/include/asm/percpu.h index 63294f5..4f7ae39 100644 --- a/arch/tile/include/asm/percpu.h +++ b/arch/tile/include/asm/percpu.h @@ -15,9 +15,37 @@ #ifndef _ASM_TILE_PERCPU_H #define _ASM_TILE_PERCPU_H -register unsigned long __my_cpu_offset __asm__("tp"); -#define __my_cpu_offset __my_cpu_offset -#define set_my_cpu_offset(tp) (__my_cpu_offset = (tp)) +register unsigned long my_cpu_offset_reg asm("tp"); + +#ifdef CONFIG_PREEMPT +/* + * For full preemption, we can't just use the register variable + * directly, since we need barrier() to hazard against it, causing the + * compiler to reload anything computed from a previous "tp" value. + * But we also don't want to use volatile asm, since we'd like the + * compiler to be able to cache the value across multiple percpu reads. + * So we use a fake stack read as a hazard against barrier(). + * The 'U' constraint is like 'm' but disallows postincrement. + */ +static inline unsigned long __my_cpu_offset(void) +{ + unsigned long tp; + register unsigned long *sp asm("sp"); + asm("move %0, tp" : "=r" (tp) : "U" (*sp)); + return tp; +} +#define __my_cpu_offset __my_cpu_offset() +#else +/* + * We don't need to hazard against barrier() since "tp" doesn't ever + * change with PREEMPT_NONE, and with PREEMPT_VOLUNTARY it only + * changes at function call points, at which we are already re-reading + * the value of "tp" due to "my_cpu_offset_reg" being a global variable. + */ +#define __my_cpu_offset my_cpu_offset_reg +#endif + +#define set_my_cpu_offset(tp) (my_cpu_offset_reg = (tp)) #include <asm-generic/percpu.h> diff --git a/arch/tile/lib/exports.c b/arch/tile/lib/exports.c index 49284fa..0996ef7 100644 --- a/arch/tile/lib/exports.c +++ b/arch/tile/lib/exports.c @@ -89,4 +89,6 @@ uint64_t __ashrdi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashrdi3); uint64_t __ashldi3(uint64_t, unsigned int); EXPORT_SYMBOL(__ashldi3); +int __ffsdi2(uint64_t); +EXPORT_SYMBOL(__ffsdi2); #endif diff --git a/arch/um/include/asm/pgtable.h b/arch/um/include/asm/pgtable.h index aa365c5..5888f1b 100644 --- a/arch/um/include/asm/pgtable.h +++ b/arch/um/include/asm/pgtable.h @@ -69,6 +69,8 @@ extern unsigned long end_iomem; #define PAGE_KERNEL __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED) #define PAGE_KERNEL_EXEC __pgprot(__PAGE_KERNEL_EXEC) +#define io_remap_pfn_range remap_pfn_range + /* * The i386 can't do page protection for execute, and considers that the same * are read. 
diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig index a0e9bda..90bf314 100644 --- a/arch/x86/Kconfig +++ b/arch/x86/Kconfig @@ -1219,10 +1219,6 @@ config HAVE_ARCH_BOOTMEM def_bool y depends on X86_32 && NUMA -config HAVE_ARCH_ALLOC_REMAP - def_bool y - depends on X86_32 && NUMA - config ARCH_HAVE_MEMORY_PRESENT def_bool y depends on X86_32 && DISCONTIGMEM diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h index d2ac8e2..1eb45de 100644 --- a/arch/x86/include/asm/kvm_host.h +++ b/arch/x86/include/asm/kvm_host.h @@ -391,8 +391,8 @@ struct kvm_vcpu_arch { gpa_t time; struct pvclock_vcpu_time_info hv_clock; unsigned int hw_tsc_khz; - unsigned int time_offset; - struct page *time_page; + struct gfn_to_hva_cache pv_time; + bool pv_time_enabled; u64 last_guest_tsc; u64 last_kernel_ns; u64 last_tsc_nsec; diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h index ebbc4d8..2fdfe31 100644 --- a/arch/x86/include/asm/paravirt.h +++ b/arch/x86/include/asm/paravirt.h @@ -731,7 +731,10 @@ static inline void arch_leave_lazy_mmu_mode(void) PVOP_VCALL0(pv_mmu_ops.lazy_mode.leave); } -void arch_flush_lazy_mmu_mode(void); +static inline void arch_flush_lazy_mmu_mode(void) +{ + PVOP_VCALL0(pv_mmu_ops.lazy_mode.flush); +} static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx, phys_addr_t phys, pgprot_t flags) diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h index 8288509..4b67ec9 100644 --- a/arch/x86/include/asm/paravirt_types.h +++ b/arch/x86/include/asm/paravirt_types.h @@ -85,6 +85,7 @@ struct pv_lazy_ops { /* Set deferred update mode, used for batching operations. */ void (*enter)(void); void (*leave)(void); + void (*flush)(void); }; struct pv_time_ops { @@ -673,6 +674,7 @@ void paravirt_end_context_switch(struct task_struct *next); void paravirt_enter_lazy_mmu(void); void paravirt_leave_lazy_mmu(void); +void paravirt_flush_lazy_mmu(void); void _paravirt_nop(void); u32 _paravirt_ident_32(u32); diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c index a93741d..3f4b6da 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c @@ -568,20 +568,6 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) } } - /* - * The way access filter has a performance penalty on some workloads. - * Disable it on the affected CPUs. - */ - if ((c->x86 == 0x15) && - (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { - u64 val; - - if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { - val |= 0x1E; - checking_wrmsrl(0xc0011021, val); - } - } - cpu_detect_cache_sizes(c); /* Multi core CPU? 
*/ diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c index 12aff25..f7183ec 100644 --- a/arch/x86/kernel/i387.c +++ b/arch/x86/kernel/i387.c @@ -51,7 +51,7 @@ void __cpuinit mxcsr_feature_mask_init(void) clts(); if (cpu_has_fxsr) { memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct)); - asm volatile("fxsave %0" : : "m" (fx_scratch)); + asm volatile("fxsave %0" : "+m" (fx_scratch)); mask = fx_scratch.mxcsr_mask; if (mask == 0) mask = 0x0000ffbf; diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 6c0802e..a669961 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -159,10 +159,6 @@ u64 arch_irq_stat_cpu(unsigned int cpu) u64 arch_irq_stat(void) { u64 sum = atomic_read(&irq_err_count); - -#ifdef CONFIG_X86_IO_APIC - sum += atomic_read(&irq_mis_count); -#endif return sum; } diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c index 869e1ae..704faba 100644 --- a/arch/x86/kernel/paravirt.c +++ b/arch/x86/kernel/paravirt.c @@ -253,6 +253,18 @@ void paravirt_leave_lazy_mmu(void) leave_lazy(PARAVIRT_LAZY_MMU); } +void paravirt_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + void paravirt_start_context_switch(struct task_struct *prev) { BUG_ON(preemptible()); @@ -282,18 +294,6 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void) return percpu_read(paravirt_lazy_mode); } -void arch_flush_lazy_mmu_mode(void) -{ - preempt_disable(); - - if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_MMU) { - arch_leave_lazy_mmu_mode(); - arch_enter_lazy_mmu_mode(); - } - - preempt_enable(); -} - struct pv_info pv_info = { .name = "bare hardware", .paravirt_enabled = 0, @@ -462,6 +462,7 @@ struct pv_mmu_ops pv_mmu_ops = { .lazy_mode = { .enter = paravirt_nop, .leave = paravirt_nop, + .flush = paravirt_nop, }, .set_fixmap = native_set_fixmap, diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c index 89d6877..282c98f 100644 --- a/arch/x86/kernel/reboot.c +++ b/arch/x86/kernel/reboot.c @@ -460,6 +460,22 @@ static struct dmi_system_id __initdata pci_reboot_dmi_table[] = { DMI_MATCH(DMI_PRODUCT_NAME, "Precision M6600"), }, }, + { /* Handle problems with rebooting on the Dell PowerEdge C6100. */ + .callback = set_pci_reboot, + .ident = "Dell PowerEdge C6100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "C6100"), + }, + }, + { /* Some C6100 machines were shipped with vendor being 'Dell'. 
*/ + .callback = set_pci_reboot, + .ident = "Dell PowerEdge C6100", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Dell"), + DMI_MATCH(DMI_PRODUCT_NAME, "C6100"), + }, + }, { } }; diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S index 7a6f3b3..f2bb9c9 100644 --- a/arch/x86/kernel/relocate_kernel_64.S +++ b/arch/x86/kernel/relocate_kernel_64.S @@ -160,7 +160,7 @@ identity_mapped: xorq %rbp, %rbp xorq %r8, %r8 xorq %r9, %r9 - xorq %r10, %r9 + xorq %r10, %r10 xorq %r11, %r11 xorq %r12, %r12 xorq %r13, %r13 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c index 2ad060a..be1d830 100644 --- a/arch/x86/kvm/vmx.c +++ b/arch/x86/kvm/vmx.c @@ -3836,6 +3836,12 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu) if (err != EMULATE_DONE) return 0; + if (vcpu->arch.halt_request) { + vcpu->arch.halt_request = 0; + ret = kvm_emulate_halt(vcpu); + goto out; + } + if (signal_pending(current)) goto out; if (need_resched()) diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index e329dc5..34afae8 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -548,8 +548,6 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) if (index != XCR_XFEATURE_ENABLED_MASK) return 1; xcr0 = xcr; - if (kvm_x86_ops->get_cpl(vcpu) != 0) - return 1; if (!(xcr0 & XSTATE_FP)) return 1; if ((xcr0 & XSTATE_YMM) && !(xcr0 & XSTATE_SSE)) @@ -563,7 +561,8 @@ int __kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) int kvm_set_xcr(struct kvm_vcpu *vcpu, u32 index, u64 xcr) { - if (__kvm_set_xcr(vcpu, index, xcr)) { + if (kvm_x86_ops->get_cpl(vcpu) != 0 || + __kvm_set_xcr(vcpu, index, xcr)) { kvm_inject_gp(vcpu, 0); return 1; } @@ -1073,7 +1072,6 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) { unsigned long flags; struct kvm_vcpu_arch *vcpu = &v->arch; - void *shared_kaddr; unsigned long this_tsc_khz; s64 kernel_ns, max_kernel_ns; u64 tsc_timestamp; @@ -1109,7 +1107,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) local_irq_restore(flags); - if (!vcpu->time_page) + if (!vcpu->pv_time_enabled) return 0; /* @@ -1167,14 +1165,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v) */ vcpu->hv_clock.version += 2; - shared_kaddr = kmap_atomic(vcpu->time_page, KM_USER0); - - memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock, - sizeof(vcpu->hv_clock)); - - kunmap_atomic(shared_kaddr, KM_USER0); - - mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT); + kvm_write_guest_cached(v->kvm, &vcpu->pv_time, + &vcpu->hv_clock, + sizeof(vcpu->hv_clock)); return 0; } @@ -1454,7 +1447,8 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) return 0; } - if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa)) + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, &vcpu->arch.apf.data, gpa, + sizeof(u32))) return 1; vcpu->arch.apf.send_user_only = !(data & KVM_ASYNC_PF_SEND_ALWAYS); @@ -1464,10 +1458,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data) static void kvmclock_reset(struct kvm_vcpu *vcpu) { - if (vcpu->arch.time_page) { - kvm_release_page_dirty(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + vcpu->arch.pv_time_enabled = false; } int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) @@ -1527,6 +1518,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) break; case MSR_KVM_SYSTEM_TIME_NEW: case MSR_KVM_SYSTEM_TIME: { + u64 gpa_offset; kvmclock_reset(vcpu); vcpu->arch.time = data; @@ -1536,16 +1528,14 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, u32 msr, u64 data) if (!(data 
& 1)) break; - /* ...but clean it before doing the actual write */ - vcpu->arch.time_offset = data & ~(PAGE_MASK | 1); - - vcpu->arch.time_page = - gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT); + gpa_offset = data & ~(PAGE_MASK | 1); - if (is_error_page(vcpu->arch.time_page)) { - kvm_release_page_clean(vcpu->arch.time_page); - vcpu->arch.time_page = NULL; - } + if (kvm_gfn_to_hva_cache_init(vcpu->kvm, + &vcpu->arch.pv_time, data & ~1ULL, + sizeof(struct pvclock_vcpu_time_info))) + vcpu->arch.pv_time_enabled = false; + else + vcpu->arch.pv_time_enabled = true; break; } case MSR_KVM_ASYNC_PF_EN: @@ -6252,6 +6242,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu) if (!zalloc_cpumask_var(&vcpu->arch.wbinvd_dirty_mask, GFP_KERNEL)) goto fail_free_mce_banks; + vcpu->arch.pv_time_enabled = false; kvm_async_pf_hash_reset(vcpu); return 0; diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c index db832fd..2d45247 100644 --- a/arch/x86/lguest/boot.c +++ b/arch/x86/lguest/boot.c @@ -1309,6 +1309,7 @@ __init void lguest_init(void) pv_mmu_ops.read_cr3 = lguest_read_cr3; pv_mmu_ops.lazy_mode.enter = paravirt_enter_lazy_mmu; pv_mmu_ops.lazy_mode.leave = lguest_leave_lazy_mmu_mode; + pv_mmu_ops.lazy_mode.flush = paravirt_flush_lazy_mmu; pv_mmu_ops.pte_update = lguest_pte_update; pv_mmu_ops.pte_update_defer = lguest_pte_update; diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c index 3b2ad91..7653f14 100644 --- a/arch/x86/mm/fault.c +++ b/arch/x86/mm/fault.c @@ -376,10 +376,12 @@ static noinline __kprobes int vmalloc_fault(unsigned long address) if (pgd_none(*pgd_ref)) return -1; - if (pgd_none(*pgd)) + if (pgd_none(*pgd)) { set_pgd(pgd, *pgd_ref); - else + arch_flush_lazy_mmu_mode(); + } else { BUG_ON(pgd_page_vaddr(*pgd) != pgd_page_vaddr(*pgd_ref)); + } /* * Below here mismatches are bugs because these lower tables diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c index c22c423..96c4577 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -44,11 +44,15 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range) int i; unsigned long puds = 0, pmds = 0, ptes = 0, tables; unsigned long start = 0, good_end; + unsigned long pgd_extra = 0; phys_addr_t base; for (i = 0; i < nr_range; i++) { unsigned long range, extra; + if ((mr[i].end >> PGDIR_SHIFT) - (mr[i].start >> PGDIR_SHIFT)) + pgd_extra++; + range = mr[i].end - mr[i].start; puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; @@ -73,6 +77,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range) tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); + tables += (pgd_extra * PAGE_SIZE); #ifdef CONFIG_X86_32 /* for fixmap */ diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c index f5510d8..469ccae 100644 --- a/arch/x86/mm/numa.c +++ b/arch/x86/mm/numa.c @@ -207,9 +207,6 @@ static void __init setup_node_data(int nid, u64 start, u64 end) if (end && (end - start) < NODE_MIN_SIZE) return; - /* initialize remap allocator before aligning to ZONE_ALIGN */ - init_alloc_remap(nid, start, end); - start = roundup(start, ZONE_ALIGN); printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n", diff --git a/arch/x86/mm/numa_32.c b/arch/x86/mm/numa_32.c index 849a975..025d469 100644 --- a/arch/x86/mm/numa_32.c +++ b/arch/x86/mm/numa_32.c @@ -73,167 +73,6 @@ unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn, extern unsigned long highend_pfn, highstart_pfn; -#define LARGE_PAGE_BYTES (PTRS_PER_PTE 
* PAGE_SIZE) - -static void *node_remap_start_vaddr[MAX_NUMNODES]; -void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); - -/* - * Remap memory allocator - */ -static unsigned long node_remap_start_pfn[MAX_NUMNODES]; -static void *node_remap_end_vaddr[MAX_NUMNODES]; -static void *node_remap_alloc_vaddr[MAX_NUMNODES]; - -/** - * alloc_remap - Allocate remapped memory - * @nid: NUMA node to allocate memory from - * @size: The size of allocation - * - * Allocate @size bytes from the remap area of NUMA node @nid. The - * size of the remap area is predetermined by init_alloc_remap() and - * only the callers considered there should call this function. For - * more info, please read the comment on top of init_alloc_remap(). - * - * The caller must be ready to handle allocation failure from this - * function and fall back to regular memory allocator in such cases. - * - * CONTEXT: - * Single CPU early boot context. - * - * RETURNS: - * Pointer to the allocated memory on success, %NULL on failure. - */ -void *alloc_remap(int nid, unsigned long size) -{ - void *allocation = node_remap_alloc_vaddr[nid]; - - size = ALIGN(size, L1_CACHE_BYTES); - - if (!allocation || (allocation + size) > node_remap_end_vaddr[nid]) - return NULL; - - node_remap_alloc_vaddr[nid] += size; - memset(allocation, 0, size); - - return allocation; -} - -#ifdef CONFIG_HIBERNATION -/** - * resume_map_numa_kva - add KVA mapping to the temporary page tables created - * during resume from hibernation - * @pgd_base - temporary resume page directory - */ -void resume_map_numa_kva(pgd_t *pgd_base) -{ - int node; - - for_each_online_node(node) { - unsigned long start_va, start_pfn, nr_pages, pfn; - - start_va = (unsigned long)node_remap_start_vaddr[node]; - start_pfn = node_remap_start_pfn[node]; - nr_pages = (node_remap_end_vaddr[node] - - node_remap_start_vaddr[node]) >> PAGE_SHIFT; - - printk(KERN_DEBUG "%s: node %d\n", __func__, node); - - for (pfn = 0; pfn < nr_pages; pfn += PTRS_PER_PTE) { - unsigned long vaddr = start_va + (pfn << PAGE_SHIFT); - pgd_t *pgd = pgd_base + pgd_index(vaddr); - pud_t *pud = pud_offset(pgd, vaddr); - pmd_t *pmd = pmd_offset(pud, vaddr); - - set_pmd(pmd, pfn_pmd(start_pfn + pfn, - PAGE_KERNEL_LARGE_EXEC)); - - printk(KERN_DEBUG "%s: %08lx -> pfn %08lx\n", - __func__, vaddr, start_pfn + pfn); - } - } -} -#endif - -/** - * init_alloc_remap - Initialize remap allocator for a NUMA node - * @nid: NUMA node to initizlie remap allocator for - * - * NUMA nodes may end up without any lowmem. As allocating pgdat and - * memmap on a different node with lowmem is inefficient, a special - * remap allocator is implemented which can be used by alloc_remap(). - * - * For each node, the amount of memory which will be necessary for - * pgdat and memmap is calculated and two memory areas of the size are - * allocated - one in the node and the other in lowmem; then, the area - * in the node is remapped to the lowmem area. - * - * As pgdat and memmap must be allocated in lowmem anyway, this - * doesn't waste lowmem address space; however, the actual lowmem - * which gets remapped over is wasted. The amount shouldn't be - * problematic on machines this feature will be used. - * - * Initialization failure isn't fatal. alloc_remap() is used - * opportunistically and the callers will fall back to other memory - * allocation mechanisms on failure. 
- */ -void __init init_alloc_remap(int nid, u64 start, u64 end) -{ - unsigned long start_pfn = start >> PAGE_SHIFT; - unsigned long end_pfn = end >> PAGE_SHIFT; - unsigned long size, pfn; - u64 node_pa, remap_pa; - void *remap_va; - - /* - * The acpi/srat node info can show hot-add memroy zones where - * memory could be added but not currently present. - */ - printk(KERN_DEBUG "node %d pfn: [%lx - %lx]\n", - nid, start_pfn, end_pfn); - - /* calculate the necessary space aligned to large page size */ - size = node_memmap_size_bytes(nid, start_pfn, end_pfn); - size += ALIGN(sizeof(pg_data_t), PAGE_SIZE); - size = ALIGN(size, LARGE_PAGE_BYTES); - - /* allocate node memory and the lowmem remap area */ - node_pa = memblock_find_in_range(start, end, size, LARGE_PAGE_BYTES); - if (node_pa == MEMBLOCK_ERROR) { - pr_warning("remap_alloc: failed to allocate %lu bytes for node %d\n", - size, nid); - return; - } - memblock_x86_reserve_range(node_pa, node_pa + size, "KVA RAM"); - - remap_pa = memblock_find_in_range(min_low_pfn << PAGE_SHIFT, - max_low_pfn << PAGE_SHIFT, - size, LARGE_PAGE_BYTES); - if (remap_pa == MEMBLOCK_ERROR) { - pr_warning("remap_alloc: failed to allocate %lu bytes remap area for node %d\n", - size, nid); - memblock_x86_free_range(node_pa, node_pa + size); - return; - } - memblock_x86_reserve_range(remap_pa, remap_pa + size, "KVA PG"); - remap_va = phys_to_virt(remap_pa); - - /* perform actual remap */ - for (pfn = 0; pfn < size >> PAGE_SHIFT; pfn += PTRS_PER_PTE) - set_pmd_pfn((unsigned long)remap_va + (pfn << PAGE_SHIFT), - (node_pa >> PAGE_SHIFT) + pfn, - PAGE_KERNEL_LARGE); - - /* initialize remap allocator parameters */ - node_remap_start_pfn[nid] = node_pa >> PAGE_SHIFT; - node_remap_start_vaddr[nid] = remap_va; - node_remap_end_vaddr[nid] = remap_va + size; - node_remap_alloc_vaddr[nid] = remap_va; - - printk(KERN_DEBUG "remap_alloc: node %d [%08llx-%08llx) -> [%p-%p)\n", - nid, node_pa, node_pa + size, remap_va, remap_va + size); -} - void __init initmem_init(void) { x86_numa_init(); diff --git a/arch/x86/mm/numa_internal.h b/arch/x86/mm/numa_internal.h index 7178c3a..ad86ec9 100644 --- a/arch/x86/mm/numa_internal.h +++ b/arch/x86/mm/numa_internal.h @@ -21,12 +21,6 @@ void __init numa_reset_distance(void); void __init x86_numa_init(void); -#ifdef CONFIG_X86_64 -static inline void init_alloc_remap(int nid, u64 start, u64 end) { } -#else -void __init init_alloc_remap(int nid, u64 start, u64 end); -#endif - #ifdef CONFIG_NUMA_EMU void __init numa_emulation(struct numa_meminfo *numa_meminfo, int numa_dist_cnt); diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c index 899e393..86272f0 100644 --- a/arch/x86/platform/efi/efi.c +++ b/arch/x86/platform/efi/efi.c @@ -588,10 +588,13 @@ void __init efi_enter_virtual_mode(void) for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) { md = p; - if (!(md->attribute & EFI_MEMORY_RUNTIME) && - md->type != EFI_BOOT_SERVICES_CODE && - md->type != EFI_BOOT_SERVICES_DATA) - continue; + if (!(md->attribute & EFI_MEMORY_RUNTIME)) { +#ifdef CONFIG_X86_64 + if (md->type != EFI_BOOT_SERVICES_CODE && + md->type != EFI_BOOT_SERVICES_DATA) +#endif + continue; + } size = md->num_pages << EFI_PAGE_SHIFT; end = md->phys_addr + size; diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c index 9f808af..e11efbd 100644 --- a/arch/x86/xen/enlighten.c +++ b/arch/x86/xen/enlighten.c @@ -129,6 +129,21 @@ static void xen_vcpu_setup(int cpu) BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + /* + * This path is 
called twice on PVHVM - first during bootup via + * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being + * hotplugged: cpu_up -> xen_hvm_cpu_notify. + * As we can only do the VCPUOP_register_vcpu_info once lets + * not over-write its result. + * + * For PV it is called during restore (xen_vcpu_restore) and bootup + * (xen_setup_vcpu_info_placement). The hotplug mechanism does not + * use this function. + */ + if (xen_hvm_domain()) { + if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) + return; + } if (cpu < MAX_VIRT_CPUS) per_cpu(xen_vcpu,cpu) = &HYPERVISOR_shared_info->vcpu_info[cpu]; @@ -1365,8 +1380,11 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self, switch (action) { case CPU_UP_PREPARE: xen_vcpu_setup(cpu); - if (xen_have_vector_callback) + if (xen_have_vector_callback) { xen_init_lock_cpu(cpu); + if (xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_setup_timer(cpu); + } break; default: break; diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c index d957dce..a0aed70 100644 --- a/arch/x86/xen/mmu.c +++ b/arch/x86/xen/mmu.c @@ -2011,6 +2011,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = { .lazy_mode = { .enter = paravirt_enter_lazy_mmu, .leave = xen_leave_lazy_mmu, + .flush = paravirt_flush_lazy_mmu, }, .set_fixmap = xen_set_fixmap, diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c index 5158c50..19568a0 100644 --- a/arch/x86/xen/time.c +++ b/arch/x86/xen/time.c @@ -36,9 +36,8 @@ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate); /* snapshots of runstate info */ static DEFINE_PER_CPU(struct vcpu_runstate_info, xen_runstate_snapshot); -/* unused ns of stolen and blocked time */ +/* unused ns of stolen time */ static DEFINE_PER_CPU(u64, xen_residual_stolen); -static DEFINE_PER_CPU(u64, xen_residual_blocked); /* return an consistent snapshot of 64-bit time/counter value */ static u64 get64(const u64 *p) @@ -115,7 +114,7 @@ static void do_stolen_accounting(void) { struct vcpu_runstate_info state; struct vcpu_runstate_info *snap; - s64 blocked, runnable, offline, stolen; + s64 runnable, offline, stolen; cputime_t ticks; get_runstate_snapshot(&state); @@ -125,7 +124,6 @@ static void do_stolen_accounting(void) snap = &__get_cpu_var(xen_runstate_snapshot); /* work out how much time the VCPU has not been runn*ing* */ - blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline]; @@ -141,17 +139,6 @@ static void do_stolen_accounting(void) ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); __this_cpu_write(xen_residual_stolen, stolen); account_steal_ticks(ticks); - - /* Add the appropriate number of ticks of blocked time, - including any left-overs from last time. */ - blocked += __this_cpu_read(xen_residual_blocked); - - if (blocked < 0) - blocked = 0; - - ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); - __this_cpu_write(xen_residual_blocked, blocked); - account_idle_ticks(ticks); } /* Get the TSC speed from Xen */ @@ -482,7 +469,11 @@ static void xen_hvm_setup_cpu_clockevents(void) { int cpu = smp_processor_id(); xen_setup_runstate_info(cpu); - xen_setup_timer(cpu); + /* + * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence + * doing it xen_hvm_cpu_notify (which gets called by smp_init during + * early bootup and also during CPU hotplug events). 
+ */ xen_setup_cpu_clockevents(); } diff --git a/block/blk-sysfs.c b/block/blk-sysfs.c index 45c56d8..cb0f1a0 100644 --- a/block/blk-sysfs.c +++ b/block/blk-sysfs.c @@ -200,6 +200,8 @@ queue_store_##name(struct request_queue *q, const char *page, size_t count) \ unsigned long val; \ ssize_t ret; \ ret = queue_var_store(&val, page, count); \ + if (ret < 0) \ + return ret; \ if (neg) \ val = !val; \ \ diff --git a/block/genhd.c b/block/genhd.c index a1b0b90..de2120a 100644 --- a/block/genhd.c +++ b/block/genhd.c @@ -518,7 +518,7 @@ void register_disk(struct gendisk *disk) ddev->parent = disk->driverfs_dev; - dev_set_name(ddev, disk->disk_name); + dev_set_name(ddev, "%s", disk->disk_name); /* delay uevents, until we scanned partition table */ dev_set_uevent_suppress(ddev, 1); diff --git a/crypto/algapi.c b/crypto/algapi.c index c3cf1a6..d44b156 100644 --- a/crypto/algapi.c +++ b/crypto/algapi.c @@ -478,7 +478,8 @@ static struct crypto_template *__crypto_lookup_template(const char *name) struct crypto_template *crypto_lookup_template(const char *name) { - return try_then_request_module(__crypto_lookup_template(name), name); + return try_then_request_module(__crypto_lookup_template(name), "%s", + name); } EXPORT_SYMBOL_GPL(crypto_lookup_template); diff --git a/crypto/algif_hash.c b/crypto/algif_hash.c index 62122a1..fed2868 100644 --- a/crypto/algif_hash.c +++ b/crypto/algif_hash.c @@ -159,6 +159,8 @@ static int hash_recvmsg(struct kiocb *unused, struct socket *sock, else if (len < ds) msg->msg_flags |= MSG_TRUNC; + msg->msg_namelen = 0; + lock_sock(sk); if (ctx->more) { ctx->more = 0; diff --git a/crypto/algif_skcipher.c b/crypto/algif_skcipher.c index 6a6dfc0..a1c4f0a 100644 --- a/crypto/algif_skcipher.c +++ b/crypto/algif_skcipher.c @@ -432,6 +432,7 @@ static int skcipher_recvmsg(struct kiocb *unused, struct socket *sock, long copied = 0; lock_sock(sk); + msg->msg_namelen = 0; for (iov = msg->msg_iov, iovlen = msg->msg_iovlen; iovlen > 0; iovlen--, iov++) { unsigned long seglen = iov->iov_len; diff --git a/crypto/api.c b/crypto/api.c index 033a714..4f98dd5 100644 --- a/crypto/api.c +++ b/crypto/api.c @@ -40,6 +40,8 @@ static inline struct crypto_alg *crypto_alg_get(struct crypto_alg *alg) return alg; } +static struct crypto_alg *crypto_larval_wait(struct crypto_alg *alg); + struct crypto_alg *crypto_mod_get(struct crypto_alg *alg) { return try_module_get(alg->cra_module) ? crypto_alg_get(alg) : NULL; @@ -150,8 +152,11 @@ static struct crypto_alg *crypto_larval_add(const char *name, u32 type, } up_write(&crypto_alg_sem); - if (alg != &larval->alg) + if (alg != &larval->alg) { kfree(larval); + if (crypto_is_larval(alg)) + alg = crypto_larval_wait(alg); + } return alg; } diff --git a/crypto/gcm.c b/crypto/gcm.c index 1a25263..b97b186 100644 --- a/crypto/gcm.c +++ b/crypto/gcm.c @@ -44,6 +44,7 @@ struct crypto_rfc4543_ctx { struct crypto_rfc4543_req_ctx { u8 auth_tag[16]; + u8 assocbuf[32]; struct scatterlist cipher[1]; struct scatterlist payload[2]; struct scatterlist assoc[2]; @@ -1142,9 +1143,19 @@ static struct aead_request *crypto_rfc4543_crypt(struct aead_request *req, scatterwalk_crypto_chain(payload, dst, vdst == req->iv + 8, 2); assoclen += 8 + req->cryptlen - (enc ? 
0 : authsize); - sg_init_table(assoc, 2); - sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, - req->assoc->offset); + if (req->assoc->length == req->assoclen) { + sg_init_table(assoc, 2); + sg_set_page(assoc, sg_page(req->assoc), req->assoc->length, + req->assoc->offset); + } else { + BUG_ON(req->assoclen > sizeof(rctx->assocbuf)); + + scatterwalk_map_and_copy(rctx->assocbuf, req->assoc, 0, + req->assoclen, 0); + + sg_init_table(assoc, 2); + sg_set_buf(assoc, rctx->assocbuf, req->assoclen); + } scatterwalk_crypto_chain(assoc, payload, 0, 2); aead_request_set_tfm(subreq, ctx->child); diff --git a/drivers/acpi/acpi_ipmi.c b/drivers/acpi/acpi_ipmi.c index f40acef..a6977e1 100644 --- a/drivers/acpi/acpi_ipmi.c +++ b/drivers/acpi/acpi_ipmi.c @@ -39,6 +39,7 @@ #include <linux/ipmi.h> #include <linux/device.h> #include <linux/pnp.h> +#include <linux/spinlock.h> MODULE_AUTHOR("Zhao Yakui"); MODULE_DESCRIPTION("ACPI IPMI Opregion driver"); @@ -57,7 +58,7 @@ struct acpi_ipmi_device { struct list_head head; /* the IPMI request message list */ struct list_head tx_msg_list; - struct mutex tx_msg_lock; + spinlock_t tx_msg_lock; acpi_handle handle; struct pnp_dev *pnp_dev; ipmi_user_t user_interface; @@ -147,6 +148,7 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, struct kernel_ipmi_msg *msg; struct acpi_ipmi_buffer *buffer; struct acpi_ipmi_device *device; + unsigned long flags; msg = &tx_msg->tx_message; /* @@ -177,10 +179,10 @@ static void acpi_format_ipmi_msg(struct acpi_ipmi_msg *tx_msg, /* Get the msgid */ device = tx_msg->device; - mutex_lock(&device->tx_msg_lock); + spin_lock_irqsave(&device->tx_msg_lock, flags); device->curr_msgid++; tx_msg->tx_msgid = device->curr_msgid; - mutex_unlock(&device->tx_msg_lock); + spin_unlock_irqrestore(&device->tx_msg_lock, flags); } static void acpi_format_ipmi_response(struct acpi_ipmi_msg *msg, @@ -242,6 +244,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) int msg_found = 0; struct acpi_ipmi_msg *tx_msg; struct pnp_dev *pnp_dev = ipmi_device->pnp_dev; + unsigned long flags; if (msg->user != ipmi_device->user_interface) { dev_warn(&pnp_dev->dev, "Unexpected response is returned. " @@ -250,7 +253,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) ipmi_free_recv_msg(msg); return; } - mutex_lock(&ipmi_device->tx_msg_lock); + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); list_for_each_entry(tx_msg, &ipmi_device->tx_msg_list, head) { if (msg->msgid == tx_msg->tx_msgid) { msg_found = 1; @@ -258,7 +261,7 @@ static void ipmi_msg_handler(struct ipmi_recv_msg *msg, void *user_msg_data) } } - mutex_unlock(&ipmi_device->tx_msg_lock); + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); if (!msg_found) { dev_warn(&pnp_dev->dev, "Unexpected response (msg id %ld) is " "returned.\n", msg->msgid); @@ -378,6 +381,7 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, struct acpi_ipmi_device *ipmi_device = handler_context; int err, rem_time; acpi_status status; + unsigned long flags; /* * IPMI opregion message. 
* IPMI message is firstly written to the BMC and system software @@ -395,9 +399,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, return AE_NO_MEMORY; acpi_format_ipmi_msg(tx_msg, address, value); - mutex_lock(&ipmi_device->tx_msg_lock); + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); list_add_tail(&tx_msg->head, &ipmi_device->tx_msg_list); - mutex_unlock(&ipmi_device->tx_msg_lock); + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); err = ipmi_request_settime(ipmi_device->user_interface, &tx_msg->addr, tx_msg->tx_msgid, @@ -413,9 +417,9 @@ acpi_ipmi_space_handler(u32 function, acpi_physical_address address, status = AE_OK; end_label: - mutex_lock(&ipmi_device->tx_msg_lock); + spin_lock_irqsave(&ipmi_device->tx_msg_lock, flags); list_del(&tx_msg->head); - mutex_unlock(&ipmi_device->tx_msg_lock); + spin_unlock_irqrestore(&ipmi_device->tx_msg_lock, flags); kfree(tx_msg); return status; } @@ -457,7 +461,7 @@ static void acpi_add_ipmi_device(struct acpi_ipmi_device *ipmi_device) INIT_LIST_HEAD(&ipmi_device->head); - mutex_init(&ipmi_device->tx_msg_lock); + spin_lock_init(&ipmi_device->tx_msg_lock); INIT_LIST_HEAD(&ipmi_device->tx_msg_list); ipmi_install_space_handler(ipmi_device); diff --git a/drivers/acpi/acpi_memhotplug.c b/drivers/acpi/acpi_memhotplug.c index d985713..f81597f 100644 --- a/drivers/acpi/acpi_memhotplug.c +++ b/drivers/acpi/acpi_memhotplug.c @@ -421,6 +421,7 @@ static int acpi_memory_device_add(struct acpi_device *device) /* Get the range from the _CRS */ result = acpi_memory_get_device_resources(mem_device); if (result) { + device->driver_data = NULL; kfree(mem_device); return result; } diff --git a/drivers/acpi/acpica/exfldio.c b/drivers/acpi/acpica/exfldio.c index f915a7f..b334f54 100644 --- a/drivers/acpi/acpica/exfldio.c +++ b/drivers/acpi/acpica/exfldio.c @@ -702,7 +702,19 @@ acpi_ex_extract_from_field(union acpi_operand_object *obj_desc, if ((obj_desc->common_field.start_field_bit_offset == 0) && (obj_desc->common_field.bit_length == access_bit_width)) { - status = acpi_ex_field_datum_io(obj_desc, 0, buffer, ACPI_READ); + if (buffer_length >= sizeof(u64)) { + status = + acpi_ex_field_datum_io(obj_desc, 0, buffer, + ACPI_READ); + } else { + /* Use raw_datum (u64) to handle buffers < 64 bits */ + + status = + acpi_ex_field_datum_io(obj_desc, 0, &raw_datum, + ACPI_READ); + ACPI_MEMCPY(buffer, &raw_datum, buffer_length); + } + return_ACPI_STATUS(status); } diff --git a/drivers/acpi/battery.c b/drivers/acpi/battery.c index 4a15d57..908f40a 100644 --- a/drivers/acpi/battery.c +++ b/drivers/acpi/battery.c @@ -118,6 +118,7 @@ struct acpi_battery { struct acpi_device *device; struct notifier_block pm_nb; unsigned long update_time; + int revision; int rate_now; int capacity_now; int voltage_now; @@ -350,6 +351,7 @@ static struct acpi_offsets info_offsets[] = { }; static struct acpi_offsets extended_info_offsets[] = { + {offsetof(struct acpi_battery, revision), 0}, {offsetof(struct acpi_battery, power_unit), 0}, {offsetof(struct acpi_battery, design_capacity), 0}, {offsetof(struct acpi_battery, full_charge_capacity), 0}, diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c index d2519b2..af66776 100644 --- a/drivers/acpi/ec.c +++ b/drivers/acpi/ec.c @@ -217,7 +217,7 @@ static int ec_check_sci_sync(struct acpi_ec *ec, u8 state) static int ec_poll(struct acpi_ec *ec) { unsigned long flags; - int repeat = 2; /* number of command restarts */ + int repeat = 5; /* number of command restarts */ while (repeat--) { unsigned long delay = jiffies + 
msecs_to_jiffies(ec_delay); @@ -235,8 +235,6 @@ static int ec_poll(struct acpi_ec *ec) } advance_transaction(ec, acpi_ec_read_status(ec)); } while (time_before(jiffies, delay)); - if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF) - break; pr_debug(PREFIX "controller reset, restart transaction\n"); spin_lock_irqsave(&ec->curr_lock, flags); start_transaction(ec); @@ -966,6 +964,14 @@ static struct dmi_system_id __initdata ec_dmi_table[] = { ec_enlarge_storm_threshold, "CLEVO hardware", { DMI_MATCH(DMI_SYS_VENDOR, "CLEVO Co."), DMI_MATCH(DMI_PRODUCT_NAME, "M720T/M730T"),}, NULL}, + { + ec_skip_dsdt_scan, "HP Folio 13", { + DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13"),}, NULL}, + { + ec_validate_ecdt, "ASUS hardware", { + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTek Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME, "L4R"),}, NULL}, {}, }; diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c index dfafecb..ea89d85 100644 --- a/drivers/acpi/pci_root.c +++ b/drivers/acpi/pci_root.c @@ -247,8 +247,8 @@ static acpi_status acpi_pci_query_osc(struct acpi_pci_root *root, *control &= OSC_PCI_CONTROL_MASKS; capbuf[OSC_CONTROL_TYPE] = *control | root->osc_control_set; } else { - /* Run _OSC query for all possible controls. */ - capbuf[OSC_CONTROL_TYPE] = OSC_PCI_CONTROL_MASKS; + /* Run _OSC query only with existing controls. */ + capbuf[OSC_CONTROL_TYPE] = root->osc_control_set; } status = acpi_pci_run_osc(root->device->handle, capbuf, &result); diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c index 76f0b94..6f42a5d 100644 --- a/drivers/acpi/video.c +++ b/drivers/acpi/video.c @@ -448,6 +448,22 @@ static struct dmi_system_id video_dmi_table[] __initdata = { DMI_MATCH(DMI_PRODUCT_NAME, "HP Folio 13 - 2000 Notebook PC"), }, }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Pavilion g6 Notebook PC", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion g6 Notebook PC"), + }, + }, + { + .callback = video_ignore_initial_backlight, + .ident = "HP Pavilion m4", + .matches = { + DMI_MATCH(DMI_BOARD_VENDOR, "Hewlett-Packard"), + DMI_MATCH(DMI_PRODUCT_NAME, "HP Pavilion m4 Notebook PC"), + }, + }, {} }; diff --git a/drivers/ata/Kconfig b/drivers/ata/Kconfig index 75afa75..34575fb 100644 --- a/drivers/ata/Kconfig +++ b/drivers/ata/Kconfig @@ -93,7 +93,7 @@ config SATA_FSL If unsure, say N. config SATA_INIC162X - tristate "Initio 162x SATA support" + tristate "Initio 162x SATA support (Very Experimental)" depends on PCI help This option enables support for Initio 162x Serial ATA. 
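[Editorial note] The drivers/acpi/ec.c hunk above raises the command-restart count from 2 to 5 and removes the early IBF bail-out, so each attempt now polls for its full window before the transaction is restarted. A minimal standalone sketch of that retry-with-timeout pattern follows; it is not the kernel's code, and ec_transaction_done()/ec_advance()/ec_restart() plus the step counts are illustrative stand-ins for the real state machine and jiffies-based timeout.

/* Sketch of a poll-with-restart loop in the spirit of the ec_poll() change. */
#include <stdio.h>
#include <stdbool.h>

#define EC_RETRIES    5        /* number of command restarts, as in the hunk */
#define EC_POLL_STEPS 1000     /* stand-in for jiffies + msecs_to_jiffies(ec_delay) */

static int steps_until_done = 2500;          /* fake hardware progress */

static bool ec_transaction_done(void) { return steps_until_done <= 0; }
static void ec_advance(void)          { steps_until_done--; }
static void ec_restart(void)          { /* controller reset + restart in the real driver */ }

static int ec_poll_sketch(void)
{
    int repeat = EC_RETRIES;

    while (repeat--) {
        long budget = EC_POLL_STEPS;         /* one attempt's polling budget */

        do {
            if (ec_transaction_done())
                return 0;                    /* completed within this attempt */
            ec_advance();                    /* drive the EC state machine forward */
        } while (--budget > 0);

        ec_restart();                        /* attempt exhausted: restart the transaction */
    }
    return -1;                               /* -ETIME in the real driver */
}

int main(void)
{
    printf("ec_poll_sketch() = %d\n", ec_poll_sketch());
    return 0;
}

With the fake numbers above the transaction completes on the third of five attempts, which is the behaviour the larger retry budget is meant to allow.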
diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c index 75a8d0f..f3d09f3 100644 --- a/drivers/ata/ahci.c +++ b/drivers/ata/ahci.c @@ -284,6 +284,7 @@ static const struct pci_device_id ahci_pci_tbl[] = { /* AMD */ { PCI_VDEVICE(AMD, 0x7800), board_ahci }, /* AMD Hudson-2 */ + { PCI_VDEVICE(AMD, 0x7900), board_ahci }, /* AMD CZ */ /* AMD is using RAID class only for ahci controllers */ { PCI_VENDOR_ID_AMD, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_STORAGE_RAID << 8, 0xffffff, board_ahci }, diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c index 41223c7..b64e4a7 100644 --- a/drivers/ata/libahci.c +++ b/drivers/ata/libahci.c @@ -1495,8 +1495,7 @@ static void ahci_error_intr(struct ata_port *ap, u32 irq_stat) u32 fbs = readl(port_mmio + PORT_FBS); int pmp = fbs >> PORT_FBS_DWE_OFFSET; - if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links) && - ata_link_online(&ap->pmp_link[pmp])) { + if ((fbs & PORT_FBS_SDE) && (pmp < ap->nr_pmp_links)) { link = &ap->pmp_link[pmp]; fbs_need_dec = true; } diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c index 5a72e36..aa5f055 100644 --- a/drivers/ata/libata-core.c +++ b/drivers/ata/libata-core.c @@ -1599,6 +1599,12 @@ unsigned ata_exec_internal_sg(struct ata_device *dev, qc->tf = *tf; if (cdb) memcpy(qc->cdb, cdb, ATAPI_CDB_LEN); + + /* some SATA bridges need us to indicate data xfer direction */ + if (tf->protocol == ATAPI_PROT_DMA && (dev->flags & ATA_DFLAG_DMADIR) && + dma_dir == DMA_FROM_DEVICE) + qc->tf.feature |= ATAPI_DMADIR; + qc->flags |= ATA_QCFLAG_RESULT_TF; qc->dma_dir = dma_dir; if (dma_dir != DMA_NONE) { @@ -2412,6 +2418,9 @@ int ata_dev_configure(struct ata_device *dev) dev->max_sectors = min_t(unsigned int, ATA_MAX_SECTORS_128, dev->max_sectors); + if (dev->horkage & ATA_HORKAGE_MAX_SEC_LBA48) + dev->max_sectors = ATA_MAX_SECTORS_LBA48; + if (ap->ops->dev_config) ap->ops->dev_config(dev); @@ -4077,6 +4086,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { /* Weird ATAPI devices */ { "TORiSAN DVD-ROM DRD-N216", NULL, ATA_HORKAGE_MAX_SEC_128 }, { "QUANTUM DAT DAT72-000", NULL, ATA_HORKAGE_ATAPI_MOD16_DMA }, + { "Slimtype DVD A DS8A8SH", NULL, ATA_HORKAGE_MAX_SEC_LBA48 }, /* Devices we expect to fail diagnostics */ diff --git a/drivers/ata/libata-pmp.c b/drivers/ata/libata-pmp.c index f06b7ea..cf9dc09 100644 --- a/drivers/ata/libata-pmp.c +++ b/drivers/ata/libata-pmp.c @@ -288,24 +288,24 @@ static int sata_pmp_configure(struct ata_device *dev, int print_info) /* Disable sending Early R_OK. * With "cached read" HDD testing and multiple ports busy on a SATA - * host controller, 3726 PMP will very rarely drop a deferred + * host controller, 3x26 PMP will very rarely drop a deferred * R_OK that was intended for the host. Symptom will be all * 5 drives under test will timeout, get reset, and recover. 
*/ - if (vendor == 0x1095 && devid == 0x3726) { + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { u32 reg; err_mask = sata_pmp_read(&ap->link, PMP_GSCR_SII_POL, ®); if (err_mask) { rc = -EIO; - reason = "failed to read Sil3726 Private Register"; + reason = "failed to read Sil3x26 Private Register"; goto fail; } reg &= ~0x1; err_mask = sata_pmp_write(&ap->link, PMP_GSCR_SII_POL, reg); if (err_mask) { rc = -EIO; - reason = "failed to write Sil3726 Private Register"; + reason = "failed to write Sil3x26 Private Register"; goto fail; } } @@ -383,8 +383,8 @@ static void sata_pmp_quirks(struct ata_port *ap) u16 devid = sata_pmp_gscr_devid(gscr); struct ata_link *link; - if (vendor == 0x1095 && devid == 0x3726) { - /* sil3726 quirks */ + if (vendor == 0x1095 && (devid == 0x3726 || devid == 0x3826)) { + /* sil3x26 quirks */ ata_for_each_link(link, ap, EDGE) { /* link reports offline after LPM */ link->flags |= ATA_LFLAG_NO_LPM; diff --git a/drivers/ata/sata_inic162x.c b/drivers/ata/sata_inic162x.c index 83a4447..15391ee 100644 --- a/drivers/ata/sata_inic162x.c +++ b/drivers/ata/sata_inic162x.c @@ -6,6 +6,18 @@ * * This file is released under GPL v2. * + * **** WARNING **** + * + * This driver never worked properly and unfortunately data corruption is + * relatively common. There isn't anyone working on the driver and there's + * no support from the vendor. Do not use this driver in any production + * environment. + * + * http://thread.gmane.org/gmane.linux.debian.devel.bugs.rc/378525/focus=54491 + * https://bugzilla.kernel.org/show_bug.cgi?id=60565 + * + * ***************** + * * This controller is eccentric and easily locks up if something isn't * right. Documentation is available at initio's website but it only * documents registers (not programming model). 
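[Editorial note] The libata-pmp hunk above extends the Sil3726 quirk to the Sil3826 and disables Early R_OK by clearing bit 0 of the port multiplier's vendor-private register, bailing out if either the read or the write fails. A compilable sketch of that read-modify-write pattern is below; pmp_read()/pmp_write(), the register index, and the fake register contents are illustrative stand-ins for sata_pmp_read()/sata_pmp_write() and real hardware.

/* Sketch of the Sil3x26 "Early R_OK" read-modify-write quirk. */
#include <stdint.h>
#include <stdio.h>

#define PMP_GSCR_SII_POL_IDX 0x84   /* illustrative register index, not the kernel define */
#define SII_EARLY_R_OK       0x1    /* bit 0: send Early R_OK */

static uint32_t fake_reg = 0x3;     /* pretend private register contents */

static int pmp_read(int reg, uint32_t *val) { (void)reg; *val = fake_reg; return 0; }
static int pmp_write(int reg, uint32_t val) { (void)reg; fake_reg = val;  return 0; }

static int disable_early_r_ok(void)
{
    uint32_t reg;

    if (pmp_read(PMP_GSCR_SII_POL_IDX, &reg))
        return -1;                  /* "failed to read Sil3x26 Private Register" */

    reg &= ~SII_EARLY_R_OK;         /* clear bit 0, as in the quirk */

    if (pmp_write(PMP_GSCR_SII_POL_IDX, reg))
        return -1;                  /* "failed to write Sil3x26 Private Register" */

    return 0;
}

int main(void)
{
    int rc = disable_early_r_ok();
    printf("disable_early_r_ok() = %d, register now 0x%x\n", rc, fake_reg);
    return 0;
}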
@@ -810,6 +822,8 @@ static int inic_init_one(struct pci_dev *pdev, const struct pci_device_id *ent) if (!printed_version++) dev_printk(KERN_DEBUG, &pdev->dev, "version " DRV_VERSION "\n"); + dev_alert(&pdev->dev, "inic162x support is broken with common data corruption issues and will be disabled by default, contact linux-ide@vger.kernel.org if in production use\n"); + /* alloc host */ host = ata_host_alloc_pinfo(&pdev->dev, ppi, NR_PORTS); hpriv = devm_kzalloc(&pdev->dev, sizeof(*hpriv), GFP_KERNEL); diff --git a/drivers/base/memory.c b/drivers/base/memory.c index 5fb6aae..02653fc 100644 --- a/drivers/base/memory.c +++ b/drivers/base/memory.c @@ -172,6 +172,8 @@ static ssize_t show_mem_removable(struct sys_device *dev, container_of(dev, struct memory_block, sysdev); for (i = 0; i < sections_per_block; i++) { + if (!present_section_nr(mem->start_section_nr + i)) + continue; pfn = section_nr_to_pfn(mem->start_section_nr + i); ret &= is_mem_section_removable(pfn, PAGES_PER_SECTION); } diff --git a/drivers/block/brd.c b/drivers/block/brd.c index dba1c32..f5dfb8498 100644 --- a/drivers/block/brd.c +++ b/drivers/block/brd.c @@ -117,13 +117,13 @@ static struct page *brd_insert_page(struct brd_device *brd, sector_t sector) spin_lock(&brd->brd_lock); idx = sector >> PAGE_SECTORS_SHIFT; + page->index = idx; if (radix_tree_insert(&brd->brd_pages, idx, page)) { __free_page(page); page = radix_tree_lookup(&brd->brd_pages, idx); BUG_ON(!page); BUG_ON(page->index != idx); - } else - page->index = idx; + } spin_unlock(&brd->brd_lock); radix_tree_preload_end(); diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c index 1dab802..9ee7fe7 100644 --- a/drivers/block/cciss.c +++ b/drivers/block/cciss.c @@ -1179,6 +1179,7 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode, int err; u32 cp; + memset(&arg64, 0, sizeof(arg64)); err = 0; err |= copy_from_user(&arg64.LUN_info, &arg32->LUN_info, diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c index b2fceb5..e1ac6d2 100644 --- a/drivers/block/cpqarray.c +++ b/drivers/block/cpqarray.c @@ -1195,6 +1195,7 @@ out_passthru: ida_pci_info_struct pciinfo; if (!arg) return -EINVAL; + memset(&pciinfo, 0, sizeof(pciinfo)); pciinfo.bus = host->pci_dev->bus->number; pciinfo.dev_fn = host->pci_dev->devfn; pciinfo.board_id = host->board_id; diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c index 43beaca..13cbdd3 100644 --- a/drivers/block/drbd/drbd_receiver.c +++ b/drivers/block/drbd/drbd_receiver.c @@ -2225,7 +2225,6 @@ static int drbd_asb_recover_1p(struct drbd_conf *mdev) __must_hold(local) if (hg == -1 && mdev->state.role == R_PRIMARY) { enum drbd_state_rv rv2; - drbd_set_role(mdev, R_SECONDARY, 0); /* drbd_change_state() does not sleep while in SS_IN_TRANSIENT_STATE, * we might be here in C_WF_REPORT_PARAMS which is transient. * we do not need to wait for the after state change work either. 
*/ diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index f533f33..12a7368 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -658,7 +658,8 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *lo, mutex_unlock(&lo->tx_lock); - thread = kthread_create(nbd_thread, lo, lo->disk->disk_name); + thread = kthread_create(nbd_thread, lo, "%s", + lo->disk->disk_name); if (IS_ERR(thread)) { mutex_lock(&lo->tx_lock); return PTR_ERR(thread); diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c index cc6471a..fd484a9 100644 --- a/drivers/cdrom/cdrom.c +++ b/drivers/cdrom/cdrom.c @@ -2879,7 +2879,7 @@ static noinline int mmc_ioctl_cdrom_read_data(struct cdrom_device_info *cdi, if (lba < 0) return -EINVAL; - cgc->buffer = kmalloc(blocksize, GFP_KERNEL); + cgc->buffer = kzalloc(blocksize, GFP_KERNEL); if (cgc->buffer == NULL) return -ENOMEM; diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c index 34d6a1c..853f8e8 100644 --- a/drivers/char/hpet.c +++ b/drivers/char/hpet.c @@ -374,26 +374,14 @@ static int hpet_mmap(struct file *file, struct vm_area_struct *vma) struct hpet_dev *devp; unsigned long addr; - if (((vma->vm_end - vma->vm_start) != PAGE_SIZE) || vma->vm_pgoff) - return -EINVAL; - devp = file->private_data; addr = devp->hd_hpets->hp_hpet_phys; if (addr & (PAGE_SIZE - 1)) return -ENOSYS; - vma->vm_flags |= VM_IO; vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); - - if (io_remap_pfn_range(vma, vma->vm_start, addr >> PAGE_SHIFT, - PAGE_SIZE, vma->vm_page_prot)) { - printk(KERN_ERR "%s: io_remap_pfn_range failed\n", - __func__); - return -EAGAIN; - } - - return 0; + return vm_iomap_memory(vma, addr, PAGE_SIZE); #else return -ENOSYS; #endif diff --git a/drivers/char/ipmi/ipmi_bt_sm.c b/drivers/char/ipmi/ipmi_bt_sm.c index 3ed20e8..92ce302 100644 --- a/drivers/char/ipmi/ipmi_bt_sm.c +++ b/drivers/char/ipmi/ipmi_bt_sm.c @@ -95,9 +95,9 @@ struct si_sm_data { enum bt_states state; unsigned char seq; /* BT sequence number */ struct si_sm_io *io; - unsigned char write_data[IPMI_MAX_MSG_LENGTH]; + unsigned char write_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int write_count; - unsigned char read_data[IPMI_MAX_MSG_LENGTH]; + unsigned char read_data[IPMI_MAX_MSG_LENGTH + 2]; /* +2 for memcpy */ int read_count; int truncated; long timeout; /* microseconds countdown */ diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c index 2aa3977..8dde1f5 100644 --- a/drivers/char/ipmi/ipmi_devintf.c +++ b/drivers/char/ipmi/ipmi_devintf.c @@ -838,13 +838,25 @@ static long compat_ipmi_ioctl(struct file *filep, unsigned int cmd, return ipmi_ioctl(filep, cmd, arg); } } + +static long unlocked_compat_ipmi_ioctl(struct file *filep, unsigned int cmd, + unsigned long arg) +{ + int ret; + + mutex_lock(&ipmi_mutex); + ret = compat_ipmi_ioctl(filep, cmd, arg); + mutex_unlock(&ipmi_mutex); + + return ret; +} #endif static const struct file_operations ipmi_fops = { .owner = THIS_MODULE, .unlocked_ioctl = ipmi_unlocked_ioctl, #ifdef CONFIG_COMPAT - .compat_ioctl = compat_ipmi_ioctl, + .compat_ioctl = unlocked_compat_ipmi_ioctl, #endif .open = ipmi_open, .release = ipmi_release, diff --git a/drivers/char/random.c b/drivers/char/random.c index fceac95..e7e479c 100644 --- a/drivers/char/random.c +++ b/drivers/char/random.c @@ -1435,12 +1435,11 @@ ctl_table random_table[] = { static u32 random_int_secret[MD5_MESSAGE_BYTES / 4] ____cacheline_aligned; -static int __init random_int_secret_init(void) +int random_int_secret_init(void) { 
get_random_bytes(random_int_secret, sizeof(random_int_secret)); return 0; } -late_initcall(random_int_secret_init); /* * Get a random word for internal kernel use only. Similar to urandom but diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index a95256a..f2fb2f2 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -241,9 +241,12 @@ static struct port *find_port_by_devt_in_portdev(struct ports_device *portdev, unsigned long flags; spin_lock_irqsave(&portdev->ports_lock, flags); - list_for_each_entry(port, &portdev->ports, list) - if (port->cdev->dev == dev) + list_for_each_entry(port, &portdev->ports, list) { + if (port->cdev->dev == dev) { + kref_get(&port->kref); goto out; + } + } port = NULL; out: spin_unlock_irqrestore(&portdev->ports_lock, flags); @@ -622,6 +625,10 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, port = filp->private_data; + /* Port is hot-unplugged. */ + if (!port->guest_connected) + return -ENODEV; + if (!port_has_data(port)) { /* * If nothing's connected on the host just return 0 in @@ -638,7 +645,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf, if (ret < 0) return ret; } - /* Port got hot-unplugged. */ + /* Port got hot-unplugged while we were waiting above. */ if (!port->guest_connected) return -ENODEV; /* @@ -781,14 +788,14 @@ static int port_fops_open(struct inode *inode, struct file *filp) struct port *port; int ret; + /* We get the port with a kref here */ port = find_port_by_devt(cdev->dev); + if (!port) { + /* Port was unplugged before we could proceed */ + return -ENXIO; + } filp->private_data = port; - /* Prevent against a port getting hot-unplugged at the same time */ - spin_lock_irq(&port->portdev->ports_lock); - kref_get(&port->kref); - spin_unlock_irq(&port->portdev->ports_lock); - /* * Don't allow opening of console port devices -- that's done * via /dev/hvc @@ -1243,14 +1250,6 @@ static void remove_port(struct kref *kref) port = container_of(kref, struct port, kref); - sysfs_remove_group(&port->dev->kobj, &port_attribute_group); - device_destroy(pdrvdata.class, port->dev->devt); - cdev_del(port->cdev); - - kfree(port->name); - - debugfs_remove(port->debugfs_file); - kfree(port); } @@ -1268,12 +1267,14 @@ static void unplug_port(struct port *port) spin_unlock_irq(&port->portdev->ports_lock); if (port->guest_connected) { + /* Let the app know the port is going down. */ + send_sigio_to_port(port); + + /* Do this after sigio is actually sent */ port->guest_connected = false; port->host_connected = false; - wake_up_interruptible(&port->waitqueue); - /* Let the app know the port is going down. 
*/ - send_sigio_to_port(port); + wake_up_interruptible(&port->waitqueue); } if (is_console_port(port)) { @@ -1299,6 +1300,14 @@ static void unplug_port(struct port *port) */ port->portdev = NULL; + sysfs_remove_group(&port->dev->kobj, &port_attribute_group); + device_destroy(pdrvdata.class, port->dev->devt); + cdev_del(port->cdev); + + kfree(port->name); + + debugfs_remove(port->debugfs_file); + /* * Locks around here are not necessary - a port can't be * opened after we removed the port struct from ports_list diff --git a/drivers/dma/pch_dma.c b/drivers/dma/pch_dma.c index 1ed89d0..e6f128a 100644 --- a/drivers/dma/pch_dma.c +++ b/drivers/dma/pch_dma.c @@ -489,7 +489,7 @@ static struct pch_dma_desc *pdc_desc_get(struct pch_dma_chan *pd_chan) dev_dbg(chan2dev(&pd_chan->chan), "scanned %d descriptors\n", i); if (!ret) { - ret = pdc_alloc_desc(&pd_chan->chan, GFP_NOIO); + ret = pdc_alloc_desc(&pd_chan->chan, GFP_ATOMIC); if (ret) { spin_lock(&pd_chan->lock); pd_chan->descs_allocated++; diff --git a/drivers/dma/pl330.c b/drivers/dma/pl330.c index 6abe1ec..4802aac 100644 --- a/drivers/dma/pl330.c +++ b/drivers/dma/pl330.c @@ -281,10 +281,10 @@ static void pl330_free_chan_resources(struct dma_chan *chan) struct dma_pl330_chan *pch = to_pchan(chan); unsigned long flags; - spin_lock_irqsave(&pch->lock, flags); - tasklet_kill(&pch->task); + spin_lock_irqsave(&pch->lock, flags); + pl330_release_channel(pch->pl330_chid); pch->pl330_chid = NULL; diff --git a/drivers/gpu/drm/drm_irq.c b/drivers/gpu/drm/drm_irq.c index 2022a5c..3dc3c9e 100644 --- a/drivers/gpu/drm/drm_irq.c +++ b/drivers/gpu/drm/drm_irq.c @@ -976,7 +976,7 @@ EXPORT_SYMBOL(drm_vblank_off); */ void drm_vblank_pre_modeset(struct drm_device *dev, int crtc) { - /* vblank is not initialized (IRQ not installed ?) 
*/ + /* vblank is not initialized (IRQ not installed ?), or has been freed */ if (!dev->num_crtcs) return; /* @@ -998,6 +998,10 @@ void drm_vblank_post_modeset(struct drm_device *dev, int crtc) { unsigned long irqflags; + /* vblank is not initialized (IRQ not installed ?), or has been freed */ + if (!dev->num_crtcs) + return; + if (dev->vblank_inmodeset[crtc]) { spin_lock_irqsave(&dev->vbl_lock, irqflags); dev->vblank_disable_allowed = 1; diff --git a/drivers/gpu/drm/drm_mm.c b/drivers/gpu/drm/drm_mm.c index 959186c..01894e4 100644 --- a/drivers/gpu/drm/drm_mm.c +++ b/drivers/gpu/drm/drm_mm.c @@ -679,33 +679,35 @@ void drm_mm_debug_table(struct drm_mm *mm, const char *prefix) EXPORT_SYMBOL(drm_mm_debug_table); #if defined(CONFIG_DEBUG_FS) -int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +static unsigned long drm_mm_dump_hole(struct seq_file *m, struct drm_mm_node *entry) { - struct drm_mm_node *entry; - unsigned long total_used = 0, total_free = 0, total = 0; unsigned long hole_start, hole_end, hole_size; - hole_start = drm_mm_hole_node_start(&mm->head_node); - hole_end = drm_mm_hole_node_end(&mm->head_node); - hole_size = hole_end - hole_start; - if (hole_size) + if (entry->hole_follows) { + hole_start = drm_mm_hole_node_start(entry); + hole_end = drm_mm_hole_node_end(entry); + hole_size = hole_end - hole_start; seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", hole_start, hole_end, hole_size); - total_free += hole_size; + return hole_size; + } + + return 0; +} + +int drm_mm_dump_table(struct seq_file *m, struct drm_mm *mm) +{ + struct drm_mm_node *entry; + unsigned long total_used = 0, total_free = 0, total = 0; + + total_free += drm_mm_dump_hole(m, &mm->head_node); drm_mm_for_each_node(entry, mm) { seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: used\n", entry->start, entry->start + entry->size, entry->size); total_used += entry->size; - if (entry->hole_follows) { - hole_start = drm_mm_hole_node_start(entry); - hole_end = drm_mm_hole_node_end(entry); - hole_size = hole_end - hole_start; - seq_printf(m, "0x%08lx-0x%08lx: 0x%08lx: free\n", - hole_start, hole_end, hole_size); - total_free += hole_size; - } + total_free += drm_mm_dump_hole(m, entry); } total = total_free + total_used; diff --git a/drivers/gpu/drm/i915/intel_dp.c b/drivers/gpu/drm/i915/intel_dp.c index bf9fea9..57b3728 100644 --- a/drivers/gpu/drm/i915/intel_dp.c +++ b/drivers/gpu/drm/i915/intel_dp.c @@ -531,7 +531,18 @@ intel_dp_i2c_aux_ch(struct i2c_adapter *adapter, int mode, DRM_DEBUG_KMS("aux_ch native nack\n"); return -EREMOTEIO; case AUX_NATIVE_REPLY_DEFER: - udelay(100); + /* + * For now, just give more slack to branch devices. We + * could check the DPCD for I2C bit rate capabilities, + * and if available, adjust the interval. We could also + * be more careful with DP-to-Legacy adapters where a + * long legacy cable may force very low I2C bit rates. 
+ */ + if (intel_dp->dpcd[DP_DOWNSTREAMPORT_PRESENT] & + DP_DWN_STRM_PORT_PRESENT) + usleep_range(500, 600); + else + usleep_range(300, 400); continue; default: DRM_ERROR("aux_ch invalid native reply 0x%02x\n", diff --git a/drivers/gpu/drm/i915/intel_dvo.c b/drivers/gpu/drm/i915/intel_dvo.c index 6eda1b5..8ac91b8 100644 --- a/drivers/gpu/drm/i915/intel_dvo.c +++ b/drivers/gpu/drm/i915/intel_dvo.c @@ -371,6 +371,7 @@ void intel_dvo_init(struct drm_device *dev) const struct intel_dvo_device *dvo = &intel_dvo_devices[i]; struct i2c_adapter *i2c; int gpio; + bool dvoinit; /* Allow the I2C driver info to specify the GPIO to be used in * special cases, but otherwise default to what's defined @@ -390,7 +391,17 @@ void intel_dvo_init(struct drm_device *dev) i2c = &dev_priv->gmbus[gpio].adapter; intel_dvo->dev = *dvo; - if (!dvo->dev_ops->init(&intel_dvo->dev, i2c)) + + /* GMBUS NAK handling seems to be unstable, hence let the + * transmitter detection run in bit banging mode for now. + */ + intel_gmbus_force_bit(i2c, true); + + dvoinit = dvo->dev_ops->init(&intel_dvo->dev, i2c); + + intel_gmbus_force_bit(i2c, false); + + if (!dvoinit) continue; intel_encoder->type = INTEL_OUTPUT_DVO; diff --git a/drivers/gpu/drm/i915/intel_lvds.c b/drivers/gpu/drm/i915/intel_lvds.c index eebf002..09881ac 100644 --- a/drivers/gpu/drm/i915/intel_lvds.c +++ b/drivers/gpu/drm/i915/intel_lvds.c @@ -767,6 +767,14 @@ static const struct dmi_system_id intel_no_lvds[] = { DMI_MATCH(DMI_PRODUCT_NAME, "X7SPA-H"), }, }, + { + .callback = intel_no_lvds_dmi_callback, + .ident = "Fujitsu Esprimo Q900", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "FUJITSU"), + DMI_MATCH(DMI_PRODUCT_NAME, "ESPRIMO Q900"), + }, + }, { } /* terminating entry */ }; diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c index d1141e8..06b51ff 100644 --- a/drivers/gpu/drm/i915/intel_sdvo.c +++ b/drivers/gpu/drm/i915/intel_sdvo.c @@ -1609,11 +1609,14 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) * Assume that the preferred modes are * arranged in priority order. */ - intel_ddc_get_modes(connector, intel_sdvo->i2c); - if (list_empty(&connector->probed_modes) == false) - goto end; + intel_ddc_get_modes(connector, &intel_sdvo->ddc); - /* Fetch modes from VBT */ + /* + * Fetch modes from VBT. For SDVO prefer the VBT mode since some + * SDVO->LVDS transcoders can't cope with the EDID mode. Since + * drm_mode_probed_add adds the mode at the head of the list we add it + * last. 
+ */ if (dev_priv->sdvo_lvds_vbt_mode != NULL) { newmode = drm_mode_duplicate(connector->dev, dev_priv->sdvo_lvds_vbt_mode); @@ -1625,7 +1628,6 @@ static void intel_sdvo_get_lvds_modes(struct drm_connector *connector) } } -end: list_for_each_entry(newmode, &connector->probed_modes, head) { if (newmode->type & DRM_MODE_TYPE_PREFERRED) { intel_sdvo->sdvo_lvds_fixed_mode = diff --git a/drivers/gpu/drm/radeon/atom.c b/drivers/gpu/drm/radeon/atom.c index 88661ea..a1df7d7 100644 --- a/drivers/gpu/drm/radeon/atom.c +++ b/drivers/gpu/drm/radeon/atom.c @@ -1220,12 +1220,17 @@ int atom_execute_table(struct atom_context *ctx, int index, uint32_t * params) int r; mutex_lock(&ctx->mutex); + /* reset data block */ + ctx->data_block = 0; /* reset reg block */ ctx->reg_block = 0; /* reset fb window */ ctx->fb_base = 0; /* reset io mode */ ctx->io_mode = ATOM_IO_MM; + /* reset divmul */ + ctx->divmul[0] = 0; + ctx->divmul[1] = 0; r = atom_execute_table_locked(ctx, index, params); mutex_unlock(&ctx->mutex); return r; @@ -1384,10 +1389,10 @@ int atom_allocate_fb_scratch(struct atom_context *ctx) firmware_usage = (struct _ATOM_VRAM_USAGE_BY_FIRMWARE *)(ctx->bios + data_offset); DRM_DEBUG("atom firmware requested %08x %dkb\n", - firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware, - firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb); + le32_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].ulStartAddrUsedByFirmware), + le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb)); - usage_bytes = firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb * 1024; + usage_bytes = le16_to_cpu(firmware_usage->asFirmwareVramReserveInfo[0].usFirmwareUseInKb) * 1024; } ctx->scratch_size_bytes = 0; if (usage_bytes == 0) diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c index a75d290..87ff586 100644 --- a/drivers/gpu/drm/radeon/evergreen.c +++ b/drivers/gpu/drm/radeon/evergreen.c @@ -330,6 +330,16 @@ void evergreen_hpd_init(struct radeon_device *rdev) list_for_each_entry(connector, &dev->mode_config.connector_list, head) { struct radeon_connector *radeon_connector = to_radeon_connector(connector); + + if (connector->connector_type == DRM_MODE_CONNECTOR_eDP || + connector->connector_type == DRM_MODE_CONNECTOR_LVDS) { + /* don't try to enable hpd on eDP or LVDS avoid breaking the + * aux dp channel on imac and help (but not completely fix) + * https://bugzilla.redhat.com/show_bug.cgi?id=726143 + * also avoid interrupt storms during dpms. + */ + continue; + } switch (radeon_connector->hpd.hpd) { case RADEON_HPD_1: WREG32(DC_HPD1_CONTROL, tmp); @@ -409,7 +419,8 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, struct drm_display_mode *mode, struct drm_display_mode *other_mode) { - u32 tmp; + u32 tmp, buffer_alloc, i; + u32 pipe_offset = radeon_crtc->crtc_id * 0x20; /* * Line Buffer Setup * There are 3 line buffers, each one shared by 2 display controllers. @@ -432,18 +443,34 @@ static u32 evergreen_line_buffer_adjust(struct radeon_device *rdev, * non-linked crtcs for maximum line buffer allocation. 
*/ if (radeon_crtc->base.enabled && mode) { - if (other_mode) + if (other_mode) { tmp = 0; /* 1/2 */ - else + buffer_alloc = 1; + } else { tmp = 2; /* whole */ - } else + buffer_alloc = 2; + } + } else { tmp = 0; + buffer_alloc = 0; + } /* second controller of the pair uses second half of the lb */ if (radeon_crtc->crtc_id % 2) tmp += 4; WREG32(DC_LB_MEMORY_SPLIT + radeon_crtc->crtc_offset, tmp); + if (ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) { + WREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset, + DMIF_BUFFERS_ALLOCATED(buffer_alloc)); + for (i = 0; i < rdev->usec_timeout; i++) { + if (RREG32(PIPE0_DMIF_BUFFER_CONTROL + pipe_offset) & + DMIF_BUFFERS_ALLOCATED_COMPLETED) + break; + udelay(1); + } + } + if (radeon_crtc->base.enabled && mode) { switch (tmp) { case 0: @@ -1722,7 +1749,7 @@ static void evergreen_gpu_init(struct radeon_device *rdev) rdev->config.evergreen.sx_max_export_size = 256; rdev->config.evergreen.sx_max_export_pos_size = 64; rdev->config.evergreen.sx_max_export_smx_size = 192; - rdev->config.evergreen.max_hw_contexts = 8; + rdev->config.evergreen.max_hw_contexts = 4; rdev->config.evergreen.sq_num_cf_insts = 2; rdev->config.evergreen.sc_prim_fifo_size = 0x40; diff --git a/drivers/gpu/drm/radeon/evergreend.h b/drivers/gpu/drm/radeon/evergreend.h index 6078ae4..ab670c3 100644 --- a/drivers/gpu/drm/radeon/evergreend.h +++ b/drivers/gpu/drm/radeon/evergreend.h @@ -450,6 +450,10 @@ # define LATENCY_LOW_WATERMARK(x) ((x) << 0) # define LATENCY_HIGH_WATERMARK(x) ((x) << 16) +#define PIPE0_DMIF_BUFFER_CONTROL 0x0ca0 +# define DMIF_BUFFERS_ALLOCATED(x) ((x) << 0) +# define DMIF_BUFFERS_ALLOCATED_COMPLETED (1 << 4) + #define IH_RB_CNTL 0x3e00 # define IH_RB_ENABLE (1 << 0) # define IH_IB_SIZE(x) ((x) << 1) /* log2 */ diff --git a/drivers/gpu/drm/radeon/r300_cmdbuf.c b/drivers/gpu/drm/radeon/r300_cmdbuf.c index c5c2742..a12f373 100644 --- a/drivers/gpu/drm/radeon/r300_cmdbuf.c +++ b/drivers/gpu/drm/radeon/r300_cmdbuf.c @@ -74,7 +74,7 @@ static int r300_emit_cliprects(drm_radeon_private_t *dev_priv, OUT_RING(CP_PACKET0(R300_RE_CLIPRECT_TL_0, nr * 2 - 1)); for (i = 0; i < nr; ++i) { - if (DRM_COPY_FROM_USER_UNCHECKED + if (DRM_COPY_FROM_USER (&box, &cmdbuf->boxes[n + i], sizeof(box))) { DRM_ERROR("copy cliprect faulted\n"); return -EFAULT; diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c index cee3184..f9d49e3 100644 --- a/drivers/gpu/drm/radeon/radeon_atombios.c +++ b/drivers/gpu/drm/radeon/radeon_atombios.c @@ -751,13 +751,16 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev) (ATOM_SRC_DST_TABLE_FOR_ONE_OBJECT *) (ctx->bios + data_offset + le16_to_cpu(router_obj->asObjects[k].usSrcDstTableOffset)); + u8 *num_dst_objs = (u8 *) + ((u8 *)router_src_dst_table + 1 + + (router_src_dst_table->ucNumberOfSrc * 2)); + u16 *dst_objs = (u16 *)(num_dst_objs + 1); int enum_id; router.router_id = router_obj_id; - for (enum_id = 0; enum_id < router_src_dst_table->ucNumberOfDst; - enum_id++) { + for (enum_id = 0; enum_id < (*num_dst_objs); enum_id++) { if (le16_to_cpu(path->usConnObjectId) == - le16_to_cpu(router_src_dst_table->usDstObjectID[enum_id])) + le16_to_cpu(dst_objs[enum_id])) break; } @@ -1658,7 +1661,9 @@ struct radeon_encoder_atom_dig *radeon_atombios_get_lvds_info(struct kfree(edid); } } - record += sizeof(ATOM_FAKE_EDID_PATCH_RECORD); + record += fake_edid_record->ucFakeEDIDLength ? 
+ fake_edid_record->ucFakeEDIDLength + 2 : + sizeof(ATOM_FAKE_EDID_PATCH_RECORD); break; case LCD_PANEL_RESOLUTION_RECORD_TYPE: panel_res_record = (ATOM_PANEL_RESOLUTION_PATCH_RECORD *)record; @@ -2031,6 +2036,8 @@ static int radeon_atombios_parse_power_table_1_3(struct radeon_device *rdev) num_modes = power_info->info.ucNumOfPowerModeEntries; if (num_modes > ATOM_MAX_NUMBEROF_POWER_BLOCK) num_modes = ATOM_MAX_NUMBEROF_POWER_BLOCK; + if (num_modes == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * num_modes, GFP_KERNEL); if (!rdev->pm.power_state) return state_index; @@ -2401,6 +2408,8 @@ static int radeon_atombios_parse_power_table_4_5(struct radeon_device *rdev) power_info = (union power_info *)(mode_info->atom_context->bios + data_offset); radeon_atombios_add_pplib_thermal_controller(rdev, &power_info->pplib.sThermalController); + if (power_info->pplib.ucNumStates == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * power_info->pplib.ucNumStates, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2485,6 +2494,8 @@ static int radeon_atombios_parse_power_table_6(struct radeon_device *rdev) non_clock_info_array = (struct NonClockInfoArray *) (mode_info->atom_context->bios + data_offset + le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset)); + if (state_array->ucNumEntries == 0) + return state_index; rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state) * state_array->ucNumEntries, GFP_KERNEL); if (!rdev->pm.power_state) @@ -2561,7 +2572,9 @@ void radeon_atombios_get_power_modes(struct radeon_device *rdev) default: break; } - } else { + } + + if (state_index == 0) { rdev->pm.power_state = kzalloc(sizeof(struct radeon_power_state), GFP_KERNEL); if (rdev->pm.power_state) { /* add the default mode */ diff --git a/drivers/gpu/drm/radeon/radeon_combios.c b/drivers/gpu/drm/radeon/radeon_combios.c index 98fb3d7..f616e40 100644 --- a/drivers/gpu/drm/radeon/radeon_combios.c +++ b/drivers/gpu/drm/radeon/radeon_combios.c @@ -147,7 +147,7 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, enum radeon_combios_table_offset table) { struct radeon_device *rdev = dev->dev_private; - int rev; + int rev, size; uint16_t offset = 0, check_offset; if (!rdev->bios) @@ -156,174 +156,106 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, switch (table) { /* absolute offset tables */ case COMBIOS_ASIC_INIT_1_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0xc); - if (check_offset) - offset = check_offset; + check_offset = 0xc; break; case COMBIOS_BIOS_SUPPORT_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x14); - if (check_offset) - offset = check_offset; + check_offset = 0x14; break; case COMBIOS_DAC_PROGRAMMING_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x2a); - if (check_offset) - offset = check_offset; + check_offset = 0x2a; break; case COMBIOS_MAX_COLOR_DEPTH_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x2c); - if (check_offset) - offset = check_offset; + check_offset = 0x2c; break; case COMBIOS_CRTC_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x2e); - if (check_offset) - offset = check_offset; + check_offset = 0x2e; break; case COMBIOS_PLL_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x30); - if (check_offset) - offset = check_offset; + check_offset = 0x30; break; case COMBIOS_TV_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x32); - if (check_offset) - offset = check_offset; + 
check_offset = 0x32; break; case COMBIOS_DFP_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x34); - if (check_offset) - offset = check_offset; + check_offset = 0x34; break; case COMBIOS_HW_CONFIG_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x36); - if (check_offset) - offset = check_offset; + check_offset = 0x36; break; case COMBIOS_MULTIMEDIA_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x38); - if (check_offset) - offset = check_offset; + check_offset = 0x38; break; case COMBIOS_TV_STD_PATCH_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x3e); - if (check_offset) - offset = check_offset; + check_offset = 0x3e; break; case COMBIOS_LCD_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x40); - if (check_offset) - offset = check_offset; + check_offset = 0x40; break; case COMBIOS_MOBILE_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x42); - if (check_offset) - offset = check_offset; + check_offset = 0x42; break; case COMBIOS_PLL_INIT_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x46); - if (check_offset) - offset = check_offset; + check_offset = 0x46; break; case COMBIOS_MEM_CONFIG_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x48); - if (check_offset) - offset = check_offset; + check_offset = 0x48; break; case COMBIOS_SAVE_MASK_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x4a); - if (check_offset) - offset = check_offset; + check_offset = 0x4a; break; case COMBIOS_HARDCODED_EDID_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x4c); - if (check_offset) - offset = check_offset; + check_offset = 0x4c; break; case COMBIOS_ASIC_INIT_2_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x4e); - if (check_offset) - offset = check_offset; + check_offset = 0x4e; break; case COMBIOS_CONNECTOR_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x50); - if (check_offset) - offset = check_offset; + check_offset = 0x50; break; case COMBIOS_DYN_CLK_1_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x52); - if (check_offset) - offset = check_offset; + check_offset = 0x52; break; case COMBIOS_RESERVED_MEM_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x54); - if (check_offset) - offset = check_offset; + check_offset = 0x54; break; case COMBIOS_EXT_TMDS_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x58); - if (check_offset) - offset = check_offset; + check_offset = 0x58; break; case COMBIOS_MEM_CLK_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x5a); - if (check_offset) - offset = check_offset; + check_offset = 0x5a; break; case COMBIOS_EXT_DAC_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x5c); - if (check_offset) - offset = check_offset; + check_offset = 0x5c; break; case COMBIOS_MISC_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x5e); - if (check_offset) - offset = check_offset; + check_offset = 0x5e; break; case COMBIOS_CRT_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x60); - if (check_offset) - offset = check_offset; + check_offset = 0x60; break; case COMBIOS_INTEGRATED_SYSTEM_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x62); - if (check_offset) - offset = check_offset; + check_offset = 0x62; break; case COMBIOS_COMPONENT_VIDEO_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x64); - if (check_offset) - offset = check_offset; + check_offset = 0x64; break; case COMBIOS_FAN_SPEED_INFO_TABLE: - 
check_offset = RBIOS16(rdev->bios_header_start + 0x66); - if (check_offset) - offset = check_offset; + check_offset = 0x66; break; case COMBIOS_OVERDRIVE_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x68); - if (check_offset) - offset = check_offset; + check_offset = 0x68; break; case COMBIOS_OEM_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x6a); - if (check_offset) - offset = check_offset; + check_offset = 0x6a; break; case COMBIOS_DYN_CLK_2_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x6c); - if (check_offset) - offset = check_offset; + check_offset = 0x6c; break; case COMBIOS_POWER_CONNECTOR_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x6e); - if (check_offset) - offset = check_offset; + check_offset = 0x6e; break; case COMBIOS_I2C_INFO_TABLE: - check_offset = RBIOS16(rdev->bios_header_start + 0x70); - if (check_offset) - offset = check_offset; + check_offset = 0x70; break; /* relative offset tables */ case COMBIOS_ASIC_INIT_3_TABLE: /* offset from misc info */ @@ -439,11 +371,16 @@ static uint16_t combios_get_table_offset(struct drm_device *dev, } break; default: + check_offset = 0; break; } - return offset; + size = RBIOS8(rdev->bios_header_start + 0x6); + /* check absolute offset tables */ + if (table < COMBIOS_ASIC_INIT_3_TABLE && check_offset && check_offset < size) + offset = RBIOS16(rdev->bios_header_start + check_offset); + return offset; } bool radeon_combios_check_hardcoded_edid(struct radeon_device *rdev) @@ -953,8 +890,10 @@ struct radeon_encoder_primary_dac *radeon_combios_get_primary_dac_info(struct dac = RBIOS8(dac_info + 0x3) & 0xf; p_dac->ps2_pdac_adj = (bg << 8) | (dac); } - /* if the values are all zeros, use the table */ - if (p_dac->ps2_pdac_adj) + /* if the values are zeros, use the table */ + if ((dac == 0) || (bg == 0)) + found = 0; + else found = 1; } diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c index e87893c..a275cf6 100644 --- a/drivers/gpu/drm/radeon/radeon_device.c +++ b/drivers/gpu/drm/radeon/radeon_device.c @@ -352,18 +352,17 @@ bool radeon_card_posted(struct radeon_device *rdev) uint32_t reg; /* first check CRTCs */ - if (ASIC_IS_DCE41(rdev)) { + if (ASIC_IS_DCE4(rdev)) { reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET); - if (reg & EVERGREEN_CRTC_MASTER_EN) - return true; - } else if (ASIC_IS_DCE4(rdev)) { - reg = RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC0_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC1_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | - RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + if (rdev->num_crtc >= 4) { + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC2_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC3_REGISTER_OFFSET); + } + if (rdev->num_crtc >= 6) { + reg |= RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC4_REGISTER_OFFSET) | + RREG32(EVERGREEN_CRTC_CONTROL + EVERGREEN_CRTC5_REGISTER_OFFSET); + } if (reg & EVERGREEN_CRTC_MASTER_EN) return true; } else if (ASIC_IS_AVIVO(rdev)) { diff --git a/drivers/gpu/drm/radeon/radeon_pm.c b/drivers/gpu/drm/radeon/radeon_pm.c index 4f88863..8270a85 100644 --- a/drivers/gpu/drm/radeon/radeon_pm.c +++ b/drivers/gpu/drm/radeon/radeon_pm.c @@ -845,7 
+845,11 @@ static int radeon_debugfs_pm_info(struct seq_file *m, void *data) struct radeon_device *rdev = dev->dev_private; seq_printf(m, "default engine clock: %u0 kHz\n", rdev->pm.default_sclk); - seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); + /* radeon_get_engine_clock is not reliable on APUs so just print the current clock */ + if ((rdev->family >= CHIP_PALM) && (rdev->flags & RADEON_IS_IGP)) + seq_printf(m, "current engine clock: %u0 kHz\n", rdev->pm.current_sclk); + else + seq_printf(m, "current engine clock: %u0 kHz\n", radeon_get_engine_clock(rdev)); seq_printf(m, "default memory clock: %u0 kHz\n", rdev->pm.default_mclk); if (rdev->asic->get_memory_clock) seq_printf(m, "current memory clock: %u0 kHz\n", radeon_get_memory_clock(rdev)); diff --git a/drivers/gpu/drm/radeon/rs400.c b/drivers/gpu/drm/radeon/rs400.c index aa6a66e..317eac1 100644 --- a/drivers/gpu/drm/radeon/rs400.c +++ b/drivers/gpu/drm/radeon/rs400.c @@ -174,10 +174,13 @@ int rs400_gart_enable(struct radeon_device *rdev) /* FIXME: according to doc we should set HIDE_MMCFG_BAR=0, * AGPMODE30=0 & AGP30ENHANCED=0 in NB_CNTL */ if ((rdev->family == CHIP_RS690) || (rdev->family == CHIP_RS740)) { - WREG32_MC(RS480_MC_MISC_CNTL, - (RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN)); + tmp = RREG32_MC(RS480_MC_MISC_CNTL); + tmp |= RS480_GART_INDEX_REG_EN | RS690_BLOCK_GFX_D3_EN; + WREG32_MC(RS480_MC_MISC_CNTL, tmp); } else { - WREG32_MC(RS480_MC_MISC_CNTL, RS480_GART_INDEX_REG_EN); + tmp = RREG32_MC(RS480_MC_MISC_CNTL); + tmp |= RS480_GART_INDEX_REG_EN; + WREG32_MC(RS480_MC_MISC_CNTL, tmp); } /* Enable gart */ WREG32_MC(RS480_AGP_ADDRESS_SPACE_SIZE, (RS480_GART_EN | size_reg)); diff --git a/drivers/gpu/vga/vga_switcheroo.c b/drivers/gpu/vga/vga_switcheroo.c index 58434e8..37fe246 100644 --- a/drivers/gpu/vga/vga_switcheroo.c +++ b/drivers/gpu/vga/vga_switcheroo.c @@ -26,6 +26,7 @@ #include <linux/fb.h> #include <linux/pci.h> +#include <linux/console.h> #include <linux/vga_switcheroo.h> struct vga_switcheroo_client { @@ -256,8 +257,10 @@ static int vga_switchto_stage2(struct vga_switcheroo_client *new_client) if (new_client->fb_info) { struct fb_event event; + console_lock(); event.info = new_client->fb_info; fb_notifier_call_chain(FB_EVENT_REMAP_ALL_CONSOLE, &event); + console_unlock(); } ret = vgasr_priv.handler->switchto(new_client->id); diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c index 53576e7..8fa2cd7 100644 --- a/drivers/hid/hid-core.c +++ b/drivers/hid/hid-core.c @@ -58,6 +58,8 @@ struct hid_report *hid_register_report(struct hid_device *device, unsigned type, struct hid_report_enum *report_enum = device->report_enum + type; struct hid_report *report; + if (id >= HID_MAX_IDS) + return NULL; if (report_enum->report_id_hash[id]) return report_enum->report_id_hash[id]; @@ -379,9 +381,11 @@ static int hid_parser_global(struct hid_parser *parser, struct hid_item *item) case HID_GLOBAL_ITEM_TAG_REPORT_ID: parser->global.report_id = item_udata(item); - if (parser->global.report_id == 0) { - dbg_hid("report_id 0 is invalid\n"); - return -1; + if (parser->global.report_id == 0 || + parser->global.report_id >= HID_MAX_IDS) { + hid_err(parser->device, "report_id %u is invalid\n", + parser->global.report_id); + return -1; } return 0; @@ -551,7 +555,7 @@ static void hid_device_release(struct device *dev) for (i = 0; i < HID_REPORT_TYPES; i++) { struct hid_report_enum *report_enum = device->report_enum + i; - for (j = 0; j < 256; j++) { + for (j = 0; j < HID_MAX_IDS; j++) { struct 
hid_report *report = report_enum->report_id_hash[j]; if (report) hid_free_report(report); @@ -811,6 +815,64 @@ static int search(__s32 *array, __s32 value, unsigned n) return -1; } +static const char * const hid_report_names[] = { + "HID_INPUT_REPORT", + "HID_OUTPUT_REPORT", + "HID_FEATURE_REPORT", +}; +/** + * hid_validate_values - validate existing device report's value indexes + * + * @device: hid device + * @type: which report type to examine + * @id: which report ID to examine (0 for first) + * @field_index: which report field to examine + * @report_counts: expected number of values + * + * Validate the number of values in a given field of a given report, after + * parsing. + */ +struct hid_report *hid_validate_values(struct hid_device *hid, + unsigned int type, unsigned int id, + unsigned int field_index, + unsigned int report_counts) +{ + struct hid_report *report; + + if (type > HID_FEATURE_REPORT) { + hid_err(hid, "invalid HID report type %u\n", type); + return NULL; + } + + if (id >= HID_MAX_IDS) { + hid_err(hid, "invalid HID report id %u\n", id); + return NULL; + } + + /* + * Explicitly not using hid_get_report() here since it depends on + * ->numbered being checked, which may not always be the case when + * drivers go to access report values. + */ + report = hid->report_enum[type].report_id_hash[id]; + if (!report) { + hid_err(hid, "missing %s %u\n", hid_report_names[type], id); + return NULL; + } + if (report->maxfield <= field_index) { + hid_err(hid, "not enough fields in %s %u\n", + hid_report_names[type], id); + return NULL; + } + if (report->field[field_index]->report_count < report_counts) { + hid_err(hid, "not enough values in %s %u field %u\n", + hid_report_names[type], id, field_index); + return NULL; + } + return report; +} +EXPORT_SYMBOL_GPL(hid_validate_values); + /** * hid_match_report - check if driver's raw_event should be called * @@ -989,7 +1051,12 @@ EXPORT_SYMBOL_GPL(hid_output_report); int hid_set_field(struct hid_field *field, unsigned offset, __s32 value) { - unsigned size = field->report_size; + unsigned size; + + if (!field) + return -1; + + size = field->report_size; hid_dump_input(field->report->device, field->usage + offset, value); diff --git a/drivers/hid/hid-ntrig.c b/drivers/hid/hid-ntrig.c index 9fae2eb..48cba85 100644 --- a/drivers/hid/hid-ntrig.c +++ b/drivers/hid/hid-ntrig.c @@ -115,7 +115,8 @@ static inline int ntrig_get_mode(struct hid_device *hdev) struct hid_report *report = hdev->report_enum[HID_FEATURE_REPORT]. 
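The hid_validate_values() helper introduced above is meant to be called by drivers before they dereference report->field[n]->value[m]; the hid-zpff hunk further down converts one driver to it. A schematic caller, with an invented function name, looks like this:

static int example_ff_init(struct hid_device *hid)
{
	struct hid_report *report;

	/* output report 0 must have field 0 with at least one value */
	report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, 0, 1);
	if (!report)
		return -ENODEV;

	report->field[0]->value[0] = 0x00;	/* safe after validation */
	return 0;
}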
report_id_hash[0x0d]; - if (!report) + if (!report || report->maxfield < 1 || + report->field[0]->report_count < 1) return -EINVAL; usbhid_submit_report(hdev, report, USB_DIR_IN); diff --git a/drivers/hid/hid-pl.c b/drivers/hid/hid-pl.c index 06e5300..fa74232 100644 --- a/drivers/hid/hid-pl.c +++ b/drivers/hid/hid-pl.c @@ -128,8 +128,14 @@ static int plff_init(struct hid_device *hid) strong = &report->field[0]->value[2]; weak = &report->field[0]->value[3]; debug("detected single-field device"); - } else if (report->maxfield >= 4 && report->field[0]->maxusage == 1 && - report->field[0]->usage[0].hid == (HID_UP_LED | 0x43)) { + } else if (report->field[0]->maxusage == 1 && + report->field[0]->usage[0].hid == + (HID_UP_LED | 0x43) && + report->maxfield >= 4 && + report->field[0]->report_count >= 1 && + report->field[1]->report_count >= 1 && + report->field[2]->report_count >= 1 && + report->field[3]->report_count >= 1) { report->field[0]->value[0] = 0x00; report->field[1]->value[0] = 0x00; strong = &report->field[2]->value[0]; diff --git a/drivers/hid/hid-zpff.c b/drivers/hid/hid-zpff.c index f31fab0..5c8f628 100644 --- a/drivers/hid/hid-zpff.c +++ b/drivers/hid/hid-zpff.c @@ -69,21 +69,13 @@ static int zpff_init(struct hid_device *hid) struct hid_report *report; struct hid_input *hidinput = list_entry(hid->inputs.next, struct hid_input, list); - struct list_head *report_list = - &hid->report_enum[HID_OUTPUT_REPORT].report_list; struct input_dev *dev = hidinput->input; - int error; + int i, error; - if (list_empty(report_list)) { - hid_err(hid, "no output report found\n"); - return -ENODEV; - } - - report = list_entry(report_list->next, struct hid_report, list); - - if (report->maxfield < 4) { - hid_err(hid, "not enough fields in report\n"); - return -ENODEV; + for (i = 0; i < 4; i++) { + report = hid_validate_values(hid, HID_OUTPUT_REPORT, 0, i, 1); + if (!report) + return -ENODEV; } zpff = kzalloc(sizeof(struct zpff_device), GFP_KERNEL); diff --git a/drivers/hwmon/abituguru.c b/drivers/hwmon/abituguru.c index 65a35cf..61ab615 100644 --- a/drivers/hwmon/abituguru.c +++ b/drivers/hwmon/abituguru.c @@ -1280,14 +1280,18 @@ static int __devinit abituguru_probe(struct platform_device *pdev) pr_info("found Abit uGuru\n"); /* Register sysfs hooks */ - for (i = 0; i < sysfs_attr_i; i++) - if (device_create_file(&pdev->dev, - &data->sysfs_attr[i].dev_attr)) + for (i = 0; i < sysfs_attr_i; i++) { + res = device_create_file(&pdev->dev, + &data->sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; - for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) - if (device_create_file(&pdev->dev, - &abituguru_sysfs_attr[i].dev_attr)) + } + for (i = 0; i < ARRAY_SIZE(abituguru_sysfs_attr); i++) { + res = device_create_file(&pdev->dev, + &abituguru_sysfs_attr[i].dev_attr); + if (res) goto abituguru_probe_error; + } data->hwmon_dev = hwmon_device_register(&pdev->dev); if (!IS_ERR(data->hwmon_dev)) diff --git a/drivers/hwmon/adm1021.c b/drivers/hwmon/adm1021.c index 1ad0a88..8178927 100644 --- a/drivers/hwmon/adm1021.c +++ b/drivers/hwmon/adm1021.c @@ -311,26 +311,68 @@ static int adm1021_detect(struct i2c_client *client, man_id = i2c_smbus_read_byte_data(client, ADM1021_REG_MAN_ID); dev_id = i2c_smbus_read_byte_data(client, ADM1021_REG_DEV_ID); + if (man_id < 0 || dev_id < 0) + return -ENODEV; + if (man_id == 0x4d && dev_id == 0x01) type_name = "max1617a"; else if (man_id == 0x41) { if ((dev_id & 0xF0) == 0x30) type_name = "adm1023"; - else + else if ((dev_id & 0xF0) == 0x00) type_name = "adm1021"; + 
else + return -ENODEV; } else if (man_id == 0x49) type_name = "thmc10"; else if (man_id == 0x23) type_name = "gl523sm"; else if (man_id == 0x54) type_name = "mc1066"; - /* LM84 Mfr ID in a different place, and it has more unused bits */ - else if (conv_rate == 0x00 - && (config & 0x7F) == 0x00 - && (status & 0xAB) == 0x00) - type_name = "lm84"; - else - type_name = "max1617"; + else { + int lte, rte, lhi, rhi, llo, rlo; + + /* extra checks for LM84 and MAX1617 to avoid misdetections */ + + llo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(0)); + rlo = i2c_smbus_read_byte_data(client, ADM1021_REG_THYST_R(1)); + + /* fail if any of the additional register reads failed */ + if (llo < 0 || rlo < 0) + return -ENODEV; + + lte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(0)); + rte = i2c_smbus_read_byte_data(client, ADM1021_REG_TEMP(1)); + lhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(0)); + rhi = i2c_smbus_read_byte_data(client, ADM1021_REG_TOS_R(1)); + + /* + * Fail for negative temperatures and negative high limits. + * This check also catches read errors on the tested registers. + */ + if ((s8)lte < 0 || (s8)rte < 0 || (s8)lhi < 0 || (s8)rhi < 0) + return -ENODEV; + + /* fail if all registers hold the same value */ + if (lte == rte && lte == lhi && lte == rhi && lte == llo + && lte == rlo) + return -ENODEV; + + /* + * LM84 Mfr ID is in a different place, + * and it has more unused bits. + */ + if (conv_rate == 0x00 + && (config & 0x7F) == 0x00 + && (status & 0xAB) == 0x00) { + type_name = "lm84"; + } else { + /* fail if low limits are larger than high limits */ + if ((s8)llo > lhi || (s8)rlo > rhi) + return -ENODEV; + type_name = "max1617"; + } + } pr_debug("adm1021: Detected chip %s at adapter %d, address 0x%02x.\n", type_name, i2c_adapter_id(adapter), client->addr); diff --git a/drivers/hwmon/adt7470.c b/drivers/hwmon/adt7470.c index c6d1ce0..a9726c1 100644 --- a/drivers/hwmon/adt7470.c +++ b/drivers/hwmon/adt7470.c @@ -215,7 +215,7 @@ static inline int adt7470_write_word_data(struct i2c_client *client, u8 reg, u16 value) { return i2c_smbus_write_byte_data(client, reg, value & 0xFF) - && i2c_smbus_write_byte_data(client, reg + 1, value >> 8); + || i2c_smbus_write_byte_data(client, reg + 1, value >> 8); } static void adt7470_init_client(struct i2c_client *client) diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c index d99aa84..30cac58 100644 --- a/drivers/hwmon/applesmc.c +++ b/drivers/hwmon/applesmc.c @@ -344,8 +344,10 @@ static int applesmc_get_lower_bound(unsigned int *lo, const char *key) while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); - if (IS_ERR(entry)) + if (IS_ERR(entry)) { + *lo = 0; return PTR_ERR(entry); + } if (strcmp(entry->key, key) < 0) begin = middle + 1; else @@ -364,8 +366,10 @@ static int applesmc_get_upper_bound(unsigned int *hi, const char *key) while (begin != end) { int middle = begin + (end - begin) / 2; entry = applesmc_get_entry_by_index(middle); - if (IS_ERR(entry)) + if (IS_ERR(entry)) { + *hi = smcreg.key_count; return PTR_ERR(entry); + } if (strcmp(key, entry->key) < 0) end = middle; else @@ -485,16 +489,25 @@ static int applesmc_init_smcreg_try(void) { struct applesmc_registers *s = &smcreg; bool left_light_sensor, right_light_sensor; + unsigned int count; u8 tmp[1]; int ret; if (s->init_complete) return 0; - ret = read_register_count(&s->key_count); + ret = read_register_count(&count); if (ret) return ret; + if (s->cache && s->key_count != count) { + 
pr_warn("key count changed from %d to %d\n", + s->key_count, count); + kfree(s->cache); + s->cache = NULL; + } + s->key_count = count; + if (!s->cache) s->cache = kcalloc(s->key_count, sizeof(*s->cache), GFP_KERNEL); if (!s->cache) diff --git a/drivers/i2c/busses/i2c-xiic.c b/drivers/i2c/busses/i2c-xiic.c index 4bb68f3..64e7065 100644 --- a/drivers/i2c/busses/i2c-xiic.c +++ b/drivers/i2c/busses/i2c-xiic.c @@ -311,10 +311,8 @@ static void xiic_fill_tx_fifo(struct xiic_i2c *i2c) /* last message in transfer -> STOP */ data |= XIIC_TX_DYN_STOP_MASK; dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__); - - xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); - } else - xiic_setreg8(i2c, XIIC_DTR_REG_OFFSET, data); + } + xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data); } } diff --git a/drivers/md/dm-snap-persistent.c b/drivers/md/dm-snap-persistent.c index e4ecadf..2847a0b 100644 --- a/drivers/md/dm-snap-persistent.c +++ b/drivers/md/dm-snap-persistent.c @@ -251,7 +251,7 @@ static int chunk_io(struct pstore *ps, void *area, chunk_t chunk, int rw, */ INIT_WORK_ONSTACK(&req.work, do_metadata); queue_work(ps->metadata_wq, &req.work); - flush_work(&req.work); + flush_workqueue(ps->metadata_wq); return req.result; } diff --git a/drivers/md/dm-snap.c b/drivers/md/dm-snap.c index 9ecff5f..95e6db1 100644 --- a/drivers/md/dm-snap.c +++ b/drivers/md/dm-snap.c @@ -724,17 +724,16 @@ static int calc_max_buckets(void) */ static int init_hash_tables(struct dm_snapshot *s) { - sector_t hash_size, cow_dev_size, origin_dev_size, max_buckets; + sector_t hash_size, cow_dev_size, max_buckets; /* * Calculate based on the size of the original volume or * the COW volume... */ cow_dev_size = get_dev_size(s->cow->bdev); - origin_dev_size = get_dev_size(s->origin->bdev); max_buckets = calc_max_buckets(); - hash_size = min(origin_dev_size, cow_dev_size) >> s->store->chunk_shift; + hash_size = cow_dev_size >> s->store->chunk_shift; hash_size = min(hash_size, max_buckets); if (hash_size < 64) @@ -1121,6 +1120,7 @@ static int snapshot_ctr(struct dm_target *ti, unsigned int argc, char **argv) s->pending_pool = mempool_create_slab_pool(MIN_IOS, pending_cache); if (!s->pending_pool) { ti->error = "Could not allocate mempool for pending exceptions"; + r = -ENOMEM; goto bad_pending_pool; } diff --git a/drivers/media/dvb/mantis/mantis_dvb.c b/drivers/media/dvb/mantis/mantis_dvb.c index e5180e4..5d15c6b 100644 --- a/drivers/media/dvb/mantis/mantis_dvb.c +++ b/drivers/media/dvb/mantis/mantis_dvb.c @@ -248,8 +248,10 @@ int __devinit mantis_dvb_init(struct mantis_pci *mantis) err5: tasklet_kill(&mantis->tasklet); dvb_net_release(&mantis->dvbnet); - dvb_unregister_frontend(mantis->fe); - dvb_frontend_detach(mantis->fe); + if (mantis->fe) { + dvb_unregister_frontend(mantis->fe); + dvb_frontend_detach(mantis->fe); + } err4: mantis->demux.dmx.remove_frontend(&mantis->demux.dmx, &mantis->fe_mem); diff --git a/drivers/mfd/adp5520.c b/drivers/mfd/adp5520.c index f1d8848..2943fbf 100644 --- a/drivers/mfd/adp5520.c +++ b/drivers/mfd/adp5520.c @@ -36,6 +36,7 @@ struct adp5520_chip { struct blocking_notifier_head notifier_list; int irq; unsigned long id; + uint8_t mode; }; static int __adp5520_read(struct i2c_client *client, @@ -326,7 +327,10 @@ static int adp5520_suspend(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - adp5520_clr_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + adp5520_read(chip->dev, ADP5520_MODE_STATUS, &chip->mode); + /* All other bits are W1C 
*/ + chip->mode &= ADP5520_BL_EN | ADP5520_DIM_EN | ADP5520_nSTNBY; + adp5520_write(chip->dev, ADP5520_MODE_STATUS, 0); return 0; } @@ -335,7 +339,7 @@ static int adp5520_resume(struct device *dev) struct i2c_client *client = to_i2c_client(dev); struct adp5520_chip *chip = dev_get_drvdata(&client->dev); - adp5520_set_bits(chip->dev, ADP5520_MODE_STATUS, ADP5520_nSTNBY); + adp5520_write(chip->dev, ADP5520_MODE_STATUS, chip->mode); return 0; } #endif diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c index b47c52b..6d9c0796 100644 --- a/drivers/mmc/core/mmc.c +++ b/drivers/mmc/core/mmc.c @@ -354,13 +354,13 @@ static int mmc_read_ext_csd(struct mmc_card *card, u8 *ext_csd) ext_csd[EXT_CSD_SEC_FEATURE_SUPPORT]; card->ext_csd.raw_trim_mult = ext_csd[EXT_CSD_TRIM_MULT]; + card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if (card->ext_csd.rev >= 4) { /* * Enhanced area feature support -- check whether the eMMC * card has the Enhanced area enabled. If so, export enhanced * area offset and size to user by adding sysfs interface. */ - card->ext_csd.raw_partition_support = ext_csd[EXT_CSD_PARTITION_SUPPORT]; if ((ext_csd[EXT_CSD_PARTITION_SUPPORT] & 0x2) && (ext_csd[EXT_CSD_PARTITION_ATTRIBUTE] & 0x1)) { u8 hc_erase_grp_sz = diff --git a/drivers/mtd/mtdchar.c b/drivers/mtd/mtdchar.c index 9f8658e..72b788e 100644 --- a/drivers/mtd/mtdchar.c +++ b/drivers/mtd/mtdchar.c @@ -1064,6 +1064,33 @@ static unsigned long mtd_get_unmapped_area(struct file *file, } #endif +static inline unsigned long get_vm_size(struct vm_area_struct *vma) +{ + return vma->vm_end - vma->vm_start; +} + +static inline resource_size_t get_vm_offset(struct vm_area_struct *vma) +{ + return (resource_size_t) vma->vm_pgoff << PAGE_SHIFT; +} + +/* + * Set a new vm offset. + * + * Verify that the incoming offset really works as a page offset, + * and that the offset and size fit in a resource_size_t. + */ +static inline int set_vm_offset(struct vm_area_struct *vma, resource_size_t off) +{ + pgoff_t pgoff = off >> PAGE_SHIFT; + if (off != (resource_size_t) pgoff << PAGE_SHIFT) + return -EINVAL; + if (off + get_vm_size(vma) - 1 < off) + return -EINVAL; + vma->vm_pgoff = pgoff; + return 0; +} + /* * set up a mapping for shared memory segments */ @@ -1073,32 +1100,17 @@ static int mtd_mmap(struct file *file, struct vm_area_struct *vma) struct mtd_file_info *mfi = file->private_data; struct mtd_info *mtd = mfi->mtd; struct map_info *map = mtd->priv; - unsigned long start; - unsigned long off; - u32 len; - - if (mtd->type == MTD_RAM || mtd->type == MTD_ROM) { - off = vma->vm_pgoff << PAGE_SHIFT; - start = map->phys; - len = PAGE_ALIGN((start & ~PAGE_MASK) + map->size); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - vma->vm_flags |= VM_IO | VM_RESERVED; + /* This is broken because it assumes the MTD device is map-based + and that mtd->priv is a valid struct map_info. It should be + replaced with something that uses the mtd_get_unmapped_area() + operation properly. 
*/ + if (0 /*mtd->type == MTD_RAM || mtd->type == MTD_ROM*/) { #ifdef pgprot_noncached - if (file->f_flags & O_DSYNC || off >= __pa(high_memory)) + if (file->f_flags & O_DSYNC || map->phys >= __pa(high_memory)) vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); #endif - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, - vma->vm_page_prot)) - return -EAGAIN; - - return 0; + return vm_iomap_memory(vma, map->phys, map->size); } return -ENOSYS; #else diff --git a/drivers/net/3c509.c b/drivers/net/3c509.c index 44b28b2..15410f7 100644 --- a/drivers/net/3c509.c +++ b/drivers/net/3c509.c @@ -309,6 +309,7 @@ static int __devinit el3_isa_match(struct device *pdev, if (!dev) return -ENOMEM; + SET_NETDEV_DEV(dev, pdev); netdev_boot_setup_check(dev); if (!request_region(ioaddr, EL3_IO_EXTENT, "3c509-isa")) { @@ -704,6 +705,7 @@ static int __init el3_eisa_probe (struct device *device) return -ENOMEM; } + SET_NETDEV_DEV(dev, device); netdev_boot_setup_check(dev); el3_dev_fill(dev, phys_addr, ioaddr, irq, if_port, EL3_EISA); diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c index 41afc40..c83be40 100644 --- a/drivers/net/3c59x.c +++ b/drivers/net/3c59x.c @@ -632,7 +632,6 @@ struct vortex_private { pm_state_valid:1, /* pci_dev->saved_config_space has sane contents */ open:1, medialock:1, - must_free_region:1, /* Flag: if zero, Cardbus owns the I/O region */ large_frames:1, /* accept large frames */ handling_irq:1; /* private in_irq indicator */ /* {get|set}_wol operations are already serialized by rtnl. @@ -951,7 +950,7 @@ static int __devexit vortex_eisa_remove(struct device *device) unregister_netdev(dev); iowrite16(TotalReset|0x14, ioaddr + EL3_CMD); - release_region(dev->base_addr, VORTEX_TOTAL_SIZE); + release_region(edev->base_addr, VORTEX_TOTAL_SIZE); free_netdev(dev); return 0; @@ -1012,6 +1011,12 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, if (rc < 0) goto out; + rc = pci_request_regions(pdev, DRV_NAME); + if (rc < 0) { + pci_disable_device(pdev); + goto out; + } + unit = vortex_cards_found; if (global_use_mmio < 0 && (unit >= MAX_UNITS || use_mmio[unit] < 0)) { @@ -1027,6 +1032,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, if (!ioaddr) /* If mapping fails, fall-back to BAR 0... 
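An aside on the mtd_mmap() hunk above: vm_iomap_memory() wraps the offset and length validation plus the io_remap_pfn_range() call that the old code open-coded. A minimal mmap handler built on it could look like the sketch below, where the device structure and field names are invented:

static int example_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct example_dev *dev = file->private_data;	/* hypothetical device */

	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return vm_iomap_memory(vma, dev->phys_base, dev->region_size);
}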
*/ ioaddr = pci_iomap(pdev, 0, 0); if (!ioaddr) { + pci_release_regions(pdev); pci_disable_device(pdev); rc = -ENOMEM; goto out; @@ -1036,6 +1042,7 @@ static int __devinit vortex_init_one(struct pci_dev *pdev, ent->driver_data, unit); if (rc < 0) { pci_iounmap(pdev, ioaddr); + pci_release_regions(pdev); pci_disable_device(pdev); goto out; } @@ -1180,11 +1187,6 @@ static int __devinit vortex_probe1(struct device *gendev, /* PCI-only startup logic */ if (pdev) { - /* EISA resources already marked, so only PCI needs to do this here */ - /* Ignore return value, because Cardbus drivers already allocate for us */ - if (request_region(dev->base_addr, vci->io_size, print_name) != NULL) - vp->must_free_region = 1; - /* enable bus-mastering if necessary */ if (vci->flags & PCI_USES_MASTER) pci_set_master(pdev); @@ -1222,7 +1224,7 @@ static int __devinit vortex_probe1(struct device *gendev, &vp->rx_ring_dma); retval = -ENOMEM; if (!vp->rx_ring) - goto free_region; + goto free_device; vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE); vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE; @@ -1487,9 +1489,7 @@ free_ring: + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); -free_region: - if (vp->must_free_region) - release_region(dev->base_addr, vci->io_size); +free_device: free_netdev(dev); pr_err(PFX "vortex_probe1 fails. Returns %d\n", retval); out: @@ -3253,8 +3253,9 @@ static void __devexit vortex_remove_one(struct pci_dev *pdev) + sizeof(struct boom_tx_desc) * TX_RING_SIZE, vp->rx_ring, vp->rx_ring_dma); - if (vp->must_free_region) - release_region(dev->base_addr, vp->io_size); + + pci_release_regions(pdev); + free_netdev(dev); } diff --git a/drivers/net/arcnet/arcnet.c b/drivers/net/arcnet/arcnet.c index a746ba2..a956053 100644 --- a/drivers/net/arcnet/arcnet.c +++ b/drivers/net/arcnet/arcnet.c @@ -1007,7 +1007,7 @@ static void arcnet_rx(struct net_device *dev, int bufnum) soft = &pkt.soft.rfc1201; - lp->hw.copy_from_card(dev, bufnum, 0, &pkt, sizeof(ARC_HDR_SIZE)); + lp->hw.copy_from_card(dev, bufnum, 0, &pkt, ARC_HDR_SIZE); if (pkt.hard.offset[0]) { ofs = pkt.hard.offset[0]; length = 256 - ofs; diff --git a/drivers/net/atl1e/atl1e.h b/drivers/net/atl1e/atl1e.h index 4093097..2cec0df 100644 --- a/drivers/net/atl1e/atl1e.h +++ b/drivers/net/atl1e/atl1e.h @@ -186,7 +186,7 @@ struct atl1e_tpd_desc { /* how about 0x2000 */ #define MAX_TX_BUF_LEN 0x2000 #define MAX_TX_BUF_SHIFT 13 -/*#define MAX_TX_BUF_LEN 0x3000 */ +#define MAX_TSO_SEG_SIZE 0x3c00 /* rrs word 1 bit 0:31 */ #define RRS_RX_CSUM_MASK 0xFFFF diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c index b0132bb..8fe1cd3 100644 --- a/drivers/net/atl1e/atl1e_main.c +++ b/drivers/net/atl1e/atl1e_main.c @@ -2333,6 +2333,7 @@ static int __devinit atl1e_probe(struct pci_dev *pdev, INIT_WORK(&adapter->reset_task, atl1e_reset_task); INIT_WORK(&adapter->link_chg_task, atl1e_link_chg_task); + netif_set_gso_max_size(netdev, MAX_TSO_SEG_SIZE); err = register_netdev(netdev); if (err) { netdev_err(netdev, "register netdevice failed\n"); diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c index dd433a7..f1b566f 100644 --- a/drivers/net/bonding/bond_main.c +++ b/drivers/net/bonding/bond_main.c @@ -1949,6 +1949,7 @@ err_detach: write_unlock_bh(&bond->lock); err_close: + slave_dev->priv_flags &= ~IFF_BONDING; dev_close(slave_dev); err_unset_master: @@ -1993,6 +1994,7 @@ int bond_release(struct net_device *bond_dev, struct net_device 
*slave_dev) struct bonding *bond = netdev_priv(bond_dev); struct slave *slave, *oldcurrent; struct sockaddr addr; + int old_flags = bond_dev->flags; u32 old_features = bond_dev->features; /* slave is not a slave or master is not master of this slave */ @@ -2123,12 +2125,18 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev) * already taken care of above when we detached the slave */ if (!USES_PRIMARY(bond->params.mode)) { - /* unset promiscuity level from slave */ - if (bond_dev->flags & IFF_PROMISC) + /* unset promiscuity level from slave + * NOTE: The NETDEV_CHANGEADDR call above may change the value + * of the IFF_PROMISC flag in the bond_dev, but we need the + * value of that flag before that change, as that was the value + * when this slave was attached, so we cache at the start of the + * function and use it here. Same goes for ALLMULTI below + */ + if (old_flags & IFF_PROMISC) dev_set_promiscuity(slave_dev, -1); /* unset allmulti level from slave */ - if (bond_dev->flags & IFF_ALLMULTI) + if (old_flags & IFF_ALLMULTI) dev_set_allmulti(slave_dev, -1); /* flush master's mc_list from slave */ diff --git a/drivers/net/dummy.c b/drivers/net/dummy.c index 098ff31..d74c431 100644 --- a/drivers/net/dummy.c +++ b/drivers/net/dummy.c @@ -186,6 +186,8 @@ static int __init dummy_init_module(void) rtnl_lock(); err = __rtnl_link_register(&dummy_link_ops); + if (err < 0) + goto out; for (i = 0; i < numdummies && !err; i++) { err = dummy_init_one(); @@ -193,6 +195,8 @@ static int __init dummy_init_module(void) } if (err < 0) __rtnl_link_unregister(&dummy_link_ops); + +out: rtnl_unlock(); return err; diff --git a/drivers/net/gianfar_ptp.c b/drivers/net/gianfar_ptp.c index c413479..68cfa08 100644 --- a/drivers/net/gianfar_ptp.c +++ b/drivers/net/gianfar_ptp.c @@ -521,6 +521,7 @@ static int gianfar_ptp_probe(struct platform_device *dev) return 0; no_clock: + iounmap(etsects->regs); no_ioremap: release_resource(etsects->rsrc); no_resource: diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c index 2b98461..902ba2f 100644 --- a/drivers/net/ifb.c +++ b/drivers/net/ifb.c @@ -33,6 +33,7 @@ #include <linux/etherdevice.h> #include <linux/init.h> #include <linux/moduleparam.h> +#include <linux/sched.h> #include <net/pkt_sched.h> #include <net/net_namespace.h> @@ -251,11 +252,17 @@ static int __init ifb_init_module(void) rtnl_lock(); err = __rtnl_link_register(&ifb_link_ops); + if (err < 0) + goto out; - for (i = 0; i < numifbs && !err; i++) + for (i = 0; i < numifbs && !err; i++) { err = ifb_init_one(i); + cond_resched(); + } if (err) __rtnl_link_unregister(&ifb_link_ops); + +out: rtnl_unlock(); return err; diff --git a/drivers/net/ll_temac_main.c b/drivers/net/ll_temac_main.c index b7948cc..7ebb4c1 100644 --- a/drivers/net/ll_temac_main.c +++ b/drivers/net/ll_temac_main.c @@ -302,6 +302,12 @@ static int temac_dma_bd_init(struct net_device *ndev) lp->rx_bd_p + (sizeof(*lp->rx_bd_v) * (RX_BD_NUM - 1))); lp->dma_out(lp, TX_CURDESC_PTR, lp->tx_bd_p); + /* Init descriptor indexes */ + lp->tx_bd_ci = 0; + lp->tx_bd_next = 0; + lp->tx_bd_tail = 0; + lp->rx_bd_ci = 0; + return 0; out: diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c index 735f726..4c0bdac 100644 --- a/drivers/net/macvlan.c +++ b/drivers/net/macvlan.c @@ -193,7 +193,8 @@ static rx_handler_result_t macvlan_handle_frame(struct sk_buff **pskb) } if (port->passthru) - vlan = list_first_entry(&port->vlans, struct macvlan_dev, list); + vlan = list_first_or_null_rcu(&port->vlans, + struct macvlan_dev, list); else vlan = 
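An aside on the macvlan change here: the receive path walks port->vlans under RCU, so additions and removals must go through the _rcu list helpers and readers must cope with an empty list, which is exactly what list_first_or_null_rcu() provides. A small sketch of that reader/writer pairing, with invented names:

struct item {
	struct list_head list;
	int payload;
};

static LIST_HEAD(items);	/* writers serialize on their own lock */

static int first_payload(void)
{
	struct item *it;
	int ret = -1;

	rcu_read_lock();
	it = list_first_or_null_rcu(&items, struct item, list);
	if (it)
		ret = it->payload;
	rcu_read_unlock();
	return ret;		/* -1 if the list was empty */
}

/*
 * Writer side uses list_add_tail_rcu()/list_del_rcu(), and a removed item may
 * only be freed after a grace period (synchronize_rcu() or kfree_rcu()).
 */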
macvlan_hash_lookup(port, eth->h_dest); if (vlan == NULL) @@ -687,7 +688,7 @@ int macvlan_common_newlink(struct net *src_net, struct net_device *dev, if (err < 0) goto destroy_port; - list_add_tail(&vlan->list, &port->vlans); + list_add_tail_rcu(&vlan->list, &port->vlans); netif_stacked_transfer_operstate(lowerdev, dev); return 0; @@ -713,7 +714,7 @@ void macvlan_dellink(struct net_device *dev, struct list_head *head) { struct macvlan_dev *vlan = netdev_priv(dev); - list_del(&vlan->list); + list_del_rcu(&vlan->list); unregister_netdevice_queue(dev, head); } EXPORT_SYMBOL_GPL(macvlan_dellink); diff --git a/drivers/net/pptp.c b/drivers/net/pptp.c index 4b3a68b..e6a8ebf 100644 --- a/drivers/net/pptp.c +++ b/drivers/net/pptp.c @@ -282,7 +282,7 @@ static int pptp_xmit(struct ppp_channel *chan, struct sk_buff *skb) nf_reset(skb); skb->ip_summed = CHECKSUM_NONE; - ip_select_ident(iph, &rt->dst, NULL); + ip_select_ident(skb, &rt->dst, NULL); ip_send_check(iph); ip_local_out(skb); diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c index 80b230e..bf67991 100644 --- a/drivers/net/r8169.c +++ b/drivers/net/r8169.c @@ -1494,8 +1494,6 @@ static void rtl8169_rx_vlan_tag(struct RxDesc *desc, struct sk_buff *skb) if (opts2 & RxVlanTag) __vlan_hwaccel_put_tag(skb, swab16(opts2 & 0xffff)); - - desc->opts2 = 0; } static int rtl8169_gset_tbi(struct net_device *dev, struct ethtool_cmd *cmd) @@ -3105,11 +3103,34 @@ static void r810x_phy_power_up(struct rtl8169_private *tp) rtl_writephy(tp, MII_BMCR, BMCR_ANENABLE); } +static void rtl_speed_down(struct rtl8169_private *tp) +{ + u32 adv; + int lpa; + + rtl_writephy(tp, 0x1f, 0x0000); + lpa = rtl_readphy(tp, MII_LPA); + + if (lpa & (LPA_10HALF | LPA_10FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full; + else if (lpa & (LPA_100HALF | LPA_100FULL)) + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full; + else + adv = ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full | + ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full | + (tp->mii.supports_gmii ? 
+ ADVERTISED_1000baseT_Half | + ADVERTISED_1000baseT_Full : 0); + + rtl8169_set_speed(tp->dev, AUTONEG_ENABLE, SPEED_1000, DUPLEX_FULL, + adv); +} + static void r810x_pll_power_down(struct rtl8169_private *tp) { if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); + rtl_speed_down(tp); return; } @@ -3201,8 +3222,7 @@ static void r8168_pll_power_down(struct rtl8169_private *tp) rtl_ephy_write(ioaddr, 0x19, 0xff64); if (__rtl8169_get_wol(tp) & WAKE_ANY) { - rtl_writephy(tp, 0x1f, 0x0000); - rtl_writephy(tp, MII_BMCR, 0x0000); + rtl_speed_down(tp); if (tp->mac_version == RTL_GIGA_MAC_VER_32 || tp->mac_version == RTL_GIGA_MAC_VER_33) @@ -5166,7 +5186,6 @@ static int rtl8169_rx_interrupt(struct net_device *dev, rtl8169_schedule_work(dev, rtl8169_reset_task); dev->stats.rx_fifo_errors++; } - rtl8169_mark_to_asic(desc, rx_buf_sz); } else { struct sk_buff *skb; dma_addr_t addr = le64_to_cpu(desc->addr); @@ -5180,16 +5199,14 @@ static int rtl8169_rx_interrupt(struct net_device *dev, if (unlikely(rtl8169_fragmented_frame(status))) { dev->stats.rx_dropped++; dev->stats.rx_length_errors++; - rtl8169_mark_to_asic(desc, rx_buf_sz); - continue; + goto release_descriptor; } skb = rtl8169_try_rx_copy(tp->Rx_databuff[entry], tp, pkt_size, addr); - rtl8169_mark_to_asic(desc, rx_buf_sz); if (!skb) { dev->stats.rx_dropped++; - continue; + goto release_descriptor; } rtl8169_rx_csum(skb, status); @@ -5203,6 +5220,10 @@ static int rtl8169_rx_interrupt(struct net_device *dev, dev->stats.rx_bytes += pkt_size; dev->stats.rx_packets++; } +release_descriptor: + desc->opts2 = 0; + wmb(); + rtl8169_mark_to_asic(desc, rx_buf_sz); } count = cur_rx - tp->cur_rx; diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c index d429f0a..26e4cca 100644 --- a/drivers/net/sfc/rx.c +++ b/drivers/net/sfc/rx.c @@ -311,8 +311,9 @@ static void efx_resurrect_rx_buffer(struct efx_rx_queue *rx_queue, index = rx_queue->added_count & rx_queue->ptr_mask; new_buf = efx_rx_buffer(rx_queue, index); - new_buf->dma_addr = rx_buf->dma_addr ^ (PAGE_SIZE >> 1); new_buf->u.page = rx_buf->u.page; + new_buf->page_offset = rx_buf->page_offset ^ (PAGE_SIZE >> 1); + new_buf->dma_addr = state->dma_addr + new_buf->page_offset; new_buf->len = rx_buf->len; new_buf->is_page = true; ++rx_queue->added_count; diff --git a/drivers/net/sunvnet.c b/drivers/net/sunvnet.c index bf3c762..cc77f70 100644 --- a/drivers/net/sunvnet.c +++ b/drivers/net/sunvnet.c @@ -1248,6 +1248,8 @@ static int vnet_port_remove(struct vio_dev *vdev) dev_set_drvdata(&vdev->dev, NULL); kfree(port); + + unregister_netdev(vp->dev); } return 0; } diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c index eaa24fa8..1736875 100644 --- a/drivers/net/tg3.c +++ b/drivers/net/tg3.c @@ -13067,8 +13067,11 @@ static void __devinit tg3_read_vpd(struct tg3 *tp) if (j + len > block_end) goto partno; - memcpy(tp->fw_ver, &vpd_data[j], len); - strncat(tp->fw_ver, " bc ", TG3_NVM_VPD_LEN - len - 1); + if (len >= sizeof(tp->fw_ver)) + len = sizeof(tp->fw_ver) - 1; + memset(tp->fw_ver, 0, sizeof(tp->fw_ver)); + snprintf(tp->fw_ver, sizeof(tp->fw_ver), "%.*s bc ", len, + &vpd_data[j]); } partno: diff --git a/drivers/net/tun.c b/drivers/net/tun.c index b013e7f..f799f0c 100644 --- a/drivers/net/tun.c +++ b/drivers/net/tun.c @@ -614,8 +614,9 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, int offset = 0; if (!(tun->flags & TUN_NO_PI)) { - if ((len -= sizeof(pi)) > count) + if (len < sizeof(pi)) return -EINVAL; + len -= sizeof(pi); if 
(memcpy_fromiovecend((void *)&pi, iv, 0, sizeof(pi))) return -EFAULT; @@ -623,8 +624,9 @@ static __inline__ ssize_t tun_get_user(struct tun_struct *tun, } if (tun->flags & TUN_VNET_HDR) { - if ((len -= tun->vnet_hdr_sz) > count) + if (len < tun->vnet_hdr_sz) return -EINVAL; + len -= tun->vnet_hdr_sz; if (memcpy_fromiovecend((void *)&gso, iv, offset, sizeof(gso))) return -EFAULT; diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c index 544c309..e48a677 100644 --- a/drivers/net/usb/cdc_ether.c +++ b/drivers/net/usb/cdc_ether.c @@ -615,6 +615,11 @@ static const struct usb_device_id products [] = { .bInterfaceProtocol = USB_CDC_PROTO_NONE, .driver_info = (unsigned long)&wwan_info, }, { + /* Telit modules */ + USB_VENDOR_AND_INTERFACE_INFO(0x1bc7, USB_CLASS_COMM, + USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), + .driver_info = (kernel_ulong_t) &wwan_info, +}, { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ETHERNET, USB_CDC_PROTO_NONE), .driver_info = (unsigned long) &cdc_info, diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c index 1d93133..64a0ad1 100644 --- a/drivers/net/usb/dm9601.c +++ b/drivers/net/usb/dm9601.c @@ -384,7 +384,7 @@ static void dm9601_set_multicast(struct net_device *net) rx_ctl |= 0x02; } else if (net->flags & IFF_ALLMULTI || netdev_mc_count(net) > DM_MAX_MCAST) { - rx_ctl |= 0x04; + rx_ctl |= 0x08; } else if (!netdev_mc_empty(net)) { struct netdev_hw_addr *ha; diff --git a/drivers/net/usb/smsc75xx.c b/drivers/net/usb/smsc75xx.c index 2f4775f..be27891 100644 --- a/drivers/net/usb/smsc75xx.c +++ b/drivers/net/usb/smsc75xx.c @@ -43,7 +43,6 @@ #define EEPROM_MAC_OFFSET (0x01) #define DEFAULT_TX_CSUM_ENABLE (true) #define DEFAULT_RX_CSUM_ENABLE (true) -#define DEFAULT_TSO_ENABLE (true) #define SMSC75XX_INTERNAL_PHY_ID (1) #define SMSC75XX_TX_OVERHEAD (8) #define MAX_RX_FIFO_SIZE (20 * 1024) @@ -1034,17 +1033,14 @@ static int smsc75xx_bind(struct usbnet *dev, struct usb_interface *intf) INIT_WORK(&pdata->set_multicast, smsc75xx_deferred_multicast_write); - if (DEFAULT_TX_CSUM_ENABLE) { + if (DEFAULT_TX_CSUM_ENABLE) dev->net->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; - if (DEFAULT_TSO_ENABLE) - dev->net->features |= NETIF_F_SG | - NETIF_F_TSO | NETIF_F_TSO6; - } + if (DEFAULT_RX_CSUM_ENABLE) dev->net->features |= NETIF_F_RXCSUM; dev->net->hw_features = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | - NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_RXCSUM; + NETIF_F_RXCSUM; /* Init all registers */ ret = smsc75xx_reset(dev); @@ -1169,8 +1165,6 @@ static struct sk_buff *smsc75xx_tx_fixup(struct usbnet *dev, { u32 tx_cmd_a, tx_cmd_b; - skb_linearize(skb); - if (skb_headroom(skb) < SMSC75XX_TX_OVERHEAD) { struct sk_buff *skb2 = skb_copy_expand(skb, SMSC75XX_TX_OVERHEAD, 0, flags); diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c index 7f23ab9..e64e440 100644 --- a/drivers/net/via-rhine.c +++ b/drivers/net/via-rhine.c @@ -32,7 +32,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #define DRV_NAME "via-rhine" -#define DRV_VERSION "1.5.0" +#define DRV_VERSION "1.5.1" #define DRV_RELDATE "2010-10-09" @@ -1518,7 +1518,12 @@ static netdev_tx_t rhine_start_tx(struct sk_buff *skb, cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? 
skb->len : ETH_ZLEN)); if (unlikely(vlan_tx_tag_present(skb))) { - rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16); + u16 vid_pcp = vlan_tx_tag_get(skb); + + /* drop CFI/DEI bit, register needs VID and PCP */ + vid_pcp = (vid_pcp & VLAN_VID_MASK) | + ((vid_pcp & VLAN_PRIO_MASK) >> 1); + rp->tx_ring[entry].tx_status = cpu_to_le32((vid_pcp) << 16); /* request tagging */ rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000); } diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c index f685324..316a565 100644 --- a/drivers/net/virtio_net.c +++ b/drivers/net/virtio_net.c @@ -480,7 +480,7 @@ static int virtnet_poll(struct napi_struct *napi, int budget) { struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi); void *buf; - unsigned int len, received = 0; + unsigned int r, len, received = 0; again: while (received < budget && @@ -497,8 +497,9 @@ again: /* Out of packets? */ if (received < budget) { + r = virtqueue_enable_cb_prepare(vi->rvq); napi_complete(napi); - if (unlikely(!virtqueue_enable_cb(vi->rvq)) && + if (unlikely(virtqueue_poll(vi->rvq, r)) && napi_schedule_prep(napi)) { virtqueue_disable_cb(vi->rvq); __napi_schedule(napi); diff --git a/drivers/net/wan/dlci.c b/drivers/net/wan/dlci.c index 21b104d..af44b93 100644 --- a/drivers/net/wan/dlci.c +++ b/drivers/net/wan/dlci.c @@ -377,21 +377,37 @@ static int dlci_del(struct dlci_add *dlci) struct frad_local *flp; struct net_device *master, *slave; int err; + bool found = false; + + rtnl_lock(); /* validate slave device */ master = __dev_get_by_name(&init_net, dlci->devname); - if (!master) - return -ENODEV; + if (!master) { + err = -ENODEV; + goto out; + } + + list_for_each_entry(dlp, &dlci_devs, list) { + if (dlp->master == master) { + found = true; + break; + } + } + if (!found) { + err = -ENODEV; + goto out; + } if (netif_running(master)) { - return -EBUSY; + err = -EBUSY; + goto out; } dlp = netdev_priv(master); slave = dlp->slave; flp = netdev_priv(slave); - rtnl_lock(); err = (*flp->deassoc)(slave, master); if (!err) { list_del(&dlp->list); @@ -400,8 +416,8 @@ static int dlci_del(struct dlci_add *dlci) dev_put(slave); } +out: rtnl_unlock(); - return err; } diff --git a/drivers/net/wireless/ath/ath9k/ar9003_phy.c b/drivers/net/wireless/ath/ath9k/ar9003_phy.c index 892c48b..b8a26d2 100644 --- a/drivers/net/wireless/ath/ath9k/ar9003_phy.c +++ b/drivers/net/wireless/ath/ath9k/ar9003_phy.c @@ -1005,6 +1005,10 @@ static bool ar9003_hw_ani_control(struct ath_hw *ah, * is_on == 0 means MRC CCK is OFF (more noise imm) */ bool is_on = param ? 1 : 0; + + if (ah->caps.rx_chainmask == 1) + break; + REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, AR_PHY_MRC_CCK_ENABLE, is_on); REG_RMW_FIELD(ah, AR_PHY_MRC_CCK_CTRL, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_init.c b/drivers/net/wireless/ath/ath9k/htc_drv_init.c index 61e6d39..403d33f 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_init.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_init.c @@ -823,7 +823,7 @@ static int ath9k_init_firmware_version(struct ath9k_htc_priv *priv) * required version. 
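To make the via-rhine VLAN fix above concrete: the skb tag is a full 802.1Q TCI, with PCP in bits 15:13, the CFI/DEI bit in bit 12 and the VID in bits 11:0, while the Rhine register expects only the VID in bits 11:0 and the PCP in bits 14:12. For an example TCI of 0xB07B (PCP 5, DEI 1, VID 0x7B):

	(0xB07B & VLAN_VID_MASK)         = 0x007B
	((0xB07B & VLAN_PRIO_MASK) >> 1) = 0x5000
	vid_pcp                          = 0x507B

so the priority field moves down one bit position and the CFI/DEI bit is dropped.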
*/ if (priv->fw_version_major != MAJOR_VERSION_REQ || - priv->fw_version_minor != MINOR_VERSION_REQ) { + priv->fw_version_minor < MINOR_VERSION_REQ) { dev_err(priv->dev, "ath9k_htc: Please upgrade to FW version %d.%d\n", MAJOR_VERSION_REQ, MINOR_VERSION_REQ); return -EINVAL; @@ -873,6 +873,7 @@ static int ath9k_init_device(struct ath9k_htc_priv *priv, if (error != 0) goto err_rx; + ath9k_hw_disable(priv->ah); #ifdef CONFIG_MAC80211_LEDS /* must be initialized before ieee80211_register_hw */ priv->led_cdev.default_trigger = ieee80211_create_tpt_led_trigger(priv->hw, diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c index 2d81c70..3f2c88f 100644 --- a/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c +++ b/drivers/net/wireless/ath/ath9k/htc_drv_txrx.c @@ -448,6 +448,7 @@ static void ath9k_htc_tx_process(struct ath9k_htc_priv *priv, struct ieee80211_conf *cur_conf = &priv->hw->conf; bool txok; int slot; + int hdrlen, padsize; slot = strip_drv_header(priv, skb); if (slot < 0) { @@ -504,6 +505,15 @@ send_mac80211: ath9k_htc_tx_clear_slot(priv, slot); + /* Remove padding before handing frame back to mac80211 */ + hdrlen = ieee80211_get_hdrlen_from_skb(skb); + + padsize = hdrlen & 3; + if (padsize && skb->len > hdrlen + padsize) { + memmove(skb->data + padsize, skb->data, hdrlen); + skb_pull(skb, padsize); + } + /* Send status to mac80211 */ ieee80211_tx_status(priv->hw, skb); } diff --git a/drivers/net/wireless/ath/ath9k/init.c b/drivers/net/wireless/ath/ath9k/init.c index 5a9fd21..d6059a2 100644 --- a/drivers/net/wireless/ath/ath9k/init.c +++ b/drivers/net/wireless/ath/ath9k/init.c @@ -704,8 +704,7 @@ void ath9k_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw) BIT(NL80211_IFTYPE_ADHOC) | BIT(NL80211_IFTYPE_MESH_POINT); - if (AR_SREV_5416(sc->sc_ah)) - hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; + hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c index 633f962..806748a 100644 --- a/drivers/net/wireless/ath/ath9k/main.c +++ b/drivers/net/wireless/ath/ath9k/main.c @@ -1791,6 +1791,7 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, struct ath_common *common = ath9k_hw_common(sc->sc_ah); struct ath_node *an = (struct ath_node *) sta->drv_priv; struct ieee80211_key_conf ps_key = { }; + int key; ath_node_attach(sc, sta); @@ -1798,7 +1799,9 @@ static int ath9k_sta_add(struct ieee80211_hw *hw, vif->type != NL80211_IFTYPE_AP_VLAN) return 0; - an->ps_key = ath_key_config(common, vif, sta, &ps_key); + key = ath_key_config(common, vif, sta, &ps_key); + if (key > 0) + an->ps_key = key; return 0; } @@ -1815,6 +1818,7 @@ static void ath9k_del_ps_key(struct ath_softc *sc, return; ath_key_delete(common, &ps_key); + an->ps_key = 0; } static int ath9k_sta_remove(struct ieee80211_hw *hw, diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c index 6f6f100..e1f1971 100644 --- a/drivers/net/wireless/ath/ath9k/xmit.c +++ b/drivers/net/wireless/ath/ath9k/xmit.c @@ -2433,6 +2433,7 @@ void ath_tx_node_init(struct ath_softc *sc, struct ath_node *an) for (acno = 0, ac = &an->ac[acno]; acno < WME_NUM_AC; acno++, ac++) { ac->sched = false; + ac->clear_ps_filter = true; ac->txq = sc->tx.txq_map[acno]; INIT_LIST_HEAD(&ac->tid_q); } diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c index 7c2e09a..4db716b 100644 --- a/drivers/net/wireless/b43/main.c +++ 
b/drivers/net/wireless/b43/main.c @@ -2309,7 +2309,7 @@ static int b43_request_firmware(struct b43_wldev *dev) for (i = 0; i < B43_NR_FWTYPES; i++) { errmsg = ctx->errors[i]; if (strlen(errmsg)) - b43err(dev->wl, errmsg); + b43err(dev->wl, "%s", errmsg); } b43_print_fw_helptext(dev->wl, 1); err = -ENOENT; diff --git a/drivers/net/wireless/hostap/hostap_ioctl.c b/drivers/net/wireless/hostap/hostap_ioctl.c index 12de464..10ce1bc 100644 --- a/drivers/net/wireless/hostap/hostap_ioctl.c +++ b/drivers/net/wireless/hostap/hostap_ioctl.c @@ -521,9 +521,9 @@ static int prism2_ioctl_giwaplist(struct net_device *dev, data->length = prism2_ap_get_sta_qual(local, addr, qual, IW_MAX_AP, 1); - memcpy(extra, &addr, sizeof(struct sockaddr) * data->length); + memcpy(extra, addr, sizeof(struct sockaddr) * data->length); data->flags = 1; /* has quality information */ - memcpy(extra + sizeof(struct sockaddr) * data->length, &qual, + memcpy(extra + sizeof(struct sockaddr) * data->length, qual, sizeof(struct iw_quality) * data->length); kfree(addr); diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c index cd89fed..677e60b 100644 --- a/drivers/net/wireless/mwifiex/cmdevt.c +++ b/drivers/net/wireless/mwifiex/cmdevt.c @@ -1081,6 +1081,7 @@ mwifiex_process_hs_config(struct mwifiex_adapter *adapter) adapter->if_ops.wakeup(adapter); adapter->hs_activated = false; adapter->is_hs_configured = false; + adapter->is_suspended = false; mwifiex_hs_activated_event(mwifiex_get_priv(adapter, MWIFIEX_BSS_ROLE_ANY), false); } diff --git a/drivers/net/wireless/mwifiex/init.c b/drivers/net/wireless/mwifiex/init.c index 45dfc2b..3f1559e 100644 --- a/drivers/net/wireless/mwifiex/init.c +++ b/drivers/net/wireless/mwifiex/init.c @@ -561,14 +561,6 @@ mwifiex_shutdown_drv(struct mwifiex_adapter *adapter) return ret; } - /* cancel current command */ - if (adapter->curr_cmd) { - dev_warn(adapter->dev, "curr_cmd is still in processing\n"); - del_timer(&adapter->cmd_timer); - mwifiex_insert_cmd_to_free_q(adapter, adapter->curr_cmd); - adapter->curr_cmd = NULL; - } - /* shut down mwifiex */ dev_dbg(adapter->dev, "info: shutdown mwifiex...\n"); diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c index 3b1217f..61e152d 100644 --- a/drivers/net/wireless/mwifiex/sdio.c +++ b/drivers/net/wireless/mwifiex/sdio.c @@ -1387,8 +1387,8 @@ static int mwifiex_sdio_host_to_card(struct mwifiex_adapter *adapter, /* Allocate buffer and copy payload */ blk_size = MWIFIEX_SDIO_BLOCK_SIZE; buf_block_len = (pkt_len + blk_size - 1) / blk_size; - *(u16 *) &payload[0] = (u16) pkt_len; - *(u16 *) &payload[2] = type; + *(__le16 *)&payload[0] = cpu_to_le16((u16)pkt_len); + *(__le16 *)&payload[2] = cpu_to_le16(type); /* * This is SDIO specific header diff --git a/drivers/net/wireless/mwifiex/sta_ioctl.c b/drivers/net/wireless/mwifiex/sta_ioctl.c index d05907d..a677e7b 100644 --- a/drivers/net/wireless/mwifiex/sta_ioctl.c +++ b/drivers/net/wireless/mwifiex/sta_ioctl.c @@ -100,7 +100,7 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, } else { /* Multicast */ priv->curr_pkt_filter &= ~HostCmd_ACT_MAC_PROMISCUOUS_ENABLE; - if (mcast_list->mode == MWIFIEX_MULTICAST_MODE) { + if (mcast_list->mode == MWIFIEX_ALL_MULTI_MODE) { dev_dbg(priv->adapter->dev, "info: Enabling All Multicast!\n"); priv->curr_pkt_filter |= @@ -112,20 +112,11 @@ int mwifiex_request_set_multicast_list(struct mwifiex_private *priv, dev_dbg(priv->adapter->dev, "info: Set multicast list=%d\n", 
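An aside on the mwifiex SDIO hunk above: the four-byte header in front of the payload is laid out little-endian on the bus, which is why the fix stores it through __le16 and cpu_to_le16() instead of raw u16 writes that would come out byte-swapped on big-endian hosts. A minimal sketch, with an invented helper name:

static void fill_sdio_header(u8 *hdr, u16 pkt_len, u16 type)
{
	*(__le16 *)&hdr[0] = cpu_to_le16(pkt_len);	/* length, LE for the card */
	*(__le16 *)&hdr[2] = cpu_to_le16(type);		/* type, LE for the card */
}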
mcast_list->num_multicast_addr); - /* Set multicast addresses to firmware */ - if (old_pkt_filter == priv->curr_pkt_filter) { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } else { - /* Send request to firmware */ - ret = mwifiex_send_cmd_async(priv, - HostCmd_CMD_MAC_MULTICAST_ADR, - HostCmd_ACT_GEN_SET, 0, - mcast_list); - } + /* Send multicast addresses to firmware */ + ret = mwifiex_send_cmd_async(priv, + HostCmd_CMD_MAC_MULTICAST_ADR, + HostCmd_ACT_GEN_SET, 0, + mcast_list); } } } diff --git a/drivers/net/wireless/p54/p54usb.c b/drivers/net/wireless/p54/p54usb.c index 99e7e7f..0de0837 100644 --- a/drivers/net/wireless/p54/p54usb.c +++ b/drivers/net/wireless/p54/p54usb.c @@ -82,6 +82,7 @@ static struct usb_device_id p54u_table[] = { {USB_DEVICE(0x06a9, 0x000e)}, /* Westell 802.11g USB (A90-211WG-01) */ {USB_DEVICE(0x06b9, 0x0121)}, /* Thomson SpeedTouch 121g */ {USB_DEVICE(0x0707, 0xee13)}, /* SMC 2862W-G version 2 */ + {USB_DEVICE(0x07aa, 0x0020)}, /* Corega WLUSB2GTST USB */ {USB_DEVICE(0x0803, 0x4310)}, /* Zoom 4410a */ {USB_DEVICE(0x083a, 0x4521)}, /* Siemens Gigaset USB Adapter 54 version 2 */ {USB_DEVICE(0x083a, 0x4531)}, /* T-Com Sinus 154 data II */ diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c index d44ce30..62b92c4 100644 --- a/drivers/net/wireless/rt2x00/rt2800lib.c +++ b/drivers/net/wireless/rt2x00/rt2800lib.c @@ -1861,6 +1861,13 @@ static int rt2800_get_gain_calibration_delta(struct rt2x00_dev *rt2x00dev) int i; /* + * First check if temperature compensation is supported. + */ + rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC_CONF1, &eeprom); + if (!rt2x00_get_field16(eeprom, EEPROM_NIC_CONF1_EXTERNAL_TX_ALC)) + return 0; + + /* * Read TSSI boundaries for temperature compensation from * the EEPROM. 
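A note on the rt2800, rt61 and rt73 hunks below: the rt2x00 channel array lists the fourteen 2.4 GHz channels first, so the 5 GHz channels occupy indexes 14 and up, while the EEPROM per-channel TX-power table for the A band starts at index 0; hence the corrected tx_power[i - 14] indexing in each of those loops.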
* @@ -4075,8 +4082,8 @@ int rt2800_probe_hw_mode(struct rt2x00_dev *rt2x00dev) default_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2); for (i = 14; i < spec->num_channels; i++) { - info[i].default_power1 = default_power1[i]; - info[i].default_power2 = default_power2[i]; + info[i].default_power1 = default_power1[i - 14]; + info[i].default_power2 = default_power2[i - 14]; } } diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.c b/drivers/net/wireless/rt2x00/rt2x00pci.c index 17148bb..10fe07d 100644 --- a/drivers/net/wireless/rt2x00/rt2x00pci.c +++ b/drivers/net/wireless/rt2x00/rt2x00pci.c @@ -52,8 +52,8 @@ int rt2x00pci_regbusy_read(struct rt2x00_dev *rt2x00dev, udelay(REGISTER_BUSY_DELAY); } - ERROR(rt2x00dev, "Indirect register access failed: " - "offset=0x%.08x, value=0x%.08x\n", offset, *reg); + printk_once(KERN_ERR "%s() Indirect register access failed: " + "offset=0x%.08x, value=0x%.08x\n", __func__, offset, *reg); *reg = ~0; return 0; diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c index 76f26ad..2d7c59f 100644 --- a/drivers/net/wireless/rt2x00/rt2x00queue.c +++ b/drivers/net/wireless/rt2x00/rt2x00queue.c @@ -848,13 +848,8 @@ void rt2x00queue_index_inc(struct queue_entry *entry, enum queue_index index) spin_unlock_irqrestore(&queue->index_lock, irqflags); } -void rt2x00queue_pause_queue(struct data_queue *queue) +void rt2x00queue_pause_queue_nocheck(struct data_queue *queue) { - if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || - !test_bit(QUEUE_STARTED, &queue->flags) || - test_and_set_bit(QUEUE_PAUSED, &queue->flags)) - return; - switch (queue->qid) { case QID_AC_VO: case QID_AC_VI: @@ -870,6 +865,15 @@ void rt2x00queue_pause_queue(struct data_queue *queue) break; } } +void rt2x00queue_pause_queue(struct data_queue *queue) +{ + if (!test_bit(DEVICE_STATE_PRESENT, &queue->rt2x00dev->flags) || + !test_bit(QUEUE_STARTED, &queue->flags) || + test_and_set_bit(QUEUE_PAUSED, &queue->flags)) + return; + + rt2x00queue_pause_queue_nocheck(queue); +} EXPORT_SYMBOL_GPL(rt2x00queue_pause_queue); void rt2x00queue_unpause_queue(struct data_queue *queue) @@ -931,7 +935,7 @@ void rt2x00queue_stop_queue(struct data_queue *queue) return; } - rt2x00queue_pause_queue(queue); + rt2x00queue_pause_queue_nocheck(queue); queue->rt2x00dev->ops->lib->stop_queue(queue); diff --git a/drivers/net/wireless/rt2x00/rt61pci.c b/drivers/net/wireless/rt2x00/rt61pci.c index 17de24e..dd31588 100644 --- a/drivers/net/wireless/rt2x00/rt61pci.c +++ b/drivers/net/wireless/rt2x00/rt61pci.c @@ -2830,7 +2830,8 @@ static int rt61pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev) tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; - info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + info[i].default_power1 = + TXPOWER_FROM_DEV(tx_power[i - 14]); } } diff --git a/drivers/net/wireless/rt2x00/rt73usb.c b/drivers/net/wireless/rt2x00/rt73usb.c index 1a06231..3b68155 100644 --- a/drivers/net/wireless/rt2x00/rt73usb.c +++ b/drivers/net/wireless/rt2x00/rt73usb.c @@ -2167,7 +2167,8 @@ static int rt73usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev) tx_power = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A_START); for (i = 14; i < spec->num_channels; i++) { info[i].max_power = MAX_TXPOWER; - info[i].default_power1 = TXPOWER_FROM_DEV(tx_power[i]); + info[i].default_power1 = + TXPOWER_FROM_DEV(tx_power[i - 14]); } } diff --git a/drivers/net/wireless/rtlwifi/wifi.h 
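[Note] The three rt2x00 hunks above (rt2800lib, rt61pci, rt73usb) fix the same off-by-range bug: the channel loop starts at index 14 (the first 5 GHz entry) while the EEPROM-derived power array starts at 0, so it has to be indexed with i - 14. A standalone sketch of that indexing with invented sizes, not the driver's tables:

#include <stdio.h>

#define NUM_CHANNELS 17                 /* 14 legacy 2.4 GHz + 3 made-up 5 GHz */

int main(void)
{
        int default_power_a[NUM_CHANNELS - 14] = { 20, 18, 17 };  /* 5 GHz only */
        int channel_power[NUM_CHANNELS];
        int i;

        for (i = 0; i < 14; i++)
                channel_power[i] = 30;                  /* 2.4 GHz path (elided) */

        for (i = 14; i < NUM_CHANNELS; i++)
                channel_power[i] = default_power_a[i - 14];   /* not [i]! */

        for (i = 14; i < NUM_CHANNELS; i++)
                printf("channel index %d -> power %d\n", i, channel_power[i]);
        return 0;
}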
b/drivers/net/wireless/rtlwifi/wifi.h index 693395e..2e3daea 100644 --- a/drivers/net/wireless/rtlwifi/wifi.h +++ b/drivers/net/wireless/rtlwifi/wifi.h @@ -1587,7 +1587,7 @@ struct rtl_priv { that it points to the data allocated beyond this structure like: rtl_pci_priv or rtl_usb_priv */ - u8 priv[0]; + u8 priv[0] __aligned(sizeof(void *)); }; #define rtl_priv(hw) (((struct rtl_priv *)(hw)->priv)) diff --git a/drivers/net/wireless/zd1201.c b/drivers/net/wireless/zd1201.c index 415eec4..af792a4 100644 --- a/drivers/net/wireless/zd1201.c +++ b/drivers/net/wireless/zd1201.c @@ -98,10 +98,12 @@ static int zd1201_fw_upload(struct usb_device *dev, int apfw) goto exit; err = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), 0x4, - USB_DIR_IN | 0x40, 0,0, &ret, sizeof(ret), ZD1201_FW_TIMEOUT); + USB_DIR_IN | 0x40, 0, 0, buf, sizeof(ret), ZD1201_FW_TIMEOUT); if (err < 0) goto exit; + memcpy(&ret, buf, sizeof(ret)); + if (ret & 0x80) { err = -EIO; goto exit; diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c index 65200af..d3645f6 100644 --- a/drivers/of/fdt.c +++ b/drivers/of/fdt.c @@ -389,6 +389,8 @@ static void __unflatten_device_tree(struct boot_param_header *blob, mem = (unsigned long) dt_alloc(size + 4, __alignof__(struct device_node)); + memset((void *)mem, 0, size); + ((__be32 *)mem)[size / 4] = cpu_to_be32(0xdeadbeef); pr_debug(" unflattening %lx...\n", mem); diff --git a/drivers/parisc/iommu-helpers.h b/drivers/parisc/iommu-helpers.h index a9c46cc..8c33491 100644 --- a/drivers/parisc/iommu-helpers.h +++ b/drivers/parisc/iommu-helpers.h @@ -1,3 +1,5 @@ +#include <linux/prefetch.h> + /** * iommu_fill_pdir - Insert coalesced scatter/gather chunks into the I/O Pdir. * @ioc: The I/O Controller. diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile index 094308e..9eff33b 100644 --- a/drivers/pci/Makefile +++ b/drivers/pci/Makefile @@ -46,6 +46,7 @@ obj-$(CONFIG_UNICORE32) += setup-bus.o setup-irq.o obj-$(CONFIG_PARISC) += setup-bus.o obj-$(CONFIG_SUPERH) += setup-bus.o setup-irq.o obj-$(CONFIG_PPC) += setup-bus.o +obj-$(CONFIG_FRV) += setup-bus.o obj-$(CONFIG_MIPS) += setup-bus.o setup-irq.o obj-$(CONFIG_X86_VISWS) += setup-irq.o obj-$(CONFIG_MN10300) += setup-bus.o diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c index ae762ec..68baf17 100644 --- a/drivers/pci/intel-iommu.c +++ b/drivers/pci/intel-iommu.c @@ -853,56 +853,54 @@ static int dma_pte_clear_range(struct dmar_domain *domain, return order; } +static void dma_pte_free_level(struct dmar_domain *domain, int level, + struct dma_pte *pte, unsigned long pfn, + unsigned long start_pfn, unsigned long last_pfn) +{ + pfn = max(start_pfn, pfn); + pte = &pte[pfn_level_offset(pfn, level)]; + + do { + unsigned long level_pfn; + struct dma_pte *level_pte; + + if (!dma_pte_present(pte) || dma_pte_superpage(pte)) + goto next; + + level_pfn = pfn & level_mask(level - 1); + level_pte = phys_to_virt(dma_pte_addr(pte)); + + if (level > 2) + dma_pte_free_level(domain, level - 1, level_pte, + level_pfn, start_pfn, last_pfn); + + /* If range covers entire pagetable, free it */ + if (!(start_pfn > level_pfn || + last_pfn < level_pfn + level_size(level))) { + dma_clear_pte(pte); + domain_flush_cache(domain, pte, sizeof(*pte)); + free_pgtable_page(level_pte); + } +next: + pfn += level_size(level); + } while (!first_pte_in_page(++pte) && pfn <= last_pfn); +} + /* free page table pages. 
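[Note] The rtlwifi change above forces the trailing priv[] area onto pointer alignment, because the bus-specific private structures (rtl_pci_priv, rtl_usb_priv) that hold pointers are placed directly in that area. A rough standalone illustration with a hypothetical layout and the GCC-style attribute the kernel macro expands to:

#include <stdio.h>
#include <stddef.h>

struct wifi_priv {                       /* stand-in for struct rtl_priv */
        int  locks;
        char band;
        /* trailing storage handed to the bus-specific private struct;
         * the explicit alignment mirrors __aligned(sizeof(void *)) */
        unsigned char priv[] __attribute__((aligned(sizeof(void *))));
};

struct pci_priv {                        /* stand-in for rtl_pci_priv */
        void *pdev;
        long  irq;
};

int main(void)
{
        printf("priv[] offset = %zu, needs %zu-byte alignment for pointers\n",
               offsetof(struct wifi_priv, priv), sizeof(void *));
        return 0;
}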
last level pte should already be cleared */ static void dma_pte_free_pagetable(struct dmar_domain *domain, unsigned long start_pfn, unsigned long last_pfn) { int addr_width = agaw_to_width(domain->agaw) - VTD_PAGE_SHIFT; - struct dma_pte *first_pte, *pte; - int total = agaw_to_level(domain->agaw); - int level; - unsigned long tmp; - int large_page = 2; BUG_ON(addr_width < BITS_PER_LONG && start_pfn >> addr_width); BUG_ON(addr_width < BITS_PER_LONG && last_pfn >> addr_width); BUG_ON(start_pfn > last_pfn); /* We don't need lock here; nobody else touches the iova range */ - level = 2; - while (level <= total) { - tmp = align_to_level(start_pfn, level); - - /* If we can't even clear one PTE at this level, we're done */ - if (tmp + level_size(level) - 1 > last_pfn) - return; - - do { - large_page = level; - first_pte = pte = dma_pfn_level_pte(domain, tmp, level, &large_page); - if (large_page > level) - level = large_page + 1; - if (!pte) { - tmp = align_to_level(tmp + 1, level + 1); - continue; - } - do { - if (dma_pte_present(pte)) { - free_pgtable_page(phys_to_virt(dma_pte_addr(pte))); - dma_clear_pte(pte); - } - pte++; - tmp += level_size(level); - } while (!first_pte_in_page(pte) && - tmp + level_size(level) - 1 <= last_pfn); + dma_pte_free_level(domain, agaw_to_level(domain->agaw), + domain->pgd, 0, start_pfn, last_pfn); - domain_flush_cache(domain, first_pte, - (void *)pte - (void *)first_pte); - - } while (tmp && tmp + level_size(level) - 1 <= last_pfn); - level++; - } /* free pgd */ if (start_pfn == 0 && last_pfn == DOMAIN_MAX_PFN(domain->gaw)) { free_pgtable_page(domain->pgd); diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index a9b1249..f097df2 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -1122,6 +1122,8 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_ATI, PCI_DEVICE_ID_ATI_IXP700_SATA, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_HUDSON2_SATA_IDE, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); +DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AMD, 0x7900, quirk_amd_ide_mode); /* * Serverworks CSB5 IDE does not fully support native mode diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c index fb33fa4..cb0ff91 100644 --- a/drivers/pcmcia/at91_cf.c +++ b/drivers/pcmcia/at91_cf.c @@ -99,9 +99,9 @@ static int at91_cf_get_status(struct pcmcia_socket *s, u_int *sp) int vcc = cf->board->vcc_pin; *sp = SS_DETECT | SS_3VCARD; - if (!rdy || gpio_get_value(rdy)) + if (!rdy || gpio_get_value(cf->board->irq_pin)) *sp |= SS_READY; - if (!vcc || gpio_get_value(vcc)) + if (!vcc || gpio_get_value(cf->board->vcc_pin)) *sp |= SS_POWERON; } else *sp = 0; diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c index 26c5b11..aaba423 100644 --- a/drivers/platform/x86/thinkpad_acpi.c +++ b/drivers/platform/x86/thinkpad_acpi.c @@ -8656,6 +8656,13 @@ static int __must_check __init get_thinkpad_model_data( tp->model_str = kstrdup(s, GFP_KERNEL); if (!tp->model_str) return -ENOMEM; + } else { + s = dmi_get_system_info(DMI_BIOS_VENDOR); + if (s && !(strnicmp(s, "Lenovo", 6))) { + tp->model_str = kstrdup(s, GFP_KERNEL); + if (!tp->model_str) + return -ENOMEM; + } } s = dmi_get_system_info(DMI_PRODUCT_NAME); diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c index 
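[Note] The intel-iommu rewrite above replaces the iterative page-table sweep with a recursive dma_pte_free_level() that walks down to the leaves and frees each level bottom-up once the requested range covers it. A toy sketch of the shape of that recursion, with invented sizes and without the partial-coverage check the real code performs:

#include <stdlib.h>

#define ENTRIES 8        /* entries per level; the real tables use 512 */

struct pt {
        struct pt *slot[ENTRIES];
};

/* Recursively free child tables in [first, last]; leaves are freed before
 * their parents. */
static void free_level(struct pt *tbl, int level, int first, int last)
{
        int i;

        for (i = first; i <= last && i < ENTRIES; i++) {
                struct pt *child = tbl->slot[i];

                if (!child)
                        continue;
                if (level > 2)
                        free_level(child, level - 1, 0, ENTRIES - 1);
                free(child);
                tbl->slot[i] = NULL;
        }
}

int main(void)
{
        struct pt *root = calloc(1, sizeof(*root));

        if (!root)
                return 1;
        root->slot[3] = calloc(1, sizeof(*root));

        free_level(root, 2, 0, ENTRIES - 1);
        free(root);
        return 0;
}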
911e75c..cd61178 100644 --- a/drivers/rtc/rtc-cmos.c +++ b/drivers/rtc/rtc-cmos.c @@ -805,9 +805,8 @@ static int cmos_suspend(struct device *dev) mask = RTC_IRQMASK; tmp &= ~mask; CMOS_WRITE(tmp, RTC_CONTROL); + hpet_mask_rtc_irq_bit(mask); - /* shut down hpet emulation - we don't need it for alarm */ - hpet_mask_rtc_irq_bit(RTC_PIE|RTC_AIE|RTC_UIE); cmos_checkintr(cmos, tmp); } spin_unlock_irq(&rtc_lock); @@ -872,6 +871,7 @@ static int cmos_resume(struct device *dev) rtc_update_irq(cmos->rtc, 1, mask); tmp &= ~RTC_AIE; hpet_mask_rtc_irq_bit(RTC_AIE); + hpet_rtc_timer_init(); } while (mask & RTC_AIE); spin_unlock_irq(&rtc_lock); } diff --git a/drivers/rtc/rtc-pcf2123.c b/drivers/rtc/rtc-pcf2123.c index 71bab0e..646f6fe 100644 --- a/drivers/rtc/rtc-pcf2123.c +++ b/drivers/rtc/rtc-pcf2123.c @@ -263,6 +263,7 @@ static int __devinit pcf2123_probe(struct spi_device *spi) if (!(rxbuf[0] & 0x20)) { dev_err(&spi->dev, "chip not found\n"); + ret = -ENODEV; goto kfree_exit; } diff --git a/drivers/rtc/rtc-rv3029c2.c b/drivers/rtc/rtc-rv3029c2.c index ea09ff2..5317d94 100644 --- a/drivers/rtc/rtc-rv3029c2.c +++ b/drivers/rtc/rtc-rv3029c2.c @@ -310,7 +310,7 @@ static int rv3029c2_rtc_i2c_set_alarm(struct i2c_client *client, dev_dbg(&client->dev, "alarm IRQ armed\n"); } else { /* disable AIE irq */ - ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 1); + ret = rv3029c2_rtc_i2c_alarm_set_irq(client, 0); if (ret) return ret; diff --git a/drivers/s390/char/sclp_cmd.c b/drivers/s390/char/sclp_cmd.c index be55fb2..a0ce2a9 100644 --- a/drivers/s390/char/sclp_cmd.c +++ b/drivers/s390/char/sclp_cmd.c @@ -507,6 +507,8 @@ static void __init sclp_add_standby_memory(void) add_memory_merged(0); } +#define MEM_SCT_SIZE (1UL << SECTION_SIZE_BITS) + static void __init insert_increment(u16 rn, int standby, int assigned) { struct memory_increment *incr, *new_incr; @@ -519,7 +521,7 @@ static void __init insert_increment(u16 rn, int standby, int assigned) new_incr->rn = rn; new_incr->standby = standby; if (!standby) - new_incr->usecount = 1; + new_incr->usecount = rzm > MEM_SCT_SIZE ? rzm/MEM_SCT_SIZE : 1; last_rn = 0; prev = &sclp_mem_list; list_for_each_entry(incr, &sclp_mem_list, list) { diff --git a/drivers/s390/scsi/zfcp_aux.c b/drivers/s390/scsi/zfcp_aux.c index 61da2cd..fc669c0 100644 --- a/drivers/s390/scsi/zfcp_aux.c +++ b/drivers/s390/scsi/zfcp_aux.c @@ -3,7 +3,7 @@ * * Module interface and handling of zfcp data structures. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 
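[Note] The rtc-pcf2123 hunk above is the classic "error path without an error code" bug: the function jumps to its cleanup label but leaves ret at its previous value, so the failed probe looks successful. A minimal sketch of the pattern with a hypothetical probe function:

#include <stdio.h>
#include <stdlib.h>
#include <errno.h>

static int probe_chip(int chip_present)
{
        int ret = 0;
        char *buf = malloc(32);

        if (!buf)
                return -ENOMEM;

        if (!chip_present) {
                ret = -ENODEV;   /* the missing assignment added by the patch */
                goto out_free;
        }

        /* ... normal setup would go here ... */

out_free:
        free(buf);
        return ret;
}

int main(void)
{
        printf("probe: %d\n", probe_chip(0));   /* prints a negative errno (-ENODEV) */
        return 0;
}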
2002, 2013 */ /* @@ -23,6 +23,7 @@ * Christof Schmitt * Martin Petermann * Sven Schuetz + * Steffen Maier */ #define KMSG_COMPONENT "zfcp" @@ -414,6 +415,8 @@ struct zfcp_adapter *zfcp_adapter_enqueue(struct ccw_device *ccw_device) adapter->dma_parms.max_segment_size = ZFCP_QDIO_SBALE_LEN; adapter->ccw_device->dev.dma_parms = &adapter->dma_parms; + adapter->stat_read_buf_num = FSF_STATUS_READS_RECOM; + if (!zfcp_scsi_adapter_register(adapter)) return adapter; diff --git a/drivers/s390/scsi/zfcp_erp.c b/drivers/s390/scsi/zfcp_erp.c index e1b4f80..5c87270 100644 --- a/drivers/s390/scsi/zfcp_erp.c +++ b/drivers/s390/scsi/zfcp_erp.c @@ -102,10 +102,13 @@ static void zfcp_erp_action_dismiss_port(struct zfcp_port *port) if (atomic_read(&port->status) & ZFCP_STATUS_COMMON_ERP_INUSE) zfcp_erp_action_dismiss(&port->erp_action); - else - shost_for_each_device(sdev, port->adapter->scsi_host) + else { + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) zfcp_erp_action_dismiss_lun(sdev); + spin_unlock(port->adapter->scsi_host->host_lock); + } } static void zfcp_erp_action_dismiss_adapter(struct zfcp_adapter *adapter) @@ -592,9 +595,11 @@ static void _zfcp_erp_lun_reopen_all(struct zfcp_port *port, int clear, { struct scsi_device *sdev; - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock(port->adapter->scsi_host->host_lock); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) _zfcp_erp_lun_reopen(sdev, clear, id, 0); + spin_unlock(port->adapter->scsi_host->host_lock); } static void zfcp_erp_strategy_followup_failed(struct zfcp_erp_action *act) @@ -1435,8 +1440,10 @@ void zfcp_erp_set_adapter_status(struct zfcp_adapter *adapter, u32 mask) atomic_set_mask(common_mask, &port->status); read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, adapter->scsi_host) + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** @@ -1470,11 +1477,13 @@ void zfcp_erp_clear_adapter_status(struct zfcp_adapter *adapter, u32 mask) } read_unlock_irqrestore(&adapter->port_list_lock, flags); - shost_for_each_device(sdev, adapter->scsi_host) { + spin_lock_irqsave(adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, adapter->scsi_host) { atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(adapter->scsi_host->host_lock, flags); } /** @@ -1488,16 +1497,19 @@ void zfcp_erp_set_port_status(struct zfcp_port *port, u32 mask) { struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; + unsigned long flags; atomic_set_mask(mask, &port->status); if (!common_mask) return; - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) atomic_set_mask(common_mask, &sdev_to_zfcp(sdev)->status); + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** @@ -1512,6 +1524,7 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) struct scsi_device *sdev; u32 common_mask = mask & ZFCP_COMMON_FLAGS; u32 clear_counter = mask & ZFCP_STATUS_COMMON_ERP_FAILED; + unsigned long 
flags; atomic_clear_mask(mask, &port->status); @@ -1521,13 +1534,15 @@ void zfcp_erp_clear_port_status(struct zfcp_port *port, u32 mask) if (clear_counter) atomic_set(&port->erp_counter, 0); - shost_for_each_device(sdev, port->adapter->scsi_host) + spin_lock_irqsave(port->adapter->scsi_host->host_lock, flags); + __shost_for_each_device(sdev, port->adapter->scsi_host) if (sdev_to_zfcp(sdev)->port == port) { atomic_clear_mask(common_mask, &sdev_to_zfcp(sdev)->status); if (clear_counter) atomic_set(&sdev_to_zfcp(sdev)->erp_counter, 0); } + spin_unlock_irqrestore(port->adapter->scsi_host->host_lock, flags); } /** diff --git a/drivers/s390/scsi/zfcp_fsf.c b/drivers/s390/scsi/zfcp_fsf.c index 6e73bfe..ceb7f7c 100644 --- a/drivers/s390/scsi/zfcp_fsf.c +++ b/drivers/s390/scsi/zfcp_fsf.c @@ -3,7 +3,7 @@ * * Implementation of FSF commands. * - * Copyright IBM Corporation 2002, 2010 + * Copyright IBM Corp. 2002, 2013 */ #define KMSG_COMPONENT "zfcp" @@ -455,11 +455,8 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) fc_host_port_name(shost) = nsp->fl_wwpn; fc_host_node_name(shost) = nsp->fl_wwnn; - fc_host_port_id(shost) = ntoh24(bottom->s_id); - fc_host_speed(shost) = bottom->fc_link_speed; fc_host_supported_classes(shost) = FC_COS_CLASS2 | FC_COS_CLASS3; - adapter->hydra_version = bottom->adapter_type; adapter->timer_ticks = bottom->timer_interval & ZFCP_FSF_TIMER_INT_MASK; adapter->stat_read_buf_num = max(bottom->status_read_buf_num, (u16)FSF_STATUS_READS_RECOM); @@ -467,6 +464,18 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) if (fc_host_permanent_port_name(shost) == -1) fc_host_permanent_port_name(shost) = fc_host_port_name(shost); + zfcp_scsi_set_prot(adapter); + + /* no error return above here, otherwise must fix call chains */ + /* do not evaluate invalid fields */ + if (req->qtcb->header.fsf_status == FSF_EXCHANGE_CONFIG_DATA_INCOMPLETE) + return 0; + + fc_host_port_id(shost) = ntoh24(bottom->s_id); + fc_host_speed(shost) = bottom->fc_link_speed; + + adapter->hydra_version = bottom->adapter_type; + switch (bottom->fc_topology) { case FSF_TOPO_P2P: adapter->peer_d_id = ntoh24(bottom->peer_d_id); @@ -488,8 +497,6 @@ static int zfcp_fsf_exchange_config_evaluate(struct zfcp_fsf_req *req) return -EIO; } - zfcp_scsi_set_prot(adapter); - return 0; } @@ -534,8 +541,14 @@ static void zfcp_fsf_exchange_config_data_handler(struct zfcp_fsf_req *req) fc_host_port_type(shost) = FC_PORTTYPE_UNKNOWN; adapter->hydra_version = 0; + /* avoids adapter shutdown to be able to recognize + * events such as LINK UP */ + atomic_set_mask(ZFCP_STATUS_ADAPTER_XCONFIG_OK, + &adapter->status); zfcp_fsf_link_down_info_eval(req, &qtcb->header.fsf_status_qual.link_down_info); + if (zfcp_fsf_exchange_config_evaluate(req)) + return; break; default: zfcp_erp_adapter_shutdown(adapter, 0, "fsecdh3"); diff --git a/drivers/s390/scsi/zfcp_qdio.c b/drivers/s390/scsi/zfcp_qdio.c index d9c40ea..f3922a8 100644 --- a/drivers/s390/scsi/zfcp_qdio.c +++ b/drivers/s390/scsi/zfcp_qdio.c @@ -199,11 +199,9 @@ int zfcp_qdio_sbals_from_sg(struct zfcp_qdio *qdio, struct zfcp_qdio_req *q_req, static int zfcp_qdio_sbal_check(struct zfcp_qdio *qdio) { - spin_lock_irq(&qdio->req_q_lock); if (atomic_read(&qdio->req_q_free) || !(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return 1; - spin_unlock_irq(&qdio->req_q_lock); return 0; } @@ -221,9 +219,8 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) { long ret; - spin_unlock_irq(&qdio->req_q_lock); - ret = 
wait_event_interruptible_timeout(qdio->req_q_wq, - zfcp_qdio_sbal_check(qdio), 5 * HZ); + ret = wait_event_interruptible_lock_irq_timeout(qdio->req_q_wq, + zfcp_qdio_sbal_check(qdio), qdio->req_q_lock, 5 * HZ); if (!(atomic_read(&qdio->adapter->status) & ZFCP_STATUS_ADAPTER_QDIOUP)) return -EIO; @@ -237,7 +234,6 @@ int zfcp_qdio_sbal_get(struct zfcp_qdio *qdio) zfcp_erp_adapter_reopen(qdio->adapter, 0, "qdsbg_1"); } - spin_lock_irq(&qdio->req_q_lock); return -EIO; } diff --git a/drivers/scsi/bnx2fc/bnx2fc_io.c b/drivers/scsi/bnx2fc/bnx2fc_io.c index 454c72c..ca4be24 100644 --- a/drivers/scsi/bnx2fc/bnx2fc_io.c +++ b/drivers/scsi/bnx2fc/bnx2fc_io.c @@ -1618,7 +1618,7 @@ static void bnx2fc_parse_fcp_rsp(struct bnx2fc_cmd *io_req, fcp_sns_len = SCSI_SENSE_BUFFERSIZE; } - memset(sc_cmd->sense_buffer, 0, sizeof(sc_cmd->sense_buffer)); + memset(sc_cmd->sense_buffer, 0, SCSI_SENSE_BUFFERSIZE); if (fcp_sns_len) memcpy(sc_cmd->sense_buffer, rq_data, fcp_sns_len); diff --git a/drivers/scsi/esp_scsi.c b/drivers/scsi/esp_scsi.c index 394ed9e..4aa30d8 100644 --- a/drivers/scsi/esp_scsi.c +++ b/drivers/scsi/esp_scsi.c @@ -530,7 +530,7 @@ static int esp_need_to_nego_sync(struct esp_target_data *tp) static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { - if (!ent->tag[0]) { + if (!ent->orig_tag[0]) { /* Non-tagged, slot already taken? */ if (lp->non_tagged_cmd) return -EBUSY; @@ -564,9 +564,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, return -EBUSY; } - BUG_ON(lp->tagged_cmds[ent->tag[1]]); + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]); - lp->tagged_cmds[ent->tag[1]] = ent; + lp->tagged_cmds[ent->orig_tag[1]] = ent; lp->num_tagged++; return 0; @@ -575,9 +575,9 @@ static int esp_alloc_lun_tag(struct esp_cmd_entry *ent, static void esp_free_lun_tag(struct esp_cmd_entry *ent, struct esp_lun_data *lp) { - if (ent->tag[0]) { - BUG_ON(lp->tagged_cmds[ent->tag[1]] != ent); - lp->tagged_cmds[ent->tag[1]] = NULL; + if (ent->orig_tag[0]) { + BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent); + lp->tagged_cmds[ent->orig_tag[1]] = NULL; lp->num_tagged--; } else { BUG_ON(lp->non_tagged_cmd != ent); @@ -667,6 +667,8 @@ static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp) ent->tag[0] = 0; ent->tag[1] = 0; } + ent->orig_tag[0] = ent->tag[0]; + ent->orig_tag[1] = ent->tag[1]; if (esp_alloc_lun_tag(ent, lp) < 0) continue; diff --git a/drivers/scsi/esp_scsi.h b/drivers/scsi/esp_scsi.h index 28e22ac..cd68805 100644 --- a/drivers/scsi/esp_scsi.h +++ b/drivers/scsi/esp_scsi.h @@ -271,6 +271,7 @@ struct esp_cmd_entry { #define ESP_CMD_FLAG_AUTOSENSE 0x04 /* Doing automatic REQUEST_SENSE */ u8 tag[2]; + u8 orig_tag[2]; u8 status; u8 message; diff --git a/drivers/scsi/megaraid/megaraid_sas_base.c b/drivers/scsi/megaraid/megaraid_sas_base.c index 931cb11..5eac650 100644 --- a/drivers/scsi/megaraid/megaraid_sas_base.c +++ b/drivers/scsi/megaraid/megaraid_sas_base.c @@ -4746,10 +4746,12 @@ megasas_mgmt_fw_ioctl(struct megasas_instance *instance, sense, sense_handle); } - for (i = 0; i < ioc->sge_count && kbuff_arr[i]; i++) { - dma_free_coherent(&instance->pdev->dev, - kern_sge32[i].length, - kbuff_arr[i], kern_sge32[i].phys_addr); + for (i = 0; i < ioc->sge_count; i++) { + if (kbuff_arr[i]) + dma_free_coherent(&instance->pdev->dev, + kern_sge32[i].length, + kbuff_arr[i], + kern_sge32[i].phys_addr); } megasas_return_cmd(instance, cmd); diff --git a/drivers/scsi/mpt2sas/mpt2sas_scsih.c b/drivers/scsi/mpt2sas/mpt2sas_scsih.c index aa51195..d6e8353 100644 --- 
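[Note] The bnx2fc hunk above fixes a sizeof-on-a-pointer mistake: sense_buffer is a pointer member, so sizeof() yields the pointer size (8 bytes on 64-bit) rather than the length of the sense buffer it points to, and most of the buffer was left unzeroed. A standalone sketch with hypothetical names and a locally defined buffer length:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define SENSE_BUFFERSIZE 96

struct cmd {
        unsigned char *sense_buffer;     /* points to a SENSE_BUFFERSIZE area */
};

int main(void)
{
        struct cmd c;

        c.sense_buffer = malloc(SENSE_BUFFERSIZE);
        if (!c.sense_buffer)
                return 1;

        printf("sizeof(c.sense_buffer) = %zu, real length = %d\n",
               sizeof(c.sense_buffer), SENSE_BUFFERSIZE);

        /* memset(c.sense_buffer, 0, sizeof(c.sense_buffer));  clears only 8 bytes */
        memset(c.sense_buffer, 0, SENSE_BUFFERSIZE);        /* clears all 96 */

        free(c.sense_buffer);
        return 0;
}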
a/drivers/scsi/mpt2sas/mpt2sas_scsih.c +++ b/drivers/scsi/mpt2sas/mpt2sas_scsih.c @@ -3740,11 +3740,7 @@ _scsih_qcmd_lck(struct scsi_cmnd *scmd, void (*done)(struct scsi_cmnd *)) else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; } else -/* MPI Revision I (UNIT = 0xA) - removed MPI2_SCSIIO_CONTROL_UNTAGGED */ -/* mpi_control |= MPI2_SCSIIO_CONTROL_UNTAGGED; - */ - mpi_control |= (0x500); - + mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; } else mpi_control |= MPI2_SCSIIO_CONTROL_SIMPLEQ; /* Make sure Device is not raid volume. diff --git a/drivers/scsi/nsp32.c b/drivers/scsi/nsp32.c index f6a50c9..bfb05b8 100644 --- a/drivers/scsi/nsp32.c +++ b/drivers/scsi/nsp32.c @@ -2927,7 +2927,7 @@ static void nsp32_do_bus_reset(nsp32_hw_data *data) * reset SCSI bus */ nsp32_write1(base, SCSI_BUS_CONTROL, BUSCTL_RST); - udelay(RESET_HOLD_TIME); + mdelay(RESET_HOLD_TIME / 1000); nsp32_write1(base, SCSI_BUS_CONTROL, 0); for(i = 0; i < 5; i++) { intrdat = nsp32_read2(base, IRQ_STATUS); /* dummy read */ diff --git a/drivers/scsi/osd/osd_uld.c b/drivers/scsi/osd/osd_uld.c index d4ed9eb..caac1b2 100644 --- a/drivers/scsi/osd/osd_uld.c +++ b/drivers/scsi/osd/osd_uld.c @@ -465,7 +465,7 @@ static int osd_probe(struct device *dev) oud->class_dev.class = &osd_uld_class; oud->class_dev.parent = dev; oud->class_dev.release = __remove; - error = dev_set_name(&oud->class_dev, disk->disk_name); + error = dev_set_name(&oud->class_dev, "%s", disk->disk_name); if (error) { OSD_ERR("dev_set_name failed => %d\n", error); goto err_put_cdev; diff --git a/drivers/scsi/qla2xxx/qla_iocb.c b/drivers/scsi/qla2xxx/qla_iocb.c index 7bac3cd..d2fb950 100644 --- a/drivers/scsi/qla2xxx/qla_iocb.c +++ b/drivers/scsi/qla2xxx/qla_iocb.c @@ -422,6 +422,8 @@ qla2x00_start_scsi(srb_t *sp) __constant_cpu_to_le16(CF_SIMPLE_TAG); break; } + } else { + cmd_pkt->control_flags = __constant_cpu_to_le16(CF_SIMPLE_TAG); } /* Load SCSI command packet. */ @@ -1099,11 +1101,11 @@ qla24xx_build_scsi_crc_2_iocbs(srb_t *sp, struct cmd_type_crc_2 *cmd_pkt, fcp_cmnd->task_attribute = TSK_ORDERED; break; default: - fcp_cmnd->task_attribute = 0; + fcp_cmnd->task_attribute = TSK_SIMPLE; break; } } else { - fcp_cmnd->task_attribute = 0; + fcp_cmnd->task_attribute = TSK_SIMPLE; } cmd_pkt->fcp_rsp_dseg_len = 0; /* Let response come in status iocb */ @@ -1305,7 +1307,12 @@ qla24xx_start_scsi(srb_t *sp) case ORDERED_QUEUE_TAG: cmd_pkt->task = TSK_ORDERED; break; + default: + cmd_pkt->task = TSK_SIMPLE; + break; } + } else { + cmd_pkt->task = TSK_SIMPLE; } /* Load SCSI command packet. 
*/ diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c index 3fd16d7..acc53e2 100644 --- a/drivers/scsi/scsi_transport_iscsi.c +++ b/drivers/scsi/scsi_transport_iscsi.c @@ -1747,7 +1747,7 @@ iscsi_if_rx(struct sk_buff *skb) break; err = iscsi_if_send_reply(group, nlh->nlmsg_seq, nlh->nlmsg_type, 0, 0, ev, sizeof(*ev)); - } while (err < 0 && err != -ECONNREFUSED); + } while (err < 0 && err != -ECONNREFUSED && err != -ESRCH); skb_pull(skb, rlen); } mutex_unlock(&rx_queue_mutex); diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 515ec6d..cc3943a 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -138,6 +138,7 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, char *buffer_data; struct scsi_mode_data data; struct scsi_sense_hdr sshdr; + static const char temp[] = "temporary "; int len; if (sdp->type != TYPE_DISK) @@ -146,6 +147,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, * it's not worth the risk */ return -EINVAL; + if (strncmp(buf, temp, sizeof(temp) - 1) == 0) { + buf += sizeof(temp) - 1; + sdkp->cache_override = 1; + } else { + sdkp->cache_override = 0; + } + for (i = 0; i < ARRAY_SIZE(sd_cache_types); i++) { len = strlen(sd_cache_types[i]); if (strncmp(sd_cache_types[i], buf, len) == 0 && @@ -158,6 +166,13 @@ sd_store_cache_type(struct device *dev, struct device_attribute *attr, return -EINVAL; rcd = ct & 0x01 ? 1 : 0; wce = ct & 0x02 ? 1 : 0; + + if (sdkp->cache_override) { + sdkp->WCE = wce; + sdkp->RCD = rcd; + return count; + } + if (scsi_mode_sense(sdp, 0x08, 8, buffer, sizeof(buffer), SD_TIMEOUT, SD_MAX_RETRIES, &data, NULL)) return -EINVAL; @@ -626,10 +641,17 @@ static int scsi_setup_flush_cmnd(struct scsi_device *sdp, struct request *rq) static void sd_unprep_fn(struct request_queue *q, struct request *rq) { + struct scsi_cmnd *SCpnt = rq->special; + if (rq->cmd_flags & REQ_DISCARD) { free_page((unsigned long)rq->buffer); rq->buffer = NULL; } + if (SCpnt->cmnd != rq->cmd) { + mempool_free(SCpnt->cmnd, sd_cdb_pool); + SCpnt->cmnd = NULL; + SCpnt->cmd_len = 0; + } } /** @@ -1436,21 +1458,6 @@ static int sd_done(struct scsi_cmnd *SCpnt) if (rq_data_dir(SCpnt->request) == READ && scsi_prot_sg_count(SCpnt)) sd_dif_complete(SCpnt, good_bytes); - if (scsi_host_dif_capable(sdkp->device->host, sdkp->protection_type) - == SD_DIF_TYPE2_PROTECTION && SCpnt->cmnd != SCpnt->request->cmd) { - - /* We have to print a failed command here as the - * extended CDB gets freed before scsi_io_completion() - * is called. 
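[Note] The sd.c cache_type change above accepts an optional "temporary " prefix: it is matched with strncmp() and skipped with sizeof(prefix) - 1, where the -1 drops the terminating NUL of the string literal. A small standalone sketch of that parsing with a hypothetical store function:

#include <stdio.h>
#include <string.h>

static void store_cache_type(const char *buf)
{
        static const char temp[] = "temporary ";
        int cache_override = 0;

        if (strncmp(buf, temp, sizeof(temp) - 1) == 0) {
                buf += sizeof(temp) - 1;
                cache_override = 1;
        }
        printf("override=%d type=\"%s\"\n", cache_override, buf);
}

int main(void)
{
        store_cache_type("write back");
        store_cache_type("temporary write back");
        return 0;
}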
- */ - if (result) - scsi_print_command(SCpnt); - - mempool_free(SCpnt->cmnd, sd_cdb_pool); - SCpnt->cmnd = NULL; - SCpnt->cmd_len = 0; - } - return good_bytes; } @@ -2036,6 +2043,10 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) int old_rcd = sdkp->RCD; int old_dpofua = sdkp->DPOFUA; + + if (sdkp->cache_override) + return; + first_len = 4; if (sdp->skip_ms_page_8) { if (sdp->type == TYPE_RBC) @@ -2124,14 +2135,9 @@ sd_read_cache_type(struct scsi_disk *sdkp, unsigned char *buffer) } } - if (modepage == 0x3F) { - sd_printk(KERN_ERR, sdkp, "No Caching mode page " - "present\n"); - goto defaults; - } else if ((buffer[offset] & 0x3f) != modepage) { - sd_printk(KERN_ERR, sdkp, "Got wrong page\n"); - goto defaults; - } + sd_printk(KERN_ERR, sdkp, "No Caching mode page found\n"); + goto defaults; + Page_found: if (modepage == 8) { sdkp->WCE = ((buffer[offset + 2] & 0x04) != 0); @@ -2517,6 +2523,7 @@ static void sd_probe_async(void *data, async_cookie_t cookie) sdkp->capacity = 0; sdkp->media_present = 1; sdkp->write_prot = 0; + sdkp->cache_override = 0; sdkp->WCE = 0; sdkp->RCD = 0; sdkp->ATO = 0; diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 6ad798b..812f1ba 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -70,6 +70,7 @@ struct scsi_disk { u8 protection_type;/* Data Integrity Field */ u8 provisioning_mode; unsigned ATO : 1; /* state of disk ATO bit */ + unsigned cache_override : 1; /* temp override of WCE,RCD */ unsigned WCE : 1; /* state of disk WCE bit */ unsigned RCD : 1; /* state of disk RCD bit, unused */ unsigned DPOFUA : 1; /* state of disk DPOFUA bit */ diff --git a/drivers/staging/comedi/comedi_fops.c b/drivers/staging/comedi/comedi_fops.c index ee33cba..40b40ed 100644 --- a/drivers/staging/comedi/comedi_fops.c +++ b/drivers/staging/comedi/comedi_fops.c @@ -1370,6 +1370,7 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, void *file) { struct comedi_subdevice *s; + int ret; if (arg >= dev->n_subdevices) return -EINVAL; @@ -1386,7 +1387,11 @@ static int do_cancel_ioctl(struct comedi_device *dev, unsigned int arg, if (s->busy != file) return -EBUSY; - return do_cancel(dev, s); + ret = do_cancel(dev, s); + if (comedi_get_subdevice_runflags(s) & SRF_USER) + wake_up_interruptible(&s->async->wait_head); + + return ret; } /* diff --git a/drivers/staging/comedi/drivers/dt282x.c b/drivers/staging/comedi/drivers/dt282x.c index 8cea9dc..00a285c 100644 --- a/drivers/staging/comedi/drivers/dt282x.c +++ b/drivers/staging/comedi/drivers/dt282x.c @@ -406,8 +406,9 @@ struct dt282x_private { } \ udelay(5); \ } \ - if (_i) \ + if (_i) { \ b \ + } \ } while (0) static int dt282x_attach(struct comedi_device *dev, diff --git a/drivers/staging/comedi/drivers/ni_65xx.c b/drivers/staging/comedi/drivers/ni_65xx.c index 403fc09..8b564ad 100644 --- a/drivers/staging/comedi/drivers/ni_65xx.c +++ b/drivers/staging/comedi/drivers/ni_65xx.c @@ -411,29 +411,25 @@ static int ni_65xx_dio_insn_bits(struct comedi_device *dev, struct comedi_subdevice *s, struct comedi_insn *insn, unsigned int *data) { - unsigned base_bitfield_channel; - const unsigned max_ports_per_bitfield = 5; + int base_bitfield_channel; unsigned read_bits = 0; - unsigned j; + int last_port_offset = ni_65xx_port_by_channel(s->n_chan - 1); + int port_offset; + if (insn->n != 2) return -EINVAL; base_bitfield_channel = CR_CHAN(insn->chanspec); - for (j = 0; j < max_ports_per_bitfield; ++j) { - const unsigned port_offset = - ni_65xx_port_by_channel(base_bitfield_channel) + j; - const unsigned 
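[Note] The dt282x hunk above adds braces inside a macro: when a macro parameter expands to more than one statement, an unbraced "if (x) b" guards only the first statement. A toy macro (not the comedi one) showing why the braces matter:

#include <stdio.h>

#define ON_TIMEOUT_BUGGY(cond, b)  if (cond) b
#define ON_TIMEOUT_FIXED(cond, b)  do { if (cond) { b } } while (0)

int main(void)
{
        int timed_out = 0;

        ON_TIMEOUT_FIXED(timed_out,
                printf("timeout!\n");
                printf("resetting device\n");
        );
        /* With the buggy form, "resetting device" would print even though
         * timed_out is 0, because only the first statement is guarded. */
        printf("done\n");
        return 0;
}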
port = - sprivate(s)->base_port + port_offset; - unsigned base_port_channel; + for (port_offset = ni_65xx_port_by_channel(base_bitfield_channel); + port_offset <= last_port_offset; port_offset++) { + unsigned port = sprivate(s)->base_port + port_offset; + int base_port_channel = port_offset * ni_65xx_channels_per_port; unsigned port_mask, port_data, port_read_bits; - int bitshift; - if (port >= ni_65xx_total_num_ports(board(dev))) + int bitshift = base_port_channel - base_bitfield_channel; + + if (bitshift >= 32) break; - base_port_channel = port_offset * ni_65xx_channels_per_port; port_mask = data[0]; port_data = data[1]; - bitshift = base_port_channel - base_bitfield_channel; - if (bitshift >= 32 || bitshift <= -32) - break; if (bitshift > 0) { port_mask >>= bitshift; port_data >>= bitshift; diff --git a/drivers/staging/hv/tools/hv_kvp_daemon.c b/drivers/staging/hv/tools/hv_kvp_daemon.c index 33f0f1c..1468a01 100644 --- a/drivers/staging/hv/tools/hv_kvp_daemon.c +++ b/drivers/staging/hv/tools/hv_kvp_daemon.c @@ -378,14 +378,18 @@ int main(void) pfd.fd = fd; while (1) { + struct sockaddr *addr_p = (struct sockaddr *) &addr; + socklen_t addr_l = sizeof(addr); pfd.events = POLLIN; pfd.revents = 0; poll(&pfd, 1, -1); - len = recv(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0); + len = recvfrom(fd, kvp_recv_buffer, sizeof(kvp_recv_buffer), 0, + addr_p, &addr_l); - if (len < 0) { - syslog(LOG_ERR, "recv failed; error:%d", len); + if (len < 0 || addr.nl_pid) { + syslog(LOG_ERR, "recvfrom failed; pid:%u error:%d %s", + addr.nl_pid, errno, strerror(errno)); close(fd); return -1; } diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c index 51b5adf..df8ea25 100644 --- a/drivers/staging/vt6656/hostap.c +++ b/drivers/staging/vt6656/hostap.c @@ -153,7 +153,7 @@ static int hostap_disable_hostapd(PSDevice pDevice, int rtnl_locked) DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Netdevice %s unregistered\n", pDevice->dev->name, pDevice->apdev->name); } - kfree(pDevice->apdev); + free_netdev(pDevice->apdev); pDevice->apdev = NULL; pDevice->bEnable8021x = FALSE; pDevice->bEnableHostWEP = FALSE; diff --git a/drivers/staging/vt6656/main_usb.c b/drivers/staging/vt6656/main_usb.c index 5fb56a6..0b8f93b 100644 --- a/drivers/staging/vt6656/main_usb.c +++ b/drivers/staging/vt6656/main_usb.c @@ -1228,6 +1228,8 @@ device_release_WPADEV(pDevice); memset(pMgmt->abyCurrBSSID, 0, 6); pMgmt->eCurrState = WMAC_STATE_IDLE; + pDevice->flags &= ~DEVICE_FLAGS_OPENED; + device_free_tx_bufs(pDevice); device_free_rx_bufs(pDevice); device_free_int_bufs(pDevice); @@ -1239,7 +1241,6 @@ device_release_WPADEV(pDevice); usb_free_urb(pDevice->pInterruptURB); BSSvClearNodeDBTable(pDevice, 0); - pDevice->flags &=(~DEVICE_FLAGS_OPENED); DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "device_close2 \n"); diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c index c967405..527bda4 100644 --- a/drivers/target/target_core_alua.c +++ b/drivers/target/target_core_alua.c @@ -351,6 +351,7 @@ static inline int core_alua_state_standby( case REPORT_LUNS: case RECEIVE_DIAGNOSTIC: case SEND_DIAGNOSTIC: + return 0; case MAINTENANCE_IN: switch (cdb[1]) { case MI_REPORT_TARGET_PGS: @@ -393,6 +394,7 @@ static inline int core_alua_state_unavailable( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case MAINTENANCE_IN: switch (cdb[1]) { case MI_REPORT_TARGET_PGS: @@ -433,6 +435,7 @@ static inline int core_alua_state_transition( switch (cdb[0]) { case INQUIRY: case REPORT_LUNS: + return 0; case 
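[Note] The target_core_alua hunks above add "return 0;" before the next case label: without it the allowed-command cases fall through into MAINTENANCE_IN handling and the wrong policy is applied. A heavily simplified toy filter illustrating the fall-through hazard, not the ALUA state machine itself:

#include <stdio.h>

enum { CMD_INQUIRY, CMD_REPORT_LUNS, CMD_MAINTENANCE_IN, CMD_WRITE };

static int cmd_allowed_in_standby(int cmd)
{
        switch (cmd) {
        case CMD_INQUIRY:
        case CMD_REPORT_LUNS:
                return 0;                 /* allowed; the patch adds this return */
        case CMD_MAINTENANCE_IN:
                /* only some subcommands allowed; treated as rejected here */
                return -1;
        default:
                return -1;                /* everything else rejected */
        }
}

int main(void)
{
        printf("INQUIRY:  %d\n", cmd_allowed_in_standby(CMD_INQUIRY));
        printf("WRITE:    %d\n", cmd_allowed_in_standby(CMD_WRITE));
        return 0;
}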
MAINTENANCE_IN: switch (cdb[1]) { case MI_REPORT_TARGET_PGS: diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c index 0558401..f8c49ab 100644 --- a/drivers/target/target_core_cdb.c +++ b/drivers/target/target_core_cdb.c @@ -117,11 +117,12 @@ target_emulate_inquiry_std(struct se_cmd *cmd) return 0; } - snprintf((unsigned char *)&buf[8], 8, "LIO-ORG"); - snprintf((unsigned char *)&buf[16], 16, "%s", - &DEV_T10_WWN(dev)->model[0]); - snprintf((unsigned char *)&buf[32], 4, "%s", - &DEV_T10_WWN(dev)->revision[0]); + memcpy(&buf[8], "LIO-ORG ", 8); + memset(&buf[16], 0x20, 16); + memcpy(&buf[16], dev->se_sub_dev->t10_wwn.model, + min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.model), 16)); + memcpy(&buf[32], dev->se_sub_dev->t10_wwn.revision, + min_t(size_t, strlen(dev->se_sub_dev->t10_wwn.revision), 4)); buf[4] = 31; /* Set additional length to 31 */ return 0; } diff --git a/drivers/thermal/thermal_sys.c b/drivers/thermal/thermal_sys.c index 0b1c82a..ba6b71b 100644 --- a/drivers/thermal/thermal_sys.c +++ b/drivers/thermal/thermal_sys.c @@ -1319,6 +1319,7 @@ static int __init thermal_init(void) idr_destroy(&thermal_cdev_idr); mutex_destroy(&thermal_idr_lock); mutex_destroy(&thermal_list_lock); + return result; } result = genetlink_init(); return result; diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c index d7164bf..687cfe3 100644 --- a/drivers/tty/n_tty.c +++ b/drivers/tty/n_tty.c @@ -1531,6 +1531,14 @@ static void n_tty_set_termios(struct tty_struct *tty, struct ktermios *old) tty->real_raw = 0; } n_tty_set_room(tty); + /* + * Fix tty hang when I_IXON(tty) is cleared, but the tty + * been stopped by STOP_CHAR(tty) before it. + */ + if (!I_IXON(tty) && old && (old->c_iflag & IXON) && !tty->flow_stopped) { + start_tty(tty); + } + /* The termios change make the tty ready for I/O */ wake_up_interruptible(&tty->write_wait); wake_up_interruptible(&tty->read_wait); diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c index d19b879..4735928 100644 --- a/drivers/tty/pty.c +++ b/drivers/tty/pty.c @@ -669,6 +669,9 @@ static int ptmx_open(struct inode *inode, struct file *filp) nonseekable_open(inode, filp); + /* We refuse fsnotify events on ptmx, since it's a shared resource */ + filp->f_mode |= FMODE_NONOTIFY; + retval = tty_alloc_file(filp); if (retval) return retval; diff --git a/drivers/tty/serial/mxs-auart.c b/drivers/tty/serial/mxs-auart.c index 5b3d063..ab7d11e 100644 --- a/drivers/tty/serial/mxs-auart.c +++ b/drivers/tty/serial/mxs-auart.c @@ -374,11 +374,18 @@ static void mxs_auart_settermios(struct uart_port *u, static irqreturn_t mxs_auart_irq_handle(int irq, void *context) { - u32 istatus, istat; + u32 istat; struct mxs_auart_port *s = context; u32 stat = readl(s->port.membase + AUART_STAT); - istatus = istat = readl(s->port.membase + AUART_INTR); + istat = readl(s->port.membase + AUART_INTR); + + /* ack irq */ + writel(istat & (AUART_INTR_RTIS + | AUART_INTR_TXIS + | AUART_INTR_RXIS + | AUART_INTR_CTSMIS), + s->port.membase + AUART_INTR_CLR); if (istat & AUART_INTR_CTSMIS) { uart_handle_cts_change(&s->port, stat & AUART_STAT_CTS); @@ -397,12 +404,6 @@ static irqreturn_t mxs_auart_irq_handle(int irq, void *context) istat &= ~AUART_INTR_TXIS; } - writel(istatus & (AUART_INTR_RTIS - | AUART_INTR_TXIS - | AUART_INTR_RXIS - | AUART_INTR_CTSMIS), - s->port.membase + AUART_INTR_CLR); - return IRQ_HANDLED; } @@ -542,7 +543,7 @@ auart_console_write(struct console *co, const char *str, unsigned int count) struct mxs_auart_port *s; struct uart_port *port; unsigned int 
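[Note] The target_core_cdb change above swaps snprintf() for memset() plus bounded memcpy() because SCSI INQUIRY vendor/model/revision fields are fixed-width and space-padded, while snprintf() NUL-terminates and truncates. A standalone sketch of building such a field, with hypothetical buffer contents:

#include <stdio.h>
#include <string.h>

static void fill_inquiry(unsigned char *buf, const char *model)
{
        size_t len = strlen(model);

        memcpy(&buf[8], "LIO-ORG ", 8);          /* 8-byte vendor id */
        memset(&buf[16], 0x20, 16);              /* space-pad the model field */
        memcpy(&buf[16], model, len < 16 ? len : 16);
}

int main(void)
{
        unsigned char buf[36] = { 0 };

        fill_inquiry(buf, "FILEIO");
        printf("model field: \"%.16s\"\n", (const char *)&buf[16]);
        return 0;
}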
old_ctrl0, old_ctrl2; - unsigned int to = 1000; + unsigned int to = 20000; if (co->index > MXS_AUART_PORTS || co->index < 0) return; @@ -563,18 +564,23 @@ auart_console_write(struct console *co, const char *str, unsigned int count) uart_console_write(port, str, count, mxs_auart_console_putchar); - /* - * Finally, wait for transmitter to become empty - * and restore the TCR - */ + /* Finally, wait for transmitter to become empty ... */ while (readl(port->membase + AUART_STAT) & AUART_STAT_BUSY) { + udelay(1); if (!to--) break; - udelay(1); } - writel(old_ctrl0, port->membase + AUART_CTRL0); - writel(old_ctrl2, port->membase + AUART_CTRL2); + /* + * ... and restore the TCR if we waited long enough for the transmitter + * to be idle. This might keep the transmitter enabled although it is + * unused, but that is better than to disable it while it is still + * transmitting. + */ + if (!(readl(port->membase + AUART_STAT) & AUART_STAT_BUSY)) { + writel(old_ctrl0, port->membase + AUART_CTRL0); + writel(old_ctrl2, port->membase + AUART_CTRL2); + } clk_disable(s->clk); } diff --git a/drivers/tty/serial/pch_uart.c b/drivers/tty/serial/pch_uart.c index 73038ba..78c26ca 100644 --- a/drivers/tty/serial/pch_uart.c +++ b/drivers/tty/serial/pch_uart.c @@ -935,22 +935,37 @@ static unsigned int dma_handle_tx(struct eg20t_port *priv) static void pch_uart_err_ir(struct eg20t_port *priv, unsigned int lsr) { u8 fcr = ioread8(priv->membase + UART_FCR); + struct uart_port *port = &priv->port; + struct tty_struct *tty = tty_port_tty_get(&port->state->port); + char *error_msg[5] = {}; + int i = 0; /* Reset FIFO */ fcr |= UART_FCR_CLEAR_RCVR; iowrite8(fcr, priv->membase + UART_FCR); if (lsr & PCH_UART_LSR_ERR) - dev_err(&priv->pdev->dev, "Error data in FIFO\n"); + error_msg[i++] = "Error data in FIFO\n"; - if (lsr & UART_LSR_FE) - dev_err(&priv->pdev->dev, "Framing Error\n"); + if (lsr & UART_LSR_FE) { + port->icount.frame++; + error_msg[i++] = " Framing Error\n"; + } - if (lsr & UART_LSR_PE) - dev_err(&priv->pdev->dev, "Parity Error\n"); + if (lsr & UART_LSR_PE) { + port->icount.parity++; + error_msg[i++] = " Parity Error\n"; + } - if (lsr & UART_LSR_OE) - dev_err(&priv->pdev->dev, "Overrun Error\n"); + if (lsr & UART_LSR_OE) { + port->icount.overrun++; + error_msg[i++] = " Overrun Error\n"; + } + + if (tty == NULL) { + for (i = 0; error_msg[i] != NULL; i++) + dev_err(&priv->pdev->dev, error_msg[i]); + } } static irqreturn_t pch_uart_interrupt(int irq, void *dev_id) diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c index dca63e8..f512dc4 100644 --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c @@ -1920,6 +1920,8 @@ int uart_suspend_port(struct uart_driver *drv, struct uart_port *uport) mutex_unlock(&port->mutex); return 0; } + put_device(tty_dev); + if (console_suspend_enabled || !uart_console(uport)) uport->suspended = 1; @@ -1985,9 +1987,11 @@ int uart_resume_port(struct uart_driver *drv, struct uart_port *uport) disable_irq_wake(uport->irq); uport->irq_wake = 0; } + put_device(tty_dev); mutex_unlock(&port->mutex); return 0; } + put_device(tty_dev); uport->suspended = 0; /* diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c index b44aef0..cf32a5c 100644 --- a/drivers/tty/tty_io.c +++ b/drivers/tty/tty_io.c @@ -939,6 +939,14 @@ void start_tty(struct tty_struct *tty) EXPORT_SYMBOL(start_tty); +/* We limit tty time update visibility to every 8 seconds or so. 
*/ +static void tty_update_time(struct timespec *time) +{ + unsigned long sec = get_seconds() & ~7; + if ((long)(sec - time->tv_sec) > 0) + time->tv_sec = sec; +} + /** * tty_read - read method for tty device files * @file: pointer to tty file @@ -975,8 +983,10 @@ static ssize_t tty_read(struct file *file, char __user *buf, size_t count, else i = -EIO; tty_ldisc_deref(ld); + if (i > 0) - inode->i_atime = current_fs_time(inode->i_sb); + tty_update_time(&inode->i_atime); + return i; } @@ -1079,7 +1089,7 @@ static inline ssize_t do_tty_write( } if (written) { struct inode *inode = file->f_path.dentry->d_inode; - inode->i_mtime = current_fs_time(inode->i_sb); + tty_update_time(&inode->i_mtime); ret = written; } out: diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index a845f8b..9497171 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -686,7 +686,8 @@ static int cxacru_cm_get_array(struct cxacru_data *instance, enum cxacru_cm_requ { int ret, len; __le32 *buf; - int offb, offd; + int offb; + unsigned int offd; const int stride = CMD_PACKET_SIZE / (4 * 2) - 1; int buflen = ((size - 1) / stride + 1 + size * 2) * 4; diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index c84b455..0b85e2c 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -171,6 +171,7 @@ skip_error: static void wdm_int_callback(struct urb *urb) { int rv = 0; + int responding; int status = urb->status; struct wdm_device *desc; struct usb_ctrlrequest *req; @@ -244,8 +245,8 @@ static void wdm_int_callback(struct urb *urb) desc->response->transfer_flags |= URB_NO_TRANSFER_DMA_MAP; spin_lock(&desc->iuspin); clear_bit(WDM_READ, &desc->flags); - set_bit(WDM_RESPONDING, &desc->flags); - if (!test_bit(WDM_DISCONNECTING, &desc->flags) + responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); + if (!responding && !test_bit(WDM_DISCONNECTING, &desc->flags) && !test_bit(WDM_SUSPENDING, &desc->flags)) { rv = usb_submit_urb(desc->response, GFP_ATOMIC); dev_dbg(&desc->intf->dev, "%s: usb_submit_urb %d", @@ -635,16 +636,20 @@ static void wdm_rxwork(struct work_struct *work) { struct wdm_device *desc = container_of(work, struct wdm_device, rxwork); unsigned long flags; - int rv; + int rv = 0; + int responding; spin_lock_irqsave(&desc->iuspin, flags); if (test_bit(WDM_DISCONNECTING, &desc->flags)) { spin_unlock_irqrestore(&desc->iuspin, flags); } else { + responding = test_and_set_bit(WDM_RESPONDING, &desc->flags); spin_unlock_irqrestore(&desc->iuspin, flags); - rv = usb_submit_urb(desc->response, GFP_KERNEL); + if (!responding) + rv = usb_submit_urb(desc->response, GFP_KERNEL); if (rv < 0 && rv != -EPERM) { spin_lock_irqsave(&desc->iuspin, flags); + clear_bit(WDM_RESPONDING, &desc->flags); if (!test_bit(WDM_DISCONNECTING, &desc->flags)) schedule_work(&desc->rxwork); spin_unlock_irqrestore(&desc->iuspin, flags); diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c index 26678ca..c29f521 100644 --- a/drivers/usb/core/config.c +++ b/drivers/usb/core/config.c @@ -424,7 +424,8 @@ static int usb_parse_configuration(struct usb_device *dev, int cfgidx, memcpy(&config->desc, buffer, USB_DT_CONFIG_SIZE); if (config->desc.bDescriptorType != USB_DT_CONFIG || - config->desc.bLength < USB_DT_CONFIG_SIZE) { + config->desc.bLength < USB_DT_CONFIG_SIZE || + config->desc.bLength > size) { dev_err(ddev, "invalid descriptor for config index %d: " "type = 0x%X, length = %d\n", cfgidx, config->desc.bDescriptorType, config->desc.bLength); diff --git 
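[Note] The tty_update_time() helper added above rate-limits inode time updates: the current time is rounded down to an 8-second bucket with "& ~7" and the stored value is only bumped when that bucket moves forward, so back-to-back reads and writes do not dirty the inode every time. A userspace stand-in using time() instead of get_seconds():

#include <stdio.h>
#include <time.h>

static void update_time(time_t *stored)
{
        time_t sec = time(NULL) & ~(time_t)7;   /* 8-second granularity */

        if (sec > *stored)
                *stored = sec;
}

int main(void)
{
        time_t atime = 0;

        update_time(&atime);
        printf("atime bucket: %ld\n", (long)atime);
        update_time(&atime);                     /* no change within 8 s */
        printf("atime bucket: %ld\n", (long)atime);
        return 0;
}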
a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c index 4d1f996..b08fcd2 100644 --- a/drivers/usb/core/devio.c +++ b/drivers/usb/core/devio.c @@ -642,7 +642,25 @@ static int check_ctrlrecip(struct dev_state *ps, unsigned int requesttype, index &= 0xff; switch (requesttype & USB_RECIP_MASK) { case USB_RECIP_ENDPOINT: + if ((index & ~USB_DIR_IN) == 0) + return 0; ret = findintfep(ps->dev, index); + if (ret < 0) { + /* + * Some not fully compliant Win apps seem to get + * index wrong and have the endpoint number here + * rather than the endpoint address (with the + * correct direction). Win does let this through, + * so we'll not reject it here but leave it to + * the device to not break KVM. But we warn. + */ + ret = findintfep(ps->dev, index ^ 0x80); + if (ret >= 0) + dev_info(&ps->dev->dev, + "%s: process %i (%s) requesting ep %02x but needs %02x\n", + __func__, task_pid_nr(current), + current->comm, index, index ^ 0x80); + } if (ret >= 0) ret = checkintf(ps, ret); break; diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 998812c..4719885 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -469,6 +469,15 @@ resubmit: static inline int hub_clear_tt_buffer (struct usb_device *hdev, u16 devinfo, u16 tt) { + /* Need to clear both directions for control ep */ + if (((devinfo >> 11) & USB_ENDPOINT_XFERTYPE_MASK) == + USB_ENDPOINT_XFER_CONTROL) { + int status = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), + HUB_CLEAR_TT_BUFFER, USB_RT_PORT, + devinfo ^ 0x8000, tt, NULL, 0, 1000); + if (status) + return status; + } return usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), HUB_CLEAR_TT_BUFFER, USB_RT_PORT, devinfo, tt, NULL, 0, 1000); diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c index a8d7856..f9140a5 100644 --- a/drivers/usb/core/quirks.c +++ b/drivers/usb/core/quirks.c @@ -100,6 +100,12 @@ static const struct usb_device_id usb_quirk_list[] = { { USB_DEVICE(0x04d8, 0x000c), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, + /* CarrolTouch 4000U */ + { USB_DEVICE(0x04e7, 0x0009), .driver_info = USB_QUIRK_RESET_RESUME }, + + /* CarrolTouch 4500U */ + { USB_DEVICE(0x04e7, 0x0030), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Samsung Android phone modem - ID conflict with SPH-I500 */ { USB_DEVICE(0x04e8, 0x6601), .driver_info = USB_QUIRK_CONFIG_INTF_STRINGS }, @@ -110,6 +116,9 @@ static const struct usb_device_id usb_quirk_list[] = { /* Edirol SD-20 */ { USB_DEVICE(0x0582, 0x0027), .driver_info = USB_QUIRK_RESET_RESUME }, + /* Alcor Micro Corp. 
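[Note] The devio.c workaround above tolerates applications that pass an endpoint number without the direction bit: after a failed lookup it retries with bit 7 (USB_DIR_IN, 0x80) toggled and logs which address the caller should have used. A toy sketch of that retry, with a pretend endpoint table:

#include <stdio.h>

static int find_ep(unsigned int addr)
{
        /* pretend only endpoint 0x81 (IN endpoint 1) exists */
        return addr == 0x81 ? 0 : -1;
}

int main(void)
{
        unsigned int index = 0x01;              /* caller forgot the IN bit */
        int ret = find_ep(index);

        if (ret < 0) {
                ret = find_ep(index ^ 0x80);    /* retry with direction flipped */
                if (ret >= 0)
                        printf("requested ep %02x but needs %02x\n",
                               index, index ^ 0x80);
        }
        return ret < 0;
}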
Hub */ + { USB_DEVICE(0x058f, 0x9254), .driver_info = USB_QUIRK_RESET_RESUME }, + /* appletouch */ { USB_DEVICE(0x05ac, 0x021a), .driver_info = USB_QUIRK_RESET_RESUME }, diff --git a/drivers/usb/gadget/f_mass_storage.c b/drivers/usb/gadget/f_mass_storage.c index bc2f624..8b69b3e 100644 --- a/drivers/usb/gadget/f_mass_storage.c +++ b/drivers/usb/gadget/f_mass_storage.c @@ -513,6 +513,7 @@ static int fsg_set_halt(struct fsg_dev *fsg, struct usb_ep *ep) /* Caller must hold fsg->lock */ static void wakeup_thread(struct fsg_common *common) { + smp_wmb(); /* ensure the write of bh->state is complete */ /* Tell the main thread that something has happened */ common->thread_wakeup_needed = 1; if (common->thread_task) @@ -730,6 +731,7 @@ static int sleep_thread(struct fsg_common *common) } __set_current_state(TASK_RUNNING); common->thread_wakeup_needed = 0; + smp_rmb(); /* ensure the latest bh->state is visible */ return rc; } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 53192f2..8949b23 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -236,7 +236,7 @@ static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) } static const unsigned char -max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 125, 25 }; +max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; /* carryover low/fullspeed bandwidth that crosses uframe boundries */ static inline void carryover_tt_bandwidth(unsigned short tt_usecs[8]) diff --git a/drivers/usb/host/uhci-hub.c b/drivers/usb/host/uhci-hub.c index 045cde4..850723f 100644 --- a/drivers/usb/host/uhci-hub.c +++ b/drivers/usb/host/uhci-hub.c @@ -221,7 +221,8 @@ static int uhci_hub_status_data(struct usb_hcd *hcd, char *buf) /* auto-stop if nothing connected for 1 second */ if (any_ports_active(uhci)) uhci->rh_state = UHCI_RH_RUNNING; - else if (time_after_eq(jiffies, uhci->auto_stop_time)) + else if (time_after_eq(jiffies, uhci->auto_stop_time) && + !uhci->wait_for_hp) suspend_rh(uhci, UHCI_RH_AUTO_STOPPED); break; diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c index af65322..45ff7ba 100644 --- a/drivers/usb/host/xhci-mem.c +++ b/drivers/usb/host/xhci-mem.c @@ -273,6 +273,10 @@ static struct xhci_container_ctx *xhci_alloc_container_ctx(struct xhci_hcd *xhci ctx->size += CTX_SIZE(xhci->hcc_params); ctx->bytes = dma_pool_alloc(xhci->device_pool, flags, &ctx->dma); + if (!ctx->bytes) { + kfree(ctx); + return NULL; + } memset(ctx->bytes, 0, ctx->size); return ctx; } diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index 151ca5e..40e39df 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -465,7 +465,7 @@ static void ring_doorbell_for_active_rings(struct xhci_hcd *xhci, /* A ring has pending URBs if its TD list is not empty */ if (!(ep->ep_state & EP_HAS_STREAMS)) { - if (!(list_empty(&ep->ring->td_list))) + if (ep->ring && !(list_empty(&ep->ring->td_list))) xhci_ring_ep_doorbell(xhci, slot_id, ep_index, 0); return; } @@ -882,8 +882,12 @@ remove_finished_td: /* Otherwise ring the doorbell(s) to restart queued transfers */ ring_doorbell_for_active_rings(xhci, slot_id, ep_index); } - ep->stopped_td = NULL; - ep->stopped_trb = NULL; + + /* Clear stopped_td and stopped_trb if endpoint is not halted */ + if (!(ep->ep_state & EP_HALTED)) { + ep->stopped_td = NULL; + ep->stopped_trb = NULL; + } /* * Drop the lock and complete the URBs in the cancelled TD list. 
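[Note] The xhci-mem fix above handles a failed second allocation: when the pool allocation returns NULL, the already-allocated context structure must be freed before returning, otherwise it leaks (and the following memset would dereference NULL). A minimal sketch of the two-step allocator pattern with hypothetical names:

#include <stdlib.h>
#include <string.h>

struct ctx {
        size_t size;
        unsigned char *bytes;
};

static struct ctx *ctx_alloc(size_t size)
{
        struct ctx *c = malloc(sizeof(*c));

        if (!c)
                return NULL;
        c->size = size;
        c->bytes = malloc(size);
        if (!c->bytes) {        /* the check added by the patch */
                free(c);        /* undo the first allocation */
                return NULL;
        }
        memset(c->bytes, 0, size);
        return c;
}

int main(void)
{
        struct ctx *c = ctx_alloc(64);

        if (c) {
                free(c->bytes);
                free(c);
        }
        return 0;
}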
@@ -1377,6 +1381,12 @@ static void handle_cmd_completion(struct xhci_hcd *xhci, inc_deq(xhci, xhci->cmd_ring, false); return; } + /* There is no command to handle if we get a stop event when the + * command ring is empty, event->cmd_trb points to the next + * unset command + */ + if (xhci->cmd_ring->dequeue == xhci->cmd_ring->enqueue) + return; } switch (le32_to_cpu(xhci->cmd_ring->dequeue->generic.field[3]) diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index 4864b25..c95252d 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -956,9 +956,6 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, } xhci = hcd_to_xhci(hcd); - if (xhci->xhc_state & XHCI_STATE_HALTED) - return -ENODEV; - if (check_virt_dev) { if (!udev->slot_id || !xhci->devs || !xhci->devs[udev->slot_id]) { @@ -975,6 +972,9 @@ static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev, } } + if (xhci->xhc_state & XHCI_STATE_HALTED) + return -ENODEV; + return 1; } @@ -2713,10 +2713,21 @@ void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); struct xhci_virt_device *virt_dev; + struct device *dev = hcd->self.controller; unsigned long flags; u32 state; int i, ret; +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * We called pm_runtime_get_noresume when the device was attached. + * Decrement the counter here to allow controller to runtime suspend + * if no devices remain. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_put_noidle(dev); +#endif + ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__); /* If the host is halted due to driver unload, we still need to free the * device. @@ -2783,6 +2794,7 @@ static int xhci_reserve_host_control_ep_resources(struct xhci_hcd *xhci) int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) { struct xhci_hcd *xhci = hcd_to_xhci(hcd); + struct device *dev = hcd->self.controller; unsigned long flags; int timeleft; int ret; @@ -2835,6 +2847,16 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev) goto disable_slot; } udev->slot_id = xhci->slot_id; + +#ifndef CONFIG_USB_DEFAULT_PERSIST + /* + * If resetting upon resume, we can't put the controller into runtime + * suspend if there is a device attached. + */ + if (xhci->quirks & XHCI_RESET_ON_RESUME) + pm_runtime_get_noresume(dev); +#endif + /* Is this a LS or FS device under a HS hub? */ /* Hub or peripherial? 
*/ return 1; diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index 68ab460..0a70c98 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -63,6 +63,7 @@ static const struct usb_device_id appledisplay_table[] = { { APPLEDISPLAY_DEVICE(0x9219) }, { APPLEDISPLAY_DEVICE(0x921c) }, { APPLEDISPLAY_DEVICE(0x921d) }, + { APPLEDISPLAY_DEVICE(0x9236) }, /* Terminating entry */ { } diff --git a/drivers/usb/misc/sisusbvga/sisusb.c b/drivers/usb/misc/sisusbvga/sisusb.c index dd573ab..7af163d 100644 --- a/drivers/usb/misc/sisusbvga/sisusb.c +++ b/drivers/usb/misc/sisusbvga/sisusb.c @@ -3247,6 +3247,7 @@ static const struct usb_device_id sisusb_table[] = { { USB_DEVICE(0x0711, 0x0903) }, { USB_DEVICE(0x0711, 0x0918) }, { USB_DEVICE(0x0711, 0x0920) }, + { USB_DEVICE(0x0711, 0x0950) }, { USB_DEVICE(0x182d, 0x021c) }, { USB_DEVICE(0x182d, 0x0269) }, { } diff --git a/drivers/usb/serial/ark3116.c b/drivers/usb/serial/ark3116.c index 18e875b..5cc633e 100644 --- a/drivers/usb/serial/ark3116.c +++ b/drivers/usb/serial/ark3116.c @@ -49,7 +49,7 @@ static int debug; #define DRIVER_NAME "ark3116" /* usb timeout of 1 second */ -#define ARK_TIMEOUT (1*HZ) +#define ARK_TIMEOUT 1000 static const struct usb_device_id id_table[] = { { USB_DEVICE(0x6547, 0x0232) }, diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c index 913a178..c408ff7 100644 --- a/drivers/usb/serial/cp210x.c +++ b/drivers/usb/serial/cp210x.c @@ -60,6 +60,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x0489, 0xE000) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0489, 0xE003) }, /* Pirelli Broadband S.p.A, DP-L10 SIP/GSM Mobile */ { USB_DEVICE(0x0745, 0x1000) }, /* CipherLab USB CCD Barcode Scanner 1000 */ + { USB_DEVICE(0x0846, 0x1100) }, /* NetGear Managed Switch M4100 series, M5300 series, M7100 series */ { USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */ { USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */ { USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */ @@ -124,6 +125,8 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x10C4, 0x85F8) }, /* Virtenio Preon32 */ { USB_DEVICE(0x10C4, 0x8664) }, /* AC-Services CAN-IF */ { USB_DEVICE(0x10C4, 0x8665) }, /* AC-Services OBD-IF */ + { USB_DEVICE(0x10C4, 0x88A4) }, /* MMB Networks ZigBee USB Device */ + { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */ { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */ { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */ @@ -154,6 +157,7 @@ static const struct usb_device_id id_table[] = { { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ + { USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */ { USB_DEVICE(0x1BE3, 0x07A6) }, /* WAGO 750-923 USB Service Cable */ { USB_DEVICE(0x1E29, 0x0102) }, /* Festo CPX-USB */ { USB_DEVICE(0x1E29, 0x0501) }, /* Festo CMSP */ diff --git a/drivers/usb/serial/cypress_m8.c b/drivers/usb/serial/cypress_m8.c index d9906eb..a75cce9 100644 --- a/drivers/usb/serial/cypress_m8.c +++ b/drivers/usb/serial/cypress_m8.c @@ -96,6 +96,7 @@ static const struct usb_device_id id_table_earthmate[] = { static const struct usb_device_id 
id_table_cyphidcomrs232[] = { { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, + { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { } /* Terminating entry */ }; @@ -109,6 +110,7 @@ static const struct usb_device_id id_table_combined[] = { { USB_DEVICE(VENDOR_ID_DELORME, PRODUCT_ID_EARTHMATEUSB_LT20) }, { USB_DEVICE(VENDOR_ID_CYPRESS, PRODUCT_ID_CYPHIDCOM) }, { USB_DEVICE(VENDOR_ID_POWERCOM, PRODUCT_ID_UPS) }, + { USB_DEVICE(VENDOR_ID_FRWD, PRODUCT_ID_CYPHIDCOM_FRWD) }, { USB_DEVICE(VENDOR_ID_DAZZLE, PRODUCT_ID_CA42) }, { } /* Terminating entry */ }; @@ -268,6 +270,12 @@ static struct usb_serial_driver cypress_ca42v2_device = { * Cypress serial helper functions *****************************************************************************/ +/* FRWD Dongle hidcom needs to skip reset and speed checks */ +static inline bool is_frwd(struct usb_device *dev) +{ + return ((le16_to_cpu(dev->descriptor.idVendor) == VENDOR_ID_FRWD) && + (le16_to_cpu(dev->descriptor.idProduct) == PRODUCT_ID_CYPHIDCOM_FRWD)); +} static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) { @@ -277,6 +285,10 @@ static int analyze_baud_rate(struct usb_serial_port *port, speed_t new_rate) if (unstable_bauds) return new_rate; + /* FRWD Dongle uses 115200 bps */ + if (is_frwd(port->serial->dev)) + return new_rate; + /* * The general purpose firmware for the Cypress M8 allows for * a maximum speed of 57600bps (I have no idea whether DeLorme @@ -490,7 +502,11 @@ static int generic_startup(struct usb_serial *serial) } init_waitqueue_head(&priv->delta_msr_wait); - usb_reset_configuration(serial->dev); + /* Skip reset for FRWD device. It is a workaround: device hangs if it receives SET_CONFIGURATION in Configured state. 
*/ + if (!is_frwd(serial->dev)) + usb_reset_configuration(serial->dev); priv->cmd_ctrl = 0; priv->line_control = 0; diff --git a/drivers/usb/serial/cypress_m8.h b/drivers/usb/serial/cypress_m8.h index 67cf608..b461311 100644 --- a/drivers/usb/serial/cypress_m8.h +++ b/drivers/usb/serial/cypress_m8.h @@ -24,6 +24,10 @@ #define VENDOR_ID_CYPRESS 0x04b4 #define PRODUCT_ID_CYPHIDCOM 0x5500 +/* FRWD Dongle - a GPS sports watch */ +#define VENDOR_ID_FRWD 0x6737 +#define PRODUCT_ID_CYPHIDCOM_FRWD 0x0001 + /* Powercom UPS, chip CY7C63723 */ #define VENDOR_ID_POWERCOM 0x0d9f #define PRODUCT_ID_UPS 0x0002 diff --git a/drivers/usb/serial/ftdi_sio.c b/drivers/usb/serial/ftdi_sio.c index 132f114..04d5427 100644 --- a/drivers/usb/serial/ftdi_sio.c +++ b/drivers/usb/serial/ftdi_sio.c @@ -195,7 +195,10 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_THROTTLE_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GATEWAY_PID) }, { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_PID) }, + { USB_DEVICE(FTDI_VID, FTDI_OPENDCC_GBM_BOOST_PID) }, { USB_DEVICE(NEWPORT_VID, NEWPORT_AGILIS_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_CC_PID) }, + { USB_DEVICE(NEWPORT_VID, NEWPORT_CONEX_AGP_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_IOBOARD_PID) }, { USB_DEVICE(INTERBIOMETRICS_VID, INTERBIOMETRICS_MINI_IOBOARD_PID) }, { USB_DEVICE(FTDI_VID, FTDI_SPROG_II) }, @@ -740,9 +743,34 @@ static struct usb_device_id id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_NDI_AURORA_SCU_PID), .driver_info = (kernel_ulong_t)&ftdi_NDI_device_quirk }, { USB_DEVICE(TELLDUS_VID, TELLDUS_TELLSTICK_PID) }, - { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_SERIAL_VX7_PID) }, - { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_CT29B_PID) }, - { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_RTS01_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S03_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_59_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57A_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_57B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29A_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29F_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S01_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_29C_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_81B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_82B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5D_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K4Y_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_K5G_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_S05_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_60_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_61_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_62_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_63B_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_64_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_65_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_92D_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_W5R_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_A5R_PID) }, + { USB_DEVICE(RTSYSTEMS_VID, RTSYSTEMS_USB_PW1_PID) }, { USB_DEVICE(FTDI_VID, FTDI_MAXSTREAM_PID) }, { USB_DEVICE(FTDI_VID, FTDI_PHI_FISCO_PID) }, { USB_DEVICE(TML_VID, TML_USB_SERIAL_PID) }, @@ -876,7 +904,9 @@ static struct usb_device_id 
id_table_combined [] = { { USB_DEVICE(FTDI_VID, FTDI_DOTEC_PID) }, { USB_DEVICE(QIHARDWARE_VID, MILKYMISTONE_JTAGSERIAL_PID), .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, - { USB_DEVICE(ST_VID, ST_STMCLT1030_PID), + { USB_DEVICE(ST_VID, ST_STMCLT_2232_PID), + .driver_info = (kernel_ulong_t)&ftdi_jtag_quirk }, + { USB_DEVICE(ST_VID, ST_STMCLT_4232_PID), .driver_info = (kernel_ulong_t)&ftdi_stmclite_quirk }, { USB_DEVICE(FTDI_VID, FTDI_RF_R106) }, { USB_DEVICE(FTDI_VID, FTDI_DISTORTEC_JTAG_LOCK_PICK_PID), @@ -2136,6 +2166,9 @@ static void ftdi_set_termios(struct tty_struct *tty, cflag = termios->c_cflag; + if (!old_termios) + goto no_skip; + if (old_termios->c_cflag == termios->c_cflag && old_termios->c_ispeed == termios->c_ispeed && old_termios->c_ospeed == termios->c_ospeed) @@ -2149,6 +2182,7 @@ static void ftdi_set_termios(struct tty_struct *tty, (termios->c_cflag & (CSIZE|PARODD|PARENB|CMSPAR|CSTOPB))) goto no_data_parity_stop_changes; +no_skip: /* Set number of data bits, parity, stop bits */ urb_value = 0; diff --git a/drivers/usb/serial/ftdi_sio_ids.h b/drivers/usb/serial/ftdi_sio_ids.h index 809c03a..61685ed 100644 --- a/drivers/usb/serial/ftdi_sio_ids.h +++ b/drivers/usb/serial/ftdi_sio_ids.h @@ -74,6 +74,7 @@ #define FTDI_OPENDCC_THROTTLE_PID 0xBFDA #define FTDI_OPENDCC_GATEWAY_PID 0xBFDB #define FTDI_OPENDCC_GBM_PID 0xBFDC +#define FTDI_OPENDCC_GBM_BOOST_PID 0xBFDD /* NZR SEM 16+ USB (http://www.nzr.de) */ #define FTDI_NZR_SEM_USB_PID 0xC1E0 /* NZR SEM-LOG16+ */ @@ -771,6 +772,8 @@ */ #define NEWPORT_VID 0x104D #define NEWPORT_AGILIS_PID 0x3000 +#define NEWPORT_CONEX_CC_PID 0x3002 +#define NEWPORT_CONEX_AGP_PID 0x3006 /* Interbiometrics USB I/O Board */ /* Developed for Interbiometrics by Rudolf Gugler */ @@ -812,11 +815,35 @@ /* * RT Systems programming cables for various ham radios */ -#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ -#define RTSYSTEMS_SERIAL_VX7_PID 0x9e52 /* Serial converter for VX-7 Radios using FT232RL */ -#define RTSYSTEMS_CT29B_PID 0x9e54 /* CT29B Radio Cable */ -#define RTSYSTEMS_RTS01_PID 0x9e57 /* USB-RTS01 Radio Cable */ - +#define RTSYSTEMS_VID 0x2100 /* Vendor ID */ +#define RTSYSTEMS_USB_S03_PID 0x9001 /* RTS-03 USB to Serial Adapter */ +#define RTSYSTEMS_USB_59_PID 0x9e50 /* USB-59 USB to 8 pin plug */ +#define RTSYSTEMS_USB_57A_PID 0x9e51 /* USB-57A USB to 4pin 3.5mm plug */ +#define RTSYSTEMS_USB_57B_PID 0x9e52 /* USB-57B USB to extended 4pin 3.5mm plug */ +#define RTSYSTEMS_USB_29A_PID 0x9e53 /* USB-29A USB to 3.5mm stereo plug */ +#define RTSYSTEMS_USB_29B_PID 0x9e54 /* USB-29B USB to 6 pin mini din */ +#define RTSYSTEMS_USB_29F_PID 0x9e55 /* USB-29F USB to 6 pin modular plug */ +#define RTSYSTEMS_USB_62B_PID 0x9e56 /* USB-62B USB to 8 pin mini din plug*/ +#define RTSYSTEMS_USB_S01_PID 0x9e57 /* USB-RTS01 USB to 3.5 mm stereo plug*/ +#define RTSYSTEMS_USB_63_PID 0x9e58 /* USB-63 USB to 9 pin female*/ +#define RTSYSTEMS_USB_29C_PID 0x9e59 /* USB-29C USB to 4 pin modular plug*/ +#define RTSYSTEMS_USB_81B_PID 0x9e5A /* USB-81 USB to 8 pin mini din plug*/ +#define RTSYSTEMS_USB_82B_PID 0x9e5B /* USB-82 USB to 2.5 mm stereo plug*/ +#define RTSYSTEMS_USB_K5D_PID 0x9e5C /* USB-K5D USB to 8 pin modular plug*/ +#define RTSYSTEMS_USB_K4Y_PID 0x9e5D /* USB-K4Y USB to 2.5/3.5 mm plugs*/ +#define RTSYSTEMS_USB_K5G_PID 0x9e5E /* USB-K5G USB to 8 pin modular plug*/ +#define RTSYSTEMS_USB_S05_PID 0x9e5F /* USB-RTS05 USB to 2.5 mm stereo plug*/ +#define RTSYSTEMS_USB_60_PID 0x9e60 /* USB-60 USB to 6 pin din*/ +#define RTSYSTEMS_USB_61_PID 0x9e61 /* USB-61 
USB to 6 pin mini din*/ +#define RTSYSTEMS_USB_62_PID 0x9e62 /* USB-62 USB to 8 pin mini din*/ +#define RTSYSTEMS_USB_63B_PID 0x9e63 /* USB-63 USB to 9 pin female*/ +#define RTSYSTEMS_USB_64_PID 0x9e64 /* USB-64 USB to 9 pin male*/ +#define RTSYSTEMS_USB_65_PID 0x9e65 /* USB-65 USB to 9 pin female null modem*/ +#define RTSYSTEMS_USB_92_PID 0x9e66 /* USB-92 USB to 12 pin plug*/ +#define RTSYSTEMS_USB_92D_PID 0x9e67 /* USB-92D USB to 12 pin plug data*/ +#define RTSYSTEMS_USB_W5R_PID 0x9e68 /* USB-W5R USB to 8 pin modular plug*/ +#define RTSYSTEMS_USB_A5R_PID 0x9e69 /* USB-A5R USB to 8 pin modular plug*/ +#define RTSYSTEMS_USB_PW1_PID 0x9e6A /* USB-PW1 USB to 8 pin modular plug*/ /* * Physik Instrumente @@ -1150,7 +1177,8 @@ * STMicroelectonics */ #define ST_VID 0x0483 -#define ST_STMCLT1030_PID 0x3747 /* ST Micro Connect Lite STMCLT1030 */ +#define ST_STMCLT_2232_PID 0x3746 +#define ST_STMCLT_4232_PID 0x3747 /* * Papouch products (http://www.papouch.com/) diff --git a/drivers/usb/serial/io_ti.c b/drivers/usb/serial/io_ti.c index bdb44fc..1f145bf 100644 --- a/drivers/usb/serial/io_ti.c +++ b/drivers/usb/serial/io_ti.c @@ -558,6 +558,9 @@ static void chase_port(struct edgeport_port *port, unsigned long timeout, wait_queue_t wait; unsigned long flags; + if (!tty) + return; + if (!timeout) timeout = (HZ * EDGE_CLOSING_WAIT)/100; diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c index 6aca631..cf2668e 100644 --- a/drivers/usb/serial/iuu_phoenix.c +++ b/drivers/usb/serial/iuu_phoenix.c @@ -327,7 +327,7 @@ static int bulk_immediate(struct usb_serial_port *port, u8 *buf, u8 count) usb_bulk_msg(serial->dev, usb_sndbulkpipe(serial->dev, port->bulk_out_endpointAddress), buf, - count, &actual, HZ * 1); + count, &actual, 1000); if (status != IUU_OPERATION_OK) dbg("%s - error = %2x", __func__, status); @@ -350,7 +350,7 @@ static int read_immediate(struct usb_serial_port *port, u8 *buf, u8 count) usb_bulk_msg(serial->dev, usb_rcvbulkpipe(serial->dev, port->bulk_in_endpointAddress), buf, - count, &actual, HZ * 1); + count, &actual, 1000); if (status != IUU_OPERATION_OK) dbg("%s - error = %2x", __func__, status); diff --git a/drivers/usb/serial/keyspan.c b/drivers/usb/serial/keyspan.c index a442352..4f415e2 100644 --- a/drivers/usb/serial/keyspan.c +++ b/drivers/usb/serial/keyspan.c @@ -1833,7 +1833,7 @@ static int keyspan_usa26_send_setup(struct usb_serial *serial, d_details = s_priv->device_details; device_port = port->number - port->serial->minor; - outcont_urb = d_details->outcont_endpoints[port->number]; + outcont_urb = d_details->outcont_endpoints[device_port]; this_urb = p_priv->outcont_urb; dbg("%s - endpoint %d", __func__, usb_pipeendpoint(this_urb->pipe)); diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 40abedb..7ee78bd 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -44,7 +44,7 @@ #define DRIVER_DESC "Moschip USB Serial Driver" /* default urb timeout */ -#define MOS_WDR_TIMEOUT (HZ * 5) +#define MOS_WDR_TIMEOUT 5000 #define MOS_MAX_PORT 0x02 #define MOS_WRITE 0x0E @@ -97,6 +97,7 @@ struct urbtracker { struct list_head urblist_entry; struct kref ref_count; struct urb *urb; + struct usb_ctrlrequest *setup; }; enum mos7715_pp_modes { @@ -234,11 +235,22 @@ static int read_mos_reg(struct usb_serial *serial, unsigned int serial_portnum, __u8 requesttype = (__u8)0xc0; __u16 index = get_reg_index(reg); __u16 value = get_reg_value(reg, serial_portnum); - int status = usb_control_msg(usbdev, pipe, request, 
requesttype, value, - index, data, 1, MOS_WDR_TIMEOUT); - if (status < 0) + u8 *buf; + int status; + + buf = kmalloc(1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + status = usb_control_msg(usbdev, pipe, request, requesttype, value, + index, buf, 1, MOS_WDR_TIMEOUT); + if (status == 1) + *data = *buf; + else if (status < 0) dev_err(&usbdev->dev, "mos7720: usb_control_msg() failed: %d", status); + kfree(buf); + return status; } @@ -268,6 +280,7 @@ static void destroy_urbtracker(struct kref *kref) struct mos7715_parport *mos_parport = urbtrack->mos_parport; dbg("%s called", __func__); usb_free_urb(urbtrack->urb); + kfree(urbtrack->setup); kfree(urbtrack); kref_put(&mos_parport->ref_count, destroy_mos_parport); } @@ -352,7 +365,6 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, struct urbtracker *urbtrack; int ret_val; unsigned long flags; - struct usb_ctrlrequest setup; struct usb_serial *serial = mos_parport->serial; struct usb_device *usbdev = serial->dev; dbg("%s called", __func__); @@ -371,14 +383,20 @@ static int write_parport_reg_nonblock(struct mos7715_parport *mos_parport, kfree(urbtrack); return -ENOMEM; } - setup.bRequestType = (__u8)0x40; - setup.bRequest = (__u8)0x0e; - setup.wValue = get_reg_value(reg, dummy); - setup.wIndex = get_reg_index(reg); - setup.wLength = 0; + urbtrack->setup = kmalloc(sizeof(*urbtrack->setup), GFP_ATOMIC); + if (!urbtrack->setup) { + usb_free_urb(urbtrack->urb); + kfree(urbtrack); + return -ENOMEM; + } + urbtrack->setup->bRequestType = (__u8)0x40; + urbtrack->setup->bRequest = (__u8)0x0e; + urbtrack->setup->wValue = cpu_to_le16(get_reg_value(reg, dummy)); + urbtrack->setup->wIndex = cpu_to_le16(get_reg_index(reg)); + urbtrack->setup->wLength = 0; usb_fill_control_urb(urbtrack->urb, usbdev, usb_sndctrlpipe(usbdev, 0), - (unsigned char *)&setup, + (unsigned char *)urbtrack->setup, NULL, 0, async_complete, urbtrack); kref_init(&urbtrack->ref_count); INIT_LIST_HEAD(&urbtrack->urblist_entry); @@ -1700,7 +1718,7 @@ static void change_port_settings(struct tty_struct *tty, mos7720_port->shadowMCR |= (UART_MCR_XONANY); /* To set hardware flow control to the specified * * serial port, in SP1/2_CONTROL_REG */ - if (port->number) + if (port_number) write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x01); else write_mos_reg(serial, dummy, SP_CONTROL_REG, 0x02); @@ -2113,7 +2131,7 @@ static int mos7720_startup(struct usb_serial *serial) /* setting configuration feature to one */ usb_control_msg(serial->dev, usb_sndctrlpipe(serial->dev, 0), - (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5*HZ); + (__u8)0x03, 0x00, 0x01, 0x00, NULL, 0x00, 5000); /* start the interrupt urb */ ret_val = usb_submit_urb(serial->port[0]->interrupt_in_urb, GFP_KERNEL); @@ -2158,7 +2176,7 @@ static void mos7720_release(struct usb_serial *serial) /* wait for synchronous usb calls to return */ if (mos_parport->msg_pending) wait_for_completion_timeout(&mos_parport->syncmsg_compl, - MOS_WDR_TIMEOUT); + msecs_to_jiffies(MOS_WDR_TIMEOUT)); parport_remove_port(mos_parport->pp); usb_set_serial_data(serial, NULL); diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c index 4418538..536c4ad 100644 --- a/drivers/usb/serial/option.c +++ b/drivers/usb/serial/option.c @@ -81,6 +81,7 @@ static void option_instat_callback(struct urb *urb); #define HUAWEI_VENDOR_ID 0x12D1 #define HUAWEI_PRODUCT_E173 0x140C +#define HUAWEI_PRODUCT_E1750 0x1406 #define HUAWEI_PRODUCT_K4505 0x1464 #define HUAWEI_PRODUCT_K3765 0x1465 #define HUAWEI_PRODUCT_K4605 0x14C6 @@ -159,8 +160,6 @@ 
static void option_instat_callback(struct urb *urb); #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 #define NOVATELWIRELESS_PRODUCT_E362 0x9010 -#define NOVATELWIRELESS_PRODUCT_G1 0xA001 -#define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 #define NOVATELWIRELESS_PRODUCT_G2 0xA010 #define NOVATELWIRELESS_PRODUCT_MC551 0xB001 @@ -196,6 +195,7 @@ static void option_instat_callback(struct urb *urb); #define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ #define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ +#define DELL_PRODUCT_5804_MINICARD_ATT 0x819b /* Novatel E371 */ #define KYOCERA_VENDOR_ID 0x0c88 #define KYOCERA_PRODUCT_KPC650 0x17da @@ -341,23 +341,19 @@ static void option_instat_callback(struct urb *urb); #define CINTERION_PRODUCT_EU3_E 0x0051 #define CINTERION_PRODUCT_EU3_P 0x0052 #define CINTERION_PRODUCT_PH8 0x0053 -#define CINTERION_PRODUCT_AH6 0x0055 -#define CINTERION_PRODUCT_PLS8 0x0060 +#define CINTERION_PRODUCT_AHXX 0x0055 +#define CINTERION_PRODUCT_PLXX 0x0060 /* Olivetti products */ #define OLIVETTI_VENDOR_ID 0x0b3c #define OLIVETTI_PRODUCT_OLICARD100 0xc000 +#define OLIVETTI_PRODUCT_OLICARD145 0xc003 +#define OLIVETTI_PRODUCT_OLICARD200 0xc005 /* Celot products */ #define CELOT_VENDOR_ID 0x211f #define CELOT_PRODUCT_CT680M 0x6801 -/* ONDA Communication vendor id */ -#define ONDA_VENDOR_ID 0x1ee8 - -/* ONDA MT825UP HSDPA 14.2 modem */ -#define ONDA_MT825UP 0x000b - /* Samsung products */ #define SAMSUNG_VENDOR_ID 0x04e8 #define SAMSUNG_PRODUCT_GT_B3730 0x6889 @@ -450,7 +446,8 @@ static void option_instat_callback(struct urb *urb); /* Hyundai Petatel Inc. products */ #define PETATEL_VENDOR_ID 0x1ff4 -#define PETATEL_PRODUCT_NP10T 0x600e +#define PETATEL_PRODUCT_NP10T_600A 0x600a +#define PETATEL_PRODUCT_NP10T_600E 0x600e /* TP-LINK Incorporated products */ #define TPLINK_VENDOR_ID 0x2357 @@ -585,12 +582,16 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1c23, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E173, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_E1750, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t) &net_intf2_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1441, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x1442, USB_CLASS_COMM, 0x02, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4505, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K3765, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0x14ac, 0xff, 0xff, 0xff), /* Huawei E1820 */ + .driver_info = (kernel_ulong_t) &net_intf1_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, HUAWEI_PRODUCT_K4605, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t) &huawei_cdc12_blacklist }, { USB_VENDOR_AND_INTERFACE_INFO(HUAWEI_VENDOR_ID, 0xff, 0xff, 0xff) }, @@ -740,8 +741,6 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC547) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, 
NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED) }, - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1) }, - { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G1_M) }, { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, /* Novatel Ovation MC551 a.k.a. Verizon USB551L */ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, @@ -770,6 +769,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5804_MINICARD_ATT, 0xff, 0xff, 0xff) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, @@ -796,6 +796,7 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(KYOCERA_VENDOR_ID, KYOCERA_PRODUCT_KPC680) }, { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6000)}, /* ZTE AC8700 */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x6613)}, /* Onda H600/ZTE MF330 */ + { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x0023)}, /* ONYX 3G device */ { USB_DEVICE(QUALCOMM_VENDOR_ID, 0x9000)}, /* SIMCom SIM5218 */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6280) }, /* BP3-USB & BP3-EXT HSDPA */ { USB_DEVICE(CMOTECH_VENDOR_ID, CMOTECH_PRODUCT_6008) }, @@ -831,7 +832,8 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0018, 0xff, 0xff, 0xff) }, - { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0019, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf3_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0020, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0021, 0xff, 0xff, 0xff), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, @@ -965,6 +967,8 @@ static const struct usb_device_id option_ids[] = { .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0330, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0395, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0412, 0xff, 0xff, 0xff), /* Telewell TW-LTE 4G */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0414, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0417, 0xff, 0xff, 0xff) }, { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff), @@ -1263,8 +1267,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_E) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_EU3_P) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PH8) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AH6) }, - { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLS8) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_AHXX) }, + { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_PLXX), + .driver_info 
= (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, { USB_DEVICE(CINTERION_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDM) }, @@ -1273,8 +1278,9 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) }, { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD145) }, + { USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD200) }, { USB_DEVICE(CELOT_VENDOR_ID, CELOT_PRODUCT_CT680M) }, /* CT-650 CDMA 450 1xEVDO modem */ - { USB_DEVICE(ONDA_VENDOR_ID, ONDA_MT825UP) }, /* ONDA MT825UP modem */ { USB_DEVICE_AND_INTERFACE_INFO(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_GT_B3730, USB_CLASS_CDC_DATA, 0x00, 0x00) }, /* Samsung GT-B3730 LTE USB modem.*/ { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM600) }, { USB_DEVICE(YUGA_VENDOR_ID, YUGA_PRODUCT_CEM610) }, @@ -1346,10 +1352,21 @@ static const struct usb_device_id option_ids[] = { { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x02, 0x01) }, { USB_DEVICE_AND_INTERFACE_INFO(MEDIATEK_VENDOR_ID, MEDIATEK_PRODUCT_DC_4COM2, 0xff, 0x00, 0x00) }, { USB_DEVICE(CELLIENT_VENDOR_ID, CELLIENT_PRODUCT_MEN200) }, - { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600A) }, + { USB_DEVICE(PETATEL_VENDOR_ID, PETATEL_PRODUCT_NP10T_600E) }, { USB_DEVICE(TPLINK_VENDOR_ID, TPLINK_PRODUCT_MA180), .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE(TPLINK_VENDOR_ID, 0x9000), /* TP-Link MA260 */ + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, { USB_DEVICE(CHANGHONG_VENDOR_ID, CHANGHONG_PRODUCT_CH690) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x02, 0x01) }, /* D-Link DWM-156 (variant) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d01, 0xff, 0x00, 0x00) }, /* D-Link DWM-156 (variant) */ + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d02, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x02, 0x01) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x2001, 0x7d03, 0xff, 0x00, 0x00) }, + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e01, 0xff, 0xff, 0xff) }, /* D-Link DWM-152/C1 */ + { USB_DEVICE_AND_INTERFACE_INFO(0x07d1, 0x3e02, 0xff, 0xff, 0xff) }, /* D-Link DWM-156/C1 */ { } /* Terminating entry */ }; MODULE_DEVICE_TABLE(usb, option_ids); diff --git a/drivers/usb/serial/qcserial.c b/drivers/usb/serial/qcserial.c index 03d5f93..a7cd6fd 100644 --- a/drivers/usb/serial/qcserial.c +++ b/drivers/usb/serial/qcserial.c @@ -36,7 +36,13 @@ static const struct usb_device_id id_table[] = { {DEVICE_G1K(0x04da, 0x250c)}, /* Panasonic Gobi QDL device */ {DEVICE_G1K(0x413c, 0x8172)}, /* Dell Gobi Modem device */ {DEVICE_G1K(0x413c, 0x8171)}, /* Dell Gobi QDL device */ - {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa001)}, /* Novatel/Verizon USB-1000 */ + {DEVICE_G1K(0x1410, 0xa002)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa003)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa004)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa005)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa006)}, /* Novatel Gobi Modem device */ + {DEVICE_G1K(0x1410, 0xa007)}, /* Novatel Gobi Modem device */ {DEVICE_G1K(0x1410, 0xa008)}, /* Novatel Gobi QDL device */ 
{DEVICE_G1K(0x0b05, 0x1776)}, /* Asus Gobi Modem device */ {DEVICE_G1K(0x0b05, 0x1774)}, /* Asus Gobi QDL device */ diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c index 2856474..4757880 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.c +++ b/drivers/usb/serial/ti_usb_3410_5052.c @@ -179,7 +179,8 @@ static struct usb_device_id ti_id_table_3410[15+TI_EXTRA_VID_PID_COUNT+1] = { { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, - { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STEREO_PLUG_ID) }, + { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_STRIP_PORT_ID) }, { USB_DEVICE(TI_VENDOR_ID, FRI2_PRODUCT_ID) }, }; @@ -408,7 +409,7 @@ static int ti_startup(struct usb_serial *serial) usb_set_serial_data(serial, tdev); /* determine device type */ - if (usb_match_id(serial->interface, ti_id_table_3410)) + if (serial->type == &ti_1port_device) tdev->td_is_3410 = 1; dbg("%s - device type is %s", __func__, tdev->td_is_3410 ? "3410" : "5052"); diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h index b353e7e..4a2423e 100644 --- a/drivers/usb/serial/ti_usb_3410_5052.h +++ b/drivers/usb/serial/ti_usb_3410_5052.h @@ -52,7 +52,9 @@ /* Abbott Diabetics vendor and product ids */ #define ABBOTT_VENDOR_ID 0x1a61 -#define ABBOTT_PRODUCT_ID 0x3410 +#define ABBOTT_STEREO_PLUG_ID 0x3410 +#define ABBOTT_PRODUCT_ID ABBOTT_STEREO_PLUG_ID +#define ABBOTT_STRIP_PORT_ID 0x3420 /* Commands */ #define TI_GET_VERSION 0x01 diff --git a/drivers/usb/serial/visor.c b/drivers/usb/serial/visor.c index 1c11959..189a894 100644 --- a/drivers/usb/serial/visor.c +++ b/drivers/usb/serial/visor.c @@ -600,6 +600,7 @@ static int treo_attach(struct usb_serial *serial) dest->bulk_in_endpointAddress = src->bulk_in_endpointAddress;\ dest->bulk_in_buffer = src->bulk_in_buffer; \ dest->interrupt_in_urb = src->interrupt_in_urb; \ + dest->interrupt_in_urb->context = dest; \ dest->interrupt_in_endpointAddress = \ src->interrupt_in_endpointAddress;\ dest->interrupt_in_buffer = src->interrupt_in_buffer; \ diff --git a/drivers/usb/serial/whiteheat.c b/drivers/usb/serial/whiteheat.c index 59d646d..0ec60cd 100644 --- a/drivers/usb/serial/whiteheat.c +++ b/drivers/usb/serial/whiteheat.c @@ -1209,7 +1209,7 @@ static void firm_setup_port(struct tty_struct *tty) struct whiteheat_port_settings port_settings; unsigned int cflag = tty->termios->c_cflag; - port_settings.port = port->number + 1; + port_settings.port = port->number - port->serial->minor + 1; /* get the byte size */ switch (cflag & CSIZE) { diff --git a/drivers/usb/storage/cypress_atacb.c b/drivers/usb/storage/cypress_atacb.c index c844718..7341ce2 100644 --- a/drivers/usb/storage/cypress_atacb.c +++ b/drivers/usb/storage/cypress_atacb.c @@ -248,14 +248,26 @@ static int cypress_probe(struct usb_interface *intf, { struct us_data *us; int result; + struct usb_device *device; result = usb_stor_probe1(&us, intf, id, (id - cypress_usb_ids) + cypress_unusual_dev_list); if (result) return result; - us->protocol_name = "Transparent SCSI with Cypress ATACB"; - us->proto_handler = cypress_atacb_passthrough; + /* Among CY7C68300 chips, the A revision does not support Cypress ATACB + * Filter out this revision from EEPROM default descriptor values + */ + device = interface_to_usbdev(intf); + if (device->descriptor.iManufacturer != 0x38 || + device->descriptor.iProduct != 0x4e || + 
device->descriptor.iSerialNumber != 0x64) { + us->protocol_name = "Transparent SCSI with Cypress ATACB"; + us->proto_handler = cypress_atacb_passthrough; + } else { + us->protocol_name = "Transparent SCSI"; + us->proto_handler = usb_stor_transparent_scsi_command; + } result = usb_stor_probe2(us); return result; diff --git a/drivers/usb/storage/unusual_devs.h b/drivers/usb/storage/unusual_devs.h index 7b8d564..e073e2f 100644 --- a/drivers/usb/storage/unusual_devs.h +++ b/drivers/usb/storage/unusual_devs.h @@ -657,6 +657,20 @@ UNUSUAL_DEV( 0x054c, 0x016a, 0x0000, 0x9999, USB_SC_DEVICE, USB_PR_DEVICE, NULL, US_FL_FIX_INQUIRY ), +/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */ +UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100, + "Sony Corp.", + "MicroVault Flash Drive", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_READ_CAPACITY_16 ), + +/* Submitted by Ren Bigcren <bigcren.ren@sonymobile.com> */ +UNUSUAL_DEV( 0x054c, 0x02a5, 0x0100, 0x0100, + "Sony Corp.", + "MicroVault Flash Drive", + USB_SC_DEVICE, USB_PR_DEVICE, NULL, + US_FL_NO_READ_CAPACITY_16 ), + /* floppy reports multiple luns */ UNUSUAL_DEV( 0x055d, 0x2020, 0x0000, 0x0210, "SAMSUNG", diff --git a/drivers/video/console/fbcon.c b/drivers/video/console/fbcon.c index 9b8bcab..6b4fb5c 100644 --- a/drivers/video/console/fbcon.c +++ b/drivers/video/console/fbcon.c @@ -843,6 +843,8 @@ static void con2fb_init_display(struct vc_data *vc, struct fb_info *info, * * Maps a virtual console @unit to a frame buffer device * @newidx. + * + * This should be called with the console lock held. */ static int set_con2fb_map(int unit, int newidx, int user) { @@ -860,7 +862,7 @@ static int set_con2fb_map(int unit, int newidx, int user) if (!search_for_mapped_con() || !con_is_bound(&fb_con)) { info_idx = newidx; - return fbcon_takeover(0); + return do_fbcon_takeover(0); } if (oldidx != -1) @@ -868,7 +870,6 @@ static int set_con2fb_map(int unit, int newidx, int user) found = search_fb_in_map(newidx); - console_lock(); con2fb_map[unit] = newidx; if (!err && !found) err = con2fb_acquire_newinfo(vc, info, unit, oldidx); @@ -895,7 +896,6 @@ static int set_con2fb_map(int unit, int newidx, int user) if (!search_fb_in_map(info_idx)) info_idx = newidx; - console_unlock(); return err; } @@ -1229,6 +1229,8 @@ static void fbcon_deinit(struct vc_data *vc) finished: fbcon_free_font(p, free_font); + if (free_font) + vc->vc_font.data = NULL; if (!con_is_bound(&fb_con)) fbcon_exit(); @@ -3026,6 +3028,7 @@ static inline int fbcon_unbind(void) } #endif /* CONFIG_VT_HW_CONSOLE_BINDING */ +/* called with console_lock held */ static int fbcon_fb_unbind(int idx) { int i, new_idx = -1, ret = 0; @@ -3052,6 +3055,7 @@ static int fbcon_fb_unbind(int idx) return ret; } +/* called with console_lock held */ static int fbcon_fb_unregistered(struct fb_info *info) { int i, idx; @@ -3089,6 +3093,7 @@ static int fbcon_fb_unregistered(struct fb_info *info) return 0; } +/* called with console_lock held */ static void fbcon_remap_all(int idx) { int i; @@ -3133,6 +3138,7 @@ static inline void fbcon_select_primary(struct fb_info *info) } #endif /* CONFIG_FRAMEBUFFER_DETECT_PRIMARY */ +/* called with console_lock held */ static int fbcon_fb_registered(struct fb_info *info) { int ret = 0, i, idx; @@ -3285,6 +3291,7 @@ static int fbcon_event_notify(struct notifier_block *self, ret = fbcon_fb_unregistered(info); break; case FB_EVENT_SET_CONSOLE_MAP: + /* called with console lock held */ con2fb = event->data; ret = set_con2fb_map(con2fb->console - 1, con2fb->framebuffer, 1); diff --git 
a/drivers/video/fbmem.c b/drivers/video/fbmem.c index c133dde..0a22808 100644 --- a/drivers/video/fbmem.c +++ b/drivers/video/fbmem.c @@ -1154,8 +1154,10 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd, event.data = &con2fb; if (!lock_fb_info(info)) return -ENODEV; + console_lock(); event.info = info; ret = fb_notifier_call_chain(FB_EVENT_SET_CONSOLE_MAP, &event); + console_unlock(); unlock_fb_info(info); break; case FBIOBLANK: @@ -1348,15 +1350,12 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) { struct fb_info *info = file_fb_info(file); struct fb_ops *fb; - unsigned long off; + unsigned long mmio_pgoff; unsigned long start; u32 len; if (!info) return -ENODEV; - if (vma->vm_pgoff > (~0UL >> PAGE_SHIFT)) - return -EINVAL; - off = vma->vm_pgoff << PAGE_SHIFT; fb = info->fbops; if (!fb) return -ENODEV; @@ -1368,33 +1367,24 @@ fb_mmap(struct file *file, struct vm_area_struct * vma) return res; } - /* frame buffer memory */ + /* + * Ugh. This can be either the frame buffer mapping, or + * if pgoff points past it, the mmio mapping. + */ start = info->fix.smem_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.smem_len); - if (off >= len) { - /* memory mapped io */ - off -= len; - if (info->var.accel_flags) { - mutex_unlock(&info->mm_lock); - return -EINVAL; - } + len = info->fix.smem_len; + mmio_pgoff = PAGE_ALIGN((start & ~PAGE_MASK) + len) >> PAGE_SHIFT; + if (vma->vm_pgoff >= mmio_pgoff) { + vma->vm_pgoff -= mmio_pgoff; start = info->fix.mmio_start; - len = PAGE_ALIGN((start & ~PAGE_MASK) + info->fix.mmio_len); + len = info->fix.mmio_len; } mutex_unlock(&info->mm_lock); - start &= PAGE_MASK; - if ((vma->vm_end - vma->vm_start + off) > len) - return -EINVAL; - off += start; - vma->vm_pgoff = off >> PAGE_SHIFT; - /* This is an IO map - tell maydump to skip this VMA */ - vma->vm_flags |= VM_IO | VM_RESERVED; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); - fb_pgprotect(file, vma, off); - if (io_remap_pfn_range(vma, vma->vm_start, off >> PAGE_SHIFT, - vma->vm_end - vma->vm_start, vma->vm_page_prot)) - return -EAGAIN; - return 0; + fb_pgprotect(file, vma, start); + + return vm_iomap_memory(vma, start, len); } static int diff --git a/drivers/virtio/virtio_ring.c b/drivers/virtio/virtio_ring.c index 38710c8..4e13041 100644 --- a/drivers/virtio/virtio_ring.c +++ b/drivers/virtio/virtio_ring.c @@ -367,9 +367,22 @@ void virtqueue_disable_cb(struct virtqueue *_vq) } EXPORT_SYMBOL_GPL(virtqueue_disable_cb); -bool virtqueue_enable_cb(struct virtqueue *_vq) +/** + * virtqueue_enable_cb_prepare - restart callbacks after disable_cb + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns current queue state + * in an opaque unsigned value. This value should be later tested by + * virtqueue_poll, to detect a possible race between the driver checking for + * more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +unsigned virtqueue_enable_cb_prepare(struct virtqueue *_vq) { struct vring_virtqueue *vq = to_vvq(_vq); + u16 last_used_idx; START_USE(vq); @@ -379,15 +392,45 @@ bool virtqueue_enable_cb(struct virtqueue *_vq) * either clear the flags bit or point the event index at the next * entry. Always do both to keep code simple. 
*/ vq->vring.avail->flags &= ~VRING_AVAIL_F_NO_INTERRUPT; - vring_used_event(&vq->vring) = vq->last_used_idx; + vring_used_event(&vq->vring) = last_used_idx = vq->last_used_idx; + END_USE(vq); + return last_used_idx; +} +EXPORT_SYMBOL_GPL(virtqueue_enable_cb_prepare); + +/** + * virtqueue_poll - query pending used buffers + * @vq: the struct virtqueue we're talking about. + * @last_used_idx: virtqueue state (from call to virtqueue_enable_cb_prepare). + * + * Returns "true" if there are pending used buffers in the queue. + * + * This does not need to be serialized. + */ +bool virtqueue_poll(struct virtqueue *_vq, unsigned last_used_idx) +{ + struct vring_virtqueue *vq = to_vvq(_vq); + virtio_mb(); - if (unlikely(more_used(vq))) { - END_USE(vq); - return false; - } + return (u16)last_used_idx != vq->vring.used->idx; +} +EXPORT_SYMBOL_GPL(virtqueue_poll); - END_USE(vq); - return true; +/** + * virtqueue_enable_cb - restart callbacks after disable_cb. + * @vq: the struct virtqueue we're talking about. + * + * This re-enables callbacks; it returns "false" if there are pending + * buffers in the queue, to detect a possible race between the driver + * checking for more work, and enabling callbacks. + * + * Caller must ensure we don't call this with other virtqueue + * operations at the same time (except where noted). + */ +bool virtqueue_enable_cb(struct virtqueue *_vq) +{ + unsigned last_used_idx = virtqueue_enable_cb_prepare(_vq); + return !virtqueue_poll(_vq, last_used_idx); } EXPORT_SYMBOL_GPL(virtqueue_enable_cb); diff --git a/drivers/watchdog/ts72xx_wdt.c b/drivers/watchdog/ts72xx_wdt.c index 5a90a4a..87c0924 100644 --- a/drivers/watchdog/ts72xx_wdt.c +++ b/drivers/watchdog/ts72xx_wdt.c @@ -310,7 +310,8 @@ static long ts72xx_wdt_ioctl(struct file *file, unsigned int cmd, case WDIOC_GETSTATUS: case WDIOC_GETBOOTSTATUS: - return put_user(0, p); + error = put_user(0, p); + break; case WDIOC_KEEPALIVE: ts72xx_wdt_kick(wdt); diff --git a/drivers/xen/events.c b/drivers/xen/events.c index a5493f8..7ba4d0e 100644 --- a/drivers/xen/events.c +++ b/drivers/xen/events.c @@ -317,7 +317,7 @@ static void init_evtchn_cpu_bindings(void) for_each_possible_cpu(i) memset(per_cpu(cpu_evtchn_mask, i), - (i == 0) ? ~0 : 0, sizeof(*per_cpu(cpu_evtchn_mask, i))); + (i == 0) ? ~0 : 0, NR_EVENT_CHANNELS/8); } static inline void clear_evtchn(int port) @@ -1153,7 +1153,7 @@ static void __xen_evtchn_do_upcall(void) { int start_word_idx, start_bit_idx; int word_idx, bit_idx; - int i; + int i, irq; int cpu = get_cpu(); struct shared_info *s = HYPERVISOR_shared_info; struct vcpu_info *vcpu_info = __this_cpu_read(xen_vcpu); @@ -1161,6 +1161,8 @@ static void __xen_evtchn_do_upcall(void) do { unsigned long pending_words; + unsigned long pending_bits; + struct irq_desc *desc; vcpu_info->evtchn_upcall_pending = 0; @@ -1171,6 +1173,17 @@ static void __xen_evtchn_do_upcall(void) /* Clear master flag /before/ clearing selector flag. 
*/ wmb(); #endif + if ((irq = per_cpu(virq_to_irq, cpu)[VIRQ_TIMER]) != -1) { + int evtchn = evtchn_from_irq(irq); + word_idx = evtchn / BITS_PER_LONG; + pending_bits = evtchn % BITS_PER_LONG; + if (active_evtchns(cpu, s, word_idx) & (1ULL << pending_bits)) { + desc = irq_to_desc(irq); + if (desc) + generic_handle_irq_desc(irq, desc); + } + } + pending_words = xchg(&vcpu_info->evtchn_pending_sel, 0); start_word_idx = __this_cpu_read(current_word_idx); @@ -1179,7 +1192,6 @@ static void __xen_evtchn_do_upcall(void) word_idx = start_word_idx; for (i = 0; pending_words != 0; i++) { - unsigned long pending_bits; unsigned long words; words = MASK_LSBS(pending_words, word_idx); @@ -1208,8 +1220,7 @@ static void __xen_evtchn_do_upcall(void) do { unsigned long bits; - int port, irq; - struct irq_desc *desc; + int port; bits = MASK_LSBS(pending_bits, bit_idx); diff --git a/drivers/xen/evtchn.c b/drivers/xen/evtchn.c index dbc13e9..ce3a0f5 100644 --- a/drivers/xen/evtchn.c +++ b/drivers/xen/evtchn.c @@ -367,18 +367,12 @@ static long evtchn_ioctl(struct file *file, if (unbind.port >= NR_EVENT_CHANNELS) break; - spin_lock_irq(&port_user_lock); - rc = -ENOTCONN; - if (get_port_user(unbind.port) != u) { - spin_unlock_irq(&port_user_lock); + if (get_port_user(unbind.port) != u) break; - } disable_irq(irq_from_evtchn(unbind.port)); - spin_unlock_irq(&port_user_lock); - evtchn_unbind_from_user(u, unbind.port); rc = 0; @@ -478,26 +472,15 @@ static int evtchn_release(struct inode *inode, struct file *filp) int i; struct per_user_data *u = filp->private_data; - spin_lock_irq(&port_user_lock); - - free_page((unsigned long)u->ring); - for (i = 0; i < NR_EVENT_CHANNELS; i++) { if (get_port_user(i) != u) continue; disable_irq(irq_from_evtchn(i)); - } - - spin_unlock_irq(&port_user_lock); - - for (i = 0; i < NR_EVENT_CHANNELS; i++) { - if (get_port_user(i) != u) - continue; - evtchn_unbind_from_user(get_port_user(i), i); } + free_page((unsigned long)u->ring); kfree(u->name); kfree(u); diff --git a/drivers/xen/grant-table.c b/drivers/xen/grant-table.c index fd725cd..949af52 100644 --- a/drivers/xen/grant-table.c +++ b/drivers/xen/grant-table.c @@ -355,9 +355,18 @@ void gnttab_request_free_callback(struct gnttab_free_callback *callback, void (*fn)(void *), void *arg, u16 count) { unsigned long flags; + struct gnttab_free_callback *cb; + spin_lock_irqsave(&gnttab_list_lock, flags); - if (callback->next) - goto out; + + /* Check if the callback is already on the list */ + cb = gnttab_free_callback_list; + while (cb) { + if (cb == callback) + goto out; + cb = cb->next; + } + callback->fn = fn; callback->arg = arg; callback->count = count; diff --git a/fs/autofs4/expire.c b/fs/autofs4/expire.c index 450f529..2c69d12 100644 --- a/fs/autofs4/expire.c +++ b/fs/autofs4/expire.c @@ -61,15 +61,6 @@ static int autofs4_mount_busy(struct vfsmount *mnt, struct dentry *dentry) /* This is an autofs submount, we can't expire it */ if (autofs_type_indirect(sbi->type)) goto done; - - /* - * Otherwise it's an offset mount and we need to check - * if we can umount its mount, if there is one. 
- */ - if (!d_mountpoint(path.dentry)) { - status = 0; - goto done; - } } /* Update the expiry counter if fs is busy */ @@ -786,12 +786,22 @@ static int __bio_copy_iov(struct bio *bio, struct bio_vec *iovecs, int bio_uncopy_user(struct bio *bio) { struct bio_map_data *bmd = bio->bi_private; - int ret = 0; + struct bio_vec *bvec; + int ret = 0, i; - if (!bio_flagged(bio, BIO_NULL_MAPPED)) - ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, - bmd->nr_sgvecs, bio_data_dir(bio) == READ, - 0, bmd->is_our_pages); + if (!bio_flagged(bio, BIO_NULL_MAPPED)) { + /* + * if we're in a workqueue, the request is orphaned, so + * don't copy into a random user address space, just free. + */ + if (current->mm) + ret = __bio_copy_iov(bio, bmd->iovecs, bmd->sgvecs, + bmd->nr_sgvecs, bio_data_dir(bio) == READ, + 0, bmd->is_our_pages); + else if (bmd->is_our_pages) + __bio_for_each_segment(bvec, bio, i, 0) + __free_page(bvec->bv_page); + } bio_free_map_data(bmd); bio_put(bio); return ret; diff --git a/fs/block_dev.c b/fs/block_dev.c index 97e4cb5..b5eb8c1 100644 --- a/fs/block_dev.c +++ b/fs/block_dev.c @@ -55,13 +55,21 @@ EXPORT_SYMBOL(I_BDEV); static void bdev_inode_switch_bdi(struct inode *inode, struct backing_dev_info *dst) { + bool wakeup_bdi = false; + spin_lock(&inode_wb_list_lock); spin_lock(&inode->i_lock); inode->i_data.backing_dev_info = dst; - if (inode->i_state & I_DIRTY) + if (inode->i_state & I_DIRTY) { + if (bdi_cap_writeback_dirty(dst) && !wb_has_dirty_io(&dst->wb)) + wakeup_bdi = true; list_move(&inode->i_wb_list, &dst->wb.b_dirty); + } spin_unlock(&inode->i_lock); spin_unlock(&inode_wb_list_lock); + + if (wakeup_bdi) + bdi_wakeup_thread_delayed(dst); } sector_t blkdev_max_block(struct block_device *bdev) diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c index a3c4751..a205027 100644 --- a/fs/btrfs/ioctl.c +++ b/fs/btrfs/ioctl.c @@ -1539,7 +1539,11 @@ static noinline int copy_to_sk(struct btrfs_root *root, item_off = btrfs_item_ptr_offset(leaf, i); item_len = btrfs_item_size_nr(leaf, i); - if (item_len > BTRFS_SEARCH_ARGS_BUFSIZE) + btrfs_item_key_to_cpu(leaf, key, i); + if (!key_in_sk(key, sk)) + continue; + + if (sizeof(sh) + item_len > BTRFS_SEARCH_ARGS_BUFSIZE) item_len = 0; if (sizeof(sh) + item_len + *sk_offset > @@ -1548,10 +1552,6 @@ static noinline int copy_to_sk(struct btrfs_root *root, goto overflow; } - btrfs_item_key_to_cpu(leaf, key, i); - if (!key_in_sk(key, sk)) - continue; - sh.objectid = key->objectid; sh.offset = key->offset; sh.type = key->type; diff --git a/fs/btrfs/relocation.c b/fs/btrfs/relocation.c index 5e0a3dc..2ab5837 100644 --- a/fs/btrfs/relocation.c +++ b/fs/btrfs/relocation.c @@ -670,6 +670,7 @@ struct backref_node *build_backref_tree(struct reloc_control *rc, int cowonly; int ret; int err = 0; + bool need_check = true; path1 = btrfs_alloc_path(); path2 = btrfs_alloc_path(); @@ -892,6 +893,7 @@ again: cur->bytenr); lower = cur; + need_check = true; for (; level < BTRFS_MAX_LEVEL; level++) { if (!path2->nodes[level]) { BUG_ON(btrfs_root_bytenr(&root->root_item) != @@ -935,14 +937,12 @@ again: /* * add the block to pending list if we - * need check its backrefs. only block - * at 'cur->level + 1' is added to the - * tail of pending list. this guarantees - * we check backrefs from lower level - * blocks to upper level blocks. + * need check its backrefs, we only do this once + * while walking up a tree as we will catch + * anything else later on. 
*/ - if (!upper->checked && - level == cur->level + 1) { + if (!upper->checked && need_check) { + need_check = false; list_add_tail(&edge->list[UPPER], &list); } else diff --git a/fs/btrfs/tree-log.c b/fs/btrfs/tree-log.c index faf7d0b..88dec16 100644 --- a/fs/btrfs/tree-log.c +++ b/fs/btrfs/tree-log.c @@ -315,6 +315,7 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, unsigned long src_ptr; unsigned long dst_ptr; int overwrite_root = 0; + bool inode_item = key->type == BTRFS_INODE_ITEM_KEY; if (root->root_key.objectid != BTRFS_TREE_LOG_OBJECTID) overwrite_root = 1; @@ -324,6 +325,9 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, /* look for the key in the destination tree */ ret = btrfs_search_slot(NULL, root, key, path, 0, 0); + if (ret < 0) + return ret; + if (ret == 0) { char *src_copy; char *dst_copy; @@ -365,6 +369,30 @@ static noinline int overwrite_item(struct btrfs_trans_handle *trans, return 0; } + /* + * We need to load the old nbytes into the inode so when we + * replay the extents we've logged we get the right nbytes. + */ + if (inode_item) { + struct btrfs_inode_item *item; + u64 nbytes; + + item = btrfs_item_ptr(path->nodes[0], path->slots[0], + struct btrfs_inode_item); + nbytes = btrfs_inode_nbytes(path->nodes[0], item); + item = btrfs_item_ptr(eb, slot, + struct btrfs_inode_item); + btrfs_set_inode_nbytes(eb, item, nbytes); + } + } else if (inode_item) { + struct btrfs_inode_item *item; + + /* + * New inode, set nbytes to 0 so that the nbytes comes out + * properly when we replay the extents. + */ + item = btrfs_item_ptr(eb, slot, struct btrfs_inode_item); + btrfs_set_inode_nbytes(eb, item, 0); } insert: btrfs_release_path(path); @@ -487,7 +515,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, u64 extent_end; u64 alloc_hint; u64 start = key->offset; - u64 saved_nbytes; + u64 nbytes = 0; struct btrfs_file_extent_item *item; struct inode *inode = NULL; unsigned long size; @@ -497,10 +525,19 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, found_type = btrfs_file_extent_type(eb, item); if (found_type == BTRFS_FILE_EXTENT_REG || - found_type == BTRFS_FILE_EXTENT_PREALLOC) - extent_end = start + btrfs_file_extent_num_bytes(eb, item); - else if (found_type == BTRFS_FILE_EXTENT_INLINE) { + found_type == BTRFS_FILE_EXTENT_PREALLOC) { + nbytes = btrfs_file_extent_num_bytes(eb, item); + extent_end = start + nbytes; + + /* + * We don't add to the inodes nbytes if we are prealloc or a + * hole. 
+ */ + if (btrfs_file_extent_disk_bytenr(eb, item) == 0) + nbytes = 0; + } else if (found_type == BTRFS_FILE_EXTENT_INLINE) { size = btrfs_file_extent_inline_len(eb, item); + nbytes = btrfs_file_extent_ram_bytes(eb, item); extent_end = (start + size + mask) & ~mask; } else { ret = 0; @@ -549,7 +586,6 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, } btrfs_release_path(path); - saved_nbytes = inode_get_bytes(inode); /* drop any overlapping extents */ ret = btrfs_drop_extents(trans, inode, start, extent_end, &alloc_hint, 1); @@ -637,7 +673,7 @@ static noinline int replay_one_extent(struct btrfs_trans_handle *trans, BUG_ON(ret); } - inode_set_bytes(inode, saved_nbytes); + inode_add_bytes(inode, nbytes); btrfs_update_inode(trans, root, inode); out: if (inode) diff --git a/fs/ceph/super.c b/fs/ceph/super.c index f2f77fd..1775022 100644 --- a/fs/ceph/super.c +++ b/fs/ceph/super.c @@ -70,8 +70,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) /* * express utilization in terms of large blocks to avoid * overflow on 32-bit machines. + * + * NOTE: for the time being, we make bsize == frsize to humor + * not-yet-ancient versions of glibc that are broken. + * Someday, we will probably want to report a real block + * size... whatever that may mean for a network file system! */ buf->f_bsize = 1 << CEPH_BLOCK_SHIFT; + buf->f_frsize = 1 << CEPH_BLOCK_SHIFT; buf->f_blocks = le64_to_cpu(st.kb) >> (CEPH_BLOCK_SHIFT-10); buf->f_bfree = (le64_to_cpu(st.kb) - le64_to_cpu(st.kb_used)) >> (CEPH_BLOCK_SHIFT-10); @@ -80,7 +86,6 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf) buf->f_files = le64_to_cpu(st.num_objects); buf->f_ffree = -1; buf->f_namelen = NAME_MAX; - buf->f_frsize = PAGE_CACHE_SIZE; /* leave fsid little-endian, regardless of host endianness */ fsid = *(u64 *)(&monmap->fsid) ^ *((u64 *)&monmap->fsid + 1); diff --git a/fs/ceph/super.h b/fs/ceph/super.h index f5cabef..9091926 100644 --- a/fs/ceph/super.h +++ b/fs/ceph/super.h @@ -21,7 +21,7 @@ /* large granularity for statfs utilization stats to facilitate * large volume sizes on 32-bit machines. 
*/ -#define CEPH_BLOCK_SHIFT 20 /* 1 MB */ +#define CEPH_BLOCK_SHIFT 22 /* 4 MB */ #define CEPH_BLOCK (1 << CEPH_BLOCK_SHIFT) #define CEPH_MOUNT_OPT_DIRSTAT (1<<4) /* `cat dirname` for stats */ diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c index 51feb1a..993384e4 100644 --- a/fs/cifs/cifs_dfs_ref.c +++ b/fs/cifs/cifs_dfs_ref.c @@ -18,6 +18,7 @@ #include <linux/slab.h> #include <linux/vfs.h> #include <linux/fs.h> +#include <linux/inet.h> #include "cifsglob.h" #include "cifsproto.h" #include "cifsfs.h" @@ -149,7 +150,8 @@ char *cifs_compose_mount_options(const char *sb_mountdata, * assuming that we have 'unc=' and 'ip=' in * the original sb_mountdata */ - md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12; + md_len = strlen(sb_mountdata) + rc + strlen(ref->node_name) + 12 + + INET6_ADDRSTRLEN; mountdata = kzalloc(md_len+1, GFP_KERNEL); if (mountdata == NULL) { rc = -ENOMEM; diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c index 07132c4..219933b 100644 --- a/fs/cifs/cifssmb.c +++ b/fs/cifs/cifssmb.c @@ -3473,13 +3473,12 @@ CIFSSMBSetCIFSACL(const int xid, struct cifs_tcon *tcon, __u16 fid, int rc = 0; int bytes_returned = 0; SET_SEC_DESC_REQ *pSMB = NULL; - NTRANSACT_RSP *pSMBr = NULL; + void *pSMBr; setCifsAclRetry: - rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, - (void **) &pSMBr); + rc = smb_init(SMB_COM_NT_TRANSACT, 19, tcon, (void **) &pSMB, &pSMBr); if (rc) - return (rc); + return rc; pSMB->MaxSetupCount = 0; pSMB->Reserved = 0; @@ -3507,9 +3506,8 @@ setCifsAclRetry: pSMB->AclFlags = cpu_to_le32(CIFS_ACL_DACL); if (pntsd && acllen) { - memcpy((char *) &pSMBr->hdr.Protocol + data_offset, - (char *) pntsd, - acllen); + memcpy((char *)pSMBr + offsetof(struct smb_hdr, Protocol) + + data_offset, pntsd, acllen); inc_rfc1001_len(pSMB, byte_count + data_count); } else inc_rfc1001_len(pSMB, byte_count); @@ -5291,7 +5289,8 @@ CIFSSMBSetFileInfo(const int xid, struct cifs_tcon *tcon, param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; - data_offset = (char *) (&pSMB->hdr.Protocol) + offset; + data_offset = (char *)pSMB + + offsetof(struct smb_hdr, Protocol) + offset; count = sizeof(FILE_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); @@ -5560,7 +5559,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, u16 fid, u32 pid_of_opener) { struct smb_com_transaction2_sfi_req *pSMB = NULL; - FILE_UNIX_BASIC_INFO *data_offset; + char *data_offset; int rc = 0; u16 params, param_offset, offset, byte_count, count; @@ -5582,8 +5581,9 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, param_offset = offsetof(struct smb_com_transaction2_sfi_req, Fid) - 4; offset = param_offset + params; - data_offset = (FILE_UNIX_BASIC_INFO *) - ((char *)(&pSMB->hdr.Protocol) + offset); + data_offset = (char *)pSMB + + offsetof(struct smb_hdr, Protocol) + offset; + count = sizeof(FILE_UNIX_BASIC_INFO); pSMB->MaxParameterCount = cpu_to_le16(2); @@ -5605,7 +5605,7 @@ CIFSSMBUnixSetFileInfo(const int xid, struct cifs_tcon *tcon, inc_rfc1001_len(pSMB, byte_count); pSMB->ByteCount = cpu_to_le16(byte_count); - cifs_fill_unix_set_info(data_offset, args); + cifs_fill_unix_set_info((FILE_UNIX_BASIC_INFO *)data_offset, args); rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *) pSMB, 0); if (rc) diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c index b775809..9e6ee47 100644 --- a/fs/cifs/connect.c +++ b/fs/cifs/connect.c @@ -158,6 +158,7 @@ cifs_reconnect(struct TCP_Server_Info *server) try_to_freeze(); 
/* we should try only the port we connected to before */ + mutex_lock(&server->srv_mutex); rc = generic_ip_connect(server); if (rc) { cFYI(1, "reconnect error %d", rc); @@ -169,6 +170,7 @@ cifs_reconnect(struct TCP_Server_Info *server) server->tcpStatus = CifsNeedNegotiate; spin_unlock(&GlobalMid_Lock); } + mutex_unlock(&server->srv_mutex); } while (server->tcpStatus == CifsNeedReconnect); return rc; diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c index 745e5cd..6f37228 100644 --- a/fs/cifs/inode.c +++ b/fs/cifs/inode.c @@ -173,7 +173,8 @@ cifs_fattr_to_inode(struct inode *inode, struct cifs_fattr *fattr) if (fattr->cf_flags & CIFS_FATTR_DFS_REFERRAL) inode->i_flags |= S_AUTOMOUNT; - cifs_set_ops(inode); + if (inode->i_state & I_NEW) + cifs_set_ops(inode); } void diff --git a/fs/dcache.c b/fs/dcache.c index 8b64f38..ecc0742 100644 --- a/fs/dcache.c +++ b/fs/dcache.c @@ -1563,7 +1563,7 @@ static struct dentry * d_find_any_alias(struct inode *inode) */ struct dentry *d_obtain_alias(struct inode *inode) { - static const struct qstr anonstring = { .name = "" }; + static const struct qstr anonstring = { .name = "/", .len = 1 }; struct dentry *tmp; struct dentry *res; diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c index e7a7a2f..eac5b7c 100644 --- a/fs/debugfs/inode.c +++ b/fs/debugfs/inode.c @@ -380,8 +380,7 @@ EXPORT_SYMBOL_GPL(debugfs_remove); */ void debugfs_remove_recursive(struct dentry *dentry) { - struct dentry *child; - struct dentry *parent; + struct dentry *child, *next, *parent; if (!dentry) return; @@ -391,61 +390,37 @@ void debugfs_remove_recursive(struct dentry *dentry) return; parent = dentry; + down: mutex_lock(&parent->d_inode->i_mutex); + list_for_each_entry_safe(child, next, &parent->d_subdirs, d_u.d_child) { + if (!debugfs_positive(child)) + continue; - while (1) { - /* - * When all dentries under "parent" has been removed, - * walk up the tree until we reach our starting point. - */ - if (list_empty(&parent->d_subdirs)) { - mutex_unlock(&parent->d_inode->i_mutex); - if (parent == dentry) - break; - parent = parent->d_parent; - mutex_lock(&parent->d_inode->i_mutex); - } - child = list_entry(parent->d_subdirs.next, struct dentry, - d_u.d_child); - next_sibling: - - /* - * If "child" isn't empty, walk down the tree and - * remove all its descendants first. - */ + /* perhaps simple_empty(child) makes more sense */ if (!list_empty(&child->d_subdirs)) { mutex_unlock(&parent->d_inode->i_mutex); parent = child; - mutex_lock(&parent->d_inode->i_mutex); - continue; - } - __debugfs_remove(child, parent); - if (parent->d_subdirs.next == &child->d_u.d_child) { - /* - * Try the next sibling. - */ - if (child->d_u.d_child.next != &parent->d_subdirs) { - child = list_entry(child->d_u.d_child.next, - struct dentry, - d_u.d_child); - goto next_sibling; - } - - /* - * Avoid infinite loop if we fail to remove - * one dentry. 
- */ - mutex_unlock(&parent->d_inode->i_mutex); - break; + goto down; } - simple_release_fs(&debugfs_mount, &debugfs_mount_count); + up: + if (!__debugfs_remove(child, parent)) + simple_release_fs(&debugfs_mount, &debugfs_mount_count); } - parent = dentry->d_parent; + mutex_unlock(&parent->d_inode->i_mutex); + child = parent; + parent = parent->d_parent; mutex_lock(&parent->d_inode->i_mutex); - __debugfs_remove(dentry, parent); + + if (child != dentry) { + next = list_entry(child->d_u.d_child.next, struct dentry, + d_u.d_child); + goto up; + } + + if (!__debugfs_remove(child, parent)) + simple_release_fs(&debugfs_mount, &debugfs_mount_count); mutex_unlock(&parent->d_inode->i_mutex); - simple_release_fs(&debugfs_mount, &debugfs_mount_count); } EXPORT_SYMBOL_GPL(debugfs_remove_recursive); @@ -1149,13 +1149,6 @@ void setup_new_exec(struct linux_binprm * bprm) set_dumpable(current->mm, suid_dumpable); } - /* - * Flush performance counters when crossing a - * security domain: - */ - if (!get_dumpable(current->mm)) - perf_event_exit_task(current); - /* An exec changes our domain. We are no longer part of the thread group */ @@ -1219,6 +1212,15 @@ void install_exec_creds(struct linux_binprm *bprm) commit_creds(bprm->cred); bprm->cred = NULL; + + /* + * Disable monitoring for regular users + * when executing setuid binaries. Must + * wait until new credentials are committed + * by commit_creds() above + */ + if (get_dumpable(current->mm) != SUID_DUMP_USER) + perf_event_exit_task(current); /* * cred_guard_mutex must be held at least to this point to prevent * ptrace_attach() from altering our determination of the task's diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c index e5a7111..8c9f82d 100644 --- a/fs/ext3/namei.c +++ b/fs/ext3/namei.c @@ -584,11 +584,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, if (!ext3_check_dir_entry("htree_dirblock_to_tree", dir, de, bh, (block<<EXT3_BLOCK_SIZE_BITS(dir->i_sb)) +((char *)de - bh->b_data))) { - /* On error, skip the f_pos to the next block. */ - dir_file->f_pos = (dir_file->f_pos | - (dir->i_sb->s_blocksize - 1)) + 1; - brelse (bh); - return count; + /* silently ignore the rest of the block */ + break; } ext3fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || diff --git a/fs/ext4/Kconfig b/fs/ext4/Kconfig index 9ed1bb1..5459168 100644 --- a/fs/ext4/Kconfig +++ b/fs/ext4/Kconfig @@ -82,4 +82,5 @@ config EXT4_DEBUG Enables run-time debugging support for the ext4 filesystem. 
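debugfs_remove_recursive() is rewritten in the hunk above as an iterative walk: descend while there are children, remove leaves, then climb back to the parent. The standalone sketch below applies the same down/up shape to a toy first-child/next-sibling tree; it is not the dentry code.

#include <stdio.h>
#include <stdlib.h>

struct node {
    char name[16];
    struct node *parent;
    struct node *child;     /* first child */
    struct node *sibling;   /* next sibling */
};

static struct node *new_node(const char *name, struct node *parent)
{
    struct node *n = calloc(1, sizeof(*n));
    snprintf(n->name, sizeof(n->name), "%s", name);
    n->parent = parent;
    if (parent) {
        n->sibling = parent->child;
        parent->child = n;
    }
    return n;
}

/* Iterative post-order removal: no recursion, so the stack depth does
 * not grow with the depth of the tree. */
static void remove_all(struct node *root)
{
    struct node *cur = root;

    while (cur) {
        if (cur->child) {                 /* "down": descend first */
            cur = cur->child;
            continue;
        }
        /* cur is a leaf: unlink it from its parent and free it */
        struct node *parent = cur->parent;
        if (parent)
            parent->child = cur->sibling; /* cur was the first child */
        printf("removing %s\n", cur->name);
        free(cur);
        cur = parent;                     /* "up": continue with the parent */
    }
}

int main(void)
{
    struct node *root = new_node("root", NULL);
    struct node *a = new_node("a", root);
    new_node("a1", a);
    new_node("a2", a);
    new_node("b", root);

    remove_all(root);
    return 0;
}

The design point is the same as in the patch: traversal state lives in the tree itself (parent and sibling pointers), so removal of an arbitrarily deep hierarchy needs only a loop.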
If you select Y here, then you will be able to turn on debugging - with a command such as "echo 1 > /sys/kernel/debug/ext4/mballoc-debug" + with a command such as: + echo 1 > /sys/module/ext4/parameters/mballoc_debug diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c index 354ba48..d432d37 100644 --- a/fs/ext4/extents.c +++ b/fs/ext4/extents.c @@ -4155,7 +4155,7 @@ static int ext4_xattr_fiemap(struct inode *inode, error = ext4_get_inode_loc(inode, &iloc); if (error) return error; - physical = iloc.bh->b_blocknr << blockbits; + physical = (__u64)iloc.bh->b_blocknr << blockbits; offset = EXT4_GOOD_OLD_INODE_SIZE + EXT4_I(inode)->i_extra_isize; physical += offset; @@ -4163,7 +4163,7 @@ static int ext4_xattr_fiemap(struct inode *inode, flags |= FIEMAP_EXTENT_DATA_INLINE; brelse(iloc.bh); } else { /* external block */ - physical = EXT4_I(inode)->i_file_acl << blockbits; + physical = (__u64)EXT4_I(inode)->i_file_acl << blockbits; length = inode->i_sb->s_blocksize; } diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c index 7e56946..5de8a27 100644 --- a/fs/ext4/inode.c +++ b/fs/ext4/inode.c @@ -5481,7 +5481,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat) { struct inode *inode; - unsigned long delalloc_blocks; + unsigned long long delalloc_blocks; inode = dentry->d_inode; generic_fillattr(inode, stat); @@ -5498,7 +5498,7 @@ int ext4_getattr(struct vfsmount *mnt, struct dentry *dentry, */ delalloc_blocks = EXT4_I(inode)->i_reserved_data_blocks; - stat->blocks += (delalloc_blocks << inode->i_sb->s_blocksize_bits)>>9; + stat->blocks += delalloc_blocks << (inode->i_sb->s_blocksize_bits-9); return 0; } diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c index 35959f6..cdb8414 100644 --- a/fs/ext4/mballoc.c +++ b/fs/ext4/mballoc.c @@ -2026,7 +2026,11 @@ repeat: group = ac->ac_g_ex.fe_group; for (i = 0; i < ngroups; group++, i++) { - if (group == ngroups) + /* + * Artificially restricted ngroups for non-extent + * files makes group > ngroups possible on first loop. + */ + if (group >= ngroups) group = 0; /* This now checks without needing the buddy page */ diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c index 78585fc..595d087 100644 --- a/fs/ext4/namei.c +++ b/fs/ext4/namei.c @@ -585,11 +585,8 @@ static int htree_dirblock_to_tree(struct file *dir_file, if (ext4_check_dir_entry(dir, NULL, de, bh, (block<<EXT4_BLOCK_SIZE_BITS(dir->i_sb)) + ((char *)de - bh->b_data))) { - /* On error, skip the f_pos to the next block. 
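The two ext4_xattr_fiemap() lines above cast the block number to __u64 before shifting, so the byte offset is computed in 64-bit arithmetic instead of being truncated to 32 bits first. A small demonstration of the difference, with an arbitrary block number chosen to make the truncation visible:

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
    uint32_t block = 0x01000000;   /* a block number past 16M */
    unsigned blockbits = 12;       /* 4 KiB blocks */

    /* Shift performed in 32 bits: the high bits are lost before the
     * result is widened to 64 bits. */
    uint64_t wrong = (uint64_t)(block << blockbits);

    /* Cast first, shift in 64 bits: the full byte offset survives. */
    uint64_t right = (uint64_t)block << blockbits;

    printf("wrong: 0x%016" PRIx64 "\n", wrong);
    printf("right: 0x%016" PRIx64 "\n", right);
    return 0;
}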
*/ - dir_file->f_pos = (dir_file->f_pos | - (dir->i_sb->s_blocksize - 1)) + 1; - brelse(bh); - return count; + /* silently ignore the rest of the block */ + break; } ext4fs_dirhash(de->name, de->name_len, hinfo); if ((hinfo->hash < start_hash) || @@ -2062,7 +2059,8 @@ int ext4_orphan_del(handle_t *handle, struct inode *inode) int err = 0; /* ext4_handle_valid() assumes a valid handle_t pointer */ - if (handle && !ext4_handle_valid(handle)) + if (handle && !ext4_handle_valid(handle) && + !(EXT4_SB(inode->i_sb)->s_mount_state & EXT4_ORPHAN_FS)) return 0; mutex_lock(&EXT4_SB(inode->i_sb)->s_orphan_lock); diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c index c2865cc..8f797ae 100644 --- a/fs/ext4/xattr.c +++ b/fs/ext4/xattr.c @@ -1271,6 +1271,8 @@ retry: s_min_extra_isize) { tried_min_extra_isize++; new_extra_isize = s_min_extra_isize; + kfree(is); is = NULL; + kfree(bs); bs = NULL; goto retry; } error = -1; diff --git a/fs/fat/inode.c b/fs/fat/inode.c index 9836839..9fc6975 100644 --- a/fs/fat/inode.c +++ b/fs/fat/inode.c @@ -1236,6 +1236,19 @@ static int fat_read_root(struct inode *inode) return 0; } +static unsigned long calc_fat_clusters(struct super_block *sb) +{ + struct msdos_sb_info *sbi = MSDOS_SB(sb); + + /* Divide first to avoid overflow */ + if (sbi->fat_bits != 12) { + unsigned long ent_per_sec = sb->s_blocksize * 8 / sbi->fat_bits; + return ent_per_sec * sbi->fat_length; + } + + return sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; +} + /* * Read the super block of an MS-DOS FS. */ @@ -1442,7 +1455,7 @@ int fat_fill_super(struct super_block *sb, void *data, int silent, int isvfat, sbi->fat_bits = (total_clusters > MAX_FAT12) ? 16 : 12; /* check that FAT table does not overflow */ - fat_clusters = sbi->fat_length * sb->s_blocksize * 8 / sbi->fat_bits; + fat_clusters = calc_fat_clusters(sb); total_clusters = min(total_clusters, fat_clusters - FAT_START_ENT); if (total_clusters > MAX_FAT(sb)) { if (!silent) diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c index 4765190..73c0bd7 100644 --- a/fs/fscache/stats.c +++ b/fs/fscache/stats.c @@ -276,5 +276,5 @@ const struct file_operations fscache_stats_fops = { .open = fscache_stats_open, .read = seq_read, .llseek = seq_lseek, - .release = seq_release, + .release = single_release, }; diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c index c04a025..607a973 100644 --- a/fs/fuse/dir.c +++ b/fs/fuse/dir.c @@ -1439,6 +1439,8 @@ static int fuse_setxattr(struct dentry *entry, const char *name, fc->no_setxattr = 1; err = -EOPNOTSUPP; } + if (!err) + fuse_invalidate_attr(inode); return err; } @@ -1568,6 +1570,8 @@ static int fuse_removexattr(struct dentry *entry, const char *name) fc->no_removexattr = 1; err = -EOPNOTSUPP; } + if (!err) + fuse_invalidate_attr(inode); return err; } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 79fca8d..2e1c10fe 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -1298,7 +1298,6 @@ static int fuse_writepage_locked(struct page *page) inc_bdi_stat(mapping->backing_dev_info, BDI_WRITEBACK); inc_zone_page_state(tmp_page, NR_WRITEBACK_TEMP); - end_page_writeback(page); spin_lock(&fc->lock); list_add(&req->writepages_entry, &fi->writepages); @@ -1306,6 +1305,8 @@ static int fuse_writepage_locked(struct page *page) fuse_flush_writepages(inode); spin_unlock(&fc->lock); + end_page_writeback(page); + return 0; err_free: diff --git a/fs/hfsplus/extents.c b/fs/hfsplus/extents.c index b1991a2..9d8c087 100644 --- a/fs/hfsplus/extents.c +++ b/fs/hfsplus/extents.c @@ -499,7 +499,7 @@ void hfsplus_file_truncate(struct inode 
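calc_fat_clusters() divides before multiplying, exactly as its comment says, so the intermediate product cannot wrap. The figures in the sketch below are exaggerated purely to make the 32-bit wraparound of the multiply-first form visible:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
    /* Deliberately oversized values for illustration. */
    uint32_t fat_length = 1u << 20;  /* FAT size in sectors */
    uint32_t blocksize  = 4096;      /* bytes per sector */
    uint32_t fat_bits   = 32;

    /* Multiply first: fat_length * blocksize * 8 needs 35 bits and
     * wraps in 32-bit arithmetic. */
    uint32_t naive = fat_length * blocksize * 8 / fat_bits;

    /* Divide first, as calc_fat_clusters() does: entries per sector is
     * small, so the final product stays in range. */
    uint32_t ent_per_sec = blocksize * 8 / fat_bits;
    uint32_t fixed = ent_per_sec * fat_length;

    printf("multiply first: %u clusters\n", naive);
    printf("divide first:   %u clusters\n", fixed);
    return 0;
}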
*inode) struct address_space *mapping = inode->i_mapping; struct page *page; void *fsdata; - u32 size = inode->i_size; + loff_t size = inode->i_size; int res; res = pagecache_write_begin(NULL, mapping, size, 0, diff --git a/fs/hpfs/map.c b/fs/hpfs/map.c index a790821..ea3d1ca 100644 --- a/fs/hpfs/map.c +++ b/fs/hpfs/map.c @@ -17,7 +17,8 @@ unsigned int *hpfs_map_bitmap(struct super_block *s, unsigned bmp_block, struct quad_buffer_head *qbh, char *id) { secno sec; - if (hpfs_sb(s)->sb_chk) if (bmp_block * 16384 > hpfs_sb(s)->sb_fs_size) { + unsigned n_bands = (hpfs_sb(s)->sb_fs_size + 0x3fff) >> 14; + if (hpfs_sb(s)->sb_chk) if (bmp_block >= n_bands) { hpfs_error(s, "hpfs_map_bitmap called with bad parameter: %08x at %s", bmp_block, id); return NULL; } diff --git a/fs/hpfs/super.c b/fs/hpfs/super.c index 98580a3..f760c15 100644 --- a/fs/hpfs/super.c +++ b/fs/hpfs/super.c @@ -553,7 +553,13 @@ static int hpfs_fill_super(struct super_block *s, void *options, int silent) sbi->sb_cp_table = NULL; sbi->sb_c_bitmap = -1; sbi->sb_max_fwd_alloc = 0xffffff; - + + if (sbi->sb_fs_size >= 0x80000000) { + hpfs_error(s, "invalid size in superblock: %08x", + (unsigned)sbi->sb_fs_size); + goto bail4; + } + /* Load bitmap directory */ if (!(sbi->sb_bmp_dir = hpfs_load_bitmap_directory(s, le32_to_cpu(superblock->bitmaps)))) goto bail4; diff --git a/fs/isofs/inode.c b/fs/isofs/inode.c index b3cc858..26f6364 100644 --- a/fs/isofs/inode.c +++ b/fs/isofs/inode.c @@ -119,8 +119,8 @@ static void destroy_inodecache(void) static int isofs_remount(struct super_block *sb, int *flags, char *data) { - /* we probably want a lot more here */ - *flags |= MS_RDONLY; + if (!(*flags & MS_RDONLY)) + return -EROFS; return 0; } @@ -769,15 +769,6 @@ root_found: */ s->s_maxbytes = 0x80000000000LL; - /* - * The CDROM is read-only, has no nodes (devices) on it, and since - * all of the files appear to be owned by root, we really do not want - * to allow suid. (suid or devices will not show up unless we have - * Rock Ridge extensions) - */ - - s->s_flags |= MS_RDONLY /* | MS_NODEV | MS_NOSUID */; - /* Set this for reference. Its not currently used except on write which we don't have .. */ @@ -1528,6 +1519,9 @@ struct inode *isofs_iget(struct super_block *sb, static struct dentry *isofs_mount(struct file_system_type *fs_type, int flags, const char *dev_name, void *data) { + /* We don't support read-write mounts */ + if (!(flags & MS_RDONLY)) + return ERR_PTR(-EACCES); return mount_bdev(fs_type, flags, dev_name, data, isofs_fill_super); } diff --git a/fs/jbd2/transaction.c b/fs/jbd2/transaction.c index 4ef2aae..26e11db 100644 --- a/fs/jbd2/transaction.c +++ b/fs/jbd2/transaction.c @@ -469,10 +469,10 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, int gfp_mask) &transaction->t_outstanding_credits); if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); + tid = transaction->t_tid; spin_unlock(&transaction->t_handle_lock); jbd_debug(2, "restarting handle %p\n", handle); - tid = transaction->t_tid; need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) diff --git a/fs/jfs/inode.c b/fs/jfs/inode.c index 1096559..09100b4 100644 --- a/fs/jfs/inode.c +++ b/fs/jfs/inode.c @@ -125,7 +125,7 @@ int jfs_write_inode(struct inode *inode, struct writeback_control *wbc) { int wait = wbc->sync_mode == WB_SYNC_ALL; - if (test_cflag(COMMIT_Nolink, inode)) + if (inode->i_nlink == 0) return 0; /* * If COMMIT_DIRTY is not set, the inode isn't really dirty. 
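The hpfs_map_bitmap() hunk replaces the old multiply-based check with a band count rounded up from the filesystem size (each band covers 0x4000 sectors) and rejects any bitmap index at or past it. The arithmetic in isolation:

#include <stdio.h>

/* Each bitmap band covers 0x4000 (16384) sectors; round up so a
 * partial last band still counts. */
static unsigned band_count(unsigned fs_size_in_sectors)
{
    return (fs_size_in_sectors + 0x3fff) >> 14;
}

int main(void)
{
    unsigned fs_size = 100000;                 /* sectors */
    unsigned n_bands = band_count(fs_size);

    printf("%u sectors -> %u bands\n", fs_size, n_bands);

    /* A bitmap index is valid only if it falls inside the band count. */
    for (unsigned bmp_block = 5; bmp_block <= 7; bmp_block++)
        printf("bmp_block %u: %s\n", bmp_block,
               bmp_block >= n_bands ? "rejected" : "ok");
    return 0;
}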
diff --git a/fs/jfs/jfs_dtree.c b/fs/jfs/jfs_dtree.c index 9197a1b..b6f17c0 100644 --- a/fs/jfs/jfs_dtree.c +++ b/fs/jfs/jfs_dtree.c @@ -3047,6 +3047,14 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) dir_index = (u32) filp->f_pos; + /* + * NFSv4 reserves cookies 1 and 2 for . and .. so we add + * the value we return to the vfs is one greater than the + * one we use internally. + */ + if (dir_index) + dir_index--; + if (dir_index > 1) { struct dir_table_slot dirtab_slot; @@ -3086,7 +3094,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) if (p->header.flag & BT_INTERNAL) { jfs_err("jfs_readdir: bad index table"); DT_PUTPAGE(mp); - filp->f_pos = -1; + filp->f_pos = DIREND; return 0; } } else { @@ -3094,7 +3102,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* * self "." */ - filp->f_pos = 0; + filp->f_pos = 1; if (filldir(dirent, ".", 1, 0, ip->i_ino, DT_DIR)) return 0; @@ -3102,7 +3110,7 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* * parent ".." */ - filp->f_pos = 1; + filp->f_pos = 2; if (filldir(dirent, "..", 2, 1, PARENT(ip), DT_DIR)) return 0; @@ -3123,24 +3131,25 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) /* * Legacy filesystem - OS/2 & Linux JFS < 0.3.6 * - * pn = index = 0: First entry "." - * pn = 0; index = 1: Second entry ".." + * pn = 0; index = 1: First entry "." + * pn = 0; index = 2: Second entry ".." * pn > 0: Real entries, pn=1 -> leftmost page * pn = index = -1: No more entries */ dtpos = filp->f_pos; - if (dtpos == 0) { + if (dtpos < 2) { /* build "." entry */ + filp->f_pos = 1; if (filldir(dirent, ".", 1, filp->f_pos, ip->i_ino, DT_DIR)) return 0; - dtoffset->index = 1; + dtoffset->index = 2; filp->f_pos = dtpos; } if (dtoffset->pn == 0) { - if (dtoffset->index == 1) { + if (dtoffset->index == 2) { /* build ".." entry */ if (filldir(dirent, "..", 2, filp->f_pos, @@ -3233,6 +3242,12 @@ int jfs_readdir(struct file *filp, void *dirent, filldir_t filldir) } jfs_dirent->position = unique_pos++; } + /* + * We add 1 to the index because we may + * use a value of 2 internally, and NFSv4 + * doesn't like that. 
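The jfs_readdir() changes in this area (the hunk continues just below) shift the directory position so that cookies 0, 1 and 2 stay reserved for start-of-directory, "." and "..", as NFSv4 expects: one is added on the way out and subtracted on the way back in. A loose standalone illustration of that mapping, not the jfs code:

#include <stdio.h>

static unsigned long index_to_cookie(unsigned long dir_index)
{
    return dir_index + 1;           /* what readdir() reports */
}

static unsigned long cookie_to_index(unsigned long cookie)
{
    return cookie ? cookie - 1 : 0; /* what a later seek hands back */
}

int main(void)
{
    printf("\".\"  reported at cookie 1\n");
    printf("\"..\" reported at cookie 2\n");

    /* Real entries may use internal index 2 and up; shifted, they
     * never collide with the reserved cookies. */
    for (unsigned long idx = 2; idx <= 4; idx++) {
        unsigned long cookie = index_to_cookie(idx);
        printf("internal index %lu -> cookie %lu -> index %lu\n",
               idx, cookie, cookie_to_index(cookie));
    }
    return 0;
}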
+ */ + jfs_dirent->position++; } else { jfs_dirent->position = dtpos; len = min(d_namleft, DTLHDRDATALEN_LEGACY); diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c index 583636f..ee55e45 100644 --- a/fs/jfs/jfs_logmgr.c +++ b/fs/jfs/jfs_logmgr.c @@ -1057,7 +1057,8 @@ static int lmLogSync(struct jfs_log * log, int hard_sync) */ void jfs_syncpt(struct jfs_log *log, int hard_sync) { LOG_LOCK(log); - lmLogSync(log, hard_sync); + if (!test_bit(log_QUIESCE, &log->flag)) + lmLogSync(log, hard_sync); LOG_UNLOCK(log); } diff --git a/fs/lockd/clntlock.c b/fs/lockd/clntlock.c index 8d4ea83..de88922 100644 --- a/fs/lockd/clntlock.c +++ b/fs/lockd/clntlock.c @@ -141,6 +141,9 @@ int nlmclnt_block(struct nlm_wait *block, struct nlm_rqst *req, long timeout) timeout); if (ret < 0) return -ERESTARTSYS; + /* Reset the lock status after a server reboot so we resend */ + if (block->b_status == nlm_lck_denied_grace_period) + block->b_status = nlm_lck_blocked; req->a_res.status = block->b_status; return 0; } diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c index 5ee055e..e374050 100644 --- a/fs/lockd/clntproc.c +++ b/fs/lockd/clntproc.c @@ -550,9 +550,6 @@ again: status = nlmclnt_block(block, req, NLMCLNT_POLL_TIMEOUT); if (status < 0) break; - /* Resend the blocking lock request after a server reboot */ - if (resp->status == nlm_lck_denied_grace_period) - continue; if (resp->status != nlm_lck_blocked) break; } diff --git a/fs/lockd/svclock.c b/fs/lockd/svclock.c index 6e31695..db7be3a 100644 --- a/fs/lockd/svclock.c +++ b/fs/lockd/svclock.c @@ -913,6 +913,7 @@ nlmsvc_retry_blocked(void) unsigned long timeout = MAX_SCHEDULE_TIMEOUT; struct nlm_block *block; + spin_lock(&nlm_blocked_lock); while (!list_empty(&nlm_blocked) && !kthread_should_stop()) { block = list_entry(nlm_blocked.next, struct nlm_block, b_list); @@ -922,6 +923,7 @@ nlmsvc_retry_blocked(void) timeout = block->b_when - jiffies; break; } + spin_unlock(&nlm_blocked_lock); dprintk("nlmsvc_retry_blocked(%p, when=%ld)\n", block, block->b_when); @@ -931,7 +933,9 @@ nlmsvc_retry_blocked(void) retry_deferred_block(block); } else nlmsvc_grant_blocked(block); + spin_lock(&nlm_blocked_lock); } + spin_unlock(&nlm_blocked_lock); return timeout; } diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c index 894e326..7204bcc 100644 --- a/fs/nfs/nfs4proc.c +++ b/fs/nfs/nfs4proc.c @@ -1335,6 +1335,12 @@ int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state case -ENOMEM: err = 0; goto out; + case -NFS4ERR_DELAY: + case -NFS4ERR_GRACE: + set_bit(NFS_DELEGATED_STATE, &state->flags); + ssleep(1); + err = -EAGAIN; + goto out; } err = nfs4_handle_exception(server, err, &exception); } while (exception.retry); diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c index 92f7eb7..4ec38df 100644 --- a/fs/nfsd/nfs4state.c +++ b/fs/nfsd/nfs4state.c @@ -189,13 +189,7 @@ static void __nfs4_file_put_access(struct nfs4_file *fp, int oflag) { if (atomic_dec_and_test(&fp->fi_access[oflag])) { nfs4_file_put_fd(fp, oflag); - /* - * It's also safe to get rid of the RDWR open *if* - * we no longer have need of the other kind of access - * or if we already have the other kind of open: - */ - if (fp->fi_fds[1-oflag] - || atomic_read(&fp->fi_access[1 - oflag]) == 0) + if (atomic_read(&fp->fi_access[1 - oflag]) == 0) nfs4_file_put_fd(fp, O_RDWR); } } diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c index 59ac3f4..45f53ae 100644 --- a/fs/nfsd/nfs4xdr.c +++ b/fs/nfsd/nfs4xdr.c @@ -159,8 +159,8 @@ static __be32 *read_buf(struct nfsd4_compoundargs *argp, u32 
nbytes) */ memcpy(p, argp->p, avail); /* step to next page */ - argp->p = page_address(argp->pagelist[0]); argp->pagelist++; + argp->p = page_address(argp->pagelist[0]); if (argp->pagelen < PAGE_SIZE) { argp->end = argp->p + (argp->pagelen>>2); argp->pagelen = 0; @@ -342,10 +342,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, all 32 bits of 'nseconds'. */ READ_BUF(12); len += 12; - READ32(dummy32); - if (dummy32) - return nfserr_inval; - READ32(iattr->ia_atime.tv_sec); + READ64(iattr->ia_atime.tv_sec); READ32(iattr->ia_atime.tv_nsec); if (iattr->ia_atime.tv_nsec >= (u32)1000000000) return nfserr_inval; @@ -368,10 +365,7 @@ nfsd4_decode_fattr(struct nfsd4_compoundargs *argp, u32 *bmval, all 32 bits of 'nseconds'. */ READ_BUF(12); len += 12; - READ32(dummy32); - if (dummy32) - return nfserr_inval; - READ32(iattr->ia_mtime.tv_sec); + READ64(iattr->ia_mtime.tv_sec); READ32(iattr->ia_mtime.tv_nsec); if (iattr->ia_mtime.tv_nsec >= (u32)1000000000) return nfserr_inval; @@ -2148,8 +2142,7 @@ out_acl: if (bmval1 & FATTR4_WORD1_TIME_ACCESS) { if ((buflen -= 12) < 0) goto out_resource; - WRITE32(0); - WRITE32(stat.atime.tv_sec); + WRITE64((s64)stat.atime.tv_sec); WRITE32(stat.atime.tv_nsec); } if (bmval1 & FATTR4_WORD1_TIME_DELTA) { @@ -2162,15 +2155,13 @@ out_acl: if (bmval1 & FATTR4_WORD1_TIME_METADATA) { if ((buflen -= 12) < 0) goto out_resource; - WRITE32(0); - WRITE32(stat.ctime.tv_sec); + WRITE64((s64)stat.ctime.tv_sec); WRITE32(stat.ctime.tv_nsec); } if (bmval1 & FATTR4_WORD1_TIME_MODIFY) { if ((buflen -= 12) < 0) goto out_resource; - WRITE32(0); - WRITE32(stat.mtime.tv_sec); + WRITE64((s64)stat.mtime.tv_sec); WRITE32(stat.mtime.tv_nsec); } if (bmval1 & FATTR4_WORD1_MOUNTED_ON_FILEID) { diff --git a/fs/nilfs2/inode.c b/fs/nilfs2/inode.c index b9b45fc..373cd7b 100644 --- a/fs/nilfs2/inode.c +++ b/fs/nilfs2/inode.c @@ -195,13 +195,32 @@ static int nilfs_writepage(struct page *page, struct writeback_control *wbc) static int nilfs_set_page_dirty(struct page *page) { - int ret = __set_page_dirty_buffers(page); + int ret = __set_page_dirty_nobuffers(page); - if (ret) { + if (page_has_buffers(page)) { struct inode *inode = page->mapping->host; - unsigned nr_dirty = 1 << (PAGE_SHIFT - inode->i_blkbits); + unsigned nr_dirty = 0; + struct buffer_head *bh, *head; - nilfs_set_file_dirty(inode, nr_dirty); + /* + * This page is locked by callers, and no other thread + * concurrently marks its buffers dirty since they are + * only dirtied through routines in fs/buffer.c in + * which call sites of mark_buffer_dirty are protected + * by page lock. 
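The nfsd4 XDR hunks switch the time fields to a genuine 64-bit seconds value (READ64/WRITE64) instead of a zero pad word plus 32-bit seconds. The sketch below encodes that 12-byte big-endian layout in userspace; encode_nfstime4() is an invented helper, and htonl() comes from POSIX <arpa/inet.h>.

#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <arpa/inet.h>   /* htonl() */

/* nfstime4 on the wire: signed 64-bit seconds followed by 32-bit
 * nanoseconds, both big-endian.  Writing the seconds as a zero word
 * plus a 32-bit value, as the old code did, cannot represent times
 * past 2038 or before 1970. */
static void encode_nfstime4(unsigned char *p, int64_t sec, uint32_t nsec)
{
    uint32_t hi = (uint32_t)((uint64_t)sec >> 32);
    uint32_t lo = (uint32_t)((uint64_t)sec & 0xffffffffu);
    uint32_t be;

    be = htonl(hi);   memcpy(p + 0, &be, 4);
    be = htonl(lo);   memcpy(p + 4, &be, 4);
    be = htonl(nsec); memcpy(p + 8, &be, 4);
}

int main(void)
{
    unsigned char buf[12];

    /* A timestamp past the 32-bit limit: 2106-02-07T06:28:16Z. */
    encode_nfstime4(buf, 4294967296LL, 500000000);

    for (int i = 0; i < 12; i++)
        printf("%02x%s", buf[i], (i % 4 == 3) ? " " : "");
    printf("\n");
    return 0;
}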
+ */ + bh = head = page_buffers(page); + do { + /* Do not mark hole blocks dirty */ + if (buffer_dirty(bh) || !buffer_mapped(bh)) + continue; + + set_buffer_dirty(bh); + nr_dirty++; + } while (bh = bh->b_this_page, bh != head); + + if (nr_dirty) + nilfs_set_file_dirty(inode, nr_dirty); } return ret; } diff --git a/fs/nilfs2/segbuf.c b/fs/nilfs2/segbuf.c index 850a7c0..07a666a 100644 --- a/fs/nilfs2/segbuf.c +++ b/fs/nilfs2/segbuf.c @@ -345,8 +345,7 @@ static void nilfs_end_bio_write(struct bio *bio, int err) if (err == -EOPNOTSUPP) { set_bit(BIO_EOPNOTSUPP, &bio->bi_flags); - bio_put(bio); - /* to be detected by submit_seg_bio() */ + /* to be detected by nilfs_segbuf_submit_bio() */ } if (!uptodate) @@ -377,12 +376,12 @@ static int nilfs_segbuf_submit_bio(struct nilfs_segment_buffer *segbuf, bio->bi_private = segbuf; bio_get(bio); submit_bio(mode, bio); + segbuf->sb_nbio++; if (bio_flagged(bio, BIO_EOPNOTSUPP)) { bio_put(bio); err = -EOPNOTSUPP; goto failed; } - segbuf->sb_nbio++; bio_put(bio); wi->bio = NULL; diff --git a/fs/notify/fanotify/fanotify.c b/fs/notify/fanotify/fanotify.c index a506360..0c2f912 100644 --- a/fs/notify/fanotify/fanotify.c +++ b/fs/notify/fanotify/fanotify.c @@ -18,6 +18,12 @@ static bool should_merge(struct fsnotify_event *old, struct fsnotify_event *new) old->tgid == new->tgid) { switch (old->data_type) { case (FSNOTIFY_EVENT_PATH): +#ifdef CONFIG_FANOTIFY_ACCESS_PERMISSIONS + /* dont merge two permission events */ + if ((old->mask & FAN_ALL_PERM_EVENTS) && + (new->mask & FAN_ALL_PERM_EVENTS)) + return false; +#endif if ((old->path.mnt == new->path.mnt) && (old->path.dentry == new->path.dentry)) return true; diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c index 9fde1c0..9860f6b 100644 --- a/fs/notify/fanotify/fanotify_user.c +++ b/fs/notify/fanotify/fanotify_user.c @@ -118,6 +118,7 @@ static int fill_event_metadata(struct fsnotify_group *group, metadata->event_len = FAN_EVENT_METADATA_LEN; metadata->metadata_len = FAN_EVENT_METADATA_LEN; metadata->vers = FANOTIFY_METADATA_VERSION; + metadata->reserved = 0; metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS; metadata->pid = pid_vnr(event->tgid); if (unlikely(event->mask & FAN_Q_OVERFLOW)) diff --git a/fs/ocfs2/extent_map.c b/fs/ocfs2/extent_map.c index 23457b4..774a032 100644 --- a/fs/ocfs2/extent_map.c +++ b/fs/ocfs2/extent_map.c @@ -782,7 +782,6 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, cpos = map_start >> osb->s_clustersize_bits; mapping_end = ocfs2_clusters_for_bytes(inode->i_sb, map_start + map_len); - mapping_end -= cpos; is_last = 0; while (cpos < mapping_end && !is_last) { u32 fe_flags; @@ -791,7 +790,7 @@ int ocfs2_fiemap(struct inode *inode, struct fiemap_extent_info *fieinfo, &hole_size, &rec, &is_last); if (ret) { mlog_errno(ret); - goto out; + goto out_unlock; } if (rec.e_blkno == 0ULL) { diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c index 81ecf9c..61a84cf 100644 --- a/fs/ocfs2/xattr.c +++ b/fs/ocfs2/xattr.c @@ -6497,6 +6497,16 @@ static int ocfs2_reflink_xattr_inline(struct ocfs2_xattr_reflink *args) } new_oi = OCFS2_I(args->new_inode); + /* + * Adjust extent record count to reserve space for extended attribute. + * Inline data count had been adjusted in ocfs2_duplicate_inline_data(). 
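nilfs_set_page_dirty() now walks the page's circular list of buffer_heads, skipping buffers that are unmapped or already dirty and counting the ones it newly marks. The same loop shape over a toy ring of four buffers:

#include <stdio.h>
#include <stdbool.h>

struct buf {
    bool mapped;
    bool dirty;
    struct buf *next;   /* circular, like b_this_page */
};

int main(void)
{
    /* Four buffers on one page, linked in a ring. */
    struct buf b[4] = {
        { .mapped = true,  .dirty = false },
        { .mapped = false, .dirty = false },  /* a hole: skip it */
        { .mapped = true,  .dirty = true  },  /* already dirty: skip it */
        { .mapped = true,  .dirty = false },
    };
    for (int i = 0; i < 4; i++)
        b[i].next = &b[(i + 1) % 4];

    unsigned nr_dirty = 0;
    struct buf *head = &b[0], *cur = head;

    /* Same shape as the nilfs loop: visit every buffer exactly once,
     * mark the clean mapped ones dirty, count how many were marked. */
    do {
        if (!cur->dirty && cur->mapped) {
            cur->dirty = true;
            nr_dirty++;
        }
    } while (cur = cur->next, cur != head);

    printf("newly dirtied buffers: %u\n", nr_dirty);
    return 0;
}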
+ */ + if (!(new_oi->ip_dyn_features & OCFS2_INLINE_DATA_FL) && + !(ocfs2_inode_is_fast_symlink(args->new_inode))) { + struct ocfs2_extent_list *el = &new_di->id2.i_list; + le16_add_cpu(&el->l_count, -(inline_size / + sizeof(struct ocfs2_extent_rec))); + } spin_lock(&new_oi->ip_lock); new_oi->ip_dyn_features |= OCFS2_HAS_XATTR_FL | OCFS2_INLINE_XATTR_FL; new_di->i_dyn_features = cpu_to_le16(new_oi->ip_dyn_features); diff --git a/fs/reiserfs/xattr.c b/fs/reiserfs/xattr.c index d780896..6e3ca4e 100644 --- a/fs/reiserfs/xattr.c +++ b/fs/reiserfs/xattr.c @@ -187,8 +187,8 @@ fill_with_dentries(void *buf, const char *name, int namelen, loff_t offset, if (dbuf->count == ARRAY_SIZE(dbuf->dentries)) return -ENOSPC; - if (name[0] == '.' && (name[1] == '\0' || - (name[1] == '.' && name[2] == '\0'))) + if (name[0] == '.' && (namelen < 2 || + (namelen == 2 && name[1] == '.'))) return 0; dentry = lookup_one_len(name, dbuf->xadir, namelen); diff --git a/fs/splice.c b/fs/splice.c index ea92b7c..16d0cb4 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -274,13 +274,16 @@ static void spd_release_page(struct splice_pipe_desc *spd, unsigned int i) * Check if we need to grow the arrays holding pages and partial page * descriptions. */ -int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) +int splice_grow_spd(const struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) { - if (pipe->buffers <= PIPE_DEF_BUFFERS) + unsigned int buffers = ACCESS_ONCE(pipe->buffers); + + spd->nr_pages_max = buffers; + if (buffers <= PIPE_DEF_BUFFERS) return 0; - spd->pages = kmalloc(pipe->buffers * sizeof(struct page *), GFP_KERNEL); - spd->partial = kmalloc(pipe->buffers * sizeof(struct partial_page), GFP_KERNEL); + spd->pages = kmalloc(buffers * sizeof(struct page *), GFP_KERNEL); + spd->partial = kmalloc(buffers * sizeof(struct partial_page), GFP_KERNEL); if (spd->pages && spd->partial) return 0; @@ -290,10 +293,9 @@ int splice_grow_spd(struct pipe_inode_info *pipe, struct splice_pipe_desc *spd) return -ENOMEM; } -void splice_shrink_spd(struct pipe_inode_info *pipe, - struct splice_pipe_desc *spd) +void splice_shrink_spd(struct splice_pipe_desc *spd) { - if (pipe->buffers <= PIPE_DEF_BUFFERS) + if (spd->nr_pages_max <= PIPE_DEF_BUFFERS) return; kfree(spd->pages); @@ -316,6 +318,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, struct splice_pipe_desc spd = { .pages = pages, .partial = partial, + .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &page_cache_pipe_buf_ops, .spd_release = spd_release_page, @@ -327,7 +330,7 @@ __generic_file_splice_read(struct file *in, loff_t *ppos, index = *ppos >> PAGE_CACHE_SHIFT; loff = *ppos & ~PAGE_CACHE_MASK; req_pages = (len + loff + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - nr_pages = min(req_pages, pipe->buffers); + nr_pages = min(req_pages, spd.nr_pages_max); /* * Lookup the (hopefully) full range of pages we need. 
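The splice changes snapshot pipe->buffers once (via ACCESS_ONCE) into the new spd.nr_pages_max field, and every later size decision, including splice_shrink_spd(), uses that snapshot rather than re-reading a value that can change if the pipe is resized in between. A minimal sketch of the grow/shrink pairing under that rule; desc, grow() and shrink() are invented names, and a volatile variable stands in for ACCESS_ONCE().

#include <stdio.h>
#include <stdlib.h>

#define DEF_BUFFERS 16

struct desc {
    unsigned int nr_max;   /* like spd->nr_pages_max: fixed at grow time */
    void **pages;
};

/* Stand-in for pipe->buffers; another context may change it at any time. */
static volatile unsigned int shared_buffers = 64;

static int grow(struct desc *d)
{
    d->nr_max = shared_buffers;            /* read the shared value once */
    if (d->nr_max <= DEF_BUFFERS) {
        d->pages = NULL;                   /* small case: no allocation */
        return 0;
    }
    d->pages = malloc(d->nr_max * sizeof(*d->pages));
    return d->pages ? 0 : -1;
}

static void shrink(struct desc *d)
{
    /* Decide from the snapshot, never from shared_buffers, which may
     * have changed in between; mismatched decisions here are exactly
     * the alloc/free imbalance the splice patch closes. */
    if (d->nr_max <= DEF_BUFFERS)
        return;
    free(d->pages);
}

int main(void)
{
    struct desc d;

    if (grow(&d))
        return 1;
    shared_buffers = 8;   /* someone resizes the pipe meanwhile */
    shrink(&d);           /* still frees, because nr_max == 64 was saved */
    printf("grew and shrank with nr_max=%u despite the resize\n", d.nr_max);
    return 0;
}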
@@ -498,7 +501,7 @@ fill_it: if (spd.nr_pages) error = splice_to_pipe(pipe, &spd); - splice_shrink_spd(pipe, &spd); + splice_shrink_spd(&spd); return error; } @@ -599,6 +602,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, struct splice_pipe_desc spd = { .pages = pages, .partial = partial, + .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &default_pipe_buf_ops, .spd_release = spd_release_page, @@ -609,8 +613,8 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, res = -ENOMEM; vec = __vec; - if (pipe->buffers > PIPE_DEF_BUFFERS) { - vec = kmalloc(pipe->buffers * sizeof(struct iovec), GFP_KERNEL); + if (spd.nr_pages_max > PIPE_DEF_BUFFERS) { + vec = kmalloc(spd.nr_pages_max * sizeof(struct iovec), GFP_KERNEL); if (!vec) goto shrink_ret; } @@ -618,7 +622,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, offset = *ppos & ~PAGE_CACHE_MASK; nr_pages = (len + offset + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT; - for (i = 0; i < nr_pages && i < pipe->buffers && len; i++) { + for (i = 0; i < nr_pages && i < spd.nr_pages_max && len; i++) { struct page *page; page = alloc_page(GFP_USER); @@ -666,7 +670,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos, shrink_ret: if (vec != __vec) kfree(vec); - splice_shrink_spd(pipe, &spd); + splice_shrink_spd(&spd); return res; err: @@ -1618,6 +1622,7 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, struct splice_pipe_desc spd = { .pages = pages, .partial = partial, + .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &user_page_pipe_buf_ops, .spd_release = spd_release_page, @@ -1633,13 +1638,13 @@ static long vmsplice_to_pipe(struct file *file, const struct iovec __user *iov, spd.nr_pages = get_iovec_page_array(iov, nr_segs, spd.pages, spd.partial, flags & SPLICE_F_GIFT, - pipe->buffers); + spd.nr_pages_max); if (spd.nr_pages <= 0) ret = spd.nr_pages; else ret = splice_to_pipe(pipe, &spd); - splice_shrink_spd(pipe, &spd); + splice_shrink_spd(&spd); return ret; } diff --git a/fs/statfs.c b/fs/statfs.c index 9cf04a1..a133c3e 100644 --- a/fs/statfs.c +++ b/fs/statfs.c @@ -86,7 +86,7 @@ int user_statfs(const char __user *pathname, struct kstatfs *st) int fd_statfs(int fd, struct kstatfs *st) { - struct file *file = fget(fd); + struct file *file = fget_raw(fd); int error = -EBADF; if (file) { error = vfs_statfs(&file->f_path, st); @@ -222,19 +222,19 @@ EXPORT_SYMBOL(deactivate_super); * and want to turn it into a full-blown active reference. grab_super() * is called with sb_lock held and drops it. Returns 1 in case of * success, 0 if we had failed (superblock contents was already dead or - * dying when grab_super() had been called). + * dying when grab_super() had been called). Note that this is only + * called for superblocks not in rundown mode (== ones still on ->fs_supers + * of their type), so increment of ->s_count is OK here. 
*/ static int grab_super(struct super_block *s) __releases(sb_lock) { - if (atomic_inc_not_zero(&s->s_active)) { - spin_unlock(&sb_lock); - return 1; - } - /* it's going away */ s->s_count++; spin_unlock(&sb_lock); - /* wait for it to die */ down_write(&s->s_umount); + if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) { + put_super(s); + return 1; + } up_write(&s->s_umount); put_super(s); return 0; @@ -335,11 +335,6 @@ retry: destroy_super(s); s = NULL; } - down_write(&old->s_umount); - if (unlikely(!(old->s_flags & MS_BORN))) { - deactivate_locked_super(old); - goto retry; - } return old; } } @@ -512,10 +507,10 @@ restart: if (list_empty(&sb->s_instances)) continue; if (sb->s_bdev == bdev) { - if (grab_super(sb)) /* drops sb_lock */ - return sb; - else + if (!grab_super(sb)) goto restart; + up_write(&sb->s_umount); + return sb; } } spin_unlock(&sb_lock); diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c index 7cbc585..3ab78b8 100644 --- a/fs/sysfs/dir.c +++ b/fs/sysfs/dir.c @@ -959,21 +959,9 @@ static int sysfs_readdir(struct file * filp, void * dirent, filldir_t filldir) return 0; } -static loff_t sysfs_dir_llseek(struct file *file, loff_t offset, int whence) -{ - struct inode *inode = file->f_path.dentry->d_inode; - loff_t ret; - - mutex_lock(&inode->i_mutex); - ret = generic_file_llseek(file, offset, whence); - mutex_unlock(&inode->i_mutex); - - return ret; -} - const struct file_operations sysfs_dir_operations = { .read = generic_read_dir, .readdir = sysfs_readdir, .release = sysfs_dir_release, - .llseek = sysfs_dir_llseek, + .llseek = generic_file_llseek, }; diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c index ef5abd3..936a038 100644 --- a/fs/ubifs/dir.c +++ b/fs/ubifs/dir.c @@ -355,31 +355,50 @@ static unsigned int vfs_dent_type(uint8_t type) static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) { int err, over = 0; + loff_t pos = file->f_pos; struct qstr nm; union ubifs_key key; struct ubifs_dent_node *dent; struct inode *dir = file->f_path.dentry->d_inode; struct ubifs_info *c = dir->i_sb->s_fs_info; - dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, file->f_pos); + dbg_gen("dir ino %lu, f_pos %#llx", dir->i_ino, pos); - if (file->f_pos > UBIFS_S_KEY_HASH_MASK || file->f_pos == 2) + if (pos > UBIFS_S_KEY_HASH_MASK || pos == 2) /* * The directory was seek'ed to a senseless position or there * are no more entries. */ return 0; + if (file->f_version == 0) { + /* + * The file was seek'ed, which means that @file->private_data + * is now invalid. This may also be just the first + * 'ubifs_readdir()' invocation, in which case + * @file->private_data is NULL, and the below code is + * basically a no-op. + */ + kfree(file->private_data); + file->private_data = NULL; + } + + /* + * 'generic_file_llseek()' unconditionally sets @file->f_version to + * zero, and we use this for detecting whether the file was seek'ed. + */ + file->f_version = 1; + /* File positions 0 and 1 correspond to "." and ".." 
*/ - if (file->f_pos == 0) { + if (pos == 0) { ubifs_assert(!file->private_data); over = filldir(dirent, ".", 1, 0, dir->i_ino, DT_DIR); if (over) return 0; - file->f_pos = 1; + file->f_pos = pos = 1; } - if (file->f_pos == 1) { + if (pos == 1) { ubifs_assert(!file->private_data); over = filldir(dirent, "..", 2, 1, parent_ino(file->f_path.dentry), DT_DIR); @@ -395,7 +414,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) goto out; } - file->f_pos = key_hash_flash(c, &dent->key); + file->f_pos = pos = key_hash_flash(c, &dent->key); file->private_data = dent; } @@ -403,17 +422,16 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) if (!dent) { /* * The directory was seek'ed to and is now readdir'ed. - * Find the entry corresponding to @file->f_pos or the - * closest one. + * Find the entry corresponding to @pos or the closest one. */ - dent_key_init_hash(c, &key, dir->i_ino, file->f_pos); + dent_key_init_hash(c, &key, dir->i_ino, pos); nm.name = NULL; dent = ubifs_tnc_next_ent(c, &key, &nm); if (IS_ERR(dent)) { err = PTR_ERR(dent); goto out; } - file->f_pos = key_hash_flash(c, &dent->key); + file->f_pos = pos = key_hash_flash(c, &dent->key); file->private_data = dent; } @@ -425,7 +443,7 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) ubifs_inode(dir)->creat_sqnum); nm.len = le16_to_cpu(dent->nlen); - over = filldir(dirent, dent->name, nm.len, file->f_pos, + over = filldir(dirent, dent->name, nm.len, pos, le64_to_cpu(dent->inum), vfs_dent_type(dent->type)); if (over) @@ -441,9 +459,17 @@ static int ubifs_readdir(struct file *file, void *dirent, filldir_t filldir) } kfree(file->private_data); - file->f_pos = key_hash_flash(c, &dent->key); + file->f_pos = pos = key_hash_flash(c, &dent->key); file->private_data = dent; cond_resched(); + + if (file->f_version == 0) + /* + * The file was seek'ed meanwhile, lets return and start + * reading direntries from the new position on the next + * invocation. 
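The ubifs_readdir() rework uses file->f_version as a seek detector: generic_file_llseek() zeroes it, readdir sets it back to 1, and a zero value means the cached cursor in private_data is stale and must be dropped. A toy model of that handshake, with a strdup'ed string standing in for the saved ubifs_dent_node:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* A toy "open directory" with the two fields the ubifs patch leans on:
 * version (reset to 0 by llseek) and cached_entry (the saved cursor). */
struct toy_file {
    long pos;
    unsigned long version;
    char *cached_entry;
};

static void toy_llseek(struct toy_file *f, long pos)
{
    f->pos = pos;
    f->version = 0;          /* generic_file_llseek() zeroes f_version */
}

static void toy_readdir(struct toy_file *f)
{
    if (f->version == 0) {
        /* The file was seeked (or this is the first call): the cached
         * cursor no longer matches f->pos, so drop it. */
        free(f->cached_entry);
        f->cached_entry = NULL;
    }
    f->version = 1;          /* any later llseek will clear this again */

    if (!f->cached_entry) {
        f->cached_entry = strdup("entry-at-current-pos");
        printf("pos %ld: rebuilt cursor\n", f->pos);
    } else {
        printf("pos %ld: reused cursor %s\n", f->pos, f->cached_entry);
    }
}

int main(void)
{
    struct toy_file f = { 0 };

    toy_readdir(&f);     /* first call: builds the cursor */
    toy_readdir(&f);     /* no seek in between: cursor reused */
    toy_llseek(&f, 42);  /* seek invalidates it... */
    toy_readdir(&f);     /* ...so this call rebuilds it */
    free(f.cached_entry);
    return 0;
}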
+ */ + return 0; } out: @@ -454,15 +480,13 @@ out: kfree(file->private_data); file->private_data = NULL; + /* 2 is a special value indicating that there are no more direntries */ file->f_pos = 2; return 0; } -/* If a directory is seeked, we have to free saved readdir() state */ static loff_t ubifs_dir_llseek(struct file *file, loff_t offset, int origin) { - kfree(file->private_data); - file->private_data = NULL; return generic_file_llseek(file, offset, origin); } diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c index 529be05..db04976 100644 --- a/fs/ubifs/super.c +++ b/fs/ubifs/super.c @@ -1583,6 +1583,12 @@ static int ubifs_remount_rw(struct ubifs_info *c) c->remounting_rw = 1; c->ro_mount = 0; + if (c->space_fixup) { + err = ubifs_fixup_free_space(c); + if (err) + return err; + } + err = check_free_space(c); if (err) goto out; @@ -1699,12 +1705,6 @@ static int ubifs_remount_rw(struct ubifs_info *c) err = dbg_check_space_info(c); } - if (c->space_fixup) { - err = ubifs_fixup_free_space(c); - if (err) - goto out; - } - mutex_unlock(&c->umount_mutex); return err; diff --git a/include/linux/ata.h b/include/linux/ata.h index 32df2b6..5856c9e 100644 --- a/include/linux/ata.h +++ b/include/linux/ata.h @@ -937,7 +937,7 @@ static inline int atapi_cdb_len(const u16 *dev_id) } } -static inline bool atapi_command_packet_set(const u16 *dev_id) +static inline int atapi_command_packet_set(const u16 *dev_id) { return (dev_id[ATA_ID_CONFIG] >> 8) & 0x1f; } diff --git a/include/linux/ftrace_event.h b/include/linux/ftrace_event.h index 59d3ef1..217b3c2 100644 --- a/include/linux/ftrace_event.h +++ b/include/linux/ftrace_event.h @@ -71,6 +71,8 @@ struct trace_iterator { /* trace_seq for __print_flags() and __print_symbolic() etc. */ struct trace_seq tmp_seq; + cpumask_var_t started; + /* The below is zeroed out in pipe_read */ struct trace_seq seq; struct trace_entry *ent; @@ -82,7 +84,7 @@ struct trace_iterator { loff_t pos; long idx; - cpumask_var_t started; + /* All new field here will be zeroed out in pipe_read */ }; diff --git a/include/linux/hid.h b/include/linux/hid.h index 4bfb4ca..b42cc82 100644 --- a/include/linux/hid.h +++ b/include/linux/hid.h @@ -414,10 +414,12 @@ struct hid_report { struct hid_device *device; /* associated device */ }; +#define HID_MAX_IDS 256 + struct hid_report_enum { unsigned numbered; struct list_head report_list; - struct hid_report *report_id_hash[256]; + struct hid_report *report_id_hash[HID_MAX_IDS]; }; #define HID_REPORT_TYPES 3 @@ -715,6 +717,10 @@ void hid_output_report(struct hid_report *report, __u8 *data); struct hid_device *hid_allocate_device(void); struct hid_report *hid_register_report(struct hid_device *device, unsigned type, unsigned id); int hid_parse_report(struct hid_device *hid, __u8 *start, unsigned size); +struct hid_report *hid_validate_values(struct hid_device *hid, + unsigned int type, unsigned int id, + unsigned int field_index, + unsigned int report_counts); int hid_check_keys_pressed(struct hid_device *hid); int hid_connect(struct hid_device *hid, unsigned int connect_mask); void hid_disconnect(struct hid_device *hid); diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h index 59225ef..db70f1b 100644 --- a/include/linux/hugetlb.h +++ b/include/linux/hugetlb.h @@ -313,6 +313,17 @@ static inline unsigned hstate_index_to_shift(unsigned index) return hstates[index].order + PAGE_SHIFT; } +pgoff_t __basepage_index(struct page *page); + +/* Return page->index in PAGE_SIZE units */ +static inline pgoff_t basepage_index(struct page *page) +{ + 
if (!PageCompound(page)) + return page->index; + + return __basepage_index(page); +} + #else struct hstate {}; #define alloc_huge_page_node(h, nid) NULL @@ -331,6 +342,11 @@ static inline unsigned int pages_per_huge_page(struct hstate *h) return 1; } #define hstate_index_to_shift(index) 0 + +static inline pgoff_t basepage_index(struct page *page) +{ + return page->index; +} #endif #endif /* _LINUX_HUGETLB_H */ diff --git a/include/linux/icmpv6.h b/include/linux/icmpv6.h index ba45e6b..f5a21d0 100644 --- a/include/linux/icmpv6.h +++ b/include/linux/icmpv6.h @@ -123,6 +123,8 @@ static inline struct icmp6hdr *icmp6_hdr(const struct sk_buff *skb) #define ICMPV6_NOT_NEIGHBOUR 2 #define ICMPV6_ADDR_UNREACH 3 #define ICMPV6_PORT_UNREACH 4 +#define ICMPV6_POLICY_FAIL 5 +#define ICMPV6_REJECT_ROUTE 6 /* * Codes for Time Exceeded diff --git a/include/linux/if_cablemodem.h b/include/linux/if_cablemodem.h index 9ca1007..ee6b3c4 100644 --- a/include/linux/if_cablemodem.h +++ b/include/linux/if_cablemodem.h @@ -12,11 +12,11 @@ */ /* some useful defines for sb1000.c e cmconfig.c - fv */ -#define SIOCGCMSTATS SIOCDEVPRIVATE+0 /* get cable modem stats */ -#define SIOCGCMFIRMWARE SIOCDEVPRIVATE+1 /* get cm firmware version */ -#define SIOCGCMFREQUENCY SIOCDEVPRIVATE+2 /* get cable modem frequency */ -#define SIOCSCMFREQUENCY SIOCDEVPRIVATE+3 /* set cable modem frequency */ -#define SIOCGCMPIDS SIOCDEVPRIVATE+4 /* get cable modem PIDs */ -#define SIOCSCMPIDS SIOCDEVPRIVATE+5 /* set cable modem PIDs */ +#define SIOCGCMSTATS (SIOCDEVPRIVATE+0) /* get cable modem stats */ +#define SIOCGCMFIRMWARE (SIOCDEVPRIVATE+1) /* get cm firmware version */ +#define SIOCGCMFREQUENCY (SIOCDEVPRIVATE+2) /* get cable modem frequency */ +#define SIOCSCMFREQUENCY (SIOCDEVPRIVATE+3) /* set cable modem frequency */ +#define SIOCGCMPIDS (SIOCDEVPRIVATE+4) /* get cable modem PIDs */ +#define SIOCSCMPIDS (SIOCDEVPRIVATE+5) /* set cable modem PIDs */ #endif diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h index 999ccd3..cffe2b2 100644 --- a/include/linux/if_pppox.h +++ b/include/linux/if_pppox.h @@ -131,11 +131,11 @@ struct pppoe_tag { struct pppoe_hdr { #if defined(__LITTLE_ENDIAN_BITFIELD) - __u8 ver : 4; __u8 type : 4; + __u8 ver : 4; #elif defined(__BIG_ENDIAN_BITFIELD) - __u8 type : 4; __u8 ver : 4; + __u8 type : 4; #else #error "Please fix <asm/byteorder.h>" #endif diff --git a/include/linux/ipc_namespace.h b/include/linux/ipc_namespace.h index a6d1655..f1605b5 100644 --- a/include/linux/ipc_namespace.h +++ b/include/linux/ipc_namespace.h @@ -42,8 +42,8 @@ struct ipc_namespace { size_t shm_ctlmax; size_t shm_ctlall; + unsigned long shm_tot; int shm_ctlmni; - int shm_tot; struct notifier_block ipcns_nb; diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h index 0c99776..84b1447 100644 --- a/include/linux/ipv6.h +++ b/include/linux/ipv6.h @@ -255,6 +255,7 @@ struct inet6_skb_parm { #define IP6SKB_XFRM_TRANSFORMED 1 #define IP6SKB_FORWARDED 2 #define IP6SKB_REROUTED 4 +#define IP6SKB_FRAGMENTED 16 }; #define IP6CB(skb) ((struct inet6_skb_parm*)((skb)->cb)) diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h index 8663a26..8cd0f20 100644 --- a/include/linux/kvm_host.h +++ b/include/linux/kvm_host.h @@ -388,7 +388,7 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, void *data, unsigned long len); int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa); + gpa_t gpa, unsigned 
long len); int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len); int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len); struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn); diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h index fa7cc72..b0bcce0 100644 --- a/include/linux/kvm_types.h +++ b/include/linux/kvm_types.h @@ -71,6 +71,7 @@ struct gfn_to_hva_cache { u64 generation; gpa_t gpa; unsigned long hva; + unsigned long len; struct kvm_memory_slot *memslot; }; diff --git a/include/linux/libata.h b/include/linux/libata.h index 5a9926b..1cf324e 100644 --- a/include/linux/libata.h +++ b/include/linux/libata.h @@ -382,6 +382,7 @@ enum { ATA_HORKAGE_NOSETXFER = (1 << 14), /* skip SETXFER, SATA only */ ATA_HORKAGE_BROKEN_FPDMA_AA = (1 << 15), /* skip AA */ ATA_HORKAGE_DUMP_ID = (1 << 16), /* dump IDENTIFY data */ + ATA_HORKAGE_MAX_SEC_LBA48 = (1 << 17), /* Set max sects to 65535 */ /* DMA mask for user DMA control: User visible values; DO NOT renumber */ diff --git a/include/linux/mm.h b/include/linux/mm.h index 71b82ad..815d809 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1544,6 +1544,8 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr, unsigned long pfn); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len); + struct page *follow_page(struct vm_area_struct *, unsigned long address, unsigned int foll_flags); diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index c037215..02f887a 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -231,9 +231,9 @@ struct netdev_hw_addr { #define NETDEV_HW_ADDR_T_SLAVE 3 #define NETDEV_HW_ADDR_T_UNICAST 4 #define NETDEV_HW_ADDR_T_MULTICAST 5 - bool synced; bool global_use; int refcount; + int synced; struct rcu_head rcu_head; }; diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h index 67b9fbc..e4d3640 100644 --- a/include/linux/perf_event.h +++ b/include/linux/perf_event.h @@ -939,7 +939,7 @@ struct perf_cpu_context { int exclusive; struct list_head rotation_list; int jiffies_interval; - struct pmu *active_pmu; + struct pmu *unique_pmu; struct perf_cgroup *cgrp; }; diff --git a/include/linux/preempt.h b/include/linux/preempt.h index 2e681d9..d309dcb 100644 --- a/include/linux/preempt.h +++ b/include/linux/preempt.h @@ -82,14 +82,20 @@ do { \ #else -#define preempt_disable() do { } while (0) -#define preempt_enable_no_resched() do { } while (0) -#define preempt_enable() do { } while (0) -#define preempt_check_resched() do { } while (0) - -#define preempt_disable_notrace() do { } while (0) -#define preempt_enable_no_resched_notrace() do { } while (0) -#define preempt_enable_notrace() do { } while (0) +/* + * Even if we don't have any preemption, we need preempt disable/enable + * to be barriers, so that we don't have things like get_user/put_user + * that can cause faults and scheduling migrate into our preempt-protected + * region. 
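The if_cablemodem.h hunk a little earlier wraps each SIOCDEVPRIVATE offset in parentheses, the usual defence against a macro expanding into the middle of a larger expression. A two-line demonstration with an arbitrary BASE value standing in for SIOCDEVPRIVATE:

#include <stdio.h>

#define BASE 0x89F0                /* stand-in for SIOCDEVPRIVATE */
#define CMD_UNSAFE  BASE + 2       /* old style: no parentheses */
#define CMD_SAFE   (BASE + 2)      /* new style */

int main(void)
{
    /* Any expression context exposes the difference: the unsafe form
     * expands to 2 * 0x89F0 + 2 here. */
    printf("2 * CMD_UNSAFE = 0x%x\n", 2 * CMD_UNSAFE);
    printf("2 * CMD_SAFE   = 0x%x\n", 2 * CMD_SAFE);
    return 0;
}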
+ */ +#define preempt_disable() barrier() +#define preempt_enable_no_resched() barrier() +#define preempt_enable() barrier() +#define preempt_check_resched() barrier() + +#define preempt_disable_notrace() barrier() +#define preempt_enable_no_resched_notrace() barrier() +#define preempt_enable_notrace() barrier() #endif diff --git a/include/linux/random.h b/include/linux/random.h index ac621ce..7e58ad2 100644 --- a/include/linux/random.h +++ b/include/linux/random.h @@ -56,6 +56,7 @@ extern void add_interrupt_randomness(int irq, int irq_flags); extern void get_random_bytes(void *buf, int nbytes); extern void get_random_bytes_arch(void *buf, int nbytes); void generate_random_uuid(unsigned char uuid_out[16]); +extern int random_int_secret_init(void); #ifndef MODULE extern const struct file_operations random_fops, urandom_fops; diff --git a/include/linux/rculist.h b/include/linux/rculist.h index e3beb31..52d280b 100644 --- a/include/linux/rculist.h +++ b/include/linux/rculist.h @@ -242,6 +242,24 @@ static inline void list_splice_init_rcu(struct list_head *list, list_entry_rcu((ptr)->next, type, member) /** + * list_first_or_null_rcu - get the first element from a list + * @ptr: the list head to take the element from. + * @type: the type of the struct this is embedded in. + * @member: the name of the list_struct within the struct. + * + * Note that if the list is empty, it returns NULL. + * + * This primitive may safely run concurrently with the _rcu list-mutation + * primitives such as list_add_rcu() as long as it's guarded by rcu_read_lock(). + */ +#define list_first_or_null_rcu(ptr, type, member) \ + ({struct list_head *__ptr = (ptr); \ + struct list_head *__next = ACCESS_ONCE(__ptr->next); \ + likely(__ptr != __next) ? \ + list_entry_rcu(__next, type, member) : NULL; \ + }) + +/** * list_for_each_entry_rcu - iterate over rcu list of given type * @pos: the type * to use as a loop cursor. * @head: the head for your list. diff --git a/include/linux/rculist_nulls.h b/include/linux/rculist_nulls.h index 2ae1371..1c33dd7 100644 --- a/include/linux/rculist_nulls.h +++ b/include/linux/rculist_nulls.h @@ -105,9 +105,14 @@ static inline void hlist_nulls_add_head_rcu(struct hlist_nulls_node *n, * @head: the head for your list. * @member: the name of the hlist_nulls_node within the struct. * + * The barrier() is needed to make sure compiler doesn't cache first element [1], + * as this loop can be restarted [2] + * [1] Documentation/atomic_ops.txt around line 114 + * [2] Documentation/RCU/rculist_nulls.txt around line 146 */ #define hlist_nulls_for_each_entry_rcu(tpos, pos, head, member) \ - for (pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ + for (({barrier();}), \ + pos = rcu_dereference_raw(hlist_nulls_first_rcu(head)); \ (!is_a_nulls(pos)) && \ ({ tpos = hlist_nulls_entry(pos, typeof(*tpos), member); 1; }); \ pos = rcu_dereference_raw(hlist_nulls_next_rcu(pos))) diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index 37b643b..7b547c2 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -2135,6 +2135,14 @@ static inline void nf_reset(struct sk_buff *skb) #endif } +static inline void nf_reset_trace(struct sk_buff *skb) +{ +#if defined(CONFIG_NETFILTER_XT_TARGET_TRACE) || \ + defined(CONFIG_NETFILTER_XT_TARGET_TRACE_MODULE) + skb->nf_trace = 0; +#endif +} + /* Note: This doesn't put any conntrack and bridge info in dst. 
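The preempt.h and spinlock_up.h hunks here turn the UP no-op variants into compiler barriers: with no lock instruction emitted at all, only a barrier stops the compiler from moving memory accesses (notably faulting user accesses) into or out of the protected region. The snippet below just shows what such a barrier looks like and where it sits; the reordering it prevents happens at compile time, so nothing is observable in the output, and the asm form is GCC-style.

#include <stdio.h>

/* The kernel's barrier(): an empty asm the compiler must not move
 * memory accesses across.  It costs no instructions at run time. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static int shared_data;
static int region_active;

int main(void)
{
    region_active = 1;
    barrier();           /* nothing below may be hoisted above this point */

    shared_data = 42;    /* the "protected" access */

    barrier();           /* nothing above may sink below this point */
    region_active = 0;

    printf("shared_data = %d\n", shared_data);
    return 0;
}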
*/ static inline void __nf_copy(struct sk_buff *dst, const struct sk_buff *src) { diff --git a/include/linux/socket.h b/include/linux/socket.h index 635c213..50b2530 100644 --- a/include/linux/socket.h +++ b/include/linux/socket.h @@ -312,7 +312,8 @@ struct ucred { /* IPX options */ #define IPX_TYPE 1 -extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred); +extern void cred_to_ucred(struct pid *pid, const struct cred *cred, struct ucred *ucred, + bool use_effective); extern int memcpy_fromiovec(unsigned char *kdata, struct iovec *iov, int len); extern int memcpy_fromiovecend(unsigned char *kdata, const struct iovec *iov, @@ -331,6 +332,9 @@ extern int put_cmsg(struct msghdr*, int level, int type, int len, void *data); struct timespec; +/* The __sys_...msg variants allow MSG_CMSG_COMPAT */ +extern long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags); +extern long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags); extern int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, unsigned int flags, struct timespec *timeout); extern int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, diff --git a/include/linux/spinlock_up.h b/include/linux/spinlock_up.h index a26e2fb..e2369c1 100644 --- a/include/linux/spinlock_up.h +++ b/include/linux/spinlock_up.h @@ -16,7 +16,10 @@ * In the debug case, 1 means unlocked, 0 means locked. (the values * are inverted, to catch initialization bugs) * - * No atomicity anywhere, we are on UP. + * No atomicity anywhere, we are on UP. However, we still need + * the compiler barriers, because we do not want the compiler to + * move potentially faulting instructions (notably user accesses) + * into the locked sequence, resulting in non-atomic execution. */ #ifdef CONFIG_DEBUG_SPINLOCK @@ -25,6 +28,7 @@ static inline void arch_spin_lock(arch_spinlock_t *lock) { lock->slock = 0; + barrier(); } static inline void @@ -32,6 +36,7 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags) { local_irq_save(flags); lock->slock = 0; + barrier(); } static inline int arch_spin_trylock(arch_spinlock_t *lock) @@ -39,32 +44,34 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock) char oldval = lock->slock; lock->slock = 0; + barrier(); return oldval > 0; } static inline void arch_spin_unlock(arch_spinlock_t *lock) { + barrier(); lock->slock = 1; } /* * Read-write spinlocks. No debug version. 
*/ -#define arch_read_lock(lock) do { (void)(lock); } while (0) -#define arch_write_lock(lock) do { (void)(lock); } while (0) -#define arch_read_trylock(lock) ({ (void)(lock); 1; }) -#define arch_write_trylock(lock) ({ (void)(lock); 1; }) -#define arch_read_unlock(lock) do { (void)(lock); } while (0) -#define arch_write_unlock(lock) do { (void)(lock); } while (0) +#define arch_read_lock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_write_lock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_read_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define arch_write_trylock(lock) ({ barrier(); (void)(lock); 1; }) +#define arch_read_unlock(lock) do { barrier(); (void)(lock); } while (0) +#define arch_write_unlock(lock) do { barrier(); (void)(lock); } while (0) #else /* DEBUG_SPINLOCK */ #define arch_spin_is_locked(lock) ((void)(lock), 0) /* for sched.c and kernel_lock.c: */ -# define arch_spin_lock(lock) do { (void)(lock); } while (0) -# define arch_spin_lock_flags(lock, flags) do { (void)(lock); } while (0) -# define arch_spin_unlock(lock) do { (void)(lock); } while (0) -# define arch_spin_trylock(lock) ({ (void)(lock); 1; }) +# define arch_spin_lock(lock) do { barrier(); (void)(lock); } while (0) +# define arch_spin_lock_flags(lock, flags) do { barrier(); (void)(lock); } while (0) +# define arch_spin_unlock(lock) do { barrier(); (void)(lock); } while (0) +# define arch_spin_trylock(lock) ({ barrier(); (void)(lock); 1; }) #endif /* DEBUG_SPINLOCK */ #define arch_spin_is_contended(lock) (((void)(lock), 0)) diff --git a/include/linux/splice.h b/include/linux/splice.h index 997c3b4..dbbe79c 100644 --- a/include/linux/splice.h +++ b/include/linux/splice.h @@ -51,7 +51,8 @@ struct partial_page { struct splice_pipe_desc { struct page **pages; /* page map */ struct partial_page *partial; /* pages[] may not be contig */ - int nr_pages; /* number of pages in map */ + int nr_pages; /* number of populated pages in map */ + unsigned int nr_pages_max; /* pages[] & partial[] arrays size */ unsigned int flags; /* splice flags */ const struct pipe_buf_operations *ops;/* ops associated with output pipe */ void (*spd_release)(struct splice_pipe_desc *, unsigned int); @@ -85,8 +86,7 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *, /* * for dynamic pipe sizing */ -extern int splice_grow_spd(struct pipe_inode_info *, struct splice_pipe_desc *); -extern void splice_shrink_spd(struct pipe_inode_info *, - struct splice_pipe_desc *); +extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *); +extern void splice_shrink_spd(struct splice_pipe_desc *); #endif diff --git a/include/linux/swapops.h b/include/linux/swapops.h index cd42e30..96c7110 100644 --- a/include/linux/swapops.h +++ b/include/linux/swapops.h @@ -113,6 +113,7 @@ static inline void make_migration_entry_read(swp_entry_t *entry) extern void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address); +extern void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte); #else #define make_migration_entry(page, write) swp_entry(0, 0) @@ -124,6 +125,8 @@ static inline int is_migration_entry(swp_entry_t swp) static inline void make_migration_entry_read(swp_entry_t *entryp) { } static inline void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, unsigned long address) { } +static inline void migration_entry_wait_huge(struct mm_struct *mm, + pte_t *pte) { } static inline int is_write_migration_entry(swp_entry_t entry) { return 0; diff --git 
a/include/linux/virtio.h b/include/linux/virtio.h index 7108857..a39e962 100644 --- a/include/linux/virtio.h +++ b/include/linux/virtio.h @@ -93,6 +93,10 @@ void virtqueue_disable_cb(struct virtqueue *vq); bool virtqueue_enable_cb(struct virtqueue *vq); +unsigned virtqueue_enable_cb_prepare(struct virtqueue *vq); + +bool virtqueue_poll(struct virtqueue *vq, unsigned); + bool virtqueue_enable_cb_delayed(struct virtqueue *vq); void *virtqueue_detach_unused_buf(struct virtqueue *vq); diff --git a/include/linux/wait.h b/include/linux/wait.h index 3efc9f3..e007f76 100644 --- a/include/linux/wait.h +++ b/include/linux/wait.h @@ -233,6 +233,8 @@ do { \ if (!ret) \ break; \ } \ + if (!ret && (condition)) \ + ret = 1; \ finish_wait(&wq, &__wait); \ } while (0) @@ -249,8 +251,9 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * The function returns 0 if the @timeout elapsed, and the remaining - * jiffies if the condition evaluated to true before the timeout elapsed. + * The function returns 0 if the @timeout elapsed, or the remaining + * jiffies (at least 1) if the @condition evaluated to %true before + * the @timeout elapsed. */ #define wait_event_timeout(wq, condition, timeout) \ ({ \ @@ -318,6 +321,8 @@ do { \ ret = -ERESTARTSYS; \ break; \ } \ + if (!ret && (condition)) \ + ret = 1; \ finish_wait(&wq, &__wait); \ } while (0) @@ -334,9 +339,10 @@ do { \ * wake_up() has to be called after changing any variable that could * change the result of the wait condition. * - * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it - * was interrupted by a signal, and the remaining jiffies otherwise - * if the condition evaluated to true before the timeout elapsed. + * Returns: + * 0 if the @timeout elapsed, -%ERESTARTSYS if it was interrupted by + * a signal, or the remaining jiffies (at least 1) if the @condition + * evaluated to %true before the @timeout elapsed. */ #define wait_event_interruptible_timeout(wq, condition, timeout) \ ({ \ @@ -524,6 +530,63 @@ do { \ ? 0 : __wait_event_interruptible_locked(wq, condition, 1, 1)) +#define __wait_event_interruptible_lock_irq_timeout(wq, condition, \ + lock, ret) \ +do { \ + DEFINE_WAIT(__wait); \ + \ + for (;;) { \ + prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \ + if (condition) \ + break; \ + if (signal_pending(current)) { \ + ret = -ERESTARTSYS; \ + break; \ + } \ + spin_unlock_irq(&lock); \ + ret = schedule_timeout(ret); \ + spin_lock_irq(&lock); \ + if (!ret) \ + break; \ + } \ + finish_wait(&wq, &__wait); \ +} while (0) + +/** + * wait_event_interruptible_lock_irq_timeout - sleep until a condition gets true or a timeout elapses. + * The condition is checked under the lock. This is expected + * to be called with the lock taken. + * @wq: the waitqueue to wait on + * @condition: a C expression for the event to wait for + * @lock: a locked spinlock_t, which will be released before schedule() + * and reacquired afterwards. + * @timeout: timeout, in jiffies + * + * The process is put to sleep (TASK_INTERRUPTIBLE) until the + * @condition evaluates to true or signal is received. The @condition is + * checked each time the waitqueue @wq is woken up. + * + * wake_up() has to be called after changing any variable that could + * change the result of the wait condition. + * + * This is supposed to be called while holding the lock. The lock is + * dropped before going to sleep and is reacquired afterwards. 
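The wait_event_timeout() and wait_event_interruptible_timeout() hunks add a final re-check so that a condition which becomes true just as the timeout expires is reported as success (a return value of at least 1) rather than as a timeout. A condensed single-threaded model of that control flow, not the real macro:

#include <stdio.h>
#include <stdbool.h>

/* Model of the patched loop: when the remaining budget hits zero,
 * re-check the condition once more and report 1 ("condition met")
 * instead of 0 ("timed out") if it now holds. */
static long wait_for(bool (*condition)(void), long timeout)
{
    long ret = timeout;

    for (;;) {
        if (condition())
            break;          /* ret > 0: time left over */
        ret -= 1;           /* pretend one jiffy of sleeping passed */
        if (!ret)
            break;          /* deadline reached */
    }
    if (!ret && condition())
        ret = 1;            /* became true right at the deadline */
    return ret;
}

static int calls;
static bool ready_on_fourth_check(void)
{
    return ++calls >= 4;
}

int main(void)
{
    /* With a 3-tick budget the condition turns true only at the final
     * re-check; the pre-patch form would have reported 0 here. */
    printf("wait_for() = %ld\n", wait_for(ready_on_fourth_check, 3));
    return 0;
}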
+ * + * The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it + * was interrupted by a signal, and the remaining jiffies otherwise + * if the condition evaluated to true before the timeout elapsed. + */ +#define wait_event_interruptible_lock_irq_timeout(wq, condition, lock, \ + timeout) \ +({ \ + int __ret = timeout; \ + \ + if (!(condition)) \ + __wait_event_interruptible_lock_irq_timeout( \ + wq, condition, lock, __ret); \ + __ret; \ +}) + #define __wait_event_killable(wq, condition, ret) \ do { \ diff --git a/include/net/addrconf.h b/include/net/addrconf.h index cbc6bb0..44b1110 100644 --- a/include/net/addrconf.h +++ b/include/net/addrconf.h @@ -81,6 +81,9 @@ extern int ipv6_dev_get_saddr(struct net *net, const struct in6_addr *daddr, unsigned int srcprefs, struct in6_addr *saddr); +extern int __ipv6_get_lladdr(struct inet6_dev *idev, + struct in6_addr *addr, + unsigned char banned_flags); extern int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags); diff --git a/include/net/ip.h b/include/net/ip.h index 24352d0..29e31ae 100644 --- a/include/net/ip.h +++ b/include/net/ip.h @@ -264,9 +264,11 @@ int ip_dont_fragment(struct sock *sk, struct dst_entry *dst) extern void __ip_select_ident(struct iphdr *iph, struct dst_entry *dst, int more); -static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk) +static inline void ip_select_ident(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk) { - if (iph->frag_off & htons(IP_DF)) { + struct iphdr *iph = ip_hdr(skb); + + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { /* This is only to work around buggy Windows95/2000 * VJ compression implementations. If the ID field * does not change, they drop every other packet in @@ -278,9 +280,11 @@ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, str __ip_select_ident(iph, dst, 0); } -static inline void ip_select_ident_more(struct iphdr *iph, struct dst_entry *dst, struct sock *sk, int more) +static inline void ip_select_ident_more(struct sk_buff *skb, struct dst_entry *dst, struct sock *sk, int more) { - if (iph->frag_off & htons(IP_DF)) { + struct iphdr *iph = ip_hdr(skb); + + if ((iph->frag_off & htons(IP_DF)) && !skb->local_df) { if (sk && inet_sk(sk)->inet_daddr) { iph->id = htons(inet_sk(sk)->inet_id); inet_sk(sk)->inet_id += 1 + more; diff --git a/include/net/ipip.h b/include/net/ipip.h index a32654d..4dccfe3 100644 --- a/include/net/ipip.h +++ b/include/net/ipip.h @@ -50,7 +50,7 @@ struct ip_tunnel_prl_entry { int pkt_len = skb->len - skb_transport_offset(skb); \ \ skb->ip_summed = CHECKSUM_NONE; \ - ip_select_ident(iph, &rt->dst, NULL); \ + ip_select_ident(skb, &rt->dst, NULL); \ \ err = ip_local_out(skb); \ if (likely(net_xmit_eval(err) == 0)) { \ diff --git a/include/net/scm.h b/include/net/scm.h index 745460f..820c1b3 100644 --- a/include/net/scm.h +++ b/include/net/scm.h @@ -50,7 +50,7 @@ static __inline__ void scm_set_cred(struct scm_cookie *scm, { scm->pid = get_pid(pid); scm->cred = get_cred(cred); - cred_to_ucred(pid, cred, &scm->creds); + cred_to_ucred(pid, cred, &scm->creds, false); } static __inline__ void scm_destroy_cred(struct scm_cookie *scm) diff --git a/include/net/sock.h b/include/net/sock.h index b2deeab..b6abd4f 100644 --- a/include/net/sock.h +++ b/include/net/sock.h @@ -721,6 +721,18 @@ struct timewait_sock_ops; struct inet_hashinfo; struct raw_hashinfo; +/* + * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes + * 
un-modified. Special care is taken when initializing object to zero. + */ +static inline void sk_prot_clear_nulls(struct sock *sk, int size) +{ + if (offsetof(struct sock, sk_node.next) != 0) + memset(sk, 0, offsetof(struct sock, sk_node.next)); + memset(&sk->sk_node.pprev, 0, + size - offsetof(struct sock, sk_node.pprev)); +} + /* Networking protocol blocks we attach to sockets. * socket layer -> transport layer interface * transport -> network interface is defined by struct inet_proto diff --git a/include/net/tcp.h b/include/net/tcp.h index fd10b99..fd864db 100644 --- a/include/net/tcp.h +++ b/include/net/tcp.h @@ -903,6 +903,7 @@ static inline int tcp_prequeue(struct sock *sk, struct sk_buff *skb) if (sysctl_tcp_low_latency || !tp->ucopy.task) return 0; + skb_dst_force(skb); __skb_queue_tail(&tp->ucopy.prequeue, skb); tp->ucopy.memory += skb->truesize; if (tp->ucopy.memory > sk->sk_rcvbuf) { diff --git a/include/net/udp.h b/include/net/udp.h index 67ea6fc..e723c9d 100644 --- a/include/net/udp.h +++ b/include/net/udp.h @@ -180,6 +180,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum, extern void udp_err(struct sk_buff *, u32); extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len); +extern int udp_push_pending_frames(struct sock *sk); extern void udp_flush_pending_frames(struct sock *sk); extern int udp_rcv(struct sk_buff *skb); extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg); diff --git a/init/main.c b/init/main.c index 841e344..cc545df 100644 --- a/init/main.c +++ b/init/main.c @@ -68,6 +68,7 @@ #include <linux/shmem_fs.h> #include <linux/slab.h> #include <linux/perf_event.h> +#include <linux/random.h> #include <asm/io.h> #include <asm/bugs.h> @@ -717,6 +718,7 @@ static void __init do_basic_setup(void) init_irq_proc(); do_ctors(); do_initcalls(); + random_int_secret_init(); } static void __init do_pre_smp_initcalls(void) @@ -343,7 +343,7 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params) size_t size = params->u.size; int error; struct shmid_kernel *shp; - int numpages = (size + PAGE_SIZE -1) >> PAGE_SHIFT; + size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; struct file * file; char name[13]; int id; diff --git a/kernel/audit_tree.c b/kernel/audit_tree.c index f6b4ac7..7d9731d 100644 --- a/kernel/audit_tree.c +++ b/kernel/audit_tree.c @@ -614,9 +614,9 @@ void audit_trim_trees(void) } spin_unlock(&hash_lock); trim_marked(tree); - put_tree(tree); drop_collected_mounts(root_mnt); skip_it: + put_tree(tree); mutex_lock(&audit_filter_mutex); } list_del(&cursor); diff --git a/kernel/cgroup.c b/kernel/cgroup.c index d0913a6..2005981 100644 --- a/kernel/cgroup.c +++ b/kernel/cgroup.c @@ -2036,7 +2036,7 @@ int cgroup_attach_proc(struct cgroup *cgrp, struct task_struct *leader) if (!group) return -ENOMEM; /* pre-allocate to guarantee space while iterating in rcu read-side. 
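 * flex_array_prealloc() takes a start index and a number of elements
 * (not an inclusive end index), so the full group_size is requested
 * below; the old group_size - 1 left the last slot unallocated.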
*/ - retval = flex_array_prealloc(group, 0, group_size - 1, GFP_KERNEL); + retval = flex_array_prealloc(group, 0, group_size, GFP_KERNEL); if (retval) goto out_free_group_list; @@ -3533,6 +3533,7 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft, const char *buffer) { struct cgroup_event *event = NULL; + struct cgroup *cgrp_cfile; unsigned int efd, cfd; struct file *efile = NULL; struct file *cfile = NULL; @@ -3587,6 +3588,16 @@ static int cgroup_write_event_control(struct cgroup *cgrp, struct cftype *cft, goto fail; } + /* + * The file to be monitored must be in the same cgroup as + * cgroup.event_control is. + */ + cgrp_cfile = __d_cgrp(cfile->f_dentry->d_parent); + if (cgrp_cfile != cgrp) { + ret = -EINVAL; + goto fail; + } + if (!event->cft->register_event || !event->cft->unregister_event) { ret = -EINVAL; goto fail; diff --git a/kernel/events/core.c b/kernel/events/core.c index b582650..acdc087 100644 --- a/kernel/events/core.c +++ b/kernel/events/core.c @@ -223,9 +223,9 @@ perf_cgroup_match(struct perf_event *event) return !event->cgrp || event->cgrp == cpuctx->cgrp; } -static inline void perf_get_cgroup(struct perf_event *event) +static inline bool perf_tryget_cgroup(struct perf_event *event) { - css_get(&event->cgrp->css); + return css_tryget(&event->cgrp->css); } static inline void perf_put_cgroup(struct perf_event *event) @@ -342,6 +342,8 @@ void perf_cgroup_switch(struct task_struct *task, int mode) list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = this_cpu_ptr(pmu->pmu_cpu_context); + if (cpuctx->unique_pmu != pmu) + continue; /* ensure we process each cpuctx once */ perf_pmu_disable(cpuctx->ctx.pmu); @@ -365,9 +367,10 @@ void perf_cgroup_switch(struct task_struct *task, int mode) if (mode & PERF_CGROUP_SWIN) { WARN_ON_ONCE(cpuctx->cgrp); - /* set cgrp before ctxsw in to - * allow event_filter_match() to not - * have to pass task around + /* + * set cgrp before ctxsw in to allow + * event_filter_match() to not have to pass + * task around */ cpuctx->cgrp = perf_cgroup_from_task(task); cpu_ctx_sched_in(cpuctx, EVENT_ALL, task); @@ -415,7 +418,11 @@ static inline int perf_cgroup_connect(int fd, struct perf_event *event, event->cgrp = cgrp; /* must be done before we fput() the file */ - perf_get_cgroup(event); + if (!perf_tryget_cgroup(event)) { + event->cgrp = NULL; + ret = -ENOENT; + goto out; + } /* * all events in a group must monitor @@ -651,8 +658,18 @@ perf_lock_task_context(struct task_struct *task, int ctxn, unsigned long *flags) { struct perf_event_context *ctx; - rcu_read_lock(); retry: + /* + * One of the few rules of preemptible RCU is that one cannot do + * rcu_read_unlock() while holding a scheduler (or nested) lock when + * part of the read side critical section was preemptible -- see + * rcu_read_unlock_special(). + * + * Since ctx->lock nests under rq->lock we must ensure the entire read + * side critical section is non-preemptible. + */ + preempt_disable(); + rcu_read_lock(); ctx = rcu_dereference(task->perf_event_ctxp[ctxn]); if (ctx) { /* @@ -668,6 +685,8 @@ retry: raw_spin_lock_irqsave(&ctx->lock, *flags); if (ctx != rcu_dereference(task->perf_event_ctxp[ctxn])) { raw_spin_unlock_irqrestore(&ctx->lock, *flags); + rcu_read_unlock(); + preempt_enable(); goto retry; } @@ -677,6 +696,7 @@ retry: } } rcu_read_unlock(); + preempt_enable(); return ctx; } @@ -826,6 +846,15 @@ list_add_event(struct perf_event *event, struct perf_event_context *ctx) } /* + * Initialize event state based on the perf_event_attr::disabled. 
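 * A disabled event starts out in PERF_EVENT_STATE_OFF, everything else
 * in PERF_EVENT_STATE_INACTIVE. The helper is reused further down when
 * a group leader and its siblings are pulled out of one context and
 * need their initial state restored before being moved to a new one.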
+ */ +static inline void perf_event__state_init(struct perf_event *event) +{ + event->state = event->attr.disabled ? PERF_EVENT_STATE_OFF : + PERF_EVENT_STATE_INACTIVE; +} + +/* * Called at perf_event creation and when events are attached/detached from a * group. */ @@ -1616,7 +1645,16 @@ static int __perf_event_enable(void *info) struct perf_cpu_context *cpuctx = __get_cpu_context(ctx); int err; - if (WARN_ON_ONCE(!ctx->is_active)) + /* + * There's a time window between 'ctx->is_active' check + * in perf_event_enable function and this place having: + * - IRQs on + * - ctx->lock unlocked + * + * where the task could be killed and 'ctx' deactivated + * by perf_event_exit_task. + */ + if (!ctx->is_active) return -EINVAL; raw_spin_lock(&ctx->lock); @@ -4544,7 +4582,7 @@ static void perf_event_task_event(struct perf_task_event *task_event) rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->active_pmu != pmu) + if (cpuctx->unique_pmu != pmu) goto next; perf_event_task_ctx(&cpuctx->ctx, task_event); @@ -4690,7 +4728,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event) rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->active_pmu != pmu) + if (cpuctx->unique_pmu != pmu) goto next; perf_event_comm_ctx(&cpuctx->ctx, comm_event); @@ -4886,7 +4924,7 @@ got_name: rcu_read_lock(); list_for_each_entry_rcu(pmu, &pmus, entry) { cpuctx = get_cpu_ptr(pmu->pmu_cpu_context); - if (cpuctx->active_pmu != pmu) + if (cpuctx->unique_pmu != pmu) goto next; perf_event_mmap_ctx(&cpuctx->ctx, mmap_event, vma->vm_flags & VM_EXEC); @@ -5912,8 +5950,8 @@ static void update_pmu_context(struct pmu *pmu, struct pmu *old_pmu) cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu); - if (cpuctx->active_pmu == old_pmu) - cpuctx->active_pmu = pmu; + if (cpuctx->unique_pmu == old_pmu) + cpuctx->unique_pmu = pmu; } } @@ -6045,7 +6083,7 @@ skip_type: cpuctx->ctx.pmu = pmu; cpuctx->jiffies_interval = 1; INIT_LIST_HEAD(&cpuctx->rotation_list); - cpuctx->active_pmu = pmu; + cpuctx->unique_pmu = pmu; } got_cpu_context: @@ -6219,8 +6257,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu, event->overflow_handler = overflow_handler; - if (attr->disabled) - event->state = PERF_EVENT_STATE_OFF; + perf_event__state_init(event); pmu = NULL; @@ -6594,9 +6631,17 @@ SYSCALL_DEFINE5(perf_event_open, mutex_lock(&gctx->mutex); perf_remove_from_context(group_leader); + + /* + * Removing from the context ends up with disabled + * event. What we want here is event in the initial + * startup state, ready to be add into new context. + */ + perf_event__state_init(group_leader); list_for_each_entry(sibling, &group_leader->sibling_list, group_entry) { perf_remove_from_context(sibling); + perf_event__state_init(sibling); put_ctx(gctx); } mutex_unlock(&gctx->mutex); @@ -7064,7 +7109,7 @@ inherit_task_group(struct perf_event *event, struct task_struct *parent, * child. 
*/ - child_ctx = alloc_perf_context(event->pmu, child); + child_ctx = alloc_perf_context(parent_ctx->pmu, child); if (!child_ctx) return -ENOMEM; diff --git a/kernel/events/hw_breakpoint.c b/kernel/events/hw_breakpoint.c index 086adf2..d99cb4b 100644 --- a/kernel/events/hw_breakpoint.c +++ b/kernel/events/hw_breakpoint.c @@ -147,7 +147,7 @@ fetch_bp_busy_slots(struct bp_busy_slots *slots, struct perf_event *bp, return; } - for_each_online_cpu(cpu) { + for_each_possible_cpu(cpu) { unsigned int nr; nr = per_cpu(nr_cpu_bp_pinned[type], cpu); @@ -233,7 +233,7 @@ toggle_bp_slot(struct perf_event *bp, bool enable, enum bp_type_idx type, if (cpu >= 0) { toggle_bp_task_slot(bp, cpu, enable, type, weight); } else { - for_each_online_cpu(cpu) + for_each_possible_cpu(cpu) toggle_bp_task_slot(bp, cpu, enable, type, weight); } diff --git a/kernel/futex.c b/kernel/futex.c index 61e554e..d38153d 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -60,6 +60,7 @@ #include <linux/pid.h> #include <linux/nsproxy.h> #include <linux/ptrace.h> +#include <linux/hugetlb.h> #include <asm/futex.h> @@ -363,7 +364,7 @@ again: } else { key->both.offset |= FUT_OFF_INODE; /* inode-based key */ key->shared.inode = page_head->mapping->host; - key->shared.pgoff = page_head->index; + key->shared.pgoff = basepage_index(page); } get_futex_key_refs(key); diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c index e079c3e..80ec91d 100644 --- a/kernel/hrtimer.c +++ b/kernel/hrtimer.c @@ -61,6 +61,7 @@ DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) = { + .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock), .clock_base = { { @@ -297,6 +298,10 @@ ktime_t ktime_sub_ns(const ktime_t kt, u64 nsec) } else { unsigned long rem = do_div(nsec, NSEC_PER_SEC); + /* Make sure nsec fits into long */ + if (unlikely(nsec > KTIME_SEC_MAX)) + return (ktime_t){ .tv64 = KTIME_MAX }; + tmp = ktime_set((long)nsec, rem); } @@ -702,17 +707,20 @@ static int hrtimer_switch_to_hres(void) return 1; } +static void clock_was_set_work(struct work_struct *work) +{ + clock_was_set(); +} + +static DECLARE_WORK(hrtimer_work, clock_was_set_work); + /* - * Called from timekeeping code to reprogramm the hrtimer interrupt - * device. If called from the timer interrupt context we defer it to - * softirq context. + * Called from timekeeping and resume code to reprogramm the hrtimer + * interrupt device on all cpus. 
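 * clock_was_set() itself retriggers every CPU via an SMP cross-call,
 * which is not safe from the timer interrupt or the early resume path
 * where this is invoked, so the call is bounced to a workqueue item
 * instead of the old per-CPU HRTIMER softirq.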
*/ void clock_was_set_delayed(void) { - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); - - cpu_base->clock_was_set = 1; - __raise_softirq_irqoff(HRTIMER_SOFTIRQ); + schedule_work(&hrtimer_work); } #else @@ -761,8 +769,10 @@ void hrtimers_resume(void) WARN_ONCE(!irqs_disabled(), KERN_INFO "hrtimers_resume() called with IRQs enabled!"); + /* Retrigger on the local CPU */ retrigger_next_event(NULL); - timerfd_clock_was_set(); + /* And schedule a retrigger for all others */ + clock_was_set_delayed(); } static inline void timer_stats_hrtimer_set_start_info(struct hrtimer *timer) @@ -1307,6 +1317,8 @@ retry: expires = ktime_sub(hrtimer_get_expires(timer), base->offset); + if (expires.tv64 < 0) + expires.tv64 = KTIME_MAX; if (expires.tv64 < expires_next.tv64) expires_next = expires; break; @@ -1409,13 +1421,6 @@ void hrtimer_peek_ahead_timers(void) static void run_hrtimer_softirq(struct softirq_action *h) { - struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases); - - if (cpu_base->clock_was_set) { - cpu_base->clock_was_set = 0; - clock_was_set(); - } - hrtimer_peek_ahead_timers(); } @@ -1640,8 +1645,6 @@ static void __cpuinit init_hrtimers_cpu(int cpu) struct hrtimer_cpu_base *cpu_base = &per_cpu(hrtimer_bases, cpu); int i; - raw_spin_lock_init(&cpu_base->lock); - for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) { cpu_base->clock_base[i].cpu_base = cpu_base; timerqueue_init_head(&cpu_base->clock_base[i].active); diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c index 3e1bdf9..2f61278 100644 --- a/kernel/irq/manage.c +++ b/kernel/irq/manage.c @@ -536,9 +536,9 @@ int can_request_irq(unsigned int irq, unsigned long irqflags) return 0; if (irq_settings_can_request(desc)) { - if (desc->action) - if (irqflags & desc->action->flags & IRQF_SHARED) - canrequest =1; + if (!desc->action || + irqflags & desc->action->flags & IRQF_SHARED) + canrequest = 1; } irq_put_desc_unlock(desc, flags); return canrequest; diff --git a/kernel/kmod.c b/kernel/kmod.c index fabfe54..f625b4f 100644 --- a/kernel/kmod.c +++ b/kernel/kmod.c @@ -421,6 +421,11 @@ int call_usermodehelper_exec(struct subprocess_info *sub_info, int retval = 0; helper_lock(); + if (!sub_info->path) { + retval = -EINVAL; + goto out; + } + if (sub_info->path[0] == '\0') goto out; diff --git a/kernel/relay.c b/kernel/relay.c index 2c242fb..a5be9af 100644 --- a/kernel/relay.c +++ b/kernel/relay.c @@ -1235,6 +1235,7 @@ static ssize_t subbuf_splice_actor(struct file *in, struct splice_pipe_desc spd = { .pages = pages, .nr_pages = 0, + .nr_pages_max = PIPE_DEF_BUFFERS, .partial = partial, .flags = flags, .ops = &relay_pipe_buf_ops, @@ -1302,8 +1303,8 @@ static ssize_t subbuf_splice_actor(struct file *in, ret += padding; out: - splice_shrink_spd(pipe, &spd); - return ret; + splice_shrink_spd(&spd); + return ret; } static ssize_t relay_file_splice_read(struct file *in, diff --git a/kernel/sched.c b/kernel/sched.c index 93d8b46..efb62f0 100644 --- a/kernel/sched.c +++ b/kernel/sched.c @@ -2774,8 +2774,10 @@ static void try_to_wake_up_local(struct task_struct *p) { struct rq *rq = task_rq(p); - BUG_ON(rq != this_rq()); - BUG_ON(p == current); + if (WARN_ON_ONCE(rq != this_rq()) || + WARN_ON_ONCE(p == current)) + return; + lockdep_assert_held(&rq->lock); if (!raw_spin_trylock(&p->pi_lock)) { diff --git a/kernel/sched_clock.c b/kernel/sched_clock.c index 9d8af0b..1eeaf74 100644 --- a/kernel/sched_clock.c +++ b/kernel/sched_clock.c @@ -176,10 +176,36 @@ static u64 sched_clock_remote(struct sched_clock_data *scd) u64 this_clock, 
remote_clock; u64 *ptr, old_val, val; +#if BITS_PER_LONG != 64 +again: + /* + * Careful here: The local and the remote clock values need to + * be read out atomic as we need to compare the values and + * then update either the local or the remote side. So the + * cmpxchg64 below only protects one readout. + * + * We must reread via sched_clock_local() in the retry case on + * 32bit as an NMI could use sched_clock_local() via the + * tracer and hit between the readout of + * the low32bit and the high 32bit portion. + */ + this_clock = sched_clock_local(my_scd); + /* + * We must enforce atomic readout on 32bit, otherwise the + * update on the remote cpu can hit inbetween the readout of + * the low32bit and the high 32bit portion. + */ + remote_clock = cmpxchg64(&scd->clock, 0, 0); +#else + /* + * On 64bit the read of [my]scd->clock is atomic versus the + * update, so we can avoid the above 32bit dance. + */ sched_clock_local(my_scd); again: this_clock = my_scd->clock; remote_clock = scd->clock; +#endif /* * Use the opportunity that we have both locks diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c index c768588..fae7d67 100644 --- a/kernel/sched_fair.c +++ b/kernel/sched_fair.c @@ -4277,7 +4277,7 @@ static unsigned int get_rr_interval_fair(struct rq *rq, struct task_struct *task * idle runqueue: */ if (rq->cfs.load.weight) - rr_interval = NS_TO_JIFFIES(sched_slice(&rq->cfs, se)); + rr_interval = NS_TO_JIFFIES(sched_slice(cfs_rq_of(se), se)); return rr_interval; } diff --git a/kernel/signal.c b/kernel/signal.c index b0c0887..f15021b 100644 --- a/kernel/signal.c +++ b/kernel/signal.c @@ -2664,7 +2664,7 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info) static int do_tkill(pid_t tgid, pid_t pid, int sig) { - struct siginfo info; + struct siginfo info = {}; info.si_signo = sig; info.si_errno = 0; diff --git a/kernel/sys.c b/kernel/sys.c index 84e353b..1c69aa7 100644 --- a/kernel/sys.c +++ b/kernel/sys.c @@ -320,7 +320,6 @@ void kernel_restart_prepare(char *cmd) system_state = SYSTEM_RESTART; usermodehelper_disable(); device_shutdown(); - syscore_shutdown(); } /** @@ -335,6 +334,7 @@ void kernel_restart(char *cmd) { kernel_restart_prepare(cmd); disable_nonboot_cpus(); + syscore_shutdown(); if (!cmd) printk(KERN_EMERG "Restarting system.\n"); else @@ -360,6 +360,7 @@ static void kernel_shutdown_prepare(enum system_states state) void kernel_halt(void) { kernel_shutdown_prepare(SYSTEM_HALT); + disable_nonboot_cpus(); syscore_shutdown(); printk(KERN_EMERG "System halted.\n"); kmsg_dump(KMSG_DUMP_HALT); diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c index 9e40370..20ba7b4 100644 --- a/kernel/time/tick-broadcast.c +++ b/kernel/time/tick-broadcast.c @@ -66,6 +66,8 @@ static void tick_broadcast_start_periodic(struct clock_event_device *bc) */ int tick_check_broadcast_device(struct clock_event_device *dev) { + struct clock_event_device *cur = tick_broadcast_device.evtdev; + if ((dev->features & CLOCK_EVT_FEAT_DUMMY) || (tick_broadcast_device.evtdev && tick_broadcast_device.evtdev->rating >= dev->rating) || @@ -73,6 +75,8 @@ int tick_check_broadcast_device(struct clock_event_device *dev) return 0; clockevents_exchange_device(tick_broadcast_device.evtdev, dev); + if (cur) + cur->event_handler = clockevents_handle_noop; tick_broadcast_device.evtdev = dev; if (!cpumask_empty(tick_get_broadcast_mask())) tick_broadcast_start_periodic(dev); @@ -392,7 +396,15 @@ void tick_check_oneshot_broadcast(int cpu) if (cpumask_test_cpu(cpu, 
to_cpumask(tick_broadcast_oneshot_mask))) { struct tick_device *td = &per_cpu(tick_cpu_device, cpu); - clockevents_set_mode(td->evtdev, CLOCK_EVT_MODE_ONESHOT); + /* + * We might be in the middle of switching over from + * periodic to oneshot. If the CPU has not yet + * switched over, leave the device alone. + */ + if (td->mode == TICKDEV_MODE_ONESHOT) { + clockevents_set_mode(td->evtdev, + CLOCK_EVT_MODE_ONESHOT); + } } } diff --git a/kernel/time/tick-common.c b/kernel/time/tick-common.c index 119528d..c43b479 100644 --- a/kernel/time/tick-common.c +++ b/kernel/time/tick-common.c @@ -323,6 +323,7 @@ static void tick_shutdown(unsigned int *cpup) */ dev->mode = CLOCK_EVT_MODE_UNUSED; clockevents_exchange_device(dev, NULL); + dev->event_handler = clockevents_handle_noop; td->evtdev = NULL; } raw_spin_unlock_irqrestore(&tick_device_lock, flags); diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c index c473ce2..c0be5f2 100644 --- a/kernel/time/tick-sched.c +++ b/kernel/time/tick-sched.c @@ -811,7 +811,7 @@ void tick_cancel_sched_timer(int cpu) hrtimer_cancel(&ts->sched_timer); # endif - ts->nohz_mode = NOHZ_MODE_INACTIVE; + memset(ts, 0, sizeof(*ts)); } #endif diff --git a/kernel/timer.c b/kernel/timer.c index 27982d9..5eac0d8 100644 --- a/kernel/timer.c +++ b/kernel/timer.c @@ -145,9 +145,11 @@ static unsigned long round_jiffies_common(unsigned long j, int cpu, /* now that we have rounded, subtract the extra skew again */ j -= cpu * 3; - if (j <= jiffies) /* rounding ate our timeout entirely; */ - return original; - return j; + /* + * Make sure j is still in the future. Otherwise return the + * unmodified value. + */ + return time_is_after_jiffies(j) ? j : original; } /** @@ -1630,12 +1632,12 @@ static int __cpuinit init_timers_cpu(int cpu) boot_done = 1; base = &boot_tvec_bases; } + spin_lock_init(&base->lock); tvec_base_done[cpu] = 1; } else { base = per_cpu(tvec_bases, cpu); } - spin_lock_init(&base->lock); for (j = 0; j < TVN_SIZE; j++) { INIT_LIST_HEAD(base->tv5.vec + j); diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index b2ca34a..0d704b0 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -548,7 +548,7 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) pages = DIV_ROUND_UP(functions, PROFILES_PER_PAGE); - for (i = 0; i < pages; i++) { + for (i = 1; i < pages; i++) { pg->next = (void *)get_zeroed_page(GFP_KERNEL); if (!pg->next) goto out_free; @@ -566,7 +566,6 @@ int ftrace_profile_pages_init(struct ftrace_profile_stat *stat) free_page(tmp); } - free_page((unsigned long)stat->pages); stat->pages = NULL; stat->start = NULL; @@ -934,6 +933,19 @@ static __init void ftrace_profile_debugfs(struct dentry *d_tracer) static struct pid * const ftrace_swapper_pid = &init_struct_pid; +static loff_t +ftrace_filter_lseek(struct file *file, loff_t offset, int whence) +{ + loff_t ret; + + if (file->f_mode & FMODE_READ) + ret = seq_lseek(file, offset, whence); + else + file->f_pos = ret = 1; + + return ret; +} + #ifdef CONFIG_DYNAMIC_FTRACE #ifndef CONFIG_FTRACE_MCOUNT_RECORD @@ -2300,19 +2312,6 @@ ftrace_notrace_open(struct inode *inode, struct file *file) inode, file); } -static loff_t -ftrace_regex_lseek(struct file *file, loff_t offset, int origin) -{ - loff_t ret; - - if (file->f_mode & FMODE_READ) - ret = seq_lseek(file, offset, origin); - else - file->f_pos = ret = 1; - - return ret; -} - static int ftrace_match(char *str, char *regex, int len, int type) { int matched = 0; @@ -3119,7 +3118,7 @@ static const struct file_operations 
ftrace_filter_fops = { .open = ftrace_filter_open, .read = seq_read, .write = ftrace_filter_write, - .llseek = ftrace_regex_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_regex_release, }; @@ -3127,7 +3126,7 @@ static const struct file_operations ftrace_notrace_fops = { .open = ftrace_notrace_open, .read = seq_read, .write = ftrace_notrace_write, - .llseek = ftrace_regex_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_regex_release, }; @@ -3288,7 +3287,8 @@ out: if (fail) return -EINVAL; - ftrace_graph_filter_enabled = 1; + ftrace_graph_filter_enabled = !!(*idx); + return 0; } @@ -3335,8 +3335,8 @@ static const struct file_operations ftrace_graph_fops = { .open = ftrace_graph_open, .read = seq_read, .write = ftrace_graph_write, + .llseek = ftrace_filter_lseek, .release = ftrace_graph_release, - .llseek = seq_lseek, }; #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ @@ -3822,7 +3822,7 @@ static const struct file_operations ftrace_pid_fops = { .open = ftrace_pid_open, .write = ftrace_pid_write, .read = seq_read, - .llseek = seq_lseek, + .llseek = ftrace_filter_lseek, .release = ftrace_pid_release, }; @@ -3934,12 +3934,8 @@ ftrace_enable_sysctl(struct ctl_table *table, int write, ftrace_startup_sysctl(); /* we are starting ftrace again */ - if (ftrace_ops_list != &ftrace_list_end) { - if (ftrace_ops_list->next == &ftrace_list_end) - ftrace_trace_function = ftrace_ops_list->func; - else - ftrace_trace_function = ftrace_ops_list_func; - } + if (ftrace_ops_list != &ftrace_list_end) + update_ftrace_function(); } else { /* stopping ftrace calls (just send to ftrace_stub) */ diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index b3ae845..34d15ba 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -628,7 +628,15 @@ __update_max_tr(struct trace_array *tr, struct task_struct *tsk, int cpu) memcpy(max_data->comm, tsk->comm, TASK_COMM_LEN); max_data->pid = tsk->pid; - max_data->uid = task_uid(tsk); + /* + * If tsk == current, then use current_uid(), as that does not use + * RCU. The irq tracer can be called out of RCU scope. + */ + if (tsk == current) + max_data->uid = current_uid(); + else + max_data->uid = task_uid(tsk); + max_data->nice = tsk->static_prio - 20 - MAX_RT_PRIO; max_data->policy = tsk->policy; max_data->rt_priority = tsk->rt_priority; @@ -3237,6 +3245,7 @@ waitagain: memset(&iter->seq, 0, sizeof(struct trace_iterator) - offsetof(struct trace_iterator, seq)); + cpumask_clear(iter->started); iter->pos = -1; trace_event_read_lock(); @@ -3355,6 +3364,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, .pages = pages_def, .partial = partial_def, .nr_pages = 0, /* This gets updated below. 
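 * .nr_pages_max, by contrast, stays fixed at the size of the pages[]
 * and partial[] arrays (PIPE_DEF_BUFFERS here, or whatever
 * splice_grow_spd() resized them to) and is what the reworked
 * splice_shrink_spd(&spd) consults when freeing them.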
*/ + .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &tracing_pipe_buf_ops, .spd_release = tracing_spd_release_pipe, @@ -3426,7 +3436,7 @@ static ssize_t tracing_splice_read_pipe(struct file *filp, ret = splice_to_pipe(pipe, &spd); out: - splice_shrink_spd(pipe, &spd); + splice_shrink_spd(&spd); return ret; out_err: @@ -3839,6 +3849,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, struct splice_pipe_desc spd = { .pages = pages_def, .partial = partial_def, + .nr_pages_max = PIPE_DEF_BUFFERS, .flags = flags, .ops = &buffer_pipe_buf_ops, .spd_release = buffer_spd_release, @@ -3927,7 +3938,7 @@ tracing_buffers_splice_read(struct file *file, loff_t *ppos, } ret = splice_to_pipe(pipe, &spd); - splice_shrink_spd(pipe, &spd); + splice_shrink_spd(&spd); out: return ret; } @@ -4360,6 +4371,8 @@ static __init int tracer_init_debugfs(void) trace_access_lock_init(); d_tracer = tracing_init_dentry(); + if (!d_tracer) + return 0; trace_create_file("tracing_enabled", 0644, d_tracer, &global_trace, &tracing_ctrl_fops); @@ -4487,30 +4500,32 @@ void trace_init_global_iter(struct trace_iterator *iter) iter->cpu_file = TRACE_PIPE_ALL_CPU; } -static void -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) +void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) { - static arch_spinlock_t ftrace_dump_lock = - (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED; /* use static because iter can be a bit big for the stack */ static struct trace_iterator iter; + static atomic_t dump_running; unsigned int old_userobj; - static int dump_ran; unsigned long flags; int cnt = 0, cpu; - /* only one dump */ - local_irq_save(flags); - arch_spin_lock(&ftrace_dump_lock); - if (dump_ran) - goto out; - - dump_ran = 1; + /* Only allow one dump user at a time. */ + if (atomic_inc_return(&dump_running) != 1) { + atomic_dec(&dump_running); + return; + } + /* + * Always turn off tracing when we dump. + * We don't need to show trace output of what happens + * between multiple crashes. + * + * If the user does a sysrq-z, then they can re-enable + * tracing with echo 1 > tracing_on. 
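 * (typically: echo 1 > /sys/kernel/debug/tracing/tracing_on)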
+ */ tracing_off(); - if (disable_tracing) - ftrace_kill(); + local_irq_save(flags); trace_init_global_iter(&iter); @@ -4581,26 +4596,15 @@ __ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode) printk(KERN_TRACE "---------------------------------\n"); out_enable: - /* Re-enable tracing if requested */ - if (!disable_tracing) { - trace_flags |= old_userobj; + trace_flags |= old_userobj; - for_each_tracing_cpu(cpu) { - atomic_dec(&iter.tr->data[cpu]->disabled); - } - tracing_on(); + for_each_tracing_cpu(cpu) { + atomic_dec(&iter.tr->data[cpu]->disabled); } - - out: - arch_spin_unlock(&ftrace_dump_lock); + atomic_dec(&dump_running); local_irq_restore(flags); } - -/* By default: disable tracing after the dump */ -void ftrace_dump(enum ftrace_dump_mode oops_dump_mode) -{ - __ftrace_dump(true, oops_dump_mode); -} +EXPORT_SYMBOL_GPL(ftrace_dump); __init static int tracer_alloc_buffers(void) { diff --git a/kernel/trace/trace_selftest.c b/kernel/trace/trace_selftest.c index 288541f..09fd98a 100644 --- a/kernel/trace/trace_selftest.c +++ b/kernel/trace/trace_selftest.c @@ -461,8 +461,6 @@ trace_selftest_startup_function(struct tracer *trace, struct trace_array *tr) /* Maximum number of functions to trace before diagnosing a hang */ #define GRAPH_MAX_FUNC_TEST 100000000 -static void -__ftrace_dump(bool disable_tracing, enum ftrace_dump_mode oops_dump_mode); static unsigned int graph_hang_thresh; /* Wrap the real function entry probe to avoid possible hanging */ @@ -472,8 +470,11 @@ static int trace_graph_entry_watchdog(struct ftrace_graph_ent *trace) if (unlikely(++graph_hang_thresh > GRAPH_MAX_FUNC_TEST)) { ftrace_graph_stop(); printk(KERN_WARNING "BUG: Function graph tracer hang!\n"); - if (ftrace_dump_on_oops) - __ftrace_dump(false, DUMP_ALL); + if (ftrace_dump_on_oops) { + ftrace_dump(DUMP_ALL); + /* ftrace_dump() disables tracing */ + tracing_on(); + } return 0; } diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c index b0b53b8..2d43977 100644 --- a/kernel/trace/trace_stack.c +++ b/kernel/trace/trace_stack.c @@ -17,13 +17,24 @@ #define STACK_TRACE_ENTRIES 500 +#ifdef CC_USING_FENTRY +# define fentry 1 +#else +# define fentry 0 +#endif + static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES+1] = { [0 ... (STACK_TRACE_ENTRIES)] = ULONG_MAX }; static unsigned stack_dump_index[STACK_TRACE_ENTRIES]; +/* + * Reserve one entry for the passed in ip. This will allow + * us to remove most or all of the stack size overhead + * added by the stack tracer itself. 
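 * The backing array keeps all STACK_TRACE_ENTRIES + 1 slots: slot 0 is
 * filled by check_stack() with the traced ip, while save_stack_trace()
 * writes from slot 1 onward through the shifted .entries pointer.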
+ */ static struct stack_trace max_stack_trace = { - .max_entries = STACK_TRACE_ENTRIES, - .entries = stack_dump_trace, + .max_entries = STACK_TRACE_ENTRIES - 1, + .entries = &stack_dump_trace[1], }; static unsigned long max_stack_size; @@ -37,25 +48,34 @@ static DEFINE_MUTEX(stack_sysctl_mutex); int stack_tracer_enabled; static int last_stack_tracer_enabled; -static inline void check_stack(void) +static inline void +check_stack(unsigned long ip, unsigned long *stack) { unsigned long this_size, flags; unsigned long *p, *top, *start; + static int tracer_frame; + int frame_size = ACCESS_ONCE(tracer_frame); int i; - this_size = ((unsigned long)&this_size) & (THREAD_SIZE-1); + this_size = ((unsigned long)stack) & (THREAD_SIZE-1); this_size = THREAD_SIZE - this_size; + /* Remove the frame of the tracer */ + this_size -= frame_size; if (this_size <= max_stack_size) return; /* we do not handle interrupt stacks yet */ - if (!object_is_on_stack(&this_size)) + if (!object_is_on_stack(stack)) return; local_irq_save(flags); arch_spin_lock(&max_stack_lock); + /* In case another CPU set the tracer_frame on us */ + if (unlikely(!frame_size)) + this_size -= tracer_frame; + /* a race could have already updated it */ if (this_size <= max_stack_size) goto out; @@ -68,10 +88,18 @@ static inline void check_stack(void) save_stack_trace(&max_stack_trace); /* + * Add the passed in ip from the function tracer. + * Searching for this on the stack will skip over + * most of the overhead from the stack tracer itself. + */ + stack_dump_trace[0] = ip; + max_stack_trace.nr_entries++; + + /* * Now find where in the stack these are. */ i = 0; - start = &this_size; + start = stack; top = (unsigned long *) (((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE); @@ -95,6 +123,18 @@ static inline void check_stack(void) found = 1; /* Start the search from here */ start = p + 1; + /* + * We do not want to show the overhead + * of the stack tracer stack in the + * max stack. If we haven't figured + * out what that is, then figure it out + * now. + */ + if (unlikely(!tracer_frame) && i == 1) { + tracer_frame = (p - stack) * + sizeof(unsigned long); + max_stack_size -= tracer_frame; + } } } @@ -110,6 +150,7 @@ static inline void check_stack(void) static void stack_trace_call(unsigned long ip, unsigned long parent_ip) { + unsigned long stack; int cpu; if (unlikely(!ftrace_enabled || stack_trace_disabled)) @@ -122,7 +163,26 @@ stack_trace_call(unsigned long ip, unsigned long parent_ip) if (per_cpu(trace_active, cpu)++ != 0) goto out; - check_stack(); + /* + * When fentry is used, the traced function does not get + * its stack frame set up, and we lose the parent. + * The ip is pretty useless because the function tracer + * was called before that function set up its stack frame. + * In this case, we use the parent ip. + * + * By adding the return address of either the parent ip + * or the current ip we can disregard most of the stack usage + * caused by the stack tracer itself. + * + * The function tracer always reports the address of where the + * mcount call was, but the stack will hold the return address. 
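 * Adding MCOUNT_INSN_SIZE to that call-site address yields the return
 * address actually sitting on the stack, which is what check_stack()
 * searches for so the tracer's own frames can be skipped.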
+ */ + if (fentry) + ip = parent_ip; + else + ip += MCOUNT_INSN_SIZE; + + check_stack(ip, &stack); out: per_cpu(trace_active, cpu)--; @@ -360,6 +420,8 @@ static __init int stack_trace_init(void) struct dentry *d_tracer; d_tracer = tracing_init_dentry(); + if (!d_tracer) + return 0; trace_create_file("stack_max_size", 0644, d_tracer, &max_stack_size, &stack_max_size_fops); diff --git a/kernel/trace/trace_stat.c b/kernel/trace/trace_stat.c index 96cffb2..847f88a 100644 --- a/kernel/trace/trace_stat.c +++ b/kernel/trace/trace_stat.c @@ -307,6 +307,8 @@ static int tracing_stat_init(void) struct dentry *d_tracing; d_tracing = tracing_init_dentry(); + if (!d_tracing) + return 0; stat_dir = debugfs_create_dir("trace_stat", d_tracing); if (!stat_dir) diff --git a/kernel/trace/trace_syscalls.c b/kernel/trace/trace_syscalls.c index ee7b5a0..5819cd5 100644 --- a/kernel/trace/trace_syscalls.c +++ b/kernel/trace/trace_syscalls.c @@ -304,6 +304,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) struct ring_buffer *buffer; int size; int syscall_nr; + unsigned long irq_flags; + int pc; syscall_nr = syscall_get_nr(current, regs); if (syscall_nr < 0) @@ -317,8 +319,11 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) size = sizeof(*entry) + sizeof(unsigned long) * sys_data->nb_args; + local_save_flags(irq_flags); + pc = preempt_count(); + event = trace_current_buffer_lock_reserve(&buffer, - sys_data->enter_event->event.type, size, 0, 0); + sys_data->enter_event->event.type, size, irq_flags, pc); if (!event) return; @@ -328,7 +333,8 @@ void ftrace_syscall_enter(void *ignore, struct pt_regs *regs, long id) if (!filter_current_check_discard(buffer, sys_data->enter_event, entry, event)) - trace_current_buffer_unlock_commit(buffer, event, 0, 0); + trace_current_buffer_unlock_commit(buffer, event, + irq_flags, pc); } void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) @@ -338,6 +344,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) struct ring_buffer_event *event; struct ring_buffer *buffer; int syscall_nr; + unsigned long irq_flags; + int pc; syscall_nr = syscall_get_nr(current, regs); if (syscall_nr < 0) @@ -349,8 +357,12 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) if (!sys_data) return; + local_save_flags(irq_flags); + pc = preempt_count(); + event = trace_current_buffer_lock_reserve(&buffer, - sys_data->exit_event->event.type, sizeof(*entry), 0, 0); + sys_data->exit_event->event.type, sizeof(*entry), + irq_flags, pc); if (!event) return; @@ -360,7 +372,8 @@ void ftrace_syscall_exit(void *ignore, struct pt_regs *regs, long ret) if (!filter_current_check_discard(buffer, sys_data->exit_event, entry, event)) - trace_current_buffer_unlock_commit(buffer, event, 0, 0); + trace_current_buffer_unlock_commit(buffer, event, + irq_flags, pc); } int reg_event_syscall_enter(struct ftrace_event_call *call) diff --git a/lib/klist.c b/lib/klist.c index 573d606..d0b4b18 100644 --- a/lib/klist.c +++ b/lib/klist.c @@ -193,10 +193,10 @@ static void klist_release(struct kref *kref) if (waiter->node != n) continue; + list_del(&waiter->list); waiter->woken = 1; mb(); wake_up_process(waiter->process); - list_del(&waiter->list); } spin_unlock(&klist_remove_lock); knode_set_klist(n, NULL); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index a9ab45e..78f7186 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1838,6 +1838,8 @@ static void collapse_huge_page(struct mm_struct *mm, goto out; vma = find_vma(mm, 
address); + if (!vma) + goto out; hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK; hend = vma->vm_end & HPAGE_PMD_MASK; if (address < hstart || address + HPAGE_PMD_SIZE > hend) @@ -1893,7 +1895,12 @@ static void collapse_huge_page(struct mm_struct *mm, pte_unmap(pte); spin_lock(&mm->page_table_lock); BUG_ON(!pmd_none(*pmd)); - set_pmd_at(mm, address, pmd, _pmd); + /* + * We can only use set_pmd_at when establishing + * hugepmds and never for establishing regular pmds that + * points to regular pagetables. Use pmd_populate for that + */ + pmd_populate(mm, pmd, pmd_pgtable(_pmd)); spin_unlock(&mm->page_table_lock); anon_vma_unlock(vma->anon_vma); goto out; diff --git a/mm/hugetlb.c b/mm/hugetlb.c index 2c56a53..6fdad25 100644 --- a/mm/hugetlb.c +++ b/mm/hugetlb.c @@ -602,6 +602,23 @@ int PageHuge(struct page *page) EXPORT_SYMBOL_GPL(PageHuge); +pgoff_t __basepage_index(struct page *page) +{ + struct page *page_head = compound_head(page); + pgoff_t index = page_index(page_head); + unsigned long compound_idx; + + if (!PageHuge(page_head)) + return page_index(page); + + if (compound_order(page_head) >= MAX_ORDER) + compound_idx = page_to_pfn(page) - page_to_pfn(page_head); + else + compound_idx = page - page_head; + + return (index << compound_order(page_head)) + compound_idx; +} + static struct page *alloc_fresh_huge_page_node(struct hstate *h, int nid) { struct page *page; @@ -2662,7 +2679,7 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, if (ptep) { entry = huge_ptep_get(ptep); if (unlikely(is_hugetlb_entry_migration(entry))) { - migration_entry_wait(mm, (pmd_t *)ptep, address); + migration_entry_wait_huge(mm, ptep); return 0; } else if (unlikely(is_hugetlb_entry_hwpoisoned(entry))) return VM_FAULT_HWPOISON_LARGE | @@ -2800,7 +2817,17 @@ int follow_hugetlb_page(struct mm_struct *mm, struct vm_area_struct *vma, break; } - if (absent || + /* + * We need call hugetlb_fault for both hugepages under migration + * (in which case hugetlb_fault waits for the migration,) and + * hwpoisoned hugepages (in which case we need to prevent the + * caller from accessing to them.) In order to do this, we use + * here is_swap_pte instead of is_hugetlb_entry_migration and + * is_hugetlb_entry_hwpoisoned. This is because it simply covers + * both cases, and because we can't follow correct pages + * directly from any kind of swap entries. + */ + if (absent || is_swap_pte(huge_ptep_get(pte)) || ((flags & FOLL_WRITE) && !pte_write(huge_ptep_get(pte)))) { int ret; diff --git a/mm/memcontrol.c b/mm/memcontrol.c index 57cdf5a..d7b51d5 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -4433,7 +4433,13 @@ static int compare_thresholds(const void *a, const void *b) const struct mem_cgroup_threshold *_a = a; const struct mem_cgroup_threshold *_b = b; - return _a->threshold - _b->threshold; + if (_a->threshold > _b->threshold) + return 1; + + if (_a->threshold < _b->threshold) + return -1; + + return 0; } static int mem_cgroup_oom_notify_cb(struct mem_cgroup *mem) diff --git a/mm/memory.c b/mm/memory.c index 4da0f8a..ac8d3a1 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2317,6 +2317,53 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); +/** + * vm_iomap_memory - remap memory to userspace + * @vma: user vma to map to + * @start: start of area + * @len: size of area + * + * This is a simplified io_remap_pfn_range() for common driver use. 
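 * A minimal caller sketch (illustrative only; mydrv_mmap, mydev and its
 * phys_base/phys_len fields are made-up names):
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return vm_iomap_memory(vma, mydev->phys_base, mydev->phys_len);
 *	}
 *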
The + * driver just needs to give us the physical memory range to be mapped, + * we'll figure out the rest from the vma information. + * + * NOTE! Some drivers might want to tweak vma->vm_page_prot first to get + * whatever write-combining details or similar. + */ +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) +{ + unsigned long vm_len, pfn, pages; + + /* Check that the physical memory area passed in looks valid */ + if (start + len < start) + return -EINVAL; + /* + * You *really* shouldn't map things that aren't page-aligned, + * but we've historically allowed it because IO memory might + * just have smaller alignment. + */ + len += start & ~PAGE_MASK; + pfn = start >> PAGE_SHIFT; + pages = (len + ~PAGE_MASK) >> PAGE_SHIFT; + if (pfn + pages < pfn) + return -EINVAL; + + /* We start the mapping 'vm_pgoff' pages into the area */ + if (vma->vm_pgoff > pages) + return -EINVAL; + pfn += vma->vm_pgoff; + pages -= vma->vm_pgoff; + + /* Can we fit all of the mapping? */ + vm_len = vma->vm_end - vma->vm_start; + if (vm_len >> PAGE_SHIFT > pages) + return -EINVAL; + + /* Ok, let it rip */ + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_iomap_memory); + static int apply_to_pte_range(struct mm_struct *mm, pmd_t *pmd, unsigned long addr, unsigned long end, pte_fn_t fn, void *data) diff --git a/mm/migrate.c b/mm/migrate.c index 480714b..3e315a7 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -147,7 +147,7 @@ static int remove_migration_pte(struct page *new, struct vm_area_struct *vma, if (PageHuge(new)) pte = pte_mkhuge(pte); #endif - flush_cache_page(vma, addr, pte_pfn(pte)); + flush_dcache_page(new); set_pte_at(mm, addr, ptep, pte); if (PageHuge(new)) { @@ -184,15 +184,14 @@ static void remove_migration_ptes(struct page *old, struct page *new) * * This function is called from do_swap_page(). */ -void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, - unsigned long address) +static void __migration_entry_wait(struct mm_struct *mm, pte_t *ptep, + spinlock_t *ptl) { - pte_t *ptep, pte; - spinlock_t *ptl; + pte_t pte; swp_entry_t entry; struct page *page; - ptep = pte_offset_map_lock(mm, pmd, address, &ptl); + spin_lock(ptl); pte = *ptep; if (!is_swap_pte(pte)) goto out; @@ -220,6 +219,20 @@ out: pte_unmap_unlock(ptep, ptl); } +void migration_entry_wait(struct mm_struct *mm, pmd_t *pmd, + unsigned long address) +{ + spinlock_t *ptl = pte_lockptr(mm, pmd); + pte_t *ptep = pte_offset_map(pmd, address); + __migration_entry_wait(mm, ptep, ptl); +} + +void migration_entry_wait_huge(struct mm_struct *mm, pte_t *pte) +{ + spinlock_t *ptl = &(mm)->page_table_lock; + __migration_entry_wait(mm, pte, ptl); +} + #ifdef CONFIG_BLOCK /* Returns true if all buffers are successfully locked */ static bool buffer_migrate_lock_buffers(struct buffer_head *head, @@ -1581,7 +1581,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) if (mm) { /* Check the cache first. */ /* (Cache hit rate is typically around 35%.) 
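 * ACCESS_ONCE() forces a single load of mm->mmap_cache, so the compiler
 * cannot re-read the concurrently updated field between the check and
 * the use, which could otherwise hand back a vma that was never
 * validated.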
*/ - vma = mm->mmap_cache; + vma = ACCESS_ONCE(mm->mmap_cache); if (!(vma && vma->vm_end > addr && vma->vm_start <= addr)) { struct rb_node * rb_node; diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c index 88fa54d..2b52947 100644 --- a/mm/mmu_notifier.c +++ b/mm/mmu_notifier.c @@ -37,51 +37,48 @@ static struct srcu_struct srcu; void __mmu_notifier_release(struct mm_struct *mm) { struct mmu_notifier *mn; + struct hlist_node *node; int id; /* - * srcu_read_lock() here will block synchronize_srcu() in - * mmu_notifier_unregister() until all registered - * ->release() callouts this function makes have - * returned. + * SRCU here will block mmu_notifier_unregister until + * ->release returns. */ id = srcu_read_lock(&srcu); + hlist_for_each_entry_rcu(mn, node, &mm->mmu_notifier_mm->list, hlist) + /* + * If ->release runs before mmu_notifier_unregister it must be + * handled, as it's the only way for the driver to flush all + * existing sptes and stop the driver from establishing any more + * sptes before all the pages in the mm are freed. + */ + if (mn->ops->release) + mn->ops->release(mn, mm); + srcu_read_unlock(&srcu, id); + spin_lock(&mm->mmu_notifier_mm->lock); while (unlikely(!hlist_empty(&mm->mmu_notifier_mm->list))) { mn = hlist_entry(mm->mmu_notifier_mm->list.first, struct mmu_notifier, hlist); - /* - * Unlink. This will prevent mmu_notifier_unregister() - * from also making the ->release() callout. + * We arrived before mmu_notifier_unregister so + * mmu_notifier_unregister will do nothing other than to wait + * for ->release to finish and for mmu_notifier_unregister to + * return. */ hlist_del_init_rcu(&mn->hlist); - spin_unlock(&mm->mmu_notifier_mm->lock); - - /* - * Clear sptes. (see 'release' description in mmu_notifier.h) - */ - if (mn->ops->release) - mn->ops->release(mn, mm); - - spin_lock(&mm->mmu_notifier_mm->lock); } spin_unlock(&mm->mmu_notifier_mm->lock); /* - * All callouts to ->release() which we have done are complete. - * Allow synchronize_srcu() in mmu_notifier_unregister() to complete - */ - srcu_read_unlock(&srcu, id); - - /* - * mmu_notifier_unregister() may have unlinked a notifier and may - * still be calling out to it. Additionally, other notifiers - * may have been active via vmtruncate() et. al. Block here - * to ensure that all notifier callouts for this mm have been - * completed and the sptes are really cleaned up before returning - * to exit_mmap(). + * synchronize_srcu here prevents mmu_notifier_release from returning to + * exit_mmap (which would proceed with freeing all pages in the mm) + * until the ->release method returns, if it was invoked by + * mmu_notifier_unregister. + * + * The mmu_notifier_mm can't go away from under us because one mm_count + * is held by exit_mmap. */ synchronize_srcu(&srcu); } @@ -302,31 +299,34 @@ void mmu_notifier_unregister(struct mmu_notifier *mn, struct mm_struct *mm) { BUG_ON(atomic_read(&mm->mm_count) <= 0); - spin_lock(&mm->mmu_notifier_mm->lock); if (!hlist_unhashed(&mn->hlist)) { + /* + * SRCU here will force exit_mmap to wait for ->release to + * finish before freeing the pages. + */ int id; + id = srcu_read_lock(&srcu); /* - * Ensure we synchronize up with __mmu_notifier_release(). + * exit_mmap will block in mmu_notifier_release to guarantee + * that ->release is called before freeing the pages. 
*/ - id = srcu_read_lock(&srcu); - - hlist_del_rcu(&mn->hlist); - spin_unlock(&mm->mmu_notifier_mm->lock); - if (mn->ops->release) mn->ops->release(mn, mm); + srcu_read_unlock(&srcu, id); + spin_lock(&mm->mmu_notifier_mm->lock); /* - * Allow __mmu_notifier_release() to complete. + * Can not use list_del_rcu() since __mmu_notifier_release + * can delete it before we hold the lock. */ - srcu_read_unlock(&srcu, id); - } else + hlist_del_init_rcu(&mn->hlist); spin_unlock(&mm->mmu_notifier_mm->lock); + } /* - * Wait for any running method to finish, including ->release() if it - * was run by __mmu_notifier_release() instead of us. + * Wait for any running method to finish, of course including + * ->release if it was run by mmu_notifier_relase instead of us. */ synchronize_srcu(&srcu); @@ -808,7 +808,7 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr) struct vm_area_struct *vma; /* check the cache first */ - vma = mm->mmap_cache; + vma = ACCESS_ONCE(mm->mmap_cache); if (vma && vma->vm_start <= addr && vma->vm_end > addr) return vma; @@ -1826,6 +1826,16 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr, } EXPORT_SYMBOL(remap_pfn_range); +int vm_iomap_memory(struct vm_area_struct *vma, phys_addr_t start, unsigned long len) +{ + unsigned long pfn = start >> PAGE_SHIFT; + unsigned long vm_len = vma->vm_end - vma->vm_start; + + pfn += vma->vm_pgoff; + return io_remap_pfn_range(vma, vma->vm_start, pfn, vm_len, vma->vm_page_prot); +} +EXPORT_SYMBOL(vm_iomap_memory); + int remap_vmalloc_range(struct vm_area_struct *vma, void *addr, unsigned long pgoff) { diff --git a/mm/page_alloc.c b/mm/page_alloc.c index aed2f55..2ddd353 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -5760,6 +5760,10 @@ __offline_isolated_pages(unsigned long start_pfn, unsigned long end_pfn) zone->free_area[order].nr_free--; __mod_zone_page_state(zone, NR_FREE_PAGES, - (1UL << order)); +#ifdef CONFIG_HIGHMEM + if (PageHighMem(page)) + totalhigh_pages -= 1 << order; +#endif for (i = 0; i < (1 << order); i++) SetPageReserved((page+i)); pfn += (1 << order); diff --git a/mm/swap_state.c b/mm/swap_state.c index 10e9198..43b957e 100644 --- a/mm/swap_state.c +++ b/mm/swap_state.c @@ -315,8 +315,24 @@ struct page *read_swap_cache_async(swp_entry_t entry, gfp_t gfp_mask, * Swap entry may have been freed since our caller observed it. */ err = swapcache_prepare(entry); - if (err == -EEXIST) { /* seems racy */ + if (err == -EEXIST) { radix_tree_preload_end(); + /* + * We might race against get_swap_page() and stumble + * across a SWAP_HAS_CACHE swap_map entry whose page + * has not been brought into the swapcache yet, while + * the other end is scheduled away waiting on discard + * I/O completion at scan_swap_map(). + * + * In order to avoid turning this transitory state + * into a permanent loop around this -EEXIST case + * if !CONFIG_PREEMPT and the I/O completion happens + * to be waiting on the CPU waitqueue where we are now + * busy looping, we just conditionally invoke the + * scheduler here, if there are some more important + * tasks to run. + */ + cond_resched(); continue; } if (err) { /* swp entry is obsolete ? 
*/ diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c index 1e93a91..917ecb9 100644 --- a/net/8021q/vlan.c +++ b/net/8021q/vlan.c @@ -108,6 +108,13 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) grp = rtnl_dereference(real_dev->vlgrp); BUG_ON(!grp); + /* Take it out of our own structures, but be sure to interlock with + * HW accelerating devices or SW vlan input packet processing if + * VLAN is not 0 (leave it there for 802.1p). + */ + if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER)) + ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); + grp->nr_vlans--; if (vlan->flags & VLAN_FLAG_GVRP) @@ -132,13 +139,6 @@ void unregister_vlan_dev(struct net_device *dev, struct list_head *head) call_rcu(&grp->rcu, vlan_rcu_free); } - /* Take it out of our own structures, but be sure to interlock with - * HW accelerating devices or SW vlan input packet processing if - * VLAN is not 0 (leave it there for 802.1p). - */ - if (vlan_id && (real_dev->features & NETIF_F_HW_VLAN_FILTER)) - ops->ndo_vlan_rx_kill_vid(real_dev, vlan_id); - /* Get rid of the vlan's reference to real_dev */ dev_put(real_dev); } diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c index d548456..b172407 100644 --- a/net/8021q/vlan_dev.c +++ b/net/8021q/vlan_dev.c @@ -70,6 +70,8 @@ vlan_dev_get_egress_qos_mask(struct net_device *dev, struct sk_buff *skb) { struct vlan_priority_tci_mapping *mp; + smp_rmb(); /* coupled with smp_wmb() in vlan_dev_set_egress_priority() */ + mp = vlan_dev_info(dev)->egress_priority_map[(skb->priority & 0xF)]; while (mp) { if (mp->priority == skb->priority) { @@ -230,6 +232,11 @@ int vlan_dev_set_egress_priority(const struct net_device *dev, np->next = mp; np->priority = skb_prio; np->vlan_qos = vlan_qos; + /* Before inserting this element in hash table, make sure all its fields + * are committed to memory. 
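 * (Classic lockless-publish pattern: fully initialize the new node,
 * issue the write barrier, and only then link it into the hash table,
 * so a reader can never observe a half-initialized entry.)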
+ * coupled with smp_rmb() in vlan_dev_get_egress_qos_mask() + */ + smp_wmb(); vlan->egress_priority_map[skb_prio & 0xF] = np; if (vlan_qos) vlan->nr_egress_mappings++; diff --git a/net/atm/common.c b/net/atm/common.c index cc859ad..4b263b8 100644 --- a/net/atm/common.c +++ b/net/atm/common.c @@ -500,6 +500,8 @@ int vcc_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, struct sk_buff *skb; int copied, error = -EINVAL; + msg->msg_namelen = 0; + if (sock->state != SS_CONNECTED) return -ENOTCONN; if (flags & ~MSG_DONTWAIT) /* only handle MSG_DONTWAIT */ diff --git a/net/ax25/af_ax25.c b/net/ax25/af_ax25.c index b04a6ef..86ac37f 100644 --- a/net/ax25/af_ax25.c +++ b/net/ax25/af_ax25.c @@ -1641,6 +1641,7 @@ static int ax25_recvmsg(struct kiocb *iocb, struct socket *sock, ax25_address src; const unsigned char *mac = skb_mac_header(skb); + memset(sax, 0, sizeof(struct full_sockaddr_ax25)); ax25_addr_parse(mac + 1, skb->data - mac - 1, &src, NULL, &digi, NULL, NULL); sax->sax25_family = AF_AX25; diff --git a/net/bluetooth/af_bluetooth.c b/net/bluetooth/af_bluetooth.c index 7c73a10..9047512 100644 --- a/net/bluetooth/af_bluetooth.c +++ b/net/bluetooth/af_bluetooth.c @@ -283,6 +283,8 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (flags & (MSG_OOB)) return -EOPNOTSUPP; + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, noblock, &err); if (!skb) { if (sk->sk_shutdown & RCV_SHUTDOWN) @@ -290,8 +292,6 @@ int bt_sock_recvmsg(struct kiocb *iocb, struct socket *sock, return err; } - msg->msg_namelen = 0; - copied = skb->len; if (len < copied) { msg->msg_flags |= MSG_TRUNC; diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 5a0ce73..38e3dec 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -1728,6 +1728,9 @@ static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn, BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d", conn, code, ident, dlen); + if (conn->mtu < L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE) + return NULL; + len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen; count = min_t(unsigned int, conn->mtu, len); diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c index 927f418..56fcfd1 100644 --- a/net/bluetooth/rfcomm/sock.c +++ b/net/bluetooth/rfcomm/sock.c @@ -624,6 +624,7 @@ static int rfcomm_sock_recvmsg(struct kiocb *iocb, struct socket *sock, if (test_and_clear_bit(RFCOMM_DEFER_SETUP, &d->flags)) { rfcomm_dlc_accept(d); + msg->msg_namelen = 0; return 0; } diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c index e78269d..c637a66 100644 --- a/net/bridge/br_multicast.c +++ b/net/bridge/br_multicast.c @@ -467,8 +467,9 @@ static struct sk_buff *br_ip6_multicast_alloc_query(struct net_bridge *br, skb_set_transport_header(skb, skb->len); mldq = (struct mld_msg *) icmp6_hdr(skb); - interval = ipv6_addr_any(group) ? br->multicast_last_member_interval : - br->multicast_query_response_interval; + interval = ipv6_addr_any(group) ? + br->multicast_query_response_interval : + br->multicast_last_member_interval; mldq->mld_type = ICMPV6_MGM_QUERY; mldq->mld_code = 0; @@ -1154,7 +1155,8 @@ static int br_ip6_multicast_query(struct net_bridge *br, mld2q = (struct mld2_query *)icmp6_hdr(skb); if (!mld2q->mld2q_nsrcs) group = &mld2q->mld2q_mca; - max_delay = mld2q->mld2q_mrc ? 
MLDV2_MRC(mld2q->mld2q_mrc) : 1; + + max_delay = max(msecs_to_jiffies(MLDV2_MRC(ntohs(mld2q->mld2q_mrc))), 1UL); } if (!group) diff --git a/net/bridge/br_private.h b/net/bridge/br_private.h index 7c1f3a0..fe1e299 100644 --- a/net/bridge/br_private.h +++ b/net/bridge/br_private.h @@ -492,6 +492,7 @@ extern struct net_bridge_port *br_get_port(struct net_bridge *br, extern void br_init_port(struct net_bridge_port *p); extern void br_become_designated_port(struct net_bridge_port *p); +extern void __br_set_forward_delay(struct net_bridge *br, unsigned long t); extern int br_set_forward_delay(struct net_bridge *br, unsigned long x); extern int br_set_hello_time(struct net_bridge *br, unsigned long x); extern int br_set_max_age(struct net_bridge *br, unsigned long x); diff --git a/net/bridge/br_stp.c b/net/bridge/br_stp.c index fcff622..81fb35a 100644 --- a/net/bridge/br_stp.c +++ b/net/bridge/br_stp.c @@ -191,7 +191,7 @@ static inline void br_record_config_information(struct net_bridge_port *p, p->designated_age = jiffies + bpdu->message_age; mod_timer(&p->message_age_timer, jiffies - + (p->br->max_age - bpdu->message_age)); + + (bpdu->max_age - bpdu->message_age)); } /* called under bridge lock */ @@ -517,18 +517,27 @@ int br_set_max_age(struct net_bridge *br, unsigned long val) } +void __br_set_forward_delay(struct net_bridge *br, unsigned long t) +{ + br->bridge_forward_delay = t; + if (br_is_root_bridge(br)) + br->forward_delay = br->bridge_forward_delay; +} + int br_set_forward_delay(struct net_bridge *br, unsigned long val) { unsigned long t = clock_t_to_jiffies(val); + int err = -ERANGE; + spin_lock_bh(&br->lock); if (br->stp_enabled != BR_NO_STP && (t < BR_MIN_FORWARD_DELAY || t > BR_MAX_FORWARD_DELAY)) - return -ERANGE; + goto unlock; - spin_lock_bh(&br->lock); - br->bridge_forward_delay = t; - if (br_is_root_bridge(br)) - br->forward_delay = br->bridge_forward_delay; + __br_set_forward_delay(br, t); + err = 0; + +unlock: spin_unlock_bh(&br->lock); - return 0; + return err; } diff --git a/net/bridge/br_stp_if.c b/net/bridge/br_stp_if.c index 6f615b8..d07e521 100644 --- a/net/bridge/br_stp_if.c +++ b/net/bridge/br_stp_if.c @@ -126,6 +126,14 @@ static void br_stp_start(struct net_bridge *br) char *envp[] = { NULL }; r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); + + spin_lock_bh(&br->lock); + + if (br->bridge_forward_delay < BR_MIN_FORWARD_DELAY) + __br_set_forward_delay(br, BR_MIN_FORWARD_DELAY); + else if (br->bridge_forward_delay < BR_MAX_FORWARD_DELAY) + __br_set_forward_delay(br, BR_MAX_FORWARD_DELAY); + if (r == 0) { br->stp_enabled = BR_USER_STP; br_debug(br, "userspace STP started\n"); @@ -134,10 +142,10 @@ static void br_stp_start(struct net_bridge *br) br_debug(br, "using kernel STP\n"); /* To start timers on any ports left in blocking */ - spin_lock_bh(&br->lock); br_port_state_selection(br); - spin_unlock_bh(&br->lock); } + + spin_unlock_bh(&br->lock); } static void br_stp_stop(struct net_bridge *br) diff --git a/net/bridge/br_stp_timer.c b/net/bridge/br_stp_timer.c index 3e96514..0abc6b1 100644 --- a/net/bridge/br_stp_timer.c +++ b/net/bridge/br_stp_timer.c @@ -106,7 +106,7 @@ static void br_tcn_timer_expired(unsigned long arg) br_debug(br, "tcn timer expired\n"); spin_lock(&br->lock); - if (br->dev->flags & IFF_UP) { + if (!br_is_root_bridge(br) && (br->dev->flags & IFF_UP)) { br_transmit_tcn(br); mod_timer(&br->tcn_timer,jiffies + br->bridge_hello_time); diff --git a/net/caif/caif_socket.c b/net/caif/caif_socket.c index a986280..53a8e37 100644 --- 
a/net/caif/caif_socket.c +++ b/net/caif/caif_socket.c @@ -320,6 +320,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error; diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c index e22671b..a80d94a 100644 --- a/net/caif/cfctrl.c +++ b/net/caif/cfctrl.c @@ -297,9 +297,10 @@ int cfctrl_linkup_request(struct cflayer *layer, count = cfctrl_cancel_req(&cfctrl->serv.layer, user_layer); - if (count != 1) + if (count != 1) { pr_err("Could not remove request (%d)", count); return -ENODEV; + } } return 0; } diff --git a/net/ceph/auth_none.c b/net/ceph/auth_none.c index 214c2bb..9f78c5f 100644 --- a/net/ceph/auth_none.c +++ b/net/ceph/auth_none.c @@ -39,6 +39,11 @@ static int should_authenticate(struct ceph_auth_client *ac) return xi->starting; } +static int build_request(struct ceph_auth_client *ac, void *buf, void *end) +{ + return 0; +} + /* * the generic auth code decode the global_id, and we carry no actual * authenticate state, so nothing happens here. @@ -107,6 +112,7 @@ static const struct ceph_auth_client_ops ceph_auth_none_ops = { .destroy = destroy, .is_authenticated = is_authenticated, .should_authenticate = should_authenticate, + .build_request = build_request, .handle_reply = handle_reply, .create_authorizer = ceph_auth_none_create_authorizer, .destroy_authorizer = ceph_auth_none_destroy_authorizer, diff --git a/net/compat.c b/net/compat.c index c578d93..68220c8 100644 --- a/net/compat.c +++ b/net/compat.c @@ -732,19 +732,25 @@ static unsigned char nas[21] = { asmlinkage long compat_sys_sendmsg(int fd, struct compat_msghdr __user *msg, unsigned flags) { - return sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; + return __sys_sendmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_sendmmsg(int fd, struct compat_mmsghdr __user *mmsg, unsigned vlen, unsigned int flags) { + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; return __sys_sendmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_recvmsg(int fd, struct compat_msghdr __user *msg, unsigned int flags) { - return sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; + return __sys_recvmsg(fd, (struct msghdr __user *)msg, flags | MSG_CMSG_COMPAT); } asmlinkage long compat_sys_recv(int fd, void __user *buf, size_t len, unsigned flags) @@ -766,6 +772,9 @@ asmlinkage long compat_sys_recvmmsg(int fd, struct compat_mmsghdr __user *mmsg, int datagrams; struct timespec ktspec; + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; + if (timeout == NULL) return __sys_recvmmsg(fd, (struct mmsghdr __user *)mmsg, vlen, flags | MSG_CMSG_COMPAT, NULL); diff --git a/net/core/dev.c b/net/core/dev.c index e5eba56..25ef86f 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1545,6 +1545,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb) skb->mark = 0; secpath_reset(skb); nf_reset(skb); + nf_reset_trace(skb); return netif_rx(skb); } EXPORT_SYMBOL_GPL(dev_forward_skb); diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c index c776af5..41775e4 100644 --- a/net/core/dev_addr_lists.c +++ b/net/core/dev_addr_lists.c @@ -56,7 +56,7 @@ static int __hw_addr_add_ex(struct netdev_hw_addr_list *list, ha->type = addr_type; ha->refcount = 1; ha->global_use = 
global; - ha->synced = false; + ha->synced = 0; list_add_tail_rcu(&ha->list, &list->list); list->count++; return 0; @@ -154,7 +154,7 @@ int __hw_addr_sync(struct netdev_hw_addr_list *to_list, addr_len, ha->type); if (err) break; - ha->synced = true; + ha->synced++; ha->refcount++; } else if (ha->refcount == 1) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); @@ -175,7 +175,7 @@ void __hw_addr_unsync(struct netdev_hw_addr_list *to_list, if (ha->synced) { __hw_addr_del(to_list, ha->addr, addr_len, ha->type); - ha->synced = false; + ha->synced--; __hw_addr_del(from_list, ha->addr, addr_len, ha->type); } diff --git a/net/core/neighbour.c b/net/core/neighbour.c index 34032f2..f268c34 100644 --- a/net/core/neighbour.c +++ b/net/core/neighbour.c @@ -237,7 +237,7 @@ static void neigh_flush_dev(struct neigh_table *tbl, struct net_device *dev) we must kill timers etc. and move it to safe state. */ - skb_queue_purge(&n->arp_queue); + __skb_queue_purge(&n->arp_queue); n->output = neigh_blackhole; if (n->nud_state & NUD_VALID) n->nud_state = NUD_NOARP; @@ -291,7 +291,7 @@ static struct neighbour *neigh_alloc(struct neigh_table *tbl) if (!n) goto out_entries; - skb_queue_head_init(&n->arp_queue); + __skb_queue_head_init(&n->arp_queue); rwlock_init(&n->lock); seqlock_init(&n->ha_lock); n->updated = n->used = now; @@ -712,7 +712,9 @@ void neigh_destroy(struct neighbour *neigh) hh_cache_put(hh); } - skb_queue_purge(&neigh->arp_queue); + write_lock_bh(&neigh->lock); + __skb_queue_purge(&neigh->arp_queue); + write_unlock_bh(&neigh->lock); dev_put(neigh->dev); neigh_parms_put(neigh->parms); @@ -864,7 +866,7 @@ static void neigh_invalidate(struct neighbour *neigh) neigh->ops->error_report(neigh, skb); write_lock(&neigh->lock); } - skb_queue_purge(&neigh->arp_queue); + __skb_queue_purge(&neigh->arp_queue); } /* Called when a timer expires for a neighbour entry. 
*/ @@ -1188,7 +1190,7 @@ int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, write_lock_bh(&neigh->lock); } - skb_queue_purge(&neigh->arp_queue); + __skb_queue_purge(&neigh->arp_queue); } out: if (update_isrouter) { diff --git a/net/core/netpoll.c b/net/core/netpoll.c index 207a178..c497137 100644 --- a/net/core/netpoll.c +++ b/net/core/netpoll.c @@ -927,15 +927,14 @@ EXPORT_SYMBOL_GPL(__netpoll_cleanup); void netpoll_cleanup(struct netpoll *np) { - if (!np->dev) - return; - rtnl_lock(); + if (!np->dev) + goto out; __netpoll_cleanup(np); - rtnl_unlock(); - dev_put(np->dev); np->dev = NULL; +out: + rtnl_unlock(); } EXPORT_SYMBOL(netpoll_cleanup); diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c index 81c16d3..b6aaa7a 100644 --- a/net/core/rtnetlink.c +++ b/net/core/rtnetlink.c @@ -1045,7 +1045,7 @@ static int rtnl_dump_ifinfo(struct sk_buff *skb, struct netlink_callback *cb) rcu_read_lock(); - if (nlmsg_parse(cb->nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + if (nlmsg_parse(cb->nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) @@ -1876,7 +1876,7 @@ static u16 rtnl_calcit(struct sk_buff *skb, struct nlmsghdr *nlh) u32 ext_filter_mask = 0; u16 min_ifinfo_dump_size = 0; - if (nlmsg_parse(nlh, sizeof(struct rtgenmsg), tb, IFLA_MAX, + if (nlmsg_parse(nlh, sizeof(struct ifinfomsg), tb, IFLA_MAX, ifla_policy) >= 0) { if (tb[IFLA_EXT_MASK]) ext_filter_mask = nla_get_u32(tb[IFLA_EXT_MASK]); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 4821df8..88b4898 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -1535,6 +1535,7 @@ int skb_splice_bits(struct sk_buff *skb, unsigned int offset, struct splice_pipe_desc spd = { .pages = pages, .partial = partial, + .nr_pages_max = MAX_SKB_FRAGS, .flags = flags, .ops = &sock_pipe_buf_ops, .spd_release = sock_spd_release, @@ -1581,7 +1582,7 @@ done: lock_sock(sk); } - splice_shrink_spd(pipe, &spd); + splice_shrink_spd(&spd); return ret; } diff --git a/net/core/sock.c b/net/core/sock.c index 3da11ba..b43e260 100644 --- a/net/core/sock.c +++ b/net/core/sock.c @@ -752,15 +752,20 @@ EXPORT_SYMBOL(sock_setsockopt); void cred_to_ucred(struct pid *pid, const struct cred *cred, - struct ucred *ucred) + struct ucred *ucred, bool use_effective) { ucred->pid = pid_vnr(pid); ucred->uid = ucred->gid = -1; if (cred) { struct user_namespace *current_ns = current_user_ns(); - ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); - ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); + if (use_effective) { + ucred->uid = user_ns_map_uid(current_ns, cred, cred->euid); + ucred->gid = user_ns_map_gid(current_ns, cred, cred->egid); + } else { + ucred->uid = user_ns_map_uid(current_ns, cred, cred->uid); + ucred->gid = user_ns_map_gid(current_ns, cred, cred->gid); + } } } EXPORT_SYMBOL_GPL(cred_to_ucred); @@ -921,7 +926,8 @@ int sock_getsockopt(struct socket *sock, int level, int optname, struct ucred peercred; if (len > sizeof(peercred)) len = sizeof(peercred); - cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, &peercred); + cred_to_ucred(sk->sk_peer_pid, sk->sk_peer_cred, + &peercred, true); if (copy_to_user(optval, &peercred, len)) return -EFAULT; goto lenout; @@ -1011,18 +1017,6 @@ static void sock_copy(struct sock *nsk, const struct sock *osk) #endif } -/* - * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes - * un-modified. Special care is taken when initializing object to zero. 
- */ -static inline void sk_prot_clear_nulls(struct sock *sk, int size) -{ - if (offsetof(struct sock, sk_node.next) != 0) - memset(sk, 0, offsetof(struct sock, sk_node.next)); - memset(&sk->sk_node.pprev, 0, - size - offsetof(struct sock, sk_node.pprev)); -} - void sk_prot_clear_portaddr_nulls(struct sock *sk, int size) { unsigned long nulls1, nulls2; diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c index 77a65f0..f0bdd36 100644 --- a/net/core/sysctl_net_core.c +++ b/net/core/sysctl_net_core.c @@ -19,6 +19,9 @@ #include <net/sock.h> #include <net/net_ratelimit.h> +static int zero = 0; +static int ushort_max = USHRT_MAX; + #ifdef CONFIG_RPS static int rps_sock_flow_sysctl(ctl_table *table, int write, void __user *buffer, size_t *lenp, loff_t *ppos) @@ -192,7 +195,9 @@ static struct ctl_table netns_core_table[] = { .data = &init_net.core.sysctl_somaxconn, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec + .extra1 = &zero, + .extra2 = &ushort_max, + .proc_handler = proc_dointvec_minmax }, { } }; diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c index 530787b..238fc3b 100644 --- a/net/ipv4/esp4.c +++ b/net/ipv4/esp4.c @@ -137,8 +137,6 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) /* skb is pure payload to encrypt */ - err = -ENOMEM; - esp = x->data; aead = esp->aead; alen = crypto_aead_authsize(aead); @@ -174,8 +172,10 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb) } tmp = esp_alloc_tmp(aead, nfrags + sglists, seqhilen); - if (!tmp) + if (!tmp) { + err = -ENOMEM; goto error; + } seqhi = esp_tmp_seqhi(tmp); iv = esp_tmp_iv(aead, tmp, seqhilen); diff --git a/net/ipv4/fib_trie.c b/net/ipv4/fib_trie.c index 0d884eb..e11f4a5 100644 --- a/net/ipv4/fib_trie.c +++ b/net/ipv4/fib_trie.c @@ -72,7 +72,6 @@ #include <linux/init.h> #include <linux/list.h> #include <linux/slab.h> -#include <linux/prefetch.h> #include <net/net_namespace.h> #include <net/ip.h> #include <net/protocol.h> @@ -1772,10 +1771,8 @@ static struct leaf *leaf_walk_rcu(struct tnode *p, struct rt_trie_node *c) if (!c) continue; - if (IS_LEAF(c)) { - prefetch(rcu_dereference_rtnl(p->child[idx])); + if (IS_LEAF(c)) return (struct leaf *) c; - } /* Rescan start scanning in new node */ p = (struct tnode *) c; diff --git a/net/ipv4/igmp.c b/net/ipv4/igmp.c index e0d42db..508bae1 100644 --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c @@ -342,7 +342,7 @@ static struct sk_buff *igmpv3_newpack(struct net_device *dev, int size) pip->saddr = fl4.saddr; pip->protocol = IPPROTO_IGMP; pip->tot_len = 0; /* filled in later */ - ip_select_ident(pip, &rt->dst, NULL); + ip_select_ident(skb, &rt->dst, NULL); ((u8*)&pip[1])[0] = IPOPT_RA; ((u8*)&pip[1])[1] = 4; ((u8*)&pip[1])[2] = 0; @@ -683,7 +683,7 @@ static int igmp_send_report(struct in_device *in_dev, struct ip_mc_list *pmc, iph->daddr = dst; iph->saddr = fl4.saddr; iph->protocol = IPPROTO_IGMP; - ip_select_ident(iph, &rt->dst, NULL); + ip_select_ident(skb, &rt->dst, NULL); ((u8*)&iph[1])[0] = IPOPT_RA; ((u8*)&iph[1])[1] = 4; ((u8*)&iph[1])[2] = 0; @@ -705,7 +705,7 @@ static void igmp_gq_timer_expire(unsigned long data) in_dev->mr_gq_running = 0; igmpv3_send_report(in_dev, NULL); - __in_dev_put(in_dev); + in_dev_put(in_dev); } static void igmp_ifc_timer_expire(unsigned long data) @@ -717,7 +717,7 @@ static void igmp_ifc_timer_expire(unsigned long data) in_dev->mr_ifc_count--; igmp_ifc_start_timer(in_dev, IGMP_Unsolicited_Report_Interval); } - __in_dev_put(in_dev); + in_dev_put(in_dev); } static void igmp_ifc_event(struct 
in_device *in_dev) diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c index 6877645..996df36 100644 --- a/net/ipv4/inetpeer.c +++ b/net/ipv4/inetpeer.c @@ -31,8 +31,8 @@ * At the moment of writing this notes identifier of IP packets is generated * to be unpredictable using this code only for packets subjected * (actually or potentially) to defragmentation. I.e. DF packets less than - * PMTU in size uses a constant ID and do not use this code (see - * ip_select_ident() in include/net/ip.h). + * PMTU in size when local fragmentation is disabled use a constant ID and do + * not use this code (see ip_select_ident() in include/net/ip.h). * * Route cache entries hold references to our nodes. * New cache entries get references via lookup by destination IP address in diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c index da38472..daf19ad 100644 --- a/net/ipv4/ip_fragment.c +++ b/net/ipv4/ip_fragment.c @@ -251,8 +251,7 @@ static void ip_expire(unsigned long arg) if (!head->dev) goto out_rcu_unlock; - /* skb dst is stale, drop it, and perform route lookup again */ - skb_dst_drop(head); + /* skb has no dst, perform route lookup again */ iph = ip_hdr(head); err = ip_route_input_noref(head, iph->daddr, iph->saddr, iph->tos, head->dev); @@ -517,8 +516,16 @@ found: qp->q.last_in |= INET_FRAG_FIRST_IN; if (qp->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - qp->q.meat == qp->q.len) - return ip_frag_reasm(qp, prev, dev); + qp->q.meat == qp->q.len) { + unsigned long orefdst = skb->_skb_refdst; + + skb->_skb_refdst = 0UL; + err = ip_frag_reasm(qp, prev, dev); + skb->_skb_refdst = orefdst; + return err; + } + + skb_dst_drop(skb); write_lock(&ip4_frags.lock); list_move_tail(&qp->q.lru_list, &qp->q.net->lru_list); diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c index d7bb94c..91ddbd4 100644 --- a/net/ipv4/ip_gre.c +++ b/net/ipv4/ip_gre.c @@ -716,6 +716,7 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev tiph = &tunnel->parms.iph; } + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); if ((dst = tiph->daddr) == 0) { /* NBMA tunnel */ @@ -853,7 +854,6 @@ static netdev_tx_t ipgre_tunnel_xmit(struct sk_buff *skb, struct net_device *dev skb_reset_transport_header(skb); skb_push(skb, gre_hlen); skb_reset_network_header(skb); - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_drop(skb); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 6daaf60..69b32cb 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -161,7 +161,7 @@ int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk, iph->daddr = (opt && opt->opt.srr ? 
opt->opt.faddr : daddr); iph->saddr = saddr; iph->protocol = sk->sk_protocol; - ip_select_ident(iph, &rt->dst, sk); + ip_select_ident(skb, &rt->dst, sk); if (opt && opt->opt.optlen) { iph->ihl += opt->opt.optlen>>2; @@ -411,7 +411,7 @@ packet_routed: ip_options_build(skb, &inet_opt->opt, inet->inet_daddr, rt, 0); } - ip_select_ident_more(iph, &rt->dst, sk, + ip_select_ident_more(skb, &rt->dst, sk, (skb_shinfo(skb)->gso_segs ?: 1) - 1); skb->priority = sk->sk_priority; @@ -1354,7 +1354,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk, iph->ihl = 5; iph->tos = inet->tos; iph->frag_off = df; - ip_select_ident(iph, &rt->dst, sk); + ip_select_ident(skb, &rt->dst, sk); iph->ttl = ttl; iph->protocol = sk->sk_protocol; ip_copy_addrs(iph, fl4); diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c index 6f06f7f..6d1165f 100644 --- a/net/ipv4/ipip.c +++ b/net/ipv4/ipip.c @@ -448,6 +448,7 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) if (tos & 1) tos = old_iph->tos; + memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); if (!dst) { /* NBMA tunnel */ if ((rt = skb_rtable(skb)) == NULL) { @@ -531,7 +532,6 @@ static netdev_tx_t ipip_tunnel_xmit(struct sk_buff *skb, struct net_device *dev) skb->transport_header = skb->network_header; skb_push(skb, sizeof(struct iphdr)); skb_reset_network_header(skb); - memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED | IPSKB_REROUTED); skb_dst_drop(skb); diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c index dc89714..033ea7e 100644 --- a/net/ipv4/ipmr.c +++ b/net/ipv4/ipmr.c @@ -1575,7 +1575,7 @@ static void ip_encap(struct sk_buff *skb, __be32 saddr, __be32 daddr) iph->protocol = IPPROTO_IPIP; iph->ihl = 5; iph->tot_len = htons(skb->len); - ip_select_ident(iph, skb_dst(skb), NULL); + ip_select_ident(skb, skb_dst(skb), NULL); ip_send_check(iph); memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt)); diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c index 4b32679..14b0486 100644 --- a/net/ipv4/raw.c +++ b/net/ipv4/raw.c @@ -379,7 +379,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4, iph->check = 0; iph->tot_len = htons(length); if (!iph->id) - ip_select_ident(iph, &rt->dst, NULL); + ip_select_ident(skb, &rt->dst, NULL); iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl); } diff --git a/net/ipv4/syncookies.c b/net/ipv4/syncookies.c index 3f6a67d..62d9020 100644 --- a/net/ipv4/syncookies.c +++ b/net/ipv4/syncookies.c @@ -345,8 +345,8 @@ struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb, * hasn't changed since we received the original syn, but I see * no easy way to do this. */ - flowi4_init_output(&fl4, 0, sk->sk_mark, RT_CONN_FLAGS(sk), - RT_SCOPE_UNIVERSE, IPPROTO_TCP, + flowi4_init_output(&fl4, sk->sk_bound_dev_if, sk->sk_mark, + RT_CONN_FLAGS(sk), RT_SCOPE_UNIVERSE, IPPROTO_TCP, inet_sk_flowi_flags(sk), (opt && opt->srr) ? 
opt->faddr : ireq->rmt_addr, ireq->loc_addr, th->source, th->dest, diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c index e182501..3c74912 100644 --- a/net/ipv4/sysctl_net_ipv4.c +++ b/net/ipv4/sysctl_net_ipv4.c @@ -32,6 +32,8 @@ static int tcp_adv_win_scale_min = -31; static int tcp_adv_win_scale_max = 31; static int ip_ttl_min = 1; static int ip_ttl_max = 255; +static int tcp_syn_retries_min = 1; +static int tcp_syn_retries_max = MAX_TCP_SYNCNT; static int ip_ping_group_range_min[] = { 0, 0 }; static int ip_ping_group_range_max[] = { GID_T_MAX, GID_T_MAX }; @@ -246,7 +248,9 @@ static struct ctl_table ipv4_table[] = { .data = &sysctl_tcp_syn_retries, .maxlen = sizeof(int), .mode = 0644, - .proc_handler = proc_dointvec + .proc_handler = proc_dointvec_minmax, + .extra1 = &tcp_syn_retries_min, + .extra2 = &tcp_syn_retries_max }, { .procname = "tcp_synack_retries", diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c index 1a9a57a..4349e46 100644 --- a/net/ipv4/tcp.c +++ b/net/ipv4/tcp.c @@ -3058,7 +3058,11 @@ int tcp_md5_hash_skb_data(struct tcp_md5sig_pool *hp, for (i = 0; i < shi->nr_frags; ++i) { const struct skb_frag_struct *f = &shi->frags[i]; - sg_set_page(&sg, f->page, f->size, f->page_offset); + unsigned int offset = f->page_offset; + struct page *page = f->page + (offset >> PAGE_SHIFT); + + sg_set_page(&sg, page, f->size, + offset_in_page(offset)); if (crypto_hash_update(desc, &sg, f->size)) return 1; } diff --git a/net/ipv4/tcp_cubic.c b/net/ipv4/tcp_cubic.c index f376b05..b78eac2 100644 --- a/net/ipv4/tcp_cubic.c +++ b/net/ipv4/tcp_cubic.c @@ -204,8 +204,8 @@ static u32 cubic_root(u64 a) */ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) { - u64 offs; - u32 delta, t, bic_target, max_cnt; + u32 delta, bic_target, max_cnt; + u64 offs, t; ca->ack_cnt++; /* count the number of ACKs */ @@ -248,9 +248,11 @@ static inline void bictcp_update(struct bictcp *ca, u32 cwnd) * if the cwnd < 1 million packets !!! 
*/ + t = (s32)(tcp_time_stamp - ca->epoch_start); + t += msecs_to_jiffies(ca->delay_min >> 3); /* change the unit from HZ to bictcp_HZ */ - t = ((tcp_time_stamp + msecs_to_jiffies(ca->delay_min>>3) - - ca->epoch_start) << BICTCP_HZ) / HZ; + t <<= BICTCP_HZ; + do_div(t, HZ); if (t < ca->bic_K) /* t - K */ offs = ca->bic_K - t; @@ -412,7 +414,7 @@ static void bictcp_acked(struct sock *sk, u32 cnt, s32 rtt_us) return; /* Discard delay samples right after fast recovery */ - if ((s32)(tcp_time_stamp - ca->epoch_start) < HZ) + if (ca->epoch_start && (s32)(tcp_time_stamp - ca->epoch_start) < HZ) return; delay = (rtt_us << 3) / USEC_PER_MSEC; diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c index 959405e..c91002f 100644 --- a/net/ipv4/tcp_input.c +++ b/net/ipv4/tcp_input.c @@ -116,6 +116,7 @@ int sysctl_tcp_default_init_rwnd __read_mostly = TCP_DEFAULT_INIT_RCVWND; #define FLAG_DSACKING_ACK 0x800 /* SACK blocks contained D-SACK info */ #define FLAG_NONHEAD_RETRANS_ACKED 0x1000 /* Non-head rexmitted data was ACKed */ #define FLAG_SACK_RENEGING 0x2000 /* snd_una advanced to a sacked seq */ +#define FLAG_UPDATE_TS_RECENT 0x4000 /* tcp_replace_ts_recent() */ #define FLAG_ACKED (FLAG_DATA_ACKED|FLAG_SYN_ACKED) #define FLAG_NOT_DUP (FLAG_DATA|FLAG_WIN_UPDATE|FLAG_ACKED) @@ -3659,6 +3660,27 @@ static void tcp_send_challenge_ack(struct sock *sk) } } +static void tcp_store_ts_recent(struct tcp_sock *tp) +{ + tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; + tp->rx_opt.ts_recent_stamp = get_seconds(); +} + +static void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) +{ + if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { + /* PAWS bug workaround wrt. ACK frames, the PAWS discard + * extra check below makes sure this can only happen + * for pure ACK frames. -DaveM + * + * Not only, also it occurs for expired timestamps. + */ + + if (tcp_paws_check(&tp->rx_opt, 0)) + tcp_store_ts_recent(tp); + } +} + /* This routine deals with incoming acks, but not outgoing ones. */ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) { @@ -3705,6 +3727,12 @@ static int tcp_ack(struct sock *sk, struct sk_buff *skb, int flag) prior_fackets = tp->fackets_out; prior_in_flight = tcp_packets_in_flight(tp); + /* ts_recent update must be made after we are sure that the packet + * is in window. + */ + if (flag & FLAG_UPDATE_TS_RECENT) + tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); + if (!(flag & FLAG_SLOWPATH) && after(ack, prior_snd_una)) { /* Window is constant, pure forward advance. * No more checks are required. @@ -3991,27 +4019,6 @@ u8 *tcp_parse_md5sig_option(struct tcphdr *th) EXPORT_SYMBOL(tcp_parse_md5sig_option); #endif -static inline void tcp_store_ts_recent(struct tcp_sock *tp) -{ - tp->rx_opt.ts_recent = tp->rx_opt.rcv_tsval; - tp->rx_opt.ts_recent_stamp = get_seconds(); -} - -static inline void tcp_replace_ts_recent(struct tcp_sock *tp, u32 seq) -{ - if (tp->rx_opt.saw_tstamp && !after(seq, tp->rcv_wup)) { - /* PAWS bug workaround wrt. ACK frames, the PAWS discard - * extra check below makes sure this can only happen - * for pure ACK frames. -DaveM - * - * Not only, also it occurs for expired timestamps. - */ - - if (tcp_paws_check(&tp->rx_opt, 0)) - tcp_store_ts_recent(tp); - } -} - /* Sorry, PAWS as specified is broken wrt. pure-ACKs -DaveM * * It is not fatal. 
If this ACK does _not_ change critical state (seqs, window) @@ -5480,14 +5487,10 @@ slow_path: return 0; step5: - if (th->ack && tcp_ack(sk, skb, FLAG_SLOWPATH) < 0) + if (th->ack && + tcp_ack(sk, skb, FLAG_SLOWPATH | FLAG_UPDATE_TS_RECENT) < 0) goto discard; - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - tcp_rcv_rtt_measure_ts(sk, skb); /* Process urgent data. */ @@ -5851,7 +5854,8 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, /* step 5: check the ACK field */ if (th->ack) { - int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH) > 0; + int acceptable = tcp_ack(sk, skb, FLAG_SLOWPATH | + FLAG_UPDATE_TS_RECENT) > 0; switch (sk->sk_state) { case TCP_SYN_RECV: @@ -5964,11 +5968,6 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb, } else goto discard; - /* ts_recent update must be made after we are sure that the packet - * is in window. - */ - tcp_replace_ts_recent(tp, TCP_SKB_CB(skb)->seq); - /* step 6: check the URG bit */ tcp_urg(sk, skb, th); diff --git a/net/ipv4/tcp_output.c b/net/ipv4/tcp_output.c index 63fb1ea..acd5c87 100644 --- a/net/ipv4/tcp_output.c +++ b/net/ipv4/tcp_output.c @@ -832,11 +832,13 @@ static int tcp_transmit_skb(struct sock *sk, struct sk_buff *skb, int clone_it, &md5); tcp_header_size = tcp_options_size + sizeof(struct tcphdr); - if (tcp_packets_in_flight(tp) == 0) { + if (tcp_packets_in_flight(tp) == 0) tcp_ca_event(sk, CA_EVENT_TX_START); - skb->ooo_okay = 1; - } else - skb->ooo_okay = 0; + + /* if no packet is in qdisc/device queue, then allow XPS to select + * another queue. + */ + skb->ooo_okay = sk_wmem_alloc_get(sk) == 0; skb_push(skb, tcp_header_size); skb_reset_transport_header(skb); diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c index 68277fd..966c3e1 100644 --- a/net/ipv4/udp.c +++ b/net/ipv4/udp.c @@ -765,7 +765,7 @@ send: /* * Push out all pending data as one UDP datagram. Socket is locked. */ -static int udp_push_pending_frames(struct sock *sk) +int udp_push_pending_frames(struct sock *sk) { struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); @@ -784,6 +784,7 @@ out: up->pending = 0; return err; } +EXPORT_SYMBOL(udp_push_pending_frames); int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg, size_t len) diff --git a/net/ipv4/xfrm4_mode_tunnel.c b/net/ipv4/xfrm4_mode_tunnel.c index ed4bf11..938553e 100644 --- a/net/ipv4/xfrm4_mode_tunnel.c +++ b/net/ipv4/xfrm4_mode_tunnel.c @@ -54,7 +54,7 @@ static int xfrm4_mode_tunnel_output(struct xfrm_state *x, struct sk_buff *skb) top_iph->frag_off = (flags & XFRM_STATE_NOPMTUDISC) ? 0 : (XFRM_MODE_SKB_CB(skb)->frag_off & htons(IP_DF)); - ip_select_ident(top_iph, dst->child, NULL); + ip_select_ident(skb, dst->child, NULL); top_iph->ttl = ip4_dst_hoplimit(dst->child); diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c index 894efae..66905eb 100644 --- a/net/ipv6/addrconf.c +++ b/net/ipv6/addrconf.c @@ -912,12 +912,10 @@ retry: if (ifp->flags & IFA_F_OPTIMISTIC) addr_flags |= IFA_F_OPTIMISTIC; - ift = !max_addresses || - ipv6_count_addresses(idev) < max_addresses ? 
- ipv6_add_addr(idev, &addr, tmp_plen, - ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, - addr_flags) : NULL; - if (!ift || IS_ERR(ift)) { + ift = ipv6_add_addr(idev, &addr, tmp_plen, + ipv6_addr_type(&addr)&IPV6_ADDR_SCOPE_MASK, + addr_flags); + if (IS_ERR(ift)) { in6_ifa_put(ifp); in6_dev_put(idev); printk(KERN_INFO @@ -1235,6 +1233,23 @@ try_nextdev: } EXPORT_SYMBOL(ipv6_dev_get_saddr); +int __ipv6_get_lladdr(struct inet6_dev *idev, struct in6_addr *addr, + unsigned char banned_flags) +{ + struct inet6_ifaddr *ifp; + int err = -EADDRNOTAVAIL; + + list_for_each_entry(ifp, &idev->addr_list, if_list) { + if (ifp->scope == IFA_LINK && + !(ifp->flags & banned_flags)) { + ipv6_addr_copy(addr, &ifp->addr); + err = 0; + break; + } + } + return err; +} + int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, unsigned char banned_flags) { @@ -1244,17 +1259,8 @@ int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr, rcu_read_lock(); idev = __in6_dev_get(dev); if (idev) { - struct inet6_ifaddr *ifp; - read_lock_bh(&idev->lock); - list_for_each_entry(ifp, &idev->addr_list, if_list) { - if (ifp->scope == IFA_LINK && - !(ifp->flags & banned_flags)) { - ipv6_addr_copy(addr, &ifp->addr); - err = 0; - break; - } - } + err = __ipv6_get_lladdr(idev, addr, banned_flags); read_unlock_bh(&idev->lock); } rcu_read_unlock(); @@ -2354,6 +2360,9 @@ static void sit_add_v4_addrs(struct inet6_dev *idev) static void init_loopback(struct net_device *dev) { struct inet6_dev *idev; + struct net_device *sp_dev; + struct inet6_ifaddr *sp_ifa; + struct rt6_info *sp_rt; /* ::1 */ @@ -2365,6 +2374,35 @@ static void init_loopback(struct net_device *dev) } add_addr(idev, &in6addr_loopback, 128, IFA_HOST); + + /* Add routes to other interface's IPv6 addresses */ + for_each_netdev(dev_net(dev), sp_dev) { + if (!strcmp(sp_dev->name, dev->name)) + continue; + + idev = __in6_dev_get(sp_dev); + if (!idev) + continue; + + read_lock_bh(&idev->lock); + list_for_each_entry(sp_ifa, &idev->addr_list, if_list) { + + if (sp_ifa->flags & (IFA_F_DADFAILED | IFA_F_TENTATIVE)) + continue; + + if (sp_ifa->rt) + continue; + + sp_rt = addrconf_dst_alloc(idev, &sp_ifa->addr, 0); + + /* Failure cases are ignored */ + if (!IS_ERR(sp_rt)) { + sp_ifa->rt = sp_rt; + ip6_ins_rt(sp_rt); + } + } + read_unlock_bh(&idev->lock); + } } static void addrconf_add_linklocal(struct inet6_dev *idev, const struct in6_addr *addr) diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c index 6a12eda..f013586 100644 --- a/net/ipv6/icmp.c +++ b/net/ipv6/icmp.c @@ -932,6 +932,14 @@ static const struct icmp6_err { .err = ECONNREFUSED, .fatal = 1, }, + { /* POLICY_FAIL */ + .err = EACCES, + .fatal = 1, + }, + { /* REJECT_ROUTE */ + .err = EACCES, + .fatal = 1, + }, }; int icmpv6_err_convert(u8 type, u8 code, int *err) @@ -943,7 +951,7 @@ int icmpv6_err_convert(u8 type, u8 code, int *err) switch (type) { case ICMPV6_DEST_UNREACH: fatal = 1; - if (code <= ICMPV6_PORT_UNREACH) { + if (code < ARRAY_SIZE(tab_unreach)) { *err = tab_unreach[code].err; fatal = tab_unreach[code].fatal; } diff --git a/net/ipv6/inet6_connection_sock.c b/net/ipv6/inet6_connection_sock.c index ffb131fb..7ecbcc0 100644 --- a/net/ipv6/inet6_connection_sock.c +++ b/net/ipv6/inet6_connection_sock.c @@ -86,7 +86,7 @@ struct dst_entry *inet6_csk_route_req(struct sock *sk, * request_sock (formerly open request) hash tables. 
*/ static u32 inet6_synq_hash(const struct in6_addr *raddr, const __be16 rport, - const u32 rnd, const u16 synq_hsize) + const u32 rnd, const u32 synq_hsize) { u32 c; diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 320d91d..1a503ab 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -866,14 +866,22 @@ static struct fib6_node * fib6_lookup_1(struct fib6_node *root, if (ipv6_prefix_equal(&key->addr, args->addr, key->plen)) { #ifdef CONFIG_IPV6_SUBTREES - if (fn->subtree) - fn = fib6_lookup_1(fn->subtree, args + 1); + if (fn->subtree) { + struct fib6_node *sfn; + sfn = fib6_lookup_1(fn->subtree, + args + 1); + if (!sfn) + goto backtrack; + fn = sfn; + } #endif - if (!fn || fn->fn_flags & RTN_RTINFO) + if (fn->fn_flags & RTN_RTINFO) return fn; } } - +#ifdef CONFIG_IPV6_SUBTREES +backtrack: +#endif if (fn->fn_flags & RTN_ROOT) break; diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c index ae9f6d4..279d153 100644 --- a/net/ipv6/ip6_output.c +++ b/net/ipv6/ip6_output.c @@ -925,11 +925,17 @@ static struct dst_entry *ip6_sk_dst_check(struct sock *sk, const struct flowi6 *fl6) { struct ipv6_pinfo *np = inet6_sk(sk); - struct rt6_info *rt = (struct rt6_info *)dst; + struct rt6_info *rt; if (!dst) goto out; + if (dst->ops->family != AF_INET6) { + dst_release(dst); + return NULL; + } + + rt = (struct rt6_info *)dst; /* Yes, checking route validity in not connected * case is not very simple. Take into account, * that we do not support routing by source, TOS, @@ -1135,6 +1141,8 @@ static inline int ip6_ufo_append_data(struct sock *sk, * udp datagram */ if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) { + struct frag_hdr fhdr; + skb = sock_alloc_send_skb(sk, hh_len + fragheaderlen + transhdrlen + 20, (flags & MSG_DONTWAIT), &err); @@ -1155,12 +1163,6 @@ static inline int ip6_ufo_append_data(struct sock *sk, skb->ip_summed = CHECKSUM_PARTIAL; skb->csum = 0; - } - - err = skb_append_datato_frags(sk,skb, getfrag, from, - (length - transhdrlen)); - if (!err) { - struct frag_hdr fhdr; /* Specify the length of each IPv6 datagram fragment. * It has to be a multiple of 8. 
@@ -1171,15 +1173,10 @@ static inline int ip6_ufo_append_data(struct sock *sk, ipv6_select_ident(&fhdr, &rt->rt6i_dst.addr); skb_shinfo(skb)->ip6_frag_id = fhdr.identification; __skb_queue_tail(&sk->sk_write_queue, skb); - - return 0; } - /* There is not enough support do UPD LSO, - * so follow normal path - */ - kfree_skb(skb); - return err; + return skb_append_datato_frags(sk, skb, getfrag, from, + (length - transhdrlen)); } static inline struct ipv6_opt_hdr *ip6_opt_dup(struct ipv6_opt_hdr *src, @@ -1248,7 +1245,7 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, if (WARN_ON(np->cork.opt)) return -EINVAL; - np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation); + np->cork.opt = kzalloc(opt->tot_len, sk->sk_allocation); if (unlikely(np->cork.opt == NULL)) return -ENOBUFS; @@ -1348,27 +1345,27 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to, * --yoshfuji */ - cork->length += length; - if (length > mtu) { - int proto = sk->sk_protocol; - if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)){ - ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); - return -EMSGSIZE; - } - - if (proto == IPPROTO_UDP && - (rt->dst.dev->features & NETIF_F_UFO)) { + if ((length > mtu) && dontfrag && (sk->sk_protocol == IPPROTO_UDP || + sk->sk_protocol == IPPROTO_RAW)) { + ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen); + return -EMSGSIZE; + } - err = ip6_ufo_append_data(sk, getfrag, from, length, - hh_len, fragheaderlen, - transhdrlen, mtu, flags, rt); - if (err) - goto error; - return 0; - } + skb = skb_peek_tail(&sk->sk_write_queue); + cork->length += length; + if (((length > mtu) || + (skb && skb_is_gso(skb))) && + (sk->sk_protocol == IPPROTO_UDP) && + (rt->dst.dev->features & NETIF_F_UFO)) { + err = ip6_ufo_append_data(sk, getfrag, from, length, + hh_len, fragheaderlen, + transhdrlen, mtu, flags, rt); + if (err) + goto error; + return 0; } - if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) + if (!skb) goto alloc_new_skb; while (length > 0) { diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c index def0538..4aa8a19 100644 --- a/net/ipv6/ip6mr.c +++ b/net/ipv6/ip6mr.c @@ -256,10 +256,12 @@ static void __net_exit ip6mr_rules_exit(struct net *net) { struct mr6_table *mrt, *next; + rtnl_lock(); list_for_each_entry_safe(mrt, next, &net->ipv6.mr6_tables, list) { list_del(&mrt->list); ip6mr_free_table(mrt); } + rtnl_unlock(); fib_rules_unregister(net->ipv6.mr6_rules_ops); } #else @@ -286,7 +288,10 @@ static int __net_init ip6mr_rules_init(struct net *net) static void __net_exit ip6mr_rules_exit(struct net *net) { + rtnl_lock(); ip6mr_free_table(net->ipv6.mrt6); + net->ipv6.mrt6 = NULL; + rtnl_unlock(); } #endif diff --git a/net/ipv6/mcast.c b/net/ipv6/mcast.c index f2d74ea..d20a9be 100644 --- a/net/ipv6/mcast.c +++ b/net/ipv6/mcast.c @@ -1334,8 +1334,9 @@ mld_scount(struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) return scount; } -static struct sk_buff *mld_newpack(struct net_device *dev, int size) +static struct sk_buff *mld_newpack(struct inet6_dev *idev, int size) { + struct net_device *dev = idev->dev; struct net *net = dev_net(dev); struct sock *sk = net->ipv6.igmp_sk; struct sk_buff *skb; @@ -1358,7 +1359,7 @@ static struct sk_buff *mld_newpack(struct net_device *dev, int size) skb_reserve(skb, LL_RESERVED_SPACE(dev)); - if (ipv6_get_lladdr(dev, &addr_buf, IFA_F_TENTATIVE)) { + if (__ipv6_get_lladdr(idev, &addr_buf, IFA_F_TENTATIVE)) { /* <draft-ietf-magma-mld-source-05.txt>: * use unspecified address as the source address * when a valid 
link-local address is not available. @@ -1461,7 +1462,7 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, struct mld2_grec *pgr; if (!skb) - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(pmc->idev, dev->mtu); if (!skb) return NULL; pgr = (struct mld2_grec *)skb_put(skb, sizeof(struct mld2_grec)); @@ -1481,7 +1482,8 @@ static struct sk_buff *add_grhead(struct sk_buff *skb, struct ifmcaddr6 *pmc, static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, int type, int gdeleted, int sdeleted) { - struct net_device *dev = pmc->idev->dev; + struct inet6_dev *idev = pmc->idev; + struct net_device *dev = idev->dev; struct mld2_report *pmr; struct mld2_grec *pgr = NULL; struct ip6_sf_list *psf, *psf_next, *psf_prev, **psf_list; @@ -1510,7 +1512,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, AVAILABLE(skb) < grec_size(pmc, type, gdeleted, sdeleted)) { if (skb) mld_sendpack(skb); - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(idev, dev->mtu); } } first = 1; @@ -1537,7 +1539,7 @@ static struct sk_buff *add_grec(struct sk_buff *skb, struct ifmcaddr6 *pmc, pgr->grec_nsrcs = htons(scount); if (skb) mld_sendpack(skb); - skb = mld_newpack(dev, dev->mtu); + skb = mld_newpack(idev, dev->mtu); first = 1; scount = 0; } @@ -1592,8 +1594,8 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) struct sk_buff *skb = NULL; int type; + read_lock_bh(&idev->lock); if (!pmc) { - read_lock_bh(&idev->lock); for (pmc=idev->mc_list; pmc; pmc=pmc->next) { if (pmc->mca_flags & MAF_NOREPORT) continue; @@ -1605,7 +1607,6 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } - read_unlock_bh(&idev->lock); } else { spin_lock_bh(&pmc->mca_lock); if (pmc->mca_sfcount[MCAST_EXCLUDE]) @@ -1615,6 +1616,7 @@ static void mld_send_report(struct inet6_dev *idev, struct ifmcaddr6 *pmc) skb = add_grec(skb, pmc, type, 0, 0); spin_unlock_bh(&pmc->mca_lock); } + read_unlock_bh(&idev->lock); if (skb) mld_sendpack(skb); } @@ -2157,7 +2159,7 @@ static void mld_gq_timer_expire(unsigned long data) idev->mc_gq_running = 0; mld_send_report(idev, NULL); - __in6_dev_put(idev); + in6_dev_put(idev); } static void mld_ifc_timer_expire(unsigned long data) @@ -2170,7 +2172,7 @@ static void mld_ifc_timer_expire(unsigned long data) if (idev->mc_ifc_count) mld_ifc_start_timer(idev, idev->mc_maxdelay); } - __in6_dev_put(idev); + in6_dev_put(idev); } static void mld_ifc_event(struct inet6_dev *idev) diff --git a/net/ipv6/ndisc.c b/net/ipv6/ndisc.c index 31ba78c..f01c153 100644 --- a/net/ipv6/ndisc.c +++ b/net/ipv6/ndisc.c @@ -456,7 +456,6 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, struct sk_buff *skb; struct icmp6hdr *hdr; int len; - int err; u8 *opt; if (!dev->addr_len) @@ -466,14 +465,12 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, if (llinfo) len += ndisc_opt_addr_space(dev); - skb = sock_alloc_send_skb(sk, - (MAX_HEADER + sizeof(struct ipv6hdr) + - len + LL_ALLOCATED_SPACE(dev)), - 1, &err); + skb = alloc_skb((MAX_HEADER + sizeof(struct ipv6hdr) + + len + LL_ALLOCATED_SPACE(dev)), GFP_ATOMIC); if (!skb) { ND_PRINTK0(KERN_ERR - "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n", - __func__, err); + "ICMPv6 ND: %s() failed to allocate an skb.\n", + __func__); return NULL; } @@ -501,6 +498,11 @@ struct sk_buff *ndisc_build_skb(struct net_device *dev, csum_partial(hdr, len, 0)); + /* Manually assign socket 
ownership as we avoid calling + * sock_alloc_send_pskb() to bypass wmem buffer limits + */ + skb_set_owner_w(skb, sk); + return skb; } diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c index 43f8fc9..cc332c0 100644 --- a/net/ipv6/reassembly.c +++ b/net/ipv6/reassembly.c @@ -385,8 +385,17 @@ found: } if (fq->q.last_in == (INET_FRAG_FIRST_IN | INET_FRAG_LAST_IN) && - fq->q.meat == fq->q.len) - return ip6_frag_reasm(fq, prev, dev); + fq->q.meat == fq->q.len) { + int res; + unsigned long orefdst = skb->_skb_refdst; + + skb->_skb_refdst = 0UL; + res = ip6_frag_reasm(fq, prev, dev); + skb->_skb_refdst = orefdst; + return res; + } + + skb_dst_drop(skb); write_lock(&ip6_frags.lock); list_move_tail(&fq->q.lru_list, &fq->q.net->lru_list); @@ -507,6 +516,7 @@ static int ip6_frag_reasm(struct frag_queue *fq, struct sk_buff *prev, head->tstamp = fq->q.stamp; ipv6_hdr(head)->payload_len = htons(payload_len); IP6CB(head)->nhoff = nhoff; + IP6CB(head)->flags |= IP6SKB_FRAGMENTED; /* Yes, and fold redundant checksum back. 8) */ if (head->ip_summed == CHECKSUM_COMPLETE) @@ -542,6 +552,9 @@ static int ipv6_frag_rcv(struct sk_buff *skb) const struct ipv6hdr *hdr = ipv6_hdr(skb); struct net *net = dev_net(skb_dst(skb)->dev); + if (IP6CB(skb)->flags & IP6SKB_FRAGMENTED) + goto fail_hdr; + IP6_INC_STATS_BH(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMREQDS); /* Jumbo payload inhibits frag. header */ @@ -562,6 +575,7 @@ static int ipv6_frag_rcv(struct sk_buff *skb) ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_REASMOKS); IP6CB(skb)->nhoff = (u8 *)fhdr - skb_network_header(skb); + IP6CB(skb)->flags |= IP6SKB_FRAGMENTED; return 1; } diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c index d039a25..d3d23c7 100644 --- a/net/ipv6/tcp_ipv6.c +++ b/net/ipv6/tcp_ipv6.c @@ -2208,6 +2208,17 @@ void tcp6_proc_exit(struct net *net) } #endif +static void tcp_v6_clear_sk(struct sock *sk, int size) +{ + struct inet_sock *inet = inet_sk(sk); + + /* we do not want to clear pinet6 field, because of RCU lookups */ + sk_prot_clear_nulls(sk, offsetof(struct inet_sock, pinet6)); + + size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); + memset(&inet->pinet6 + 1, 0, size); +} + struct proto tcpv6_prot = { .name = "TCPv6", .owner = THIS_MODULE, @@ -2247,6 +2258,7 @@ struct proto tcpv6_prot = { .compat_setsockopt = compat_tcp_setsockopt, .compat_getsockopt = compat_tcp_getsockopt, #endif + .clear_sk = tcp_v6_clear_sk, }; static const struct inet6_protocol tcpv6_protocol = { diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c index f90e615..5c363cd 100644 --- a/net/ipv6/udp.c +++ b/net/ipv6/udp.c @@ -892,11 +892,16 @@ static int udp_v6_push_pending_frames(struct sock *sk) struct udphdr *uh; struct udp_sock *up = udp_sk(sk); struct inet_sock *inet = inet_sk(sk); - struct flowi6 *fl6 = &inet->cork.fl.u.ip6; + struct flowi6 *fl6; int err = 0; int is_udplite = IS_UDPLITE(sk); __wsum csum = 0; + if (up->pending == AF_INET) + return udp_push_pending_frames(sk); + + fl6 = &inet->cork.fl.u.ip6; + /* Grab the skbuff where UDP header space exists. 
*/ if ((skb = skb_peek(&sk->sk_write_queue)) == NULL) goto out; @@ -1449,6 +1454,17 @@ void udp6_proc_exit(struct net *net) { } #endif /* CONFIG_PROC_FS */ +void udp_v6_clear_sk(struct sock *sk, int size) +{ + struct inet_sock *inet = inet_sk(sk); + + /* we do not want to clear pinet6 field, because of RCU lookups */ + sk_prot_clear_portaddr_nulls(sk, offsetof(struct inet_sock, pinet6)); + + size -= offsetof(struct inet_sock, pinet6) + sizeof(inet->pinet6); + memset(&inet->pinet6 + 1, 0, size); +} + /* ------------------------------------------------------------------------ */ struct proto udpv6_prot = { @@ -1479,7 +1495,7 @@ struct proto udpv6_prot = { .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif - .clear_sk = sk_prot_clear_portaddr_nulls, + .clear_sk = udp_v6_clear_sk, }; static struct inet_protosw udpv6_protosw = { diff --git a/net/ipv6/udp_impl.h b/net/ipv6/udp_impl.h index d757104..4691ed5 100644 --- a/net/ipv6/udp_impl.h +++ b/net/ipv6/udp_impl.h @@ -31,6 +31,8 @@ extern int udpv6_recvmsg(struct kiocb *iocb, struct sock *sk, extern int udpv6_queue_rcv_skb(struct sock * sk, struct sk_buff *skb); extern void udpv6_destroy_sock(struct sock *sk); +extern void udp_v6_clear_sk(struct sock *sk, int size); + #ifdef CONFIG_PROC_FS extern int udp6_seq_show(struct seq_file *seq, void *v); #endif diff --git a/net/ipv6/udplite.c b/net/ipv6/udplite.c index 986c4de..41b2256 100644 --- a/net/ipv6/udplite.c +++ b/net/ipv6/udplite.c @@ -55,7 +55,7 @@ struct proto udplitev6_prot = { .compat_setsockopt = compat_udpv6_setsockopt, .compat_getsockopt = compat_udpv6_getsockopt, #endif - .clear_sk = sk_prot_clear_portaddr_nulls, + .clear_sk = udp_v6_clear_sk, }; static struct inet_protosw udplite6_protosw = { diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c index d879f7e..db78e7d 100644 --- a/net/ipv6/xfrm6_policy.c +++ b/net/ipv6/xfrm6_policy.c @@ -96,8 +96,10 @@ static int xfrm6_fill_dst(struct xfrm_dst *xdst, struct net_device *dev, dev_hold(dev); xdst->u.rt6.rt6i_idev = in6_dev_get(dev); - if (!xdst->u.rt6.rt6i_idev) + if (!xdst->u.rt6.rt6i_idev) { + dev_put(dev); return -ENODEV; + } xdst->u.rt6.rt6i_peer = rt->rt6i_peer; if (rt->rt6i_peer) diff --git a/net/irda/af_irda.c b/net/irda/af_irda.c index 8ad05f8..402af94 100644 --- a/net/irda/af_irda.c +++ b/net/irda/af_irda.c @@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb) diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c index e2013e4..794601e 100644 --- a/net/iucv/af_iucv.c +++ b/net/iucv/af_iucv.c @@ -1157,6 +1157,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN || sk->sk_state == IUCV_SEVERED) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) && diff --git a/net/key/af_key.c b/net/key/af_key.c index 8f92cf8..020a602 100644 --- a/net/key/af_key.c +++ b/net/key/af_key.c @@ -1705,6 +1705,7 @@ static int key_notify_sa_flush(const struct km_event *c) hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); @@ -2072,6 +2073,7 @@ static int 
pfkey_xfrm_policy2msg(struct sk_buff *skb, const struct xfrm_policy * pol->sadb_x_policy_type = IPSEC_POLICY_NONE; } pol->sadb_x_policy_dir = dir+1; + pol->sadb_x_policy_reserved = 0; pol->sadb_x_policy_id = xp->index; pol->sadb_x_policy_priority = xp->priority; @@ -2685,7 +2687,9 @@ static int key_notify_policy_flush(const struct km_event *c) hdr->sadb_msg_pid = c->pid; hdr->sadb_msg_version = PF_KEY_V2; hdr->sadb_msg_errno = (uint8_t) 0; + hdr->sadb_msg_satype = SADB_SATYPE_UNSPEC; hdr->sadb_msg_len = (sizeof(struct sadb_msg) / sizeof(uint64_t)); + hdr->sadb_msg_reserved = 0; pfkey_broadcast(skb_out, GFP_ATOMIC, BROADCAST_ALL, NULL, c->net); return 0; @@ -3106,7 +3110,9 @@ static int pfkey_send_acquire(struct xfrm_state *x, struct xfrm_tmpl *t, struct pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY; pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC; pol->sadb_x_policy_dir = dir+1; + pol->sadb_x_policy_reserved = 0; pol->sadb_x_policy_id = xp->index; + pol->sadb_x_policy_priority = xp->priority; /* Set sadb_comb's. */ if (x->id.proto == IPPROTO_AH) @@ -3494,6 +3500,7 @@ static int pfkey_send_migrate(const struct xfrm_selector *sel, u8 dir, u8 type, pol->sadb_x_policy_exttype = SADB_X_EXT_POLICY; pol->sadb_x_policy_type = IPSEC_POLICY_IPSEC; pol->sadb_x_policy_dir = dir + 1; + pol->sadb_x_policy_reserved = 0; pol->sadb_x_policy_id = 0; pol->sadb_x_policy_priority = 0; diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c index 770b497..2366914 100644 --- a/net/l2tp/l2tp_ppp.c +++ b/net/l2tp/l2tp_ppp.c @@ -350,19 +350,19 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh skb_put(skb, 2); /* Copy user data into skb */ - error = memcpy_fromiovec(skb->data, m->msg_iov, total_len); + error = memcpy_fromiovec(skb_put(skb, total_len), m->msg_iov, + total_len); if (error < 0) { kfree_skb(skb); goto error_put_sess_tun; } - skb_put(skb, total_len); l2tp_xmit_skb(session, skb, session->hdr_len); sock_put(ps->tunnel_sock); sock_put(sk); - return error; + return total_len; error_put_sess_tun: sock_put(ps->tunnel_sock); @@ -1771,7 +1771,8 @@ static const struct proto_ops pppol2tp_ops = { static const struct pppox_proto pppol2tp_proto = { .create = pppol2tp_create, - .ioctl = pppol2tp_ioctl + .ioctl = pppol2tp_ioctl, + .owner = THIS_MODULE, }; #ifdef CONFIG_L2TP_V3 diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c index 99a60d5..e5565c7 100644 --- a/net/llc/af_llc.c +++ b/net/llc/af_llc.c @@ -720,6 +720,8 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, int target; /* Read at least this many bytes */ long timeo; + msg->msg_namelen = 0; + lock_sock(sk); copied = -ENOTCONN; if (unlikely(sk->sk_type == SOCK_STREAM && sk->sk_state == TCP_LISTEN)) diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c index 65f3764c..bd1ef84 100644 --- a/net/mac80211/iface.c +++ b/net/mac80211/iface.c @@ -1242,6 +1242,15 @@ void ieee80211_remove_interfaces(struct ieee80211_local *local) ASSERT_RTNL(); + /* + * Close all AP_VLAN interfaces first, as otherwise they + * might be closed while the AP interface they belong to + * is closed, causing unregister_netdevice_many() to crash. 
+ */ + list_for_each_entry(sdata, &local->interfaces, list) + if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) + dev_close(sdata->dev); + mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 785b6e9..10e8842 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -810,8 +810,14 @@ ieee80211_rx_h_check(struct ieee80211_rx_data *rx) struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rx->skb->data; struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(rx->skb); - /* Drop duplicate 802.11 retransmissions (IEEE 802.11 Chap. 9.2.9) */ - if (rx->sta && !is_multicast_ether_addr(hdr->addr1)) { + /* + * Drop duplicate 802.11 retransmissions + * (IEEE 802.11-2012: 9.3.2.10 "Duplicate detection and recovery") + */ + if (rx->skb->len >= 24 && rx->sta && + !ieee80211_is_ctl(hdr->frame_control) && + !ieee80211_is_qos_nullfunc(hdr->frame_control) && + !is_multicast_ether_addr(hdr->addr1)) { if (unlikely(ieee80211_has_retry(hdr->frame_control) && rx->sta->last_seq_ctrl[rx->queue] == hdr->seq_ctrl)) { diff --git a/net/netfilter/ipvs/ip_vs_pe_sip.c b/net/netfilter/ipvs/ip_vs_pe_sip.c index 13d607a..87ecf75 100644 --- a/net/netfilter/ipvs/ip_vs_pe_sip.c +++ b/net/netfilter/ipvs/ip_vs_pe_sip.c @@ -37,14 +37,10 @@ static int get_callid(const char *dptr, unsigned int dataoff, if (ret > 0) break; if (!ret) - return 0; + return -EINVAL; dataoff += *matchoff; } - /* Empty callid is useless */ - if (!*matchlen) - return -EINVAL; - /* Too large is useless */ if (*matchlen > IP_VS_PEDATA_MAXLEN) return -EINVAL; diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index ee319a4..ab67dd1 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -853,7 +853,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, iph->daddr = cp->daddr.ip; iph->saddr = saddr; iph->ttl = old_iph->ttl; - ip_select_ident(iph, &rt->dst, NULL); + ip_select_ident(skb, &rt->dst, NULL); /* Another hack: avoid icmp_send in ip_fragment */ skb->local_df = 1; diff --git a/net/netlabel/netlabel_domainhash.c b/net/netlabel/netlabel_domainhash.c index de0d8e4..10b273a 100644 --- a/net/netlabel/netlabel_domainhash.c +++ b/net/netlabel/netlabel_domainhash.c @@ -246,6 +246,71 @@ static void netlbl_domhsh_audit_add(struct netlbl_dom_map *entry, } } +/** + * netlbl_domhsh_validate - Validate a new domain mapping entry + * @entry: the entry to validate + * + * This function validates the new domain mapping entry to ensure that it is + * a valid entry. Returns zero on success, negative values on failure. 
+ * + */ +static int netlbl_domhsh_validate(const struct netlbl_dom_map *entry) +{ + struct netlbl_af4list *iter4; + struct netlbl_domaddr4_map *map4; +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + struct netlbl_af6list *iter6; + struct netlbl_domaddr6_map *map6; +#endif /* IPv6 */ + + if (entry == NULL) + return -EINVAL; + + switch (entry->type) { + case NETLBL_NLTYPE_UNLABELED: + if (entry->type_def.cipsov4 != NULL || + entry->type_def.addrsel != NULL) + return -EINVAL; + break; + case NETLBL_NLTYPE_CIPSOV4: + if (entry->type_def.cipsov4 == NULL) + return -EINVAL; + break; + case NETLBL_NLTYPE_ADDRSELECT: + netlbl_af4list_foreach(iter4, &entry->type_def.addrsel->list4) { + map4 = netlbl_domhsh_addr4_entry(iter4); + switch (map4->type) { + case NETLBL_NLTYPE_UNLABELED: + if (map4->type_def.cipsov4 != NULL) + return -EINVAL; + break; + case NETLBL_NLTYPE_CIPSOV4: + if (map4->type_def.cipsov4 == NULL) + return -EINVAL; + break; + default: + return -EINVAL; + } + } +#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) + netlbl_af6list_foreach(iter6, &entry->type_def.addrsel->list6) { + map6 = netlbl_domhsh_addr6_entry(iter6); + switch (map6->type) { + case NETLBL_NLTYPE_UNLABELED: + break; + default: + return -EINVAL; + } + } +#endif /* IPv6 */ + break; + default: + return -EINVAL; + } + + return 0; +} + /* * Domain Hash Table Functions */ @@ -312,6 +377,10 @@ int netlbl_domhsh_add(struct netlbl_dom_map *entry, struct netlbl_af6list *tmp6; #endif /* IPv6 */ + ret_val = netlbl_domhsh_validate(entry); + if (ret_val != 0) + return ret_val; + /* XXX - we can remove this RCU read lock as the spinlock protects the * entire function, but before we do we need to fixup the * netlbl_af[4,6]list RCU functions to do "the right thing" with diff --git a/net/netrom/af_netrom.c b/net/netrom/af_netrom.c index f156382..3df7c5a 100644 --- a/net/netrom/af_netrom.c +++ b/net/netrom/af_netrom.c @@ -1178,6 +1178,7 @@ static int nr_recvmsg(struct kiocb *iocb, struct socket *sock, } if (sax != NULL) { + memset(sax, 0, sizeof(*sax)); sax->sax25_family = AF_NETROM; skb_copy_from_linear_data_offset(skb, 7, sax->sax25_call.ax25_call, AX25_ADDR_LEN); diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c index 4058afe..d596ceb 100644 --- a/net/packet/af_packet.c +++ b/net/packet/af_packet.c @@ -1767,12 +1767,11 @@ static int packet_getname_spkt(struct socket *sock, struct sockaddr *uaddr, return -EOPNOTSUPP; uaddr->sa_family = AF_PACKET; + memset(uaddr->sa_data, 0, sizeof(uaddr->sa_data)); rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), pkt_sk(sk)->ifindex); if (dev) - strncpy(uaddr->sa_data, dev->name, 14); - else - memset(uaddr->sa_data, 0, 14); + strlcpy(uaddr->sa_data, dev->name, sizeof(uaddr->sa_data)); rcu_read_unlock(); *uaddr_len = sizeof(*uaddr); diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c index f9ea925..1f96fb9 100644 --- a/net/rose/af_rose.c +++ b/net/rose/af_rose.c @@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, skb_copy_datagram_iovec(skb, 0, msg->msg_iov, copied); if (srose != NULL) { + memset(srose, 0, msg->msg_namelen); srose->srose_family = AF_ROSE; srose->srose_addr = rose->dest_addr; srose->srose_call = rose->dest_call; diff --git a/net/sched/act_ipt.c b/net/sched/act_ipt.c index 9fc211a..ec66b5e 100644 --- a/net/sched/act_ipt.c +++ b/net/sched/act_ipt.c @@ -8,7 +8,7 @@ * as published by the Free Software Foundation; either version * 2 of the License, or (at your option) any later version. 
* - * Copyright: Jamal Hadi Salim (2002-4) + * Copyright: Jamal Hadi Salim (2002-13) */ #include <linux/types.h> @@ -299,17 +299,44 @@ static struct tc_action_ops act_ipt_ops = { .walk = tcf_generic_walker }; -MODULE_AUTHOR("Jamal Hadi Salim(2002-4)"); +static struct tc_action_ops act_xt_ops = { + .kind = "xt", + .hinfo = &ipt_hash_info, + .type = TCA_ACT_IPT, + .capab = TCA_CAP_NONE, + .owner = THIS_MODULE, + .act = tcf_ipt, + .dump = tcf_ipt_dump, + .cleanup = tcf_ipt_cleanup, + .lookup = tcf_hash_search, + .init = tcf_ipt_init, + .walk = tcf_generic_walker +}; + +MODULE_AUTHOR("Jamal Hadi Salim(2002-13)"); MODULE_DESCRIPTION("Iptables target actions"); MODULE_LICENSE("GPL"); +MODULE_ALIAS("act_xt"); static int __init ipt_init_module(void) { - return tcf_register_action(&act_ipt_ops); + int ret1, ret2; + ret1 = tcf_register_action(&act_xt_ops); + if (ret1 < 0) + printk("Failed to load xt action\n"); + ret2 = tcf_register_action(&act_ipt_ops); + if (ret2 < 0) + printk("Failed to load ipt action\n"); + + if (ret1 < 0 && ret2 < 0) + return ret1; + else + return 0; } static void __exit ipt_cleanup_module(void) { + tcf_unregister_action(&act_xt_ops); tcf_unregister_action(&act_ipt_ops); } diff --git a/net/sched/sch_atm.c b/net/sched/sch_atm.c index 3f08158..46a39dc 100644 --- a/net/sched/sch_atm.c +++ b/net/sched/sch_atm.c @@ -605,6 +605,7 @@ static int atm_tc_dump_class(struct Qdisc *sch, unsigned long cl, struct sockaddr_atmpvc pvc; int state; + memset(&pvc, 0, sizeof(pvc)); pvc.sap_family = AF_ATMPVC; pvc.sap_addr.itf = flow->vcc->dev ? flow->vcc->dev->number : -1; pvc.sap_addr.vpi = flow->vcc->vpi; diff --git a/net/sched/sch_cbq.c b/net/sched/sch_cbq.c index 599f67a..7f59944 100644 --- a/net/sched/sch_cbq.c +++ b/net/sched/sch_cbq.c @@ -963,8 +963,11 @@ cbq_dequeue(struct Qdisc *sch) cbq_update(q); if ((incr -= incr2) < 0) incr = 0; + q->now += incr; + } else { + if (now > q->now) + q->now = now; } - q->now += incr; q->now_rt = now; for (;;) { @@ -1464,6 +1467,7 @@ static int cbq_dump_wrr(struct sk_buff *skb, struct cbq_class *cl) unsigned char *b = skb_tail_pointer(skb); struct tc_cbq_wrropt opt; + memset(&opt, 0, sizeof(opt)); opt.flags = 0; opt.allot = cl->allot; opt.priority = cl->priority + 1; diff --git a/net/sched/sch_htb.c b/net/sched/sch_htb.c index f08b9166..caa5aff 100644 --- a/net/sched/sch_htb.c +++ b/net/sched/sch_htb.c @@ -86,7 +86,7 @@ struct htb_class { unsigned int children; struct htb_class *parent; /* parent class */ - int prio; /* these two are used only by leaves... */ + u32 prio; /* these two are used only by leaves... */ int quantum; /* but stored for parent-to-leaf return */ union { diff --git a/net/sctp/auth.c b/net/sctp/auth.c index 865e68f..418ebe4 100644 --- a/net/sctp/auth.c +++ b/net/sctp/auth.c @@ -71,7 +71,7 @@ void sctp_auth_key_put(struct sctp_auth_bytes *key) return; if (atomic_dec_and_test(&key->refcnt)) { - kfree(key); + kzfree(key); SCTP_DBG_OBJCNT_DEC(keys); } } diff --git a/net/sctp/ipv6.c b/net/sctp/ipv6.c index 0bb0d7c..609adfa 100644 --- a/net/sctp/ipv6.c +++ b/net/sctp/ipv6.c @@ -203,45 +203,22 @@ out: in6_dev_put(idev); } -/* Based on tcp_v6_xmit() in tcp_ipv6.c. */ static int sctp_v6_xmit(struct sk_buff *skb, struct sctp_transport *transport) { struct sock *sk = skb->sk; struct ipv6_pinfo *np = inet6_sk(sk); - struct flowi6 fl6; - - memset(&fl6, 0, sizeof(fl6)); - - fl6.flowi6_proto = sk->sk_protocol; - - /* Fill in the dest address from the route entry passed with the skb - * and the source address from the transport. 
- */ - ipv6_addr_copy(&fl6.daddr, &transport->ipaddr.v6.sin6_addr); - ipv6_addr_copy(&fl6.saddr, &transport->saddr.v6.sin6_addr); - - fl6.flowlabel = np->flow_label; - IP6_ECN_flow_xmit(sk, fl6.flowlabel); - if (ipv6_addr_type(&fl6.saddr) & IPV6_ADDR_LINKLOCAL) - fl6.flowi6_oif = transport->saddr.v6.sin6_scope_id; - else - fl6.flowi6_oif = sk->sk_bound_dev_if; - - if (np->opt && np->opt->srcrt) { - struct rt0_hdr *rt0 = (struct rt0_hdr *) np->opt->srcrt; - ipv6_addr_copy(&fl6.daddr, rt0->addr); - } + struct flowi6 *fl6 = &transport->fl.u.ip6; SCTP_DEBUG_PRINTK("%s: skb:%p, len:%d, src:%pI6 dst:%pI6\n", __func__, skb, skb->len, - &fl6.saddr, &fl6.daddr); - - SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); + &fl6->saddr, &fl6->daddr); if (!(transport->param_flags & SPP_PMTUD_ENABLE)) skb->local_df = 1; - return ip6_xmit(sk, skb, &fl6, np->opt); + SCTP_INC_STATS(SCTP_MIB_OUTSCTPPACKS); + + return ip6_xmit(sk, skb, fl6, np->opt); } /* Returns the dst cache entry for the given source and destination ip @@ -254,10 +231,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, struct dst_entry *dst = NULL; struct flowi6 *fl6 = &fl->u.ip6; struct sctp_bind_addr *bp; + struct ipv6_pinfo *np = inet6_sk(sk); struct sctp_sockaddr_entry *laddr; union sctp_addr *baddr = NULL; union sctp_addr *daddr = &t->ipaddr; union sctp_addr dst_saddr; + struct in6_addr *final_p, final; __u8 matchlen = 0; __u8 bmatchlen; sctp_scope_t scope; @@ -280,7 +259,8 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, SCTP_DEBUG_PRINTK("SRC=%pI6 - ", &fl6->saddr); } - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); + final_p = fl6_update_dst(fl6, np->opt, &final); + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); if (!asoc || saddr) goto out; @@ -331,10 +311,12 @@ static void sctp_v6_get_dst(struct sctp_transport *t, union sctp_addr *saddr, } } rcu_read_unlock(); + if (baddr) { ipv6_addr_copy(&fl6->saddr, &baddr->v6.sin6_addr); fl6->fl6_sport = baddr->v6.sin6_port; - dst = ip6_dst_lookup_flow(sk, fl6, NULL, false); + final_p = fl6_update_dst(fl6, np->opt, &final); + dst = ip6_dst_lookup_flow(sk, fl6, final_p, false); } out: diff --git a/net/sctp/outqueue.c b/net/sctp/outqueue.c index 2325ce6..35e44e2 100644 --- a/net/sctp/outqueue.c +++ b/net/sctp/outqueue.c @@ -205,6 +205,8 @@ static inline int sctp_cacc_skip(struct sctp_transport *primary, */ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) { + memset(q, 0, sizeof(struct sctp_outq)); + q->asoc = asoc; INIT_LIST_HEAD(&q->out_chunk_list); INIT_LIST_HEAD(&q->control_chunk_list); @@ -212,13 +214,7 @@ void sctp_outq_init(struct sctp_association *asoc, struct sctp_outq *q) INIT_LIST_HEAD(&q->sacked); INIT_LIST_HEAD(&q->abandoned); - q->fast_rtx = 0; - q->outstanding_bytes = 0; q->empty = 1; - q->cork = 0; - - q->malloced = 0; - q->out_qlen = 0; } /* Free the outqueue structure and any related pending chunks. 
diff --git a/net/sctp/sm_sideeffect.c b/net/sctp/sm_sideeffect.c index edac24a..6e0f882 100644 --- a/net/sctp/sm_sideeffect.c +++ b/net/sctp/sm_sideeffect.c @@ -1604,9 +1604,8 @@ static int sctp_cmd_interpreter(sctp_event_t event_type, asoc->outqueue.outstanding_bytes; sackh.num_gap_ack_blocks = 0; sackh.num_dup_tsns = 0; - chunk->subh.sack_hdr = &sackh; sctp_add_cmd_sf(commands, SCTP_CMD_PROCESS_SACK, - SCTP_CHUNK(chunk)); + SCTP_SACKH(&sackh)); break; case SCTP_CMD_DISCARD_PACKET: diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 6b766cd..d0a8a77 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -3809,6 +3809,12 @@ SCTP_STATIC void sctp_destroy_sock(struct sock *sk) /* Release our hold on the endpoint. */ ep = sctp_sk(sk)->ep; + /* This could happen during socket init, thus we bail out + * early, since the rest of the below is not setup either. + */ + if (ep == NULL) + return; + sctp_endpoint_free(ep); local_bh_disable(); percpu_counter_dec(&sctp_sockets_allocated); diff --git a/net/socket.c b/net/socket.c index 1b0f0fc..e4c7f90 100644 --- a/net/socket.c +++ b/net/socket.c @@ -1876,9 +1876,9 @@ struct used_address { unsigned int name_len; }; -static int __sys_sendmsg(struct socket *sock, struct msghdr __user *msg, - struct msghdr *msg_sys, unsigned flags, - struct used_address *used_address) +static int ___sys_sendmsg(struct socket *sock, struct msghdr __user *msg, + struct msghdr *msg_sys, unsigned flags, + struct used_address *used_address) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; @@ -1998,22 +1998,30 @@ out: * BSD sendmsg interface */ -SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned, flags) +long __sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; - struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); + struct socket *sock; + sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; - err = __sys_sendmsg(sock, msg, &msg_sys, flags, NULL); + err = ___sys_sendmsg(sock, msg, &msg_sys, flags, NULL); fput_light(sock->file, fput_needed); out: return err; } +SYSCALL_DEFINE3(sendmsg, int, fd, struct msghdr __user *, msg, unsigned int, flags) +{ + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; + return __sys_sendmsg(fd, msg, flags); +} + /* * Linux sendmmsg interface */ @@ -2044,15 +2052,16 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, while (datagrams < vlen) { if (MSG_CMSG_COMPAT & flags) { - err = __sys_sendmsg(sock, (struct msghdr __user *)compat_entry, - &msg_sys, flags, &used_address); + err = ___sys_sendmsg(sock, (struct msghdr __user *)compat_entry, + &msg_sys, flags, &used_address); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { - err = __sys_sendmsg(sock, (struct msghdr __user *)entry, - &msg_sys, flags, &used_address); + err = ___sys_sendmsg(sock, + (struct msghdr __user *)entry, + &msg_sys, flags, &used_address); if (err < 0) break; err = put_user(err, &entry->msg_len); @@ -2076,11 +2085,13 @@ int __sys_sendmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, SYSCALL_DEFINE4(sendmmsg, int, fd, struct mmsghdr __user *, mmsg, unsigned int, vlen, unsigned int, flags) { + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; return __sys_sendmmsg(fd, mmsg, vlen, flags); } -static int __sys_recvmsg(struct socket *sock, struct msghdr __user *msg, - struct msghdr *msg_sys, unsigned flags, int nosec) +static int ___sys_recvmsg(struct socket *sock, 
struct msghdr __user *msg, + struct msghdr *msg_sys, unsigned flags, int nosec) { struct compat_msghdr __user *msg_compat = (struct compat_msghdr __user *)msg; @@ -2177,23 +2188,31 @@ out: * BSD recvmsg interface */ -SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, - unsigned int, flags) +long __sys_recvmsg(int fd, struct msghdr __user *msg, unsigned flags) { int fput_needed, err; struct msghdr msg_sys; - struct socket *sock = sockfd_lookup_light(fd, &err, &fput_needed); + struct socket *sock; + sock = sockfd_lookup_light(fd, &err, &fput_needed); if (!sock) goto out; - err = __sys_recvmsg(sock, msg, &msg_sys, flags, 0); + err = ___sys_recvmsg(sock, msg, &msg_sys, flags, 0); fput_light(sock->file, fput_needed); out: return err; } +SYSCALL_DEFINE3(recvmsg, int, fd, struct msghdr __user *, msg, + unsigned int, flags) +{ + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; + return __sys_recvmsg(fd, msg, flags); +} + /* * Linux recvmmsg interface */ @@ -2231,17 +2250,18 @@ int __sys_recvmmsg(int fd, struct mmsghdr __user *mmsg, unsigned int vlen, * No need to ask LSM for more than the first datagram. */ if (MSG_CMSG_COMPAT & flags) { - err = __sys_recvmsg(sock, (struct msghdr __user *)compat_entry, - &msg_sys, flags & ~MSG_WAITFORONE, - datagrams); + err = ___sys_recvmsg(sock, (struct msghdr __user *)compat_entry, + &msg_sys, flags & ~MSG_WAITFORONE, + datagrams); if (err < 0) break; err = __put_user(err, &compat_entry->msg_len); ++compat_entry; } else { - err = __sys_recvmsg(sock, (struct msghdr __user *)entry, - &msg_sys, flags & ~MSG_WAITFORONE, - datagrams); + err = ___sys_recvmsg(sock, + (struct msghdr __user *)entry, + &msg_sys, flags & ~MSG_WAITFORONE, + datagrams); if (err < 0) break; err = put_user(err, &entry->msg_len); @@ -2308,6 +2328,9 @@ SYSCALL_DEFINE5(recvmmsg, int, fd, struct mmsghdr __user *, mmsg, int datagrams; struct timespec timeout_sys; + if (flags & MSG_CMSG_COMPAT) + return -EINVAL; + if (!timeout) return __sys_recvmmsg(fd, mmsg, vlen, flags, NULL); diff --git a/net/tipc/eth_media.c b/net/tipc/eth_media.c index b69092e..a8c2a6b2 100644 --- a/net/tipc/eth_media.c +++ b/net/tipc/eth_media.c @@ -53,6 +53,7 @@ struct eth_bearer { struct tipc_bearer *bearer; struct net_device *dev; struct packet_type tipc_packet_type; + struct work_struct setup; }; static struct eth_bearer eth_bearers[MAX_ETH_BEARERS]; @@ -121,6 +122,17 @@ static int recv_msg(struct sk_buff *buf, struct net_device *dev, } /** + * setup_bearer - setup association between Ethernet bearer and interface + */ +static void setup_bearer(struct work_struct *work) +{ + struct eth_bearer *eb_ptr = + container_of(work, struct eth_bearer, setup); + + dev_add_pack(&eb_ptr->tipc_packet_type); +} + +/** * enable_bearer - attach TIPC bearer to an Ethernet interface */ @@ -167,7 +179,8 @@ static int enable_bearer(struct tipc_bearer *tb_ptr) eb_ptr->tipc_packet_type.af_packet_priv = eb_ptr; INIT_LIST_HEAD(&(eb_ptr->tipc_packet_type.list)); dev_hold(dev); - dev_add_pack(&eb_ptr->tipc_packet_type); + INIT_WORK(&eb_ptr->setup, setup_bearer); + schedule_work(&eb_ptr->setup); } /* Associate TIPC bearer with Ethernet bearer */ diff --git a/net/tipc/socket.c b/net/tipc/socket.c index 3388373..3610786 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -829,6 +829,7 @@ static void set_orig_addr(struct msghdr *m, struct tipc_msg *msg) if (addr) { addr->family = AF_TIPC; addr->addrtype = TIPC_ADDR_ID; + memset(&addr->addr, 0, sizeof(addr->addr)); addr->addr.id.ref = msg_origport(msg); addr->addr.id.node = 
msg_orignode(msg); addr->addr.name.domain = 0; /* could leave uninitialized */ @@ -948,6 +949,9 @@ static int recv_msg(struct kiocb *iocb, struct socket *sock, goto exit; } + /* will be updated in set_orig_addr() if needed */ + m->msg_namelen = 0; + timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); restart: @@ -1074,6 +1078,9 @@ static int recv_stream(struct kiocb *iocb, struct socket *sock, goto exit; } + /* will be updated in set_orig_addr() if needed */ + m->msg_namelen = 0; + target = sock_rcvlowat(sk, flags & MSG_WAITALL, buf_len); timeout = sock_rcvtimeo(sk, flags & MSG_DONTWAIT); restart: diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c index afbdd0c..709400b 100644 --- a/net/unix/af_unix.c +++ b/net/unix/af_unix.c @@ -1940,7 +1940,7 @@ static int unix_stream_recvmsg(struct kiocb *iocb, struct socket *sock, skb_queue_head(&sk->sk_receive_queue, skb); break; } - } else { + } else if (test_bit(SOCK_PASSCRED, &sock->flags)) { /* Copy credentials */ scm_set_cred(siocb->scm, UNIXCB(skb).pid, UNIXCB(skb).cred); check_creds = 1; diff --git a/net/wireless/nl80211.c b/net/wireless/nl80211.c index 752d542..55e2b86 100644 --- a/net/wireless/nl80211.c +++ b/net/wireless/nl80211.c @@ -4577,12 +4577,14 @@ EXPORT_SYMBOL(cfg80211_testmode_alloc_event_skb); void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp) { + struct cfg80211_registered_device *rdev = ((void **)skb->cb)[0]; void *hdr = ((void **)skb->cb)[1]; struct nlattr *data = ((void **)skb->cb)[2]; nla_nest_end(skb, data); genlmsg_end(skb, hdr); - genlmsg_multicast(skb, 0, nl80211_testmode_mcgrp.id, gfp); + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), skb, 0, + nl80211_testmode_mcgrp.id, gfp); } EXPORT_SYMBOL(cfg80211_testmode_event); #endif @@ -7179,7 +7181,8 @@ void nl80211_send_mgmt_tx_status(struct cfg80211_registered_device *rdev, return; } - genlmsg_multicast(msg, 0, nl80211_mlme_mcgrp.id, gfp); + genlmsg_multicast_netns(wiphy_net(&rdev->wiphy), msg, 0, + nl80211_mlme_mcgrp.id, gfp); return; nla_put_failure: diff --git a/net/wireless/reg.c b/net/wireless/reg.c index 04f42b7..35253c5 100644 --- a/net/wireless/reg.c +++ b/net/wireless/reg.c @@ -852,7 +852,7 @@ static void handle_channel(struct wiphy *wiphy, return; REG_DBG_PRINT("Disabling freq %d MHz\n", chan->center_freq); - chan->flags = IEEE80211_CHAN_DISABLED; + chan->flags |= IEEE80211_CHAN_DISABLED; return; } diff --git a/net/x25/af_x25.c b/net/x25/af_x25.c index fb37356..5867429 100644 --- a/net/x25/af_x25.c +++ b/net/x25/af_x25.c @@ -1584,11 +1584,11 @@ out_cud_release: case SIOCX25CALLACCPTAPPRV: { rc = -EINVAL; lock_sock(sk); - if (sk->sk_state != TCP_CLOSE) - break; - clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); + if (sk->sk_state == TCP_CLOSE) { + clear_bit(X25_ACCPT_APPRV_FLAG, &x25->flags); + rc = 0; + } release_sock(sk); - rc = 0; break; } @@ -1596,14 +1596,15 @@ out_cud_release: rc = -EINVAL; lock_sock(sk); if (sk->sk_state != TCP_ESTABLISHED) - break; + goto out_sendcallaccpt_release; /* must call accptapprv above */ if (test_bit(X25_ACCPT_APPRV_FLAG, &x25->flags)) - break; + goto out_sendcallaccpt_release; x25_write_internal(sk, X25_CALL_ACCEPTED); x25->state = X25_STATE_3; - release_sock(sk); rc = 0; +out_sendcallaccpt_release: + release_sock(sk); break; } diff --git a/scripts/kernel-doc b/scripts/kernel-doc index d793001..ba3d9df 100755 --- a/scripts/kernel-doc +++ b/scripts/kernel-doc @@ -2044,6 +2044,9 @@ sub process_file($) { $section_counter = 0; while (<IN>) { + while (s/\\\s*$//) { + $_ .= <IN>; + } if ($state == 0) { if (/$doc_start/o) { 
$state = 1; # next line is always the function name diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c index 4e9ebae..903d5d8 100644 --- a/sound/core/pcm_native.c +++ b/sound/core/pcm_native.c @@ -3215,18 +3215,10 @@ static int snd_pcm_default_mmap(struct snd_pcm_substream *substream, int snd_pcm_lib_mmap_iomem(struct snd_pcm_substream *substream, struct vm_area_struct *area) { - long size; - unsigned long offset; + struct snd_pcm_runtime *runtime = substream->runtime;; area->vm_page_prot = pgprot_noncached(area->vm_page_prot); - area->vm_flags |= VM_IO; - size = area->vm_end - area->vm_start; - offset = area->vm_pgoff << PAGE_SHIFT; - if (io_remap_pfn_range(area, area->vm_start, - (substream->runtime->dma_addr + offset) >> PAGE_SHIFT, - size, area->vm_page_prot)) - return -EAGAIN; - return 0; + return vm_iomap_memory(area, runtime->dma_addr, runtime->dma_bytes); } EXPORT_SYMBOL(snd_pcm_lib_mmap_iomem); diff --git a/sound/isa/opti9xx/opti92x-ad1848.c b/sound/isa/opti9xx/opti92x-ad1848.c index c35dc68..2b83557 100644 --- a/sound/isa/opti9xx/opti92x-ad1848.c +++ b/sound/isa/opti9xx/opti92x-ad1848.c @@ -173,11 +173,7 @@ MODULE_DEVICE_TABLE(pnp_card, snd_opti9xx_pnpids); #endif /* CONFIG_PNP */ -#ifdef OPTi93X -#define DEV_NAME "opti93x" -#else -#define DEV_NAME "opti92x" -#endif +#define DEV_NAME KBUILD_MODNAME static char * snd_opti9xx_names[] = { "unknown", @@ -1126,7 +1122,7 @@ static void __devexit snd_opti9xx_pnp_remove(struct pnp_card_link * pcard) static struct pnp_card_driver opti9xx_pnpc_driver = { .flags = PNP_DRIVER_RES_DISABLE, - .name = "opti9xx", + .name = DEV_NAME, .id_table = snd_opti9xx_pnpids, .probe = snd_opti9xx_pnp_probe, .remove = __devexit_p(snd_opti9xx_pnp_remove), diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c index 7b747af..7031412 100644 --- a/sound/pci/hda/hda_codec.c +++ b/sound/pci/hda/hda_codec.c @@ -157,7 +157,7 @@ const char *snd_hda_get_jack_type(u32 cfg) "Line Out", "Speaker", "HP Out", "CD", "SPDIF Out", "Digital Out", "Modem Line", "Modem Hand", "Line In", "Aux", "Mic", "Telephony", - "SPDIF In", "Digitial In", "Reserved", "Other" + "SPDIF In", "Digital In", "Reserved", "Other" }; return jack_types[(cfg & AC_DEFCFG_DEVICE) diff --git a/sound/pci/hda/hda_eld.c b/sound/pci/hda/hda_eld.c index e3e8531..64365a5 100644 --- a/sound/pci/hda/hda_eld.c +++ b/sound/pci/hda/hda_eld.c @@ -322,7 +322,7 @@ int snd_hdmi_get_eld(struct hdmi_eld *eld, struct hda_codec *codec, hda_nid_t nid) { int i; - int ret; + int ret = 0; int size; unsigned char *buf; diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c index c5c9788..a9537d4 100644 --- a/sound/pci/hda/hda_intel.c +++ b/sound/pci/hda/hda_intel.c @@ -2461,6 +2461,7 @@ static struct snd_pci_quirk msi_black_list[] __devinitdata = { SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ + SND_PCI_QUIRK(0x1179, 0xfb44, "Toshiba Satellite C870", 0), /* AMD Hudson */ SND_PCI_QUIRK(0x1849, 0x0888, "ASRock", 0), /* Athlon64 X2 + nvidia */ SND_PCI_QUIRK(0xa0a0, 0x0575, "Aopen MZ915-M", 0), /* ICH6 */ {} diff --git a/sound/soc/codecs/88pm860x-codec.c b/sound/soc/codecs/88pm860x-codec.c index 1924157..8c792bb 100644 --- a/sound/soc/codecs/88pm860x-codec.c +++ b/sound/soc/codecs/88pm860x-codec.c @@ -350,6 +350,9 @@ static int snd_soc_put_volsw_2r_st(struct snd_kcontrol *kcontrol, val = ucontrol->value.integer.value[0]; val2 = 
ucontrol->value.integer.value[1]; + if (val >= ARRAY_SIZE(st_table) || val2 >= ARRAY_SIZE(st_table)) + return -EINVAL; + err = snd_soc_update_bits(codec, reg, 0x3f, st_table[val].m); if (err < 0) return err; diff --git a/sound/soc/codecs/max98088.c b/sound/soc/codecs/max98088.c index 4173b67..1d5cff2 100644 --- a/sound/soc/codecs/max98088.c +++ b/sound/soc/codecs/max98088.c @@ -1598,7 +1598,7 @@ static int max98088_dai2_digital_mute(struct snd_soc_dai *codec_dai, int mute) static void max98088_sync_cache(struct snd_soc_codec *codec) { - u16 *reg_cache = codec->reg_cache; + u8 *reg_cache = codec->reg_cache; int i; if (!codec->cache_sync) @@ -1998,7 +1998,7 @@ static int max98088_probe(struct snd_soc_codec *codec) ret); goto err_access; } - dev_info(codec->dev, "revision %c\n", ret + 'A'); + dev_info(codec->dev, "revision %c\n", ret - 0x40 + 'A'); snd_soc_write(codec, M98088_REG_51_PWR_SYS, M98088_PWRSV); diff --git a/sound/soc/codecs/max98095.c b/sound/soc/codecs/max98095.c index e1d282d..51bac5c 100644 --- a/sound/soc/codecs/max98095.c +++ b/sound/soc/codecs/max98095.c @@ -1865,7 +1865,7 @@ static int max98095_put_eq_enum(struct snd_kcontrol *kcontrol, struct max98095_pdata *pdata = max98095->pdata; int channel = max98095_get_eq_channel(kcontrol->id.name); struct max98095_cdata *cdata; - int sel = ucontrol->value.integer.value[0]; + unsigned int sel = ucontrol->value.integer.value[0]; struct max98095_eq_cfg *coef_set; int fs, best, best_val, i; int regmask, regsave; @@ -2011,7 +2011,7 @@ static int max98095_put_bq_enum(struct snd_kcontrol *kcontrol, struct max98095_pdata *pdata = max98095->pdata; int channel = max98095_get_bq_channel(kcontrol->id.name); struct max98095_cdata *cdata; - int sel = ucontrol->value.integer.value[0]; + unsigned int sel = ucontrol->value.integer.value[0]; struct max98095_biquad_cfg *coef_set; int fs, best, best_val, i; int regmask, regsave; diff --git a/sound/soc/codecs/sgtl5000.h b/sound/soc/codecs/sgtl5000.h index eec3ab3..b79eb80 100644 --- a/sound/soc/codecs/sgtl5000.h +++ b/sound/soc/codecs/sgtl5000.h @@ -347,7 +347,7 @@ #define SGTL5000_PLL_INT_DIV_MASK 0xf800 #define SGTL5000_PLL_INT_DIV_SHIFT 11 #define SGTL5000_PLL_INT_DIV_WIDTH 5 -#define SGTL5000_PLL_FRAC_DIV_MASK 0x0700 +#define SGTL5000_PLL_FRAC_DIV_MASK 0x07ff #define SGTL5000_PLL_FRAC_DIV_SHIFT 0 #define SGTL5000_PLL_FRAC_DIV_WIDTH 11 diff --git a/sound/soc/codecs/wm8903.c b/sound/soc/codecs/wm8903.c index 43e3d76..fea5b7a 100644 --- a/sound/soc/codecs/wm8903.c +++ b/sound/soc/codecs/wm8903.c @@ -1101,6 +1101,8 @@ static const struct snd_soc_dapm_route wm8903_intercon[] = { { "ROP", NULL, "Right Speaker PGA" }, { "RON", NULL, "Right Speaker PGA" }, + { "Charge Pump", NULL, "CLK_DSP" }, + { "Left Headphone Output PGA", NULL, "Charge Pump" }, { "Right Headphone Output PGA", NULL, "Charge Pump" }, { "Left Line Output PGA", NULL, "Charge Pump" }, diff --git a/sound/soc/codecs/wm8960.c b/sound/soc/codecs/wm8960.c index 4393394..ca1a90b 100644 --- a/sound/soc/codecs/wm8960.c +++ b/sound/soc/codecs/wm8960.c @@ -801,9 +801,9 @@ static int wm8960_set_dai_pll(struct snd_soc_dai *codec_dai, int pll_id, if (pll_div.k) { reg |= 0x20; - snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 18) & 0x3f); - snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 9) & 0x1ff); - snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0x1ff); + snd_soc_write(codec, WM8960_PLL2, (pll_div.k >> 16) & 0xff); + snd_soc_write(codec, WM8960_PLL3, (pll_div.k >> 8) & 0xff); + snd_soc_write(codec, WM8960_PLL4, pll_div.k & 0xff); } snd_soc_write(codec, 
WM8960_PLL1, reg); diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c index 1f7616d..0c73274 100644 --- a/sound/soc/codecs/wm8994.c +++ b/sound/soc/codecs/wm8994.c @@ -2249,6 +2249,7 @@ static int wm8994_aif3_hw_params(struct snd_pcm_substream *substream, default: return 0; } + break; default: return 0; } diff --git a/sound/soc/sh/dma-sh7760.c b/sound/soc/sh/dma-sh7760.c index db74005..1a757c3 100644 --- a/sound/soc/sh/dma-sh7760.c +++ b/sound/soc/sh/dma-sh7760.c @@ -342,8 +342,8 @@ static int camelot_pcm_new(struct snd_soc_pcm_runtime *rtd) return 0; } -static struct snd_soc_platform sh7760_soc_platform = { - .pcm_ops = &camelot_pcm_ops, +static struct snd_soc_platform_driver sh7760_soc_platform = { + .ops = &camelot_pcm_ops, .pcm_new = camelot_pcm_new, .pcm_free = camelot_pcm_free, }; diff --git a/sound/usb/6fire/pcm.c b/sound/usb/6fire/pcm.c index d144cdb..d2fb012 100644 --- a/sound/usb/6fire/pcm.c +++ b/sound/usb/6fire/pcm.c @@ -541,7 +541,7 @@ static snd_pcm_uframes_t usb6fire_pcm_pointer( snd_pcm_uframes_t ret; if (rt->panic || !sub) - return SNDRV_PCM_STATE_XRUN; + return SNDRV_PCM_POS_XRUN; spin_lock_irqsave(&sub->lock, flags); ret = sub->dma_off; diff --git a/sound/usb/card.c b/sound/usb/card.c index 8d0a3c1..b3c7e19 100644 --- a/sound/usb/card.c +++ b/sound/usb/card.c @@ -148,14 +148,32 @@ static int snd_usb_create_stream(struct snd_usb_audio *chip, int ctrlif, int int return -EINVAL; } + alts = &iface->altsetting[0]; + altsd = get_iface_desc(alts); + + /* + * Android with both accessory and audio interfaces enabled gets the + * interface numbers wrong. + */ + if ((chip->usb_id == USB_ID(0x18d1, 0x2d04) || + chip->usb_id == USB_ID(0x18d1, 0x2d05)) && + interface == 0 && + altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC && + altsd->bInterfaceSubClass == USB_SUBCLASS_VENDOR_SPEC) { + interface = 2; + iface = usb_ifnum_to_if(dev, interface); + if (!iface) + return -EINVAL; + alts = &iface->altsetting[0]; + altsd = get_iface_desc(alts); + } + if (usb_interface_claimed(iface)) { snd_printdd(KERN_INFO "%d:%d:%d: skipping, already claimed\n", dev->devnum, ctrlif, interface); return -EINVAL; } - alts = &iface->altsetting[0]; - altsd = get_iface_desc(alts); if ((altsd->bInterfaceClass == USB_CLASS_AUDIO || altsd->bInterfaceClass == USB_CLASS_VENDOR_SPEC) && altsd->bInterfaceSubClass == USB_SUBCLASS_MIDISTREAMING) { @@ -610,7 +628,9 @@ int snd_usb_autoresume(struct snd_usb_audio *chip) int err = -ENODEV; down_read(&chip->shutdown_rwsem); - if (!chip->shutdown && !chip->probing) + if (chip->probing) + err = 0; + else if (!chip->shutdown) err = usb_autopm_get_interface(chip->pm_intf); up_read(&chip->shutdown_rwsem); diff --git a/sound/usb/card.h b/sound/usb/card.h index 19b5b5d..da61252 100644 --- a/sound/usb/card.h +++ b/sound/usb/card.h @@ -1,6 +1,7 @@ #ifndef __USBAUDIO_CARD_H #define __USBAUDIO_CARD_H +#define MAX_NR_RATES 1024 #define MAX_PACKS 20 #define MAX_PACKS_HS (MAX_PACKS * 8) /* in high speed mode */ #define MAX_URBS 8 diff --git a/sound/usb/format.c b/sound/usb/format.c index 8d042dc..b30fcd0 100644 --- a/sound/usb/format.c +++ b/sound/usb/format.c @@ -226,7 +226,7 @@ static int parse_uac2_sample_rate_range(struct audioformat *fp, int nr_triplets, int min = combine_quad(&data[2 + 12 * i]); int max = combine_quad(&data[6 + 12 * i]); int res = combine_quad(&data[10 + 12 * i]); - int rate; + unsigned int rate; if ((max < 0) || (min < 0) || (res < 0) || (max < min)) continue; @@ -253,6 +253,10 @@ static int parse_uac2_sample_rate_range(struct audioformat *fp, int 
nr_triplets, fp->rates |= snd_pcm_rate_to_rate_bit(rate); nr_rates++; + if (nr_rates >= MAX_NR_RATES) { + snd_printk(KERN_ERR "invalid uac2 rates\n"); + break; + } /* avoid endless loop */ if (res == 0) diff --git a/sound/usb/midi.c b/sound/usb/midi.c index 4376215..c635a2d 100644 --- a/sound/usb/midi.c +++ b/sound/usb/midi.c @@ -125,7 +125,6 @@ struct snd_usb_midi { struct snd_usb_midi_in_endpoint *in; } endpoints[MIDI_MAX_ENDPOINTS]; unsigned long input_triggered; - bool autopm_reference; unsigned int opened[2]; unsigned char disconnected; unsigned char input_running; @@ -1023,7 +1022,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir, { struct snd_usb_midi* umidi = substream->rmidi->private_data; struct snd_kcontrol *ctl; - int err; down_read(&umidi->disc_rwsem); if (umidi->disconnected) { @@ -1034,13 +1032,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir, mutex_lock(&umidi->mutex); if (open) { if (!umidi->opened[0] && !umidi->opened[1]) { - err = usb_autopm_get_interface(umidi->iface); - umidi->autopm_reference = err >= 0; - if (err < 0 && err != -EACCES) { - mutex_unlock(&umidi->mutex); - up_read(&umidi->disc_rwsem); - return -EIO; - } if (umidi->roland_load_ctl) { ctl = umidi->roland_load_ctl; ctl->vd[0].access |= SNDRV_CTL_ELEM_ACCESS_INACTIVE; @@ -1063,8 +1054,6 @@ static int substream_open(struct snd_rawmidi_substream *substream, int dir, snd_ctl_notify(umidi->card, SNDRV_CTL_EVENT_MASK_INFO, &ctl->id); } - if (umidi->autopm_reference) - usb_autopm_put_interface(umidi->iface); } } mutex_unlock(&umidi->mutex); @@ -2228,6 +2217,8 @@ int snd_usbmidi_create(struct snd_card *card, return err; } + usb_autopm_get_interface_no_resume(umidi->iface); + list_add_tail(&umidi->list, midi_list); return 0; } diff --git a/sound/usb/mixer_quirks.c b/sound/usb/mixer_quirks.c index b4c516a..2d224f4 100644 --- a/sound/usb/mixer_quirks.c +++ b/sound/usb/mixer_quirks.c @@ -396,7 +396,7 @@ static int snd_nativeinstruments_control_get(struct snd_kcontrol *kcontrol, else ret = usb_control_msg(dev, usb_rcvctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN, - 0, cpu_to_le16(wIndex), + 0, wIndex, &tmp, sizeof(tmp), 1000); up_read(&mixer->chip->shutdown_rwsem); @@ -427,7 +427,7 @@ static int snd_nativeinstruments_control_put(struct snd_kcontrol *kcontrol, else ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), bRequest, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT, - cpu_to_le16(wValue), cpu_to_le16(wIndex), + wValue, wIndex, NULL, 0, 1000); up_read(&mixer->chip->shutdown_rwsem); diff --git a/sound/usb/quirks-table.h b/sound/usb/quirks-table.h index 11a9f86..3c30b3b 100644 --- a/sound/usb/quirks-table.h +++ b/sound/usb/quirks-table.h @@ -146,7 +146,13 @@ .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL }, { - USB_DEVICE(0x046d, 0x0990), + .match_flags = USB_DEVICE_ID_MATCH_DEVICE | + USB_DEVICE_ID_MATCH_INT_CLASS | + USB_DEVICE_ID_MATCH_INT_SUBCLASS, + .idVendor = 0x046d, + .idProduct = 0x0990, + .bInterfaceClass = USB_CLASS_AUDIO, + .bInterfaceSubClass = USB_SUBCLASS_AUDIOCONTROL, .driver_info = (unsigned long) & (const struct snd_usb_audio_quirk) { .vendor_name = "Logitech, Inc.", .product_name = "QuickCam Pro 9000", diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c index 090e193..4a650ab 100644 --- a/sound/usb/quirks.c +++ b/sound/usb/quirks.c @@ -131,10 +131,14 @@ static int create_fixed_stream_quirk(struct snd_usb_audio *chip, unsigned *rate_table = NULL; fp = kmemdup(quirk->data, sizeof(*fp), GFP_KERNEL); - if (! 
fp) { + if (!fp) { snd_printk(KERN_ERR "cannot memdup\n"); return -ENOMEM; } + if (fp->nr_rates > MAX_NR_RATES) { + kfree(fp); + return -EINVAL; + } if (fp->nr_rates > 0) { rate_table = kmalloc(sizeof(int) * fp->nr_rates, GFP_KERNEL); if (!rate_table) { @@ -455,7 +459,7 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev) { int ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0xaf, USB_TYPE_VENDOR | USB_RECIP_DEVICE, - cpu_to_le16(1), 0, NULL, 0, 1000); + 1, 0, NULL, 0, 1000); if (ret < 0) return ret; diff --git a/tools/perf/util/map.c b/tools/perf/util/map.c index a16ecab..84e157e 100644 --- a/tools/perf/util/map.c +++ b/tools/perf/util/map.c @@ -15,7 +15,9 @@ const char *map_type__name[MAP__NR_TYPES] = { static inline int is_anon_memory(const char *filename) { - return strcmp(filename, "//anon") == 0; + return !strcmp(filename, "//anon") || + !strcmp(filename, "/dev/zero (deleted)") || + !strcmp(filename, "/anon_hugepage (deleted)"); } void map__init(struct map *self, enum map_type type, diff --git a/virt/kvm/ioapic.c b/virt/kvm/ioapic.c index 8df1ca1..e99257c 100644 --- a/virt/kvm/ioapic.c +++ b/virt/kvm/ioapic.c @@ -73,9 +73,12 @@ static unsigned long ioapic_read_indirect(struct kvm_ioapic *ioapic, u32 redir_index = (ioapic->ioregsel - 0x10) >> 1; u64 redir_content; - ASSERT(redir_index < IOAPIC_NUM_PINS); + if (redir_index < IOAPIC_NUM_PINS) + redir_content = + ioapic->redirtbl[redir_index].bits; + else + redir_content = ~0ULL; - redir_content = ioapic->redirtbl[redir_index].bits; result = (ioapic->ioregsel & 0x1) ? (redir_content >> 32) & 0xffffffff : redir_content & 0xffffffff; diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c index 88dde44..caa3bb1 100644 --- a/virt/kvm/kvm_main.c +++ b/virt/kvm/kvm_main.c @@ -1375,20 +1375,38 @@ int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data, } int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc, - gpa_t gpa) + gpa_t gpa, unsigned long len) { struct kvm_memslots *slots = kvm_memslots(kvm); int offset = offset_in_page(gpa); - gfn_t gfn = gpa >> PAGE_SHIFT; + gfn_t start_gfn = gpa >> PAGE_SHIFT; + gfn_t end_gfn = (gpa + len - 1) >> PAGE_SHIFT; + gfn_t nr_pages_needed = end_gfn - start_gfn + 1; + gfn_t nr_pages_avail; ghc->gpa = gpa; ghc->generation = slots->generation; - ghc->memslot = __gfn_to_memslot(slots, gfn); - ghc->hva = gfn_to_hva_many(ghc->memslot, gfn, NULL); - if (!kvm_is_error_hva(ghc->hva)) + ghc->len = len; + ghc->memslot = gfn_to_memslot(kvm, start_gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, &nr_pages_avail); + if (!kvm_is_error_hva(ghc->hva) && nr_pages_avail >= nr_pages_needed) { ghc->hva += offset; - else - return -EFAULT; + } else { + /* + * If the requested region crosses two memslots, we still + * verify that the entire region is valid here. + */ + while (start_gfn <= end_gfn) { + ghc->memslot = gfn_to_memslot(kvm, start_gfn); + ghc->hva = gfn_to_hva_many(ghc->memslot, start_gfn, + &nr_pages_avail); + if (kvm_is_error_hva(ghc->hva)) + return -EFAULT; + start_gfn += nr_pages_avail; + } + /* Use the slow path for cross page reads and writes. 
*/ + ghc->memslot = NULL; + } return 0; } @@ -1400,8 +1418,13 @@ int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc, struct kvm_memslots *slots = kvm_memslots(kvm); int r; + BUG_ON(len > ghc->len); + if (slots->generation != ghc->generation) - kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa); + kvm_gfn_to_hva_cache_init(kvm, ghc, ghc->gpa, ghc->len); + + if (unlikely(!ghc->memslot)) + return kvm_write_guest(kvm, ghc->gpa, data, len); if (kvm_is_error_hva(ghc->hva)) return -EFAULT;