Diffstat (limited to '3.2.62/1054_linux-3.2.55.patch')
-rw-r--r-- | 3.2.62/1054_linux-3.2.55.patch | 2495
1 file changed, 2495 insertions, 0 deletions
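
Note on reading the listing below: this is a patch that adds a patch. The outer diff (a single leading '+' on every line) creates 1054_linux-3.2.55.patch in the genpatches tree, and the 2495 inserted lines are themselves the upstream 3.2.54 -> 3.2.55 diff, so the inner markers stack on top of the outer one. A minimal sketch of the double-prefix convention, using the Makefile hunk that appears below:

    + VERSION = 3       outer '+' carrying an inner context line
    +-SUBLEVEL = 54     outer '+' carrying an inner '-' (line removed from 3.2.54)
    ++SUBLEVEL = 55     outer '+' carrying an inner '+' (line added in 3.2.55)
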
diff --git a/3.2.62/1054_linux-3.2.55.patch b/3.2.62/1054_linux-3.2.55.patch new file mode 100644 index 0000000..6071ff5 --- /dev/null +++ b/3.2.62/1054_linux-3.2.55.patch @@ -0,0 +1,2495 @@ +diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt +index 2ba8272..1b196ea 100644 +--- a/Documentation/kernel-parameters.txt ++++ b/Documentation/kernel-parameters.txt +@@ -1305,6 +1305,8 @@ bytes respectively. Such letter suffixes can also be entirely omitted. + + * dump_id: dump IDENTIFY data. + ++ * disable: Disable this device. ++ + If there are multiple matching configurations changing + the same attribute, the last one is used. + +diff --git a/Makefile b/Makefile +index 848be26..538463e 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 2 +-SUBLEVEL = 54 ++SUBLEVEL = 55 + EXTRAVERSION = + NAME = Saber-toothed Squirrel + +diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c +index 7ac5dfd..d45fd22 100644 +--- a/arch/arm/kernel/traps.c ++++ b/arch/arm/kernel/traps.c +@@ -37,7 +37,13 @@ + + #include "signal.h" + +-static const char *handler[]= { "prefetch abort", "data abort", "address exception", "interrupt" }; ++static const char *handler[]= { ++ "prefetch abort", ++ "data abort", ++ "address exception", ++ "interrupt", ++ "undefined instruction", ++}; + + void *vectors_page; + +diff --git a/arch/arm/mach-footbridge/dc21285-timer.c b/arch/arm/mach-footbridge/dc21285-timer.c +index 121ad1d..547731b 100644 +--- a/arch/arm/mach-footbridge/dc21285-timer.c ++++ b/arch/arm/mach-footbridge/dc21285-timer.c +@@ -95,17 +95,14 @@ static struct irqaction footbridge_timer_irq = { + static void __init footbridge_timer_init(void) + { + struct clock_event_device *ce = &ckevt_dc21285; ++ unsigned rate = DIV_ROUND_CLOSEST(mem_fclk_21285, 16); + +- clocksource_register_hz(&cksrc_dc21285, (mem_fclk_21285 + 8) / 16); ++ clocksource_register_hz(&cksrc_dc21285, rate); + + setup_irq(ce->irq, &footbridge_timer_irq); + +- clockevents_calc_mult_shift(ce, mem_fclk_21285, 5); +- ce->max_delta_ns = clockevent_delta2ns(0xffffff, ce); +- ce->min_delta_ns = clockevent_delta2ns(0x000004, ce); + ce->cpumask = cpumask_of(smp_processor_id()); +- +- clockevents_register_device(ce); ++ clockevents_config_and_register(ce, rate, 0x4, 0xffffff); + } + + struct sys_timer footbridge_timer = { +diff --git a/arch/ia64/kernel/machine_kexec.c b/arch/ia64/kernel/machine_kexec.c +index 3d3aeef..abd6bad 100644 +--- a/arch/ia64/kernel/machine_kexec.c ++++ b/arch/ia64/kernel/machine_kexec.c +@@ -157,7 +157,7 @@ void arch_crash_save_vmcoreinfo(void) + #endif + #ifdef CONFIG_PGTABLE_3 + VMCOREINFO_CONFIG(PGTABLE_3); +-#elif CONFIG_PGTABLE_4 ++#elif defined(CONFIG_PGTABLE_4) + VMCOREINFO_CONFIG(PGTABLE_4); + #endif + } +diff --git a/arch/powerpc/include/asm/exception-64s.h b/arch/powerpc/include/asm/exception-64s.h +index 8057f4f..da85ad4 100644 +--- a/arch/powerpc/include/asm/exception-64s.h ++++ b/arch/powerpc/include/asm/exception-64s.h +@@ -163,7 +163,7 @@ do_kvm_##n: \ + subi r1,r1,INT_FRAME_SIZE; /* alloc frame on kernel stack */ \ + beq- 1f; \ + ld r1,PACAKSAVE(r13); /* kernel stack to use */ \ +-1: cmpdi cr1,r1,0; /* check if r1 is in userspace */ \ ++1: cmpdi cr1,r1,-INT_FRAME_SIZE; /* check if r1 is in userspace */ \ + blt+ cr1,3f; /* abort if it is */ \ + li r1,(n); /* will be reloaded later */ \ + sth r1,PACA_TRAP_SAVE(r13); \ +diff --git a/arch/sh/lib/Makefile b/arch/sh/lib/Makefile +index 7b95f29..3baff31 100644 +--- a/arch/sh/lib/Makefile ++++ 
b/arch/sh/lib/Makefile +@@ -6,7 +6,7 @@ lib-y = delay.o memmove.o memchr.o \ + checksum.o strlen.o div64.o div64-generic.o + + # Extracted from libgcc +-lib-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ ++obj-y += movmem.o ashldi3.o ashrdi3.o lshrdi3.o \ + ashlsi3.o ashrsi3.o ashiftrt.o lshrsi3.o \ + udiv_qrnnd.o + +diff --git a/arch/x86/kernel/cpu/perf_event_amd_ibs.c b/arch/x86/kernel/cpu/perf_event_amd_ibs.c +index 3b8a2d3..ea34253 100644 +--- a/arch/x86/kernel/cpu/perf_event_amd_ibs.c ++++ b/arch/x86/kernel/cpu/perf_event_amd_ibs.c +@@ -9,6 +9,7 @@ + #include <linux/perf_event.h> + #include <linux/module.h> + #include <linux/pci.h> ++#include <linux/syscore_ops.h> + + #include <asm/apic.h> + +@@ -209,6 +210,18 @@ out: + return ret; + } + ++static void ibs_eilvt_setup(void) ++{ ++ /* ++ * Force LVT offset assignment for family 10h: The offsets are ++ * not assigned by the BIOS for this family, so the OS is ++ * responsible for doing it. If the OS assignment fails, fall ++ * back to BIOS settings and try to setup this. ++ */ ++ if (boot_cpu_data.x86 == 0x10) ++ force_ibs_eilvt_setup(); ++} ++ + static inline int get_ibs_lvt_offset(void) + { + u64 val; +@@ -244,6 +257,36 @@ static void clear_APIC_ibs(void *dummy) + setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1); + } + ++#ifdef CONFIG_PM ++ ++static int perf_ibs_suspend(void) ++{ ++ clear_APIC_ibs(NULL); ++ return 0; ++} ++ ++static void perf_ibs_resume(void) ++{ ++ ibs_eilvt_setup(); ++ setup_APIC_ibs(NULL); ++} ++ ++static struct syscore_ops perf_ibs_syscore_ops = { ++ .resume = perf_ibs_resume, ++ .suspend = perf_ibs_suspend, ++}; ++ ++static void perf_ibs_pm_init(void) ++{ ++ register_syscore_ops(&perf_ibs_syscore_ops); ++} ++ ++#else ++ ++static inline void perf_ibs_pm_init(void) { } ++ ++#endif ++ + static int __cpuinit + perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu) + { +@@ -270,18 +313,12 @@ static __init int amd_ibs_init(void) + if (!caps) + return -ENODEV; /* ibs not supported by the cpu */ + +- /* +- * Force LVT offset assignment for family 10h: The offsets are +- * not assigned by the BIOS for this family, so the OS is +- * responsible for doing it. If the OS assignment fails, fall +- * back to BIOS settings and try to setup this. +- */ +- if (boot_cpu_data.x86 == 0x10) +- force_ibs_eilvt_setup(); ++ ibs_eilvt_setup(); + + if (!ibs_eilvt_valid()) + goto out; + ++ perf_ibs_pm_init(); + get_online_cpus(); + ibs_caps = caps; + /* make ibs_caps visible to other cpus: */ +diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c +index e6fbb94..20061b9 100644 +--- a/arch/x86/kernel/traps.c ++++ b/arch/x86/kernel/traps.c +@@ -582,12 +582,13 @@ void __math_state_restore(struct task_struct *tsk) + /* AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception + is pending. Clear the x87 state here by setting it to fixed + values. safe_address is a random variable that should be in L1 */ +- alternative_input( +- ASM_NOP8 ASM_NOP2, +- "emms\n\t" /* clear stack tags */ +- "fildl %P[addr]", /* set F?P to defined value */ +- X86_FEATURE_FXSAVE_LEAK, +- [addr] "m" (safe_address)); ++ if (unlikely(static_cpu_has(X86_FEATURE_FXSAVE_LEAK))) { ++ asm volatile( ++ "fnclex\n\t" ++ "emms\n\t" ++ "fildl %P[addr]" /* set F?P to defined value */ ++ : : [addr] "m" (safe_address)); ++ } + + /* + * Paranoid restore. send a SIGSEGV if we fail to restore the state. 
+diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c +index 43e7753..757c716 100644 +--- a/arch/x86/kvm/lapic.c ++++ b/arch/x86/kvm/lapic.c +@@ -1278,14 +1278,12 @@ void __kvm_migrate_apic_timer(struct kvm_vcpu *vcpu) + void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu) + { + u32 data; +- void *vapic; + + if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr) + return; + +- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0); +- data = *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)); +- kunmap_atomic(vapic, KM_USER0); ++ kvm_read_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, ++ sizeof(u32)); + + apic_set_tpr(vcpu->arch.apic, data & 0xff); + } +@@ -1295,7 +1293,6 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) + u32 data, tpr; + int max_irr, max_isr; + struct kvm_lapic *apic; +- void *vapic; + + if (!irqchip_in_kernel(vcpu->kvm) || !vcpu->arch.apic->vapic_addr) + return; +@@ -1310,17 +1307,22 @@ void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu) + max_isr = 0; + data = (tpr & 0xff) | ((max_isr & 0xf0) << 8) | (max_irr << 24); + +- vapic = kmap_atomic(vcpu->arch.apic->vapic_page, KM_USER0); +- *(u32 *)(vapic + offset_in_page(vcpu->arch.apic->vapic_addr)) = data; +- kunmap_atomic(vapic, KM_USER0); ++ kvm_write_guest_cached(vcpu->kvm, &vcpu->arch.apic->vapic_cache, &data, ++ sizeof(u32)); + } + +-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) ++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr) + { + if (!irqchip_in_kernel(vcpu->kvm)) +- return; ++ return 0; ++ ++ if (vapic_addr && kvm_gfn_to_hva_cache_init(vcpu->kvm, ++ &vcpu->arch.apic->vapic_cache, ++ vapic_addr, sizeof(u32))) ++ return -EINVAL; + + vcpu->arch.apic->vapic_addr = vapic_addr; ++ return 0; + } + + int kvm_x2apic_msr_write(struct kvm_vcpu *vcpu, u32 msr, u64 data) +diff --git a/arch/x86/kvm/lapic.h b/arch/x86/kvm/lapic.h +index 138e8cc..62ae376 100644 +--- a/arch/x86/kvm/lapic.h ++++ b/arch/x86/kvm/lapic.h +@@ -15,7 +15,7 @@ struct kvm_lapic { + bool irr_pending; + void *regs; + gpa_t vapic_addr; +- struct page *vapic_page; ++ struct gfn_to_hva_cache vapic_cache; + }; + int kvm_create_lapic(struct kvm_vcpu *vcpu); + void kvm_free_lapic(struct kvm_vcpu *vcpu); +@@ -45,7 +45,7 @@ int kvm_lapic_find_highest_irr(struct kvm_vcpu *vcpu); + u64 kvm_get_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu); + void kvm_set_lapic_tscdeadline_msr(struct kvm_vcpu *vcpu, u64 data); + +-void kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); ++int kvm_lapic_set_vapic_addr(struct kvm_vcpu *vcpu, gpa_t vapic_addr); + void kvm_lapic_sync_from_vapic(struct kvm_vcpu *vcpu); + void kvm_lapic_sync_to_vapic(struct kvm_vcpu *vcpu); + +diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c +index 57867e4..7774cca 100644 +--- a/arch/x86/kvm/x86.c ++++ b/arch/x86/kvm/x86.c +@@ -3140,8 +3140,7 @@ long kvm_arch_vcpu_ioctl(struct file *filp, + r = -EFAULT; + if (copy_from_user(&va, argp, sizeof va)) + goto out; +- r = 0; +- kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); ++ r = kvm_lapic_set_vapic_addr(vcpu, va.vapic_addr); + break; + } + case KVM_X86_SETUP_MCE: { +@@ -5537,33 +5536,6 @@ static void post_kvm_run_save(struct kvm_vcpu *vcpu) + !kvm_event_needs_reinjection(vcpu); + } + +-static void vapic_enter(struct kvm_vcpu *vcpu) +-{ +- struct kvm_lapic *apic = vcpu->arch.apic; +- struct page *page; +- +- if (!apic || !apic->vapic_addr) +- return; +- +- page = gfn_to_page(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); +- +- 
vcpu->arch.apic->vapic_page = page; +-} +- +-static void vapic_exit(struct kvm_vcpu *vcpu) +-{ +- struct kvm_lapic *apic = vcpu->arch.apic; +- int idx; +- +- if (!apic || !apic->vapic_addr) +- return; +- +- idx = srcu_read_lock(&vcpu->kvm->srcu); +- kvm_release_page_dirty(apic->vapic_page); +- mark_page_dirty(vcpu->kvm, apic->vapic_addr >> PAGE_SHIFT); +- srcu_read_unlock(&vcpu->kvm->srcu, idx); +-} +- + static void update_cr8_intercept(struct kvm_vcpu *vcpu) + { + int max_irr, tpr; +@@ -5836,7 +5808,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) + } + + vcpu->srcu_idx = srcu_read_lock(&kvm->srcu); +- vapic_enter(vcpu); + + r = 1; + while (r > 0) { +@@ -5893,8 +5864,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu) + + srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx); + +- vapic_exit(vcpu); +- + return r; + } + +diff --git a/drivers/ata/ahci.c b/drivers/ata/ahci.c +index d29f6d5..f4000ee 100644 +--- a/drivers/ata/ahci.c ++++ b/drivers/ata/ahci.c +@@ -429,17 +429,22 @@ static const struct pci_device_id ahci_pci_tbl[] = { + /* Marvell */ + { PCI_VDEVICE(MARVELL, 0x6145), board_ahci_mv }, /* 6145 */ + { PCI_VDEVICE(MARVELL, 0x6121), board_ahci_mv }, /* 6121 */ +- { PCI_DEVICE(0x1b4b, 0x9123), ++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9123), + .class = PCI_CLASS_STORAGE_SATA_AHCI, + .class_mask = 0xffffff, + .driver_data = board_ahci_yes_fbs }, /* 88se9128 */ +- { PCI_DEVICE(0x1b4b, 0x9125), ++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9125), + .driver_data = board_ahci_yes_fbs }, /* 88se9125 */ +- { PCI_DEVICE(0x1b4b, 0x917a), ++ { PCI_DEVICE_SUB(PCI_VENDOR_ID_MARVELL_EXT, 0x9178, ++ PCI_VENDOR_ID_MARVELL_EXT, 0x9170), ++ .driver_data = board_ahci_yes_fbs }, /* 88se9170 */ ++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x917a), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ +- { PCI_DEVICE(0x1b4b, 0x9192), ++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9172), ++ .driver_data = board_ahci_yes_fbs }, /* 88se9172 */ ++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9192), + .driver_data = board_ahci_yes_fbs }, /* 88se9172 on some Gigabyte */ +- { PCI_DEVICE(0x1b4b, 0x91a3), ++ { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x91a3), + .driver_data = board_ahci_yes_fbs }, + { PCI_DEVICE(PCI_VENDOR_ID_MARVELL_EXT, 0x9230), + .driver_data = board_ahci_yes_fbs }, +diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c +index a0a3987..72bbb5e 100644 +--- a/drivers/ata/libata-core.c ++++ b/drivers/ata/libata-core.c +@@ -4097,6 +4097,9 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = { + { "ST3320[68]13AS", "SD1[5-9]", ATA_HORKAGE_NONCQ | + ATA_HORKAGE_FIRMWARE_WARN }, + ++ /* Seagate Momentus SpinPoint M8 seem to have FPMDA_AA issues */ ++ { "ST1000LM024 HN-M101MBB", "2AR10001", ATA_HORKAGE_BROKEN_FPDMA_AA }, ++ + /* Blacklist entries taken from Silicon Image 3124/3132 + Windows driver .inf file - also several Linux problem reports */ + { "HTS541060G9SA00", "MB3OC60D", ATA_HORKAGE_NONCQ, }, +@@ -6328,6 +6331,7 @@ static int __init ata_parse_force_one(char **cur, + { "nohrst", .lflags = ATA_LFLAG_NO_HRST }, + { "nosrst", .lflags = ATA_LFLAG_NO_SRST }, + { "norst", .lflags = ATA_LFLAG_NO_HRST | ATA_LFLAG_NO_SRST }, ++ { "disable", .horkage_on = ATA_HORKAGE_DISABLE }, + }; + char *start = *cur, *p = *cur; + char *id, *val, *endp; +diff --git a/drivers/dma/Kconfig b/drivers/dma/Kconfig +index da85c0d..a842317 100644 +--- a/drivers/dma/Kconfig ++++ b/drivers/dma/Kconfig +@@ -254,6 +254,7 @@ config NET_DMA + bool "Network: TCP receive copy offload" + depends on DMA_ENGINE && NET + 
default (INTEL_IOATDMA || FSL_DMA) ++ depends on BROKEN + help + This enables the use of DMA engines in the network stack to + offload receive copy-to-user operations, freeing CPU cycles. +diff --git a/drivers/gpio/gpio-msm-v2.c b/drivers/gpio/gpio-msm-v2.c +index 5cb1227..3104502 100644 +--- a/drivers/gpio/gpio-msm-v2.c ++++ b/drivers/gpio/gpio-msm-v2.c +@@ -249,7 +249,7 @@ static void msm_gpio_irq_mask(struct irq_data *d) + + spin_lock_irqsave(&tlmm_lock, irq_flags); + writel(TARGET_PROC_NONE, GPIO_INTR_CFG_SU(gpio)); +- clear_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); ++ clear_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); + __clear_bit(gpio, msm_gpio.enabled_irqs); + spin_unlock_irqrestore(&tlmm_lock, irq_flags); + } +@@ -261,7 +261,7 @@ static void msm_gpio_irq_unmask(struct irq_data *d) + + spin_lock_irqsave(&tlmm_lock, irq_flags); + __set_bit(gpio, msm_gpio.enabled_irqs); +- set_gpio_bits(INTR_RAW_STATUS_EN | INTR_ENABLE, GPIO_INTR_CFG(gpio)); ++ set_gpio_bits(BIT(INTR_RAW_STATUS_EN) | BIT(INTR_ENABLE), GPIO_INTR_CFG(gpio)); + writel(TARGET_PROC_SCORPION, GPIO_INTR_CFG_SU(gpio)); + spin_unlock_irqrestore(&tlmm_lock, irq_flags); + } +diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h +index 97a050f..ddb22e7 100644 +--- a/drivers/gpu/drm/i915/i915_reg.h ++++ b/drivers/gpu/drm/i915/i915_reg.h +@@ -37,6 +37,7 @@ + */ + #define INTEL_GMCH_CTRL 0x52 + #define INTEL_GMCH_VGA_DISABLE (1 << 1) ++#define SNB_GMCH_CTRL 0x50 + + /* PCI config space */ + +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index 6d36695..61b708b 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -9141,14 +9141,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector, + int intel_modeset_vga_set_state(struct drm_device *dev, bool state) + { + struct drm_i915_private *dev_priv = dev->dev_private; ++ unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; + u16 gmch_ctrl; + +- pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl); ++ pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl); + if (state) + gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE; + else + gmch_ctrl |= INTEL_GMCH_VGA_DISABLE; +- pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl); ++ pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl); + return 0; + } + +diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c +index 93bce72..414a681 100644 +--- a/drivers/gpu/drm/radeon/rs690.c ++++ b/drivers/gpu/drm/radeon/rs690.c +@@ -160,6 +160,16 @@ void rs690_mc_init(struct radeon_device *rdev) + base = RREG32_MC(R_000100_MCCFG_FB_LOCATION); + base = G_000100_MC_FB_START(base) << 16; + rdev->mc.igp_sideport_enabled = radeon_atombios_sideport_present(rdev); ++ /* Some boards seem to be configured for 128MB of sideport memory, ++ * but really only have 64MB. Just skip the sideport and use ++ * UMA memory. 
++ */ ++ if (rdev->mc.igp_sideport_enabled && ++ (rdev->mc.real_vram_size == (384 * 1024 * 1024))) { ++ base += 128 * 1024 * 1024; ++ rdev->mc.real_vram_size -= 128 * 1024 * 1024; ++ rdev->mc.mc_vram_size = rdev->mc.real_vram_size; ++ } + rs690_pm_info(rdev); + radeon_vram_location(rdev, &rdev->mc, base); + rdev->mc.gtt_base_align = rdev->mc.gtt_size - 1; +diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c +index 221b924..e223175 100644 +--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c ++++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c +@@ -144,9 +144,9 @@ static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + } + + page_offset = ((address - vma->vm_start) >> PAGE_SHIFT) + +- bo->vm_node->start - vma->vm_pgoff; +- page_last = ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT) + +- bo->vm_node->start - vma->vm_pgoff; ++ vma->vm_pgoff - bo->vm_node->start; ++ page_last = vma_pages(vma) + vma->vm_pgoff - ++ bo->vm_node->start; + + if (unlikely(page_offset >= bo->num_pages)) { + retval = VM_FAULT_SIGBUS; +diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c +index 3d630bb..e6ec920 100644 +--- a/drivers/hwmon/coretemp.c ++++ b/drivers/hwmon/coretemp.c +@@ -52,7 +52,7 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius"); + + #define BASE_SYSFS_ATTR_NO 2 /* Sysfs Base attr no for coretemp */ + #define NUM_REAL_CORES 32 /* Number of Real cores per cpu */ +-#define CORETEMP_NAME_LENGTH 17 /* String Length of attrs */ ++#define CORETEMP_NAME_LENGTH 19 /* String Length of attrs */ + #define MAX_CORE_ATTRS 4 /* Maximum no of basic attrs */ + #define TOTAL_ATTRS (MAX_CORE_ATTRS + 1) + #define MAX_CORE_DATA (NUM_REAL_CORES + BASE_SYSFS_ATTR_NO) +diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c +index 8bba438..6d05e26 100644 +--- a/drivers/md/raid10.c ++++ b/drivers/md/raid10.c +@@ -997,7 +997,7 @@ read_again: + /* Could not read all from this device, so we will + * need another r10_bio. 
+ */ +- sectors_handled = (r10_bio->sectors + max_sectors ++ sectors_handled = (r10_bio->sector + max_sectors + - bio->bi_sector); + r10_bio->sectors = max_sectors; + spin_lock_irq(&conf->device_lock); +@@ -1005,7 +1005,7 @@ read_again: + bio->bi_phys_segments = 2; + else + bio->bi_phys_segments++; +- spin_unlock(&conf->device_lock); ++ spin_unlock_irq(&conf->device_lock); + /* Cannot call generic_make_request directly + * as that will be queued in __generic_make_request + * and subsequent mempool_alloc might block +@@ -2563,10 +2563,6 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, + if (j == conf->copies) { + /* Cannot recover, so abort the recovery or + * record a bad block */ +- put_buf(r10_bio); +- if (rb2) +- atomic_dec(&rb2->remaining); +- r10_bio = rb2; + if (any_working) { + /* problem is that there are bad blocks + * on other device(s) +@@ -2590,6 +2586,10 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, + conf->mirrors[i].recovery_disabled + = mddev->recovery_disabled; + } ++ put_buf(r10_bio); ++ if (rb2) ++ atomic_dec(&rb2->remaining); ++ r10_bio = rb2; + break; + } + } +diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c +index 26ef63a..fb67833 100644 +--- a/drivers/md/raid5.c ++++ b/drivers/md/raid5.c +@@ -3084,7 +3084,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) + */ + set_bit(R5_Insync, &dev->flags); + +- if (rdev && test_bit(R5_WriteError, &dev->flags)) { ++ if (test_bit(R5_WriteError, &dev->flags)) { + clear_bit(R5_Insync, &dev->flags); + if (!test_bit(Faulty, &rdev->flags)) { + s->handle_bad_blocks = 1; +@@ -3092,7 +3092,7 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s) + } else + clear_bit(R5_WriteError, &dev->flags); + } +- if (rdev && test_bit(R5_MadeGood, &dev->flags)) { ++ if (test_bit(R5_MadeGood, &dev->flags)) { + if (!test_bit(Faulty, &rdev->flags)) { + s->handle_bad_blocks = 1; + atomic_inc(&rdev->nr_pending); +diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +index bbb6692..e367ab1 100644 +--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c ++++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +@@ -108,6 +108,7 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + struct sk_buff *skb = tx_buf->skb; + u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons; + int nbd; ++ u16 split_bd_len = 0; + + /* prefetch skb end pointer to speedup dev_kfree_skb() */ + prefetch(&skb->end); +@@ -115,11 +116,8 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + DP(BNX2X_MSG_FP, "fp[%d]: pkt_idx %d buff @(%p)->skb %p\n", + txdata->txq_index, idx, tx_buf, skb); + +- /* unmap first bd */ + DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx); + tx_start_bd = &txdata->tx_desc_ring[bd_idx].start_bd; +- dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), +- BD_UNMAP_LEN(tx_start_bd), DMA_TO_DEVICE); + + + nbd = le16_to_cpu(tx_start_bd->nbd) - 1; +@@ -138,12 +136,19 @@ static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fp_txdata *txdata, + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + +- /* ...and the TSO split header bd since they have no mapping */ ++ /* TSO headers+data bds share a common mapping. 
See bnx2x_tx_split() */ + if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) { ++ tx_data_bd = &txdata->tx_desc_ring[bd_idx].reg_bd; ++ split_bd_len = BD_UNMAP_LEN(tx_data_bd); + --nbd; + bd_idx = TX_BD(NEXT_TX_IDX(bd_idx)); + } + ++ /* unmap first bd */ ++ dma_unmap_single(&bp->pdev->dev, BD_UNMAP_ADDR(tx_start_bd), ++ BD_UNMAP_LEN(tx_start_bd) + split_bd_len, ++ DMA_TO_DEVICE); ++ + /* now free frags */ + while (nbd > 0) { + +diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c +index 1bc927a..d5793d3 100644 +--- a/drivers/net/ethernet/broadcom/tg3.c ++++ b/drivers/net/ethernet/broadcom/tg3.c +@@ -14537,6 +14537,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp) + /* Clear this out for sanity. */ + tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0); + ++ /* Clear TG3PCI_REG_BASE_ADDR to prevent hangs. */ ++ tw32(TG3PCI_REG_BASE_ADDR, 0); ++ + pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, + &pci_state_reg); + if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 && +diff --git a/drivers/net/ethernet/ibm/ehea/ehea_main.c b/drivers/net/ethernet/ibm/ehea/ehea_main.c +index bfeccbf..297f0b6 100644 +--- a/drivers/net/ethernet/ibm/ehea/ehea_main.c ++++ b/drivers/net/ethernet/ibm/ehea/ehea_main.c +@@ -3015,7 +3015,7 @@ struct ehea_port *ehea_setup_single_port(struct ehea_adapter *adapter, + + dev->hw_features = NETIF_F_SG | NETIF_F_TSO + | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX | NETIF_F_LRO; +- dev->features = NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_TSO ++ dev->features = NETIF_F_SG | NETIF_F_TSO + | NETIF_F_HIGHDMA | NETIF_F_IP_CSUM | NETIF_F_HW_VLAN_TX + | NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER + | NETIF_F_RXCSUM; +diff --git a/drivers/net/ethernet/tehuti/tehuti.c b/drivers/net/ethernet/tehuti/tehuti.c +index 3a90af6..cc7e7de 100644 +--- a/drivers/net/ethernet/tehuti/tehuti.c ++++ b/drivers/net/ethernet/tehuti/tehuti.c +@@ -1995,7 +1995,6 @@ bdx_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + ndev->features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO + | NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX | + NETIF_F_HW_VLAN_FILTER | NETIF_F_RXCSUM +- /*| NETIF_F_FRAGLIST */ + ; + ndev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_HW_VLAN_TX; +diff --git a/drivers/net/ethernet/xilinx/ll_temac_main.c b/drivers/net/ethernet/xilinx/ll_temac_main.c +index e26945d..1b7b3be 100644 +--- a/drivers/net/ethernet/xilinx/ll_temac_main.c ++++ b/drivers/net/ethernet/xilinx/ll_temac_main.c +@@ -1011,7 +1011,7 @@ static int __devinit temac_of_probe(struct platform_device *op) + dev_set_drvdata(&op->dev, ndev); + SET_NETDEV_DEV(ndev, &op->dev); + ndev->flags &= ~IFF_MULTICAST; /* clear multicast */ +- ndev->features = NETIF_F_SG | NETIF_F_FRAGLIST; ++ ndev->features = NETIF_F_SG; + ndev->netdev_ops = &temac_netdev_ops; + ndev->ethtool_ops = &temac_ethtool_ops; + #if 0 +diff --git a/drivers/net/hamradio/hdlcdrv.c b/drivers/net/hamradio/hdlcdrv.c +index a4a3516..3b3a7e0 100644 +--- a/drivers/net/hamradio/hdlcdrv.c ++++ b/drivers/net/hamradio/hdlcdrv.c +@@ -571,6 +571,8 @@ static int hdlcdrv_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + case HDLCDRVCTL_CALIBRATE: + if(!capable(CAP_SYS_RAWIO)) + return -EPERM; ++ if (bi.data.calibrate > INT_MAX / s->par.bitrate) ++ return -EINVAL; + s->hdlctx.calibrate = bi.data.calibrate * s->par.bitrate / 16; + return 0; + +diff --git a/drivers/net/hamradio/yam.c b/drivers/net/hamradio/yam.c +index 96a98d2..e4260ab 100644 +--- a/drivers/net/hamradio/yam.c ++++ b/drivers/net/hamradio/yam.c +@@ -1060,6 +1060,7 @@ static 
int yam_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) + break; + + case SIOCYAMGCFG: ++ memset(&yi, 0, sizeof(yi)); + yi.cfg.mask = 0xffffffff; + yi.cfg.iobase = yp->iobase; + yi.cfg.irq = yp->irq; +diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c +index 136ecf3..dc60aec 100644 +--- a/drivers/net/usb/dm9601.c ++++ b/drivers/net/usb/dm9601.c +@@ -445,7 +445,12 @@ static int dm9601_bind(struct usbnet *dev, struct usb_interface *intf) + dev->net->ethtool_ops = &dm9601_ethtool_ops; + dev->net->hard_header_len += DM_TX_OVERHEAD; + dev->hard_mtu = dev->net->mtu + dev->net->hard_header_len; +- dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD; ++ ++ /* dm9620/21a require room for 4 byte padding, even in dm9601 ++ * mode, so we need +1 to be able to receive full size ++ * ethernet frames. ++ */ ++ dev->rx_urb_size = dev->net->mtu + ETH_HLEN + DM_RX_OVERHEAD + 1; + + dev->mii.dev = dev->net; + dev->mii.mdio_read = dm9601_mdio_read; +@@ -531,7 +536,7 @@ static int dm9601_rx_fixup(struct usbnet *dev, struct sk_buff *skb) + static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + gfp_t flags) + { +- int len; ++ int len, pad; + + /* format: + b1: packet length low +@@ -539,12 +544,23 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + b3..n: packet data + */ + +- len = skb->len; ++ len = skb->len + DM_TX_OVERHEAD; + +- if (skb_headroom(skb) < DM_TX_OVERHEAD) { ++ /* workaround for dm962x errata with tx fifo getting out of ++ * sync if a USB bulk transfer retry happens right after a ++ * packet with odd / maxpacket length by adding up to 3 bytes ++ * padding. ++ */ ++ while ((len & 1) || !(len % dev->maxpacket)) ++ len++; ++ ++ len -= DM_TX_OVERHEAD; /* hw header doesn't count as part of length */ ++ pad = len - skb->len; ++ ++ if (skb_headroom(skb) < DM_TX_OVERHEAD || skb_tailroom(skb) < pad) { + struct sk_buff *skb2; + +- skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, 0, flags); ++ skb2 = skb_copy_expand(skb, DM_TX_OVERHEAD, pad, flags); + dev_kfree_skb_any(skb); + skb = skb2; + if (!skb) +@@ -553,10 +569,10 @@ static struct sk_buff *dm9601_tx_fixup(struct usbnet *dev, struct sk_buff *skb, + + __skb_push(skb, DM_TX_OVERHEAD); + +- /* usbnet adds padding if length is a multiple of packet size +- if so, adjust length value in header */ +- if ((skb->len % dev->maxpacket) == 0) +- len++; ++ if (pad) { ++ memset(skb->data + skb->len, 0, pad); ++ __skb_put(skb, pad); ++ } + + skb->data[0] = len; + skb->data[1] = len >> 8; +diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c +index b592016..f4caeb3 100644 +--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c ++++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c +@@ -76,9 +76,16 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) + mask2 |= ATH9K_INT_CST; + if (isr2 & AR_ISR_S2_TSFOOR) + mask2 |= ATH9K_INT_TSFOOR; ++ ++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { ++ REG_WRITE(ah, AR_ISR_S2, isr2); ++ isr &= ~AR_ISR_BCNMISC; ++ } + } + +- isr = REG_READ(ah, AR_ISR_RAC); ++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) ++ isr = REG_READ(ah, AR_ISR_RAC); ++ + if (isr == 0xffffffff) { + *masked = 0; + return false; +@@ -97,11 +104,23 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) + + *masked |= ATH9K_INT_TX; + +- s0_s = REG_READ(ah, AR_ISR_S0_S); ++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { ++ s0_s = REG_READ(ah, AR_ISR_S0_S); ++ s1_s = REG_READ(ah, 
AR_ISR_S1_S); ++ } else { ++ s0_s = REG_READ(ah, AR_ISR_S0); ++ REG_WRITE(ah, AR_ISR_S0, s0_s); ++ s1_s = REG_READ(ah, AR_ISR_S1); ++ REG_WRITE(ah, AR_ISR_S1, s1_s); ++ ++ isr &= ~(AR_ISR_TXOK | ++ AR_ISR_TXDESC | ++ AR_ISR_TXERR | ++ AR_ISR_TXEOL); ++ } ++ + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXOK); + ah->intr_txqs |= MS(s0_s, AR_ISR_S0_QCU_TXDESC); +- +- s1_s = REG_READ(ah, AR_ISR_S1_S); + ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXERR); + ah->intr_txqs |= MS(s1_s, AR_ISR_S1_QCU_TXEOL); + } +@@ -114,13 +133,15 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) + *masked |= mask2; + } + +- if (AR_SREV_9100(ah)) +- return true; +- +- if (isr & AR_ISR_GENTMR) { ++ if (!AR_SREV_9100(ah) && (isr & AR_ISR_GENTMR)) { + u32 s5_s; + +- s5_s = REG_READ(ah, AR_ISR_S5_S); ++ if (pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED) { ++ s5_s = REG_READ(ah, AR_ISR_S5_S); ++ } else { ++ s5_s = REG_READ(ah, AR_ISR_S5); ++ } ++ + ah->intr_gen_timer_trigger = + MS(s5_s, AR_ISR_S5_GENTIMER_TRIG); + +@@ -133,8 +154,21 @@ static bool ar9002_hw_get_isr(struct ath_hw *ah, enum ath9k_int *masked) + if ((s5_s & AR_ISR_S5_TIM_TIMER) && + !(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) + *masked |= ATH9K_INT_TIM_TIMER; ++ ++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { ++ REG_WRITE(ah, AR_ISR_S5, s5_s); ++ isr &= ~AR_ISR_GENTMR; ++ } + } + ++ if (!(pCap->hw_caps & ATH9K_HW_CAP_RAC_SUPPORTED)) { ++ REG_WRITE(ah, AR_ISR, isr); ++ REG_READ(ah, AR_ISR); ++ } ++ ++ if (AR_SREV_9100(ah)) ++ return true; ++ + if (sync_cause) { + fatal_int = + (sync_cause & +diff --git a/drivers/net/wireless/ath/ath9k/htc_drv_main.c b/drivers/net/wireless/ath/ath9k/htc_drv_main.c +index 0b9a0e8..b6cd36c 100644 +--- a/drivers/net/wireless/ath/ath9k/htc_drv_main.c ++++ b/drivers/net/wireless/ath/ath9k/htc_drv_main.c +@@ -139,21 +139,26 @@ static void ath9k_htc_bssid_iter(void *data, u8 *mac, struct ieee80211_vif *vif) + struct ath9k_vif_iter_data *iter_data = data; + int i; + +- for (i = 0; i < ETH_ALEN; i++) +- iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); ++ if (iter_data->hw_macaddr != NULL) { ++ for (i = 0; i < ETH_ALEN; i++) ++ iter_data->mask[i] &= ~(iter_data->hw_macaddr[i] ^ mac[i]); ++ } else { ++ iter_data->hw_macaddr = mac; ++ } + } + +-static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, ++static void ath9k_htc_set_mac_bssid_mask(struct ath9k_htc_priv *priv, + struct ieee80211_vif *vif) + { + struct ath_common *common = ath9k_hw_common(priv->ah); + struct ath9k_vif_iter_data iter_data; + + /* +- * Use the hardware MAC address as reference, the hardware uses it +- * together with the BSSID mask when matching addresses. ++ * Pick the MAC address of the first interface as the new hardware ++ * MAC address. The hardware will use it together with the BSSID mask ++ * when matching addresses. 
+ */ +- iter_data.hw_macaddr = common->macaddr; ++ iter_data.hw_macaddr = NULL; + memset(&iter_data.mask, 0xff, ETH_ALEN); + + if (vif) +@@ -164,6 +169,10 @@ static void ath9k_htc_set_bssid_mask(struct ath9k_htc_priv *priv, + &iter_data); + + memcpy(common->bssidmask, iter_data.mask, ETH_ALEN); ++ ++ if (iter_data.hw_macaddr) ++ memcpy(common->macaddr, iter_data.hw_macaddr, ETH_ALEN); ++ + ath_hw_setbssidmask(common); + } + +@@ -1100,7 +1109,7 @@ static int ath9k_htc_add_interface(struct ieee80211_hw *hw, + goto out; + } + +- ath9k_htc_set_bssid_mask(priv, vif); ++ ath9k_htc_set_mac_bssid_mask(priv, vif); + + priv->vif_slot |= (1 << avp->index); + priv->nvifs++; +@@ -1163,7 +1172,7 @@ static void ath9k_htc_remove_interface(struct ieee80211_hw *hw, + + ath9k_htc_set_opmode(priv); + +- ath9k_htc_set_bssid_mask(priv, vif); ++ ath9k_htc_set_mac_bssid_mask(priv, vif); + + /* + * Stop ANI only if there are no associated station interfaces. +diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c +index a59267a..ad33126 100644 +--- a/drivers/net/wireless/ath/ath9k/main.c ++++ b/drivers/net/wireless/ath/ath9k/main.c +@@ -1357,8 +1357,9 @@ void ath9k_calculate_iter_data(struct ieee80211_hw *hw, + struct ath_common *common = ath9k_hw_common(ah); + + /* +- * Use the hardware MAC address as reference, the hardware uses it +- * together with the BSSID mask when matching addresses. ++ * Pick the MAC address of the first interface as the new hardware ++ * MAC address. The hardware will use it together with the BSSID mask ++ * when matching addresses. + */ + memset(iter_data, 0, sizeof(*iter_data)); + iter_data->hw_macaddr = common->macaddr; +diff --git a/drivers/net/wireless/rtlwifi/pci.c b/drivers/net/wireless/rtlwifi/pci.c +index 47ba0f7..38b793b 100644 +--- a/drivers/net/wireless/rtlwifi/pci.c ++++ b/drivers/net/wireless/rtlwifi/pci.c +@@ -685,6 +685,8 @@ static void _rtl_pci_rx_interrupt(struct ieee80211_hw *hw) + }; + int index = rtlpci->rx_ring[rx_queue_idx].idx; + ++ if (rtlpci->driver_is_goingto_unload) ++ return; + /*RX NORMAL PKT */ + while (count--) { + /*rx descriptor */ +@@ -1563,6 +1565,7 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) + */ + set_hal_stop(rtlhal); + ++ rtlpci->driver_is_goingto_unload = true; + rtlpriv->cfg->ops->disable_interrupt(hw); + tasklet_kill(&rtlpriv->works.ips_leave_tasklet); + +@@ -1580,7 +1583,6 @@ static void rtl_pci_stop(struct ieee80211_hw *hw) + ppsc->rfchange_inprogress = true; + spin_unlock_irqrestore(&rtlpriv->locks.rf_ps_lock, flags); + +- rtlpci->driver_is_goingto_unload = true; + rtlpriv->cfg->ops->hw_disable(hw); + rtlpriv->cfg->ops->led_control(hw, LED_CTL_POWER_OFF); + +diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c +index 363a5c6..9f1fec1 100644 +--- a/drivers/pci/pci.c ++++ b/drivers/pci/pci.c +@@ -1915,10 +1915,6 @@ void pci_enable_ari(struct pci_dev *dev) + if (!pci_is_pcie(dev) || dev->devfn) + return; + +- pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI); +- if (!pos) +- return; +- + bridge = dev->bus->self; + if (!bridge || !pci_is_pcie(bridge)) + return; +@@ -1937,10 +1933,14 @@ void pci_enable_ari(struct pci_dev *dev) + return; + + pci_read_config_word(bridge, pos + PCI_EXP_DEVCTL2, &ctrl); +- ctrl |= PCI_EXP_DEVCTL2_ARI; ++ if (pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ARI)) { ++ ctrl |= PCI_EXP_DEVCTL2_ARI; ++ bridge->ari_enabled = 1; ++ } else { ++ ctrl &= ~PCI_EXP_DEVCTL2_ARI; ++ bridge->ari_enabled = 0; ++ } + pci_write_config_word(bridge, pos + PCI_EXP_DEVCTL2, ctrl); +- +- 
bridge->ari_enabled = 1; + } + + /** +diff --git a/drivers/staging/comedi/drivers/cb_pcidio.c b/drivers/staging/comedi/drivers/cb_pcidio.c +index 79477a5..b3c9c8f 100644 +--- a/drivers/staging/comedi/drivers/cb_pcidio.c ++++ b/drivers/staging/comedi/drivers/cb_pcidio.c +@@ -56,10 +56,6 @@ struct pcidio_board { + const char *name; /* name of the board */ + int dev_id; + int n_8255; /* number of 8255 chips on board */ +- +- /* indices of base address regions */ +- int pcicontroler_badrindex; +- int dioregs_badrindex; + }; + + static const struct pcidio_board pcidio_boards[] = { +@@ -67,22 +63,16 @@ static const struct pcidio_board pcidio_boards[] = { + .name = "pci-dio24", + .dev_id = 0x0028, + .n_8255 = 1, +- .pcicontroler_badrindex = 1, +- .dioregs_badrindex = 2, + }, + { + .name = "pci-dio24h", + .dev_id = 0x0014, + .n_8255 = 1, +- .pcicontroler_badrindex = 1, +- .dioregs_badrindex = 2, + }, + { + .name = "pci-dio48h", + .dev_id = 0x000b, + .n_8255 = 2, +- .pcicontroler_badrindex = 0, +- .dioregs_badrindex = 1, + }, + }; + +@@ -244,10 +234,15 @@ found: + ("cb_pcidio: failed to enable PCI device and request regions\n"); + return -EIO; + } +- devpriv->dio_reg_base +- = ++ /* ++ * Use PCI BAR 2 region if non-zero length, else use PCI BAR 1 region. ++ * PCI BAR 1 is only used for older PCI-DIO48H boards. At some point ++ * the PCI-DIO48H was redesigned to use the same PCI interface chip ++ * (and same PCI BAR region) as the other boards. ++ */ ++ devpriv->dio_reg_base = + pci_resource_start(devpriv->pci_dev, +- pcidio_boards[index].dioregs_badrindex); ++ (pci_resource_len(pcidev, 2) ? 2 : 1)); + + /* + * Allocate the subdevice structures. alloc_subdevice() is a +diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c +index 3effde2..45c13a6 100644 +--- a/drivers/target/iscsi/iscsi_target.c ++++ b/drivers/target/iscsi/iscsi_target.c +@@ -861,24 +861,22 @@ static int iscsit_handle_scsi_cmd( + if (((hdr->flags & ISCSI_FLAG_CMD_READ) || + (hdr->flags & ISCSI_FLAG_CMD_WRITE)) && !hdr->data_length) { + /* +- * Vmware ESX v3.0 uses a modified Cisco Initiator (v3.4.2) +- * that adds support for RESERVE/RELEASE. There is a bug +- * add with this new functionality that sets R/W bits when +- * neither CDB carries any READ or WRITE datapayloads. ++ * From RFC-3720 Section 10.3.1: ++ * ++ * "Either or both of R and W MAY be 1 when either the ++ * Expected Data Transfer Length and/or Bidirectional Read ++ * Expected Data Transfer Length are 0" ++ * ++ * For this case, go ahead and clear the unnecssary bits ++ * to avoid any confusion with ->data_direction. + */ +- if ((hdr->cdb[0] == 0x16) || (hdr->cdb[0] == 0x17)) { +- hdr->flags &= ~ISCSI_FLAG_CMD_READ; +- hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; +- goto done; +- } ++ hdr->flags &= ~ISCSI_FLAG_CMD_READ; ++ hdr->flags &= ~ISCSI_FLAG_CMD_WRITE; + +- pr_err("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" ++ pr_warn("ISCSI_FLAG_CMD_READ or ISCSI_FLAG_CMD_WRITE" + " set when Expected Data Transfer Length is 0 for" +- " CDB: 0x%02x. 
Bad iSCSI Initiator.\n", hdr->cdb[0]); +- return iscsit_add_reject(ISCSI_REASON_BOOKMARK_INVALID, 1, +- buf, conn); ++ " CDB: 0x%02x, Fixing up flags\n", hdr->cdb[0]); + } +-done: + + if (!(hdr->flags & ISCSI_FLAG_CMD_READ) && + !(hdr->flags & ISCSI_FLAG_CMD_WRITE) && (hdr->data_length != 0)) { +diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c +index 827f933..15685c3 100644 +--- a/drivers/usb/host/xhci-pci.c ++++ b/drivers/usb/host/xhci-pci.c +@@ -121,7 +121,12 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) + * any other sleep) on Haswell machines with LPT and LPT-LP + * with the new Intel BIOS + */ +- xhci->quirks |= XHCI_SPURIOUS_WAKEUP; ++ /* Limit the quirk to only known vendors, as this triggers ++ * yet another BIOS bug on some other machines ++ * https://bugzilla.kernel.org/show_bug.cgi?id=66171 ++ */ ++ if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP) ++ xhci->quirks |= XHCI_SPURIOUS_WAKEUP; + } + if (pdev->vendor == PCI_VENDOR_ID_ETRON && + pdev->device == PCI_DEVICE_ID_ASROCK_P67) { +diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c +index 6203d80..b24e2d3 100644 +--- a/fs/ceph/mds_client.c ++++ b/fs/ceph/mds_client.c +@@ -608,6 +608,8 @@ static void __unregister_request(struct ceph_mds_client *mdsc, + req->r_unsafe_dir = NULL; + } + ++ complete_all(&req->r_safe_completion); ++ + ceph_mdsc_put_request(req); + } + +@@ -1815,8 +1817,11 @@ static int __do_request(struct ceph_mds_client *mdsc, + int mds = -1; + int err = -EAGAIN; + +- if (req->r_err || req->r_got_result) ++ if (req->r_err || req->r_got_result) { ++ if (req->r_aborted) ++ __unregister_request(mdsc, req); + goto out; ++ } + + if (req->r_timeout && + time_after_eq(jiffies, req->r_started + req->r_timeout)) { +@@ -2124,7 +2129,6 @@ static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg) + if (head->safe) { + req->r_got_safe = true; + __unregister_request(mdsc, req); +- complete_all(&req->r_safe_completion); + + if (req->r_got_unsafe) { + /* +diff --git a/fs/ext2/super.c b/fs/ext2/super.c +index bd8ac16..94b9e32 100644 +--- a/fs/ext2/super.c ++++ b/fs/ext2/super.c +@@ -1448,6 +1448,7 @@ static ssize_t ext2_quota_write(struct super_block *sb, int type, + sb->s_blocksize - offset : towrite; + + tmp_bh.b_state = 0; ++ tmp_bh.b_size = sb->s_blocksize; + err = ext2_get_block(inode, blk, &tmp_bh, 1); + if (err < 0) + goto out; +diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h +index 22c71b9..68b1602 100644 +--- a/fs/ext4/ext4.h ++++ b/fs/ext4/ext4.h +@@ -278,6 +278,16 @@ struct ext4_io_submit { + /* Translate # of blks to # of clusters */ + #define EXT4_NUM_B2C(sbi, blks) (((blks) + (sbi)->s_cluster_ratio - 1) >> \ + (sbi)->s_cluster_bits) ++/* Mask out the low bits to get the starting block of the cluster */ ++#define EXT4_PBLK_CMASK(s, pblk) ((pblk) & \ ++ ~((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) ++#define EXT4_LBLK_CMASK(s, lblk) ((lblk) & \ ++ ~((ext4_lblk_t) (s)->s_cluster_ratio - 1)) ++/* Get the cluster offset */ ++#define EXT4_PBLK_COFF(s, pblk) ((pblk) & \ ++ ((ext4_fsblk_t) (s)->s_cluster_ratio - 1)) ++#define EXT4_LBLK_COFF(s, lblk) ((lblk) & \ ++ ((ext4_lblk_t) (s)->s_cluster_ratio - 1)) + + /* + * Structure of a blocks group descriptor +diff --git a/fs/ext4/ext4_jbd2.c b/fs/ext4/ext4_jbd2.c +index d0b8f98..9995b99 100644 +--- a/fs/ext4/ext4_jbd2.c ++++ b/fs/ext4/ext4_jbd2.c +@@ -113,6 +113,10 @@ int __ext4_handle_dirty_metadata(const char *where, unsigned int line, + if (WARN_ON_ONCE(err)) { + ext4_journal_abort_handle(where, line, 
__func__, bh, + handle, err); ++ ext4_error_inode(inode, where, line, ++ bh->b_blocknr, ++ "journal_dirty_metadata failed: " ++ "errcode %d", err); + } + } else { + if (inode) +diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c +index 3e8fc80..bf35fe0 100644 +--- a/fs/ext4/extents.c ++++ b/fs/ext4/extents.c +@@ -318,8 +318,10 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext) + { + ext4_fsblk_t block = ext4_ext_pblock(ext); + int len = ext4_ext_get_actual_len(ext); ++ ext4_lblk_t lblock = le32_to_cpu(ext->ee_block); ++ ext4_lblk_t last = lblock + len - 1; + +- if (len == 0) ++ if (lblock > last) + return 0; + return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len); + } +@@ -345,11 +347,26 @@ static int ext4_valid_extent_entries(struct inode *inode, + if (depth == 0) { + /* leaf entries */ + struct ext4_extent *ext = EXT_FIRST_EXTENT(eh); ++ struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; ++ ext4_fsblk_t pblock = 0; ++ ext4_lblk_t lblock = 0; ++ ext4_lblk_t prev = 0; ++ int len = 0; + while (entries) { + if (!ext4_valid_extent(inode, ext)) + return 0; ++ ++ /* Check for overlapping extents */ ++ lblock = le32_to_cpu(ext->ee_block); ++ len = ext4_ext_get_actual_len(ext); ++ if ((lblock <= prev) && prev) { ++ pblock = ext4_ext_pblock(ext); ++ es->s_last_error_block = cpu_to_le64(pblock); ++ return 0; ++ } + ext++; + entries--; ++ prev = lblock + len - 1; + } + } else { + struct ext4_extent_idx *ext_idx = EXT_FIRST_INDEX(eh); +@@ -1642,8 +1659,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, + depth = ext_depth(inode); + if (!path[depth].p_ext) + goto out; +- b2 = le32_to_cpu(path[depth].p_ext->ee_block); +- b2 &= ~(sbi->s_cluster_ratio - 1); ++ b2 = EXT4_LBLK_CMASK(sbi, le32_to_cpu(path[depth].p_ext->ee_block)); + + /* + * get the next allocated block if the extent in the path +@@ -1653,7 +1669,7 @@ static unsigned int ext4_ext_check_overlap(struct ext4_sb_info *sbi, + b2 = ext4_ext_next_allocated_block(path); + if (b2 == EXT_MAX_BLOCKS) + goto out; +- b2 &= ~(sbi->s_cluster_ratio - 1); ++ b2 = EXT4_LBLK_CMASK(sbi, b2); + } + + /* check for wrap through zero on extent logical start block*/ +@@ -2288,7 +2304,7 @@ static int ext4_remove_blocks(handle_t *handle, struct inode *inode, + * truncate operation has removed all of the blocks in + * the cluster. 
+ */ +- if (pblk & (sbi->s_cluster_ratio - 1) && ++ if (EXT4_PBLK_COFF(sbi, pblk) && + (ee_len == num)) + *partial_cluster = EXT4_B2C(sbi, pblk); + else +@@ -3491,7 +3507,7 @@ int ext4_find_delalloc_cluster(struct inode *inode, ext4_lblk_t lblk, + { + struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb); + ext4_lblk_t lblk_start, lblk_end; +- lblk_start = lblk & (~(sbi->s_cluster_ratio - 1)); ++ lblk_start = EXT4_LBLK_CMASK(sbi, lblk); + lblk_end = lblk_start + sbi->s_cluster_ratio - 1; + + return ext4_find_delalloc_range(inode, lblk_start, lblk_end, +@@ -3551,9 +3567,9 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, + trace_ext4_get_reserved_cluster_alloc(inode, lblk_start, num_blks); + + /* Check towards left side */ +- c_offset = lblk_start & (sbi->s_cluster_ratio - 1); ++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start); + if (c_offset) { +- lblk_from = lblk_start & (~(sbi->s_cluster_ratio - 1)); ++ lblk_from = EXT4_LBLK_CMASK(sbi, lblk_start); + lblk_to = lblk_from + c_offset - 1; + + if (ext4_find_delalloc_range(inode, lblk_from, lblk_to, 0)) +@@ -3561,7 +3577,7 @@ get_reserved_cluster_alloc(struct inode *inode, ext4_lblk_t lblk_start, + } + + /* Now check towards right. */ +- c_offset = (lblk_start + num_blks) & (sbi->s_cluster_ratio - 1); ++ c_offset = EXT4_LBLK_COFF(sbi, lblk_start + num_blks); + if (allocated_clusters && c_offset) { + lblk_from = lblk_start + num_blks; + lblk_to = lblk_from + (sbi->s_cluster_ratio - c_offset) - 1; +@@ -3754,7 +3770,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, + struct ext4_ext_path *path) + { + struct ext4_sb_info *sbi = EXT4_SB(sb); +- ext4_lblk_t c_offset = map->m_lblk & (sbi->s_cluster_ratio-1); ++ ext4_lblk_t c_offset = EXT4_LBLK_COFF(sbi, map->m_lblk); + ext4_lblk_t ex_cluster_start, ex_cluster_end; + ext4_lblk_t rr_cluster_start, rr_cluster_end; + ext4_lblk_t ee_block = le32_to_cpu(ex->ee_block); +@@ -3773,8 +3789,7 @@ static int get_implied_cluster_alloc(struct super_block *sb, + (rr_cluster_start == ex_cluster_start)) { + if (rr_cluster_start == ex_cluster_end) + ee_start += ee_len - 1; +- map->m_pblk = (ee_start & ~(sbi->s_cluster_ratio - 1)) + +- c_offset; ++ map->m_pblk = EXT4_PBLK_CMASK(sbi, ee_start) + c_offset; + map->m_len = min(map->m_len, + (unsigned) sbi->s_cluster_ratio - c_offset); + /* +@@ -4052,7 +4067,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, + */ + map->m_flags &= ~EXT4_MAP_FROM_CLUSTER; + newex.ee_block = cpu_to_le32(map->m_lblk); +- cluster_offset = map->m_lblk & (sbi->s_cluster_ratio-1); ++ cluster_offset = EXT4_LBLK_CMASK(sbi, map->m_lblk); + + /* + * If we are doing bigalloc, check to see if the extent returned +@@ -4120,7 +4135,7 @@ int ext4_ext_map_blocks(handle_t *handle, struct inode *inode, + * needed so that future calls to get_implied_cluster_alloc() + * work correctly. 
+ */ +- offset = map->m_lblk & (sbi->s_cluster_ratio - 1); ++ offset = EXT4_LBLK_COFF(sbi, map->m_lblk); + ar.len = EXT4_NUM_B2C(sbi, offset+allocated); + ar.goal -= offset; + ar.logical -= offset; +diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c +index 9b8c131..81feb17 100644 +--- a/fs/ext4/mballoc.c ++++ b/fs/ext4/mballoc.c +@@ -3378,6 +3378,9 @@ static void ext4_mb_pa_callback(struct rcu_head *head) + { + struct ext4_prealloc_space *pa; + pa = container_of(head, struct ext4_prealloc_space, u.pa_rcu); ++ ++ BUG_ON(atomic_read(&pa->pa_count)); ++ BUG_ON(pa->pa_deleted == 0); + kmem_cache_free(ext4_pspace_cachep, pa); + } + +@@ -3391,11 +3394,13 @@ static void ext4_mb_put_pa(struct ext4_allocation_context *ac, + ext4_group_t grp; + ext4_fsblk_t grp_blk; + +- if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) +- return; +- + /* in this short window concurrent discard can set pa_deleted */ + spin_lock(&pa->pa_lock); ++ if (!atomic_dec_and_test(&pa->pa_count) || pa->pa_free != 0) { ++ spin_unlock(&pa->pa_lock); ++ return; ++ } ++ + if (pa->pa_deleted == 1) { + spin_unlock(&pa->pa_lock); + return; +@@ -4062,7 +4067,7 @@ ext4_mb_initialize_context(struct ext4_allocation_context *ac, + + /* set up allocation goals */ + memset(ac, 0, sizeof(struct ext4_allocation_context)); +- ac->ac_b_ex.fe_logical = ar->logical & ~(sbi->s_cluster_ratio - 1); ++ ac->ac_b_ex.fe_logical = EXT4_LBLK_CMASK(sbi, ar->logical); + ac->ac_status = AC_STATUS_CONTINUE; + ac->ac_sb = sb; + ac->ac_inode = ar->inode; +@@ -4600,7 +4605,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, + * blocks at the beginning or the end unless we are explicitly + * requested to avoid doing so. + */ +- overflow = block & (sbi->s_cluster_ratio - 1); ++ overflow = EXT4_PBLK_COFF(sbi, block); + if (overflow) { + if (flags & EXT4_FREE_BLOCKS_NOFREE_FIRST_CLUSTER) { + overflow = sbi->s_cluster_ratio - overflow; +@@ -4614,7 +4619,7 @@ void ext4_free_blocks(handle_t *handle, struct inode *inode, + count += overflow; + } + } +- overflow = count & (sbi->s_cluster_ratio - 1); ++ overflow = EXT4_LBLK_COFF(sbi, count); + if (overflow) { + if (flags & EXT4_FREE_BLOCKS_NOFREE_LAST_CLUSTER) { + if (count > overflow) +diff --git a/fs/ext4/super.c b/fs/ext4/super.c +index 84f84bf..acf2baf 100644 +--- a/fs/ext4/super.c ++++ b/fs/ext4/super.c +@@ -819,7 +819,7 @@ static void ext4_put_super(struct super_block *sb) + ext4_abort(sb, "Couldn't clean up the journal"); + } + +- del_timer(&sbi->s_err_report); ++ del_timer_sync(&sbi->s_err_report); + ext4_release_system_zone(sb); + ext4_mb_release(sb); + ext4_ext_release(sb); +@@ -3961,7 +3961,7 @@ failed_mount_wq: + sbi->s_journal = NULL; + } + failed_mount3: +- del_timer(&sbi->s_err_report); ++ del_timer_sync(&sbi->s_err_report); + if (sbi->s_flex_groups) + ext4_kvfree(sbi->s_flex_groups); + percpu_counter_destroy(&sbi->s_freeclusters_counter); +diff --git a/fs/hpfs/file.c b/fs/hpfs/file.c +index 89d2a58..5ecfffe 100644 +--- a/fs/hpfs/file.c ++++ b/fs/hpfs/file.c +@@ -116,9 +116,12 @@ static int hpfs_write_begin(struct file *file, struct address_space *mapping, + hpfs_get_block, + &hpfs_i(mapping->host)->mmu_private); + if (unlikely(ret)) { +- loff_t isize = mapping->host->i_size; ++ loff_t isize; ++ hpfs_lock(mapping->host->i_sb); ++ isize = mapping->host->i_size; + if (pos + len > isize) + vmtruncate(mapping->host, isize); ++ hpfs_unlock(mapping->host->i_sb); + } + + return ret; +diff --git a/fs/nilfs2/segment.c b/fs/nilfs2/segment.c +index 233d3ed..3ceaced 100644 +--- 
a/fs/nilfs2/segment.c ++++ b/fs/nilfs2/segment.c +@@ -1437,17 +1437,19 @@ static int nilfs_segctor_collect(struct nilfs_sc_info *sci, + + nilfs_clear_logs(&sci->sc_segbufs); + +- err = nilfs_segctor_extend_segments(sci, nilfs, nadd); +- if (unlikely(err)) +- return err; +- + if (sci->sc_stage.flags & NILFS_CF_SUFREED) { + err = nilfs_sufile_cancel_freev(nilfs->ns_sufile, + sci->sc_freesegs, + sci->sc_nfreesegs, + NULL); + WARN_ON(err); /* do not happen */ ++ sci->sc_stage.flags &= ~NILFS_CF_SUFREED; + } ++ ++ err = nilfs_segctor_extend_segments(sci, nilfs, nadd); ++ if (unlikely(err)) ++ return err; ++ + nadd = min_t(int, nadd << 1, SC_MAX_SEGDELTA); + sci->sc_stage = prev_stage; + } +diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c +index 34817ad..10ca5e5 100644 +--- a/fs/xfs/xfs_log.c ++++ b/fs/xfs/xfs_log.c +@@ -653,8 +653,9 @@ xfs_log_unmount_write(xfs_mount_t *mp) + .lv_iovecp = ®, + }; + +- /* remove inited flag */ ++ /* remove inited flag, and account for space used */ + tic->t_flags = 0; ++ tic->t_curr_res -= sizeof(magic); + error = xlog_write(log, &vec, tic, &lsn, + NULL, XLOG_UNMOUNT_TRANS); + /* +diff --git a/include/drm/drm_pciids.h b/include/drm/drm_pciids.h +index def807c..c37fd89 100644 +--- a/include/drm/drm_pciids.h ++++ b/include/drm/drm_pciids.h +@@ -495,7 +495,7 @@ + {0x1002, 0x9645, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9647, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x9648, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ +- {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ ++ {0x1002, 0x9649, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO2|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP},\ + {0x1002, 0x964a, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x964c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SUMO|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ +diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h +index 32697c1..4bc9445 100644 +--- a/include/linux/hugetlb.h ++++ b/include/linux/hugetlb.h +@@ -24,6 +24,7 @@ struct hugepage_subpool *hugepage_new_subpool(long nr_blocks); + void hugepage_put_subpool(struct hugepage_subpool *spool); + + int PageHuge(struct page *page); ++int PageHeadHuge(struct page *page_head); + + void reset_vma_resv_huge_pages(struct vm_area_struct *vma); + int hugetlb_sysctl_handler(struct ctl_table *, int, void __user *, size_t *, loff_t *); +@@ -88,6 +89,11 @@ static inline int PageHuge(struct page *page) + return 0; + } + ++static inline int PageHeadHuge(struct page *page_head) ++{ ++ return 0; ++} ++ + static inline void reset_vma_resv_huge_pages(struct vm_area_struct *vma) + { + } +diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h +index 8c43fd1..4b04097 100644 +--- a/include/linux/netdevice.h ++++ b/include/linux/netdevice.h +@@ -1719,6 +1719,15 @@ static inline int dev_parse_header(const struct sk_buff *skb, + return dev->header_ops->parse(skb, haddr); + } + ++static inline int dev_rebuild_header(struct sk_buff *skb) ++{ ++ const struct net_device *dev = skb->dev; ++ ++ if (!dev->header_ops || !dev->header_ops->rebuild) ++ return 0; ++ return dev->header_ops->rebuild(skb); ++} ++ + typedef int gifconf_func_t(struct net_device * dev, char __user * bufptr, int 
len); + extern int register_gifconf(unsigned int family, gifconf_func_t * gifconf); + static inline int unregister_gifconf(unsigned int family) +diff --git a/include/linux/pci.h b/include/linux/pci.h +index 7cda65b..fe76a74 100644 +--- a/include/linux/pci.h ++++ b/include/linux/pci.h +@@ -590,6 +590,20 @@ struct pci_driver { + .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID + + /** ++ * PCI_DEVICE_SUB - macro used to describe a specific pci device with subsystem ++ * @vend: the 16 bit PCI Vendor ID ++ * @dev: the 16 bit PCI Device ID ++ * @subvend: the 16 bit PCI Subvendor ID ++ * @subdev: the 16 bit PCI Subdevice ID ++ * ++ * This macro is used to create a struct pci_device_id that matches a ++ * specific device with subsystem information. ++ */ ++#define PCI_DEVICE_SUB(vend, dev, subvend, subdev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = (subvend), .subdevice = (subdev) ++ ++/** + * PCI_DEVICE_CLASS - macro used to describe a specific pci device class + * @dev_class: the class, subclass, prog-if triple for this device + * @dev_class_mask: the class mask for this device +diff --git a/include/linux/sched.h b/include/linux/sched.h +index 312d047..c17fdfb 100644 +--- a/include/linux/sched.h ++++ b/include/linux/sched.h +@@ -1200,6 +1200,7 @@ struct sched_entity { + struct sched_rt_entity { + struct list_head run_list; + unsigned long timeout; ++ unsigned long watchdog_stamp; + unsigned int time_slice; + int nr_cpus_allowed; + +diff --git a/kernel/sched.c b/kernel/sched.c +index d93369a..ea85b0d 100644 +--- a/kernel/sched.c ++++ b/kernel/sched.c +@@ -2189,6 +2189,10 @@ static int irqtime_account_si_update(void) + + #endif + ++#ifdef CONFIG_SMP ++static void unthrottle_offline_cfs_rqs(struct rq *rq); ++#endif ++ + #include "sched_idletask.c" + #include "sched_fair.c" + #include "sched_rt.c" +@@ -6566,8 +6570,6 @@ static void unthrottle_offline_cfs_rqs(struct rq *rq) + unthrottle_cfs_rq(cfs_rq); + } + } +-#else +-static void unthrottle_offline_cfs_rqs(struct rq *rq) {} + #endif + + /* +@@ -6595,9 +6597,6 @@ static void migrate_tasks(unsigned int dead_cpu) + */ + rq->stop = NULL; + +- /* Ensure any throttled groups are reachable by pick_next_task */ +- unthrottle_offline_cfs_rqs(rq); +- + for ( ; ; ) { + /* + * There's this thread running, bail when that's the only +@@ -6624,6 +6623,10 @@ static void migrate_tasks(unsigned int dead_cpu) + + #endif /* CONFIG_HOTPLUG_CPU */ + ++#if !defined(CONFIG_HOTPLUG_CPU) || !defined(CONFIG_CFS_BANDWIDTH) ++static void unthrottle_offline_cfs_rqs(struct rq *rq) {} ++#endif ++ + #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL) + + static struct ctl_table sd_ctl_dir[] = { +diff --git a/kernel/sched_fair.c b/kernel/sched_fair.c +index 5b9e456..37f3f39 100644 +--- a/kernel/sched_fair.c ++++ b/kernel/sched_fair.c +@@ -4848,6 +4848,9 @@ static void rq_online_fair(struct rq *rq) + static void rq_offline_fair(struct rq *rq) + { + update_sysctl(); ++ ++ /* Ensure any throttled groups are reachable by pick_next_task */ ++ unthrottle_offline_cfs_rqs(rq); + } + + #else /* CONFIG_SMP */ +diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c +index 6ad4fb3..f57fda7 100644 +--- a/kernel/sched_rt.c ++++ b/kernel/sched_rt.c +@@ -509,6 +509,7 @@ balanced: + * runtime - in which case borrowing doesn't make sense. 
+ */ + rt_rq->rt_runtime = RUNTIME_INF; ++ rt_rq->rt_throttled = 0; + raw_spin_unlock(&rt_rq->rt_runtime_lock); + raw_spin_unlock(&rt_b->rt_runtime_lock); + } +@@ -587,6 +588,19 @@ static int do_sched_rt_period_timer(struct rt_bandwidth *rt_b, int overrun) + return 1; + + span = sched_rt_period_mask(); ++#ifdef CONFIG_RT_GROUP_SCHED ++ /* ++ * FIXME: isolated CPUs should really leave the root task group, ++ * whether they are isolcpus or were isolated via cpusets, lest ++ * the timer run on a CPU which does not service all runqueues, ++ * potentially leaving other CPUs indefinitely throttled. If ++ * isolation is really required, the user will turn the throttle ++ * off to kill the perturbations it causes anyway. Meanwhile, ++ * this maintains functionality for boot and/or troubleshooting. ++ */ ++ if (rt_b == &root_task_group.rt_bandwidth) ++ span = cpu_online_mask; ++#endif + for_each_cpu(i, span) { + int enqueue = 0; + struct rt_rq *rt_rq = sched_rt_period_rt_rq(rt_b, i); +@@ -719,6 +733,13 @@ inc_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) + { + struct rq *rq = rq_of_rt_rq(rt_rq); + ++#ifdef CONFIG_RT_GROUP_SCHED ++ /* ++ * Change rq's cpupri only if rt_rq is the top queue. ++ */ ++ if (&rq->rt != rt_rq) ++ return; ++#endif + if (rq->online && prio < prev_prio) + cpupri_set(&rq->rd->cpupri, rq->cpu, prio); + } +@@ -728,6 +749,13 @@ dec_rt_prio_smp(struct rt_rq *rt_rq, int prio, int prev_prio) + { + struct rq *rq = rq_of_rt_rq(rt_rq); + ++#ifdef CONFIG_RT_GROUP_SCHED ++ /* ++ * Change rq's cpupri only if rt_rq is the top queue. ++ */ ++ if (&rq->rt != rt_rq) ++ return; ++#endif + if (rq->online && rt_rq->highest_prio.curr != prev_prio) + cpupri_set(&rq->rd->cpupri, rq->cpu, rt_rq->highest_prio.curr); + } +@@ -1749,7 +1777,11 @@ static void watchdog(struct rq *rq, struct task_struct *p) + if (soft != RLIM_INFINITY) { + unsigned long next; + +- p->rt.timeout++; ++ if (p->rt.watchdog_stamp != jiffies) { ++ p->rt.timeout++; ++ p->rt.watchdog_stamp = jiffies; ++ } ++ + next = DIV_ROUND_UP(min(soft, hard), USEC_PER_SEC/HZ); + if (p->rt.timeout > next) + p->cputime_expires.sched_exp = p->se.sum_exec_runtime; +@@ -1758,6 +1790,8 @@ static void watchdog(struct rq *rq, struct task_struct *p) + + static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) + { ++ struct sched_rt_entity *rt_se = &p->rt; ++ + update_curr_rt(rq); + + watchdog(rq, p); +@@ -1775,12 +1809,15 @@ static void task_tick_rt(struct rq *rq, struct task_struct *p, int queued) + p->rt.time_slice = DEF_TIMESLICE; + + /* +- * Requeue to the end of queue if we are not the only element +- * on the queue: ++ * Requeue to the end of queue if we (and all of our ancestors) are the ++ * only element on the queue + */ +- if (p->rt.run_list.prev != p->rt.run_list.next) { +- requeue_task_rt(rq, p, 0); +- set_tsk_need_resched(p); ++ for_each_sched_rt_entity(rt_se) { ++ if (rt_se->run_list.prev != rt_se->run_list.next) { ++ requeue_task_rt(rq, p, 0); ++ set_tsk_need_resched(p); ++ return; ++ } + } + } + +diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c +index d40d7f6..cf8b439 100644 +--- a/kernel/trace/ftrace.c ++++ b/kernel/trace/ftrace.c +@@ -618,7 +618,7 @@ static int ftrace_profile_init(void) + int cpu; + int ret = 0; + +- for_each_online_cpu(cpu) { ++ for_each_possible_cpu(cpu) { + ret = ftrace_profile_init_cpu(cpu); + if (ret) + break; +diff --git a/mm/hugetlb.c b/mm/hugetlb.c +index ddf2128..3a5aae2 100644 +--- a/mm/hugetlb.c ++++ b/mm/hugetlb.c +@@ -679,6 +679,23 @@ int PageHuge(struct page *page) 
+ } + EXPORT_SYMBOL_GPL(PageHuge); + ++/* ++ * PageHeadHuge() only returns true for hugetlbfs head page, but not for ++ * normal or transparent huge pages. ++ */ ++int PageHeadHuge(struct page *page_head) ++{ ++ compound_page_dtor *dtor; ++ ++ if (!PageHead(page_head)) ++ return 0; ++ ++ dtor = get_compound_page_dtor(page_head); ++ ++ return dtor == free_huge_page; ++} ++EXPORT_SYMBOL_GPL(PageHeadHuge); ++ + pgoff_t __basepage_index(struct page *page) + { + struct page *page_head = compound_head(page); +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 1b03878..96c4bcf 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1441,10 +1441,18 @@ static int soft_offline_huge_page(struct page *page, int flags) + return ret; + } + done: +- if (!PageHWPoison(hpage)) +- atomic_long_add(1 << compound_trans_order(hpage), &mce_bad_pages); +- set_page_hwpoison_huge_page(hpage); +- dequeue_hwpoisoned_huge_page(hpage); ++ /* overcommit hugetlb page will be freed to buddy */ ++ if (PageHuge(hpage)) { ++ if (!PageHWPoison(hpage)) ++ atomic_long_add(1 << compound_trans_order(hpage), ++ &mce_bad_pages); ++ set_page_hwpoison_huge_page(hpage); ++ dequeue_hwpoisoned_huge_page(hpage); ++ } else { ++ SetPageHWPoison(page); ++ atomic_long_inc(&mce_bad_pages); ++ } ++ + /* keep elevated page count for bad page */ + return ret; + } +diff --git a/mm/mmap.c b/mm/mmap.c +index dff37a6..6182c8a 100644 +--- a/mm/mmap.c ++++ b/mm/mmap.c +@@ -1368,7 +1368,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + struct vm_area_struct *vma; + unsigned long start_addr; + +- if (len > TASK_SIZE) ++ if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; + + if (flags & MAP_FIXED) +@@ -1377,7 +1377,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr, + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && ++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } +@@ -1442,9 +1442,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; + unsigned long addr = addr0; ++ unsigned long low_limit = max(PAGE_SIZE, mmap_min_addr); + + /* requested length too big for entire address space */ +- if (len > TASK_SIZE) ++ if (len > TASK_SIZE - mmap_min_addr) + return -ENOMEM; + + if (flags & MAP_FIXED) +@@ -1454,7 +1455,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && ++ if (TASK_SIZE - len >= addr && addr >= mmap_min_addr && + (!vma || addr + len <= vma->vm_start)) + return addr; + } +@@ -1469,14 +1470,14 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + addr = mm->free_area_cache; + + /* make sure it can fit in the remaining address space */ +- if (addr > len) { ++ if (addr >= low_limit + len) { + vma = find_vma(mm, addr-len); + if (!vma || addr <= vma->vm_start) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } + +- if (mm->mmap_base < len) ++ if (mm->mmap_base < low_limit + len) + goto bottomup; + + addr = mm->mmap_base-len; +@@ -1498,7 +1499,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, + + /* try just below the current vma->vm_start */ + addr = vma->vm_start-len; +- } while (len < vma->vm_start); ++ } while (vma->vm_start >= low_limit + 
len); + + bottomup: + /* +diff --git a/mm/swap.c b/mm/swap.c +index 55b266d..a4b9016 100644 +--- a/mm/swap.c ++++ b/mm/swap.c +@@ -31,6 +31,7 @@ + #include <linux/backing-dev.h> + #include <linux/memcontrol.h> + #include <linux/gfp.h> ++#include <linux/hugetlb.h> + + #include "internal.h" + +@@ -69,7 +70,8 @@ static void __put_compound_page(struct page *page) + { + compound_page_dtor *dtor; + +- __page_cache_release(page); ++ if (!PageHuge(page)) ++ __page_cache_release(page); + dtor = get_compound_page_dtor(page); + (*dtor)(page); + } +@@ -83,6 +85,35 @@ static void put_compound_page(struct page *page) + if (likely(page != page_head && + get_page_unless_zero(page_head))) { + unsigned long flags; ++ ++ if (PageHeadHuge(page_head)) { ++ if (likely(PageTail(page))) { ++ /* ++ * __split_huge_page_refcount ++ * cannot race here. ++ */ ++ VM_BUG_ON(!PageHead(page_head)); ++ atomic_dec(&page->_mapcount); ++ if (put_page_testzero(page_head)) ++ VM_BUG_ON(1); ++ if (put_page_testzero(page_head)) ++ __put_compound_page(page_head); ++ return; ++ } else { ++ /* ++ * __split_huge_page_refcount ++ * run before us, "page" was a ++ * THP tail. The split ++ * page_head has been freed ++ * and reallocated as slab or ++ * hugetlbfs page of smaller ++ * order (only possible if ++ * reallocated as slab on ++ * x86). ++ */ ++ goto skip_lock; ++ } ++ } + /* + * page_head wasn't a dangling pointer but it + * may not be a head page anymore by the time +@@ -94,9 +125,29 @@ static void put_compound_page(struct page *page) + /* __split_huge_page_refcount run before us */ + compound_unlock_irqrestore(page_head, flags); + VM_BUG_ON(PageHead(page_head)); +- if (put_page_testzero(page_head)) +- __put_single_page(page_head); +- out_put_single: ++skip_lock: ++ if (put_page_testzero(page_head)) { ++ /* ++ * The head page may have been ++ * freed and reallocated as a ++ * compound page of smaller ++ * order and then freed again. ++ * All we know is that it ++ * cannot have become: a THP ++ * page, a compound page of ++ * higher order, a tail page. ++ * That is because we still ++ * hold the refcount of the ++ * split THP tail and ++ * page_head was the THP head ++ * before the split. ++ */ ++ if (PageHead(page_head)) ++ __put_compound_page(page_head); ++ else ++ __put_single_page(page_head); ++ } ++out_put_single: + if (put_page_testzero(page)) + __put_single_page(page); + return; +@@ -163,6 +214,31 @@ bool __get_page_tail(struct page *page) + struct page *page_head = compound_trans_head(page); + + if (likely(page != page_head && get_page_unless_zero(page_head))) { ++ /* Ref to put_compound_page() comment. */ ++ if (PageHeadHuge(page_head)) { ++ if (likely(PageTail(page))) { ++ /* ++ * This is a hugetlbfs ++ * page. __split_huge_page_refcount ++ * cannot race here. ++ */ ++ VM_BUG_ON(!PageHead(page_head)); ++ __get_page_tail_foll(page, false); ++ return true; ++ } else { ++ /* ++ * __split_huge_page_refcount run ++ * before us, "page" was a THP ++ * tail. The split page_head has been ++ * freed and reallocated as slab or ++ * hugetlbfs page of smaller order ++ * (only possible if reallocated as ++ * slab on x86). 
++ */ ++ put_page(page_head); ++ return false; ++ } ++ } + /* + * page_head wasn't a dangling pointer but it + * may not be a head page anymore by the time +diff --git a/net/8021q/vlan_dev.c b/net/8021q/vlan_dev.c +index b40d3da..48a62d8 100644 +--- a/net/8021q/vlan_dev.c ++++ b/net/8021q/vlan_dev.c +@@ -522,6 +522,22 @@ static const struct header_ops vlan_header_ops = { + .parse = eth_header_parse, + }; + ++static int vlan_passthru_hard_header(struct sk_buff *skb, struct net_device *dev, ++ unsigned short type, ++ const void *daddr, const void *saddr, ++ unsigned int len) ++{ ++ struct net_device *real_dev = vlan_dev_info(dev)->real_dev; ++ ++ return dev_hard_header(skb, real_dev, type, daddr, saddr, len); ++} ++ ++static const struct header_ops vlan_passthru_header_ops = { ++ .create = vlan_passthru_hard_header, ++ .rebuild = dev_rebuild_header, ++ .parse = eth_header_parse, ++}; ++ + static const struct net_device_ops vlan_netdev_ops; + + static int vlan_dev_init(struct net_device *dev) +@@ -561,7 +577,7 @@ static int vlan_dev_init(struct net_device *dev) + + dev->needed_headroom = real_dev->needed_headroom; + if (real_dev->features & NETIF_F_HW_VLAN_TX) { +- dev->header_ops = real_dev->header_ops; ++ dev->header_ops = &vlan_passthru_header_ops; + dev->hard_header_len = real_dev->hard_header_len; + } else { + dev->header_ops = &vlan_header_ops; +diff --git a/net/bridge/br_multicast.c b/net/bridge/br_multicast.c +index a06deca..2157984 100644 +--- a/net/bridge/br_multicast.c ++++ b/net/bridge/br_multicast.c +@@ -1743,7 +1743,7 @@ int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val) + u32 old; + struct net_bridge_mdb_htable *mdb; + +- spin_lock(&br->multicast_lock); ++ spin_lock_bh(&br->multicast_lock); + if (!netif_running(br->dev)) + goto unlock; + +@@ -1775,7 +1775,7 @@ rollback: + } + + unlock: +- spin_unlock(&br->multicast_lock); ++ spin_unlock_bh(&br->multicast_lock); + + return err; + } +diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c +index b856f87..1d9a529 100644 +--- a/net/core/drop_monitor.c ++++ b/net/core/drop_monitor.c +@@ -61,7 +61,6 @@ static struct genl_family net_drop_monitor_family = { + .hdrsize = 0, + .name = "NET_DM", + .version = 2, +- .maxattr = NET_DM_CMD_MAX, + }; + + static DEFINE_PER_CPU(struct per_cpu_dm_data, dm_cpu_data); +diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c +index ccee270..6be5e8e 100644 +--- a/net/ipv4/inet_diag.c ++++ b/net/ipv4/inet_diag.c +@@ -119,6 +119,10 @@ static int inet_csk_diag_fill(struct sock *sk, + + r->id.idiag_sport = inet->inet_sport; + r->id.idiag_dport = inet->inet_dport; ++ ++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); ++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); ++ + r->id.idiag_src[0] = inet->inet_rcv_saddr; + r->id.idiag_dst[0] = inet->inet_daddr; + +@@ -209,13 +213,20 @@ static int inet_twsk_diag_fill(struct inet_timewait_sock *tw, + + r->idiag_family = tw->tw_family; + r->idiag_retrans = 0; ++ + r->id.idiag_if = tw->tw_bound_dev_if; + r->id.idiag_cookie[0] = (u32)(unsigned long)tw; + r->id.idiag_cookie[1] = (u32)(((unsigned long)tw >> 31) >> 1); ++ + r->id.idiag_sport = tw->tw_sport; + r->id.idiag_dport = tw->tw_dport; ++ ++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); ++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); ++ + r->id.idiag_src[0] = tw->tw_rcv_saddr; + r->id.idiag_dst[0] = tw->tw_daddr; ++ + r->idiag_state = tw->tw_substate; + r->idiag_timer = 3; + r->idiag_expires = DIV_ROUND_UP(tmo * 1000, HZ); +@@ -598,8 +609,13 @@ static int 
inet_diag_fill_req(struct sk_buff *skb, struct sock *sk, + + r->id.idiag_sport = inet->inet_sport; + r->id.idiag_dport = ireq->rmt_port; ++ ++ memset(&r->id.idiag_src, 0, sizeof(r->id.idiag_src)); ++ memset(&r->id.idiag_dst, 0, sizeof(r->id.idiag_dst)); ++ + r->id.idiag_src[0] = ireq->loc_addr; + r->id.idiag_dst[0] = ireq->rmt_addr; ++ + r->idiag_expires = jiffies_to_msecs(tmo); + r->idiag_rqueue = 0; + r->idiag_wqueue = 0; +@@ -824,7 +840,7 @@ next_normal: + ++num; + } + +- if (r->idiag_states & TCPF_TIME_WAIT) { ++ if (r->idiag_states & (TCPF_TIME_WAIT | TCPF_FIN_WAIT2)) { + struct inet_timewait_sock *tw; + + inet_twsk_for_each(tw, node, +@@ -832,6 +848,8 @@ next_normal: + + if (num < s_num) + goto next_dying; ++ if (!(r->idiag_states & (1 << tw->tw_substate))) ++ goto next_dying; + if (r->id.idiag_sport != tw->tw_sport && + r->id.idiag_sport) + goto next_dying; +diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c +index b5e64e4..140d377 100644 +--- a/net/ipv4/ipmr.c ++++ b/net/ipv4/ipmr.c +@@ -155,9 +155,12 @@ static struct mr_table *ipmr_get_table(struct net *net, u32 id) + static int ipmr_fib_lookup(struct net *net, struct flowi4 *flp4, + struct mr_table **mrt) + { +- struct ipmr_result res; +- struct fib_lookup_arg arg = { .result = &res, }; + int err; ++ struct ipmr_result res; ++ struct fib_lookup_arg arg = { ++ .result = &res, ++ .flags = FIB_LOOKUP_NOREF, ++ }; + + err = fib_rules_lookup(net->ipv4.mr_rules_ops, + flowi4_to_flowi(flp4), 0, &arg); +diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c +index f5af259..f96c96f 100644 +--- a/net/ipv6/ip6mr.c ++++ b/net/ipv6/ip6mr.c +@@ -139,9 +139,12 @@ static struct mr6_table *ip6mr_get_table(struct net *net, u32 id) + static int ip6mr_fib_lookup(struct net *net, struct flowi6 *flp6, + struct mr6_table **mrt) + { +- struct ip6mr_result res; +- struct fib_lookup_arg arg = { .result = &res, }; + int err; ++ struct ip6mr_result res; ++ struct fib_lookup_arg arg = { ++ .result = &res, ++ .flags = FIB_LOOKUP_NOREF, ++ }; + + err = fib_rules_lookup(net->ipv6.mr6_rules_ops, + flowi6_to_flowi(flp6), 0, &arg); +diff --git a/net/ipv6/route.c b/net/ipv6/route.c +index 1768238..9a4f437 100644 +--- a/net/ipv6/route.c ++++ b/net/ipv6/route.c +@@ -2058,15 +2058,11 @@ struct rt6_info *addrconf_dst_alloc(struct inet6_dev *idev, + { + struct net *net = dev_net(idev->dev); + struct rt6_info *rt = ip6_dst_alloc(&net->ipv6.ip6_dst_ops, +- net->loopback_dev, 0); ++ net->loopback_dev, DST_NOCOUNT); + struct neighbour *neigh; + +- if (rt == NULL) { +- if (net_ratelimit()) +- pr_warning("IPv6: Maximum number of routes reached," +- " consider increasing route/max_size.\n"); ++ if (rt == NULL) + return ERR_PTR(-ENOMEM); +- } + + in6_dev_hold(idev); + +diff --git a/net/llc/af_llc.c b/net/llc/af_llc.c +index 99a60d5..f432d7b 100644 +--- a/net/llc/af_llc.c ++++ b/net/llc/af_llc.c +@@ -715,7 +715,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, + struct llc_sock *llc = llc_sk(sk); + size_t copied = 0; + u32 peek_seq = 0; +- u32 *seq; ++ u32 *seq, skb_len; + unsigned long used; + int target; /* Read at least this many bytes */ + long timeo; +@@ -813,6 +813,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, + } + continue; + found_ok_skb: ++ skb_len = skb->len; + /* Ok so how much can we use? 
*/ + used = skb->len - offset; + if (len < used) +@@ -843,7 +844,7 @@ static int llc_ui_recvmsg(struct kiocb *iocb, struct socket *sock, + } + + /* Partial read */ +- if (used + offset < skb->len) ++ if (used + offset < skb_len) + continue; + } while (len > 0); + +diff --git a/net/rds/ib.c b/net/rds/ib.c +index b4c8b00..ba2dffe 100644 +--- a/net/rds/ib.c ++++ b/net/rds/ib.c +@@ -338,7 +338,8 @@ static int rds_ib_laddr_check(__be32 addr) + ret = rdma_bind_addr(cm_id, (struct sockaddr *)&sin); + /* due to this, we will claim to support iWARP devices unless we + check node_type. */ +- if (ret || cm_id->device->node_type != RDMA_NODE_IB_CA) ++ if (ret || !cm_id->device || ++ cm_id->device->node_type != RDMA_NODE_IB_CA) + ret = -EADDRNOTAVAIL; + + rdsdebug("addr %pI4 ret %d node type %d\n", +diff --git a/net/rds/ib_send.c b/net/rds/ib_send.c +index e590949..37be6e2 100644 +--- a/net/rds/ib_send.c ++++ b/net/rds/ib_send.c +@@ -552,9 +552,8 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, + && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { + rds_cong_map_updated(conn->c_fcong, ~(u64) 0); + scat = &rm->data.op_sg[sg]; +- ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; +- ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); +- return ret; ++ ret = max_t(int, RDS_CONG_MAP_BYTES, scat->length); ++ return sizeof(struct rds_header) + ret; + } + + /* FIXME we may overallocate here */ +diff --git a/net/rose/af_rose.c b/net/rose/af_rose.c +index bf76dec7..686fb1a 100644 +--- a/net/rose/af_rose.c ++++ b/net/rose/af_rose.c +@@ -1258,6 +1258,7 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, + + if (msg->msg_name) { + struct sockaddr_rose *srose; ++ struct full_sockaddr_rose *full_srose = msg->msg_name; + + memset(msg->msg_name, 0, sizeof(struct full_sockaddr_rose)); + srose = msg->msg_name; +@@ -1265,18 +1266,9 @@ static int rose_recvmsg(struct kiocb *iocb, struct socket *sock, + srose->srose_addr = rose->dest_addr; + srose->srose_call = rose->dest_call; + srose->srose_ndigis = rose->dest_ndigis; +- if (msg->msg_namelen >= sizeof(struct full_sockaddr_rose)) { +- struct full_sockaddr_rose *full_srose = (struct full_sockaddr_rose *)msg->msg_name; +- for (n = 0 ; n < rose->dest_ndigis ; n++) +- full_srose->srose_digis[n] = rose->dest_digis[n]; +- msg->msg_namelen = sizeof(struct full_sockaddr_rose); +- } else { +- if (rose->dest_ndigis >= 1) { +- srose->srose_ndigis = 1; +- srose->srose_digi = rose->dest_digis[0]; +- } +- msg->msg_namelen = sizeof(struct sockaddr_rose); +- } ++ for (n = 0 ; n < rose->dest_ndigis ; n++) ++ full_srose->srose_digis[n] = rose->dest_digis[n]; ++ msg->msg_namelen = sizeof(struct full_sockaddr_rose); + } + + skb_free_datagram(sk, skb); +diff --git a/net/unix/af_unix.c b/net/unix/af_unix.c +index 9338ccc..eddfdec 100644 +--- a/net/unix/af_unix.c ++++ b/net/unix/af_unix.c +@@ -696,7 +696,9 @@ static int unix_autobind(struct socket *sock) + int err; + unsigned int retries = 0; + +- mutex_lock(&u->readlock); ++ err = mutex_lock_interruptible(&u->readlock); ++ if (err) ++ return err; + + err = 0; + if (u->addr) +@@ -829,7 +831,9 @@ static int unix_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) + goto out; + addr_len = err; + +- mutex_lock(&u->readlock); ++ err = mutex_lock_interruptible(&u->readlock); ++ if (err) ++ goto out; + + err = -EINVAL; + if (u->addr) +diff --git a/net/wireless/radiotap.c b/net/wireless/radiotap.c +index 617a310..60549a4 100644 +--- a/net/wireless/radiotap.c ++++ b/net/wireless/radiotap.c 
+@@ -122,6 +122,10 @@ int ieee80211_radiotap_iterator_init( + /* find payload start allowing for extended bitmap(s) */ + + if (iterator->_bitmap_shifter & (1<<IEEE80211_RADIOTAP_EXT)) { ++ if ((unsigned long)iterator->_arg - ++ (unsigned long)iterator->_rtheader + sizeof(uint32_t) > ++ (unsigned long)iterator->_max_length) ++ return -EINVAL; + while (get_unaligned_le32(iterator->_arg) & + (1 << IEEE80211_RADIOTAP_EXT)) { + iterator->_arg += sizeof(uint32_t); +diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c +index 5898f34..bcf1d73 100644 +--- a/security/selinux/hooks.c ++++ b/security/selinux/hooks.c +@@ -217,6 +217,14 @@ static int inode_alloc_security(struct inode *inode) + return 0; + } + ++static void inode_free_rcu(struct rcu_head *head) ++{ ++ struct inode_security_struct *isec; ++ ++ isec = container_of(head, struct inode_security_struct, rcu); ++ kmem_cache_free(sel_inode_cache, isec); ++} ++ + static void inode_free_security(struct inode *inode) + { + struct inode_security_struct *isec = inode->i_security; +@@ -227,8 +235,16 @@ static void inode_free_security(struct inode *inode) + list_del_init(&isec->list); + spin_unlock(&sbsec->isec_lock); + +- inode->i_security = NULL; +- kmem_cache_free(sel_inode_cache, isec); ++ /* ++ * The inode may still be referenced in a path walk and ++ * a call to selinux_inode_permission() can be made ++ * after inode_free_security() is called. Ideally, the VFS ++ * wouldn't do this, but fixing that is a much harder ++ * job. For now, simply free the i_security via RCU, and ++ * leave the current inode->i_security pointer intact. ++ * The inode will be freed after the RCU grace period too. ++ */ ++ call_rcu(&isec->rcu, inode_free_rcu); + } + + static int file_alloc_security(struct file *file) +@@ -4181,8 +4197,10 @@ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb) + } + err = avc_has_perm(sk_sid, peer_sid, SECCLASS_PEER, + PEER__RECV, &ad); +- if (err) ++ if (err) { + selinux_netlbl_err(skb, err, 0); ++ return err; ++ } + } + + if (secmark_active) { +@@ -5372,11 +5390,11 @@ static int selinux_setprocattr(struct task_struct *p, + /* Check for ptracing, and update the task SID if ok. + Otherwise, leave SID unchanged and fail. 
*/ + ptsid = 0; +- task_lock(p); ++ rcu_read_lock(); + tracer = ptrace_parent(p); + if (tracer) + ptsid = task_sid(tracer); +- task_unlock(p); ++ rcu_read_unlock(); + + if (tracer) { + error = avc_has_perm(ptsid, sid, SECCLASS_PROCESS, +diff --git a/security/selinux/include/objsec.h b/security/selinux/include/objsec.h +index 26c7eee..7b1830b 100644 +--- a/security/selinux/include/objsec.h ++++ b/security/selinux/include/objsec.h +@@ -38,7 +38,10 @@ struct task_security_struct { + + struct inode_security_struct { + struct inode *inode; /* back pointer to inode object */ +- struct list_head list; /* list of inode_security_struct */ ++ union { ++ struct list_head list; /* list of inode_security_struct */ ++ struct rcu_head rcu; /* for freeing the inode_security_struct */ ++ }; + u32 task_sid; /* SID of creating task */ + u32 sid; /* SID of this object */ + u16 sclass; /* security class of this object */ +diff --git a/sound/core/pcm_lib.c b/sound/core/pcm_lib.c +index 3420bd3..cf0d46e 100644 +--- a/sound/core/pcm_lib.c ++++ b/sound/core/pcm_lib.c +@@ -1846,6 +1846,8 @@ static int wait_for_avail(struct snd_pcm_substream *substream, + case SNDRV_PCM_STATE_DISCONNECTED: + err = -EBADFD; + goto _endloop; ++ case SNDRV_PCM_STATE_PAUSED: ++ continue; + } + if (!tout) { + snd_printd("%s write error (DMA or IRQ trouble?)\n", +diff --git a/sound/pci/hda/hda_intel.c b/sound/pci/hda/hda_intel.c +index 7ebe4b7..fea6895 100644 +--- a/sound/pci/hda/hda_intel.c ++++ b/sound/pci/hda/hda_intel.c +@@ -2618,6 +2618,10 @@ static void __devinit check_probe_mask(struct azx *chip, int dev) + * white/black-list for enable_msi + */ + static struct snd_pci_quirk msi_black_list[] __devinitdata = { ++ SND_PCI_QUIRK(0x103c, 0x2191, "HP", 0), /* AMD Hudson */ ++ SND_PCI_QUIRK(0x103c, 0x2192, "HP", 0), /* AMD Hudson */ ++ SND_PCI_QUIRK(0x103c, 0x21f7, "HP", 0), /* AMD Hudson */ ++ SND_PCI_QUIRK(0x103c, 0x21fa, "HP", 0), /* AMD Hudson */ + SND_PCI_QUIRK(0x1043, 0x81f2, "ASUS", 0), /* Athlon64 X2 + nvidia */ + SND_PCI_QUIRK(0x1043, 0x81f6, "ASUS", 0), /* nvidia */ + SND_PCI_QUIRK(0x1043, 0x822d, "ASUS", 0), /* Athlon64 X2 + nvidia MCP55 */ +diff --git a/sound/soc/codecs/wm8904.c b/sound/soc/codecs/wm8904.c +index 285ef87..fafb76f 100644 +--- a/sound/soc/codecs/wm8904.c ++++ b/sound/soc/codecs/wm8904.c +@@ -1714,7 +1714,7 @@ static int wm8904_set_fmt(struct snd_soc_dai *dai, unsigned int fmt) + + switch (fmt & SND_SOC_DAIFMT_FORMAT_MASK) { + case SND_SOC_DAIFMT_DSP_B: +- aif1 |= WM8904_AIF_LRCLK_INV; ++ aif1 |= 0x3 | WM8904_AIF_LRCLK_INV; + case SND_SOC_DAIFMT_DSP_A: + aif1 |= 0x3; + break; +diff --git a/tools/power/cpupower/utils/cpupower-set.c b/tools/power/cpupower/utils/cpupower-set.c +index dc4de37..bcf1d2f 100644 +--- a/tools/power/cpupower/utils/cpupower-set.c ++++ b/tools/power/cpupower/utils/cpupower-set.c +@@ -18,9 +18,9 @@ + #include "helpers/bitmask.h" + + static struct option set_opts[] = { +- { .name = "perf-bias", .has_arg = optional_argument, .flag = NULL, .val = 'b'}, +- { .name = "sched-mc", .has_arg = optional_argument, .flag = NULL, .val = 'm'}, +- { .name = "sched-smt", .has_arg = optional_argument, .flag = NULL, .val = 's'}, ++ { .name = "perf-bias", .has_arg = required_argument, .flag = NULL, .val = 'b'}, ++ { .name = "sched-mc", .has_arg = required_argument, .flag = NULL, .val = 'm'}, ++ { .name = "sched-smt", .has_arg = required_argument, .flag = NULL, .val = 's'}, + { }, + }; +
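Editor's note, appended after the patch and not part of it: the include/linux/pci.h hunk above introduces the PCI_DEVICE_SUB initializer so a driver can match on subsystem vendor/device IDs in addition to the primary vendor/device pair. A minimal sketch of how a hypothetical driver's match table might use it follows; every numeric ID below is a placeholder, not real hardware.

#include <linux/module.h>
#include <linux/pci.h>

static const struct pci_device_id example_ids[] = {
	/* Match only the 0xabcd:0x5678 subsystem variant of device 0x1234:0x0001. */
	{ PCI_DEVICE_SUB(0x1234, 0x0001, 0xabcd, 0x5678) },
	/* Catch-all entry: same device, any subsystem (PCI_DEVICE leaves
	 * subvendor/subdevice as PCI_ANY_ID). */
	{ PCI_DEVICE(0x1234, 0x0001) },
	{ 0, }
};
MODULE_DEVICE_TABLE(pci, example_ids);

Since the PCI core walks the table in order and returns the first entry that matches, the more specific PCI_DEVICE_SUB entry must come before the PCI_DEVICE catch-all; the driver's probe callback can then tell the variants apart from the struct pci_device_id pointer it receives.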