Diffstat (limited to '3.5.1/1000_linux-3.5.1.patch')
-rw-r--r--  3.5.1/1000_linux-3.5.1.patch  5588
1 file changed, 5588 insertions, 0 deletions
diff --git a/3.5.1/1000_linux-3.5.1.patch b/3.5.1/1000_linux-3.5.1.patch
new file mode 100644
index 0000000..69785d2
--- /dev/null
+++ b/3.5.1/1000_linux-3.5.1.patch
@@ -0,0 +1,5588 @@
+diff --git a/Documentation/sound/alsa/HD-Audio-Models.txt b/Documentation/sound/alsa/HD-Audio-Models.txt
+index 03f7897..286ec04 100644
+--- a/Documentation/sound/alsa/HD-Audio-Models.txt
++++ b/Documentation/sound/alsa/HD-Audio-Models.txt
+@@ -21,10 +21,11 @@ ALC267/268
+ ==========
+ N/A
+
+-ALC269
++ALC269/270/275/276/280/282
+ ======
+ laptop-amic Laptops with analog-mic input
+ laptop-dmic Laptops with digital-mic input
++ lenovo-dock Enables docking station I/O for some Lenovos
+
+ ALC662/663/272
+ ==============
+diff --git a/Documentation/stable_kernel_rules.txt b/Documentation/stable_kernel_rules.txt
+index 4a7b54b..b0714d8 100644
+--- a/Documentation/stable_kernel_rules.txt
++++ b/Documentation/stable_kernel_rules.txt
+@@ -1,4 +1,4 @@
+-Everything you ever wanted to know about Linux 2.6 -stable releases.
++Everything you ever wanted to know about Linux -stable releases.
+
+ Rules on what kind of patches are accepted, and which ones are not, into the
+ "-stable" tree:
+@@ -42,10 +42,10 @@ Procedure for submitting patches to the -stable tree:
+ cherry-picked than this can be specified in the following format in
+ the sign-off area:
+
+- Cc: <stable@vger.kernel.org> # .32.x: a1f84a3: sched: Check for idle
+- Cc: <stable@vger.kernel.org> # .32.x: 1b9508f: sched: Rate-limit newidle
+- Cc: <stable@vger.kernel.org> # .32.x: fd21073: sched: Fix affinity logic
+- Cc: <stable@vger.kernel.org> # .32.x
++ Cc: <stable@vger.kernel.org> # 3.3.x: a1f84a3: sched: Check for idle
++ Cc: <stable@vger.kernel.org> # 3.3.x: 1b9508f: sched: Rate-limit newidle
++ Cc: <stable@vger.kernel.org> # 3.3.x: fd21073: sched: Fix affinity logic
++ Cc: <stable@vger.kernel.org> # 3.3.x
+ Signed-off-by: Ingo Molnar <mingo@elte.hu>
+
+ The tag sequence has the meaning of:
+@@ -79,6 +79,15 @@ Review cycle:
+ security kernel team, and not go through the normal review cycle.
+ Contact the kernel security team for more details on this procedure.
+
++Trees:
++
++ - The queues of patches, for both completed versions and in progress
++ versions can be found at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/stable-queue.git
++ - The finalized and tagged releases of all stable kernels can be found
++ in separate branches per version at:
++ http://git.kernel.org/?p=linux/kernel/git/stable/linux-stable.git
++
+
+ Review committee:
+
+diff --git a/Makefile b/Makefile
+index 4bb09e1..d7ee1cb 100644
+--- a/Makefile
++++ b/Makefile
+@@ -1,6 +1,6 @@
+ VERSION = 3
+ PATCHLEVEL = 5
+-SUBLEVEL = 0
++SUBLEVEL = 1
+ EXTRAVERSION =
+ NAME = Saber-toothed Squirrel
+
+diff --git a/arch/arm/boot/dts/tegra-trimslice.dts b/arch/arm/boot/dts/tegra-trimslice.dts
+index 9de5636..27fb8a6 100644
+--- a/arch/arm/boot/dts/tegra-trimslice.dts
++++ b/arch/arm/boot/dts/tegra-trimslice.dts
+@@ -276,9 +276,11 @@
+
+ usb@c5000000 {
+ status = "okay";
++ nvidia,vbus-gpio = <&gpio 170 0>; /* gpio PV2 */
+ };
+
+ usb@c5004000 {
++ status = "okay";
+ nvidia,phy-reset-gpio = <&gpio 168 0>; /* gpio PV0 */
+ };
+
+diff --git a/arch/arm/mach-omap2/opp.c b/arch/arm/mach-omap2/opp.c
+index de6d464..d8f6dbf 100644
+--- a/arch/arm/mach-omap2/opp.c
++++ b/arch/arm/mach-omap2/opp.c
+@@ -53,7 +53,7 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
+ omap_table_init = 1;
+
+ /* Lets now register with OPP library */
+- for (i = 0; i < opp_def_size; i++) {
++ for (i = 0; i < opp_def_size; i++, opp_def++) {
+ struct omap_hwmod *oh;
+ struct device *dev;
+
+@@ -86,7 +86,6 @@ int __init omap_init_opp_table(struct omap_opp_def *opp_def,
+ __func__, opp_def->freq,
+ opp_def->hwmod_name, i, r);
+ }
+- opp_def++;
+ }
+
+ return 0;
+diff --git a/arch/m68k/kernel/sys_m68k.c b/arch/m68k/kernel/sys_m68k.c
+index 8623f8d..9a5932e 100644
+--- a/arch/m68k/kernel/sys_m68k.c
++++ b/arch/m68k/kernel/sys_m68k.c
+@@ -479,9 +479,13 @@ sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
+ goto bad_access;
+ }
+
+- mem_value = *mem;
++ /*
++ * No need to check for EFAULT; we know that the page is
++ * present and writable.
++ */
++ __get_user(mem_value, mem);
+ if (mem_value == oldval)
+- *mem = newval;
++ __put_user(newval, mem);
+
+ pte_unmap_unlock(pte, ptl);
+ up_read(&mm->mmap_sem);
+diff --git a/arch/mips/kernel/kspd.c b/arch/mips/kernel/kspd.c
+index 84d0639..b77f56b 100644
+--- a/arch/mips/kernel/kspd.c
++++ b/arch/mips/kernel/kspd.c
+@@ -323,7 +323,7 @@ static void sp_cleanup(void)
+ fdt = files_fdtable(files);
+ for (;;) {
+ unsigned long set;
+- i = j * __NFDBITS;
++ i = j * BITS_PER_LONG;
+ if (i >= fdt->max_fds)
+ break;
+ set = fdt->open_fds[j++];
+diff --git a/arch/powerpc/boot/dts/p1022ds.dtsi b/arch/powerpc/boot/dts/p1022ds.dtsi
+index 7cdb505..1b0673e 100644
+--- a/arch/powerpc/boot/dts/p1022ds.dtsi
++++ b/arch/powerpc/boot/dts/p1022ds.dtsi
+@@ -33,22 +33,6 @@
+ */
+
+ &board_lbc {
+- /*
+- * This node is used to access the pixis via "indirect" mode,
+- * which is done by writing the pixis register index to chip
+- * select 0 and the value to/from chip select 1. Indirect
+- * mode is the only way to access the pixis when DIU video
+- * is enabled. Note that this assumes that the first column
+- * of the 'ranges' property above is the chip select number.
+- */
+- board-control@0,0 {
+- compatible = "fsl,p1022ds-indirect-pixis";
+- reg = <0x0 0x0 1 /* CS0 */
+- 0x1 0x0 1>; /* CS1 */
+- interrupt-parent = <&mpic>;
+- interrupts = <8 0 0 0>;
+- };
+-
+ nor@0,0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
+index f0cb7f4..360585d 100644
+--- a/arch/powerpc/include/asm/reg.h
++++ b/arch/powerpc/include/asm/reg.h
+@@ -1024,7 +1024,8 @@
+ /* Macros for setting and retrieving special purpose registers */
+ #ifndef __ASSEMBLY__
+ #define mfmsr() ({unsigned long rval; \
+- asm volatile("mfmsr %0" : "=r" (rval)); rval;})
++ asm volatile("mfmsr %0" : "=r" (rval) : \
++ : "memory"); rval;})
+ #ifdef CONFIG_PPC_BOOK3S_64
+ #define __mtmsrd(v, l) asm volatile("mtmsrd %0," __stringify(l) \
+ : : "r" (v) : "memory")
+diff --git a/arch/powerpc/kernel/ftrace.c b/arch/powerpc/kernel/ftrace.c
+index bf99cfa..6324008 100644
+--- a/arch/powerpc/kernel/ftrace.c
++++ b/arch/powerpc/kernel/ftrace.c
+@@ -245,9 +245,9 @@ __ftrace_make_nop(struct module *mod,
+
+ /*
+ * On PPC32 the trampoline looks like:
+- * 0x3d, 0x60, 0x00, 0x00 lis r11,sym@ha
+- * 0x39, 0x6b, 0x00, 0x00 addi r11,r11,sym@l
+- * 0x7d, 0x69, 0x03, 0xa6 mtctr r11
++ * 0x3d, 0x80, 0x00, 0x00 lis r12,sym@ha
++ * 0x39, 0x8c, 0x00, 0x00 addi r12,r12,sym@l
++ * 0x7d, 0x89, 0x03, 0xa6 mtctr r12
+ * 0x4e, 0x80, 0x04, 0x20 bctr
+ */
+
+@@ -262,9 +262,9 @@ __ftrace_make_nop(struct module *mod,
+ pr_devel(" %08x %08x ", jmp[0], jmp[1]);
+
+ /* verify that this is what we expect it to be */
+- if (((jmp[0] & 0xffff0000) != 0x3d600000) ||
+- ((jmp[1] & 0xffff0000) != 0x396b0000) ||
+- (jmp[2] != 0x7d6903a6) ||
++ if (((jmp[0] & 0xffff0000) != 0x3d800000) ||
++ ((jmp[1] & 0xffff0000) != 0x398c0000) ||
++ (jmp[2] != 0x7d8903a6) ||
+ (jmp[3] != 0x4e800420)) {
+ printk(KERN_ERR "Not a trampoline\n");
+ return -EINVAL;
+diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c
+index f700c81..978330c 100644
+--- a/arch/powerpc/platforms/85xx/p1022_ds.c
++++ b/arch/powerpc/platforms/85xx/p1022_ds.c
+@@ -27,6 +27,7 @@
+ #include <sysdev/fsl_pci.h>
+ #include <asm/udbg.h>
+ #include <asm/fsl_guts.h>
++#include <asm/fsl_lbc.h>
+ #include "smp.h"
+
+ #include "mpc85xx.h"
+@@ -142,17 +143,73 @@ static void p1022ds_set_gamma_table(enum fsl_diu_monitor_port port,
+ {
+ }
+
++struct fsl_law {
++ u32 lawbar;
++ u32 reserved1;
++ u32 lawar;
++ u32 reserved[5];
++};
++
++#define LAWBAR_MASK 0x00F00000
++#define LAWBAR_SHIFT 12
++
++#define LAWAR_EN 0x80000000
++#define LAWAR_TGT_MASK 0x01F00000
++#define LAW_TRGT_IF_LBC (0x04 << 20)
++
++#define LAWAR_MASK (LAWAR_EN | LAWAR_TGT_MASK)
++#define LAWAR_MATCH (LAWAR_EN | LAW_TRGT_IF_LBC)
++
++#define BR_BA 0xFFFF8000
++
++/*
++ * Map a BRx value to a physical address
++ *
++ * The localbus BRx registers only store the lower 32 bits of the address. To
++ * obtain the upper four bits, we need to scan the LAW table. The entry which
++ * maps to the localbus will contain the upper four bits.
++ */
++static phys_addr_t lbc_br_to_phys(const void *ecm, unsigned int count, u32 br)
++{
++#ifndef CONFIG_PHYS_64BIT
++ /*
++ * If we only have 32-bit addressing, then the BRx address *is* the
++ * physical address.
++ */
++ return br & BR_BA;
++#else
++ const struct fsl_law *law = ecm + 0xc08;
++ unsigned int i;
++
++ for (i = 0; i < count; i++) {
++ u64 lawbar = in_be32(&law[i].lawbar);
++ u32 lawar = in_be32(&law[i].lawar);
++
++ if ((lawar & LAWAR_MASK) == LAWAR_MATCH)
++ /* Extract the upper four bits */
++ return (br & BR_BA) | ((lawbar & LAWBAR_MASK) << 12);
++ }
++
++ return 0;
++#endif
++}
++
+ /**
+ * p1022ds_set_monitor_port: switch the output to a different monitor port
+- *
+ */
+ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
+ {
+ struct device_node *guts_node;
+- struct device_node *indirect_node = NULL;
++ struct device_node *lbc_node = NULL;
++ struct device_node *law_node = NULL;
+ struct ccsr_guts __iomem *guts;
++ struct fsl_lbc_regs *lbc = NULL;
++ void *ecm = NULL;
+ u8 __iomem *lbc_lcs0_ba = NULL;
+ u8 __iomem *lbc_lcs1_ba = NULL;
++ phys_addr_t cs0_addr, cs1_addr;
++ const __be32 *iprop;
++ unsigned int num_laws;
+ u8 b;
+
+ /* Map the global utilities registers. */
+@@ -168,25 +225,43 @@ static void p1022ds_set_monitor_port(enum fsl_diu_monitor_port port)
+ goto exit;
+ }
+
+- indirect_node = of_find_compatible_node(NULL, NULL,
+- "fsl,p1022ds-indirect-pixis");
+- if (!indirect_node) {
+- pr_err("p1022ds: missing pixis indirect mode node\n");
++ lbc_node = of_find_compatible_node(NULL, NULL, "fsl,p1022-elbc");
++ if (!lbc_node) {
++ pr_err("p1022ds: missing localbus node\n");
+ goto exit;
+ }
+
+- lbc_lcs0_ba = of_iomap(indirect_node, 0);
+- if (!lbc_lcs0_ba) {
+- pr_err("p1022ds: could not map localbus chip select 0\n");
++ lbc = of_iomap(lbc_node, 0);
++ if (!lbc) {
++ pr_err("p1022ds: could not map localbus node\n");
+ goto exit;
+ }
+
+- lbc_lcs1_ba = of_iomap(indirect_node, 1);
+- if (!lbc_lcs1_ba) {
+- pr_err("p1022ds: could not map localbus chip select 1\n");
++ law_node = of_find_compatible_node(NULL, NULL, "fsl,ecm-law");
++ if (!law_node) {
++ pr_err("p1022ds: missing local access window node\n");
+ goto exit;
+ }
+
++ ecm = of_iomap(law_node, 0);
++ if (!ecm) {
++ pr_err("p1022ds: could not map local access window node\n");
++ goto exit;
++ }
++
++ iprop = of_get_property(law_node, "fsl,num-laws", 0);
++ if (!iprop) {
++ pr_err("p1022ds: LAW node is missing fsl,num-laws property\n");
++ goto exit;
++ }
++ num_laws = be32_to_cpup(iprop);
++
++ cs0_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[0].br));
++ cs1_addr = lbc_br_to_phys(ecm, num_laws, in_be32(&lbc->bank[1].br));
++
++ lbc_lcs0_ba = ioremap(cs0_addr, 1);
++ lbc_lcs1_ba = ioremap(cs1_addr, 1);
++
+ /* Make sure we're in indirect mode first. */
+ if ((in_be32(&guts->pmuxcr) & PMUXCR_ELBCDIU_MASK) !=
+ PMUXCR_ELBCDIU_DIU) {
+@@ -254,10 +329,15 @@ exit:
+ iounmap(lbc_lcs1_ba);
+ if (lbc_lcs0_ba)
+ iounmap(lbc_lcs0_ba);
++ if (lbc)
++ iounmap(lbc);
++ if (ecm)
++ iounmap(ecm);
+ if (guts)
+ iounmap(guts);
+
+- of_node_put(indirect_node);
++ of_node_put(law_node);
++ of_node_put(lbc_node);
+ of_node_put(guts_node);
+ }
+
+diff --git a/arch/powerpc/platforms/pseries/eeh_event.c b/arch/powerpc/platforms/pseries/eeh_event.c
+index 4cb375c..fb50631 100644
+--- a/arch/powerpc/platforms/pseries/eeh_event.c
++++ b/arch/powerpc/platforms/pseries/eeh_event.c
+@@ -85,8 +85,10 @@ static int eeh_event_handler(void * dummy)
+ set_current_state(TASK_INTERRUPTIBLE); /* Don't add to load average */
+ edev = handle_eeh_events(event);
+
+- eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
+- pci_dev_put(edev->pdev);
++ if (edev) {
++ eeh_clear_slot(eeh_dev_to_of_node(edev), EEH_MODE_RECOVERING);
++ pci_dev_put(edev->pdev);
++ }
+
+ kfree(event);
+ mutex_unlock(&eeh_event_mutex);
+diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
+index 69bdf72..89b0b39 100644
+--- a/arch/s390/include/asm/mmu_context.h
++++ b/arch/s390/include/asm/mmu_context.h
+@@ -13,7 +13,6 @@
+ #include <asm/uaccess.h>
+ #include <asm/tlbflush.h>
+ #include <asm/ctl_reg.h>
+-#include <asm-generic/mm_hooks.h>
+
+ static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+@@ -93,4 +92,17 @@ static inline void activate_mm(struct mm_struct *prev,
+ switch_mm(prev, next, current);
+ }
+
++static inline void arch_dup_mmap(struct mm_struct *oldmm,
++ struct mm_struct *mm)
++{
++#ifdef CONFIG_64BIT
++ if (oldmm->context.asce_limit < mm->context.asce_limit)
++ crst_table_downgrade(mm, oldmm->context.asce_limit);
++#endif
++}
++
++static inline void arch_exit_mmap(struct mm_struct *mm)
++{
++}
++
+ #endif /* __S390_MMU_CONTEXT_H */
+diff --git a/arch/s390/include/asm/processor.h b/arch/s390/include/asm/processor.h
+index 20d0585..3987b2f 100644
+--- a/arch/s390/include/asm/processor.h
++++ b/arch/s390/include/asm/processor.h
+@@ -122,7 +122,9 @@ struct stack_frame {
+ regs->psw.mask = psw_user_bits | PSW_MASK_BA; \
+ regs->psw.addr = new_psw | PSW_ADDR_AMODE; \
+ regs->gprs[15] = new_stackp; \
++ __tlb_flush_mm(current->mm); \
+ crst_table_downgrade(current->mm, 1UL << 31); \
++ update_mm(current->mm, current); \
+ } while (0)
+
+ /* Forward declaration, a strange C thing */
+diff --git a/arch/s390/kernel/processor.c b/arch/s390/kernel/processor.c
+index 6e0073e..07c7bf4 100644
+--- a/arch/s390/kernel/processor.c
++++ b/arch/s390/kernel/processor.c
+@@ -26,12 +26,14 @@ static DEFINE_PER_CPU(struct cpuid, cpu_id);
+ void __cpuinit cpu_init(void)
+ {
+ struct cpuid *id = &per_cpu(cpu_id, smp_processor_id());
++ struct s390_idle_data *idle = &__get_cpu_var(s390_idle);
+
+ get_cpu_id(id);
+ atomic_inc(&init_mm.mm_count);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
++ memset(idle, 0, sizeof(*idle));
+ }
+
+ /*
+diff --git a/arch/s390/kernel/smp.c b/arch/s390/kernel/smp.c
+index 15cca26..25e3f3e 100644
+--- a/arch/s390/kernel/smp.c
++++ b/arch/s390/kernel/smp.c
+@@ -984,14 +984,11 @@ static int __cpuinit smp_cpu_notify(struct notifier_block *self,
+ unsigned int cpu = (unsigned int)(long)hcpu;
+ struct cpu *c = &pcpu_devices[cpu].cpu;
+ struct device *s = &c->dev;
+- struct s390_idle_data *idle;
+ int err = 0;
+
+ switch (action) {
+ case CPU_ONLINE:
+ case CPU_ONLINE_FROZEN:
+- idle = &per_cpu(s390_idle, cpu);
+- memset(idle, 0, sizeof(struct s390_idle_data));
+ err = sysfs_create_group(&s->kobj, &cpu_online_attr_group);
+ break;
+ case CPU_DEAD:
+diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c
+index 72cec9e..470651f 100644
+--- a/arch/s390/mm/fault.c
++++ b/arch/s390/mm/fault.c
+@@ -443,6 +443,7 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ struct pt_regs regs;
+ int access, fault;
+
++ /* Emulate a uaccess fault from kernel mode. */
+ regs.psw.mask = psw_kernel_bits | PSW_MASK_DAT | PSW_MASK_MCHECK;
+ if (!irqs_disabled())
+ regs.psw.mask |= PSW_MASK_IO | PSW_MASK_EXT;
+@@ -452,12 +453,12 @@ int __handle_fault(unsigned long uaddr, unsigned long pgm_int_code, int write)
+ regs.int_parm_long = (uaddr & PAGE_MASK) | 2;
+ access = write ? VM_WRITE : VM_READ;
+ fault = do_exception(&regs, access);
+- if (unlikely(fault)) {
+- if (fault & VM_FAULT_OOM)
+- return -EFAULT;
+- else if (fault & VM_FAULT_SIGBUS)
+- do_sigbus(&regs);
+- }
++ /*
++ * Since the fault happened in kernel mode while performing a uaccess
++ * all we need to do now is emulating a fixup in case "fault" is not
++ * zero.
++ * For the calling uaccess functions this results always in -EFAULT.
++ */
+ return fault ? -EFAULT : 0;
+ }
+
+diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
+index 2857c48..a64fe53 100644
+--- a/arch/s390/mm/mmap.c
++++ b/arch/s390/mm/mmap.c
+@@ -105,9 +105,15 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
+
+ int s390_mmap_check(unsigned long addr, unsigned long len)
+ {
++ int rc;
++
+ if (!is_compat_task() &&
+- len >= TASK_SIZE && TASK_SIZE < (1UL << 53))
+- return crst_table_upgrade(current->mm, 1UL << 53);
++ len >= TASK_SIZE && TASK_SIZE < (1UL << 53)) {
++ rc = crst_table_upgrade(current->mm, 1UL << 53);
++ if (rc)
++ return rc;
++ update_mm(current->mm, current);
++ }
+ return 0;
+ }
+
+@@ -127,6 +133,7 @@ s390_get_unmapped_area(struct file *filp, unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area(filp, addr, len, pgoff, flags);
+ }
+ return area;
+@@ -149,6 +156,7 @@ s390_get_unmapped_area_topdown(struct file *filp, const unsigned long addr,
+ rc = crst_table_upgrade(mm, 1UL << 53);
+ if (rc)
+ return (unsigned long) rc;
++ update_mm(mm, current);
+ area = arch_get_unmapped_area_topdown(filp, addr, len,
+ pgoff, flags);
+ }
+diff --git a/arch/s390/mm/pgtable.c b/arch/s390/mm/pgtable.c
+index a3db5a3..56e6fd5 100644
+--- a/arch/s390/mm/pgtable.c
++++ b/arch/s390/mm/pgtable.c
+@@ -85,7 +85,6 @@ repeat:
+ crst_table_free(mm, table);
+ if (mm->context.asce_limit < limit)
+ goto repeat;
+- update_mm(mm, current);
+ return 0;
+ }
+
+@@ -93,9 +92,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ {
+ pgd_t *pgd;
+
+- if (mm->context.asce_limit <= limit)
+- return;
+- __tlb_flush_mm(mm);
+ while (mm->context.asce_limit > limit) {
+ pgd = mm->pgd;
+ switch (pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) {
+@@ -118,7 +114,6 @@ void crst_table_downgrade(struct mm_struct *mm, unsigned long limit)
+ mm->task_size = mm->context.asce_limit;
+ crst_table_free(mm, (unsigned long *) pgd);
+ }
+- update_mm(mm, current);
+ }
+ #endif
+
+diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
+index da27c5d..c46ed49 100644
+--- a/arch/x86/kernel/cpu/mcheck/mce.c
++++ b/arch/x86/kernel/cpu/mcheck/mce.c
+@@ -1186,6 +1186,7 @@ void mce_notify_process(void)
+ {
+ unsigned long pfn;
+ struct mce_info *mi = mce_find_info();
++ int flags = MF_ACTION_REQUIRED;
+
+ if (!mi)
+ mce_panic("Lost physical address for unconsumed uncorrectable error", NULL, NULL);
+@@ -1200,8 +1201,9 @@ void mce_notify_process(void)
+ * doomed. We still need to mark the page as poisoned and alert any
+ * other users of the page.
+ */
+- if (memory_failure(pfn, MCE_VECTOR, MF_ACTION_REQUIRED) < 0 ||
+- mi->restartable == 0) {
++ if (!mi->restartable)
++ flags |= MF_MUST_KILL;
++ if (memory_failure(pfn, MCE_VECTOR, flags) < 0) {
+ pr_err("Memory error not recovered");
+ force_sig(SIGBUS, current);
+ }
+diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
+index fbdfc69..24b852b 100644
+--- a/arch/x86/kernel/microcode_core.c
++++ b/arch/x86/kernel/microcode_core.c
+@@ -298,19 +298,31 @@ static ssize_t reload_store(struct device *dev,
+ const char *buf, size_t size)
+ {
+ unsigned long val;
+- int cpu = dev->id;
+- ssize_t ret = 0;
++ int cpu;
++ ssize_t ret = 0, tmp_ret;
++
++ /* allow reload only from the BSP */
++ if (boot_cpu_data.cpu_index != dev->id)
++ return -EINVAL;
+
+ ret = kstrtoul(buf, 0, &val);
+ if (ret)
+ return ret;
+
+- if (val == 1) {
+- get_online_cpus();
+- if (cpu_online(cpu))
+- ret = reload_for_cpu(cpu);
+- put_online_cpus();
++ if (val != 1)
++ return size;
++
++ get_online_cpus();
++ for_each_online_cpu(cpu) {
++ tmp_ret = reload_for_cpu(cpu);
++ if (tmp_ret != 0)
++ pr_warn("Error reloading microcode on CPU %d\n", cpu);
++
++ /* save retval of the first encountered reload error */
++ if (!ret)
++ ret = tmp_ret;
+ }
++ put_online_cpus();
+
+ if (!ret)
+ ret = size;
+diff --git a/drivers/acpi/ac.c b/drivers/acpi/ac.c
+index 6512b20..d1fcbc0 100644
+--- a/drivers/acpi/ac.c
++++ b/drivers/acpi/ac.c
+@@ -292,7 +292,9 @@ static int acpi_ac_add(struct acpi_device *device)
+ ac->charger.properties = ac_props;
+ ac->charger.num_properties = ARRAY_SIZE(ac_props);
+ ac->charger.get_property = get_ac_property;
+- power_supply_register(&ac->device->dev, &ac->charger);
++ result = power_supply_register(&ac->device->dev, &ac->charger);
++ if (result)
++ goto end;
+
+ printk(KERN_INFO PREFIX "%s [%s] (%s)\n",
+ acpi_device_name(device), acpi_device_bid(device),
+diff --git a/drivers/acpi/apei/apei-base.c b/drivers/acpi/apei/apei-base.c
+index 6686b1e..00a7836 100644
+--- a/drivers/acpi/apei/apei-base.c
++++ b/drivers/acpi/apei/apei-base.c
+@@ -586,6 +586,11 @@ static int apei_check_gar(struct acpi_generic_address *reg, u64 *paddr,
+ }
+ *access_bit_width = 1UL << (access_size_code + 2);
+
++ /* Fixup common BIOS bug */
++ if (bit_width == 32 && bit_offset == 0 && (*paddr & 0x03) == 0 &&
++ *access_bit_width < 32)
++ *access_bit_width = 32;
++
+ if ((bit_width + bit_offset) > *access_bit_width) {
+ pr_warning(FW_BUG APEI_PFX
+ "Invalid bit width + offset in GAR [0x%llx/%u/%u/%u/%u]\n",
+diff --git a/drivers/base/power/main.c b/drivers/base/power/main.c
+index 9cb845e..742fcbe 100644
+--- a/drivers/base/power/main.c
++++ b/drivers/base/power/main.c
+@@ -989,8 +989,16 @@ static int dpm_suspend_late(pm_message_t state)
+ int dpm_suspend_end(pm_message_t state)
+ {
+ int error = dpm_suspend_late(state);
++ if (error)
++ return error;
+
+- return error ? : dpm_suspend_noirq(state);
++ error = dpm_suspend_noirq(state);
++ if (error) {
++ dpm_resume_early(state);
++ return error;
++ }
++
++ return 0;
+ }
+ EXPORT_SYMBOL_GPL(dpm_suspend_end);
+
+diff --git a/drivers/char/tpm/tpm.c b/drivers/char/tpm/tpm.c
+index ad7c732..08427ab 100644
+--- a/drivers/char/tpm/tpm.c
++++ b/drivers/char/tpm/tpm.c
+@@ -827,10 +827,10 @@ EXPORT_SYMBOL_GPL(tpm_pcr_extend);
+ int tpm_do_selftest(struct tpm_chip *chip)
+ {
+ int rc;
+- u8 digest[TPM_DIGEST_SIZE];
+ unsigned int loops;
+ unsigned int delay_msec = 1000;
+ unsigned long duration;
++ struct tpm_cmd_t cmd;
+
+ duration = tpm_calc_ordinal_duration(chip,
+ TPM_ORD_CONTINUE_SELFTEST);
+@@ -845,7 +845,15 @@ int tpm_do_selftest(struct tpm_chip *chip)
+ return rc;
+
+ do {
+- rc = __tpm_pcr_read(chip, 0, digest);
++ /* Attempt to read a PCR value */
++ cmd.header.in = pcrread_header;
++ cmd.params.pcrread_in.pcr_idx = cpu_to_be32(0);
++ rc = tpm_transmit(chip, (u8 *) &cmd, READ_PCR_RESULT_SIZE);
++
++ if (rc < TPM_HEADER_SIZE)
++ return -EFAULT;
++
++ rc = be32_to_cpu(cmd.header.out.return_code);
+ if (rc == TPM_ERR_DISABLED || rc == TPM_ERR_DEACTIVATED) {
+ dev_info(chip->dev,
+ "TPM is disabled/deactivated (0x%X)\n", rc);
+diff --git a/drivers/gpu/drm/nouveau/nouveau_drv.h b/drivers/gpu/drm/nouveau/nouveau_drv.h
+index 8613cb2..b863a3a 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_drv.h
++++ b/drivers/gpu/drm/nouveau/nouveau_drv.h
+@@ -689,8 +689,6 @@ struct drm_nouveau_private {
+ void (*irq_handler[32])(struct drm_device *);
+ bool msi_enabled;
+
+- struct list_head vbl_waiting;
+-
+ struct {
+ struct drm_global_reference mem_global_ref;
+ struct ttm_bo_global_ref bo_global_ref;
+diff --git a/drivers/gpu/drm/nouveau/nouveau_irq.c b/drivers/gpu/drm/nouveau/nouveau_irq.c
+index 868c7fd..b2c2937 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_irq.c
++++ b/drivers/gpu/drm/nouveau/nouveau_irq.c
+@@ -41,12 +41,8 @@
+ void
+ nouveau_irq_preinstall(struct drm_device *dev)
+ {
+- struct drm_nouveau_private *dev_priv = dev->dev_private;
+-
+ /* Master disable */
+ nv_wr32(dev, NV03_PMC_INTR_EN_0, 0);
+-
+- INIT_LIST_HEAD(&dev_priv->vbl_waiting);
+ }
+
+ int
+diff --git a/drivers/gpu/drm/nouveau/nouveau_software.h b/drivers/gpu/drm/nouveau/nouveau_software.h
+index e60bc6c..b507a92 100644
+--- a/drivers/gpu/drm/nouveau/nouveau_software.h
++++ b/drivers/gpu/drm/nouveau/nouveau_software.h
+@@ -38,6 +38,7 @@ static inline void
+ nouveau_software_context_new(struct nouveau_software_chan *pch)
+ {
+ INIT_LIST_HEAD(&pch->flip);
++ INIT_LIST_HEAD(&pch->vblank.list);
+ }
+
+ static inline void
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+index abc3662..219850d 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc
+@@ -119,9 +119,9 @@ dispatch_dma:
+ // mthd 0x030c-0x0340, various stuff
+ .b16 0xc3 14
+ .b32 #ctx_src_address_high ~0x000000ff
+-.b32 #ctx_src_address_low ~0xfffffff0
++.b32 #ctx_src_address_low ~0xffffffff
+ .b32 #ctx_dst_address_high ~0x000000ff
+-.b32 #ctx_dst_address_low ~0xfffffff0
++.b32 #ctx_dst_address_low ~0xffffffff
+ .b32 #ctx_src_pitch ~0x0007ffff
+ .b32 #ctx_dst_pitch ~0x0007ffff
+ .b32 #ctx_xcnt ~0x0000ffff
+diff --git a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+index 1f33fbd..37d6de3 100644
+--- a/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nva3_copy.fuc.h
+@@ -1,37 +1,72 @@
+-uint32_t nva3_pcopy_data[] = {
++u32 nva3_pcopy_data[] = {
++/* 0x0000: ctx_object */
+ 0x00000000,
++/* 0x0004: ctx_dma */
++/* 0x0004: ctx_dma_query */
+ 0x00000000,
++/* 0x0008: ctx_dma_src */
+ 0x00000000,
++/* 0x000c: ctx_dma_dst */
+ 0x00000000,
++/* 0x0010: ctx_query_address_high */
+ 0x00000000,
++/* 0x0014: ctx_query_address_low */
+ 0x00000000,
++/* 0x0018: ctx_query_counter */
+ 0x00000000,
++/* 0x001c: ctx_src_address_high */
+ 0x00000000,
++/* 0x0020: ctx_src_address_low */
+ 0x00000000,
++/* 0x0024: ctx_src_pitch */
+ 0x00000000,
++/* 0x0028: ctx_src_tile_mode */
+ 0x00000000,
++/* 0x002c: ctx_src_xsize */
+ 0x00000000,
++/* 0x0030: ctx_src_ysize */
+ 0x00000000,
++/* 0x0034: ctx_src_zsize */
+ 0x00000000,
++/* 0x0038: ctx_src_zoff */
+ 0x00000000,
++/* 0x003c: ctx_src_xoff */
+ 0x00000000,
++/* 0x0040: ctx_src_yoff */
+ 0x00000000,
++/* 0x0044: ctx_src_cpp */
+ 0x00000000,
++/* 0x0048: ctx_dst_address_high */
+ 0x00000000,
++/* 0x004c: ctx_dst_address_low */
+ 0x00000000,
++/* 0x0050: ctx_dst_pitch */
+ 0x00000000,
++/* 0x0054: ctx_dst_tile_mode */
+ 0x00000000,
++/* 0x0058: ctx_dst_xsize */
+ 0x00000000,
++/* 0x005c: ctx_dst_ysize */
+ 0x00000000,
++/* 0x0060: ctx_dst_zsize */
+ 0x00000000,
++/* 0x0064: ctx_dst_zoff */
+ 0x00000000,
++/* 0x0068: ctx_dst_xoff */
+ 0x00000000,
++/* 0x006c: ctx_dst_yoff */
+ 0x00000000,
++/* 0x0070: ctx_dst_cpp */
+ 0x00000000,
++/* 0x0074: ctx_format */
+ 0x00000000,
++/* 0x0078: ctx_swz_const0 */
+ 0x00000000,
++/* 0x007c: ctx_swz_const1 */
+ 0x00000000,
++/* 0x0080: ctx_xcnt */
+ 0x00000000,
++/* 0x0084: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+@@ -63,6 +98,7 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
++/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+@@ -73,6 +109,7 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00010162,
+ 0x00000000,
+ 0x00030060,
++/* 0x0128: dispatch_dma */
+ 0x00010170,
+ 0x00000000,
+ 0x00010170,
+@@ -118,11 +155,11 @@ uint32_t nva3_pcopy_data[] = {
+ 0x0000001c,
+ 0xffffff00,
+ 0x00000020,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000048,
+ 0xffffff00,
+ 0x0000004c,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000024,
+ 0xfff80000,
+ 0x00000050,
+@@ -146,7 +183,8 @@ uint32_t nva3_pcopy_data[] = {
+ 0x00000800,
+ };
+
+-uint32_t nva3_pcopy_code[] = {
++u32 nva3_pcopy_code[] = {
++/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+@@ -158,23 +196,31 @@ uint32_t nva3_pcopy_code[] = {
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
++/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
++/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
++/* 0x0041: ih_no_chsw */
+ 0x0412c472,
+ 0xf4060bf4,
++/* 0x004a: ih_no_cmd */
+ 0x11c4c321,
+ 0x4001d00c,
++/* 0x0052: swctx */
+ 0x47f101f8,
+ 0x4bfe7700,
+ 0x0007fe00,
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
++/* 0x006b: swctx_load */
+ 0xfa060ef4,
++/* 0x006e: swctx_done */
+ 0x03f80504,
++/* 0x0072: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+@@ -183,18 +229,22 @@ uint32_t nva3_pcopy_code[] = {
+ 0x1e3af052,
+ 0xf00023d0,
+ 0x24d00147,
++/* 0x0093: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x220bf41e,
+ 0xf40131f4,
+ 0x57f05221,
+ 0x0367f004,
++/* 0x00a8: chsw_load_ctx_dma */
+ 0xa07856bc,
+ 0xb6018068,
+ 0x87d00884,
+ 0x0162b600,
++/* 0x00bb: chsw_finish_load */
+ 0xf0f018f4,
+ 0x23d00237,
++/* 0x00c3: dispatch */
+ 0xf100f880,
+ 0xcf190037,
+ 0x33cf4032,
+@@ -202,6 +252,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x1024b607,
+ 0x010057f1,
+ 0x74bd64bd,
++/* 0x00dc: dispatch_loop */
+ 0x58005658,
+ 0x50b60157,
+ 0x0446b804,
+@@ -211,6 +262,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb60276bb,
+ 0x57bb0374,
+ 0xdf0ef400,
++/* 0x0100: dispatch_valid_mthd */
+ 0xb60246bb,
+ 0x45bb0344,
+ 0x01459800,
+@@ -220,31 +272,41 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb0014658,
+ 0x1bf40064,
+ 0x00538009,
++/* 0x0127: dispatch_cmd */
+ 0xf4300ef4,
+ 0x55f90132,
+ 0xf40c01f4,
++/* 0x0132: dispatch_invalid_bitfield */
+ 0x25f0250e,
++/* 0x0135: dispatch_illegal_mthd */
+ 0x0125f002,
++/* 0x0138: dispatch_error */
+ 0x100047f1,
+ 0xd00042d0,
+ 0x27f04043,
+ 0x0002d040,
++/* 0x0148: hostirq_wait */
+ 0xf08002cf,
+ 0x24b04024,
+ 0xf71bf400,
++/* 0x0154: dispatch_done */
+ 0x1d0027f1,
+ 0xd00137f0,
+ 0x00f80023,
++/* 0x0160: cmd_nop */
++/* 0x0162: cmd_pm_trigger */
+ 0x27f100f8,
+ 0x34bd2200,
+ 0xd00233f0,
+ 0x00f80023,
++/* 0x0170: cmd_dma */
+ 0x012842b7,
+ 0xf00145b6,
+ 0x43801e39,
+ 0x0040b701,
+ 0x0644b606,
+ 0xf80043d0,
++/* 0x0189: cmd_exec_set_format */
+ 0xf030f400,
+ 0xb00001b0,
+ 0x01b00101,
+@@ -256,20 +318,26 @@ uint32_t nva3_pcopy_code[] = {
+ 0x70b63847,
+ 0x0232f401,
+ 0x94bd84bd,
++/* 0x01b4: ncomp_loop */
+ 0xb60f4ac4,
+ 0xb4bd0445,
++/* 0x01bc: bpc_loop */
+ 0xf404a430,
+ 0xa5ff0f18,
+ 0x00cbbbc0,
+ 0xf40231f4,
++/* 0x01ce: cmp_c0 */
+ 0x1bf4220e,
+ 0x10c7f00c,
+ 0xf400cbbb,
++/* 0x01da: cmp_c1 */
+ 0xa430160e,
+ 0x0c18f406,
+ 0xbb14c7f0,
+ 0x0ef400cb,
++/* 0x01e9: cmp_zero */
+ 0x80c7f107,
++/* 0x01ed: bpc_next */
+ 0x01c83800,
+ 0xb60180b6,
+ 0xb5b801b0,
+@@ -280,6 +348,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x98110680,
+ 0x68fd2008,
+ 0x0502f400,
++/* 0x0216: dst_xcnt */
+ 0x75fd64bd,
+ 0x1c078000,
+ 0xf10078fd,
+@@ -304,6 +373,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x980056d0,
+ 0x56d01f06,
+ 0x1030f440,
++/* 0x0276: cmd_exec_set_surface_tiled */
+ 0x579800f8,
+ 0x6879c70a,
+ 0xb66478c7,
+@@ -311,9 +381,11 @@ uint32_t nva3_pcopy_code[] = {
+ 0x0e76b060,
+ 0xf0091bf4,
+ 0x0ef40477,
++/* 0x0291: xtile64 */
+ 0x027cf00f,
+ 0xfd1170b6,
+ 0x77f00947,
++/* 0x029d: xtileok */
+ 0x0f5a9806,
+ 0xfd115b98,
+ 0xb7f000ab,
+@@ -371,6 +443,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x67d00600,
+ 0x0060b700,
+ 0x0068d004,
++/* 0x0382: cmd_exec_set_surface_linear */
+ 0x6cf000f8,
+ 0x0260b702,
+ 0x0864b602,
+@@ -381,13 +454,16 @@ uint32_t nva3_pcopy_code[] = {
+ 0xb70067d0,
+ 0x98040060,
+ 0x67d00957,
++/* 0x03ab: cmd_exec_wait */
+ 0xf900f800,
+ 0xf110f900,
+ 0xb6080007,
++/* 0x03b6: loop */
+ 0x01cf0604,
+ 0x0114f000,
+ 0xfcfa1bf4,
+ 0xf800fc10,
++/* 0x03c5: cmd_exec_query */
+ 0x0d34c800,
+ 0xf5701bf4,
+ 0xf103ab21,
+@@ -417,6 +493,7 @@ uint32_t nva3_pcopy_code[] = {
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
++/* 0x0438: query_counter */
+ 0x03ab21f5,
+ 0x080c47f1,
+ 0x980644b6,
+@@ -439,11 +516,13 @@ uint32_t nva3_pcopy_code[] = {
+ 0x47f10153,
+ 0x44b60800,
+ 0x0045d006,
++/* 0x0492: cmd_exec */
+ 0x21f500f8,
+ 0x3fc803ab,
+ 0x0e0bf400,
+ 0x018921f5,
+ 0x020047f1,
++/* 0x04a7: cmd_exec_no_format */
+ 0xf11e0ef4,
+ 0xb6081067,
+ 0x77f00664,
+@@ -451,19 +530,24 @@ uint32_t nva3_pcopy_code[] = {
+ 0x981c0780,
+ 0x67d02007,
+ 0x4067d000,
++/* 0x04c2: cmd_exec_init_src_surface */
+ 0x32f444bd,
+ 0xc854bd02,
+ 0x0bf4043f,
+ 0x8221f50a,
+ 0x0a0ef403,
++/* 0x04d4: src_tiled */
+ 0x027621f5,
++/* 0x04db: cmd_exec_init_dst_surface */
+ 0xf40749f0,
+ 0x57f00231,
+ 0x083fc82c,
+ 0xf50a0bf4,
+ 0xf4038221,
++/* 0x04ee: dst_tiled */
+ 0x21f50a0e,
+ 0x49f00276,
++/* 0x04f5: cmd_exec_kick */
+ 0x0057f108,
+ 0x0654b608,
+ 0xd0210698,
+@@ -473,6 +557,8 @@ uint32_t nva3_pcopy_code[] = {
+ 0xc80054d0,
+ 0x0bf40c3f,
+ 0xc521f507,
++/* 0x0519: cmd_exec_done */
++/* 0x051b: cmd_wrcache_flush */
+ 0xf100f803,
+ 0xbd220027,
+ 0x0133f034,
+diff --git a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+index a8d1745..cd879f3 100644
+--- a/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
++++ b/drivers/gpu/drm/nouveau/nvc0_copy.fuc.h
+@@ -1,34 +1,65 @@
+-uint32_t nvc0_pcopy_data[] = {
++u32 nvc0_pcopy_data[] = {
++/* 0x0000: ctx_object */
+ 0x00000000,
++/* 0x0004: ctx_query_address_high */
+ 0x00000000,
++/* 0x0008: ctx_query_address_low */
+ 0x00000000,
++/* 0x000c: ctx_query_counter */
+ 0x00000000,
++/* 0x0010: ctx_src_address_high */
+ 0x00000000,
++/* 0x0014: ctx_src_address_low */
+ 0x00000000,
++/* 0x0018: ctx_src_pitch */
+ 0x00000000,
++/* 0x001c: ctx_src_tile_mode */
+ 0x00000000,
++/* 0x0020: ctx_src_xsize */
+ 0x00000000,
++/* 0x0024: ctx_src_ysize */
+ 0x00000000,
++/* 0x0028: ctx_src_zsize */
+ 0x00000000,
++/* 0x002c: ctx_src_zoff */
+ 0x00000000,
++/* 0x0030: ctx_src_xoff */
+ 0x00000000,
++/* 0x0034: ctx_src_yoff */
+ 0x00000000,
++/* 0x0038: ctx_src_cpp */
+ 0x00000000,
++/* 0x003c: ctx_dst_address_high */
+ 0x00000000,
++/* 0x0040: ctx_dst_address_low */
+ 0x00000000,
++/* 0x0044: ctx_dst_pitch */
+ 0x00000000,
++/* 0x0048: ctx_dst_tile_mode */
+ 0x00000000,
++/* 0x004c: ctx_dst_xsize */
+ 0x00000000,
++/* 0x0050: ctx_dst_ysize */
+ 0x00000000,
++/* 0x0054: ctx_dst_zsize */
+ 0x00000000,
++/* 0x0058: ctx_dst_zoff */
+ 0x00000000,
++/* 0x005c: ctx_dst_xoff */
+ 0x00000000,
++/* 0x0060: ctx_dst_yoff */
+ 0x00000000,
++/* 0x0064: ctx_dst_cpp */
+ 0x00000000,
++/* 0x0068: ctx_format */
+ 0x00000000,
++/* 0x006c: ctx_swz_const0 */
+ 0x00000000,
++/* 0x0070: ctx_swz_const1 */
+ 0x00000000,
++/* 0x0074: ctx_xcnt */
+ 0x00000000,
++/* 0x0078: ctx_ycnt */
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
+@@ -63,6 +94,7 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000000,
+ 0x00000000,
+ 0x00000000,
++/* 0x0100: dispatch_table */
+ 0x00010000,
+ 0x00000000,
+ 0x00000000,
+@@ -111,11 +143,11 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000010,
+ 0xffffff00,
+ 0x00000014,
+- 0x0000000f,
++ 0x00000000,
+ 0x0000003c,
+ 0xffffff00,
+ 0x00000040,
+- 0x0000000f,
++ 0x00000000,
+ 0x00000018,
+ 0xfff80000,
+ 0x00000044,
+@@ -139,7 +171,8 @@ uint32_t nvc0_pcopy_data[] = {
+ 0x00000800,
+ };
+
+-uint32_t nvc0_pcopy_code[] = {
++u32 nvc0_pcopy_code[] = {
++/* 0x0000: main */
+ 0x04fe04bd,
+ 0x3517f000,
+ 0xf10010fe,
+@@ -151,15 +184,20 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x17f11031,
+ 0x27f01200,
+ 0x0012d003,
++/* 0x002f: spin */
+ 0xf40031f4,
+ 0x0ef40028,
++/* 0x0035: ih */
+ 0x8001cffd,
+ 0xf40812c4,
+ 0x21f4060b,
++/* 0x0041: ih_no_chsw */
+ 0x0412c4ca,
+ 0xf5070bf4,
++/* 0x004b: ih_no_cmd */
+ 0xc4010221,
+ 0x01d00c11,
++/* 0x0053: swctx */
+ 0xf101f840,
+ 0xfe770047,
+ 0x47f1004b,
+@@ -188,8 +226,11 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xf00204b9,
+ 0x01f40643,
+ 0x0604fa09,
++/* 0x00c3: swctx_load */
+ 0xfa060ef4,
++/* 0x00c6: swctx_done */
+ 0x03f80504,
++/* 0x00ca: chsw */
+ 0x27f100f8,
+ 0x23cf1400,
+ 0x1e3fc800,
+@@ -198,18 +239,22 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1e3af053,
+ 0xf00023d0,
+ 0x24d00147,
++/* 0x00eb: chsw_no_unload */
+ 0xcf00f880,
+ 0x3dc84023,
+ 0x090bf41e,
+ 0xf40131f4,
++/* 0x00fa: chsw_finish_load */
+ 0x37f05321,
+ 0x8023d002,
++/* 0x0102: dispatch */
+ 0x37f100f8,
+ 0x32cf1900,
+ 0x0033cf40,
+ 0x07ff24e4,
+ 0xf11024b6,
+ 0xbd010057,
++/* 0x011b: dispatch_loop */
+ 0x5874bd64,
+ 0x57580056,
+ 0x0450b601,
+@@ -219,6 +264,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xbb0f08f4,
+ 0x74b60276,
+ 0x0057bb03,
++/* 0x013f: dispatch_valid_mthd */
+ 0xbbdf0ef4,
+ 0x44b60246,
+ 0x0045bb03,
+@@ -229,24 +275,33 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x64b00146,
+ 0x091bf400,
+ 0xf4005380,
++/* 0x0166: dispatch_cmd */
+ 0x32f4300e,
+ 0xf455f901,
+ 0x0ef40c01,
++/* 0x0171: dispatch_invalid_bitfield */
+ 0x0225f025,
++/* 0x0174: dispatch_illegal_mthd */
++/* 0x0177: dispatch_error */
+ 0xf10125f0,
+ 0xd0100047,
+ 0x43d00042,
+ 0x4027f040,
++/* 0x0187: hostirq_wait */
+ 0xcf0002d0,
+ 0x24f08002,
+ 0x0024b040,
++/* 0x0193: dispatch_done */
+ 0xf1f71bf4,
+ 0xf01d0027,
+ 0x23d00137,
++/* 0x019f: cmd_nop */
+ 0xf800f800,
++/* 0x01a1: cmd_pm_trigger */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00233,
++/* 0x01af: cmd_exec_set_format */
+ 0xf400f800,
+ 0x01b0f030,
+ 0x0101b000,
+@@ -258,20 +313,26 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x3847c701,
+ 0xf40170b6,
+ 0x84bd0232,
++/* 0x01da: ncomp_loop */
+ 0x4ac494bd,
+ 0x0445b60f,
++/* 0x01e2: bpc_loop */
+ 0xa430b4bd,
+ 0x0f18f404,
+ 0xbbc0a5ff,
+ 0x31f400cb,
+ 0x220ef402,
++/* 0x01f4: cmp_c0 */
+ 0xf00c1bf4,
+ 0xcbbb10c7,
+ 0x160ef400,
++/* 0x0200: cmp_c1 */
+ 0xf406a430,
+ 0xc7f00c18,
+ 0x00cbbb14,
++/* 0x020f: cmp_zero */
+ 0xf1070ef4,
++/* 0x0213: bpc_next */
+ 0x380080c7,
+ 0x80b601c8,
+ 0x01b0b601,
+@@ -283,6 +344,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1d08980e,
+ 0xf40068fd,
+ 0x64bd0502,
++/* 0x023c: dst_xcnt */
+ 0x800075fd,
+ 0x78fd1907,
+ 0x1057f100,
+@@ -307,15 +369,18 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1c069800,
+ 0xf44056d0,
+ 0x00f81030,
++/* 0x029c: cmd_exec_set_surface_tiled */
+ 0xc7075798,
+ 0x78c76879,
+ 0x0380b664,
+ 0xb06077c7,
+ 0x1bf40e76,
+ 0x0477f009,
++/* 0x02b7: xtile64 */
+ 0xf00f0ef4,
+ 0x70b6027c,
+ 0x0947fd11,
++/* 0x02c3: xtileok */
+ 0x980677f0,
+ 0x5b980c5a,
+ 0x00abfd0e,
+@@ -374,6 +439,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0xb70067d0,
+ 0xd0040060,
+ 0x00f80068,
++/* 0x03a8: cmd_exec_set_surface_linear */
+ 0xb7026cf0,
+ 0xb6020260,
+ 0x57980864,
+@@ -384,12 +450,15 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x0060b700,
+ 0x06579804,
+ 0xf80067d0,
++/* 0x03d1: cmd_exec_wait */
+ 0xf900f900,
+ 0x0007f110,
+ 0x0604b608,
++/* 0x03dc: loop */
+ 0xf00001cf,
+ 0x1bf40114,
+ 0xfc10fcfa,
++/* 0x03eb: cmd_exec_query */
+ 0xc800f800,
+ 0x1bf40d34,
+ 0xd121f570,
+@@ -419,6 +488,7 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x0153f026,
+ 0x080047f1,
+ 0xd00644b6,
++/* 0x045e: query_counter */
+ 0x21f50045,
+ 0x47f103d1,
+ 0x44b6080c,
+@@ -442,11 +512,13 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x080047f1,
+ 0xd00644b6,
+ 0x00f80045,
++/* 0x04b8: cmd_exec */
+ 0x03d121f5,
+ 0xf4003fc8,
+ 0x21f50e0b,
+ 0x47f101af,
+ 0x0ef40200,
++/* 0x04cd: cmd_exec_no_format */
+ 0x1067f11e,
+ 0x0664b608,
+ 0x800177f0,
+@@ -454,18 +526,23 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x1d079819,
+ 0xd00067d0,
+ 0x44bd4067,
++/* 0x04e8: cmd_exec_init_src_surface */
+ 0xbd0232f4,
+ 0x043fc854,
+ 0xf50a0bf4,
+ 0xf403a821,
++/* 0x04fa: src_tiled */
+ 0x21f50a0e,
+ 0x49f0029c,
++/* 0x0501: cmd_exec_init_dst_surface */
+ 0x0231f407,
+ 0xc82c57f0,
+ 0x0bf4083f,
+ 0xa821f50a,
+ 0x0a0ef403,
++/* 0x0514: dst_tiled */
+ 0x029c21f5,
++/* 0x051b: cmd_exec_kick */
+ 0xf10849f0,
+ 0xb6080057,
+ 0x06980654,
+@@ -475,7 +552,9 @@ uint32_t nvc0_pcopy_code[] = {
+ 0x54d00546,
+ 0x0c3fc800,
+ 0xf5070bf4,
++/* 0x053f: cmd_exec_done */
+ 0xf803eb21,
++/* 0x0541: cmd_wrcache_flush */
+ 0x0027f100,
+ 0xf034bd22,
+ 0x23d00133,
+diff --git a/drivers/gpu/drm/radeon/atombios_dp.c b/drivers/gpu/drm/radeon/atombios_dp.c
+index 5131b3b..500cced 100644
+--- a/drivers/gpu/drm/radeon/atombios_dp.c
++++ b/drivers/gpu/drm/radeon/atombios_dp.c
+@@ -22,6 +22,7 @@
+ *
+ * Authors: Dave Airlie
+ * Alex Deucher
++ * Jerome Glisse
+ */
+ #include "drmP.h"
+ #include "radeon_drm.h"
+@@ -654,7 +655,6 @@ static bool radeon_dp_get_link_status(struct radeon_connector *radeon_connector,
+ ret = radeon_dp_aux_native_read(radeon_connector, DP_LANE0_1_STATUS,
+ link_status, DP_LINK_STATUS_SIZE, 100);
+ if (ret <= 0) {
+- DRM_ERROR("displayport link status failed\n");
+ return false;
+ }
+
+@@ -833,8 +833,10 @@ static int radeon_dp_link_train_cr(struct radeon_dp_link_train_info *dp_info)
+ else
+ mdelay(dp_info->rd_interval * 4);
+
+- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
++ DRM_ERROR("displayport link status failed\n");
+ break;
++ }
+
+ if (dp_clock_recovery_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ clock_recovery = true;
+@@ -896,8 +898,10 @@ static int radeon_dp_link_train_ce(struct radeon_dp_link_train_info *dp_info)
+ else
+ mdelay(dp_info->rd_interval * 4);
+
+- if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status))
++ if (!radeon_dp_get_link_status(dp_info->radeon_connector, dp_info->link_status)) {
++ DRM_ERROR("displayport link status failed\n");
+ break;
++ }
+
+ if (dp_channel_eq_ok(dp_info->link_status, dp_info->dp_lane_count)) {
+ channel_eq = true;
+diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c
+index 486ccdf..8676b1b 100644
+--- a/drivers/gpu/drm/radeon/atombios_encoders.c
++++ b/drivers/gpu/drm/radeon/atombios_encoders.c
+@@ -1392,10 +1392,18 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode)
+ case DRM_MODE_DPMS_ON:
+ /* some early dce3.2 boards have a bug in their transmitter control table */
+ if ((rdev->family == CHIP_RV710) || (rdev->family == CHIP_RV730) ||
+- ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev))
++ ASIC_IS_DCE41(rdev) || ASIC_IS_DCE5(rdev)) {
++ if (ASIC_IS_DCE6(rdev)) {
++ /* It seems we need to call ATOM_ENCODER_CMD_SETUP again
++ * before reenabling encoder on DPMS ON, otherwise we never
++ * get picture
++ */
++ atombios_dig_encoder_setup(encoder, ATOM_ENCODER_CMD_SETUP, 0);
++ }
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0);
+- else
++ } else {
+ atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0);
++ }
+ if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) {
+ if (connector->connector_type == DRM_MODE_CONNECTOR_eDP) {
+ atombios_set_edp_panel_power(connector,
+diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
+index 2914c57..895e628 100644
+--- a/drivers/gpu/drm/radeon/radeon_connectors.c
++++ b/drivers/gpu/drm/radeon/radeon_connectors.c
+@@ -64,14 +64,33 @@ void radeon_connector_hotplug(struct drm_connector *connector)
+
+ /* just deal with DP (not eDP) here. */
+ if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort) {
+- int saved_dpms = connector->dpms;
+-
+- /* Only turn off the display it it's physically disconnected */
+- if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
+- else if (radeon_dp_needs_link_train(radeon_connector))
+- drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
+- connector->dpms = saved_dpms;
++ struct radeon_connector_atom_dig *dig_connector =
++ radeon_connector->con_priv;
++
++ /* if existing sink type was not DP no need to retrain */
++ if (dig_connector->dp_sink_type != CONNECTOR_OBJECT_ID_DISPLAYPORT)
++ return;
++
++ /* first get sink type as it may be reset after (un)plug */
++ dig_connector->dp_sink_type = radeon_dp_getsinktype(radeon_connector);
++ /* don't do anything if sink is not display port, i.e.,
++ * passive dp->(dvi|hdmi) adaptor
++ */
++ if (dig_connector->dp_sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT) {
++ int saved_dpms = connector->dpms;
++ /* Only turn off the display if it's physically disconnected */
++ if (!radeon_hpd_sense(rdev, radeon_connector->hpd.hpd)) {
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
++ } else if (radeon_dp_needs_link_train(radeon_connector)) {
++ /* set it to OFF so that drm_helper_connector_dpms()
++ * won't return immediately since the current state
++ * is ON at this point.
++ */
++ connector->dpms = DRM_MODE_DPMS_OFF;
++ drm_helper_connector_dpms(connector, DRM_MODE_DPMS_ON);
++ }
++ connector->dpms = saved_dpms;
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
+index 142f894..17238f4 100644
+--- a/drivers/gpu/drm/radeon/radeon_cs.c
++++ b/drivers/gpu/drm/radeon/radeon_cs.c
+@@ -377,7 +377,7 @@ static int radeon_cs_ib_chunk(struct radeon_device *rdev,
+ if (r) {
+ DRM_ERROR("Failed to schedule IB !\n");
+ }
+- return 0;
++ return r;
+ }
+
+ static int radeon_bo_vm_update_pte(struct radeon_cs_parser *parser,
+diff --git a/drivers/gpu/drm/radeon/radeon_cursor.c b/drivers/gpu/drm/radeon/radeon_cursor.c
+index 42acc64..711e95a 100644
+--- a/drivers/gpu/drm/radeon/radeon_cursor.c
++++ b/drivers/gpu/drm/radeon/radeon_cursor.c
+@@ -262,8 +262,14 @@ int radeon_crtc_cursor_move(struct drm_crtc *crtc,
+ if (!(cursor_end & 0x7f))
+ w--;
+ }
+- if (w <= 0)
++ if (w <= 0) {
+ w = 1;
++ cursor_end = x - xorigin + w;
++ if (!(cursor_end & 0x7f)) {
++ x--;
++ WARN_ON_ONCE(x < 0);
++ }
++ }
+ }
+ }
+
+diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
+index 830f1a7..d41e787 100644
+--- a/drivers/gpu/drm/radeon/radeon_object.c
++++ b/drivers/gpu/drm/radeon/radeon_object.c
+@@ -138,7 +138,6 @@ int radeon_bo_create(struct radeon_device *rdev,
+ acc_size = ttm_bo_dma_acc_size(&rdev->mman.bdev, size,
+ sizeof(struct radeon_bo));
+
+-retry:
+ bo = kzalloc(sizeof(struct radeon_bo), GFP_KERNEL);
+ if (bo == NULL)
+ return -ENOMEM;
+@@ -152,6 +151,8 @@ retry:
+ bo->surface_reg = -1;
+ INIT_LIST_HEAD(&bo->list);
+ INIT_LIST_HEAD(&bo->va);
++
++retry:
+ radeon_ttm_placement_from_domain(bo, domain);
+ /* Kernel allocation are uninterruptible */
+ mutex_lock(&rdev->vram_mutex);
+diff --git a/drivers/hid/hid-multitouch.c b/drivers/hid/hid-multitouch.c
+index 7647924..c6655a5 100644
+--- a/drivers/hid/hid-multitouch.c
++++ b/drivers/hid/hid-multitouch.c
+@@ -83,6 +83,7 @@ struct mt_device {
+ unsigned last_field_index; /* last field index of the report */
+ unsigned last_slot_field; /* the last field of a slot */
+ __s8 inputmode; /* InputMode HID feature, -1 if non-existent */
++ __s8 inputmode_index; /* InputMode HID feature index in the report */
+ __s8 maxcontact_report_id; /* Maximum Contact Number HID feature,
+ -1 if non-existent */
+ __u8 num_received; /* how many contacts we received */
+@@ -260,10 +261,20 @@ static void mt_feature_mapping(struct hid_device *hdev,
+ struct hid_field *field, struct hid_usage *usage)
+ {
+ struct mt_device *td = hid_get_drvdata(hdev);
++ int i;
+
+ switch (usage->hid) {
+ case HID_DG_INPUTMODE:
+ td->inputmode = field->report->id;
++ td->inputmode_index = 0; /* has to be updated below */
++
++ for (i=0; i < field->maxusage; i++) {
++ if (field->usage[i].hid == usage->hid) {
++ td->inputmode_index = i;
++ break;
++ }
++ }
++
+ break;
+ case HID_DG_CONTACTMAX:
+ td->maxcontact_report_id = field->report->id;
+@@ -618,7 +629,7 @@ static void mt_set_input_mode(struct hid_device *hdev)
+ re = &(hdev->report_enum[HID_FEATURE_REPORT]);
+ r = re->report_id_hash[td->inputmode];
+ if (r) {
+- r->field[0]->value[0] = 0x02;
++ r->field[0]->value[td->inputmode_index] = 0x02;
+ usbhid_submit_report(hdev, r, USB_DIR_OUT);
+ }
+ }
+diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
+index 6256263..3f365ab 100644
+--- a/drivers/iommu/amd_iommu.c
++++ b/drivers/iommu/amd_iommu.c
+@@ -2254,6 +2254,18 @@ static int device_change_notifier(struct notifier_block *nb,
+
+ iommu_init_device(dev);
+
++ /*
++ * dev_data is still NULL and
++ * got initialized in iommu_init_device
++ */
++ dev_data = get_dev_data(dev);
++
++ if (iommu_pass_through || dev_data->iommu_v2) {
++ dev_data->passthrough = true;
++ attach_device(dev, pt_domain);
++ break;
++ }
++
+ domain = domain_for_device(dev);
+
+ /* allocate a protection domain if a device is added */
+@@ -2271,10 +2283,7 @@ static int device_change_notifier(struct notifier_block *nb,
+
+ dev_data = get_dev_data(dev);
+
+- if (!dev_data->passthrough)
+- dev->archdata.dma_ops = &amd_iommu_dma_ops;
+- else
+- dev->archdata.dma_ops = &nommu_dma_ops;
++ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+
+ break;
+ case BUS_NOTIFY_DEL_DEVICE:
+diff --git a/drivers/iommu/amd_iommu_v2.c b/drivers/iommu/amd_iommu_v2.c
+index 036fe9b..a1f1bc8 100644
+--- a/drivers/iommu/amd_iommu_v2.c
++++ b/drivers/iommu/amd_iommu_v2.c
+@@ -681,6 +681,8 @@ int amd_iommu_bind_pasid(struct pci_dev *pdev, int pasid,
+
+ atomic_set(&pasid_state->count, 1);
+ init_waitqueue_head(&pasid_state->wq);
++ spin_lock_init(&pasid_state->lock);
++
+ pasid_state->task = task;
+ pasid_state->mm = get_task_mm(task);
+ pasid_state->device_state = dev_state;
+diff --git a/drivers/md/dm-thin.c b/drivers/md/dm-thin.c
+index 68694da..e1bce79 100644
+--- a/drivers/md/dm-thin.c
++++ b/drivers/md/dm-thin.c
+@@ -19,7 +19,7 @@
+ /*
+ * Tunable constants
+ */
+-#define ENDIO_HOOK_POOL_SIZE 10240
++#define ENDIO_HOOK_POOL_SIZE 1024
+ #define DEFERRED_SET_SIZE 64
+ #define MAPPING_POOL_SIZE 1024
+ #define PRISON_CELLS 1024
+@@ -859,7 +859,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+
+ if (m->err) {
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -871,7 +871,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ if (r) {
+ DMERR("dm_thin_insert_block() failed");
+ cell_error(m->cell);
+- return;
++ goto out;
+ }
+
+ /*
+@@ -886,6 +886,7 @@ static void process_prepared_mapping(struct dm_thin_new_mapping *m)
+ } else
+ cell_defer(tc, m->cell, m->data_block);
+
++out:
+ list_del(&m->list);
+ mempool_free(m, tc->pool->mapping_pool);
+ }
+diff --git a/drivers/mmc/host/sdhci-pci.c b/drivers/mmc/host/sdhci-pci.c
+index 69ef0be..504da71 100644
+--- a/drivers/mmc/host/sdhci-pci.c
++++ b/drivers/mmc/host/sdhci-pci.c
+@@ -157,6 +157,7 @@ static const struct sdhci_pci_fixes sdhci_ene_714 = {
+ static const struct sdhci_pci_fixes sdhci_cafe = {
+ .quirks = SDHCI_QUIRK_NO_SIMULT_VDD_AND_POWER |
+ SDHCI_QUIRK_NO_BUSY_IRQ |
++ SDHCI_QUIRK_BROKEN_CARD_DETECTION |
+ SDHCI_QUIRK_BROKEN_TIMEOUT_VAL,
+ };
+
+diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
+index f4b8b4d..1dffebe 100644
+--- a/drivers/mmc/host/sdhci.c
++++ b/drivers/mmc/host/sdhci.c
+@@ -27,6 +27,7 @@
+
+ #include <linux/mmc/mmc.h>
+ #include <linux/mmc/host.h>
++#include <linux/mmc/card.h>
+
+ #include "sdhci.h"
+
+@@ -1245,6 +1246,7 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ struct sdhci_host *host;
+ bool present;
+ unsigned long flags;
++ u32 tuning_opcode;
+
+ host = mmc_priv(mmc);
+
+@@ -1292,8 +1294,12 @@ static void sdhci_request(struct mmc_host *mmc, struct mmc_request *mrq)
+ */
+ if ((host->flags & SDHCI_NEEDS_RETUNING) &&
+ !(present_state & (SDHCI_DOING_WRITE | SDHCI_DOING_READ))) {
++ /* eMMC uses cmd21 while sd and sdio use cmd19 */
++ tuning_opcode = mmc->card->type == MMC_TYPE_MMC ?
++ MMC_SEND_TUNING_BLOCK_HS200 :
++ MMC_SEND_TUNING_BLOCK;
+ spin_unlock_irqrestore(&host->lock, flags);
+- sdhci_execute_tuning(mmc, mrq->cmd->opcode);
++ sdhci_execute_tuning(mmc, tuning_opcode);
+ spin_lock_irqsave(&host->lock, flags);
+
+ /* Restore original mmc_request structure */
+diff --git a/drivers/net/caif/caif_serial.c b/drivers/net/caif/caif_serial.c
+index 8a3054b..5de74e7 100644
+--- a/drivers/net/caif/caif_serial.c
++++ b/drivers/net/caif/caif_serial.c
+@@ -325,6 +325,9 @@ static int ldisc_open(struct tty_struct *tty)
+
+ sprintf(name, "cf%s", tty->name);
+ dev = alloc_netdev(sizeof(*ser), name, caifdev_setup);
++ if (!dev)
++ return -ENOMEM;
++
+ ser = netdev_priv(dev);
+ ser->tty = tty_kref_get(tty);
+ ser->dev = dev;
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+index 17d935b..21d8c4d 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_hw.h
+@@ -74,6 +74,8 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
+ #define PCI_DEVICE_ID_ATHEROS_L1D_2_0 0x1083 /* AR8151 v2.0 Gigabit 1000 */
+ #define L2CB_V10 0xc0
+ #define L2CB_V11 0xc1
++#define L2CB_V20 0xc0
++#define L2CB_V21 0xc1
+
+ /* register definition */
+ #define REG_DEVICE_CAP 0x5C
+@@ -87,6 +89,9 @@ void atl1c_post_phy_linkchg(struct atl1c_hw *hw, u16 link_speed);
+ #define LINK_CTRL_L1_EN 0x02
+ #define LINK_CTRL_EXT_SYNC 0x80
+
++#define REG_PCIE_IND_ACC_ADDR 0x80
++#define REG_PCIE_IND_ACC_DATA 0x84
++
+ #define REG_DEV_SERIALNUM_CTRL 0x200
+ #define REG_DEV_MAC_SEL_MASK 0x0 /* 0:EUI; 1:MAC */
+ #define REG_DEV_MAC_SEL_SHIFT 0
+diff --git a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+index 1f78b63..f602623f 100644
+--- a/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
++++ b/drivers/net/ethernet/atheros/atl1c/atl1c_main.c
+@@ -739,6 +739,8 @@ static const struct atl1c_platform_patch plats[] __devinitdata = {
+
+ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
+ {
++ struct pci_dev *pdev = hw->adapter->pdev;
++ u32 misc_ctrl;
+ int i = 0;
+
+ hw->msi_lnkpatch = false;
+@@ -753,6 +755,18 @@ static void __devinit atl1c_patch_assign(struct atl1c_hw *hw)
+ }
+ i++;
+ }
++
++ if (hw->device_id == PCI_DEVICE_ID_ATHEROS_L2C_B2 &&
++ hw->revision_id == L2CB_V21) {
++ /* config acess mode */
++ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
++ REG_PCIE_DEV_MISC_CTRL);
++ pci_read_config_dword(pdev, REG_PCIE_IND_ACC_DATA, &misc_ctrl);
++ misc_ctrl &= ~0x100;
++ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_ADDR,
++ REG_PCIE_DEV_MISC_CTRL);
++ pci_write_config_dword(pdev, REG_PCIE_IND_ACC_DATA, misc_ctrl);
++ }
+ }
+ /*
+ * atl1c_sw_init - Initialize general software structures (struct atl1c_adapter)
+@@ -780,7 +794,7 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
+ hw->device_id = pdev->device;
+ hw->subsystem_vendor_id = pdev->subsystem_vendor;
+ hw->subsystem_id = pdev->subsystem_device;
+- AT_READ_REG(hw, PCI_CLASS_REVISION, &revision);
++ pci_read_config_dword(pdev, PCI_CLASS_REVISION, &revision);
+ hw->revision_id = revision & 0xFF;
+ /* before link up, we assume hibernate is true */
+ hw->hibernate = true;
+diff --git a/drivers/net/ethernet/broadcom/tg3.c b/drivers/net/ethernet/broadcom/tg3.c
+index e47ff8b..15f8b00 100644
+--- a/drivers/net/ethernet/broadcom/tg3.c
++++ b/drivers/net/ethernet/broadcom/tg3.c
+@@ -298,6 +298,7 @@ static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
+ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
++ {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57762)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
+ {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
+@@ -8974,8 +8975,7 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
+ GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
+ tg3_flag(tp, 57765_PLUS)) {
+ val = tr32(TG3_RDMA_RSRVCTRL_REG);
+- if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
+- GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
++ if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0) {
+ val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
+ TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
+@@ -12282,10 +12282,12 @@ static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
+ {
+ struct tg3 *tp = netdev_priv(dev);
+
+- if (!tp->hw_stats)
++ spin_lock_bh(&tp->lock);
++ if (!tp->hw_stats) {
++ spin_unlock_bh(&tp->lock);
+ return &tp->net_stats_prev;
++ }
+
+- spin_lock_bh(&tp->lock);
+ tg3_get_nstats(tp, stats);
+ spin_unlock_bh(&tp->lock);
+
+diff --git a/drivers/net/ethernet/emulex/benet/be_ethtool.c b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+index 63e51d4..59ee51a 100644
+--- a/drivers/net/ethernet/emulex/benet/be_ethtool.c
++++ b/drivers/net/ethernet/emulex/benet/be_ethtool.c
+@@ -910,8 +910,9 @@ static void be_set_fw_log_level(struct be_adapter *adapter, u32 level)
+ if (!status) {
+ cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+- for (i = 0; i < cfgs->num_modules; i++) {
+- for (j = 0; j < cfgs->module[i].num_modes; j++) {
++ for (i = 0; i < le32_to_cpu(cfgs->num_modules); i++) {
++ u32 num_modes = le32_to_cpu(cfgs->module[i].num_modes);
++ for (j = 0; j < num_modes; j++) {
+ if (cfgs->module[i].trace_lvl[j].mode ==
+ MODE_UART)
+ cfgs->module[i].trace_lvl[j].dbg_lvl =
+diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
+index 501dfa9..bd5cf7e 100644
+--- a/drivers/net/ethernet/emulex/benet/be_main.c
++++ b/drivers/net/ethernet/emulex/benet/be_main.c
+@@ -3479,7 +3479,7 @@ u32 be_get_fw_log_level(struct be_adapter *adapter)
+ if (!status) {
+ cfgs = (struct be_fat_conf_params *)(extfat_cmd.va +
+ sizeof(struct be_cmd_resp_hdr));
+- for (j = 0; j < cfgs->module[0].num_modes; j++) {
++ for (j = 0; j < le32_to_cpu(cfgs->module[0].num_modes); j++) {
+ if (cfgs->module[0].trace_lvl[j].mode == MODE_UART)
+ level = cfgs->module[0].trace_lvl[j].dbg_lvl;
+ }
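
The two benet hunks above wrap num_modules and num_modes in le32_to_cpu() before using them as loop bounds, since the firmware reports them little-endian. The idiom on its own, with an illustrative structure that is not the real be_fat_conf_params layout:

#include <linux/types.h>
#include <asm/byteorder.h>

struct fw_module_hdr {
        __le32  num_modes;              /* little-endian on the wire */
};

static u32 count_modes(const struct fw_module_hdr *hdr)
{
        u32 i, num_modes = le32_to_cpu(hdr->num_modes);
        u32 handled = 0;

        for (i = 0; i < num_modes; i++)
                handled++;              /* inspect mode i here */

        return handled;
}
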
+diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
+index d7a04e0..eb81da4 100644
+--- a/drivers/net/ethernet/realtek/r8169.c
++++ b/drivers/net/ethernet/realtek/r8169.c
+@@ -5380,7 +5380,6 @@ static void rtl8169_tx_clear(struct rtl8169_private *tp)
+ {
+ rtl8169_tx_clear_range(tp, tp->dirty_tx, NUM_TX_DESC);
+ tp->cur_tx = tp->dirty_tx = 0;
+- netdev_reset_queue(tp->dev);
+ }
+
+ static void rtl_reset_work(struct rtl8169_private *tp)
+@@ -5535,8 +5534,6 @@ static netdev_tx_t rtl8169_start_xmit(struct sk_buff *skb,
+
+ txd->opts2 = cpu_to_le32(opts[1]);
+
+- netdev_sent_queue(dev, skb->len);
+-
+ skb_tx_timestamp(skb);
+
+ wmb();
+@@ -5633,16 +5630,9 @@ static void rtl8169_pcierr_interrupt(struct net_device *dev)
+ rtl_schedule_task(tp, RTL_FLAG_TASK_RESET_PENDING);
+ }
+
+-struct rtl_txc {
+- int packets;
+- int bytes;
+-};
+-
+ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ {
+- struct rtl8169_stats *tx_stats = &tp->tx_stats;
+ unsigned int dirty_tx, tx_left;
+- struct rtl_txc txc = { 0, 0 };
+
+ dirty_tx = tp->dirty_tx;
+ smp_rmb();
+@@ -5661,24 +5651,17 @@ static void rtl_tx(struct net_device *dev, struct rtl8169_private *tp)
+ rtl8169_unmap_tx_skb(&tp->pci_dev->dev, tx_skb,
+ tp->TxDescArray + entry);
+ if (status & LastFrag) {
+- struct sk_buff *skb = tx_skb->skb;
+-
+- txc.packets++;
+- txc.bytes += skb->len;
+- dev_kfree_skb(skb);
++ u64_stats_update_begin(&tp->tx_stats.syncp);
++ tp->tx_stats.packets++;
++ tp->tx_stats.bytes += tx_skb->skb->len;
++ u64_stats_update_end(&tp->tx_stats.syncp);
++ dev_kfree_skb(tx_skb->skb);
+ tx_skb->skb = NULL;
+ }
+ dirty_tx++;
+ tx_left--;
+ }
+
+- u64_stats_update_begin(&tx_stats->syncp);
+- tx_stats->packets += txc.packets;
+- tx_stats->bytes += txc.bytes;
+- u64_stats_update_end(&tx_stats->syncp);
+-
+- netdev_completed_queue(dev, txc.packets, txc.bytes);
+-
+ if (tp->dirty_tx != dirty_tx) {
+ tp->dirty_tx = dirty_tx;
+ /* Sync with rtl8169_start_xmit:
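
The r8169 hunks above drop the deferred txc accumulator and the netdev_*_queue() byte-accounting calls and go back to bumping the 64-bit tx counters per completed packet. The u64_stats_sync idiom they rely on, reduced to a stand-alone helper (my_tx_stats and my_count_tx are illustrative names):

#include <linux/types.h>
#include <linux/u64_stats_sync.h>

struct my_tx_stats {
        u64                     packets;
        u64                     bytes;
        struct u64_stats_sync   syncp;
};

static void my_count_tx(struct my_tx_stats *s, unsigned int len)
{
        u64_stats_update_begin(&s->syncp);
        s->packets++;
        s->bytes += len;
        u64_stats_update_end(&s->syncp);
}
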
+diff --git a/drivers/net/tun.c b/drivers/net/tun.c
+index 987aeef..e44bfb8 100644
+--- a/drivers/net/tun.c
++++ b/drivers/net/tun.c
+@@ -358,6 +358,8 @@ static void tun_free_netdev(struct net_device *dev)
+ {
+ struct tun_struct *tun = netdev_priv(dev);
+
++ BUG_ON(!test_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags));
++
+ sk_release_kernel(tun->socket.sk);
+ }
+
+@@ -1115,6 +1117,7 @@ static int tun_set_iff(struct net *net, struct file *file, struct ifreq *ifr)
+ tun->flags = flags;
+ tun->txflt.count = 0;
+ tun->vnet_hdr_sz = sizeof(struct virtio_net_hdr);
++ set_bit(SOCK_EXTERNALLY_ALLOCATED, &tun->socket.flags);
+
+ err = -ENOMEM;
+ sk = sk_alloc(&init_net, AF_UNSPEC, GFP_KERNEL, &tun_proto);
+@@ -1252,10 +1255,12 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
+ int vnet_hdr_sz;
+ int ret;
+
+- if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
++ if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) {
+ if (copy_from_user(&ifr, argp, ifreq_len))
+ return -EFAULT;
+-
++ } else {
++ memset(&ifr, 0, sizeof(ifr));
++ }
+ if (cmd == TUNGETFEATURES) {
+ /* Currently this just means: "what IFF flags are valid?".
+ * This is needed because we never checked for invalid flags on
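
The __tun_chr_ioctl() hunk above zeroes the on-stack ifreq whenever the command is not one that copies it in from user space, so the rest of the handler never operates on uninitialized stack bytes. The initialize-or-copy pattern as a minimal sketch (my_ioctl and MY_CMD_WITH_ARG are hypothetical):

#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/if.h>
#include <linux/string.h>
#include <linux/uaccess.h>

#define MY_CMD_WITH_ARG 1               /* placeholder command number */

static long my_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
        struct ifreq ifr;

        if (cmd == MY_CMD_WITH_ARG) {
                if (copy_from_user(&ifr, (void __user *)arg, sizeof(ifr)))
                        return -EFAULT;
        } else {
                memset(&ifr, 0, sizeof(ifr));   /* never read stack garbage */
        }

        /* ... the rest of the handler sees a fully initialized ifr ... */
        return 0;
}
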
+diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
+index d8ad552..c3d0349 100644
+--- a/drivers/net/usb/kaweth.c
++++ b/drivers/net/usb/kaweth.c
+@@ -1314,7 +1314,7 @@ static int kaweth_internal_control_msg(struct usb_device *usb_dev,
+ int retv;
+ int length = 0; /* shut up GCC */
+
+- urb = usb_alloc_urb(0, GFP_NOIO);
++ urb = usb_alloc_urb(0, GFP_ATOMIC);
+ if (!urb)
+ return -ENOMEM;
+
+diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
+index c06b6cb..7c899fc 100644
+--- a/drivers/net/wireless/b43/b43.h
++++ b/drivers/net/wireless/b43/b43.h
+@@ -870,13 +870,6 @@ struct b43_wl {
+ * handler, only. This basically is just the IRQ mask register. */
+ spinlock_t hardirq_lock;
+
+- /* The number of queues that were registered with the mac80211 subsystem
+- * initially. This is a backup copy of hw->queues in case hw->queues has
+- * to be dynamically lowered at runtime (Firmware does not support QoS).
+- * hw->queues has to be restored to the original value before unregistering
+- * from the mac80211 subsystem. */
+- u16 mac80211_initially_registered_queues;
+-
+ /* Set this if we call ieee80211_register_hw() and check if we call
+ * ieee80211_unregister_hw(). */
+ bool hw_registred;
+diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
+index 1b988f2..b80352b 100644
+--- a/drivers/net/wireless/b43/main.c
++++ b/drivers/net/wireless/b43/main.c
+@@ -2359,6 +2359,8 @@ static int b43_try_request_fw(struct b43_request_fw_context *ctx)
+ if (err)
+ goto err_load;
+
++ fw->opensource = (ctx->req_type == B43_FWTYPE_OPENSOURCE);
++
+ return 0;
+
+ err_no_ucode:
+@@ -2434,6 +2436,10 @@ static void b43_request_firmware(struct work_struct *work)
+ goto out;
+
+ start_ieee80211:
++ wl->hw->queues = B43_QOS_QUEUE_NUM;
++ if (!modparam_qos || dev->fw.opensource)
++ wl->hw->queues = 1;
++
+ err = ieee80211_register_hw(wl->hw);
+ if (err)
+ goto err_one_core_detach;
+@@ -2537,11 +2543,9 @@ static int b43_upload_microcode(struct b43_wldev *dev)
+ dev->fw.hdr_format = B43_FW_HDR_410;
+ else
+ dev->fw.hdr_format = B43_FW_HDR_351;
+- dev->fw.opensource = (fwdate == 0xFFFF);
++ WARN_ON(dev->fw.opensource != (fwdate == 0xFFFF));
+
+- /* Default to use-all-queues. */
+- dev->wl->hw->queues = dev->wl->mac80211_initially_registered_queues;
+- dev->qos_enabled = !!modparam_qos;
++ dev->qos_enabled = dev->wl->hw->queues > 1;
+ /* Default to firmware/hardware crypto acceleration. */
+ dev->hwcrypto_enabled = true;
+
+@@ -2559,14 +2563,8 @@ static int b43_upload_microcode(struct b43_wldev *dev)
+ /* Disable hardware crypto and fall back to software crypto. */
+ dev->hwcrypto_enabled = false;
+ }
+- if (!(fwcapa & B43_FWCAPA_QOS)) {
+- b43info(dev->wl, "QoS not supported by firmware\n");
+- /* Disable QoS. Tweak hw->queues to 1. It will be restored before
+- * ieee80211_unregister to make sure the networking core can
+- * properly free possible resources. */
+- dev->wl->hw->queues = 1;
+- dev->qos_enabled = false;
+- }
++ /* adding QoS support should use an offline discovery mechanism */
++ WARN(fwcapa & B43_FWCAPA_QOS, "QoS in OpenFW not supported\n");
+ } else {
+ b43info(dev->wl, "Loading firmware version %u.%u "
+ "(20%.2i-%.2i-%.2i %.2i:%.2i:%.2i)\n",
+@@ -5298,8 +5296,6 @@ static struct b43_wl *b43_wireless_init(struct b43_bus_dev *dev)
+
+ hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
+
+- hw->queues = modparam_qos ? B43_QOS_QUEUE_NUM : 1;
+- wl->mac80211_initially_registered_queues = hw->queues;
+ wl->hw_registred = false;
+ hw->max_rates = 2;
+ SET_IEEE80211_DEV(hw, dev->dev);
+@@ -5374,10 +5370,6 @@ static void b43_bcma_remove(struct bcma_device *core)
+
+ B43_WARN_ON(!wl);
+ if (wl->current_dev == wldev && wl->hw_registred) {
+- /* Restore the queues count before unregistering, because firmware detect
+- * might have modified it. Restoring is important, so the networking
+- * stack can properly free resources. */
+- wl->hw->queues = wl->mac80211_initially_registered_queues;
+ b43_leds_stop(wldev);
+ ieee80211_unregister_hw(wl->hw);
+ }
+@@ -5452,10 +5444,6 @@ static void b43_ssb_remove(struct ssb_device *sdev)
+
+ B43_WARN_ON(!wl);
+ if (wl->current_dev == wldev && wl->hw_registred) {
+- /* Restore the queues count before unregistering, because firmware detect
+- * might have modified it. Restoring is important, so the networking
+- * stack can properly free resources. */
+- wl->hw->queues = wl->mac80211_initially_registered_queues;
+ b43_leds_stop(wldev);
+ ieee80211_unregister_hw(wl->hw);
+ }
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+index e55ec6c..c31072d 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+@@ -617,6 +617,11 @@ static bool iwlagn_fill_txpower_mode(struct iwl_priv *priv,
+ struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
+ int ave_rssi;
+
++ if (!ctx->vif || (ctx->vif->type != NL80211_IFTYPE_STATION)) {
++ IWL_DEBUG_INFO(priv, "BSS ctx not active or not in sta mode\n");
++ return false;
++ }
++
+ ave_rssi = ieee80211_ave_rssi(ctx->vif);
+ if (!ave_rssi) {
+ /* no rssi data, no changes to reduce tx power */
+diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+index eb6a8ea..287fdd0 100644
+--- a/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
++++ b/drivers/net/wireless/iwlwifi/iwl-agn-sta.c
+@@ -236,6 +236,7 @@ static void iwl_sta_calc_ht_flags(struct iwl_priv *priv,
+ mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
+
+ IWL_DEBUG_INFO(priv, "STA %pM SM PS mode: %s\n",
++ sta->addr,
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
+ "static" :
+ (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
+diff --git a/drivers/net/wireless/mwifiex/cfg80211.c b/drivers/net/wireless/mwifiex/cfg80211.c
+index 5c7fd18..76b5c0f 100644
+--- a/drivers/net/wireless/mwifiex/cfg80211.c
++++ b/drivers/net/wireless/mwifiex/cfg80211.c
+@@ -634,9 +634,9 @@ mwifiex_dump_station_info(struct mwifiex_private *priv,
+
+ /*
+ * Bit 0 in tx_htinfo indicates that current Tx rate is 11n rate. Valid
+- * MCS index values for us are 0 to 7.
++ * MCS index values for us are 0 to 15.
+ */
+- if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 8)) {
++ if ((priv->tx_htinfo & BIT(0)) && (priv->tx_rate < 16)) {
+ sinfo->txrate.mcs = priv->tx_rate;
+ sinfo->txrate.flags |= RATE_INFO_FLAGS_MCS;
+ /* 40MHz rate */
+diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
+index bf78317..20a5040 100644
+--- a/drivers/net/wireless/rt2x00/rt2800usb.c
++++ b/drivers/net/wireless/rt2x00/rt2800usb.c
+@@ -1137,6 +1137,8 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ #ifdef CONFIG_RT2800USB_RT33XX
+ /* Belkin */
+ { USB_DEVICE(0x050d, 0x945b) },
++ /* D-Link */
++ { USB_DEVICE(0x2001, 0x3c17) },
+ /* Panasonic */
+ { USB_DEVICE(0x083a, 0xb511) },
+ /* Philips */
+@@ -1237,7 +1239,6 @@ static struct usb_device_id rt2800usb_device_table[] = {
+ /* D-Link */
+ { USB_DEVICE(0x07d1, 0x3c0b) },
+ { USB_DEVICE(0x07d1, 0x3c17) },
+- { USB_DEVICE(0x2001, 0x3c17) },
+ /* Encore */
+ { USB_DEVICE(0x203d, 0x14a1) },
+ /* Gemtek */
+diff --git a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+index 18380a7..4420312 100644
+--- a/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
++++ b/drivers/net/wireless/rtlwifi/rtl8192de/phy.c
+@@ -3345,21 +3345,21 @@ void rtl92d_phy_config_macphymode_info(struct ieee80211_hw *hw)
+ switch (rtlhal->macphymode) {
+ case DUALMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+- rtlhal->version |= CHIP_92D_SINGLEPHY;
++ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case SINGLEMAC_SINGLEPHY:
+ rtlphy->rf_type = RF_2T2R;
+- rtlhal->version |= CHIP_92D_SINGLEPHY;
++ rtlhal->version |= RF_TYPE_2T2R;
+ rtlhal->bandset = BAND_ON_BOTH;
+ rtlhal->current_bandtype = BAND_ON_2_4G;
+ break;
+
+ case DUALMAC_DUALPHY:
+ rtlphy->rf_type = RF_1T1R;
+- rtlhal->version &= (~CHIP_92D_SINGLEPHY);
++ rtlhal->version &= RF_TYPE_1T1R;
+ /* Now we let MAC0 run on 5G band. */
+ if (rtlhal->interfaceindex == 0) {
+ rtlhal->bandset = BAND_ON_5G;
+diff --git a/drivers/net/wireless/rtlwifi/usb.c b/drivers/net/wireless/rtlwifi/usb.c
+index a6049d7..aa970fc 100644
+--- a/drivers/net/wireless/rtlwifi/usb.c
++++ b/drivers/net/wireless/rtlwifi/usb.c
+@@ -131,15 +131,19 @@ static u32 _usb_read_sync(struct rtl_priv *rtlpriv, u32 addr, u16 len)
+ u8 request;
+ u16 wvalue;
+ u16 index;
+- __le32 *data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++ __le32 *data;
++ unsigned long flags;
+
++ spin_lock_irqsave(&rtlpriv->locks.usb_lock, flags);
++ if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
++ rtlpriv->usb_data_index = 0;
++ data = &rtlpriv->usb_data[rtlpriv->usb_data_index];
++ spin_unlock_irqrestore(&rtlpriv->locks.usb_lock, flags);
+ request = REALTEK_USB_VENQT_CMD_REQ;
+ index = REALTEK_USB_VENQT_CMD_IDX; /* n/a */
+
+ wvalue = (u16)addr;
+ _usbctrl_vendorreq_sync_read(udev, request, wvalue, index, data, len);
+- if (++rtlpriv->usb_data_index >= RTL_USB_MAX_RX_COUNT)
+- rtlpriv->usb_data_index = 0;
+ return le32_to_cpu(*data);
+ }
+
+@@ -951,6 +955,10 @@ int __devinit rtl_usb_probe(struct usb_interface *intf,
+ GFP_KERNEL);
+ if (!rtlpriv->usb_data)
+ return -ENOMEM;
++
++ /* this spin lock must be initialized early */
++ spin_lock_init(&rtlpriv->locks.usb_lock);
++
+ rtlpriv->usb_data_index = 0;
+ init_completion(&rtlpriv->firmware_loading_complete);
+ SET_IEEE80211_DEV(hw, &intf->dev);
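
The rtlwifi/usb.c hunks above put the usb_data_index advance-and-lookup under a new usb_lock, and initialize that lock in probe before anything can take it. The same shape with hypothetical names (my_ring, my_ring_next_slot, MY_RING_SLOTS):

#include <linux/spinlock.h>
#include <linux/types.h>

#define MY_RING_SLOTS   32              /* placeholder ring size */

struct my_ring {
        spinlock_t      lock;           /* spin_lock_init() this early in probe */
        unsigned int    index;
        __le32          data[MY_RING_SLOTS];
};

static __le32 *my_ring_next_slot(struct my_ring *r)
{
        unsigned long flags;
        __le32 *slot;

        spin_lock_irqsave(&r->lock, flags);
        if (++r->index >= MY_RING_SLOTS)
                r->index = 0;
        slot = &r->data[r->index];
        spin_unlock_irqrestore(&r->lock, flags);

        return slot;
}
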
+diff --git a/drivers/net/wireless/rtlwifi/wifi.h b/drivers/net/wireless/rtlwifi/wifi.h
+index bd816ae..cdaa21f 100644
+--- a/drivers/net/wireless/rtlwifi/wifi.h
++++ b/drivers/net/wireless/rtlwifi/wifi.h
+@@ -1555,6 +1555,7 @@ struct rtl_locks {
+ spinlock_t rf_ps_lock;
+ spinlock_t rf_lock;
+ spinlock_t waitq_lock;
++ spinlock_t usb_lock;
+
+ /*Dual mac*/
+ spinlock_t cck_and_rw_pagea_lock;
+diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
+index 7be5e97..c390c91 100644
+--- a/drivers/s390/net/qeth_l3_main.c
++++ b/drivers/s390/net/qeth_l3_main.c
+@@ -1760,6 +1760,8 @@ static void qeth_l3_free_vlan_addresses4(struct qeth_card *card,
+ QETH_CARD_TEXT(card, 4, "frvaddr4");
+
+ netdev = __vlan_find_dev_deep(card->dev, vid);
++ if (!netdev)
++ return;
+ in_dev = in_dev_get(netdev);
+ if (!in_dev)
+ return;
+@@ -1788,6 +1790,8 @@ static void qeth_l3_free_vlan_addresses6(struct qeth_card *card,
+ QETH_CARD_TEXT(card, 4, "frvaddr6");
+
+ netdev = __vlan_find_dev_deep(card->dev, vid);
++ if (!netdev)
++ return;
+ in6_dev = in6_dev_get(netdev);
+ if (!in6_dev)
+ return;
+diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
+index a3a056a..b48c24f 100644
+--- a/drivers/scsi/hosts.c
++++ b/drivers/scsi/hosts.c
+@@ -290,6 +290,7 @@ static void scsi_host_dev_release(struct device *dev)
+ struct Scsi_Host *shost = dev_to_shost(dev);
+ struct device *parent = dev->parent;
+ struct request_queue *q;
++ void *queuedata;
+
+ scsi_proc_hostdir_rm(shost->hostt);
+
+@@ -299,9 +300,9 @@ static void scsi_host_dev_release(struct device *dev)
+ destroy_workqueue(shost->work_q);
+ q = shost->uspace_req_q;
+ if (q) {
+- kfree(q->queuedata);
+- q->queuedata = NULL;
+- scsi_free_queue(q);
++ queuedata = q->queuedata;
++ blk_cleanup_queue(q);
++ kfree(queuedata);
+ }
+
+ scsi_destroy_command_freelist(shost);
+diff --git a/drivers/scsi/libsas/sas_expander.c b/drivers/scsi/libsas/sas_expander.c
+index caa0525..101b28e 100644
+--- a/drivers/scsi/libsas/sas_expander.c
++++ b/drivers/scsi/libsas/sas_expander.c
+@@ -868,7 +868,7 @@ static struct domain_device *sas_ex_discover_end_dev(
+ }
+
+ /* See if this phy is part of a wide port */
+-static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
++static bool sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+ {
+ struct ex_phy *phy = &parent->ex_dev.ex_phy[phy_id];
+ int i;
+@@ -884,11 +884,11 @@ static int sas_ex_join_wide_port(struct domain_device *parent, int phy_id)
+ sas_port_add_phy(ephy->port, phy->phy);
+ phy->port = ephy->port;
+ phy->phy_state = PHY_DEVICE_DISCOVERED;
+- return 0;
++ return true;
+ }
+ }
+
+- return -ENODEV;
++ return false;
+ }
+
+ static struct domain_device *sas_ex_discover_expander(
+@@ -1030,8 +1030,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+ return res;
+ }
+
+- res = sas_ex_join_wide_port(dev, phy_id);
+- if (!res) {
++ if (sas_ex_join_wide_port(dev, phy_id)) {
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ phy_id, SAS_ADDR(ex_phy->attached_sas_addr));
+ return res;
+@@ -1077,8 +1076,7 @@ static int sas_ex_discover_dev(struct domain_device *dev, int phy_id)
+ if (SAS_ADDR(ex->ex_phy[i].attached_sas_addr) ==
+ SAS_ADDR(child->sas_addr)) {
+ ex->ex_phy[i].phy_state= PHY_DEVICE_DISCOVERED;
+- res = sas_ex_join_wide_port(dev, i);
+- if (!res)
++ if (sas_ex_join_wide_port(dev, i))
+ SAS_DPRINTK("Attaching ex phy%d to wide port %016llx\n",
+ i, SAS_ADDR(ex->ex_phy[i].attached_sas_addr));
+
+@@ -1943,32 +1941,20 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
+ {
+ struct ex_phy *ex_phy = &dev->ex_dev.ex_phy[phy_id];
+ struct domain_device *child;
+- bool found = false;
+- int res, i;
++ int res;
+
+ SAS_DPRINTK("ex %016llx phy%d new device attached\n",
+ SAS_ADDR(dev->sas_addr), phy_id);
+ res = sas_ex_phy_discover(dev, phy_id);
+ if (res)
+- goto out;
+- /* to support the wide port inserted */
+- for (i = 0; i < dev->ex_dev.num_phys; i++) {
+- struct ex_phy *ex_phy_temp = &dev->ex_dev.ex_phy[i];
+- if (i == phy_id)
+- continue;
+- if (SAS_ADDR(ex_phy_temp->attached_sas_addr) ==
+- SAS_ADDR(ex_phy->attached_sas_addr)) {
+- found = true;
+- break;
+- }
+- }
+- if (found) {
+- sas_ex_join_wide_port(dev, phy_id);
++ return res;
++
++ if (sas_ex_join_wide_port(dev, phy_id))
+ return 0;
+- }
++
+ res = sas_ex_discover_devices(dev, phy_id);
+- if (!res)
+- goto out;
++ if (res)
++ return res;
+ list_for_each_entry(child, &dev->ex_dev.children, siblings) {
+ if (SAS_ADDR(child->sas_addr) ==
+ SAS_ADDR(ex_phy->attached_sas_addr)) {
+@@ -1978,7 +1964,6 @@ static int sas_discover_new(struct domain_device *dev, int phy_id)
+ break;
+ }
+ }
+-out:
+ return res;
+ }
+
+@@ -2109,9 +2094,7 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
+ struct domain_device *dev = NULL;
+
+ res = sas_find_bcast_dev(port_dev, &dev);
+- if (res)
+- goto out;
+- if (dev) {
++ while (res == 0 && dev) {
+ struct expander_device *ex = &dev->ex_dev;
+ int i = 0, phy_id;
+
+@@ -2123,8 +2106,10 @@ int sas_ex_revalidate_domain(struct domain_device *port_dev)
+ res = sas_rediscover(dev, phy_id);
+ i = phy_id + 1;
+ } while (i < ex->num_phys);
++
++ dev = NULL;
++ res = sas_find_bcast_dev(port_dev, &dev);
+ }
+-out:
+ return res;
+ }
+
+diff --git a/drivers/scsi/scsi_error.c b/drivers/scsi/scsi_error.c
+index d0f71e5..804f632 100644
+--- a/drivers/scsi/scsi_error.c
++++ b/drivers/scsi/scsi_error.c
+@@ -1687,6 +1687,20 @@ static void scsi_restart_operations(struct Scsi_Host *shost)
+ * requests are started.
+ */
+ scsi_run_host_queues(shost);
++
++ /*
++ * if eh is active and host_eh_scheduled is pending we need to re-run
++ * recovery. we do this check after scsi_run_host_queues() to allow
++ * everything pent up since the last eh run a chance to make forward
++ * progress before we sync again. Either we'll immediately re-run
++ * recovery or scsi_device_unbusy() will wake us again when these
++ * pending commands complete.
++ */
++ spin_lock_irqsave(shost->host_lock, flags);
++ if (shost->host_eh_scheduled)
++ if (scsi_host_set_state(shost, SHOST_RECOVERY))
++ WARN_ON(scsi_host_set_state(shost, SHOST_CANCEL_RECOVERY));
++ spin_unlock_irqrestore(shost->host_lock, flags);
+ }
+
+ /**
+diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
+index 6dfb978..495db80 100644
+--- a/drivers/scsi/scsi_lib.c
++++ b/drivers/scsi/scsi_lib.c
+@@ -406,10 +406,6 @@ static void scsi_run_queue(struct request_queue *q)
+ LIST_HEAD(starved_list);
+ unsigned long flags;
+
+- /* if the device is dead, sdev will be NULL, so no queue to run */
+- if (!sdev)
+- return;
+-
+ shost = sdev->host;
+ if (scsi_target(sdev)->single_lun)
+ scsi_single_lun_run(sdev);
+@@ -483,15 +479,26 @@ void scsi_requeue_run_queue(struct work_struct *work)
+ */
+ static void scsi_requeue_command(struct request_queue *q, struct scsi_cmnd *cmd)
+ {
++ struct scsi_device *sdev = cmd->device;
+ struct request *req = cmd->request;
+ unsigned long flags;
+
++ /*
++ * We need to hold a reference on the device to avoid the queue being
++ * killed after the unlock and before scsi_run_queue is invoked which
++ * may happen because scsi_unprep_request() puts the command which
++ * releases its reference on the device.
++ */
++ get_device(&sdev->sdev_gendev);
++
+ spin_lock_irqsave(q->queue_lock, flags);
+ scsi_unprep_request(req);
+ blk_requeue_request(q, req);
+ spin_unlock_irqrestore(q->queue_lock, flags);
+
+ scsi_run_queue(q);
++
++ put_device(&sdev->sdev_gendev);
+ }
+
+ void scsi_next_command(struct scsi_cmnd *cmd)
+@@ -1370,16 +1377,16 @@ static inline int scsi_host_queue_ready(struct request_queue *q,
+ * may be changed after request stacking drivers call the function,
+ * regardless of taking lock or not.
+ *
+- * When scsi can't dispatch I/Os anymore and needs to kill I/Os
+- * (e.g. !sdev), scsi needs to return 'not busy'.
+- * Otherwise, request stacking drivers may hold requests forever.
++ * When scsi can't dispatch I/Os anymore and needs to kill I/Os scsi
++ * needs to return 'not busy'. Otherwise, request stacking drivers
++ * may hold requests forever.
+ */
+ static int scsi_lld_busy(struct request_queue *q)
+ {
+ struct scsi_device *sdev = q->queuedata;
+ struct Scsi_Host *shost;
+
+- if (!sdev)
++ if (blk_queue_dead(q))
+ return 0;
+
+ shost = sdev->host;
+@@ -1490,12 +1497,6 @@ static void scsi_request_fn(struct request_queue *q)
+ struct scsi_cmnd *cmd;
+ struct request *req;
+
+- if (!sdev) {
+- while ((req = blk_peek_request(q)) != NULL)
+- scsi_kill_request(req, q);
+- return;
+- }
+-
+ if(!get_device(&sdev->sdev_gendev))
+ /* We must be tearing the block queue down already */
+ return;
+@@ -1697,20 +1698,6 @@ struct request_queue *scsi_alloc_queue(struct scsi_device *sdev)
+ return q;
+ }
+
+-void scsi_free_queue(struct request_queue *q)
+-{
+- unsigned long flags;
+-
+- WARN_ON(q->queuedata);
+-
+- /* cause scsi_request_fn() to kill all non-finished requests */
+- spin_lock_irqsave(q->queue_lock, flags);
+- q->request_fn(q);
+- spin_unlock_irqrestore(q->queue_lock, flags);
+-
+- blk_cleanup_queue(q);
+-}
+-
+ /*
+ * Function: scsi_block_requests()
+ *
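
The scsi_requeue_command() change above brackets the requeue with get_device()/put_device() on the scsi_device so the queue cannot be torn down while the command is being put back. The bracketing on its own; my_do_requeue_and_run is a hypothetical stand-in for the unprep/requeue/run-queue sequence:

#include <linux/blkdev.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>

/* unprep the request, requeue it and kick the queue; omitted in this sketch */
static void my_do_requeue_and_run(struct request_queue *q, struct scsi_cmnd *cmd)
{
}

static void my_requeue(struct request_queue *q, struct scsi_cmnd *cmd)
{
        struct scsi_device *sdev = cmd->device;

        get_device(&sdev->sdev_gendev);         /* pin the device */
        my_do_requeue_and_run(q, cmd);          /* may drop the last other reference */
        put_device(&sdev->sdev_gendev);
}
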
+diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h
+index 07ce3f5..2b8d8b5 100644
+--- a/drivers/scsi/scsi_priv.h
++++ b/drivers/scsi/scsi_priv.h
+@@ -84,7 +84,6 @@ extern void scsi_next_command(struct scsi_cmnd *cmd);
+ extern void scsi_io_completion(struct scsi_cmnd *, unsigned int);
+ extern void scsi_run_host_queues(struct Scsi_Host *shost);
+ extern struct request_queue *scsi_alloc_queue(struct scsi_device *sdev);
+-extern void scsi_free_queue(struct request_queue *q);
+ extern int scsi_init_queue(void);
+ extern void scsi_exit_queue(void);
+ struct request_queue;
+diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c
+index 2e5fe58..f55e5f1 100644
+--- a/drivers/scsi/scsi_scan.c
++++ b/drivers/scsi/scsi_scan.c
+@@ -1717,6 +1717,9 @@ static void scsi_sysfs_add_devices(struct Scsi_Host *shost)
+ {
+ struct scsi_device *sdev;
+ shost_for_each_device(sdev, shost) {
++ /* target removed before the device could be added */
++ if (sdev->sdev_state == SDEV_DEL)
++ continue;
+ if (!scsi_host_scan_allowed(shost) ||
+ scsi_sysfs_add_sdev(sdev) != 0)
+ __scsi_remove_device(sdev);
+diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
+index 04c2a27..bb7c482 100644
+--- a/drivers/scsi/scsi_sysfs.c
++++ b/drivers/scsi/scsi_sysfs.c
+@@ -971,11 +971,8 @@ void __scsi_remove_device(struct scsi_device *sdev)
+ sdev->host->hostt->slave_destroy(sdev);
+ transport_destroy_device(dev);
+
+- /* cause the request function to reject all I/O requests */
+- sdev->request_queue->queuedata = NULL;
+-
+ /* Freeing the queue signals to block that we're done */
+- scsi_free_queue(sdev->request_queue);
++ blk_cleanup_queue(sdev->request_queue);
+ put_device(dev);
+ }
+
+@@ -1000,7 +997,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ struct scsi_device *sdev;
+
+ spin_lock_irqsave(shost->host_lock, flags);
+- starget->reap_ref++;
+ restart:
+ list_for_each_entry(sdev, &shost->__devices, siblings) {
+ if (sdev->channel != starget->channel ||
+@@ -1014,14 +1010,6 @@ static void __scsi_remove_target(struct scsi_target *starget)
+ goto restart;
+ }
+ spin_unlock_irqrestore(shost->host_lock, flags);
+- scsi_target_reap(starget);
+-}
+-
+-static int __remove_child (struct device * dev, void * data)
+-{
+- if (scsi_is_target_device(dev))
+- __scsi_remove_target(to_scsi_target(dev));
+- return 0;
+ }
+
+ /**
+@@ -1034,14 +1022,34 @@ static int __remove_child (struct device * dev, void * data)
+ */
+ void scsi_remove_target(struct device *dev)
+ {
+- if (scsi_is_target_device(dev)) {
+- __scsi_remove_target(to_scsi_target(dev));
+- return;
++ struct Scsi_Host *shost = dev_to_shost(dev->parent);
++ struct scsi_target *starget, *found;
++ unsigned long flags;
++
++ restart:
++ found = NULL;
++ spin_lock_irqsave(shost->host_lock, flags);
++ list_for_each_entry(starget, &shost->__targets, siblings) {
++ if (starget->state == STARGET_DEL)
++ continue;
++ if (starget->dev.parent == dev || &starget->dev == dev) {
++ found = starget;
++ found->reap_ref++;
++ break;
++ }
+ }
++ spin_unlock_irqrestore(shost->host_lock, flags);
+
+- get_device(dev);
+- device_for_each_child(dev, NULL, __remove_child);
+- put_device(dev);
++ if (found) {
++ __scsi_remove_target(found);
++ scsi_target_reap(found);
++ /* in the case where @dev has multiple starget children,
++ * continue removing.
++ *
++ * FIXME: does such a case exist?
++ */
++ goto restart;
++ }
+ }
+ EXPORT_SYMBOL(scsi_remove_target);
+
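
The reworked scsi_remove_target() above finds one live target under host_lock, pins it with reap_ref, drops the lock for the actual removal and reap, and then rescans from the top. Its skeleton, with hypothetical types and helpers (my_host, my_tgt, do_remove, drop_ref):

#include <linux/list.h>
#include <linux/spinlock.h>

#define TGT_DEL 1                       /* placeholder "already deleted" state */

struct my_tgt {
        struct list_head        siblings;
        int                     state;
        unsigned int            ref;
};

struct my_host {
        spinlock_t              lock;
        struct list_head        targets;
};

static void do_remove(struct my_tgt *t) { /* heavy teardown, may sleep */ }
static void drop_ref(struct my_tgt *t)  { /* release the pin taken below */ }

static void remove_all_targets(struct my_host *h)
{
        struct my_tgt *t, *found;
        unsigned long flags;

restart:
        found = NULL;
        spin_lock_irqsave(&h->lock, flags);
        list_for_each_entry(t, &h->targets, siblings) {
                if (t->state == TGT_DEL)
                        continue;
                found = t;
                found->ref++;                   /* pin before unlocking */
                break;
        }
        spin_unlock_irqrestore(&h->lock, flags);

        if (found) {
                do_remove(found);               /* may sleep, takes other locks */
                drop_ref(found);
                goto restart;                   /* list may have changed: rescan */
        }
}
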
+diff --git a/drivers/spi/spi-pl022.c b/drivers/spi/spi-pl022.c
+index 400ae21..469eb28 100644
+--- a/drivers/spi/spi-pl022.c
++++ b/drivers/spi/spi-pl022.c
+@@ -489,6 +489,11 @@ static void giveback(struct pl022 *pl022)
+ pl022->cur_transfer = NULL;
+ pl022->cur_chip = NULL;
+ spi_finalize_current_message(pl022->master);
++
++ /* disable the SPI/SSP operation */
++ writew((readw(SSP_CR1(pl022->virtbase)) &
++ (~SSP_CR1_MASK_SSE)), SSP_CR1(pl022->virtbase));
++
+ }
+
+ /**
+diff --git a/drivers/staging/zsmalloc/zsmalloc-main.c b/drivers/staging/zsmalloc/zsmalloc-main.c
+index 4496737..8ded9a0 100644
+--- a/drivers/staging/zsmalloc/zsmalloc-main.c
++++ b/drivers/staging/zsmalloc/zsmalloc-main.c
+@@ -425,12 +425,6 @@ static struct page *find_get_zspage(struct size_class *class)
+ }
+
+
+-/*
+- * If this becomes a separate module, register zs_init() with
+- * module_init(), zs_exit with module_exit(), and remove zs_initialized
+-*/
+-static int zs_initialized;
+-
+ static int zs_cpu_notifier(struct notifier_block *nb, unsigned long action,
+ void *pcpu)
+ {
+@@ -489,7 +483,7 @@ fail:
+
+ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+ {
+- int i, error, ovhd_size;
++ int i, ovhd_size;
+ struct zs_pool *pool;
+
+ if (!name)
+@@ -516,28 +510,9 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+
+ }
+
+- /*
+- * If this becomes a separate module, register zs_init with
+- * module_init, and remove this block
+- */
+- if (!zs_initialized) {
+- error = zs_init();
+- if (error)
+- goto cleanup;
+- zs_initialized = 1;
+- }
+-
+ pool->flags = flags;
+ pool->name = name;
+
+- error = 0; /* Success */
+-
+-cleanup:
+- if (error) {
+- zs_destroy_pool(pool);
+- pool = NULL;
+- }
+-
+ return pool;
+ }
+ EXPORT_SYMBOL_GPL(zs_create_pool);
+@@ -753,3 +728,9 @@ u64 zs_get_total_size_bytes(struct zs_pool *pool)
+ return npages << PAGE_SHIFT;
+ }
+ EXPORT_SYMBOL_GPL(zs_get_total_size_bytes);
++
++module_init(zs_init);
++module_exit(zs_exit);
++
++MODULE_LICENSE("Dual BSD/GPL");
++MODULE_AUTHOR("Nitin Gupta <ngupta@vflare.org>");
+diff --git a/drivers/target/iscsi/iscsi_target.c b/drivers/target/iscsi/iscsi_target.c
+index d57d10c..d7dcd67 100644
+--- a/drivers/target/iscsi/iscsi_target.c
++++ b/drivers/target/iscsi/iscsi_target.c
+@@ -429,19 +429,8 @@ int iscsit_reset_np_thread(
+
+ int iscsit_del_np_comm(struct iscsi_np *np)
+ {
+- if (!np->np_socket)
+- return 0;
+-
+- /*
+- * Some network transports allocate their own struct sock->file,
+- * see if we need to free any additional allocated resources.
+- */
+- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+- kfree(np->np_socket->file);
+- np->np_socket->file = NULL;
+- }
+-
+- sock_release(np->np_socket);
++ if (np->np_socket)
++ sock_release(np->np_socket);
+ return 0;
+ }
+
+@@ -4077,13 +4066,8 @@ int iscsit_close_connection(
+ kfree(conn->conn_ops);
+ conn->conn_ops = NULL;
+
+- if (conn->sock) {
+- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+- kfree(conn->sock->file);
+- conn->sock->file = NULL;
+- }
++ if (conn->sock)
+ sock_release(conn->sock);
+- }
+ conn->thread_set = NULL;
+
+ pr_debug("Moving to TARG_CONN_STATE_FREE.\n");
+diff --git a/drivers/target/iscsi/iscsi_target_core.h b/drivers/target/iscsi/iscsi_target_core.h
+index 1c70144..1dd5716 100644
+--- a/drivers/target/iscsi/iscsi_target_core.h
++++ b/drivers/target/iscsi/iscsi_target_core.h
+@@ -224,7 +224,6 @@ enum iscsi_timer_flags_table {
+ /* Used for struct iscsi_np->np_flags */
+ enum np_flags_table {
+ NPF_IP_NETWORK = 0x00,
+- NPF_SCTP_STRUCT_FILE = 0x01 /* Bugfix */
+ };
+
+ /* Used for struct iscsi_np->np_thread_state */
+@@ -503,7 +502,6 @@ struct iscsi_conn {
+ u16 local_port;
+ int net_size;
+ u32 auth_id;
+-#define CONNFLAG_SCTP_STRUCT_FILE 0x01
+ u32 conn_flags;
+ /* Used for iscsi_tx_login_rsp() */
+ u32 login_itt;
+diff --git a/drivers/target/iscsi/iscsi_target_login.c b/drivers/target/iscsi/iscsi_target_login.c
+index a3656c9..ae30424 100644
+--- a/drivers/target/iscsi/iscsi_target_login.c
++++ b/drivers/target/iscsi/iscsi_target_login.c
+@@ -795,22 +795,6 @@ int iscsi_target_setup_login_socket(
+ }
+ np->np_socket = sock;
+ /*
+- * The SCTP stack needs struct socket->file.
+- */
+- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+- (np->np_network_transport == ISCSI_SCTP_UDP)) {
+- if (!sock->file) {
+- sock->file = kzalloc(sizeof(struct file), GFP_KERNEL);
+- if (!sock->file) {
+- pr_err("Unable to allocate struct"
+- " file for SCTP\n");
+- ret = -ENOMEM;
+- goto fail;
+- }
+- np->np_flags |= NPF_SCTP_STRUCT_FILE;
+- }
+- }
+- /*
+ * Setup the np->np_sockaddr from the passed sockaddr setup
+ * in iscsi_target_configfs.c code..
+ */
+@@ -869,21 +853,15 @@ int iscsi_target_setup_login_socket(
+
+ fail:
+ np->np_socket = NULL;
+- if (sock) {
+- if (np->np_flags & NPF_SCTP_STRUCT_FILE) {
+- kfree(sock->file);
+- sock->file = NULL;
+- }
+-
++ if (sock)
+ sock_release(sock);
+- }
+ return ret;
+ }
+
+ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ {
+ u8 buffer[ISCSI_HDR_LEN], iscsi_opcode, zero_tsih = 0;
+- int err, ret = 0, set_sctp_conn_flag, stop;
++ int err, ret = 0, stop;
+ struct iscsi_conn *conn = NULL;
+ struct iscsi_login *login;
+ struct iscsi_portal_group *tpg = NULL;
+@@ -894,7 +872,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ struct sockaddr_in6 sock_in6;
+
+ flush_signals(current);
+- set_sctp_conn_flag = 0;
+ sock = np->np_socket;
+
+ spin_lock_bh(&np->np_thread_lock);
+@@ -917,35 +894,12 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ spin_unlock_bh(&np->np_thread_lock);
+ goto out;
+ }
+- /*
+- * The SCTP stack needs struct socket->file.
+- */
+- if ((np->np_network_transport == ISCSI_SCTP_TCP) ||
+- (np->np_network_transport == ISCSI_SCTP_UDP)) {
+- if (!new_sock->file) {
+- new_sock->file = kzalloc(
+- sizeof(struct file), GFP_KERNEL);
+- if (!new_sock->file) {
+- pr_err("Unable to allocate struct"
+- " file for SCTP\n");
+- sock_release(new_sock);
+- /* Get another socket */
+- return 1;
+- }
+- set_sctp_conn_flag = 1;
+- }
+- }
+-
+ iscsi_start_login_thread_timer(np);
+
+ conn = kzalloc(sizeof(struct iscsi_conn), GFP_KERNEL);
+ if (!conn) {
+ pr_err("Could not allocate memory for"
+ " new connection\n");
+- if (set_sctp_conn_flag) {
+- kfree(new_sock->file);
+- new_sock->file = NULL;
+- }
+ sock_release(new_sock);
+ /* Get another socket */
+ return 1;
+@@ -955,9 +909,6 @@ static int __iscsi_target_login_thread(struct iscsi_np *np)
+ conn->conn_state = TARG_CONN_STATE_FREE;
+ conn->sock = new_sock;
+
+- if (set_sctp_conn_flag)
+- conn->conn_flags |= CONNFLAG_SCTP_STRUCT_FILE;
+-
+ pr_debug("Moving to TARG_CONN_STATE_XPT_UP.\n");
+ conn->conn_state = TARG_CONN_STATE_XPT_UP;
+
+@@ -1205,13 +1156,8 @@ old_sess_out:
+ iscsi_release_param_list(conn->param_list);
+ conn->param_list = NULL;
+ }
+- if (conn->sock) {
+- if (conn->conn_flags & CONNFLAG_SCTP_STRUCT_FILE) {
+- kfree(conn->sock->file);
+- conn->sock->file = NULL;
+- }
++ if (conn->sock)
+ sock_release(conn->sock);
+- }
+ kfree(conn);
+
+ if (tpg) {
+diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
+index 634d0f3..c6c385f 100644
+--- a/drivers/target/target_core_transport.c
++++ b/drivers/target/target_core_transport.c
+@@ -1797,6 +1797,7 @@ void transport_generic_request_failure(struct se_cmd *cmd)
+ case TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE:
+ case TCM_UNKNOWN_MODE_PAGE:
+ case TCM_WRITE_PROTECTED:
++ case TCM_ADDRESS_OUT_OF_RANGE:
+ case TCM_CHECK_CONDITION_ABORT_CMD:
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ case TCM_CHECK_CONDITION_NOT_READY:
+@@ -4212,6 +4213,15 @@ int transport_send_check_condition_and_sense(
+ /* WRITE PROTECTED */
+ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x27;
+ break;
++ case TCM_ADDRESS_OUT_OF_RANGE:
++ /* CURRENT ERROR */
++ buffer[offset] = 0x70;
++ buffer[offset+SPC_ADD_SENSE_LEN_OFFSET] = 10;
++ /* ILLEGAL REQUEST */
++ buffer[offset+SPC_SENSE_KEY_OFFSET] = ILLEGAL_REQUEST;
++ /* LOGICAL BLOCK ADDRESS OUT OF RANGE */
++ buffer[offset+SPC_ASC_KEY_OFFSET] = 0x21;
++ break;
+ case TCM_CHECK_CONDITION_UNIT_ATTENTION:
+ /* CURRENT ERROR */
+ buffer[offset] = 0x70;
+diff --git a/drivers/usb/core/devio.c b/drivers/usb/core/devio.c
+index e0f1079..62679bc 100644
+--- a/drivers/usb/core/devio.c
++++ b/drivers/usb/core/devio.c
+@@ -1604,10 +1604,14 @@ static int processcompl_compat(struct async *as, void __user * __user *arg)
+ void __user *addr = as->userurb;
+ unsigned int i;
+
+- if (as->userbuffer && urb->actual_length)
+- if (copy_to_user(as->userbuffer, urb->transfer_buffer,
+- urb->actual_length))
++ if (as->userbuffer && urb->actual_length) {
++ if (urb->number_of_packets > 0) /* Isochronous */
++ i = urb->transfer_buffer_length;
++ else /* Non-Isoc */
++ i = urb->actual_length;
++ if (copy_to_user(as->userbuffer, urb->transfer_buffer, i))
+ return -EFAULT;
++ }
+ if (put_user(as->status, &userurb->status))
+ return -EFAULT;
+ if (put_user(urb->actual_length, &userurb->actual_length))
+diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c
+index 8fb4849..9fc2e90 100644
+--- a/drivers/usb/core/hub.c
++++ b/drivers/usb/core/hub.c
+@@ -4672,6 +4672,16 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ }
+ parent_hub = hdev_to_hub(parent_hdev);
+
++ /* Disable LPM while we reset the device and reinstall the alt settings.
++ * Device-initiated LPM settings, and system exit latency settings are
++ * cleared when the device is reset, so we have to set them up again.
++ */
++ ret = usb_unlocked_disable_lpm(udev);
++ if (ret) {
++ dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
++ goto re_enumerate;
++ }
++
+ set_bit(port1, parent_hub->busy_bits);
+ for (i = 0; i < SET_CONFIG_TRIES; ++i) {
+
+@@ -4699,22 +4709,11 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ goto done;
+
+ mutex_lock(hcd->bandwidth_mutex);
+- /* Disable LPM while we reset the device and reinstall the alt settings.
+- * Device-initiated LPM settings, and system exit latency settings are
+- * cleared when the device is reset, so we have to set them up again.
+- */
+- ret = usb_disable_lpm(udev);
+- if (ret) {
+- dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__);
+- mutex_unlock(hcd->bandwidth_mutex);
+- goto done;
+- }
+ ret = usb_hcd_alloc_bandwidth(udev, udev->actconfig, NULL, NULL);
+ if (ret < 0) {
+ dev_warn(&udev->dev,
+ "Busted HC? Not enough HCD resources for "
+ "old configuration.\n");
+- usb_enable_lpm(udev);
+ mutex_unlock(hcd->bandwidth_mutex);
+ goto re_enumerate;
+ }
+@@ -4726,7 +4725,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ dev_err(&udev->dev,
+ "can't restore configuration #%d (error=%d)\n",
+ udev->actconfig->desc.bConfigurationValue, ret);
+- usb_enable_lpm(udev);
+ mutex_unlock(hcd->bandwidth_mutex);
+ goto re_enumerate;
+ }
+@@ -4765,17 +4763,17 @@ static int usb_reset_and_verify_device(struct usb_device *udev)
+ desc->bInterfaceNumber,
+ desc->bAlternateSetting,
+ ret);
+- usb_unlocked_enable_lpm(udev);
+ goto re_enumerate;
+ }
+ }
+
++done:
+ /* Now that the alt settings are re-installed, enable LPM. */
+ usb_unlocked_enable_lpm(udev);
+-done:
+ return 0;
+
+ re_enumerate:
++ /* LPM state doesn't matter when we're about to destroy the device. */
+ hub_port_logical_disconnect(parent_hub, port1);
+ return -ENODEV;
+ }
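
The hub.c rework above disables LPM once, before the reset and outside the bandwidth mutex, and re-enables it only after the alt settings are reinstalled; on the re-enumeration path it stays disabled because the device is about to be torn down. In outline, with the reset and reinstall steps folded into a hypothetical helper:

#include <linux/errno.h>
#include <linux/usb.h>

/* reset the device and reinstall config/alt settings; omitted in this sketch.
 * Returns 0 on success, -ENODEV if the device must be re-enumerated. */
static int my_reset_and_reinstall(struct usb_device *udev)
{
        return 0;
}

static int my_reset_and_verify(struct usb_device *udev)
{
        int ret;

        ret = usb_unlocked_disable_lpm(udev);   /* before touching the device */
        if (ret)
                return ret;

        ret = my_reset_and_reinstall(udev);
        if (ret == -ENODEV)
                return ret;                     /* re-enumerating: leave LPM off */

        usb_unlocked_enable_lpm(udev);          /* alt settings are back in place */
        return ret;
}
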
+diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
+index bdd1c67..11cc49d 100644
+--- a/drivers/usb/core/message.c
++++ b/drivers/usb/core/message.c
+@@ -1174,6 +1174,7 @@ void usb_disable_device(struct usb_device *dev, int skip_ep0)
+ put_device(&dev->actconfig->interface[i]->dev);
+ dev->actconfig->interface[i] = NULL;
+ }
++ usb_unlocked_disable_lpm(dev);
+ dev->actconfig = NULL;
+ if (dev->state == USB_STATE_CONFIGURED)
+ usb_set_device_state(dev, USB_STATE_ADDRESS);
+@@ -1791,14 +1792,15 @@ free_interfaces:
+ * installed, so that the xHCI driver can recalculate the U1/U2
+ * timeouts.
+ */
+- if (usb_disable_lpm(dev)) {
++ if (dev->actconfig && usb_disable_lpm(dev)) {
+ dev_err(&dev->dev, "%s Failed to disable LPM\n.", __func__);
+ mutex_unlock(hcd->bandwidth_mutex);
+ return -ENOMEM;
+ }
+ ret = usb_hcd_alloc_bandwidth(dev, cp, NULL, NULL);
+ if (ret < 0) {
+- usb_enable_lpm(dev);
++ if (dev->actconfig)
++ usb_enable_lpm(dev);
+ mutex_unlock(hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
+ goto free_interfaces;
+@@ -1818,7 +1820,7 @@ free_interfaces:
+ if (!cp) {
+ usb_set_device_state(dev, USB_STATE_ADDRESS);
+ usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL);
+- usb_enable_lpm(dev);
++ /* Leave LPM disabled while the device is unconfigured. */
+ mutex_unlock(hcd->bandwidth_mutex);
+ usb_autosuspend_device(dev);
+ goto free_interfaces;
+diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
+index 25d0c61..cd8fb44 100644
+--- a/drivers/usb/core/usb.c
++++ b/drivers/usb/core/usb.c
+@@ -396,6 +396,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
+ dev->dev.dma_mask = bus->controller->dma_mask;
+ set_dev_node(&dev->dev, dev_to_node(bus->controller));
+ dev->state = USB_STATE_ATTACHED;
++ dev->lpm_disable_count = 1;
+ atomic_set(&dev->urbnum, 0);
+
+ INIT_LIST_HEAD(&dev->ep0.urb_list);
+diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
+index 1fc8f12..347bb05 100644
+--- a/drivers/usb/early/ehci-dbgp.c
++++ b/drivers/usb/early/ehci-dbgp.c
+@@ -450,7 +450,7 @@ static int dbgp_ehci_startup(void)
+ writel(FLAG_CF, &ehci_regs->configured_flag);
+
+ /* Wait until the controller is no longer halted */
+- loop = 10;
++ loop = 1000;
+ do {
+ status = readl(&ehci_regs->status);
+ if (!(status & STS_HALT))
+diff --git a/drivers/usb/gadget/u_ether.c b/drivers/usb/gadget/u_ether.c
+index 47cf48b..5b46f02 100644
+--- a/drivers/usb/gadget/u_ether.c
++++ b/drivers/usb/gadget/u_ether.c
+@@ -798,12 +798,6 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+
+ SET_ETHTOOL_OPS(net, &ops);
+
+- /* two kinds of host-initiated state changes:
+- * - iff DATA transfer is active, carrier is "on"
+- * - tx queueing enabled if open *and* carrier is "on"
+- */
+- netif_carrier_off(net);
+-
+ dev->gadget = g;
+ SET_NETDEV_DEV(net, &g->dev);
+ SET_NETDEV_DEVTYPE(net, &gadget_type);
+@@ -817,6 +811,12 @@ int gether_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
+
+ the_dev = dev;
++
++ /* two kinds of host-initiated state changes:
++ * - iff DATA transfer is active, carrier is "on"
++ * - tx queueing enabled if open *and* carrier is "on"
++ */
++ netif_carrier_off(net);
+ }
+
+ return status;
+diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
+index 417ab1b..46cee56 100644
+--- a/drivers/usb/serial/option.c
++++ b/drivers/usb/serial/option.c
+@@ -936,6 +936,8 @@ static const struct usb_device_id option_ids[] = {
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0167, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0326, 0xff, 0xff, 0xff),
++ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff),
+ .driver_info = (kernel_ulong_t)&net_intf4_blacklist },
+ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff),
+diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c
+index 8ec8a6e..f98ba40 100644
+--- a/drivers/usb/storage/uas.c
++++ b/drivers/usb/storage/uas.c
+@@ -58,9 +58,6 @@ enum {
+ SUBMIT_DATA_OUT_URB = (1 << 5),
+ ALLOC_CMD_URB = (1 << 6),
+ SUBMIT_CMD_URB = (1 << 7),
+- COMPLETED_DATA_IN = (1 << 8),
+- COMPLETED_DATA_OUT = (1 << 9),
+- DATA_COMPLETES_CMD = (1 << 10),
+ };
+
+ /* Overrides scsi_pointer */
+@@ -114,7 +111,6 @@ static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
+ {
+ struct sense_iu *sense_iu = urb->transfer_buffer;
+ struct scsi_device *sdev = cmnd->device;
+- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+
+ if (urb->actual_length > 16) {
+ unsigned len = be16_to_cpup(&sense_iu->len);
+@@ -132,15 +128,13 @@ static void uas_sense(struct urb *urb, struct scsi_cmnd *cmnd)
+ }
+
+ cmnd->result = sense_iu->status;
+- if (!(cmdinfo->state & DATA_COMPLETES_CMD))
+- cmnd->scsi_done(cmnd);
++ cmnd->scsi_done(cmnd);
+ }
+
+ static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
+ {
+ struct sense_iu_old *sense_iu = urb->transfer_buffer;
+ struct scsi_device *sdev = cmnd->device;
+- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+
+ if (urb->actual_length > 8) {
+ unsigned len = be16_to_cpup(&sense_iu->len) - 2;
+@@ -158,8 +152,7 @@ static void uas_sense_old(struct urb *urb, struct scsi_cmnd *cmnd)
+ }
+
+ cmnd->result = sense_iu->status;
+- if (!(cmdinfo->state & DATA_COMPLETES_CMD))
+- cmnd->scsi_done(cmnd);
++ cmnd->scsi_done(cmnd);
+ }
+
+ static void uas_xfer_data(struct urb *urb, struct scsi_cmnd *cmnd,
+@@ -184,7 +177,6 @@ static void uas_stat_cmplt(struct urb *urb)
+ struct Scsi_Host *shost = urb->context;
+ struct uas_dev_info *devinfo = (void *)shost->hostdata[0];
+ struct scsi_cmnd *cmnd;
+- struct uas_cmd_info *cmdinfo;
+ u16 tag;
+ int ret;
+
+@@ -210,32 +202,12 @@ static void uas_stat_cmplt(struct urb *urb)
+ dev_err(&urb->dev->dev, "failed submit status urb\n");
+ return;
+ }
+- cmdinfo = (void *)&cmnd->SCp;
+
+ switch (iu->iu_id) {
+ case IU_ID_STATUS:
+ if (devinfo->cmnd == cmnd)
+ devinfo->cmnd = NULL;
+
+- if (!(cmdinfo->state & COMPLETED_DATA_IN) &&
+- cmdinfo->data_in_urb) {
+- if (devinfo->use_streams) {
+- cmdinfo->state |= DATA_COMPLETES_CMD;
+- usb_unlink_urb(cmdinfo->data_in_urb);
+- } else {
+- usb_free_urb(cmdinfo->data_in_urb);
+- }
+- }
+- if (!(cmdinfo->state & COMPLETED_DATA_OUT) &&
+- cmdinfo->data_out_urb) {
+- if (devinfo->use_streams) {
+- cmdinfo->state |= DATA_COMPLETES_CMD;
+- usb_unlink_urb(cmdinfo->data_in_urb);
+- } else {
+- usb_free_urb(cmdinfo->data_out_urb);
+- }
+- }
+-
+ if (urb->actual_length < 16)
+ devinfo->uas_sense_old = 1;
+ if (devinfo->uas_sense_old)
+@@ -264,59 +236,27 @@ static void uas_stat_cmplt(struct urb *urb)
+ dev_err(&urb->dev->dev, "failed submit status urb\n");
+ }
+
+-static void uas_data_out_cmplt(struct urb *urb)
+-{
+- struct scsi_cmnd *cmnd = urb->context;
+- struct scsi_data_buffer *sdb = scsi_out(cmnd);
+- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+-
+- cmdinfo->state |= COMPLETED_DATA_OUT;
+-
+- sdb->resid = sdb->length - urb->actual_length;
+- usb_free_urb(urb);
+-
+- if (cmdinfo->state & DATA_COMPLETES_CMD)
+- cmnd->scsi_done(cmnd);
+-}
+-
+-static void uas_data_in_cmplt(struct urb *urb)
++static void uas_data_cmplt(struct urb *urb)
+ {
+- struct scsi_cmnd *cmnd = urb->context;
+- struct scsi_data_buffer *sdb = scsi_in(cmnd);
+- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+-
+- cmdinfo->state |= COMPLETED_DATA_IN;
+-
++ struct scsi_data_buffer *sdb = urb->context;
+ sdb->resid = sdb->length - urb->actual_length;
+ usb_free_urb(urb);
+-
+- if (cmdinfo->state & DATA_COMPLETES_CMD)
+- cmnd->scsi_done(cmnd);
+ }
+
+ static struct urb *uas_alloc_data_urb(struct uas_dev_info *devinfo, gfp_t gfp,
+- unsigned int pipe, struct scsi_cmnd *cmnd,
+- enum dma_data_direction dir)
++ unsigned int pipe, u16 stream_id,
++ struct scsi_data_buffer *sdb,
++ enum dma_data_direction dir)
+ {
+- struct uas_cmd_info *cmdinfo = (void *)&cmnd->SCp;
+ struct usb_device *udev = devinfo->udev;
+ struct urb *urb = usb_alloc_urb(0, gfp);
+- struct scsi_data_buffer *sdb;
+- usb_complete_t complete_fn;
+- u16 stream_id = cmdinfo->stream;
+
+ if (!urb)
+ goto out;
+- if (dir == DMA_FROM_DEVICE) {
+- sdb = scsi_in(cmnd);
+- complete_fn = uas_data_in_cmplt;
+- } else {
+- sdb = scsi_out(cmnd);
+- complete_fn = uas_data_out_cmplt;
+- }
+- usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length,
+- complete_fn, cmnd);
+- urb->stream_id = stream_id;
++ usb_fill_bulk_urb(urb, udev, pipe, NULL, sdb->length, uas_data_cmplt,
++ sdb);
++ if (devinfo->use_streams)
++ urb->stream_id = stream_id;
+ urb->num_sgs = udev->bus->sg_tablesize ? sdb->table.nents : 0;
+ urb->sg = sdb->table.sgl;
+ out:
+@@ -418,8 +358,8 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+
+ if (cmdinfo->state & ALLOC_DATA_IN_URB) {
+ cmdinfo->data_in_urb = uas_alloc_data_urb(devinfo, gfp,
+- devinfo->data_in_pipe, cmnd,
+- DMA_FROM_DEVICE);
++ devinfo->data_in_pipe, cmdinfo->stream,
++ scsi_in(cmnd), DMA_FROM_DEVICE);
+ if (!cmdinfo->data_in_urb)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ cmdinfo->state &= ~ALLOC_DATA_IN_URB;
+@@ -436,8 +376,8 @@ static int uas_submit_urbs(struct scsi_cmnd *cmnd,
+
+ if (cmdinfo->state & ALLOC_DATA_OUT_URB) {
+ cmdinfo->data_out_urb = uas_alloc_data_urb(devinfo, gfp,
+- devinfo->data_out_pipe, cmnd,
+- DMA_TO_DEVICE);
++ devinfo->data_out_pipe, cmdinfo->stream,
++ scsi_out(cmnd), DMA_TO_DEVICE);
+ if (!cmdinfo->data_out_urb)
+ return SCSI_MLQUEUE_DEVICE_BUSY;
+ cmdinfo->state &= ~ALLOC_DATA_OUT_URB;
+diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
+index 4270414..58b7d14 100644
+--- a/fs/btrfs/async-thread.c
++++ b/fs/btrfs/async-thread.c
+@@ -206,10 +206,17 @@ static noinline void run_ordered_completions(struct btrfs_workers *workers,
+
+ work->ordered_func(work);
+
+- /* now take the lock again and call the freeing code */
++ /* now take the lock again and drop our item from the list */
+ spin_lock(&workers->order_lock);
+ list_del(&work->order_list);
++ spin_unlock(&workers->order_lock);
++
++ /*
++ * we don't want to call the ordered free functions
++ * with the lock held though
++ */
+ work->ordered_free(work);
++ spin_lock(&workers->order_lock);
+ }
+
+ spin_unlock(&workers->order_lock);
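
The async-thread hunk above drops order_lock before calling work->ordered_free() and re-takes it afterwards, since the callback must not run under the spinlock. The drop-lock-around-callback shape as a minimal sketch (my_workers and my_work are illustrative, not btrfs's types):

#include <linux/list.h>
#include <linux/spinlock.h>

struct my_work {
        struct list_head        list;
        void                    (*free_fn)(struct my_work *work);
};

struct my_workers {
        spinlock_t              lock;
        struct list_head        done;
};

static void run_ordered_free(struct my_workers *w)
{
        struct my_work *work;

        spin_lock(&w->lock);
        while (!list_empty(&w->done)) {
                work = list_first_entry(&w->done, struct my_work, list);
                list_del(&work->list);

                spin_unlock(&w->lock);  /* never call back with the lock held */
                work->free_fn(work);    /* may take other locks or free work */
                spin_lock(&w->lock);
        }
        spin_unlock(&w->lock);
}
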
+diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
+index 6df0cbe..d86ba9f 100644
+--- a/fs/cifs/cifsglob.h
++++ b/fs/cifs/cifsglob.h
+@@ -657,13 +657,13 @@ struct cifs_io_parms {
+ * Take a reference on the file private data. Must be called with
+ * cifs_file_list_lock held.
+ */
+-static inline
+-struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file)
++static inline void
++cifsFileInfo_get_locked(struct cifsFileInfo *cifs_file)
+ {
+ ++cifs_file->count;
+- return cifs_file;
+ }
+
++struct cifsFileInfo *cifsFileInfo_get(struct cifsFileInfo *cifs_file);
+ void cifsFileInfo_put(struct cifsFileInfo *cifs_file);
+
+ /*
+diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
+index 94b7788..f967005 100644
+--- a/fs/cifs/connect.c
++++ b/fs/cifs/connect.c
+@@ -238,8 +238,8 @@ static const match_table_t cifs_mount_option_tokens = {
+ enum {
+ Opt_sec_krb5, Opt_sec_krb5i, Opt_sec_krb5p,
+ Opt_sec_ntlmsspi, Opt_sec_ntlmssp,
+- Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2i,
+- Opt_sec_nontlm, Opt_sec_lanman,
++ Opt_ntlm, Opt_sec_ntlmi, Opt_sec_ntlmv2,
++ Opt_sec_ntlmv2i, Opt_sec_lanman,
+ Opt_sec_none,
+
+ Opt_sec_err
+@@ -253,8 +253,9 @@ static const match_table_t cifs_secflavor_tokens = {
+ { Opt_sec_ntlmssp, "ntlmssp" },
+ { Opt_ntlm, "ntlm" },
+ { Opt_sec_ntlmi, "ntlmi" },
++ { Opt_sec_ntlmv2, "nontlm" },
++ { Opt_sec_ntlmv2, "ntlmv2" },
+ { Opt_sec_ntlmv2i, "ntlmv2i" },
+- { Opt_sec_nontlm, "nontlm" },
+ { Opt_sec_lanman, "lanman" },
+ { Opt_sec_none, "none" },
+
+@@ -1167,7 +1168,7 @@ static int cifs_parse_security_flavors(char *value,
+ case Opt_sec_ntlmi:
+ vol->secFlg |= CIFSSEC_MAY_NTLM | CIFSSEC_MUST_SIGN;
+ break;
+- case Opt_sec_nontlm:
++ case Opt_sec_ntlmv2:
+ vol->secFlg |= CIFSSEC_MAY_NTLMV2;
+ break;
+ case Opt_sec_ntlmv2i:
+diff --git a/fs/cifs/file.c b/fs/cifs/file.c
+index 513adbc..2cbda4e 100644
+--- a/fs/cifs/file.c
++++ b/fs/cifs/file.c
+@@ -284,6 +284,15 @@ cifs_new_fileinfo(__u16 fileHandle, struct file *file,
+
+ static void cifs_del_lock_waiters(struct cifsLockInfo *lock);
+
++struct cifsFileInfo *
++cifsFileInfo_get(struct cifsFileInfo *cifs_file)
++{
++ spin_lock(&cifs_file_list_lock);
++ cifsFileInfo_get_locked(cifs_file);
++ spin_unlock(&cifs_file_list_lock);
++ return cifs_file;
++}
++
+ /*
+ * Release a reference on the file private data. This may involve closing
+ * the filehandle out on the server. Must be called without holding
+@@ -1563,7 +1572,7 @@ struct cifsFileInfo *find_readable_file(struct cifsInodeInfo *cifs_inode,
+ if (!open_file->invalidHandle) {
+ /* found a good file */
+ /* lock it so it will not be closed on us */
+- cifsFileInfo_get(open_file);
++ cifsFileInfo_get_locked(open_file);
+ spin_unlock(&cifs_file_list_lock);
+ return open_file;
+ } /* else might as well continue, and look for
+@@ -1615,7 +1624,7 @@ refind_writable:
+ if (OPEN_FMODE(open_file->f_flags) & FMODE_WRITE) {
+ if (!open_file->invalidHandle) {
+ /* found a good writable file */
+- cifsFileInfo_get(open_file);
++ cifsFileInfo_get_locked(open_file);
+ spin_unlock(&cifs_file_list_lock);
+ return open_file;
+ } else {
+@@ -1632,7 +1641,7 @@ refind_writable:
+
+ if (inv_file) {
+ any_available = false;
+- cifsFileInfo_get(inv_file);
++ cifsFileInfo_get_locked(inv_file);
+ }
+
+ spin_unlock(&cifs_file_list_lock);
+@@ -3082,8 +3091,6 @@ static int cifs_readpages(struct file *file, struct address_space *mapping,
+ break;
+ }
+
+- spin_lock(&cifs_file_list_lock);
+- spin_unlock(&cifs_file_list_lock);
+ rdata->cfile = cifsFileInfo_get(open_file);
+ rdata->mapping = mapping;
+ rdata->offset = offset;
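
The cifs hunks above split reference taking into cifsFileInfo_get_locked(), a bare increment for callers that already hold cifs_file_list_lock, and cifsFileInfo_get(), which takes the lock itself. The same split in generic form, with hypothetical names:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(my_list_lock);

struct my_obj {
        int     count;
};

/* for callers that already hold my_list_lock */
static inline void obj_get_locked(struct my_obj *o)
{
        ++o->count;
}

/* for everyone else */
static struct my_obj *obj_get(struct my_obj *o)
{
        spin_lock(&my_list_lock);
        obj_get_locked(o);
        spin_unlock(&my_list_lock);
        return o;
}
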
+diff --git a/fs/exec.c b/fs/exec.c
+index da27b91..e95aeed 100644
+--- a/fs/exec.c
++++ b/fs/exec.c
+@@ -1020,7 +1020,7 @@ static void flush_old_files(struct files_struct * files)
+ unsigned long set, i;
+
+ j++;
+- i = j * __NFDBITS;
++ i = j * BITS_PER_LONG;
+ fdt = files_fdtable(files);
+ if (i >= fdt->max_fds)
+ break;
+diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
+index cee7812..d23b31c 100644
+--- a/fs/ext4/balloc.c
++++ b/fs/ext4/balloc.c
+@@ -609,7 +609,8 @@ ext4_fsblk_t ext4_count_free_clusters(struct super_block *sb)
+ if (bitmap_bh == NULL)
+ continue;
+
+- x = ext4_count_free(bitmap_bh, sb->s_blocksize);
++ x = ext4_count_free(bitmap_bh->b_data,
++ EXT4_BLOCKS_PER_GROUP(sb) / 8);
+ printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
+ i, ext4_free_group_clusters(sb, gdp), x);
+ bitmap_count += x;
+diff --git a/fs/ext4/bitmap.c b/fs/ext4/bitmap.c
+index b319721..a94b9c6 100644
+--- a/fs/ext4/bitmap.c
++++ b/fs/ext4/bitmap.c
+@@ -11,24 +11,18 @@
+ #include <linux/jbd2.h>
+ #include "ext4.h"
+
+-#ifdef EXT4FS_DEBUG
+-
+ static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0};
+
+-unsigned int ext4_count_free(struct buffer_head *map, unsigned int numchars)
++unsigned int ext4_count_free(char *bitmap, unsigned int numchars)
+ {
+ unsigned int i, sum = 0;
+
+- if (!map)
+- return 0;
+ for (i = 0; i < numchars; i++)
+- sum += nibblemap[map->b_data[i] & 0xf] +
+- nibblemap[(map->b_data[i] >> 4) & 0xf];
++ sum += nibblemap[bitmap[i] & 0xf] +
++ nibblemap[(bitmap[i] >> 4) & 0xf];
+ return sum;
+ }
+
+-#endif /* EXT4FS_DEBUG */
+-
+ int ext4_inode_bitmap_csum_verify(struct super_block *sb, ext4_group_t group,
+ struct ext4_group_desc *gdp,
+ struct buffer_head *bh, int sz)
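
The new ext4_count_free() above takes a plain byte buffer and counts clear bits four at a time: each nibblemap entry is the number of zero bits in that nibble value. A stand-alone userspace illustration of the same table:

#include <stdio.h>

static const int nibblemap[] = {4, 3, 3, 2, 3, 2, 2, 1,
                                3, 2, 2, 1, 2, 1, 1, 0};

static unsigned int count_free(const unsigned char *bitmap, unsigned int numchars)
{
        unsigned int i, sum = 0;

        for (i = 0; i < numchars; i++)
                sum += nibblemap[bitmap[i] & 0xf] +
                       nibblemap[(bitmap[i] >> 4) & 0xf];
        return sum;
}

int main(void)
{
        unsigned char bmp[2] = { 0xff, 0x0f };  /* 12 bits set, 4 clear */

        printf("%u free\n", count_free(bmp, 2));        /* prints "4 free" */
        return 0;
}
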
+diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
+index cfc4e01..01434f2 100644
+--- a/fs/ext4/ext4.h
++++ b/fs/ext4/ext4.h
+@@ -1161,8 +1161,7 @@ struct ext4_sb_info {
+ unsigned long s_desc_per_block; /* Number of group descriptors per block */
+ ext4_group_t s_groups_count; /* Number of groups in the fs */
+ ext4_group_t s_blockfile_groups;/* Groups acceptable for non-extent files */
+- unsigned long s_overhead_last; /* Last calculated overhead */
+- unsigned long s_blocks_last; /* Last seen block count */
++ unsigned long s_overhead; /* # of fs overhead clusters */
+ unsigned int s_cluster_ratio; /* Number of blocks per cluster */
+ unsigned int s_cluster_bits; /* log2 of s_cluster_ratio */
+ loff_t s_bitmap_maxbytes; /* max bytes for bitmap files */
+@@ -1852,7 +1851,7 @@ struct mmpd_data {
+ # define NORET_AND noreturn,
+
+ /* bitmap.c */
+-extern unsigned int ext4_count_free(struct buffer_head *, unsigned);
++extern unsigned int ext4_count_free(char *bitmap, unsigned numchars);
+ void ext4_inode_bitmap_csum_set(struct super_block *sb, ext4_group_t group,
+ struct ext4_group_desc *gdp,
+ struct buffer_head *bh, int sz);
+@@ -2037,6 +2036,7 @@ extern int ext4_group_extend(struct super_block *sb,
+ extern int ext4_resize_fs(struct super_block *sb, ext4_fsblk_t n_blocks_count);
+
+ /* super.c */
++extern int ext4_calculate_overhead(struct super_block *sb);
+ extern int ext4_superblock_csum_verify(struct super_block *sb,
+ struct ext4_super_block *es);
+ extern void ext4_superblock_csum_set(struct super_block *sb,
+diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
+index 91341ec..58a75fe 100644
+--- a/fs/ext4/extents.c
++++ b/fs/ext4/extents.c
+@@ -2570,10 +2570,10 @@ static int ext4_ext_remove_space(struct inode *inode, ext4_lblk_t start,
+ {
+ struct super_block *sb = inode->i_sb;
+ int depth = ext_depth(inode);
+- struct ext4_ext_path *path;
++ struct ext4_ext_path *path = NULL;
+ ext4_fsblk_t partial_cluster = 0;
+ handle_t *handle;
+- int i, err;
++ int i = 0, err;
+
+ ext_debug("truncate since %u to %u\n", start, end);
+
+@@ -2606,8 +2606,12 @@ again:
+ }
+ depth = ext_depth(inode);
+ ex = path[depth].p_ext;
+- if (!ex)
++ if (!ex) {
++ ext4_ext_drop_refs(path);
++ kfree(path);
++ path = NULL;
+ goto cont;
++ }
+
+ ee_block = le32_to_cpu(ex->ee_block);
+
+@@ -2637,8 +2641,6 @@ again:
+ if (err < 0)
+ goto out;
+ }
+- ext4_ext_drop_refs(path);
+- kfree(path);
+ }
+ cont:
+
+@@ -2647,19 +2649,27 @@ cont:
+ * after i_size and walking into the tree depth-wise.
+ */
+ depth = ext_depth(inode);
+- path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1), GFP_NOFS);
+- if (path == NULL) {
+- ext4_journal_stop(handle);
+- return -ENOMEM;
+- }
+- path[0].p_depth = depth;
+- path[0].p_hdr = ext_inode_hdr(inode);
++ if (path) {
++ int k = i = depth;
++ while (--k > 0)
++ path[k].p_block =
++ le16_to_cpu(path[k].p_hdr->eh_entries)+1;
++ } else {
++ path = kzalloc(sizeof(struct ext4_ext_path) * (depth + 1),
++ GFP_NOFS);
++ if (path == NULL) {
++ ext4_journal_stop(handle);
++ return -ENOMEM;
++ }
++ path[0].p_depth = depth;
++ path[0].p_hdr = ext_inode_hdr(inode);
+
+- if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
+- err = -EIO;
+- goto out;
++ if (ext4_ext_check(inode, path[0].p_hdr, depth)) {
++ err = -EIO;
++ goto out;
++ }
+ }
+- i = err = 0;
++ err = 0;
+
+ while (i >= 0 && err == 0) {
+ if (i == depth) {
+@@ -2773,8 +2783,10 @@ cont:
+ out:
+ ext4_ext_drop_refs(path);
+ kfree(path);
+- if (err == -EAGAIN)
++ if (err == -EAGAIN) {
++ path = NULL;
+ goto again;
++ }
+ ext4_journal_stop(handle);
+
+ return err;
+diff --git a/fs/ext4/ialloc.c b/fs/ext4/ialloc.c
+index d48e8b1..6866bc2 100644
+--- a/fs/ext4/ialloc.c
++++ b/fs/ext4/ialloc.c
+@@ -1054,7 +1054,8 @@ unsigned long ext4_count_free_inodes(struct super_block *sb)
+ if (!bitmap_bh)
+ continue;
+
+- x = ext4_count_free(bitmap_bh, EXT4_INODES_PER_GROUP(sb) / 8);
++ x = ext4_count_free(bitmap_bh->b_data,
++ EXT4_INODES_PER_GROUP(sb) / 8);
+ printk(KERN_DEBUG "group %lu: stored = %d, counted = %lu\n",
+ (unsigned long) i, ext4_free_inodes_count(sb, gdp), x);
+ bitmap_count += x;
+diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
+index 02bc8cb..47a3e00 100644
+--- a/fs/ext4/inode.c
++++ b/fs/ext4/inode.c
+@@ -346,6 +346,15 @@ void ext4_da_update_reserve_space(struct inode *inode,
+ used = ei->i_reserved_data_blocks;
+ }
+
++ if (unlikely(ei->i_allocated_meta_blocks > ei->i_reserved_meta_blocks)) {
++ ext4_msg(inode->i_sb, KERN_NOTICE, "%s: ino %lu, allocated %d "
++ "with only %d reserved metadata blocks\n", __func__,
++ inode->i_ino, ei->i_allocated_meta_blocks,
++ ei->i_reserved_meta_blocks);
++ WARN_ON(1);
++ ei->i_allocated_meta_blocks = ei->i_reserved_meta_blocks;
++ }
++
+ /* Update per-inode reservations */
+ ei->i_reserved_data_blocks -= used;
+ ei->i_reserved_meta_blocks -= ei->i_allocated_meta_blocks;
+@@ -1171,6 +1180,17 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ struct ext4_inode_info *ei = EXT4_I(inode);
+ unsigned int md_needed;
+ int ret;
++ ext4_lblk_t save_last_lblock;
++ int save_len;
++
++ /*
++ * We will charge metadata quota at writeout time; this saves
++ * us from metadata over-estimation, though we may go over by
++ * a small amount in the end. Here we just reserve for data.
++ */
++ ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
++ if (ret)
++ return ret;
+
+ /*
+ * recalculate the amount of metadata blocks to reserve
+@@ -1179,32 +1199,31 @@ static int ext4_da_reserve_space(struct inode *inode, ext4_lblk_t lblock)
+ */
+ repeat:
+ spin_lock(&ei->i_block_reservation_lock);
++ /*
++ * ext4_calc_metadata_amount() has side effects, which we have
++ * to be prepared undo if we fail to claim space.
++ */
++ save_len = ei->i_da_metadata_calc_len;
++ save_last_lblock = ei->i_da_metadata_calc_last_lblock;
+ md_needed = EXT4_NUM_B2C(sbi,
+ ext4_calc_metadata_amount(inode, lblock));
+ trace_ext4_da_reserve_space(inode, md_needed);
+- spin_unlock(&ei->i_block_reservation_lock);
+
+ /*
+- * We will charge metadata quota at writeout time; this saves
+- * us from metadata over-estimation, though we may go over by
+- * a small amount in the end. Here we just reserve for data.
+- */
+- ret = dquot_reserve_block(inode, EXT4_C2B(sbi, 1));
+- if (ret)
+- return ret;
+- /*
+ * We do still charge estimated metadata to the sb though;
+ * we cannot afford to run out of free blocks.
+ */
+ if (ext4_claim_free_clusters(sbi, md_needed + 1, 0)) {
+- dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
++ ei->i_da_metadata_calc_len = save_len;
++ ei->i_da_metadata_calc_last_lblock = save_last_lblock;
++ spin_unlock(&ei->i_block_reservation_lock);
+ if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
+ yield();
+ goto repeat;
+ }
++ dquot_release_reservation_block(inode, EXT4_C2B(sbi, 1));
+ return -ENOSPC;
+ }
+- spin_lock(&ei->i_block_reservation_lock);
+ ei->i_reserved_data_blocks++;
+ ei->i_reserved_meta_blocks += md_needed;
+ spin_unlock(&ei->i_block_reservation_lock);
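ext4_calc_metadata_amount() updates the cached i_da_metadata_calc_len and i_da_metadata_calc_last_lblock fields as a side effect, so the hunk above snapshots both while i_block_reservation_lock is held and restores them if ext4_claim_free_clusters() fails, instead of dropping and retaking the lock around the claim. A hedged sketch of that snapshot-and-rollback-under-lock pattern using pthreads; all names and numbers are illustrative:

#include <pthread.h>
#include <stdio.h>

struct resv_state {
    pthread_mutex_t lock;
    int calc_len;    /* stands in for i_da_metadata_calc_len */
    int calc_last;   /* stands in for i_da_metadata_calc_last_lblock */
    int reserved;
};

/* Pretend cluster pool: claiming fails once 100 units are reserved. */
static int claim(struct resv_state *s, int want)
{
    return (s->reserved + want > 100) ? -1 : 0;
}

/* Estimate metadata cost; like ext4_calc_metadata_amount() it mutates
 * the cached calculation state as a side effect. */
static int estimate(struct resv_state *s, int lblock)
{
    s->calc_len++;
    s->calc_last = lblock;
    return 2;
}

static int reserve_one(struct resv_state *s, int lblock)
{
    int save_len, save_last, md, ret = 0;

    pthread_mutex_lock(&s->lock);
    save_len  = s->calc_len;      /* snapshot before the side effects */
    save_last = s->calc_last;
    md = estimate(s, lblock);

    if (claim(s, md + 1)) {
        s->calc_len  = save_len;  /* roll back while still holding the lock */
        s->calc_last = save_last;
        ret = -1;
    } else {
        s->reserved += md + 1;
    }
    pthread_mutex_unlock(&s->lock);
    return ret;
}

int main(void)
{
    struct resv_state s = { .lock = PTHREAD_MUTEX_INITIALIZER };
    int i, ok = 0;

    for (i = 0; i < 50; i++)
        if (reserve_one(&s, i) == 0)
            ok++;
    printf("%d reservations succeeded, calc_len=%d\n", ok, s.calc_len);
    return 0;
}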
+diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
+index 5845cd9..0edaf18 100644
+--- a/fs/ext4/namei.c
++++ b/fs/ext4/namei.c
+@@ -2918,8 +2918,15 @@ static int ext4_rename(struct inode *old_dir, struct dentry *old_dentry,
+ PARENT_INO(dir_bh->b_data, new_dir->i_sb->s_blocksize) =
+ cpu_to_le32(new_dir->i_ino);
+ BUFFER_TRACE(dir_bh, "call ext4_handle_dirty_metadata");
+- retval = ext4_handle_dirty_dirent_node(handle, old_inode,
+- dir_bh);
++ if (is_dx(old_inode)) {
++ retval = ext4_handle_dirty_dx_node(handle,
++ old_inode,
++ dir_bh);
++ } else {
++ retval = ext4_handle_dirty_dirent_node(handle,
++ old_inode,
++ dir_bh);
++ }
+ if (retval) {
+ ext4_std_error(old_dir->i_sb, retval);
+ goto end_rename;
+diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
+index 7ea6cbb..17d38de 100644
+--- a/fs/ext4/resize.c
++++ b/fs/ext4/resize.c
+@@ -1197,7 +1197,7 @@ static void ext4_update_super(struct super_block *sb,
+ struct ext4_new_group_data *group_data = flex_gd->groups;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+- int i;
++ int i, ret;
+
+ BUG_ON(flex_gd->count == 0 || group_data == NULL);
+ /*
+@@ -1272,6 +1272,11 @@ static void ext4_update_super(struct super_block *sb,
+ &sbi->s_flex_groups[flex_group].free_inodes);
+ }
+
++ /*
++ * Update the fs overhead information
++ */
++ ext4_calculate_overhead(sb);
++
+ if (test_opt(sb, DEBUG))
+ printk(KERN_DEBUG "EXT4-fs: added group %u:"
+ "%llu blocks(%llu free %llu reserved)\n", flex_gd->count,
+diff --git a/fs/ext4/super.c b/fs/ext4/super.c
+index eb7aa3e..78b7ede 100644
+--- a/fs/ext4/super.c
++++ b/fs/ext4/super.c
+@@ -3085,6 +3085,114 @@ static int set_journal_csum_feature_set(struct super_block *sb)
+ return ret;
+ }
+
++/*
++ * Note: calculating the overhead so we can be compatible with
++ * historical BSD practice is quite difficult in the face of
++ * clusters/bigalloc. This is because multiple metadata blocks from
++ * different block group can end up in the same allocation cluster.
++ * Calculating the exact overhead in the face of clustered allocation
++ * requires either O(all block bitmaps) in memory or O(number of block
++ * groups**2) in time. We will still calculate the superblock for
++ * older file systems --- and if we come across with a bigalloc file
++ * system with zero in s_overhead_clusters the estimate will be close to
++ * correct especially for very large cluster sizes --- but for newer
++ * file systems, it's better to calculate this figure once at mkfs
++ * time, and store it in the superblock. If the superblock value is
++ * present (even for non-bigalloc file systems), we will use it.
++ */
++static int count_overhead(struct super_block *sb, ext4_group_t grp,
++ char *buf)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_group_desc *gdp;
++ ext4_fsblk_t first_block, last_block, b;
++ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
++ int s, j, count = 0;
++
++ first_block = le32_to_cpu(sbi->s_es->s_first_data_block) +
++ (grp * EXT4_BLOCKS_PER_GROUP(sb));
++ last_block = first_block + EXT4_BLOCKS_PER_GROUP(sb) - 1;
++ for (i = 0; i < ngroups; i++) {
++ gdp = ext4_get_group_desc(sb, i, NULL);
++ b = ext4_block_bitmap(sb, gdp);
++ if (b >= first_block && b <= last_block) {
++ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
++ count++;
++ }
++ b = ext4_inode_bitmap(sb, gdp);
++ if (b >= first_block && b <= last_block) {
++ ext4_set_bit(EXT4_B2C(sbi, b - first_block), buf);
++ count++;
++ }
++ b = ext4_inode_table(sb, gdp);
++ if (b >= first_block && b + sbi->s_itb_per_group <= last_block)
++ for (j = 0; j < sbi->s_itb_per_group; j++, b++) {
++ int c = EXT4_B2C(sbi, b - first_block);
++ ext4_set_bit(c, buf);
++ count++;
++ }
++ if (i != grp)
++ continue;
++ s = 0;
++ if (ext4_bg_has_super(sb, grp)) {
++ ext4_set_bit(s++, buf);
++ count++;
++ }
++ for (j = ext4_bg_num_gdb(sb, grp); j > 0; j--) {
++ ext4_set_bit(EXT4_B2C(sbi, s++), buf);
++ count++;
++ }
++ }
++ if (!count)
++ return 0;
++ return EXT4_CLUSTERS_PER_GROUP(sb) -
++ ext4_count_free(buf, EXT4_CLUSTERS_PER_GROUP(sb) / 8);
++}
++
++/*
++ * Compute the overhead and stash it in sbi->s_overhead
++ */
++int ext4_calculate_overhead(struct super_block *sb)
++{
++ struct ext4_sb_info *sbi = EXT4_SB(sb);
++ struct ext4_super_block *es = sbi->s_es;
++ ext4_group_t i, ngroups = ext4_get_groups_count(sb);
++ ext4_fsblk_t overhead = 0;
++ char *buf = (char *) get_zeroed_page(GFP_KERNEL);
++
++ memset(buf, 0, PAGE_SIZE);
++ if (!buf)
++ return -ENOMEM;
++
++ /*
++ * Compute the overhead (FS structures). This is constant
++ * for a given filesystem unless the number of block groups
++ * changes so we cache the previous value until it does.
++ */
++
++ /*
++ * All of the blocks before first_data_block are overhead
++ */
++ overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
++
++ /*
++ * Add the overhead found in each block group
++ */
++ for (i = 0; i < ngroups; i++) {
++ int blks;
++
++ blks = count_overhead(sb, i, buf);
++ overhead += blks;
++ if (blks)
++ memset(buf, 0, PAGE_SIZE);
++ cond_resched();
++ }
++ sbi->s_overhead = overhead;
++ smp_wmb();
++ free_page((unsigned long) buf);
++ return 0;
++}
++
+ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+ {
+ char *orig_data = kstrdup(data, GFP_KERNEL);
+@@ -3735,6 +3843,18 @@ static int ext4_fill_super(struct super_block *sb, void *data, int silent)
+
+ no_journal:
+ /*
++ * Get the # of file system overhead blocks from the
++ * superblock if present.
++ */
++ if (es->s_overhead_clusters)
++ sbi->s_overhead = le32_to_cpu(es->s_overhead_clusters);
++ else {
++ ret = ext4_calculate_overhead(sb);
++ if (ret)
++ goto failed_mount_wq;
++ }
++
++ /*
+ * The maximum number of concurrent works can be high and
+ * concurrency isn't really necessary. Limit it to 1.
+ */
+@@ -4600,67 +4720,21 @@ restore_opts:
+ return err;
+ }
+
+-/*
+- * Note: calculating the overhead so we can be compatible with
+- * historical BSD practice is quite difficult in the face of
+- * clusters/bigalloc. This is because multiple metadata blocks from
+- * different block group can end up in the same allocation cluster.
+- * Calculating the exact overhead in the face of clustered allocation
+- * requires either O(all block bitmaps) in memory or O(number of block
+- * groups**2) in time. We will still calculate the superblock for
+- * older file systems --- and if we come across with a bigalloc file
+- * system with zero in s_overhead_clusters the estimate will be close to
+- * correct especially for very large cluster sizes --- but for newer
+- * file systems, it's better to calculate this figure once at mkfs
+- * time, and store it in the superblock. If the superblock value is
+- * present (even for non-bigalloc file systems), we will use it.
+- */
+ static int ext4_statfs(struct dentry *dentry, struct kstatfs *buf)
+ {
+ struct super_block *sb = dentry->d_sb;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ struct ext4_super_block *es = sbi->s_es;
+- struct ext4_group_desc *gdp;
++ ext4_fsblk_t overhead = 0;
+ u64 fsid;
+ s64 bfree;
+
+- if (test_opt(sb, MINIX_DF)) {
+- sbi->s_overhead_last = 0;
+- } else if (es->s_overhead_clusters) {
+- sbi->s_overhead_last = le32_to_cpu(es->s_overhead_clusters);
+- } else if (sbi->s_blocks_last != ext4_blocks_count(es)) {
+- ext4_group_t i, ngroups = ext4_get_groups_count(sb);
+- ext4_fsblk_t overhead = 0;
+-
+- /*
+- * Compute the overhead (FS structures). This is constant
+- * for a given filesystem unless the number of block groups
+- * changes so we cache the previous value until it does.
+- */
+-
+- /*
+- * All of the blocks before first_data_block are
+- * overhead
+- */
+- overhead = EXT4_B2C(sbi, le32_to_cpu(es->s_first_data_block));
+-
+- /*
+- * Add the overhead found in each block group
+- */
+- for (i = 0; i < ngroups; i++) {
+- gdp = ext4_get_group_desc(sb, i, NULL);
+- overhead += ext4_num_overhead_clusters(sb, i, gdp);
+- cond_resched();
+- }
+- sbi->s_overhead_last = overhead;
+- smp_wmb();
+- sbi->s_blocks_last = ext4_blocks_count(es);
+- }
++ if (!test_opt(sb, MINIX_DF))
++ overhead = sbi->s_overhead;
+
+ buf->f_type = EXT4_SUPER_MAGIC;
+ buf->f_bsize = sb->s_blocksize;
+- buf->f_blocks = (ext4_blocks_count(es) -
+- EXT4_C2B(sbi, sbi->s_overhead_last));
++ buf->f_blocks = ext4_blocks_count(es) - EXT4_C2B(sbi, sbi->s_overhead);
+ bfree = percpu_counter_sum_positive(&sbi->s_freeclusters_counter) -
+ percpu_counter_sum_positive(&sbi->s_dirtyclusters_counter);
+ /* prevent underflow in case that few free space is available */
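With the new ext4_calculate_overhead() the overhead is counted once, in clusters, and cached in sbi->s_overhead; ext4_statfs() then only has to convert it back to blocks with EXT4_C2B() before subtracting it from ext4_blocks_count(). A small standalone sketch of the block/cluster conversion this relies on, assuming the usual power-of-two cluster ratio and made-up sizes:

#include <stdio.h>

/* cluster_bits = log2(blocks per cluster); e.g. 4 => 16 blocks per cluster */
static unsigned long long b2c(unsigned long long blocks, int cluster_bits)
{
    return blocks >> cluster_bits;          /* like EXT4_B2C() */
}

static unsigned long long c2b(unsigned long long clusters, int cluster_bits)
{
    return clusters << cluster_bits;        /* like EXT4_C2B() */
}

int main(void)
{
    int cluster_bits = 4;                       /* 16 blocks per cluster */
    unsigned long long total_blocks = 1 << 20;  /* 1M blocks, made up */
    unsigned long long overhead_clusters = 2048;

    /* what statfs would report as f_blocks */
    unsigned long long f_blocks =
        total_blocks - c2b(overhead_clusters, cluster_bits);

    printf("overhead: %llu clusters = %llu blocks, f_blocks = %llu\n",
           overhead_clusters, c2b(overhead_clusters, cluster_bits), f_blocks);
    printf("round trip: %llu clusters\n",
           b2c(c2b(overhead_clusters, cluster_bits), cluster_bits));
    return 0;
}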
+diff --git a/fs/ext4/xattr.c b/fs/ext4/xattr.c
+index e56c9ed..2cdb98d 100644
+--- a/fs/ext4/xattr.c
++++ b/fs/ext4/xattr.c
+@@ -127,19 +127,16 @@ static __le32 ext4_xattr_block_csum(struct inode *inode,
+ struct ext4_xattr_header *hdr)
+ {
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+- struct ext4_inode_info *ei = EXT4_I(inode);
+ __u32 csum, old;
+
+ old = hdr->h_checksum;
+ hdr->h_checksum = 0;
+- if (le32_to_cpu(hdr->h_refcount) != 1) {
+- block_nr = cpu_to_le64(block_nr);
+- csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
+- sizeof(block_nr));
+- } else
+- csum = ei->i_csum_seed;
++ block_nr = cpu_to_le64(block_nr);
++ csum = ext4_chksum(sbi, sbi->s_csum_seed, (__u8 *)&block_nr,
++ sizeof(block_nr));
+ csum = ext4_chksum(sbi, csum, (__u8 *)hdr,
+ EXT4_BLOCK_SIZE(inode->i_sb));
++
+ hdr->h_checksum = old;
+ return cpu_to_le32(csum);
+ }
+diff --git a/fs/locks.c b/fs/locks.c
+index fce6238..82c3533 100644
+--- a/fs/locks.c
++++ b/fs/locks.c
+@@ -308,7 +308,7 @@ static int flock_make_lock(struct file *filp, struct file_lock **lock,
+ return 0;
+ }
+
+-static int assign_type(struct file_lock *fl, int type)
++static int assign_type(struct file_lock *fl, long type)
+ {
+ switch (type) {
+ case F_RDLCK:
+@@ -445,7 +445,7 @@ static const struct lock_manager_operations lease_manager_ops = {
+ /*
+ * Initialize a lease, use the default lock manager operations
+ */
+-static int lease_init(struct file *filp, int type, struct file_lock *fl)
++static int lease_init(struct file *filp, long type, struct file_lock *fl)
+ {
+ if (assign_type(fl, type) != 0)
+ return -EINVAL;
+@@ -463,7 +463,7 @@ static int lease_init(struct file *filp, int type, struct file_lock *fl)
+ }
+
+ /* Allocate a file_lock initialised to this type of lease */
+-static struct file_lock *lease_alloc(struct file *filp, int type)
++static struct file_lock *lease_alloc(struct file *filp, long type)
+ {
+ struct file_lock *fl = locks_alloc_lock();
+ int error = -ENOMEM;
+diff --git a/fs/nfs/file.c b/fs/nfs/file.c
+index a6708e6b..9075769 100644
+--- a/fs/nfs/file.c
++++ b/fs/nfs/file.c
+@@ -459,8 +459,11 @@ static int nfs_release_page(struct page *page, gfp_t gfp)
+
+ dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
+
+- /* Only do I/O if gfp is a superset of GFP_KERNEL */
+- if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL) {
++ /* Only do I/O if gfp is a superset of GFP_KERNEL, and we're not
++ * doing this memory reclaim for a fs-related allocation.
++ */
++ if (mapping && (gfp & GFP_KERNEL) == GFP_KERNEL &&
++ !(current->flags & PF_FSTRANS)) {
+ int how = FLUSH_SYNC;
+
+ /* Don't let kswapd deadlock waiting for OOM RPC calls */
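The extra condition above keeps nfs_release_page() from starting synchronous write-back when the task doing the reclaim is itself in the middle of filesystem work (PF_FSTRANS); the sunrpc hunks further down in this patch set and clear that flag around the RPC worker bodies. A minimal userspace analogue of the set-flag-around-work, check-flag-in-reclaim idea; the thread-local flag is made up:

#include <stdbool.h>
#include <stdio.h>

static __thread bool in_fs_work;   /* stands in for PF_FSTRANS */

static bool try_release_page(void)
{
    /* Only do the expensive, potentially re-entrant flush when the
     * current task is NOT already doing filesystem work. */
    if (in_fs_work) {
        printf("reclaim: skipping sync flush (inside fs work)\n");
        return false;
    }
    printf("reclaim: flushing page synchronously\n");
    return true;
}

static void rpc_worker_body(void)
{
    in_fs_work = true;        /* current->flags |= PF_FSTRANS; */
    try_release_page();       /* e.g. an allocation triggers reclaim here */
    in_fs_work = false;       /* current->flags &= ~PF_FSTRANS; */
}

int main(void)
{
    rpc_worker_body();
    try_release_page();       /* outside fs work: the flush is allowed */
    return 0;
}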
+diff --git a/fs/nfs/idmap.c b/fs/nfs/idmap.c
+index 864c51e..1b5058b 100644
+--- a/fs/nfs/idmap.c
++++ b/fs/nfs/idmap.c
+@@ -205,12 +205,18 @@ static int nfs_idmap_init_keyring(void)
+ if (ret < 0)
+ goto failed_put_key;
+
++ ret = register_key_type(&key_type_id_resolver_legacy);
++ if (ret < 0)
++ goto failed_reg_legacy;
++
+ set_bit(KEY_FLAG_ROOT_CAN_CLEAR, &keyring->flags);
+ cred->thread_keyring = keyring;
+ cred->jit_keyring = KEY_REQKEY_DEFL_THREAD_KEYRING;
+ id_resolver_cache = cred;
+ return 0;
+
++failed_reg_legacy:
++ unregister_key_type(&key_type_id_resolver);
+ failed_put_key:
+ key_put(keyring);
+ failed_put_cred:
+@@ -222,6 +228,7 @@ static void nfs_idmap_quit_keyring(void)
+ {
+ key_revoke(id_resolver_cache->thread_keyring);
+ unregister_key_type(&key_type_id_resolver);
++ unregister_key_type(&key_type_id_resolver_legacy);
+ put_cred(id_resolver_cache);
+ }
+
+@@ -385,7 +392,7 @@ static const struct rpc_pipe_ops idmap_upcall_ops = {
+ };
+
+ static struct key_type key_type_id_resolver_legacy = {
+- .name = "id_resolver",
++ .name = "id_legacy",
+ .instantiate = user_instantiate,
+ .match = user_match,
+ .revoke = user_revoke,
+@@ -674,6 +681,7 @@ static int nfs_idmap_legacy_upcall(struct key_construction *cons,
+ if (ret < 0)
+ goto out2;
+
++ BUG_ON(idmap->idmap_key_cons != NULL);
+ idmap->idmap_key_cons = cons;
+
+ ret = rpc_queue_upcall(idmap->idmap_pipe, msg);
+@@ -687,8 +695,7 @@ out2:
+ out1:
+ kfree(msg);
+ out0:
+- key_revoke(cons->key);
+- key_revoke(cons->authkey);
++ complete_request_key(cons, ret);
+ return ret;
+ }
+
+@@ -722,11 +729,18 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ {
+ struct rpc_inode *rpci = RPC_I(filp->f_path.dentry->d_inode);
+ struct idmap *idmap = (struct idmap *)rpci->private;
+- struct key_construction *cons = idmap->idmap_key_cons;
++ struct key_construction *cons;
+ struct idmap_msg im;
+ size_t namelen_in;
+ int ret;
+
++ /* If instantiation is successful, anyone waiting for key construction
++ * will have been woken up and someone else may now have used
++ * idmap_key_cons - so after this point we may no longer touch it.
++ */
++ cons = ACCESS_ONCE(idmap->idmap_key_cons);
++ idmap->idmap_key_cons = NULL;
++
+ if (mlen != sizeof(im)) {
+ ret = -ENOSPC;
+ goto out;
+@@ -739,7 +753,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+
+ if (!(im.im_status & IDMAP_STATUS_SUCCESS)) {
+ ret = mlen;
+- complete_request_key(idmap->idmap_key_cons, -ENOKEY);
++ complete_request_key(cons, -ENOKEY);
+ goto out_incomplete;
+ }
+
+@@ -756,7 +770,7 @@ idmap_pipe_downcall(struct file *filp, const char __user *src, size_t mlen)
+ }
+
+ out:
+- complete_request_key(idmap->idmap_key_cons, ret);
++ complete_request_key(cons, ret);
+ out_incomplete:
+ return ret;
+ }
+diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
+index 94effd5..e8ead04 100644
+--- a/fs/nfsd/nfs4state.c
++++ b/fs/nfsd/nfs4state.c
+@@ -1215,7 +1215,7 @@ static bool groups_equal(struct group_info *g1, struct group_info *g2)
+ return true;
+ }
+
+-static int
++static bool
+ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
+ {
+ if ((cr1->cr_flavor != cr2->cr_flavor)
+@@ -1227,7 +1227,7 @@ same_creds(struct svc_cred *cr1, struct svc_cred *cr2)
+ return true;
+ if (!cr1->cr_principal || !cr2->cr_principal)
+ return false;
+- return 0 == strcmp(cr1->cr_principal, cr1->cr_principal);
++ return 0 == strcmp(cr1->cr_principal, cr2->cr_principal);
+ }
+
+ static void gen_clid(struct nfs4_client *clp)
+diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
+index 4949667..6322df3 100644
+--- a/fs/nfsd/nfs4xdr.c
++++ b/fs/nfsd/nfs4xdr.c
+@@ -2259,7 +2259,7 @@ out_acl:
+ if (bmval0 & FATTR4_WORD0_CASE_INSENSITIVE) {
+ if ((buflen -= 4) < 0)
+ goto out_resource;
+- WRITE32(1);
++ WRITE32(0);
+ }
+ if (bmval0 & FATTR4_WORD0_CASE_PRESERVING) {
+ if ((buflen -= 4) < 0)
+diff --git a/fs/select.c b/fs/select.c
+index bae3215..db14c78 100644
+--- a/fs/select.c
++++ b/fs/select.c
+@@ -345,8 +345,8 @@ static int max_select_fd(unsigned long n, fd_set_bits *fds)
+ struct fdtable *fdt;
+
+ /* handle last in-complete long-word first */
+- set = ~(~0UL << (n & (__NFDBITS-1)));
+- n /= __NFDBITS;
++ set = ~(~0UL << (n & (BITS_PER_LONG-1)));
++ n /= BITS_PER_LONG;
+ fdt = files_fdtable(current->files);
+ open_fds = fdt->open_fds + n;
+ max = 0;
+@@ -373,7 +373,7 @@ get_max:
+ max++;
+ set >>= 1;
+ } while (set);
+- max += n * __NFDBITS;
++ max += n * BITS_PER_LONG;
+ }
+
+ return max;
+@@ -435,11 +435,11 @@ int do_select(int n, fd_set_bits *fds, struct timespec *end_time)
+ in = *inp++; out = *outp++; ex = *exp++;
+ all_bits = in | out | ex;
+ if (all_bits == 0) {
+- i += __NFDBITS;
++ i += BITS_PER_LONG;
+ continue;
+ }
+
+- for (j = 0; j < __NFDBITS; ++j, ++i, bit <<= 1) {
++ for (j = 0; j < BITS_PER_LONG; ++j, ++i, bit <<= 1) {
+ int fput_needed;
+ if (i >= n)
+ break;
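The select changes swap the userspace-only __NFDBITS macro for BITS_PER_LONG without changing the arithmetic: an fd lives in word fd / BITS_PER_LONG at bit fd % BITS_PER_LONG, and the mask for the final, possibly partial, word keeps only the low n % BITS_PER_LONG bits. A tiny standalone demonstration of that mask expression:

#include <stdio.h>

#define BITS_PER_LONG (8 * (int)sizeof(long))

int main(void)
{
    int n = 70;   /* highest fd of interest + 1, made up */
    int fd = 69;

    /* which word and bit an fd occupies */
    printf("fd %d -> word %d, bit %d\n",
           fd, fd / BITS_PER_LONG, fd % BITS_PER_LONG);

    /* mask for the final, possibly partial, word: keeps bits
     * 0 .. (n % BITS_PER_LONG) - 1, and is 0 when n is a multiple of
     * BITS_PER_LONG, i.e. when there is no partial word at all. */
    unsigned long set = ~(~0UL << (n & (BITS_PER_LONG - 1)));
    printf("partial-word mask for n=%d: %#lx\n", n, set);
    return 0;
}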
+diff --git a/fs/udf/super.c b/fs/udf/super.c
+index 8d86a87..e660ffd 100644
+--- a/fs/udf/super.c
++++ b/fs/udf/super.c
+@@ -1283,7 +1283,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
+ BUG_ON(ident != TAG_IDENT_LVD);
+ lvd = (struct logicalVolDesc *)bh->b_data;
+ table_len = le32_to_cpu(lvd->mapTableLength);
+- if (sizeof(*lvd) + table_len > sb->s_blocksize) {
++ if (table_len > sb->s_blocksize - sizeof(*lvd)) {
+ udf_err(sb, "error loading logical volume descriptor: "
+ "Partition table too long (%u > %lu)\n", table_len,
+ sb->s_blocksize - sizeof(*lvd));
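Rewriting the UDF check as `table_len > sb->s_blocksize - sizeof(*lvd)` avoids the addition in the old form, which a hostile on-disk table_len could wrap around so that the bogus length slipped past the test; the subtraction is safe because the descriptor header always fits in a block. A standalone illustration with 32-bit arithmetic and made-up values:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t hdr_len   = 440;              /* roughly sizeof(*lvd) */
    uint32_t blocksize = 2048;
    uint32_t table_len = UINT32_MAX - 100; /* hostile on-disk value */

    /* Old style: the addition wraps around and the check passes. */
    if ((uint32_t)(hdr_len + table_len) > blocksize)
        puts("old check: rejected");
    else
        puts("old check: ACCEPTED a bogus length (overflow)");

    /* New style: no addition, so no wrap; hdr_len <= blocksize holds. */
    if (table_len > blocksize - hdr_len)
        puts("new check: rejected");
    else
        puts("new check: accepted");
    return 0;
}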
+diff --git a/include/linux/cpu.h b/include/linux/cpu.h
+index 2e9b9eb..ce7a074 100644
+--- a/include/linux/cpu.h
++++ b/include/linux/cpu.h
+@@ -73,8 +73,9 @@ enum {
+ /* migration should happen before other stuff but after perf */
+ CPU_PRI_PERF = 20,
+ CPU_PRI_MIGRATION = 10,
+- /* prepare workqueues for other notifiers */
+- CPU_PRI_WORKQUEUE = 5,
++ /* bring up workqueues before normal notifiers and down after */
++ CPU_PRI_WORKQUEUE_UP = 5,
++ CPU_PRI_WORKQUEUE_DOWN = -5,
+ };
+
+ #define CPU_ONLINE 0x0002 /* CPU (unsigned)v is up */
+diff --git a/include/linux/mm.h b/include/linux/mm.h
+index b36d08c..f9f279c 100644
+--- a/include/linux/mm.h
++++ b/include/linux/mm.h
+@@ -1591,6 +1591,7 @@ void vmemmap_populate_print_last(void);
+ enum mf_flags {
+ MF_COUNT_INCREASED = 1 << 0,
+ MF_ACTION_REQUIRED = 1 << 1,
++ MF_MUST_KILL = 1 << 2,
+ };
+ extern int memory_failure(unsigned long pfn, int trapno, int flags);
+ extern void memory_failure_queue(unsigned long pfn, int trapno, int flags);
+diff --git a/include/linux/net.h b/include/linux/net.h
+index e9ac2df..dc95700 100644
+--- a/include/linux/net.h
++++ b/include/linux/net.h
+@@ -72,6 +72,7 @@ struct net;
+ #define SOCK_NOSPACE 2
+ #define SOCK_PASSCRED 3
+ #define SOCK_PASSSEC 4
++#define SOCK_EXTERNALLY_ALLOCATED 5
+
+ #ifndef ARCH_HAS_SOCKET_TYPES
+ /**
+diff --git a/include/linux/posix_types.h b/include/linux/posix_types.h
+index f04c98c..988f76e 100644
+--- a/include/linux/posix_types.h
++++ b/include/linux/posix_types.h
+@@ -15,26 +15,14 @@
+ */
+
+ /*
+- * Those macros may have been defined in <gnu/types.h>. But we always
+- * use the ones here.
++ * This macro may have been defined in <gnu/types.h>. But we always
++ * use the one here.
+ */
+-#undef __NFDBITS
+-#define __NFDBITS (8 * sizeof(unsigned long))
+-
+ #undef __FD_SETSIZE
+ #define __FD_SETSIZE 1024
+
+-#undef __FDSET_LONGS
+-#define __FDSET_LONGS (__FD_SETSIZE/__NFDBITS)
+-
+-#undef __FDELT
+-#define __FDELT(d) ((d) / __NFDBITS)
+-
+-#undef __FDMASK
+-#define __FDMASK(d) (1UL << ((d) % __NFDBITS))
+-
+ typedef struct {
+- unsigned long fds_bits [__FDSET_LONGS];
++ unsigned long fds_bits[__FD_SETSIZE / (8 * sizeof(long))];
+ } __kernel_fd_set;
+
+ /* Type of a signal handler. */
+diff --git a/include/linux/time.h b/include/linux/time.h
+index 179f4d6..c81c5e4 100644
+--- a/include/linux/time.h
++++ b/include/linux/time.h
+@@ -257,14 +257,6 @@ static __always_inline void timespec_add_ns(struct timespec *a, u64 ns)
+
+ #endif /* __KERNEL__ */
+
+-#define NFDBITS __NFDBITS
+-
+-#define FD_SETSIZE __FD_SETSIZE
+-#define FD_SET(fd,fdsetp) __FD_SET(fd,fdsetp)
+-#define FD_CLR(fd,fdsetp) __FD_CLR(fd,fdsetp)
+-#define FD_ISSET(fd,fdsetp) __FD_ISSET(fd,fdsetp)
+-#define FD_ZERO(fdsetp) __FD_ZERO(fdsetp)
+-
+ /*
+ * Names of the interval timers, and structure
+ * defining a timer setting:
+diff --git a/include/linux/usb.h b/include/linux/usb.h
+index dea39dc..72a0e7d 100644
+--- a/include/linux/usb.h
++++ b/include/linux/usb.h
+@@ -556,7 +556,6 @@ struct usb_device {
+ struct usb3_lpm_parameters u1_params;
+ struct usb3_lpm_parameters u2_params;
+ unsigned lpm_disable_count;
+- unsigned hub_initiated_lpm_disable_count;
+ };
+ #define to_usb_device(d) container_of(d, struct usb_device, dev)
+
+diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
+index dc35d86..362e0d9 100644
+--- a/include/target/target_core_base.h
++++ b/include/target/target_core_base.h
+@@ -220,6 +220,7 @@ enum tcm_sense_reason_table {
+ TCM_CHECK_CONDITION_UNIT_ATTENTION = 0x0e,
+ TCM_CHECK_CONDITION_NOT_READY = 0x0f,
+ TCM_RESERVATION_CONFLICT = 0x10,
++ TCM_ADDRESS_OUT_OF_RANGE = 0x11,
+ };
+
+ enum target_sc_flags_table {
+diff --git a/kernel/cgroup.c b/kernel/cgroup.c
+index b303dfc..15462a0 100644
+--- a/kernel/cgroup.c
++++ b/kernel/cgroup.c
+@@ -954,7 +954,7 @@ static int cgroup_rm_file(struct cgroup *cgrp, const struct cftype *cft)
+
+ dget(d);
+ d_delete(d);
+- simple_unlink(d->d_inode, d);
++ simple_unlink(cgrp->dentry->d_inode, d);
+ list_del_init(&cfe->node);
+ dput(d);
+
+diff --git a/kernel/exit.c b/kernel/exit.c
+index 2f59cc3..46ce8da 100644
+--- a/kernel/exit.c
++++ b/kernel/exit.c
+@@ -483,7 +483,7 @@ static void close_files(struct files_struct * files)
+ rcu_read_unlock();
+ for (;;) {
+ unsigned long set;
+- i = j * __NFDBITS;
++ i = j * BITS_PER_LONG;
+ if (i >= fdt->max_fds)
+ break;
+ set = fdt->open_fds[j++];
+diff --git a/kernel/futex.c b/kernel/futex.c
+index e2b0fb9..3717e7b 100644
+--- a/kernel/futex.c
++++ b/kernel/futex.c
+@@ -2231,11 +2231,11 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb,
+ * @uaddr2: the pi futex we will take prior to returning to user-space
+ *
+ * The caller will wait on uaddr and will be requeued by futex_requeue() to
+- * uaddr2 which must be PI aware. Normal wakeup will wake on uaddr2 and
+- * complete the acquisition of the rt_mutex prior to returning to userspace.
+- * This ensures the rt_mutex maintains an owner when it has waiters; without
+- * one, the pi logic wouldn't know which task to boost/deboost, if there was a
+- * need to.
++ * uaddr2 which must be PI aware and unique from uaddr. Normal wakeup will wake
++ * on uaddr2 and complete the acquisition of the rt_mutex prior to returning to
++ * userspace. This ensures the rt_mutex maintains an owner when it has waiters;
++ * without one, the pi logic would not know which task to boost/deboost, if
++ * there was a need to.
+ *
+ * We call schedule in futex_wait_queue_me() when we enqueue and return there
+ * via the following:
+@@ -2272,6 +2272,9 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ struct futex_q q = futex_q_init;
+ int res, ret;
+
++ if (uaddr == uaddr2)
++ return -EINVAL;
++
+ if (!bitset)
+ return -EINVAL;
+
+@@ -2343,7 +2346,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * signal. futex_unlock_pi() will not destroy the lock_ptr nor
+ * the pi_state.
+ */
+- WARN_ON(!&q.pi_state);
++ WARN_ON(!q.pi_state);
+ pi_mutex = &q.pi_state->pi_mutex;
+ ret = rt_mutex_finish_proxy_lock(pi_mutex, to, &rt_waiter, 1);
+ debug_rt_mutex_free_waiter(&rt_waiter);
+@@ -2370,7 +2373,7 @@ static int futex_wait_requeue_pi(u32 __user *uaddr, unsigned int flags,
+ * fault, unlock the rt_mutex and return the fault to userspace.
+ */
+ if (ret == -EFAULT) {
+- if (rt_mutex_owner(pi_mutex) == current)
++ if (pi_mutex && rt_mutex_owner(pi_mutex) == current)
+ rt_mutex_unlock(pi_mutex);
+ } else if (ret == -EINTR) {
+ /*
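The futex hunk fixes a check that could never fire: `WARN_ON(!&q.pi_state)` tested the address of a structure member, which is never NULL, whereas the intent was to test the pointer stored in it (the following hunk likewise guards rt_mutex_owner() against a NULL pi_mutex). The difference in two asserts, purely illustrative:

#include <assert.h>
#include <stddef.h>

struct q { void *pi_state; };

int main(void)
{
    struct q q = { .pi_state = NULL };

    assert(&q.pi_state != NULL);  /* address of a member: always true */
    assert(q.pi_state == NULL);   /* the stored pointer: this is what
                                     the WARN_ON was meant to check */
    return 0;
}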
+diff --git a/kernel/power/hibernate.c b/kernel/power/hibernate.c
+index 238025f..4d46daf 100644
+--- a/kernel/power/hibernate.c
++++ b/kernel/power/hibernate.c
+@@ -353,6 +353,7 @@ int hibernation_snapshot(int platform_mode)
+ }
+
+ suspend_console();
++ ftrace_stop();
+ pm_restrict_gfp_mask();
+
+ error = dpm_suspend(PMSG_FREEZE);
+@@ -378,6 +379,7 @@ int hibernation_snapshot(int platform_mode)
+ if (error || !in_suspend)
+ pm_restore_gfp_mask();
+
++ ftrace_start();
+ resume_console();
+ dpm_complete(msg);
+
+@@ -480,6 +482,7 @@ int hibernation_restore(int platform_mode)
+
+ pm_prepare_console();
+ suspend_console();
++ ftrace_stop();
+ pm_restrict_gfp_mask();
+ error = dpm_suspend_start(PMSG_QUIESCE);
+ if (!error) {
+@@ -487,6 +490,7 @@ int hibernation_restore(int platform_mode)
+ dpm_resume_end(PMSG_RECOVER);
+ }
+ pm_restore_gfp_mask();
++ ftrace_start();
+ resume_console();
+ pm_restore_console();
+ return error;
+@@ -513,6 +517,7 @@ int hibernation_platform_enter(void)
+
+ entering_platform_hibernation = true;
+ suspend_console();
++ ftrace_stop();
+ error = dpm_suspend_start(PMSG_HIBERNATE);
+ if (error) {
+ if (hibernation_ops->recover)
+@@ -556,6 +561,7 @@ int hibernation_platform_enter(void)
+ Resume_devices:
+ entering_platform_hibernation = false;
+ dpm_resume_end(PMSG_RESTORE);
++ ftrace_start();
+ resume_console();
+
+ Close:
+diff --git a/kernel/power/suspend.c b/kernel/power/suspend.c
+index 396d262..c8b7446 100644
+--- a/kernel/power/suspend.c
++++ b/kernel/power/suspend.c
+@@ -24,6 +24,7 @@
+ #include <linux/export.h>
+ #include <linux/suspend.h>
+ #include <linux/syscore_ops.h>
++#include <linux/ftrace.h>
+ #include <trace/events/power.h>
+
+ #include "power.h"
+@@ -212,6 +213,7 @@ int suspend_devices_and_enter(suspend_state_t state)
+ goto Close;
+ }
+ suspend_console();
++ ftrace_stop();
+ suspend_test_start();
+ error = dpm_suspend_start(PMSG_SUSPEND);
+ if (error) {
+@@ -231,6 +233,7 @@ int suspend_devices_and_enter(suspend_state_t state)
+ suspend_test_start();
+ dpm_resume_end(PMSG_RESUME);
+ suspend_test_finish("resume devices");
++ ftrace_start();
+ resume_console();
+ Close:
+ if (suspend_ops->end)
+diff --git a/kernel/power/wakelock.c b/kernel/power/wakelock.c
+index c8fba33..8f50de3 100644
+--- a/kernel/power/wakelock.c
++++ b/kernel/power/wakelock.c
+@@ -9,6 +9,7 @@
+ * manipulate wakelocks on Android.
+ */
+
++#include <linux/capability.h>
+ #include <linux/ctype.h>
+ #include <linux/device.h>
+ #include <linux/err.h>
+@@ -188,6 +189,9 @@ int pm_wake_lock(const char *buf)
+ size_t len;
+ int ret = 0;
+
++ if (!capable(CAP_BLOCK_SUSPEND))
++ return -EPERM;
++
+ while (*str && !isspace(*str))
+ str++;
+
+@@ -231,6 +235,9 @@ int pm_wake_unlock(const char *buf)
+ size_t len;
+ int ret = 0;
+
++ if (!capable(CAP_BLOCK_SUSPEND))
++ return -EPERM;
++
+ len = strlen(buf);
+ if (!len)
+ return -EINVAL;
+diff --git a/kernel/printk.c b/kernel/printk.c
+index ac4bc9e..21bea76 100644
+--- a/kernel/printk.c
++++ b/kernel/printk.c
+@@ -235,7 +235,8 @@ static u32 log_next_idx;
+ static u64 clear_seq;
+ static u32 clear_idx;
+
+-#define LOG_LINE_MAX 1024
++#define PREFIX_MAX 32
++#define LOG_LINE_MAX 1024 - PREFIX_MAX
+
+ /* record buffer */
+ #if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
+@@ -876,7 +877,7 @@ static size_t msg_print_text(const struct log *msg, enum log_flags prev,
+
+ if (buf) {
+ if (print_prefix(msg, syslog, NULL) +
+- text_len + 1>= size - len)
++ text_len + 1 >= size - len)
+ break;
+
+ if (prefix)
+@@ -907,7 +908,7 @@ static int syslog_print(char __user *buf, int size)
+ struct log *msg;
+ int len = 0;
+
+- text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
++ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+@@ -930,7 +931,8 @@ static int syslog_print(char __user *buf, int size)
+
+ skip = syslog_partial;
+ msg = log_from_idx(syslog_idx);
+- n = msg_print_text(msg, syslog_prev, true, text, LOG_LINE_MAX);
++ n = msg_print_text(msg, syslog_prev, true, text,
++ LOG_LINE_MAX + PREFIX_MAX);
+ if (n - syslog_partial <= size) {
+ /* message fits into buffer, move forward */
+ syslog_idx = log_next(syslog_idx);
+@@ -969,7 +971,7 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ char *text;
+ int len = 0;
+
+- text = kmalloc(LOG_LINE_MAX, GFP_KERNEL);
++ text = kmalloc(LOG_LINE_MAX + PREFIX_MAX, GFP_KERNEL);
+ if (!text)
+ return -ENOMEM;
+
+@@ -1022,7 +1024,8 @@ static int syslog_print_all(char __user *buf, int size, bool clear)
+ struct log *msg = log_from_idx(idx);
+ int textlen;
+
+- textlen = msg_print_text(msg, prev, true, text, LOG_LINE_MAX);
++ textlen = msg_print_text(msg, prev, true, text,
++ LOG_LINE_MAX + PREFIX_MAX);
+ if (textlen < 0) {
+ len = textlen;
+ break;
+@@ -1352,15 +1355,15 @@ static struct cont {
+ bool flushed:1; /* buffer sealed and committed */
+ } cont;
+
+-static void cont_flush(void)
++static void cont_flush(enum log_flags flags)
+ {
+ if (cont.flushed)
+ return;
+ if (cont.len == 0)
+ return;
+
+- log_store(cont.facility, cont.level, LOG_NOCONS, cont.ts_nsec,
+- NULL, 0, cont.buf, cont.len);
++ log_store(cont.facility, cont.level, LOG_NOCONS | flags,
++ cont.ts_nsec, NULL, 0, cont.buf, cont.len);
+
+ cont.flushed = true;
+ }
+@@ -1371,7 +1374,8 @@ static bool cont_add(int facility, int level, const char *text, size_t len)
+ return false;
+
+ if (cont.len + len > sizeof(cont.buf)) {
+- cont_flush();
++ /* the line gets too long, split it up in separate records */
++ cont_flush(LOG_CONT);
+ return false;
+ }
+
+@@ -1507,7 +1511,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+ * or another task also prints continuation lines.
+ */
+ if (cont.len && (lflags & LOG_PREFIX || cont.owner != current))
+- cont_flush();
++ cont_flush(0);
+
+ /* buffer line if possible, otherwise store it right away */
+ if (!cont_add(facility, level, text, text_len))
+@@ -1525,7 +1529,7 @@ asmlinkage int vprintk_emit(int facility, int level,
+ if (cont.len && cont.owner == current) {
+ if (!(lflags & LOG_PREFIX))
+ stored = cont_add(facility, level, text, text_len);
+- cont_flush();
++ cont_flush(0);
+ }
+
+ if (!stored)
+@@ -1618,7 +1622,8 @@ EXPORT_SYMBOL(printk);
+
+ #else
+
+-#define LOG_LINE_MAX 0
++#define LOG_LINE_MAX 0
++#define PREFIX_MAX 0
+ static struct cont {
+ size_t len;
+ size_t cons;
+@@ -1923,7 +1928,7 @@ static enum log_flags console_prev;
+ */
+ void console_unlock(void)
+ {
+- static char text[LOG_LINE_MAX];
++ static char text[LOG_LINE_MAX + PREFIX_MAX];
+ static u64 seen_seq;
+ unsigned long flags;
+ bool wake_klogd = false;
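After this change a stored message is limited to LOG_LINE_MAX = 1024 - PREFIX_MAX bytes, and every buffer that formats a record for syslog or the console is sized LOG_LINE_MAX + PREFIX_MAX, so the longest message plus its prefix always fits in 1024 bytes. The sizing in isolation, with the macro parenthesized here for safety:

#include <stdio.h>

#define PREFIX_MAX   32
#define LOG_LINE_MAX (1024 - PREFIX_MAX)   /* longest stored message text */

int main(void)
{
    char text[LOG_LINE_MAX + PREFIX_MAX];  /* formatting buffer: 1024 bytes */

    printf("message limit: %d, prefix headroom: %d, buffer: %zu\n",
           LOG_LINE_MAX, PREFIX_MAX, sizeof(text));
    return 0;
}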
+diff --git a/kernel/workqueue.c b/kernel/workqueue.c
+index 9a3128d..bc63253 100644
+--- a/kernel/workqueue.c
++++ b/kernel/workqueue.c
+@@ -3590,6 +3590,41 @@ static int __devinit workqueue_cpu_callback(struct notifier_block *nfb,
+ return notifier_from_errno(0);
+ }
+
++/*
++ * Workqueues should be brought up before normal priority CPU notifiers.
++ * This will be registered high priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_up_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_UP_PREPARE:
++ case CPU_UP_CANCELED:
++ case CPU_DOWN_FAILED:
++ case CPU_ONLINE:
++ return workqueue_cpu_callback(nfb, action, hcpu);
++ }
++ return NOTIFY_OK;
++}
++
++/*
++ * Workqueues should be brought down after normal priority CPU notifiers.
++ * This will be registered as low priority CPU notifier.
++ */
++static int __devinit workqueue_cpu_down_callback(struct notifier_block *nfb,
++ unsigned long action,
++ void *hcpu)
++{
++ switch (action & ~CPU_TASKS_FROZEN) {
++ case CPU_DOWN_PREPARE:
++ case CPU_DYING:
++ case CPU_POST_DEAD:
++ return workqueue_cpu_callback(nfb, action, hcpu);
++ }
++ return NOTIFY_OK;
++}
++
+ #ifdef CONFIG_SMP
+
+ struct work_for_cpu {
+@@ -3783,7 +3818,8 @@ static int __init init_workqueues(void)
+ unsigned int cpu;
+ int i;
+
+- cpu_notifier(workqueue_cpu_callback, CPU_PRI_WORKQUEUE);
++ cpu_notifier(workqueue_cpu_up_callback, CPU_PRI_WORKQUEUE_UP);
++ cpu_notifier(workqueue_cpu_down_callback, CPU_PRI_WORKQUEUE_DOWN);
+
+ /* initialize gcwqs */
+ for_each_gcwq_cpu(cpu) {
+diff --git a/mm/memory-failure.c b/mm/memory-failure.c
+index ab1e714..de4ce70 100644
+--- a/mm/memory-failure.c
++++ b/mm/memory-failure.c
+@@ -345,14 +345,14 @@ static void add_to_kill(struct task_struct *tsk, struct page *p,
+ * Also when FAIL is set do a force kill because something went
+ * wrong earlier.
+ */
+-static void kill_procs(struct list_head *to_kill, int doit, int trapno,
++static void kill_procs(struct list_head *to_kill, int forcekill, int trapno,
+ int fail, struct page *page, unsigned long pfn,
+ int flags)
+ {
+ struct to_kill *tk, *next;
+
+ list_for_each_entry_safe (tk, next, to_kill, nd) {
+- if (doit) {
++ if (forcekill) {
+ /*
+ * In case something went wrong with munmapping
+ * make sure the process doesn't catch the
+@@ -858,7 +858,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ struct address_space *mapping;
+ LIST_HEAD(tokill);
+ int ret;
+- int kill = 1;
++ int kill = 1, forcekill;
+ struct page *hpage = compound_head(p);
+ struct page *ppage;
+
+@@ -888,7 +888,7 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * be called inside page lock (it's recommended but not enforced).
+ */
+ mapping = page_mapping(hpage);
+- if (!PageDirty(hpage) && mapping &&
++ if (!(flags & MF_MUST_KILL) && !PageDirty(hpage) && mapping &&
+ mapping_cap_writeback_dirty(mapping)) {
+ if (page_mkclean(hpage)) {
+ SetPageDirty(hpage);
+@@ -965,12 +965,14 @@ static int hwpoison_user_mappings(struct page *p, unsigned long pfn,
+ * Now that the dirty bit has been propagated to the
+ * struct page and all unmaps done we can decide if
+ * killing is needed or not. Only kill when the page
+- * was dirty, otherwise the tokill list is merely
++ * was dirty or the process is not restartable,
++ * otherwise the tokill list is merely
+ * freed. When there was a problem unmapping earlier
+ * use a more force-full uncatchable kill to prevent
+ * any accesses to the poisoned memory.
+ */
+- kill_procs(&tokill, !!PageDirty(ppage), trapno,
++ forcekill = PageDirty(ppage) || (flags & MF_MUST_KILL);
++ kill_procs(&tokill, forcekill, trapno,
+ ret != SWAP_SUCCESS, p, pfn, flags);
+
+ return ret;
+diff --git a/net/compat.c b/net/compat.c
+index 1b96281..74ed1d7 100644
+--- a/net/compat.c
++++ b/net/compat.c
+@@ -221,6 +221,8 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
+ {
+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
+ struct compat_cmsghdr cmhdr;
++ struct compat_timeval ctv;
++ struct compat_timespec cts[3];
+ int cmlen;
+
+ if (cm == NULL || kmsg->msg_controllen < sizeof(*cm)) {
+@@ -229,8 +231,6 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
+ }
+
+ if (!COMPAT_USE_64BIT_TIME) {
+- struct compat_timeval ctv;
+- struct compat_timespec cts[3];
+ if (level == SOL_SOCKET && type == SCM_TIMESTAMP) {
+ struct timeval *tv = (struct timeval *)data;
+ ctv.tv_sec = tv->tv_sec;
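In put_cmsg_compat() the data pointer can be redirected to the converted compat_timeval/compat_timespec values and is then copied out after the `if (!COMPAT_USE_64BIT_TIME)` block ends, so those temporaries must outlive the block; the hunk hoists them to function scope. A standalone illustration of the dangling inner-scope pointer being avoided; the conversion here is made up:

#include <stdio.h>
#include <string.h>

static void copy_out(const void *data, size_t len)
{
    char out[sizeof(long)];
    memcpy(out, data, len);            /* stands in for copy_to_user() */
    printf("copied %zu bytes\n", len);
}

int main(void)
{
    long raw = 42;
    const void *data = &raw;
    size_t len = sizeof(raw);

    long converted;                    /* function scope: stays alive */
    int need_conversion = 1;
    if (need_conversion) {
        converted = raw / 2;           /* had this been declared inside the
                                          block, 'data' would dangle below */
        data = &converted;
        len = sizeof(converted);
    }

    copy_out(data, len);               /* uses 'data' after the block ends */
    return 0;
}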
+diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
+index 21318d1..23e3f66 100644
+--- a/net/core/rtnetlink.c
++++ b/net/core/rtnetlink.c
+@@ -674,6 +674,12 @@ static void set_operstate(struct net_device *dev, unsigned char transition)
+ }
+ }
+
++static unsigned int rtnl_dev_get_flags(const struct net_device *dev)
++{
++ return (dev->flags & ~(IFF_PROMISC | IFF_ALLMULTI)) |
++ (dev->gflags & (IFF_PROMISC | IFF_ALLMULTI));
++}
++
+ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ const struct ifinfomsg *ifm)
+ {
+@@ -682,7 +688,7 @@ static unsigned int rtnl_dev_combine_flags(const struct net_device *dev,
+ /* bugwards compatibility: ifi_change == 0 is treated as ~0 */
+ if (ifm->ifi_change)
+ flags = (flags & ifm->ifi_change) |
+- (dev->flags & ~ifm->ifi_change);
++ (rtnl_dev_get_flags(dev) & ~ifm->ifi_change);
+
+ return flags;
+ }
+diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
+index 3ba605f..6fcd885 100644
+--- a/net/ipv4/tcp.c
++++ b/net/ipv4/tcp.c
+@@ -2625,7 +2625,10 @@ static int do_tcp_setsockopt(struct sock *sk, int level,
+ /* Cap the max timeout in ms TCP will retry/retrans
+ * before giving up and aborting (ETIMEDOUT) a connection.
+ */
+- icsk->icsk_user_timeout = msecs_to_jiffies(val);
++ if (val < 0)
++ err = -EINVAL;
++ else
++ icsk->icsk_user_timeout = msecs_to_jiffies(val);
+ break;
+ default:
+ err = -ENOPROTOOPT;
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index b224eb8..05fe1f4 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -5602,7 +5602,9 @@ int tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
+ if (tp->copied_seq == tp->rcv_nxt &&
+ len - tcp_header_len <= tp->ucopy.len) {
+ #ifdef CONFIG_NET_DMA
+- if (tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
++ if (tp->ucopy.task == current &&
++ sock_owned_by_user(sk) &&
++ tcp_dma_try_early_copy(sk, skb, tcp_header_len)) {
+ copied_early = 1;
+ eaten = 1;
+ }
+diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c
+index 0db5d34..95ae431 100644
+--- a/net/mac80211/mlme.c
++++ b/net/mac80211/mlme.c
+@@ -1805,7 +1805,8 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ if (status_code != WLAN_STATUS_SUCCESS) {
+ printk(KERN_DEBUG "%s: %pM denied authentication (status %d)\n",
+ sdata->name, mgmt->sa, status_code);
+- goto out;
++ ieee80211_destroy_auth_data(sdata, false);
++ return RX_MGMT_CFG80211_RX_AUTH;
+ }
+
+ switch (ifmgd->auth_data->algorithm) {
+@@ -1827,7 +1828,6 @@ ieee80211_rx_mgmt_auth(struct ieee80211_sub_if_data *sdata,
+ }
+
+ printk(KERN_DEBUG "%s: authenticated\n", sdata->name);
+- out:
+ ifmgd->auth_data->done = true;
+ ifmgd->auth_data->timeout = jiffies + IEEE80211_AUTH_WAIT_ASSOC;
+ run_again(ifmgd, ifmgd->auth_data->timeout);
+diff --git a/net/mac80211/tx.c b/net/mac80211/tx.c
+index e453212..85cf32d 100644
+--- a/net/mac80211/tx.c
++++ b/net/mac80211/tx.c
+@@ -2733,7 +2733,7 @@ EXPORT_SYMBOL(ieee80211_get_buffered_bc);
+ void ieee80211_tx_skb_tid(struct ieee80211_sub_if_data *sdata,
+ struct sk_buff *skb, int tid)
+ {
+- int ac = ieee802_1d_to_ac[tid];
++ int ac = ieee802_1d_to_ac[tid & 7];
+
+ skb_set_mac_header(skb, 0);
+ skb_set_network_header(skb, 0);
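ieee802_1d_to_ac[] has one slot per 802.1d priority (0 through 7), so masking the caller-supplied TID with 7 keeps a bad value from indexing past the end of the table. A tiny sketch of the clamp; the table contents below are placeholders, not the real mapping:

#include <stdio.h>

/* placeholder 8-entry priority -> access-category table */
static const int prio_to_ac[8] = { 2, 3, 3, 2, 1, 1, 0, 0 };

static int ac_for_tid(int tid)
{
    return prio_to_ac[tid & 7];   /* any tid, even garbage, stays in range */
}

int main(void)
{
    printf("tid 5  -> ac %d\n", ac_for_tid(5));
    printf("tid 13 -> ac %d\n", ac_for_tid(13));  /* 13 & 7 == 5 */
    return 0;
}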
+diff --git a/net/mac80211/util.c b/net/mac80211/util.c
+index 8dd4712..f564b5e 100644
+--- a/net/mac80211/util.c
++++ b/net/mac80211/util.c
+@@ -268,6 +268,10 @@ EXPORT_SYMBOL(ieee80211_ctstoself_duration);
+ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
+ {
+ struct ieee80211_sub_if_data *sdata;
++ int n_acs = IEEE80211_NUM_ACS;
++
++ if (local->hw.queues < IEEE80211_NUM_ACS)
++ n_acs = 1;
+
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int ac;
+@@ -279,7 +283,7 @@ void ieee80211_propagate_queue_wake(struct ieee80211_local *local, int queue)
+ local->queue_stop_reasons[sdata->vif.cab_queue] != 0)
+ continue;
+
+- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++ for (ac = 0; ac < n_acs; ac++) {
+ int ac_queue = sdata->vif.hw_queue[ac];
+
+ if (ac_queue == queue ||
+@@ -341,6 +345,7 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
+ {
+ struct ieee80211_local *local = hw_to_local(hw);
+ struct ieee80211_sub_if_data *sdata;
++ int n_acs = IEEE80211_NUM_ACS;
+
+ trace_stop_queue(local, queue, reason);
+
+@@ -352,11 +357,14 @@ static void __ieee80211_stop_queue(struct ieee80211_hw *hw, int queue,
+
+ __set_bit(reason, &local->queue_stop_reasons[queue]);
+
++ if (local->hw.queues < IEEE80211_NUM_ACS)
++ n_acs = 1;
++
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdata, &local->interfaces, list) {
+ int ac;
+
+- for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
++ for (ac = 0; ac < n_acs; ac++) {
+ if (sdata->vif.hw_queue[ac] == queue ||
+ sdata->vif.cab_queue == queue)
+ netif_stop_subqueue(sdata->dev, ac);
+diff --git a/net/socket.c b/net/socket.c
+index 6e0ccc0..0452dca 100644
+--- a/net/socket.c
++++ b/net/socket.c
+@@ -522,6 +522,9 @@ void sock_release(struct socket *sock)
+ if (rcu_dereference_protected(sock->wq, 1)->fasync_list)
+ printk(KERN_ERR "sock_release: fasync list not empty!\n");
+
++ if (test_bit(SOCK_EXTERNALLY_ALLOCATED, &sock->flags))
++ return;
++
+ this_cpu_sub(sockets_in_use, 1);
+ if (!sock->file) {
+ iput(SOCK_INODE(sock));
+diff --git a/net/sunrpc/sched.c b/net/sunrpc/sched.c
+index 994cfea..eda32ae 100644
+--- a/net/sunrpc/sched.c
++++ b/net/sunrpc/sched.c
+@@ -790,7 +790,9 @@ void rpc_execute(struct rpc_task *task)
+
+ static void rpc_async_schedule(struct work_struct *work)
+ {
++ current->flags |= PF_FSTRANS;
+ __rpc_execute(container_of(work, struct rpc_task, u.tk_work));
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
+diff --git a/net/sunrpc/xprtrdma/transport.c b/net/sunrpc/xprtrdma/transport.c
+index b446e10..06cdbff 100644
+--- a/net/sunrpc/xprtrdma/transport.c
++++ b/net/sunrpc/xprtrdma/transport.c
+@@ -200,6 +200,7 @@ xprt_rdma_connect_worker(struct work_struct *work)
+ int rc = 0;
+
+ if (!xprt->shutdown) {
++ current->flags |= PF_FSTRANS;
+ xprt_clear_connected(xprt);
+
+ dprintk("RPC: %s: %sconnect\n", __func__,
+@@ -212,10 +213,10 @@ xprt_rdma_connect_worker(struct work_struct *work)
+
+ out:
+ xprt_wake_pending_tasks(xprt, rc);
+-
+ out_clear:
+ dprintk("RPC: %s: exit\n", __func__);
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+diff --git a/net/sunrpc/xprtsock.c b/net/sunrpc/xprtsock.c
+index 890b03f..b88c6bf 100644
+--- a/net/sunrpc/xprtsock.c
++++ b/net/sunrpc/xprtsock.c
+@@ -1895,6 +1895,8 @@ static void xs_local_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ status = __sock_create(xprt->xprt_net, AF_LOCAL,
+ SOCK_STREAM, 0, &sock, 1);
+@@ -1928,6 +1930,7 @@ static void xs_local_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ static void xs_udp_finish_connecting(struct rpc_xprt *xprt, struct socket *sock)
+@@ -1970,6 +1973,8 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ /* Start by resetting any existing state */
+ xs_reset_transport(transport);
+ sock = xs_create_sock(xprt, transport,
+@@ -1988,6 +1993,7 @@ static void xs_udp_setup_socket(struct work_struct *work)
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /*
+@@ -2113,6 +2119,8 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ if (xprt->shutdown)
+ goto out;
+
++ current->flags |= PF_FSTRANS;
++
+ if (!sock) {
+ clear_bit(XPRT_CONNECTION_ABORT, &xprt->state);
+ sock = xs_create_sock(xprt, transport,
+@@ -2162,6 +2170,7 @@ static void xs_tcp_setup_socket(struct work_struct *work)
+ case -EINPROGRESS:
+ case -EALREADY:
+ xprt_clear_connecting(xprt);
++ current->flags &= ~PF_FSTRANS;
+ return;
+ case -EINVAL:
+ /* Happens, for instance, if the user specified a link
+@@ -2174,6 +2183,7 @@ out_eagain:
+ out:
+ xprt_clear_connecting(xprt);
+ xprt_wake_pending_tasks(xprt, status);
++ current->flags &= ~PF_FSTRANS;
+ }
+
+ /**
+diff --git a/net/wanrouter/wanmain.c b/net/wanrouter/wanmain.c
+index 788a12c..2ab7850 100644
+--- a/net/wanrouter/wanmain.c
++++ b/net/wanrouter/wanmain.c
+@@ -602,36 +602,31 @@ static int wanrouter_device_new_if(struct wan_device *wandev,
+ * successfully, add it to the interface list.
+ */
+
+- if (dev->name == NULL) {
+- err = -EINVAL;
+- } else {
++#ifdef WANDEBUG
++ printk(KERN_INFO "%s: registering interface %s...\n",
++ wanrouter_modname, dev->name);
++#endif
+
+- #ifdef WANDEBUG
+- printk(KERN_INFO "%s: registering interface %s...\n",
+- wanrouter_modname, dev->name);
+- #endif
+-
+- err = register_netdev(dev);
+- if (!err) {
+- struct net_device *slave = NULL;
+- unsigned long smp_flags=0;
+-
+- lock_adapter_irq(&wandev->lock, &smp_flags);
+-
+- if (wandev->dev == NULL) {
+- wandev->dev = dev;
+- } else {
+- for (slave=wandev->dev;
+- DEV_TO_SLAVE(slave);
+- slave = DEV_TO_SLAVE(slave))
+- DEV_TO_SLAVE(slave) = dev;
+- }
+- ++wandev->ndev;
+-
+- unlock_adapter_irq(&wandev->lock, &smp_flags);
+- err = 0; /* done !!! */
+- goto out;
++ err = register_netdev(dev);
++ if (!err) {
++ struct net_device *slave = NULL;
++ unsigned long smp_flags=0;
++
++ lock_adapter_irq(&wandev->lock, &smp_flags);
++
++ if (wandev->dev == NULL) {
++ wandev->dev = dev;
++ } else {
++ for (slave=wandev->dev;
++ DEV_TO_SLAVE(slave);
++ slave = DEV_TO_SLAVE(slave))
++ DEV_TO_SLAVE(slave) = dev;
+ }
++ ++wandev->ndev;
++
++ unlock_adapter_irq(&wandev->lock, &smp_flags);
++ err = 0; /* done !!! */
++ goto out;
+ }
+ if (wandev->del_if)
+ wandev->del_if(wandev, dev);
+diff --git a/security/selinux/hooks.c b/security/selinux/hooks.c
+index ffd8900..daaa4ed 100644
+--- a/security/selinux/hooks.c
++++ b/security/selinux/hooks.c
+@@ -2129,7 +2129,7 @@ static inline void flush_unauthorized_files(const struct cred *cred,
+ int fd;
+
+ j++;
+- i = j * __NFDBITS;
++ i = j * BITS_PER_LONG;
+ fdt = files_fdtable(files);
+ if (i >= fdt->max_fds)
+ break;
+diff --git a/security/smack/smackfs.c b/security/smack/smackfs.c
+index 1810c9a..cb6d904 100644
+--- a/security/smack/smackfs.c
++++ b/security/smack/smackfs.c
+@@ -325,11 +325,11 @@ static int smk_parse_long_rule(const char *data, struct smack_rule *rule,
+ int datalen;
+ int rc = -1;
+
+- /*
+- * This is probably inefficient, but safe.
+- */
++ /* This is inefficient */
+ datalen = strlen(data);
+- subject = kzalloc(datalen, GFP_KERNEL);
++
++ /* Our first element can be 64 + \0 with no spaces */
++ subject = kzalloc(datalen + 1, GFP_KERNEL);
+ if (subject == NULL)
+ return -1;
+ object = kzalloc(datalen, GFP_KERNEL);
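In the long-rule parser a rule with no spaces makes the first token as long as the whole input, so kzalloc(datalen) left no room for the terminating NUL that sscanf() writes; the hunk allocates datalen + 1 for the subject. The sizing rule on its own, in userspace:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

int main(void)
{
    const char *data = "subject_with_no_spaces";   /* worst case: one token */
    size_t datalen = strlen(data);

    /* +1: the parsed token can be datalen characters plus its '\0' */
    char *subject = calloc(datalen + 1, 1);
    if (!subject)
        return 1;

    /* safe here because the buffer is sized for the whole input */
    sscanf(data, "%s", subject);
    printf("parsed \"%s\" into a %zu-byte buffer\n", subject, datalen + 1);
    free(subject);
    return 0;
}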
+diff --git a/sound/drivers/mpu401/mpu401_uart.c b/sound/drivers/mpu401/mpu401_uart.c
+index 1cff331..4608c2c 100644
+--- a/sound/drivers/mpu401/mpu401_uart.c
++++ b/sound/drivers/mpu401/mpu401_uart.c
+@@ -554,6 +554,7 @@ int snd_mpu401_uart_new(struct snd_card *card, int device,
+ spin_lock_init(&mpu->output_lock);
+ spin_lock_init(&mpu->timer_lock);
+ mpu->hardware = hardware;
++ mpu->irq = -1;
+ if (! (info_flags & MPU401_INFO_INTEGRATED)) {
+ int res_size = hardware == MPU401_HW_PC98II ? 4 : 2;
+ mpu->res = request_region(port, res_size, "MPU401 UART");
+diff --git a/sound/pci/hda/hda_codec.c b/sound/pci/hda/hda_codec.c
+index 51cb2a2..4e17033 100644
+--- a/sound/pci/hda/hda_codec.c
++++ b/sound/pci/hda/hda_codec.c
+@@ -4418,6 +4418,13 @@ static void __snd_hda_power_up(struct hda_codec *codec, bool wait_power_down)
+ cancel_delayed_work_sync(&codec->power_work);
+
+ spin_lock(&codec->power_lock);
++ /* If the power down delayed work was cancelled above before starting,
++ * then there is no need to go through power up here.
++ */
++ if (codec->power_on) {
++ spin_unlock(&codec->power_lock);
++ return;
++ }
+ trace_hda_power_up(codec);
+ snd_hda_update_power_acct(codec);
+ codec->power_on = 1;
+diff --git a/sound/pci/hda/patch_hdmi.c b/sound/pci/hda/patch_hdmi.c
+index ad319d4..5d52332 100644
+--- a/sound/pci/hda/patch_hdmi.c
++++ b/sound/pci/hda/patch_hdmi.c
+@@ -876,7 +876,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec_per_pin *per_pin;
+ struct hdmi_eld *eld;
+ struct hdmi_spec_per_cvt *per_cvt = NULL;
+- int pinctl;
+
+ /* Validate hinfo */
+ pin_idx = hinfo_to_pin_index(spec, hinfo);
+@@ -912,11 +911,6 @@ static int hdmi_pcm_open(struct hda_pcm_stream *hinfo,
+ snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+ AC_VERB_SET_CONNECT_SEL,
+ mux_idx);
+- pinctl = snd_hda_codec_read(codec, per_pin->pin_nid, 0,
+- AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
+- snd_hda_codec_write(codec, per_pin->pin_nid, 0,
+- AC_VERB_SET_PIN_WIDGET_CONTROL,
+- pinctl | PIN_OUT);
+ snd_hda_spdif_ctls_assign(codec, pin_idx, per_cvt->cvt_nid);
+
+ /* Initially set the converter's capabilities */
+@@ -1153,11 +1147,17 @@ static int generic_hdmi_playback_pcm_prepare(struct hda_pcm_stream *hinfo,
+ struct hdmi_spec *spec = codec->spec;
+ int pin_idx = hinfo_to_pin_index(spec, hinfo);
+ hda_nid_t pin_nid = spec->pins[pin_idx].pin_nid;
++ int pinctl;
+
+ hdmi_set_channel_count(codec, cvt_nid, substream->runtime->channels);
+
+ hdmi_setup_audio_infoframe(codec, pin_idx, substream);
+
++ pinctl = snd_hda_codec_read(codec, pin_nid, 0,
++ AC_VERB_GET_PIN_WIDGET_CONTROL, 0);
++ snd_hda_codec_write(codec, pin_nid, 0,
++ AC_VERB_SET_PIN_WIDGET_CONTROL, pinctl | PIN_OUT);
++
+ return hdmi_setup_stream(codec, cvt_nid, pin_nid, stream_tag, format);
+ }
+
+diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c
+index aa4c25e..fc964d4 100644
+--- a/sound/pci/hda/patch_realtek.c
++++ b/sound/pci/hda/patch_realtek.c
+@@ -5704,6 +5704,15 @@ static int alc269_resume(struct hda_codec *codec)
+ }
+ #endif /* CONFIG_PM */
+
++static void alc269_fixup_pincfg_no_hp_to_lineout(struct hda_codec *codec,
++ const struct alc_fixup *fix, int action)
++{
++ struct alc_spec *spec = codec->spec;
++
++ if (action == ALC_FIXUP_ACT_PRE_PROBE)
++ spec->parse_flags = HDA_PINCFG_NO_HP_FIXUP;
++}
++
+ static void alc269_fixup_hweq(struct hda_codec *codec,
+ const struct alc_fixup *fix, int action)
+ {
+@@ -5828,6 +5837,8 @@ enum {
+ ALC269VB_FIXUP_AMIC,
+ ALC269VB_FIXUP_DMIC,
+ ALC269_FIXUP_MIC2_MUTE_LED,
++ ALC269_FIXUP_LENOVO_DOCK,
++ ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT,
+ };
+
+ static const struct alc_fixup alc269_fixups[] = {
+@@ -5952,6 +5963,20 @@ static const struct alc_fixup alc269_fixups[] = {
+ .type = ALC_FIXUP_FUNC,
+ .v.func = alc269_fixup_mic2_mute,
+ },
++ [ALC269_FIXUP_LENOVO_DOCK] = {
++ .type = ALC_FIXUP_PINS,
++ .v.pins = (const struct alc_pincfg[]) {
++ { 0x19, 0x23a11040 }, /* dock mic */
++ { 0x1b, 0x2121103f }, /* dock headphone */
++ { }
++ },
++ .chained = true,
++ .chain_id = ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT
++ },
++ [ALC269_FIXUP_PINCFG_NO_HP_TO_LINEOUT] = {
++ .type = ALC_FIXUP_FUNC,
++ .v.func = alc269_fixup_pincfg_no_hp_to_lineout,
++ },
+ };
+
+ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+@@ -5975,6 +6000,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ SND_PCI_QUIRK(0x17aa, 0x21b8, "Thinkpad Edge 14", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21ca, "Thinkpad L412", ALC269_FIXUP_SKU_IGNORE),
+ SND_PCI_QUIRK(0x17aa, 0x21e9, "Thinkpad Edge 15", ALC269_FIXUP_SKU_IGNORE),
++ SND_PCI_QUIRK(0x17aa, 0x21f6, "Thinkpad T530", ALC269_FIXUP_LENOVO_DOCK),
++ SND_PCI_QUIRK(0x17aa, 0x2203, "Thinkpad X230 Tablet", ALC269_FIXUP_LENOVO_DOCK),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_QUANTA_MUTE),
+ SND_PCI_QUIRK(0x17aa, 0x3bf8, "Lenovo Ideapd", ALC269_FIXUP_PCM_44K),
+ SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
+@@ -6033,6 +6060,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
+ static const struct alc_model_fixup alc269_fixup_models[] = {
+ {.id = ALC269_FIXUP_AMIC, .name = "laptop-amic"},
+ {.id = ALC269_FIXUP_DMIC, .name = "laptop-dmic"},
++ {.id = ALC269_FIXUP_LENOVO_DOCK, .name = "lenovo-dock"},
+ {}
+ };
+
+@@ -6831,6 +6859,7 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = {
+ { .id = 0x10ec0275, .name = "ALC275", .patch = patch_alc269 },
+ { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 },
+ { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 },
++ { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 },
+ { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660",
+ .patch = patch_alc861 },
+ { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd },
+diff --git a/sound/pci/hda/patch_sigmatel.c b/sound/pci/hda/patch_sigmatel.c
+index 0767528..d2f2264 100644
+--- a/sound/pci/hda/patch_sigmatel.c
++++ b/sound/pci/hda/patch_sigmatel.c
+@@ -101,6 +101,8 @@ enum {
+ STAC_92HD83XXX_HP_cNB11_INTQUAD,
+ STAC_HP_DV7_4000,
+ STAC_HP_ZEPHYR,
++ STAC_92HD83XXX_HP_LED,
++ STAC_92HD83XXX_HP_INV_LED,
+ STAC_92HD83XXX_MODELS
+ };
+
+@@ -1675,6 +1677,8 @@ static const char * const stac92hd83xxx_models[STAC_92HD83XXX_MODELS] = {
+ [STAC_92HD83XXX_HP_cNB11_INTQUAD] = "hp_cNB11_intquad",
+ [STAC_HP_DV7_4000] = "hp-dv7-4000",
+ [STAC_HP_ZEPHYR] = "hp-zephyr",
++ [STAC_92HD83XXX_HP_LED] = "hp-led",
++ [STAC_92HD83XXX_HP_INV_LED] = "hp-inv-led",
+ };
+
+ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
+@@ -1729,6 +1733,8 @@ static const struct snd_pci_quirk stac92hd83xxx_cfg_tbl[] = {
+ "HP", STAC_92HD83XXX_HP_cNB11_INTQUAD),
+ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3561,
+ "HP", STAC_HP_ZEPHYR),
++ SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x3660,
++ "HP Mini", STAC_92HD83XXX_HP_LED),
+ {} /* terminator */
+ };
+
+@@ -4414,7 +4420,12 @@ static int stac92xx_init(struct hda_codec *codec)
+ snd_hda_jack_report_sync(codec);
+
+ /* sync mute LED */
+- snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
++ if (spec->gpio_led) {
++ if (spec->vmaster_mute.hook)
++ snd_hda_sync_vmaster_hook(&spec->vmaster_mute);
++ else /* the very first init call doesn't have vmaster yet */
++ stac92xx_update_led_status(codec, false);
++ }
+
+ /* sync the power-map */
+ if (spec->num_pwrs)
+@@ -5507,6 +5518,7 @@ static void stac92hd8x_fill_auto_spec(struct hda_codec *codec)
+ static int patch_stac92hd83xxx(struct hda_codec *codec)
+ {
+ struct sigmatel_spec *spec;
++ int default_polarity = -1; /* no default cfg */
+ int err;
+
+ spec = kzalloc(sizeof(*spec), GFP_KERNEL);
+@@ -5555,9 +5567,15 @@ again:
+ case STAC_HP_ZEPHYR:
+ spec->init = stac92hd83xxx_hp_zephyr_init;
+ break;
++ case STAC_92HD83XXX_HP_LED:
++ default_polarity = 0;
++ break;
++ case STAC_92HD83XXX_HP_INV_LED:
++ default_polarity = 1;
++ break;
+ }
+
+- if (find_mute_led_cfg(codec, -1/*no default cfg*/))
++ if (find_mute_led_cfg(codec, default_polarity))
+ snd_printd("mute LED gpio %d polarity %d\n",
+ spec->gpio_led,
+ spec->gpio_led_polarity);
+diff --git a/sound/pci/hda/patch_via.c b/sound/pci/hda/patch_via.c
+index 82b3680..f21fd91 100644
+--- a/sound/pci/hda/patch_via.c
++++ b/sound/pci/hda/patch_via.c
+@@ -3226,7 +3226,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ {
+ struct via_spec *spec = codec->spec;
+ int imux_is_smixer;
+- unsigned int parm;
++ unsigned int parm, parm2;
+ /* MUX6 (1eh) = stereo mixer */
+ imux_is_smixer =
+ snd_hda_codec_read(codec, 0x1e, 0, AC_VERB_GET_CONNECT_SEL, 0x00) == 5;
+@@ -3249,7 +3249,7 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ parm = AC_PWRST_D3;
+ set_pin_power_state(codec, 0x27, &parm);
+ update_power_state(codec, 0x1a, parm);
+- update_power_state(codec, 0xb, parm);
++ parm2 = parm; /* for pin 0x0b */
+
+ /* PW2 (26h), AOW2 (ah) */
+ parm = AC_PWRST_D3;
+@@ -3264,6 +3264,9 @@ static void set_widgets_power_state_vt1718S(struct hda_codec *codec)
+ if (!spec->hp_independent_mode) /* check for redirected HP */
+ set_pin_power_state(codec, 0x28, &parm);
+ update_power_state(codec, 0x8, parm);
++ if (!spec->hp_independent_mode && parm2 != AC_PWRST_D3)
++ parm = parm2;
++ update_power_state(codec, 0xb, parm);
+ /* MW9 (21h), Mw2 (1ah), AOW0 (8h) */
+ update_power_state(codec, 0x21, imux_is_smixer ? AC_PWRST_D0 : parm);
+
+diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c
+index 0cfce99..dab6256 100644
+--- a/sound/soc/codecs/wm8962.c
++++ b/sound/soc/codecs/wm8962.c
+@@ -2501,6 +2501,9 @@ static int wm8962_set_bias_level(struct snd_soc_codec *codec,
+ /* VMID 2*250k */
+ snd_soc_update_bits(codec, WM8962_PWR_MGMT_1,
+ WM8962_VMID_SEL_MASK, 0x100);
++
++ if (codec->dapm.bias_level == SND_SOC_BIAS_OFF)
++ msleep(100);
+ break;
+
+ case SND_SOC_BIAS_OFF:
+@@ -3722,6 +3725,9 @@ static int wm8962_runtime_resume(struct device *dev)
+ }
+
+ regcache_cache_only(wm8962->regmap, false);
++
++ wm8962_reset(wm8962);
++
+ regcache_sync(wm8962->regmap);
+
+ regmap_update_bits(wm8962->regmap, WM8962_ANTI_POP,
+diff --git a/sound/soc/codecs/wm8994.c b/sound/soc/codecs/wm8994.c
+index 1436b6c..fc9afc8 100644
+--- a/sound/soc/codecs/wm8994.c
++++ b/sound/soc/codecs/wm8994.c
+@@ -2649,7 +2649,7 @@ static int wm8994_hw_params(struct snd_pcm_substream *substream,
+ return -EINVAL;
+ }
+
+- bclk_rate = params_rate(params) * 2;
++ bclk_rate = params_rate(params) * 4;
+ switch (params_format(params)) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ bclk_rate *= 16;
+diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c
+index 89eae93..5b32b15 100644
+--- a/sound/soc/soc-dapm.c
++++ b/sound/soc/soc-dapm.c
+@@ -1570,7 +1570,15 @@ static int dapm_power_widgets(struct snd_soc_dapm_context *dapm, int event)
+ }
+
+ list_for_each_entry(w, &card->widgets, list) {
+- list_del_init(&w->dirty);
++ switch (w->id) {
++ case snd_soc_dapm_pre:
++ case snd_soc_dapm_post:
++ /* These widgets always need to be powered */
++ break;
++ default:
++ list_del_init(&w->dirty);
++ break;
++ }
+
+ if (w->power) {
+ d = w->dapm;
+@@ -3538,10 +3546,13 @@ EXPORT_SYMBOL_GPL(snd_soc_dapm_free);
+
+ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
+ {
++ struct snd_soc_card *card = dapm->card;
+ struct snd_soc_dapm_widget *w;
+ LIST_HEAD(down_list);
+ int powerdown = 0;
+
++ mutex_lock(&card->dapm_mutex);
++
+ list_for_each_entry(w, &dapm->card->widgets, list) {
+ if (w->dapm != dapm)
+ continue;
+@@ -3564,6 +3575,8 @@ static void soc_dapm_shutdown_codec(struct snd_soc_dapm_context *dapm)
+ snd_soc_dapm_set_bias_level(dapm,
+ SND_SOC_BIAS_STANDBY);
+ }
++
++ mutex_unlock(&card->dapm_mutex);
+ }
+
+ /*
+diff --git a/sound/usb/clock.c b/sound/usb/clock.c
+index 379baad..5e634a2 100644
+--- a/sound/usb/clock.c
++++ b/sound/usb/clock.c
+@@ -111,7 +111,8 @@ static bool uac_clock_source_is_valid(struct snd_usb_audio *chip, int source_id)
+ return 0;
+
+ /* If a clock source can't tell us whether it's valid, we assume it is */
+- if (!uac2_control_is_readable(cs_desc->bmControls, UAC2_CS_CONTROL_CLOCK_VALID))
++ if (!uac2_control_is_readable(cs_desc->bmControls,
++ UAC2_CS_CONTROL_CLOCK_VALID - 1))
+ return 1;
+
+ err = snd_usb_ctl_msg(dev, usb_rcvctrlpipe(dev, 0), UAC2_CS_CUR,