Diffstat (limited to '1008_linux-3.2.9.patch')
-rw-r--r-- | 1008_linux-3.2.9.patch | 3675 |
1 file changed, 3675 insertions, 0 deletions
diff --git a/1008_linux-3.2.9.patch b/1008_linux-3.2.9.patch new file mode 100644 index 00000000..7cac2cf2 --- /dev/null +++ b/1008_linux-3.2.9.patch @@ -0,0 +1,3675 @@ +diff --git a/Makefile b/Makefile +index 7df8a84..5f1739b 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 2 +-SUBLEVEL = 8 ++SUBLEVEL = 9 + EXTRAVERSION = + NAME = Saber-toothed Squirrel + +diff --git a/arch/arm/common/pl330.c b/arch/arm/common/pl330.c +index 8d8df74..67abef5 100644 +--- a/arch/arm/common/pl330.c ++++ b/arch/arm/common/pl330.c +@@ -1496,12 +1496,13 @@ int pl330_chan_ctrl(void *ch_id, enum pl330_chan_op op) + struct pl330_thread *thrd = ch_id; + struct pl330_dmac *pl330; + unsigned long flags; +- int ret = 0, active = thrd->req_running; ++ int ret = 0, active; + + if (!thrd || thrd->free || thrd->dmac->state == DYING) + return -EINVAL; + + pl330 = thrd->dmac; ++ active = thrd->req_running; + + spin_lock_irqsave(&pl330->lock, flags); + +diff --git a/arch/arm/include/asm/assembler.h b/arch/arm/include/asm/assembler.h +index 29035e8..7bb8bf9 100644 +--- a/arch/arm/include/asm/assembler.h ++++ b/arch/arm/include/asm/assembler.h +@@ -137,6 +137,11 @@ + disable_irq + .endm + ++ .macro save_and_disable_irqs_notrace, oldcpsr ++ mrs \oldcpsr, cpsr ++ disable_irq_notrace ++ .endm ++ + /* + * Restore interrupt state previously stored in a register. We don't + * guarantee that this will preserve the flags. +diff --git a/arch/arm/mach-at91/at91rm9200_devices.c b/arch/arm/mach-at91/at91rm9200_devices.c +index ad93068..143eebb 100644 +--- a/arch/arm/mach-at91/at91rm9200_devices.c ++++ b/arch/arm/mach-at91/at91rm9200_devices.c +@@ -83,7 +83,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} + * USB Device (Gadget) + * -------------------------------------------------------------------- */ + +-#ifdef CONFIG_USB_AT91 ++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) + static struct at91_udc_data udc_data; + + static struct resource udc_resources[] = { +diff --git a/arch/arm/mach-at91/at91sam9260_devices.c b/arch/arm/mach-at91/at91sam9260_devices.c +index 629fa97..2590988 100644 +--- a/arch/arm/mach-at91/at91sam9260_devices.c ++++ b/arch/arm/mach-at91/at91sam9260_devices.c +@@ -84,7 +84,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} + * USB Device (Gadget) + * -------------------------------------------------------------------- */ + +-#ifdef CONFIG_USB_AT91 ++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) + static struct at91_udc_data udc_data; + + static struct resource udc_resources[] = { +diff --git a/arch/arm/mach-at91/at91sam9261_devices.c b/arch/arm/mach-at91/at91sam9261_devices.c +index a178b58..daf3e66 100644 +--- a/arch/arm/mach-at91/at91sam9261_devices.c ++++ b/arch/arm/mach-at91/at91sam9261_devices.c +@@ -87,7 +87,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} + * USB Device (Gadget) + * -------------------------------------------------------------------- */ + +-#ifdef CONFIG_USB_AT91 ++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) + static struct at91_udc_data udc_data; + + static struct resource udc_resources[] = { +diff --git a/arch/arm/mach-at91/at91sam9263_devices.c b/arch/arm/mach-at91/at91sam9263_devices.c +index d5fbac9..32a7e43 100644 +--- a/arch/arm/mach-at91/at91sam9263_devices.c ++++ b/arch/arm/mach-at91/at91sam9263_devices.c +@@ -92,7 +92,7 @@ void __init at91_add_device_usbh(struct at91_usbh_data *data) {} + * USB Device (Gadget) + * 
-------------------------------------------------------------------- */ + +-#ifdef CONFIG_USB_AT91 ++#if defined(CONFIG_USB_AT91) || defined(CONFIG_USB_AT91_MODULE) + static struct at91_udc_data udc_data; + + static struct resource udc_resources[] = { +diff --git a/arch/arm/mach-omap2/vp.c b/arch/arm/mach-omap2/vp.c +index 66bd700..3b52027 100644 +--- a/arch/arm/mach-omap2/vp.c ++++ b/arch/arm/mach-omap2/vp.c +@@ -41,6 +41,11 @@ void __init omap_vp_init(struct voltagedomain *voltdm) + u32 val, sys_clk_rate, timeout, waittime; + u32 vddmin, vddmax, vstepmin, vstepmax; + ++ if (!voltdm->pmic || !voltdm->pmic->uv_to_vsel) { ++ pr_err("%s: No PMIC info for vdd_%s\n", __func__, voltdm->name); ++ return; ++ } ++ + if (!voltdm->read || !voltdm->write) { + pr_err("%s: No read/write API for accessing vdd_%s regs\n", + __func__, voltdm->name); +diff --git a/arch/arm/mm/cache-v7.S b/arch/arm/mm/cache-v7.S +index 07c4bc8..a655d3d 100644 +--- a/arch/arm/mm/cache-v7.S ++++ b/arch/arm/mm/cache-v7.S +@@ -54,9 +54,15 @@ loop1: + and r1, r1, #7 @ mask of the bits for current cache only + cmp r1, #2 @ see what cache we have at this level + blt skip @ skip if no cache, or just i-cache ++#ifdef CONFIG_PREEMPT ++ save_and_disable_irqs_notrace r9 @ make cssr&csidr read atomic ++#endif + mcr p15, 2, r10, c0, c0, 0 @ select current cache level in cssr + isb @ isb to sych the new cssr&csidr + mrc p15, 1, r1, c0, c0, 0 @ read the new csidr ++#ifdef CONFIG_PREEMPT ++ restore_irqs_notrace r9 ++#endif + and r2, r1, #7 @ extract the length of the cache lines + add r2, r2, #4 @ add 4 (line length offset) + ldr r4, =0x3ff +diff --git a/arch/powerpc/kernel/perf_event.c b/arch/powerpc/kernel/perf_event.c +index 10a140f..64483fd 100644 +--- a/arch/powerpc/kernel/perf_event.c ++++ b/arch/powerpc/kernel/perf_event.c +@@ -865,6 +865,7 @@ static void power_pmu_start(struct perf_event *event, int ef_flags) + { + unsigned long flags; + s64 left; ++ unsigned long val; + + if (!event->hw.idx || !event->hw.sample_period) + return; +@@ -880,7 +881,12 @@ static void power_pmu_start(struct perf_event *event, int ef_flags) + + event->hw.state = 0; + left = local64_read(&event->hw.period_left); +- write_pmc(event->hw.idx, left); ++ ++ val = 0; ++ if (left < 0x80000000L) ++ val = 0x80000000L - left; ++ ++ write_pmc(event->hw.idx, val); + + perf_event_update_userpage(event); + perf_pmu_enable(event->pmu); +diff --git a/arch/s390/kernel/time.c b/arch/s390/kernel/time.c +index ebbfab3..e03c555 100644 +--- a/arch/s390/kernel/time.c ++++ b/arch/s390/kernel/time.c +@@ -113,11 +113,14 @@ static void fixup_clock_comparator(unsigned long long delta) + static int s390_next_ktime(ktime_t expires, + struct clock_event_device *evt) + { ++ struct timespec ts; + u64 nsecs; + +- nsecs = ktime_to_ns(ktime_sub(expires, ktime_get_monotonic_offset())); ++ ts.tv_sec = ts.tv_nsec = 0; ++ monotonic_to_bootbased(&ts); ++ nsecs = ktime_to_ns(ktime_add(timespec_to_ktime(ts), expires)); + do_div(nsecs, 125); +- S390_lowcore.clock_comparator = TOD_UNIX_EPOCH + (nsecs << 9); ++ S390_lowcore.clock_comparator = sched_clock_base_cc + (nsecs << 9); + set_clock_comparator(S390_lowcore.clock_comparator); + return 0; + } +diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c +index a3b0811..0e89635 100644 +--- a/arch/x86/kernel/cpu/intel_cacheinfo.c ++++ b/arch/x86/kernel/cpu/intel_cacheinfo.c +@@ -326,8 +326,7 @@ static void __cpuinit amd_calc_l3_indices(struct amd_northbridge *nb) + l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 
1; + } + +-static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, +- int index) ++static void __cpuinit amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index) + { + int node; + +@@ -725,14 +724,16 @@ static DEFINE_PER_CPU(struct _cpuid4_info *, ici_cpuid4_info); + #define CPUID4_INFO_IDX(x, y) (&((per_cpu(ici_cpuid4_info, x))[y])) + + #ifdef CONFIG_SMP +-static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) ++ ++static int __cpuinit cache_shared_amd_cpu_map_setup(unsigned int cpu, int index) + { +- struct _cpuid4_info *this_leaf, *sibling_leaf; +- unsigned long num_threads_sharing; +- int index_msb, i, sibling; ++ struct _cpuid4_info *this_leaf; ++ int ret, i, sibling; + struct cpuinfo_x86 *c = &cpu_data(cpu); + +- if ((index == 3) && (c->x86_vendor == X86_VENDOR_AMD)) { ++ ret = 0; ++ if (index == 3) { ++ ret = 1; + for_each_cpu(i, cpu_llc_shared_mask(cpu)) { + if (!per_cpu(ici_cpuid4_info, i)) + continue; +@@ -743,8 +744,35 @@ static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) + set_bit(sibling, this_leaf->shared_cpu_map); + } + } +- return; ++ } else if ((c->x86 == 0x15) && ((index == 1) || (index == 2))) { ++ ret = 1; ++ for_each_cpu(i, cpu_sibling_mask(cpu)) { ++ if (!per_cpu(ici_cpuid4_info, i)) ++ continue; ++ this_leaf = CPUID4_INFO_IDX(i, index); ++ for_each_cpu(sibling, cpu_sibling_mask(cpu)) { ++ if (!cpu_online(sibling)) ++ continue; ++ set_bit(sibling, this_leaf->shared_cpu_map); ++ } ++ } + } ++ ++ return ret; ++} ++ ++static void __cpuinit cache_shared_cpu_map_setup(unsigned int cpu, int index) ++{ ++ struct _cpuid4_info *this_leaf, *sibling_leaf; ++ unsigned long num_threads_sharing; ++ int index_msb, i; ++ struct cpuinfo_x86 *c = &cpu_data(cpu); ++ ++ if (c->x86_vendor == X86_VENDOR_AMD) { ++ if (cache_shared_amd_cpu_map_setup(cpu, index)) ++ return; ++ } ++ + this_leaf = CPUID4_INFO_IDX(cpu, index); + num_threads_sharing = 1 + this_leaf->base.eax.split.num_threads_sharing; + +diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c +index 666f6f5..64004b0 100644 +--- a/drivers/base/regmap/regcache.c ++++ b/drivers/base/regmap/regcache.c +@@ -54,7 +54,7 @@ static int regcache_hw_init(struct regmap *map) + for (count = 0, i = 0; i < map->num_reg_defaults_raw; i++) { + val = regcache_get_val(map->reg_defaults_raw, + i, map->cache_word_size); +- if (!val) ++ if (regmap_volatile(map, i)) + continue; + count++; + } +@@ -69,7 +69,7 @@ static int regcache_hw_init(struct regmap *map) + for (i = 0, j = 0; i < map->num_reg_defaults_raw; i++) { + val = regcache_get_val(map->reg_defaults_raw, + i, map->cache_word_size); +- if (!val) ++ if (regmap_volatile(map, i)) + continue; + map->reg_defaults[j].reg = i; + map->reg_defaults[j].def = val; +diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c +index cedb231..2678b6f 100644 +--- a/drivers/cdrom/cdrom.c ++++ b/drivers/cdrom/cdrom.c +@@ -2120,11 +2120,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, + if (!nr) + return -ENOMEM; + +- if (!access_ok(VERIFY_WRITE, ubuf, nframes * CD_FRAMESIZE_RAW)) { +- ret = -EFAULT; +- goto out; +- } +- + cgc.data_direction = CGC_DATA_READ; + while (nframes > 0) { + if (nr > nframes) +@@ -2133,7 +2128,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, + ret = cdrom_read_block(cdi, &cgc, lba, nr, 1, CD_FRAMESIZE_RAW); + if (ret) + break; +- if (__copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) { ++ if 
(copy_to_user(ubuf, cgc.buffer, CD_FRAMESIZE_RAW * nr)) { + ret = -EFAULT; + break; + } +@@ -2141,7 +2136,6 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf, + nframes -= nr; + lba += nr; + } +-out: + kfree(cgc.buffer); + return ret; + } +diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c +index 31b0d1a..fad7cd1 100644 +--- a/drivers/gpu/drm/radeon/r100.c ++++ b/drivers/gpu/drm/radeon/r100.c +@@ -789,9 +789,7 @@ int r100_irq_process(struct radeon_device *rdev) + WREG32(RADEON_AIC_CNTL, msi_rearm | RS400_MSI_REARM); + break; + default: +- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; +- WREG32(RADEON_MSI_REARM_EN, msi_rearm); +- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); ++ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); + break; + } + } +diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c +index c259e21..ee898e9 100644 +--- a/drivers/gpu/drm/radeon/rs600.c ++++ b/drivers/gpu/drm/radeon/rs600.c +@@ -693,9 +693,7 @@ int rs600_irq_process(struct radeon_device *rdev) + WREG32(RADEON_BUS_CNTL, msi_rearm | RS600_MSI_REARM); + break; + default: +- msi_rearm = RREG32(RADEON_MSI_REARM_EN) & ~RV370_MSI_REARM_EN; +- WREG32(RADEON_MSI_REARM_EN, msi_rearm); +- WREG32(RADEON_MSI_REARM_EN, msi_rearm | RV370_MSI_REARM_EN); ++ WREG32(RADEON_MSI_REARM_EN, RV370_MSI_REARM_EN); + break; + } + } +diff --git a/drivers/hwmon/ads1015.c b/drivers/hwmon/ads1015.c +index eedca3c..dd87ae9 100644 +--- a/drivers/hwmon/ads1015.c ++++ b/drivers/hwmon/ads1015.c +@@ -271,7 +271,7 @@ static int ads1015_probe(struct i2c_client *client, + continue; + err = device_create_file(&client->dev, &ads1015_in[k].dev_attr); + if (err) +- goto exit_free; ++ goto exit_remove; + } + + data->hwmon_dev = hwmon_device_register(&client->dev); +@@ -285,7 +285,6 @@ static int ads1015_probe(struct i2c_client *client, + exit_remove: + for (k = 0; k < ADS1015_CHANNELS; ++k) + device_remove_file(&client->dev, &ads1015_in[k].dev_attr); +-exit_free: + kfree(data); + exit: + return err; +diff --git a/drivers/hwmon/f75375s.c b/drivers/hwmon/f75375s.c +index e4ab491..040a820 100644 +--- a/drivers/hwmon/f75375s.c ++++ b/drivers/hwmon/f75375s.c +@@ -304,8 +304,6 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val) + case 0: /* Full speed */ + fanmode |= (3 << FAN_CTRL_MODE(nr)); + data->pwm[nr] = 255; +- f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr), +- data->pwm[nr]); + break; + case 1: /* PWM */ + fanmode |= (3 << FAN_CTRL_MODE(nr)); +@@ -318,6 +316,9 @@ static int set_pwm_enable_direct(struct i2c_client *client, int nr, int val) + } + f75375_write8(client, F75375_REG_FAN_TIMER, fanmode); + data->pwm_enable[nr] = val; ++ if (val == 0) ++ f75375_write8(client, F75375_REG_FAN_PWM_DUTY(nr), ++ data->pwm[nr]); + return 0; + } + +diff --git a/drivers/hwmon/max6639.c b/drivers/hwmon/max6639.c +index f20d997..8c3df04 100644 +--- a/drivers/hwmon/max6639.c ++++ b/drivers/hwmon/max6639.c +@@ -72,8 +72,8 @@ static unsigned short normal_i2c[] = { 0x2c, 0x2e, 0x2f, I2C_CLIENT_END }; + + static const int rpm_ranges[] = { 2000, 4000, 8000, 16000 }; + +-#define FAN_FROM_REG(val, div, rpm_range) ((val) == 0 ? -1 : \ +- (val) == 255 ? 0 : (rpm_ranges[rpm_range] * 30) / ((div + 1) * (val))) ++#define FAN_FROM_REG(val, rpm_range) ((val) == 0 || (val) == 255 ? 
\ ++ 0 : (rpm_ranges[rpm_range] * 30) / (val)) + #define TEMP_LIMIT_TO_REG(val) SENSORS_LIMIT((val) / 1000, 0, 255) + + /* +@@ -333,7 +333,7 @@ static ssize_t show_fan_input(struct device *dev, + return PTR_ERR(data); + + return sprintf(buf, "%d\n", FAN_FROM_REG(data->fan[attr->index], +- data->ppr, data->rpm_range)); ++ data->rpm_range)); + } + + static ssize_t show_alarm(struct device *dev, +@@ -429,9 +429,9 @@ static int max6639_init_client(struct i2c_client *client) + struct max6639_data *data = i2c_get_clientdata(client); + struct max6639_platform_data *max6639_info = + client->dev.platform_data; +- int i = 0; ++ int i; + int rpm_range = 1; /* default: 4000 RPM */ +- int err = 0; ++ int err; + + /* Reset chip to default values, see below for GCONFIG setup */ + err = i2c_smbus_write_byte_data(client, MAX6639_REG_GCONFIG, +@@ -446,11 +446,6 @@ static int max6639_init_client(struct i2c_client *client) + else + data->ppr = 2; + data->ppr -= 1; +- err = i2c_smbus_write_byte_data(client, +- MAX6639_REG_FAN_PPR(i), +- data->ppr << 5); +- if (err) +- goto exit; + + if (max6639_info) + rpm_range = rpm_range_to_reg(max6639_info->rpm_range); +@@ -458,6 +453,13 @@ static int max6639_init_client(struct i2c_client *client) + + for (i = 0; i < 2; i++) { + ++ /* Set Fan pulse per revolution */ ++ err = i2c_smbus_write_byte_data(client, ++ MAX6639_REG_FAN_PPR(i), ++ data->ppr << 6); ++ if (err) ++ goto exit; ++ + /* Fans config PWM, RPM */ + err = i2c_smbus_write_byte_data(client, + MAX6639_REG_FAN_CONFIG1(i), +diff --git a/drivers/infiniband/ulp/ipoib/ipoib.h b/drivers/infiniband/ulp/ipoib/ipoib.h +index b3cc1e0..86df632 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib.h ++++ b/drivers/infiniband/ulp/ipoib/ipoib.h +@@ -44,6 +44,7 @@ + #include <linux/mutex.h> + + #include <net/neighbour.h> ++#include <net/sch_generic.h> + + #include <linux/atomic.h> + +@@ -117,8 +118,9 @@ struct ipoib_header { + u16 reserved; + }; + +-struct ipoib_pseudoheader { +- u8 hwaddr[INFINIBAND_ALEN]; ++struct ipoib_cb { ++ struct qdisc_skb_cb qdisc_cb; ++ u8 hwaddr[INFINIBAND_ALEN]; + }; + + /* Used for all multicast joins (broadcast, IPv4 mcast and IPv6 mcast) */ +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_main.c b/drivers/infiniband/ulp/ipoib/ipoib_main.c +index 83695b4..fe2fdbb 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_main.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_main.c +@@ -658,7 +658,7 @@ static void ipoib_path_lookup(struct sk_buff *skb, struct net_device *dev) + } + + static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, +- struct ipoib_pseudoheader *phdr) ++ struct ipoib_cb *cb) + { + struct ipoib_dev_priv *priv = netdev_priv(dev); + struct ipoib_path *path; +@@ -666,17 +666,15 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, + + spin_lock_irqsave(&priv->lock, flags); + +- path = __path_find(dev, phdr->hwaddr + 4); ++ path = __path_find(dev, cb->hwaddr + 4); + if (!path || !path->valid) { + int new_path = 0; + + if (!path) { +- path = path_rec_create(dev, phdr->hwaddr + 4); ++ path = path_rec_create(dev, cb->hwaddr + 4); + new_path = 1; + } + if (path) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, sizeof *phdr); + __skb_queue_tail(&path->queue, skb); + + if (!path->query && path_rec_start(dev, path)) { +@@ -700,12 +698,10 @@ static void unicast_arp_send(struct sk_buff *skb, struct net_device *dev, + be16_to_cpu(path->pathrec.dlid)); + + spin_unlock_irqrestore(&priv->lock, flags); +- ipoib_send(dev, skb, path->ah, 
IPOIB_QPN(phdr->hwaddr)); ++ ipoib_send(dev, skb, path->ah, IPOIB_QPN(cb->hwaddr)); + return; + } else if ((path->query || !path_rec_start(dev, path)) && + skb_queue_len(&path->queue) < IPOIB_MAX_PATH_REC_QUEUE) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, sizeof *phdr); + __skb_queue_tail(&path->queue, skb); + } else { + ++dev->stats.tx_dropped; +@@ -774,16 +770,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) + dev_kfree_skb_any(skb); + } + } else { +- struct ipoib_pseudoheader *phdr = +- (struct ipoib_pseudoheader *) skb->data; +- skb_pull(skb, sizeof *phdr); ++ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; + +- if (phdr->hwaddr[4] == 0xff) { ++ if (cb->hwaddr[4] == 0xff) { + /* Add in the P_Key for multicast*/ +- phdr->hwaddr[8] = (priv->pkey >> 8) & 0xff; +- phdr->hwaddr[9] = priv->pkey & 0xff; ++ cb->hwaddr[8] = (priv->pkey >> 8) & 0xff; ++ cb->hwaddr[9] = priv->pkey & 0xff; + +- ipoib_mcast_send(dev, phdr->hwaddr + 4, skb); ++ ipoib_mcast_send(dev, cb->hwaddr + 4, skb); + } else { + /* unicast GID -- should be ARP or RARP reply */ + +@@ -792,14 +786,14 @@ static int ipoib_start_xmit(struct sk_buff *skb, struct net_device *dev) + ipoib_warn(priv, "Unicast, no %s: type %04x, QPN %06x %pI6\n", + skb_dst(skb) ? "neigh" : "dst", + be16_to_cpup((__be16 *) skb->data), +- IPOIB_QPN(phdr->hwaddr), +- phdr->hwaddr + 4); ++ IPOIB_QPN(cb->hwaddr), ++ cb->hwaddr + 4); + dev_kfree_skb_any(skb); + ++dev->stats.tx_dropped; + goto unlock; + } + +- unicast_arp_send(skb, dev, phdr); ++ unicast_arp_send(skb, dev, cb); + } + } + unlock: +@@ -825,8 +819,6 @@ static int ipoib_hard_header(struct sk_buff *skb, + const void *daddr, const void *saddr, unsigned len) + { + struct ipoib_header *header; +- struct dst_entry *dst; +- struct neighbour *n; + + header = (struct ipoib_header *) skb_push(skb, sizeof *header); + +@@ -834,18 +826,13 @@ static int ipoib_hard_header(struct sk_buff *skb, + header->reserved = 0; + + /* +- * If we don't have a neighbour structure, stuff the +- * destination address onto the front of the skb so we can +- * figure out where to send the packet later. ++ * If we don't have a dst_entry structure, stuff the ++ * destination address into skb->cb so we can figure out where ++ * to send the packet later. + */ +- dst = skb_dst(skb); +- n = NULL; +- if (dst) +- n = dst_get_neighbour_raw(dst); +- if ((!dst || !n) && daddr) { +- struct ipoib_pseudoheader *phdr = +- (struct ipoib_pseudoheader *) skb_push(skb, sizeof *phdr); +- memcpy(phdr->hwaddr, daddr, INFINIBAND_ALEN); ++ if (!skb_dst(skb)) { ++ struct ipoib_cb *cb = (struct ipoib_cb *) skb->cb; ++ memcpy(cb->hwaddr, daddr, INFINIBAND_ALEN); + } + + return 0; +@@ -1021,11 +1008,7 @@ static void ipoib_setup(struct net_device *dev) + + dev->flags |= IFF_BROADCAST | IFF_MULTICAST; + +- /* +- * We add in INFINIBAND_ALEN to allow for the destination +- * address "pseudoheader" for skbs without neighbour struct. 
+- */ +- dev->hard_header_len = IPOIB_ENCAP_LEN + INFINIBAND_ALEN; ++ dev->hard_header_len = IPOIB_ENCAP_LEN; + dev->addr_len = INFINIBAND_ALEN; + dev->type = ARPHRD_INFINIBAND; + dev->tx_queue_len = ipoib_sendq_size * 2; +diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +index 873bff9..e5069b4 100644 +--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c ++++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c +@@ -262,21 +262,14 @@ static int ipoib_mcast_join_finish(struct ipoib_mcast *mcast, + netif_tx_lock_bh(dev); + while (!skb_queue_empty(&mcast->pkt_queue)) { + struct sk_buff *skb = skb_dequeue(&mcast->pkt_queue); +- struct dst_entry *dst = skb_dst(skb); +- struct neighbour *n = NULL; + + netif_tx_unlock_bh(dev); + + skb->dev = dev; +- if (dst) +- n = dst_get_neighbour_raw(dst); +- if (!dst || !n) { +- /* put pseudoheader back on for next time */ +- skb_push(skb, sizeof (struct ipoib_pseudoheader)); +- } + + if (dev_queue_xmit(skb)) + ipoib_warn(priv, "dev_queue_xmit failed to requeue packet\n"); ++ + netif_tx_lock_bh(dev); + } + netif_tx_unlock_bh(dev); +diff --git a/drivers/media/rc/imon.c b/drivers/media/rc/imon.c +index 6ed9646..3f175eb 100644 +--- a/drivers/media/rc/imon.c ++++ b/drivers/media/rc/imon.c +@@ -47,7 +47,7 @@ + #define MOD_AUTHOR "Jarod Wilson <jarod@wilsonet.com>" + #define MOD_DESC "Driver for SoundGraph iMON MultiMedia IR/Display" + #define MOD_NAME "imon" +-#define MOD_VERSION "0.9.3" ++#define MOD_VERSION "0.9.4" + + #define DISPLAY_MINOR_BASE 144 + #define DEVICE_NAME "lcd%d" +@@ -1658,9 +1658,17 @@ static void usb_rx_callback_intf0(struct urb *urb) + return; + + ictx = (struct imon_context *)urb->context; +- if (!ictx || !ictx->dev_present_intf0) ++ if (!ictx) + return; + ++ /* ++ * if we get a callback before we're done configuring the hardware, we ++ * can't yet process the data, as there's nowhere to send it, but we ++ * still need to submit a new rx URB to avoid wedging the hardware ++ */ ++ if (!ictx->dev_present_intf0) ++ goto out; ++ + switch (urb->status) { + case -ENOENT: /* usbcore unlink successful! */ + return; +@@ -1678,6 +1686,7 @@ static void usb_rx_callback_intf0(struct urb *urb) + break; + } + ++out: + usb_submit_urb(ictx->rx_urb_intf0, GFP_ATOMIC); + } + +@@ -1690,9 +1699,17 @@ static void usb_rx_callback_intf1(struct urb *urb) + return; + + ictx = (struct imon_context *)urb->context; +- if (!ictx || !ictx->dev_present_intf1) ++ if (!ictx) + return; + ++ /* ++ * if we get a callback before we're done configuring the hardware, we ++ * can't yet process the data, as there's nowhere to send it, but we ++ * still need to submit a new rx URB to avoid wedging the hardware ++ */ ++ if (!ictx->dev_present_intf1) ++ goto out; ++ + switch (urb->status) { + case -ENOENT: /* usbcore unlink successful! 
*/ + return; +@@ -1710,6 +1727,7 @@ static void usb_rx_callback_intf1(struct urb *urb) + break; + } + ++out: + usb_submit_urb(ictx->rx_urb_intf1, GFP_ATOMIC); + } + +@@ -2242,7 +2260,7 @@ find_endpoint_failed: + mutex_unlock(&ictx->lock); + usb_free_urb(rx_urb); + rx_urb_alloc_failed: +- dev_err(ictx->dev, "unable to initialize intf0, err %d\n", ret); ++ dev_err(ictx->dev, "unable to initialize intf1, err %d\n", ret); + + return NULL; + } +diff --git a/drivers/media/video/hdpvr/hdpvr-video.c b/drivers/media/video/hdpvr/hdpvr-video.c +index 087f7c0..41fd57b 100644 +--- a/drivers/media/video/hdpvr/hdpvr-video.c ++++ b/drivers/media/video/hdpvr/hdpvr-video.c +@@ -283,12 +283,13 @@ static int hdpvr_start_streaming(struct hdpvr_device *dev) + + hdpvr_config_call(dev, CTRL_START_STREAMING_VALUE, 0x00); + ++ dev->status = STATUS_STREAMING; ++ + INIT_WORK(&dev->worker, hdpvr_transmit_buffers); + queue_work(dev->workqueue, &dev->worker); + + v4l2_dbg(MSG_BUFFER, hdpvr_debug, &dev->v4l2_dev, + "streaming started\n"); +- dev->status = STATUS_STREAMING; + + return 0; + } +diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c +index 1e0e27c..e15e47d 100644 +--- a/drivers/mmc/card/block.c ++++ b/drivers/mmc/card/block.c +@@ -266,6 +266,9 @@ static struct mmc_blk_ioc_data *mmc_blk_ioctl_copy_from_user( + goto idata_err; + } + ++ if (!idata->buf_bytes) ++ return idata; ++ + idata->buf = kzalloc(idata->buf_bytes, GFP_KERNEL); + if (!idata->buf) { + err = -ENOMEM; +@@ -312,25 +315,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, + if (IS_ERR(idata)) + return PTR_ERR(idata); + +- cmd.opcode = idata->ic.opcode; +- cmd.arg = idata->ic.arg; +- cmd.flags = idata->ic.flags; +- +- data.sg = &sg; +- data.sg_len = 1; +- data.blksz = idata->ic.blksz; +- data.blocks = idata->ic.blocks; +- +- sg_init_one(data.sg, idata->buf, idata->buf_bytes); +- +- if (idata->ic.write_flag) +- data.flags = MMC_DATA_WRITE; +- else +- data.flags = MMC_DATA_READ; +- +- mrq.cmd = &cmd; +- mrq.data = &data; +- + md = mmc_blk_get(bdev->bd_disk); + if (!md) { + err = -EINVAL; +@@ -343,6 +327,48 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, + goto cmd_done; + } + ++ cmd.opcode = idata->ic.opcode; ++ cmd.arg = idata->ic.arg; ++ cmd.flags = idata->ic.flags; ++ ++ if (idata->buf_bytes) { ++ data.sg = &sg; ++ data.sg_len = 1; ++ data.blksz = idata->ic.blksz; ++ data.blocks = idata->ic.blocks; ++ ++ sg_init_one(data.sg, idata->buf, idata->buf_bytes); ++ ++ if (idata->ic.write_flag) ++ data.flags = MMC_DATA_WRITE; ++ else ++ data.flags = MMC_DATA_READ; ++ ++ /* data.flags must already be set before doing this. */ ++ mmc_set_data_timeout(&data, card); ++ ++ /* Allow overriding the timeout_ns for empirical tuning. */ ++ if (idata->ic.data_timeout_ns) ++ data.timeout_ns = idata->ic.data_timeout_ns; ++ ++ if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { ++ /* ++ * Pretend this is a data transfer and rely on the ++ * host driver to compute timeout. When all host ++ * drivers support cmd.cmd_timeout for R1B, this ++ * can be changed to: ++ * ++ * mrq.data = NULL; ++ * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; ++ */ ++ data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; ++ } ++ ++ mrq.data = &data; ++ } ++ ++ mrq.cmd = &cmd; ++ + mmc_claim_host(card->host); + + if (idata->ic.is_acmd) { +@@ -351,24 +377,6 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev, + goto cmd_rel_host; + } + +- /* data.flags must already be set before doing this. 
*/ +- mmc_set_data_timeout(&data, card); +- /* Allow overriding the timeout_ns for empirical tuning. */ +- if (idata->ic.data_timeout_ns) +- data.timeout_ns = idata->ic.data_timeout_ns; +- +- if ((cmd.flags & MMC_RSP_R1B) == MMC_RSP_R1B) { +- /* +- * Pretend this is a data transfer and rely on the host driver +- * to compute timeout. When all host drivers support +- * cmd.cmd_timeout for R1B, this can be changed to: +- * +- * mrq.data = NULL; +- * cmd.cmd_timeout = idata->ic.cmd_timeout_ms; +- */ +- data.timeout_ns = idata->ic.cmd_timeout_ms * 1000000; +- } +- + mmc_wait_for_req(card->host, &mrq); + + if (cmd.error) { +diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c +index 04a3f1b..192b0d1 100644 +--- a/drivers/net/can/sja1000/sja1000.c ++++ b/drivers/net/can/sja1000/sja1000.c +@@ -95,11 +95,16 @@ static void sja1000_write_cmdreg(struct sja1000_priv *priv, u8 val) + spin_unlock_irqrestore(&priv->cmdreg_lock, flags); + } + ++static int sja1000_is_absent(struct sja1000_priv *priv) ++{ ++ return (priv->read_reg(priv, REG_MOD) == 0xFF); ++} ++ + static int sja1000_probe_chip(struct net_device *dev) + { + struct sja1000_priv *priv = netdev_priv(dev); + +- if (priv->reg_base && (priv->read_reg(priv, 0) == 0xFF)) { ++ if (priv->reg_base && sja1000_is_absent(priv)) { + printk(KERN_INFO "%s: probing @0x%lX failed\n", + DRV_NAME, dev->base_addr); + return 0; +@@ -493,6 +498,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) + while ((isrc = priv->read_reg(priv, REG_IR)) && (n < SJA1000_MAX_IRQ)) { + n++; + status = priv->read_reg(priv, REG_SR); ++ /* check for absent controller due to hw unplug */ ++ if (status == 0xFF && sja1000_is_absent(priv)) ++ return IRQ_NONE; + + if (isrc & IRQ_WUI) + dev_warn(dev->dev.parent, "wakeup interrupt\n"); +@@ -509,6 +517,9 @@ irqreturn_t sja1000_interrupt(int irq, void *dev_id) + while (status & SR_RBS) { + sja1000_rx(dev); + status = priv->read_reg(priv, REG_SR); ++ /* check for absent controller */ ++ if (status == 0xFF && sja1000_is_absent(priv)) ++ return IRQ_NONE; + } + } + if (isrc & (IRQ_DOI | IRQ_EI | IRQ_BEI | IRQ_EPI | IRQ_ALI)) { +diff --git a/drivers/net/ethernet/3com/3c59x.c b/drivers/net/ethernet/3com/3c59x.c +index b42c06b..e0c5529 100644 +--- a/drivers/net/ethernet/3com/3c59x.c ++++ b/drivers/net/ethernet/3com/3c59x.c +@@ -1842,7 +1842,7 @@ vortex_timer(unsigned long data) + ok = 1; + } + +- if (!netif_carrier_ok(dev)) ++ if (dev->flags & IFF_SLAVE || !netif_carrier_ok(dev)) + next_tick = 5*HZ; + + if (vp->medialock) +diff --git a/drivers/net/ethernet/jme.c b/drivers/net/ethernet/jme.c +index 76b8457..ab784e0 100644 +--- a/drivers/net/ethernet/jme.c ++++ b/drivers/net/ethernet/jme.c +@@ -2328,19 +2328,11 @@ jme_change_mtu(struct net_device *netdev, int new_mtu) + ((new_mtu) < IPV6_MIN_MTU)) + return -EINVAL; + +- if (new_mtu > 4000) { +- jme->reg_rxcs &= ~RXCS_FIFOTHNP; +- jme->reg_rxcs |= RXCS_FIFOTHNP_64QW; +- jme_restart_rx_engine(jme); +- } else { +- jme->reg_rxcs &= ~RXCS_FIFOTHNP; +- jme->reg_rxcs |= RXCS_FIFOTHNP_128QW; +- jme_restart_rx_engine(jme); +- } + + netdev->mtu = new_mtu; + netdev_update_features(netdev); + ++ jme_restart_rx_engine(jme); + jme_reset_link(jme); + + return 0; +diff --git a/drivers/net/ethernet/jme.h b/drivers/net/ethernet/jme.h +index 4304072..3efc897 100644 +--- a/drivers/net/ethernet/jme.h ++++ b/drivers/net/ethernet/jme.h +@@ -730,7 +730,7 @@ enum jme_rxcs_values { + RXCS_RETRYCNT_60 = 0x00000F00, + + RXCS_DEFAULT = RXCS_FIFOTHTP_128T | +- RXCS_FIFOTHNP_128QW | ++ 
RXCS_FIFOTHNP_16QW | + RXCS_DMAREQSZ_128B | + RXCS_RETRYGAP_256ns | + RXCS_RETRYCNT_32, +diff --git a/drivers/net/ethernet/ti/davinci_emac.c b/drivers/net/ethernet/ti/davinci_emac.c +index 815c797..22f2788 100644 +--- a/drivers/net/ethernet/ti/davinci_emac.c ++++ b/drivers/net/ethernet/ti/davinci_emac.c +@@ -1007,7 +1007,7 @@ static void emac_rx_handler(void *token, int len, int status) + int ret; + + /* free and bail if we are shutting down */ +- if (unlikely(!netif_running(ndev) || !netif_carrier_ok(ndev))) { ++ if (unlikely(!netif_running(ndev))) { + dev_kfree_skb_any(skb); + return; + } +@@ -1036,7 +1036,9 @@ static void emac_rx_handler(void *token, int len, int status) + recycle: + ret = cpdma_chan_submit(priv->rxchan, skb, skb->data, + skb_tailroom(skb), GFP_KERNEL); +- if (WARN_ON(ret < 0)) ++ ++ WARN_ON(ret == -ENOMEM); ++ if (unlikely(ret < 0)) + dev_kfree_skb_any(skb); + } + +diff --git a/drivers/net/ethernet/via/via-velocity.c b/drivers/net/ethernet/via/via-velocity.c +index 4535d7c..e015a57 100644 +--- a/drivers/net/ethernet/via/via-velocity.c ++++ b/drivers/net/ethernet/via/via-velocity.c +@@ -2489,9 +2489,6 @@ static int velocity_close(struct net_device *dev) + if (dev->irq != 0) + free_irq(dev->irq, dev); + +- /* Power down the chip */ +- pci_set_power_state(vptr->pdev, PCI_D3hot); +- + velocity_free_rings(vptr); + + vptr->flags &= (~VELOCITY_FLAGS_OPENED); +diff --git a/drivers/net/usb/ipheth.c b/drivers/net/usb/ipheth.c +index 13c1f04..ad96164 100644 +--- a/drivers/net/usb/ipheth.c ++++ b/drivers/net/usb/ipheth.c +@@ -60,6 +60,7 @@ + #define USB_PRODUCT_IPHONE_3GS 0x1294 + #define USB_PRODUCT_IPHONE_4 0x1297 + #define USB_PRODUCT_IPHONE_4_VZW 0x129c ++#define USB_PRODUCT_IPHONE_4S 0x12a0 + + #define IPHETH_USBINTF_CLASS 255 + #define IPHETH_USBINTF_SUBCLASS 253 +@@ -103,6 +104,10 @@ static struct usb_device_id ipheth_table[] = { + USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4_VZW, + IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, + IPHETH_USBINTF_PROTO) }, ++ { USB_DEVICE_AND_INTERFACE_INFO( ++ USB_VENDOR_APPLE, USB_PRODUCT_IPHONE_4S, ++ IPHETH_USBINTF_CLASS, IPHETH_USBINTF_SUBCLASS, ++ IPHETH_USBINTF_PROTO) }, + { } + }; + MODULE_DEVICE_TABLE(usb, ipheth_table); +diff --git a/drivers/net/veth.c b/drivers/net/veth.c +index ef883e9..b907398 100644 +--- a/drivers/net/veth.c ++++ b/drivers/net/veth.c +@@ -423,7 +423,9 @@ static void veth_dellink(struct net_device *dev, struct list_head *head) + unregister_netdevice_queue(peer, head); + } + +-static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; ++static const struct nla_policy veth_policy[VETH_INFO_MAX + 1] = { ++ [VETH_INFO_PEER] = { .len = sizeof(struct ifinfomsg) }, ++}; + + static struct rtnl_link_ops veth_link_ops = { + .kind = DRV_NAME, +diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c +index 528d5f3..64af11f 100644 +--- a/drivers/net/wireless/ath/ath9k/rc.c ++++ b/drivers/net/wireless/ath/ath9k/rc.c +@@ -1347,7 +1347,7 @@ static void ath_tx_status(void *priv, struct ieee80211_supported_band *sband, + fc = hdr->frame_control; + for (i = 0; i < sc->hw->max_rates; i++) { + struct ieee80211_tx_rate *rate = &tx_info->status.rates[i]; +- if (!rate->count) ++ if (rate->idx < 0 || !rate->count) + break; + + final_ts_idx = i; +diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c +index 04e74f4..dfee1b3 100644 +--- a/drivers/pci/probe.c ++++ b/drivers/pci/probe.c +@@ -651,6 +651,11 @@ int __devinit pci_scan_bridge(struct pci_bus *bus, struct pci_dev *dev, int max, + dev_dbg(&dev->dev, 
"scanning [bus %02x-%02x] behind bridge, pass %d\n", + secondary, subordinate, pass); + ++ if (!primary && (primary != bus->number) && secondary && subordinate) { ++ dev_warn(&dev->dev, "Primary bus is hard wired to 0\n"); ++ primary = bus->number; ++ } ++ + /* Check if setup is sensible at all */ + if (!pass && + (primary != bus->number || secondary <= bus->number)) { +diff --git a/drivers/scsi/scsi_pm.c b/drivers/scsi/scsi_pm.c +index d329f8b..2aeaf5c 100644 +--- a/drivers/scsi/scsi_pm.c ++++ b/drivers/scsi/scsi_pm.c +@@ -7,6 +7,7 @@ + + #include <linux/pm_runtime.h> + #include <linux/export.h> ++#include <linux/async.h> + + #include <scsi/scsi.h> + #include <scsi/scsi_device.h> +@@ -69,6 +70,19 @@ static int scsi_bus_resume_common(struct device *dev) + return err; + } + ++static int scsi_bus_prepare(struct device *dev) ++{ ++ if (scsi_is_sdev_device(dev)) { ++ /* sd probing uses async_schedule. Wait until it finishes. */ ++ async_synchronize_full(); ++ ++ } else if (scsi_is_host_device(dev)) { ++ /* Wait until async scanning is finished */ ++ scsi_complete_async_scans(); ++ } ++ return 0; ++} ++ + static int scsi_bus_suspend(struct device *dev) + { + return scsi_bus_suspend_common(dev, PMSG_SUSPEND); +@@ -87,6 +101,7 @@ static int scsi_bus_poweroff(struct device *dev) + #else /* CONFIG_PM_SLEEP */ + + #define scsi_bus_resume_common NULL ++#define scsi_bus_prepare NULL + #define scsi_bus_suspend NULL + #define scsi_bus_freeze NULL + #define scsi_bus_poweroff NULL +@@ -195,6 +210,7 @@ void scsi_autopm_put_host(struct Scsi_Host *shost) + #endif /* CONFIG_PM_RUNTIME */ + + const struct dev_pm_ops scsi_bus_pm_ops = { ++ .prepare = scsi_bus_prepare, + .suspend = scsi_bus_suspend, + .resume = scsi_bus_resume_common, + .freeze = scsi_bus_freeze, +diff --git a/drivers/scsi/scsi_priv.h b/drivers/scsi/scsi_priv.h +index 2a58895..5b475d0 100644 +--- a/drivers/scsi/scsi_priv.h ++++ b/drivers/scsi/scsi_priv.h +@@ -110,6 +110,7 @@ extern void scsi_exit_procfs(void); + #endif /* CONFIG_PROC_FS */ + + /* scsi_scan.c */ ++extern int scsi_complete_async_scans(void); + extern int scsi_scan_host_selected(struct Scsi_Host *, unsigned int, + unsigned int, unsigned int, int); + extern void scsi_forget_host(struct Scsi_Host *); +diff --git a/drivers/scsi/scsi_scan.c b/drivers/scsi/scsi_scan.c +index b3c6d95..6e7ea4a 100644 +--- a/drivers/scsi/scsi_scan.c ++++ b/drivers/scsi/scsi_scan.c +@@ -1815,6 +1815,7 @@ static void scsi_finish_async_scan(struct async_scan_data *data) + } + spin_unlock(&async_scan_lock); + ++ scsi_autopm_put_host(shost); + scsi_host_put(shost); + kfree(data); + } +@@ -1841,7 +1842,6 @@ static int do_scan_async(void *_data) + + do_scsi_scan_host(shost); + scsi_finish_async_scan(data); +- scsi_autopm_put_host(shost); + return 0; + } + +@@ -1869,7 +1869,7 @@ void scsi_scan_host(struct Scsi_Host *shost) + p = kthread_run(do_scan_async, data, "scsi_scan_%d", shost->host_no); + if (IS_ERR(p)) + do_scan_async(data); +- /* scsi_autopm_put_host(shost) is called in do_scan_async() */ ++ /* scsi_autopm_put_host(shost) is called in scsi_finish_async_scan() */ + } + EXPORT_SYMBOL(scsi_scan_host); + +diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c +index 1dcbef4..1d24512 100644 +--- a/drivers/target/target_core_alua.c ++++ b/drivers/target/target_core_alua.c +@@ -79,7 +79,7 @@ int target_emulate_report_target_port_groups(struct se_task *task) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + 
spin_lock(&su_dev->t10_alua.tg_pt_gps_lock); + list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list, +@@ -164,7 +164,7 @@ int target_emulate_report_target_port_groups(struct se_task *task) + buf[2] = ((rd_len >> 8) & 0xff); + buf[3] = (rd_len & 0xff); + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); +@@ -195,7 +195,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) + cmd->scsi_sense_reason = TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE; + return -EINVAL; + } +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + /* + * Determine if explict ALUA via SET_TARGET_PORT_GROUPS is allowed +@@ -352,7 +352,7 @@ int target_emulate_set_target_port_groups(struct se_task *task) + } + + out: +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + return 0; +diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c +index 251e48f..8facd33 100644 +--- a/drivers/target/target_core_cdb.c ++++ b/drivers/target/target_core_cdb.c +@@ -82,7 +82,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + if (dev == tpg->tpg_virt_lun0.lun_se_dev) { + buf[0] = 0x3f; /* Not connected */ +@@ -135,7 +135,7 @@ target_emulate_inquiry_std(struct se_cmd *cmd) + buf[4] = 31; /* Set additional length to 31 */ + + out: +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + return 0; + } + +@@ -726,7 +726,7 @@ int target_emulate_inquiry(struct se_task *task) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + buf[0] = dev->transport->get_device_type(dev); + +@@ -743,7 +743,7 @@ int target_emulate_inquiry(struct se_task *task) + ret = -EINVAL; + + out_unmap: +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + out: + if (!ret) { + task->task_scsi_status = GOOD; +@@ -765,7 +765,7 @@ int target_emulate_readcapacity(struct se_task *task) + else + blocks = (u32)blocks_long; + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + buf[0] = (blocks >> 24) & 0xff; + buf[1] = (blocks >> 16) & 0xff; +@@ -781,7 +781,7 @@ int target_emulate_readcapacity(struct se_task *task) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) + put_unaligned_be32(0xFFFFFFFF, &buf[0]); + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); +@@ -795,7 +795,7 @@ int target_emulate_readcapacity_16(struct se_task *task) + unsigned char *buf; + unsigned long long blocks = dev->transport->get_blocks(dev); + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + buf[0] = (blocks >> 56) & 0xff; + buf[1] = (blocks >> 48) & 0xff; +@@ -816,7 +816,7 @@ int target_emulate_readcapacity_16(struct se_task *task) + if (dev->se_sub_dev->se_dev_attrib.emulate_tpu || dev->se_sub_dev->se_dev_attrib.emulate_tpws) + buf[14] = 0x80; + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); +@@ -1029,9 +1029,9 @@ int target_emulate_modesense(struct se_task *task) + offset = cmd->data_length; + } + +- rbuf = 
transport_kmap_first_data_page(cmd); ++ rbuf = transport_kmap_data_sg(cmd); + memcpy(rbuf, buf, offset); +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); +@@ -1053,7 +1053,7 @@ int target_emulate_request_sense(struct se_task *task) + return -ENOSYS; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) { + /* +@@ -1099,7 +1099,7 @@ int target_emulate_request_sense(struct se_task *task) + } + + end: +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); + return 0; +@@ -1133,7 +1133,7 @@ int target_emulate_unmap(struct se_task *task) + dl = get_unaligned_be16(&cdb[0]); + bd_dl = get_unaligned_be16(&cdb[2]); + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + ptr = &buf[offset]; + pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu" +@@ -1157,7 +1157,7 @@ int target_emulate_unmap(struct se_task *task) + } + + err: +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + if (!ret) { + task->task_scsi_status = GOOD; + transport_complete_task(task, 1); +diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c +index 9b86394..19f8aca 100644 +--- a/drivers/target/target_core_device.c ++++ b/drivers/target/target_core_device.c +@@ -658,7 +658,7 @@ int target_report_luns(struct se_task *se_task) + unsigned char *buf; + u32 cdb_offset = 0, lun_count = 0, offset = 8, i; + +- buf = transport_kmap_first_data_page(se_cmd); ++ buf = (unsigned char *) transport_kmap_data_sg(se_cmd); + + /* + * If no struct se_session pointer is present, this struct se_cmd is +@@ -696,7 +696,7 @@ int target_report_luns(struct se_task *se_task) + * See SPC3 r07, page 159. + */ + done: +- transport_kunmap_first_data_page(se_cmd); ++ transport_kunmap_data_sg(se_cmd); + lun_count *= 8; + buf[0] = ((lun_count >> 24) & 0xff); + buf[1] = ((lun_count >> 16) & 0xff); +diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c +index 9119d92..778c1a6 100644 +--- a/drivers/target/target_core_pr.c ++++ b/drivers/target/target_core_pr.c +@@ -1538,7 +1538,7 @@ static int core_scsi3_decode_spec_i_port( + tidh_new->dest_local_nexus = 1; + list_add_tail(&tidh_new->dest_list, &tid_dest_list); + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + /* + * For a PERSISTENT RESERVE OUT specify initiator ports payload, + * first extract TransportID Parameter Data Length, and make sure +@@ -1789,7 +1789,7 @@ static int core_scsi3_decode_spec_i_port( + + } + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + /* + * Go ahead and create a registrations from tid_dest_list for the +@@ -1837,7 +1837,7 @@ static int core_scsi3_decode_spec_i_port( + + return 0; + out: +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + /* + * For the failure case, release everything from tid_dest_list + * including *dest_pr_reg and the configfs dependances.. +@@ -3429,14 +3429,14 @@ static int core_scsi3_emulate_pro_register_and_move( + * will be moved to for the TransportID containing SCSI initiator WWN + * information. 
+ */ +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + rtpi = (buf[18] & 0xff) << 8; + rtpi |= buf[19] & 0xff; + tid_len = (buf[20] & 0xff) << 24; + tid_len |= (buf[21] & 0xff) << 16; + tid_len |= (buf[22] & 0xff) << 8; + tid_len |= buf[23] & 0xff; +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + buf = NULL; + + if ((tid_len + 24) != cmd->data_length) { +@@ -3488,7 +3488,7 @@ static int core_scsi3_emulate_pro_register_and_move( + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + proto_ident = (buf[24] & 0x0f); + #if 0 + pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:" +@@ -3522,7 +3522,7 @@ static int core_scsi3_emulate_pro_register_and_move( + goto out; + } + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + buf = NULL; + + pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s" +@@ -3787,13 +3787,13 @@ after_iport_check: + " REGISTER_AND_MOVE\n"); + } + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + core_scsi3_put_pr_reg(dest_pr_reg); + return 0; + out: + if (buf) +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + if (dest_se_deve) + core_scsi3_lunacl_undepend_item(dest_se_deve); + if (dest_node_acl) +@@ -3867,7 +3867,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task) + scope = (cdb[2] & 0xf0); + type = (cdb[2] & 0x0f); + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + /* + * From PERSISTENT_RESERVE_OUT parameter list (payload) + */ +@@ -3885,7 +3885,7 @@ int target_scsi3_emulate_pr_out(struct se_task *task) + aptpl = (buf[17] & 0x01); + unreg = (buf[17] & 0x02); + } +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + buf = NULL; + + /* +@@ -3985,7 +3985,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); +@@ -4019,7 +4019,7 @@ static int core_scsi3_pri_read_keys(struct se_cmd *cmd) + buf[6] = ((add_len >> 8) & 0xff); + buf[7] = (add_len & 0xff); + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + return 0; + } +@@ -4045,7 +4045,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); + buf[2] = ((su_dev->t10_pr.pr_generation >> 8) & 0xff); +@@ -4104,7 +4104,7 @@ static int core_scsi3_pri_read_reservation(struct se_cmd *cmd) + + err: + spin_unlock(&se_dev->dev_reservation_lock); +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + return 0; + } +@@ -4128,7 +4128,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + buf[0] = ((add_len << 8) & 0xff); + buf[1] = (add_len & 0xff); +@@ -4160,7 +4160,7 @@ static int core_scsi3_pri_report_capabilities(struct se_cmd *cmd) + buf[4] |= 0x02; /* PR_TYPE_WRITE_EXCLUSIVE */ + buf[5] |= 0x01; /* PR_TYPE_EXCLUSIVE_ACCESS_ALLREG */ + +- 
transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + return 0; + } +@@ -4190,7 +4190,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) + return -EINVAL; + } + +- buf = transport_kmap_first_data_page(cmd); ++ buf = transport_kmap_data_sg(cmd); + + buf[0] = ((su_dev->t10_pr.pr_generation >> 24) & 0xff); + buf[1] = ((su_dev->t10_pr.pr_generation >> 16) & 0xff); +@@ -4311,7 +4311,7 @@ static int core_scsi3_pri_read_full_status(struct se_cmd *cmd) + buf[6] = ((add_len >> 8) & 0xff); + buf[7] = (add_len & 0xff); + +- transport_kunmap_first_data_page(cmd); ++ transport_kunmap_data_sg(cmd); + + return 0; + } +diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c +index 8b15e56..5c12137 100644 +--- a/drivers/target/target_core_pscsi.c ++++ b/drivers/target/target_core_pscsi.c +@@ -695,7 +695,7 @@ static int pscsi_transport_complete(struct se_task *task) + + if (task->task_se_cmd->se_deve->lun_flags & + TRANSPORT_LUNFLAGS_READ_ONLY) { +- unsigned char *buf = transport_kmap_first_data_page(task->task_se_cmd); ++ unsigned char *buf = transport_kmap_data_sg(task->task_se_cmd); + + if (cdb[0] == MODE_SENSE_10) { + if (!(buf[3] & 0x80)) +@@ -705,7 +705,7 @@ static int pscsi_transport_complete(struct se_task *task) + buf[2] |= 0x80; + } + +- transport_kunmap_first_data_page(task->task_se_cmd); ++ transport_kunmap_data_sg(task->task_se_cmd); + } + } + after_mode_sense: +diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c +index 861628e..e4ddb93 100644 +--- a/drivers/target/target_core_transport.c ++++ b/drivers/target/target_core_transport.c +@@ -3053,11 +3053,6 @@ static int transport_generic_cmd_sequencer( + (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB))) + goto out_unsupported_cdb; + +- /* Let's limit control cdbs to a page, for simplicity's sake. */ +- if ((cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) && +- size > PAGE_SIZE) +- goto out_invalid_cdb_field; +- + transport_set_supported_SAM_opcode(cmd); + return ret; + +@@ -3435,9 +3430,11 @@ int transport_generic_map_mem_to_cmd( + } + EXPORT_SYMBOL(transport_generic_map_mem_to_cmd); + +-void *transport_kmap_first_data_page(struct se_cmd *cmd) ++void *transport_kmap_data_sg(struct se_cmd *cmd) + { + struct scatterlist *sg = cmd->t_data_sg; ++ struct page **pages; ++ int i; + + BUG_ON(!sg); + /* +@@ -3445,15 +3442,41 @@ void *transport_kmap_first_data_page(struct se_cmd *cmd) + * tcm_loop who may be using a contig buffer from the SCSI midlayer for + * control CDBs passed as SGLs via transport_generic_map_mem_to_cmd() + */ +- return kmap(sg_page(sg)) + sg->offset; ++ if (!cmd->t_data_nents) ++ return NULL; ++ else if (cmd->t_data_nents == 1) ++ return kmap(sg_page(sg)) + sg->offset; ++ ++ /* >1 page. 
use vmap */ ++ pages = kmalloc(sizeof(*pages) * cmd->t_data_nents, GFP_KERNEL); ++ if (!pages) ++ return NULL; ++ ++ /* convert sg[] to pages[] */ ++ for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, i) { ++ pages[i] = sg_page(sg); ++ } ++ ++ cmd->t_data_vmap = vmap(pages, cmd->t_data_nents, VM_MAP, PAGE_KERNEL); ++ kfree(pages); ++ if (!cmd->t_data_vmap) ++ return NULL; ++ ++ return cmd->t_data_vmap + cmd->t_data_sg[0].offset; + } +-EXPORT_SYMBOL(transport_kmap_first_data_page); ++EXPORT_SYMBOL(transport_kmap_data_sg); + +-void transport_kunmap_first_data_page(struct se_cmd *cmd) ++void transport_kunmap_data_sg(struct se_cmd *cmd) + { +- kunmap(sg_page(cmd->t_data_sg)); ++ if (!cmd->t_data_nents) ++ return; ++ else if (cmd->t_data_nents == 1) ++ kunmap(sg_page(cmd->t_data_sg)); ++ ++ vunmap(cmd->t_data_vmap); ++ cmd->t_data_vmap = NULL; + } +-EXPORT_SYMBOL(transport_kunmap_first_data_page); ++EXPORT_SYMBOL(transport_kunmap_data_sg); + + static int + transport_generic_get_mem(struct se_cmd *cmd) +diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c +index a004db3..61d08dd 100644 +--- a/drivers/usb/core/hcd-pci.c ++++ b/drivers/usb/core/hcd-pci.c +@@ -187,7 +187,10 @@ int usb_hcd_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) + return -ENODEV; + dev->current_state = PCI_D0; + +- if (!dev->irq) { ++ /* The xHCI driver supports MSI and MSI-X, ++ * so don't fail if the BIOS doesn't provide a legacy IRQ. ++ */ ++ if (!dev->irq && (driver->flags & HCD_MASK) != HCD_USB3) { + dev_err(&dev->dev, + "Found HC with no IRQ. Check BIOS/PCI %s setup!\n", + pci_name(dev)); +diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c +index 179e364..8cb9304 100644 +--- a/drivers/usb/core/hcd.c ++++ b/drivers/usb/core/hcd.c +@@ -2465,8 +2465,10 @@ int usb_add_hcd(struct usb_hcd *hcd, + && device_can_wakeup(&hcd->self.root_hub->dev)) + dev_dbg(hcd->self.controller, "supports USB remote wakeup\n"); + +- /* enable irqs just before we start the controller */ +- if (usb_hcd_is_primary_hcd(hcd)) { ++ /* enable irqs just before we start the controller, ++ * if the BIOS provides legacy PCI irqs. ++ */ ++ if (usb_hcd_is_primary_hcd(hcd) && irqnum) { + retval = usb_hcd_request_irqs(hcd, irqnum, irqflags); + if (retval) + goto err_request_irq; +diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c +index 7978146..bc06a8f 100644 +--- a/drivers/usb/core/hub.c ++++ b/drivers/usb/core/hub.c +@@ -705,10 +705,26 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) + if (type == HUB_INIT3) + goto init3; + +- /* After a resume, port power should still be on. ++ /* The superspeed hub except for root hub has to use Hub Depth ++ * value as an offset into the route string to locate the bits ++ * it uses to determine the downstream port number. So hub driver ++ * should send a set hub depth request to superspeed hub after ++ * the superspeed hub is set configuration in initialization or ++ * reset procedure. ++ * ++ * After a resume, port power should still be on. + * For any other type of activation, turn it on. + */ + if (type != HUB_RESUME) { ++ if (hdev->parent && hub_is_superspeed(hdev)) { ++ ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), ++ HUB_SET_DEPTH, USB_RT_HUB, ++ hdev->level - 1, 0, NULL, 0, ++ USB_CTRL_SET_TIMEOUT); ++ if (ret < 0) ++ dev_err(hub->intfdev, ++ "set hub depth failed\n"); ++ } + + /* Speed up system boot by using a delayed_work for the + * hub's initial power-up delays. 
This is pretty awkward +@@ -987,18 +1003,6 @@ static int hub_configure(struct usb_hub *hub, + goto fail; + } + +- if (hub_is_superspeed(hdev) && (hdev->parent != NULL)) { +- ret = usb_control_msg(hdev, usb_sndctrlpipe(hdev, 0), +- HUB_SET_DEPTH, USB_RT_HUB, +- hdev->level - 1, 0, NULL, 0, +- USB_CTRL_SET_TIMEOUT); +- +- if (ret < 0) { +- message = "can't set hub depth"; +- goto fail; +- } +- } +- + /* Request the entire hub descriptor. + * hub->descriptor can handle USB_MAXCHILDREN ports, + * but the hub can/will return fewer bytes here. +diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c +index ac53a66..7732d69 100644 +--- a/drivers/usb/host/pci-quirks.c ++++ b/drivers/usb/host/pci-quirks.c +@@ -872,7 +872,17 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) + */ + if (pdev->vendor == 0x184e) /* vendor Netlogic */ + return; ++ if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI && ++ pdev->class != PCI_CLASS_SERIAL_USB_OHCI && ++ pdev->class != PCI_CLASS_SERIAL_USB_EHCI && ++ pdev->class != PCI_CLASS_SERIAL_USB_XHCI) ++ return; + ++ if (pci_enable_device(pdev) < 0) { ++ dev_warn(&pdev->dev, "Can't enable PCI device, " ++ "BIOS handoff failed.\n"); ++ return; ++ } + if (pdev->class == PCI_CLASS_SERIAL_USB_UHCI) + quirk_usb_handoff_uhci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_OHCI) +@@ -881,5 +891,6 @@ static void __devinit quirk_usb_early_handoff(struct pci_dev *pdev) + quirk_usb_disable_ehci(pdev); + else if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI) + quirk_usb_handoff_xhci(pdev); ++ pci_disable_device(pdev); + } + DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, quirk_usb_early_handoff); +diff --git a/drivers/usb/host/xhci-hub.c b/drivers/usb/host/xhci-hub.c +index 430e88f..a8b2980 100644 +--- a/drivers/usb/host/xhci-hub.c ++++ b/drivers/usb/host/xhci-hub.c +@@ -95,7 +95,7 @@ static void xhci_usb2_hub_descriptor(struct usb_hcd *hcd, struct xhci_hcd *xhci, + */ + memset(port_removable, 0, sizeof(port_removable)); + for (i = 0; i < ports; i++) { +- portsc = xhci_readl(xhci, xhci->usb3_ports[i]); ++ portsc = xhci_readl(xhci, xhci->usb2_ports[i]); + /* If a device is removable, PORTSC reports a 0, same as in the + * hub descriptor DeviceRemovable bits. + */ +diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c +index 0e4b25f..c69cf54 100644 +--- a/drivers/usb/host/xhci-mem.c ++++ b/drivers/usb/host/xhci-mem.c +@@ -1140,26 +1140,42 @@ static unsigned int xhci_parse_exponent_interval(struct usb_device *udev, + } + + /* +- * Convert bInterval expressed in frames (in 1-255 range) to exponent of ++ * Convert bInterval expressed in microframes (in 1-255 range) to exponent of + * microframes, rounded down to nearest power of 2. 
+ */ +-static unsigned int xhci_parse_frame_interval(struct usb_device *udev, +- struct usb_host_endpoint *ep) ++static unsigned int xhci_microframes_to_exponent(struct usb_device *udev, ++ struct usb_host_endpoint *ep, unsigned int desc_interval, ++ unsigned int min_exponent, unsigned int max_exponent) + { + unsigned int interval; + +- interval = fls(8 * ep->desc.bInterval) - 1; +- interval = clamp_val(interval, 3, 10); +- if ((1 << interval) != 8 * ep->desc.bInterval) ++ interval = fls(desc_interval) - 1; ++ interval = clamp_val(interval, min_exponent, max_exponent); ++ if ((1 << interval) != desc_interval) + dev_warn(&udev->dev, + "ep %#x - rounding interval to %d microframes, ep desc says %d microframes\n", + ep->desc.bEndpointAddress, + 1 << interval, +- 8 * ep->desc.bInterval); ++ desc_interval); + + return interval; + } + ++static unsigned int xhci_parse_microframe_interval(struct usb_device *udev, ++ struct usb_host_endpoint *ep) ++{ ++ return xhci_microframes_to_exponent(udev, ep, ++ ep->desc.bInterval, 0, 15); ++} ++ ++ ++static unsigned int xhci_parse_frame_interval(struct usb_device *udev, ++ struct usb_host_endpoint *ep) ++{ ++ return xhci_microframes_to_exponent(udev, ep, ++ ep->desc.bInterval * 8, 3, 10); ++} ++ + /* Return the polling or NAK interval. + * + * The polling interval is expressed in "microframes". If xHCI's Interval field +@@ -1178,7 +1194,7 @@ static unsigned int xhci_get_endpoint_interval(struct usb_device *udev, + /* Max NAK rate */ + if (usb_endpoint_xfer_control(&ep->desc) || + usb_endpoint_xfer_bulk(&ep->desc)) { +- interval = ep->desc.bInterval; ++ interval = xhci_parse_microframe_interval(udev, ep); + break; + } + /* Fall through - SS and HS isoc/int have same decoding */ +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index b33f059..034f554 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -352,6 +352,11 @@ static int xhci_try_enable_msi(struct usb_hcd *hcd) + /* hcd->irq is -1, we have MSI */ + return 0; + ++ if (!pdev->irq) { ++ xhci_err(xhci, "No msi-x/msi found and no IRQ in BIOS\n"); ++ return -EINVAL; ++ } ++ + /* fall back to legacy interrupt*/ + ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED, + hcd->irq_descr, hcd); +diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c +index a515237..33d25d4 100644 +--- a/drivers/usb/serial/cp210x.c ++++ b/drivers/usb/serial/cp210x.c +@@ -136,6 +136,8 @@ static const struct usb_device_id id_table[] = { + { USB_DEVICE(0x16DC, 0x0011) }, /* W-IE-NE-R Plein & Baus GmbH RCM Remote Control for MARATON Power Supply */ + { USB_DEVICE(0x16DC, 0x0012) }, /* W-IE-NE-R Plein & Baus GmbH MPOD Multi Channel Power Supply */ + { USB_DEVICE(0x16DC, 0x0015) }, /* W-IE-NE-R Plein & Baus GmbH CML Control, Monitoring and Data Logger */ ++ { USB_DEVICE(0x17A8, 0x0001) }, /* Kamstrup Optical Eye/3-wire */ ++ { USB_DEVICE(0x17A8, 0x0005) }, /* Kamstrup M-Bus Master MultiPort 250D */ + { USB_DEVICE(0x17F4, 0xAAAA) }, /* Wavesense Jazz blood glucose meter */ + { USB_DEVICE(0x1843, 0x0200) }, /* Vaisala USB Instrument Cable */ + { USB_DEVICE(0x18EF, 0xE00F) }, /* ELV USB-I2C-Interface */ +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index 338d082..68fa8c7 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -788,7 +788,6 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0012, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0013, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_MF628, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0016, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0017, 0xff, 0xff, 0xff), +@@ -803,7 +802,6 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0024, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0025, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, +- /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0026, 0xff, 0xff, 0xff) }, */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0028, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0029, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0030, 0xff, 0xff, 0xff) }, +@@ -828,7 +826,6 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0051, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0052, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, +- /* { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0053, 0xff, 0xff, 0xff) }, */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0054, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0055, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf1_blacklist }, +@@ -836,7 +833,6 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0057, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0058, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0061, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0062, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0063, 0xff, 0xff, 0xff), +@@ -846,7 +842,6 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0066, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0067, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0069, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0076, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0077, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0078, 0xff, 0xff, 0xff) }, +@@ -865,8 +860,6 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0095, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0096, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0097, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0098, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0099, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0104, 0xff, 0xff, 0xff), + .driver_info = (kernel_ulong_t)&net_intf4_blacklist }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0105, 0xff, 0xff, 0xff) }, +@@ -887,28 +880,18 @@ static const struct usb_device_id option_ids[] = { + { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0143, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0144, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0145, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0146, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0148, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0149, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0150, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0151, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0153, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0155, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0156, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0157, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0158, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0159, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0160, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0161, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0162, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0164, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0165, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1008, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1010, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1012, 0xff, 0xff, 0xff) }, +@@ -1083,127 +1066,27 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1298, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1299, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1300, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1401, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1402, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1403, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1404, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1405, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1406, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1407, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1408, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1409, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1410, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1411, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1412, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1413, 0xff, 0xff, 0xff) }, +- { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1414, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1415, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1416, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1417, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1418, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1419, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1420, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1421, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1422, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1423, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1424, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1425, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1426, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1427, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1428, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1429, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1430, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1431, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1432, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1433, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1434, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1435, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1436, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1437, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1438, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1439, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1440, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1441, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1442, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1443, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1444, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1445, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1446, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1447, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1448, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1449, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1450, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1451, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1452, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1453, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1454, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1455, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1456, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1457, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1458, 0xff, 0xff, 0xff) }, +- { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1459, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1460, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1461, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1462, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1463, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1464, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1465, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1466, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1467, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1468, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1469, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1470, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1471, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1472, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1473, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1474, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1475, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1476, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1477, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1478, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1479, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1480, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1481, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1482, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1483, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1484, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1485, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1486, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1487, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1488, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1489, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1490, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1491, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1492, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1493, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1494, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1495, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1496, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1497, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1498, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1499, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1500, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1501, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1502, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1503, 0xff, 0xff, 0xff) }, +- { 
USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1504, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1505, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1506, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1507, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1508, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1509, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x1510, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, ++ 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, ++ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0014, 0xff, 0xff, 0xff) }, /* ZTE CDMA products */ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0027, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0059, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0060, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0070, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0073, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0094, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0130, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0133, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0141, 0xff, 0xff, 0xff) }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2002, 0xff, +- 0xff, 0xff), .driver_info = (kernel_ulong_t)&zte_k3765_z_blacklist }, +- { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x2003, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0147, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0152, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0168, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0170, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0176, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, 0x0178, 0xff, 0xff, 0xff) }, ++ + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_CDMA_TECH, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC8710, 0xff, 0xff, 0xff) }, + { USB_DEVICE_AND_INTERFACE_INFO(ZTE_VENDOR_ID, ZTE_PRODUCT_AC2726, 0xff, 0xff, 0xff) }, +diff --git a/drivers/usb/serial/ti_usb_3410_5052.c b/drivers/usb/serial/ti_usb_3410_5052.c +index ea84456..21c82b0 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.c ++++ b/drivers/usb/serial/ti_usb_3410_5052.c +@@ -165,7 +165,7 @@ static unsigned int product_5052_count; + /* the array dimension is the number of default entries plus */ + /* TI_EXTRA_VID_PID_COUNT user defined entries plus 1 terminating */ + /* null entry */ +-static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { ++static struct usb_device_id ti_id_table_3410[14+TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, + { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, +@@ -179,6 +179,7 @@ static struct usb_device_id ti_id_table_3410[13+TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, ++ 
{ USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + }; + + static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { +@@ -188,7 +189,7 @@ static struct usb_device_id ti_id_table_5052[5+TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(TI_VENDOR_ID, TI_5052_FIRMWARE_PRODUCT_ID) }, + }; + +-static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] = { ++static struct usb_device_id ti_id_table_combined[18+2*TI_EXTRA_VID_PID_COUNT+1] = { + { USB_DEVICE(TI_VENDOR_ID, TI_3410_PRODUCT_ID) }, + { USB_DEVICE(TI_VENDOR_ID, TI_3410_EZ430_ID) }, + { USB_DEVICE(MTS_VENDOR_ID, MTS_GSM_NO_FW_PRODUCT_ID) }, +@@ -206,6 +207,7 @@ static struct usb_device_id ti_id_table_combined[17+2*TI_EXTRA_VID_PID_COUNT+1] + { USB_DEVICE(IBM_VENDOR_ID, IBM_4543_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_454B_PRODUCT_ID) }, + { USB_DEVICE(IBM_VENDOR_ID, IBM_454C_PRODUCT_ID) }, ++ { USB_DEVICE(ABBOTT_VENDOR_ID, ABBOTT_PRODUCT_ID) }, + { } + }; + +diff --git a/drivers/usb/serial/ti_usb_3410_5052.h b/drivers/usb/serial/ti_usb_3410_5052.h +index 2aac195..f140f1b 100644 +--- a/drivers/usb/serial/ti_usb_3410_5052.h ++++ b/drivers/usb/serial/ti_usb_3410_5052.h +@@ -49,6 +49,10 @@ + #define MTS_MT9234ZBA_PRODUCT_ID 0xF115 + #define MTS_MT9234ZBAOLD_PRODUCT_ID 0x0319 + ++/* Abbott Diabetics vendor and product ids */ ++#define ABBOTT_VENDOR_ID 0x1a61 ++#define ABBOTT_PRODUCT_ID 0x3410 ++ + /* Commands */ + #define TI_GET_VERSION 0x01 + #define TI_GET_PORT_STATUS 0x02 +diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c +index 9e069ef..db51ba1 100644 +--- a/drivers/usb/storage/usb.c ++++ b/drivers/usb/storage/usb.c +@@ -788,15 +788,19 @@ static void quiesce_and_remove_host(struct us_data *us) + struct Scsi_Host *host = us_to_host(us); + + /* If the device is really gone, cut short reset delays */ +- if (us->pusb_dev->state == USB_STATE_NOTATTACHED) ++ if (us->pusb_dev->state == USB_STATE_NOTATTACHED) { + set_bit(US_FLIDX_DISCONNECTING, &us->dflags); ++ wake_up(&us->delay_wait); ++ } + +- /* Prevent SCSI-scanning (if it hasn't started yet) +- * and wait for the SCSI-scanning thread to stop. ++ /* Prevent SCSI scanning (if it hasn't started yet) ++ * or wait for the SCSI-scanning routine to stop. + */ +- set_bit(US_FLIDX_DONT_SCAN, &us->dflags); +- wake_up(&us->delay_wait); +- wait_for_completion(&us->scanning_done); ++ cancel_delayed_work_sync(&us->scan_dwork); ++ ++ /* Balance autopm calls if scanning was cancelled */ ++ if (test_bit(US_FLIDX_SCAN_PENDING, &us->dflags)) ++ usb_autopm_put_interface_no_suspend(us->pusb_intf); + + /* Removing the host will perform an orderly shutdown: caches + * synchronized, disks spun down, etc. +@@ -823,52 +827,28 @@ static void release_everything(struct us_data *us) + scsi_host_put(us_to_host(us)); + } + +-/* Thread to carry out delayed SCSI-device scanning */ +-static int usb_stor_scan_thread(void * __us) ++/* Delayed-work routine to carry out SCSI-device scanning */ ++static void usb_stor_scan_dwork(struct work_struct *work) + { +- struct us_data *us = (struct us_data *)__us; ++ struct us_data *us = container_of(work, struct us_data, ++ scan_dwork.work); + struct device *dev = &us->pusb_intf->dev; + +- dev_dbg(dev, "device found\n"); ++ dev_dbg(dev, "starting scan\n"); + +- set_freezable_with_signal(); +- /* +- * Wait for the timeout to expire or for a disconnect +- * +- * We can't freeze in this thread or we risk causing khubd to +- * fail to freeze, but we can't be non-freezable either. 
Nor can +- * khubd freeze while waiting for scanning to complete as it may +- * hold the device lock, causing a hang when suspending devices. +- * So we request a fake signal when freezing and use +- * interruptible sleep to kick us out of our wait early when +- * freezing happens. +- */ +- if (delay_use > 0) { +- dev_dbg(dev, "waiting for device to settle " +- "before scanning\n"); +- wait_event_interruptible_timeout(us->delay_wait, +- test_bit(US_FLIDX_DONT_SCAN, &us->dflags), +- delay_use * HZ); ++ /* For bulk-only devices, determine the max LUN value */ ++ if (us->protocol == USB_PR_BULK && !(us->fflags & US_FL_SINGLE_LUN)) { ++ mutex_lock(&us->dev_mutex); ++ us->max_lun = usb_stor_Bulk_max_lun(us); ++ mutex_unlock(&us->dev_mutex); + } ++ scsi_scan_host(us_to_host(us)); ++ dev_dbg(dev, "scan complete\n"); + +- /* If the device is still connected, perform the scanning */ +- if (!test_bit(US_FLIDX_DONT_SCAN, &us->dflags)) { +- +- /* For bulk-only devices, determine the max LUN value */ +- if (us->protocol == USB_PR_BULK && +- !(us->fflags & US_FL_SINGLE_LUN)) { +- mutex_lock(&us->dev_mutex); +- us->max_lun = usb_stor_Bulk_max_lun(us); +- mutex_unlock(&us->dev_mutex); +- } +- scsi_scan_host(us_to_host(us)); +- dev_dbg(dev, "scan complete\n"); +- +- /* Should we unbind if no devices were detected? */ +- } ++ /* Should we unbind if no devices were detected? */ + + usb_autopm_put_interface(us->pusb_intf); +- complete_and_exit(&us->scanning_done, 0); ++ clear_bit(US_FLIDX_SCAN_PENDING, &us->dflags); + } + + static unsigned int usb_stor_sg_tablesize(struct usb_interface *intf) +@@ -915,7 +895,7 @@ int usb_stor_probe1(struct us_data **pus, + init_completion(&us->cmnd_ready); + init_completion(&(us->notify)); + init_waitqueue_head(&us->delay_wait); +- init_completion(&us->scanning_done); ++ INIT_DELAYED_WORK(&us->scan_dwork, usb_stor_scan_dwork); + + /* Associate the us_data structure with the USB device */ + result = associate_dev(us, intf); +@@ -946,7 +926,6 @@ EXPORT_SYMBOL_GPL(usb_stor_probe1); + /* Second part of general USB mass-storage probing */ + int usb_stor_probe2(struct us_data *us) + { +- struct task_struct *th; + int result; + struct device *dev = &us->pusb_intf->dev; + +@@ -987,20 +966,14 @@ int usb_stor_probe2(struct us_data *us) + goto BadDevice; + } + +- /* Start up the thread for delayed SCSI-device scanning */ +- th = kthread_create(usb_stor_scan_thread, us, "usb-stor-scan"); +- if (IS_ERR(th)) { +- dev_warn(dev, +- "Unable to start the device-scanning thread\n"); +- complete(&us->scanning_done); +- quiesce_and_remove_host(us); +- result = PTR_ERR(th); +- goto BadDevice; +- } +- ++ /* Submit the delayed_work for SCSI-device scanning */ + usb_autopm_get_interface_no_resume(us->pusb_intf); +- wake_up_process(th); ++ set_bit(US_FLIDX_SCAN_PENDING, &us->dflags); + ++ if (delay_use > 0) ++ dev_dbg(dev, "waiting for device to settle before scanning\n"); ++ queue_delayed_work(system_freezable_wq, &us->scan_dwork, ++ delay_use * HZ); + return 0; + + /* We come here if there are any problems */ +diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h +index 7b0f211..75f70f0 100644 +--- a/drivers/usb/storage/usb.h ++++ b/drivers/usb/storage/usb.h +@@ -47,6 +47,7 @@ + #include <linux/blkdev.h> + #include <linux/completion.h> + #include <linux/mutex.h> ++#include <linux/workqueue.h> + #include <scsi/scsi_host.h> + + struct us_data; +@@ -72,7 +73,7 @@ struct us_unusual_dev { + #define US_FLIDX_DISCONNECTING 3 /* disconnect in progress */ + #define US_FLIDX_RESETTING 4 /* device 
reset in progress */ + #define US_FLIDX_TIMED_OUT 5 /* SCSI midlayer timed out */ +-#define US_FLIDX_DONT_SCAN 6 /* don't scan (disconnect) */ ++#define US_FLIDX_SCAN_PENDING 6 /* scanning not yet done */ + #define US_FLIDX_REDO_READ10 7 /* redo READ(10) command */ + #define US_FLIDX_READ10_WORKED 8 /* previous READ(10) succeeded */ + +@@ -147,8 +148,8 @@ struct us_data { + /* mutual exclusion and synchronization structures */ + struct completion cmnd_ready; /* to sleep thread on */ + struct completion notify; /* thread begin/end */ +- wait_queue_head_t delay_wait; /* wait during scan, reset */ +- struct completion scanning_done; /* wait for scan thread */ ++ wait_queue_head_t delay_wait; /* wait during reset */ ++ struct delayed_work scan_dwork; /* for async scanning */ + + /* subdriver information */ + void *extra; /* Any extra data */ +diff --git a/drivers/video/omap2/dss/dpi.c b/drivers/video/omap2/dss/dpi.c +index 976ac23..c04205c 100644 +--- a/drivers/video/omap2/dss/dpi.c ++++ b/drivers/video/omap2/dss/dpi.c +@@ -180,6 +180,11 @@ int omapdss_dpi_display_enable(struct omap_dss_device *dssdev) + { + int r; + ++ if (cpu_is_omap34xx() && !dpi.vdds_dsi_reg) { ++ DSSERR("no VDSS_DSI regulator\n"); ++ return -ENODEV; ++ } ++ + if (dssdev->manager == NULL) { + DSSERR("failed to enable display: no manager\n"); + return -ENODEV; +diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c +index d2039ca..af11098 100644 +--- a/fs/ecryptfs/inode.c ++++ b/fs/ecryptfs/inode.c +@@ -1104,6 +1104,8 @@ ecryptfs_setxattr(struct dentry *dentry, const char *name, const void *value, + } + + rc = vfs_setxattr(lower_dentry, name, value, size, flags); ++ if (!rc) ++ fsstack_copy_attr_all(dentry->d_inode, lower_dentry->d_inode); + out: + return rc; + } +diff --git a/fs/eventpoll.c b/fs/eventpoll.c +index 828e750..ea54cde 100644 +--- a/fs/eventpoll.c ++++ b/fs/eventpoll.c +@@ -197,6 +197,12 @@ struct eventpoll { + + /* The user that created the eventpoll descriptor */ + struct user_struct *user; ++ ++ struct file *file; ++ ++ /* used to optimize loop detection check */ ++ int visited; ++ struct list_head visited_list_link; + }; + + /* Wait structure used by the poll hooks */ +@@ -255,6 +261,15 @@ static struct kmem_cache *epi_cache __read_mostly; + /* Slab cache used to allocate "struct eppoll_entry" */ + static struct kmem_cache *pwq_cache __read_mostly; + ++/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ ++static LIST_HEAD(visited_list); ++ ++/* ++ * List of files with newly added links, where we may need to limit the number ++ * of emanating paths. Protected by the epmutex. 
++ */ ++static LIST_HEAD(tfile_check_list); ++ + #ifdef CONFIG_SYSCTL + + #include <linux/sysctl.h> +@@ -276,6 +291,12 @@ ctl_table epoll_table[] = { + }; + #endif /* CONFIG_SYSCTL */ + ++static const struct file_operations eventpoll_fops; ++ ++static inline int is_file_epoll(struct file *f) ++{ ++ return f->f_op == &eventpoll_fops; ++} + + /* Setup the structure that is used as key for the RB tree */ + static inline void ep_set_ffd(struct epoll_filefd *ffd, +@@ -299,6 +320,11 @@ static inline int ep_is_linked(struct list_head *p) + return !list_empty(p); + } + ++static inline struct eppoll_entry *ep_pwq_from_wait(wait_queue_t *p) ++{ ++ return container_of(p, struct eppoll_entry, wait); ++} ++ + /* Get the "struct epitem" from a wait queue pointer */ + static inline struct epitem *ep_item_from_wait(wait_queue_t *p) + { +@@ -446,6 +472,18 @@ static void ep_poll_safewake(wait_queue_head_t *wq) + put_cpu(); + } + ++static void ep_remove_wait_queue(struct eppoll_entry *pwq) ++{ ++ wait_queue_head_t *whead; ++ ++ rcu_read_lock(); ++ /* If it is cleared by POLLFREE, it should be rcu-safe */ ++ whead = rcu_dereference(pwq->whead); ++ if (whead) ++ remove_wait_queue(whead, &pwq->wait); ++ rcu_read_unlock(); ++} ++ + /* + * This function unregisters poll callbacks from the associated file + * descriptor. Must be called with "mtx" held (or "epmutex" if called from +@@ -460,7 +498,7 @@ static void ep_unregister_pollwait(struct eventpoll *ep, struct epitem *epi) + pwq = list_first_entry(lsthead, struct eppoll_entry, llink); + + list_del(&pwq->llink); +- remove_wait_queue(pwq->whead, &pwq->wait); ++ ep_remove_wait_queue(pwq); + kmem_cache_free(pwq_cache, pwq); + } + } +@@ -711,12 +749,6 @@ static const struct file_operations eventpoll_fops = { + .llseek = noop_llseek, + }; + +-/* Fast test to see if the file is an eventpoll file */ +-static inline int is_file_epoll(struct file *f) +-{ +- return f->f_op == &eventpoll_fops; +-} +- + /* + * This is called from eventpoll_release() to unlink files from the eventpoll + * interface. We need to have this facility to cleanup correctly files that are +@@ -827,6 +859,17 @@ static int ep_poll_callback(wait_queue_t *wait, unsigned mode, int sync, void *k + struct epitem *epi = ep_item_from_wait(wait); + struct eventpoll *ep = epi->ep; + ++ if ((unsigned long)key & POLLFREE) { ++ ep_pwq_from_wait(wait)->whead = NULL; ++ /* ++ * whead = NULL above can race with ep_remove_wait_queue() ++ * which can do another remove_wait_queue() after us, so we ++ * can't use __remove_wait_queue(). whead->lock is held by ++ * the caller. ++ */ ++ list_del_init(&wait->task_list); ++ } ++ + spin_lock_irqsave(&ep->lock, flags); + + /* +@@ -926,6 +969,99 @@ static void ep_rbtree_insert(struct eventpoll *ep, struct epitem *epi) + rb_insert_color(&epi->rbn, &ep->rbr); + } + ++ ++ ++#define PATH_ARR_SIZE 5 ++/* ++ * These are the number paths of length 1 to 5, that we are allowing to emanate ++ * from a single file of interest. For example, we allow 1000 paths of length ++ * 1, to emanate from each file of interest. This essentially represents the ++ * potential wakeup paths, which need to be limited in order to avoid massive ++ * uncontrolled wakeup storms. The common use case should be a single ep which ++ * is connected to n file sources. In this case each file source has 1 path ++ * of length 1. Thus, the numbers below should be more than sufficient. These ++ * path limits are enforced during an EPOLL_CTL_ADD operation, since a modify ++ * and delete can't add additional paths. 
Protected by the epmutex. ++ */ ++static const int path_limits[PATH_ARR_SIZE] = { 1000, 500, 100, 50, 10 }; ++static int path_count[PATH_ARR_SIZE]; ++ ++static int path_count_inc(int nests) ++{ ++ if (++path_count[nests] > path_limits[nests]) ++ return -1; ++ return 0; ++} ++ ++static void path_count_init(void) ++{ ++ int i; ++ ++ for (i = 0; i < PATH_ARR_SIZE; i++) ++ path_count[i] = 0; ++} ++ ++static int reverse_path_check_proc(void *priv, void *cookie, int call_nests) ++{ ++ int error = 0; ++ struct file *file = priv; ++ struct file *child_file; ++ struct epitem *epi; ++ ++ list_for_each_entry(epi, &file->f_ep_links, fllink) { ++ child_file = epi->ep->file; ++ if (is_file_epoll(child_file)) { ++ if (list_empty(&child_file->f_ep_links)) { ++ if (path_count_inc(call_nests)) { ++ error = -1; ++ break; ++ } ++ } else { ++ error = ep_call_nested(&poll_loop_ncalls, ++ EP_MAX_NESTS, ++ reverse_path_check_proc, ++ child_file, child_file, ++ current); ++ } ++ if (error != 0) ++ break; ++ } else { ++ printk(KERN_ERR "reverse_path_check_proc: " ++ "file is not an ep!\n"); ++ } ++ } ++ return error; ++} ++ ++/** ++ * reverse_path_check - The tfile_check_list is list of file *, which have ++ * links that are proposed to be newly added. We need to ++ * make sure that those added links don't add too many ++ * paths such that we will spend all our time waking up ++ * eventpoll objects. ++ * ++ * Returns: Returns zero if the proposed links don't create too many paths, ++ * -1 otherwise. ++ */ ++static int reverse_path_check(void) ++{ ++ int length = 0; ++ int error = 0; ++ struct file *current_file; ++ ++ /* let's call this for all tfiles */ ++ list_for_each_entry(current_file, &tfile_check_list, f_tfile_llink) { ++ length++; ++ path_count_init(); ++ error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ++ reverse_path_check_proc, current_file, ++ current_file, current); ++ if (error) ++ break; ++ } ++ return error; ++} ++ + /* + * Must be called with "mtx" held. 
+ */ +@@ -987,6 +1123,11 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, + */ + ep_rbtree_insert(ep, epi); + ++ /* now check if we've created too many backpaths */ ++ error = -EINVAL; ++ if (reverse_path_check()) ++ goto error_remove_epi; ++ + /* We have to drop the new item inside our item list to keep track of it */ + spin_lock_irqsave(&ep->lock, flags); + +@@ -1011,6 +1152,14 @@ static int ep_insert(struct eventpoll *ep, struct epoll_event *event, + + return 0; + ++error_remove_epi: ++ spin_lock(&tfile->f_lock); ++ if (ep_is_linked(&epi->fllink)) ++ list_del_init(&epi->fllink); ++ spin_unlock(&tfile->f_lock); ++ ++ rb_erase(&epi->rbn, &ep->rbr); ++ + error_unregister: + ep_unregister_pollwait(ep, epi); + +@@ -1275,18 +1424,36 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + int error = 0; + struct file *file = priv; + struct eventpoll *ep = file->private_data; ++ struct eventpoll *ep_tovisit; + struct rb_node *rbp; + struct epitem *epi; + + mutex_lock_nested(&ep->mtx, call_nests + 1); ++ ep->visited = 1; ++ list_add(&ep->visited_list_link, &visited_list); + for (rbp = rb_first(&ep->rbr); rbp; rbp = rb_next(rbp)) { + epi = rb_entry(rbp, struct epitem, rbn); + if (unlikely(is_file_epoll(epi->ffd.file))) { ++ ep_tovisit = epi->ffd.file->private_data; ++ if (ep_tovisit->visited) ++ continue; + error = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, +- ep_loop_check_proc, epi->ffd.file, +- epi->ffd.file->private_data, current); ++ ep_loop_check_proc, epi->ffd.file, ++ ep_tovisit, current); + if (error != 0) + break; ++ } else { ++ /* ++ * If we've reached a file that is not associated with ++ * an ep, then we need to check if the newly added ++ * links are going to add too many wakeup paths. We do ++ * this by adding it to the tfile_check_list, if it's ++ * not already there, and calling reverse_path_check() ++ * during ep_insert(). ++ */ ++ if (list_empty(&epi->ffd.file->f_tfile_llink)) ++ list_add(&epi->ffd.file->f_tfile_llink, ++ &tfile_check_list); + } + } + mutex_unlock(&ep->mtx); +@@ -1307,8 +1474,31 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) + */ + static int ep_loop_check(struct eventpoll *ep, struct file *file) + { +- return ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, ++ int ret; ++ struct eventpoll *ep_cur, *ep_next; ++ ++ ret = ep_call_nested(&poll_loop_ncalls, EP_MAX_NESTS, + ep_loop_check_proc, file, ep, current); ++ /* clear visited list */ ++ list_for_each_entry_safe(ep_cur, ep_next, &visited_list, ++ visited_list_link) { ++ ep_cur->visited = 0; ++ list_del(&ep_cur->visited_list_link); ++ } ++ return ret; ++} ++ ++static void clear_tfile_check_list(void) ++{ ++ struct file *file; ++ ++ /* first clear the tfile_check_list */ ++ while (!list_empty(&tfile_check_list)) { ++ file = list_first_entry(&tfile_check_list, struct file, ++ f_tfile_llink); ++ list_del_init(&file->f_tfile_llink); ++ } ++ INIT_LIST_HEAD(&tfile_check_list); + } + + /* +@@ -1316,8 +1506,9 @@ static int ep_loop_check(struct eventpoll *ep, struct file *file) + */ + SYSCALL_DEFINE1(epoll_create1, int, flags) + { +- int error; ++ int error, fd; + struct eventpoll *ep = NULL; ++ struct file *file; + + /* Check the EPOLL_* constant for consistency. */ + BUILD_BUG_ON(EPOLL_CLOEXEC != O_CLOEXEC); +@@ -1334,11 +1525,25 @@ SYSCALL_DEFINE1(epoll_create1, int, flags) + * Creates all the items needed to setup an eventpoll file. That is, + * a file structure and a free file descriptor. 
+ */ +- error = anon_inode_getfd("[eventpoll]", &eventpoll_fops, ep, ++ fd = get_unused_fd_flags(O_RDWR | (flags & O_CLOEXEC)); ++ if (fd < 0) { ++ error = fd; ++ goto out_free_ep; ++ } ++ file = anon_inode_getfile("[eventpoll]", &eventpoll_fops, ep, + O_RDWR | (flags & O_CLOEXEC)); +- if (error < 0) +- ep_free(ep); +- ++ if (IS_ERR(file)) { ++ error = PTR_ERR(file); ++ goto out_free_fd; ++ } ++ fd_install(fd, file); ++ ep->file = file; ++ return fd; ++ ++out_free_fd: ++ put_unused_fd(fd); ++out_free_ep: ++ ep_free(ep); + return error; + } + +@@ -1404,21 +1609,27 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + /* + * When we insert an epoll file descriptor, inside another epoll file + * descriptor, there is the change of creating closed loops, which are +- * better be handled here, than in more critical paths. ++ * better be handled here, than in more critical paths. While we are ++ * checking for loops we also determine the list of files reachable ++ * and hang them on the tfile_check_list, so we can check that we ++ * haven't created too many possible wakeup paths. + * +- * We hold epmutex across the loop check and the insert in this case, in +- * order to prevent two separate inserts from racing and each doing the +- * insert "at the same time" such that ep_loop_check passes on both +- * before either one does the insert, thereby creating a cycle. ++ * We need to hold the epmutex across both ep_insert and ep_remove ++ * b/c we want to make sure we are looking at a coherent view of ++ * epoll network. + */ +- if (unlikely(is_file_epoll(tfile) && op == EPOLL_CTL_ADD)) { ++ if (op == EPOLL_CTL_ADD || op == EPOLL_CTL_DEL) { + mutex_lock(&epmutex); + did_lock_epmutex = 1; +- error = -ELOOP; +- if (ep_loop_check(ep, tfile) != 0) +- goto error_tgt_fput; + } +- ++ if (op == EPOLL_CTL_ADD) { ++ if (is_file_epoll(tfile)) { ++ error = -ELOOP; ++ if (ep_loop_check(ep, tfile) != 0) ++ goto error_tgt_fput; ++ } else ++ list_add(&tfile->f_tfile_llink, &tfile_check_list); ++ } + + mutex_lock_nested(&ep->mtx, 0); + +@@ -1437,6 +1648,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + error = ep_insert(ep, &epds, tfile, fd); + } else + error = -EEXIST; ++ clear_tfile_check_list(); + break; + case EPOLL_CTL_DEL: + if (epi) +@@ -1455,7 +1667,7 @@ SYSCALL_DEFINE4(epoll_ctl, int, epfd, int, op, int, fd, + mutex_unlock(&ep->mtx); + + error_tgt_fput: +- if (unlikely(did_lock_epmutex)) ++ if (did_lock_epmutex) + mutex_unlock(&epmutex); + + fput(tfile); +diff --git a/fs/namei.c b/fs/namei.c +index 5008f01..744e942 100644 +--- a/fs/namei.c ++++ b/fs/namei.c +@@ -1094,8 +1094,10 @@ static struct dentry *d_inode_lookup(struct dentry *parent, struct dentry *dentr + struct dentry *old; + + /* Don't create child dentry for a dead directory. 
*/ +- if (unlikely(IS_DEADDIR(inode))) ++ if (unlikely(IS_DEADDIR(inode))) { ++ dput(dentry); + return ERR_PTR(-ENOENT); ++ } + + old = inode->i_op->lookup(inode, dentry, nd); + if (unlikely(old)) { +diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c +index 055d702..e527030 100644 +--- a/fs/nfs/nfs4proc.c ++++ b/fs/nfs/nfs4proc.c +@@ -3568,8 +3568,8 @@ static ssize_t __nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t bu + } + if (npages > 1) { + /* for decoding across pages */ +- args.acl_scratch = alloc_page(GFP_KERNEL); +- if (!args.acl_scratch) ++ res.acl_scratch = alloc_page(GFP_KERNEL); ++ if (!res.acl_scratch) + goto out_free; + } + args.acl_len = npages * PAGE_SIZE; +@@ -3605,8 +3605,8 @@ out_free: + for (i = 0; i < npages; i++) + if (pages[i]) + __free_page(pages[i]); +- if (args.acl_scratch) +- __free_page(args.acl_scratch); ++ if (res.acl_scratch) ++ __free_page(res.acl_scratch); + return ret; + } + +@@ -4876,8 +4876,10 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) + clp->cl_rpcclient->cl_auth->au_flavor); + + res.server_scope = kzalloc(sizeof(struct server_scope), GFP_KERNEL); +- if (unlikely(!res.server_scope)) +- return -ENOMEM; ++ if (unlikely(!res.server_scope)) { ++ status = -ENOMEM; ++ goto out; ++ } + + status = rpc_call_sync(clp->cl_rpcclient, &msg, RPC_TASK_TIMEOUT); + if (!status) +@@ -4894,12 +4896,13 @@ int nfs4_proc_exchange_id(struct nfs_client *clp, struct rpc_cred *cred) + clp->server_scope = NULL; + } + +- if (!clp->server_scope) ++ if (!clp->server_scope) { + clp->server_scope = res.server_scope; +- else +- kfree(res.server_scope); ++ goto out; ++ } + } +- ++ kfree(res.server_scope); ++out: + dprintk("<-- %s status= %d\n", __func__, status); + return status; + } +diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c +index 6a7107a..a58eed7 100644 +--- a/fs/nfs/nfs4state.c ++++ b/fs/nfs/nfs4state.c +@@ -1071,6 +1071,8 @@ void nfs4_schedule_stateid_recovery(const struct nfs_server *server, struct nfs4 + { + struct nfs_client *clp = server->nfs_client; + ++ if (test_and_clear_bit(NFS_DELEGATED_STATE, &state->flags)) ++ nfs_async_inode_return_delegation(state->inode, &state->stateid); + nfs4_state_mark_reclaim_nograce(clp, state); + nfs4_schedule_state_manager(clp); + } +diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c +index dcaf693..68adab4 100644 +--- a/fs/nfs/nfs4xdr.c ++++ b/fs/nfs/nfs4xdr.c +@@ -2522,7 +2522,6 @@ static void nfs4_xdr_enc_getacl(struct rpc_rqst *req, struct xdr_stream *xdr, + + xdr_inline_pages(&req->rq_rcv_buf, replen << 2, + args->acl_pages, args->acl_pgbase, args->acl_len); +- xdr_set_scratch_buffer(xdr, page_address(args->acl_scratch), PAGE_SIZE); + + encode_nops(&hdr); + } +@@ -6034,6 +6033,10 @@ nfs4_xdr_dec_getacl(struct rpc_rqst *rqstp, struct xdr_stream *xdr, + struct compound_hdr hdr; + int status; + ++ if (res->acl_scratch != NULL) { ++ void *p = page_address(res->acl_scratch); ++ xdr_set_scratch_buffer(xdr, p, PAGE_SIZE); ++ } + status = decode_compound_hdr(xdr, &hdr); + if (status) + goto out; +diff --git a/fs/signalfd.c b/fs/signalfd.c +index 492465b..7ae2a57 100644 +--- a/fs/signalfd.c ++++ b/fs/signalfd.c +@@ -30,6 +30,21 @@ + #include <linux/signalfd.h> + #include <linux/syscalls.h> + ++void signalfd_cleanup(struct sighand_struct *sighand) ++{ ++ wait_queue_head_t *wqh = &sighand->signalfd_wqh; ++ /* ++ * The lockless check can race with remove_wait_queue() in progress, ++ * but in this case its caller should run under rcu_read_lock() and ++ * sighand_cachep is SLAB_DESTROY_BY_RCU, we 
can safely return. ++ */ ++ if (likely(!waitqueue_active(wqh))) ++ return; ++ ++ /* wait_queue_t->func(POLLFREE) should do remove_wait_queue() */ ++ wake_up_poll(wqh, POLLHUP | POLLFREE); ++} ++ + struct signalfd_ctx { + sigset_t sigmask; + }; +diff --git a/include/asm-generic/poll.h b/include/asm-generic/poll.h +index 44bce83..9ce7f44 100644 +--- a/include/asm-generic/poll.h ++++ b/include/asm-generic/poll.h +@@ -28,6 +28,8 @@ + #define POLLRDHUP 0x2000 + #endif + ++#define POLLFREE 0x4000 /* currently only for epoll */ ++ + struct pollfd { + int fd; + short events; +diff --git a/include/linux/eventpoll.h b/include/linux/eventpoll.h +index f362733..657ab55 100644 +--- a/include/linux/eventpoll.h ++++ b/include/linux/eventpoll.h +@@ -61,6 +61,7 @@ struct file; + static inline void eventpoll_init_file(struct file *file) + { + INIT_LIST_HEAD(&file->f_ep_links); ++ INIT_LIST_HEAD(&file->f_tfile_llink); + } + + +diff --git a/include/linux/fs.h b/include/linux/fs.h +index e0bc4ff..10b2288 100644 +--- a/include/linux/fs.h ++++ b/include/linux/fs.h +@@ -1001,6 +1001,7 @@ struct file { + #ifdef CONFIG_EPOLL + /* Used by fs/eventpoll.c to link all the hooks to this file */ + struct list_head f_ep_links; ++ struct list_head f_tfile_llink; + #endif /* #ifdef CONFIG_EPOLL */ + struct address_space *f_mapping; + #ifdef CONFIG_DEBUG_WRITECOUNT +diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h +index 6c898af..41116ab 100644 +--- a/include/linux/nfs_xdr.h ++++ b/include/linux/nfs_xdr.h +@@ -602,7 +602,6 @@ struct nfs_getaclargs { + size_t acl_len; + unsigned int acl_pgbase; + struct page ** acl_pages; +- struct page * acl_scratch; + struct nfs4_sequence_args seq_args; + }; + +@@ -612,6 +611,7 @@ struct nfs_getaclres { + size_t acl_len; + size_t acl_data_offset; + int acl_flags; ++ struct page * acl_scratch; + struct nfs4_sequence_res seq_res; + }; + +diff --git a/include/linux/signalfd.h b/include/linux/signalfd.h +index 3ff4961..247399b 100644 +--- a/include/linux/signalfd.h ++++ b/include/linux/signalfd.h +@@ -61,13 +61,16 @@ static inline void signalfd_notify(struct task_struct *tsk, int sig) + wake_up(&tsk->sighand->signalfd_wqh); + } + ++extern void signalfd_cleanup(struct sighand_struct *sighand); ++ + #else /* CONFIG_SIGNALFD */ + + static inline void signalfd_notify(struct task_struct *tsk, int sig) { } + ++static inline void signalfd_cleanup(struct sighand_struct *sighand) { } ++ + #endif /* CONFIG_SIGNALFD */ + + #endif /* __KERNEL__ */ + + #endif /* _LINUX_SIGNALFD_H */ +- +diff --git a/include/linux/usb/ch11.h b/include/linux/usb/ch11.h +index 4ebaf08..1eb735b 100644 +--- a/include/linux/usb/ch11.h ++++ b/include/linux/usb/ch11.h +@@ -62,12 +62,6 @@ + #define USB_PORT_FEAT_TEST 21 + #define USB_PORT_FEAT_INDICATOR 22 + #define USB_PORT_FEAT_C_PORT_L1 23 +-#define USB_PORT_FEAT_C_PORT_LINK_STATE 25 +-#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26 +-#define USB_PORT_FEAT_PORT_REMOTE_WAKE_MASK 27 +-#define USB_PORT_FEAT_BH_PORT_RESET 28 +-#define USB_PORT_FEAT_C_BH_PORT_RESET 29 +-#define USB_PORT_FEAT_FORCE_LINKPM_ACCEPT 30 + + /* + * Port feature selectors added by USB 3.0 spec. 
+@@ -76,8 +70,8 @@ + #define USB_PORT_FEAT_LINK_STATE 5 + #define USB_PORT_FEAT_U1_TIMEOUT 23 + #define USB_PORT_FEAT_U2_TIMEOUT 24 +-#define USB_PORT_FEAT_C_LINK_STATE 25 +-#define USB_PORT_FEAT_C_CONFIG_ERR 26 ++#define USB_PORT_FEAT_C_PORT_LINK_STATE 25 ++#define USB_PORT_FEAT_C_PORT_CONFIG_ERROR 26 + #define USB_PORT_FEAT_REMOTE_WAKE_MASK 27 + #define USB_PORT_FEAT_BH_PORT_RESET 28 + #define USB_PORT_FEAT_C_BH_PORT_RESET 29 +diff --git a/include/net/flow.h b/include/net/flow.h +index 57f15a7..2a7eefd 100644 +--- a/include/net/flow.h ++++ b/include/net/flow.h +@@ -90,6 +90,16 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; + } ++ ++/* Reset some input parameters after previous lookup */ ++static inline void flowi4_update_output(struct flowi4 *fl4, int oif, __u8 tos, ++ __be32 daddr, __be32 saddr) ++{ ++ fl4->flowi4_oif = oif; ++ fl4->flowi4_tos = tos; ++ fl4->daddr = daddr; ++ fl4->saddr = saddr; ++} + + + struct flowi6 { +diff --git a/include/net/route.h b/include/net/route.h +index 91855d1..b1c0d5b 100644 +--- a/include/net/route.h ++++ b/include/net/route.h +@@ -270,6 +270,7 @@ static inline struct rtable *ip_route_connect(struct flowi4 *fl4, + if (IS_ERR(rt)) + return rt; + ip_rt_put(rt); ++ flowi4_update_output(fl4, oif, tos, fl4->daddr, fl4->saddr); + } + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + return ip_route_output_flow(net, fl4, sk); +@@ -284,6 +285,9 @@ static inline struct rtable *ip_route_newports(struct flowi4 *fl4, struct rtable + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; + ip_rt_put(rt); ++ flowi4_update_output(fl4, sk->sk_bound_dev_if, ++ RT_CONN_FLAGS(sk), fl4->daddr, ++ fl4->saddr); + security_sk_classify_flow(sk, flowi4_to_flowi(fl4)); + return ip_route_output_flow(sock_net(sk), fl4, sk); + } +diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h +index f6bb08b..55ce96b 100644 +--- a/include/net/sch_generic.h ++++ b/include/net/sch_generic.h +@@ -220,9 +220,16 @@ struct tcf_proto { + + struct qdisc_skb_cb { + unsigned int pkt_len; +- long data[]; ++ unsigned char data[24]; + }; + ++static inline void qdisc_cb_private_validate(const struct sk_buff *skb, int sz) ++{ ++ struct qdisc_skb_cb *qcb; ++ BUILD_BUG_ON(sizeof(skb->cb) < sizeof(unsigned int) + sz); ++ BUILD_BUG_ON(sizeof(qcb->data) < sz); ++} ++ + static inline int qdisc_qlen(const struct Qdisc *q) + { + return q->q.qlen; +diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h +index a79886c..94bbec3 100644 +--- a/include/target/target_core_base.h ++++ b/include/target/target_core_base.h +@@ -486,6 +486,7 @@ struct se_cmd { + + struct scatterlist *t_data_sg; + unsigned int t_data_nents; ++ void *t_data_vmap; + struct scatterlist *t_bidi_data_sg; + unsigned int t_bidi_data_nents; + +diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h +index dac4f2d..72751e8 100644 +--- a/include/target/target_core_transport.h ++++ b/include/target/target_core_transport.h +@@ -129,8 +129,8 @@ extern void transport_init_se_cmd(struct se_cmd *, + struct target_core_fabric_ops *, + struct se_session *, u32, int, int, + unsigned char *); +-void *transport_kmap_first_data_page(struct se_cmd *cmd); +-void transport_kunmap_first_data_page(struct se_cmd *cmd); ++void *transport_kmap_data_sg(struct se_cmd *); ++void transport_kunmap_data_sg(struct se_cmd *); + extern int transport_generic_allocate_tasks(struct se_cmd *, unsigned char *); + extern int 
transport_handle_cdb_direct(struct se_cmd *); + extern int transport_generic_handle_cdb_map(struct se_cmd *); +diff --git a/kernel/fork.c b/kernel/fork.c +index da4a6a1..0acf42c0 100644 +--- a/kernel/fork.c ++++ b/kernel/fork.c +@@ -66,6 +66,7 @@ + #include <linux/user-return-notifier.h> + #include <linux/oom.h> + #include <linux/khugepaged.h> ++#include <linux/signalfd.h> + + #include <asm/pgtable.h> + #include <asm/pgalloc.h> +@@ -910,8 +911,10 @@ static int copy_sighand(unsigned long clone_flags, struct task_struct *tsk) + + void __cleanup_sighand(struct sighand_struct *sighand) + { +- if (atomic_dec_and_test(&sighand->count)) ++ if (atomic_dec_and_test(&sighand->count)) { ++ signalfd_cleanup(sighand); + kmem_cache_free(sighand_cachep, sighand); ++ } + } + + +diff --git a/kernel/irq/autoprobe.c b/kernel/irq/autoprobe.c +index 342d8f4..0119b9d 100644 +--- a/kernel/irq/autoprobe.c ++++ b/kernel/irq/autoprobe.c +@@ -53,7 +53,7 @@ unsigned long probe_irq_on(void) + if (desc->irq_data.chip->irq_set_type) + desc->irq_data.chip->irq_set_type(&desc->irq_data, + IRQ_TYPE_PROBE); +- irq_startup(desc); ++ irq_startup(desc, false); + } + raw_spin_unlock_irq(&desc->lock); + } +@@ -70,7 +70,7 @@ unsigned long probe_irq_on(void) + raw_spin_lock_irq(&desc->lock); + if (!desc->action && irq_settings_can_probe(desc)) { + desc->istate |= IRQS_AUTODETECT | IRQS_WAITING; +- if (irq_startup(desc)) ++ if (irq_startup(desc, false)) + desc->istate |= IRQS_PENDING; + } + raw_spin_unlock_irq(&desc->lock); +diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c +index f7c543a..fb7db75 100644 +--- a/kernel/irq/chip.c ++++ b/kernel/irq/chip.c +@@ -157,19 +157,22 @@ static void irq_state_set_masked(struct irq_desc *desc) + irqd_set(&desc->irq_data, IRQD_IRQ_MASKED); + } + +-int irq_startup(struct irq_desc *desc) ++int irq_startup(struct irq_desc *desc, bool resend) + { ++ int ret = 0; ++ + irq_state_clr_disabled(desc); + desc->depth = 0; + + if (desc->irq_data.chip->irq_startup) { +- int ret = desc->irq_data.chip->irq_startup(&desc->irq_data); ++ ret = desc->irq_data.chip->irq_startup(&desc->irq_data); + irq_state_clr_masked(desc); +- return ret; ++ } else { ++ irq_enable(desc); + } +- +- irq_enable(desc); +- return 0; ++ if (resend) ++ check_irq_resend(desc, desc->irq_data.irq); ++ return ret; + } + + void irq_shutdown(struct irq_desc *desc) +@@ -330,6 +333,24 @@ out_unlock: + } + EXPORT_SYMBOL_GPL(handle_simple_irq); + ++/* ++ * Called unconditionally from handle_level_irq() and only for oneshot ++ * interrupts from handle_fasteoi_irq() ++ */ ++static void cond_unmask_irq(struct irq_desc *desc) ++{ ++ /* ++ * We need to unmask in the following cases: ++ * - Standard level irq (IRQF_ONESHOT is not set) ++ * - Oneshot irq which did not wake the thread (caused by a ++ * spurious interrupt or a primary handler handling it ++ * completely). 
++ */ ++ if (!irqd_irq_disabled(&desc->irq_data) && ++ irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) ++ unmask_irq(desc); ++} ++ + /** + * handle_level_irq - Level type irq handler + * @irq: the interrupt number +@@ -362,8 +383,8 @@ handle_level_irq(unsigned int irq, struct irq_desc *desc) + + handle_irq_event(desc); + +- if (!irqd_irq_disabled(&desc->irq_data) && !(desc->istate & IRQS_ONESHOT)) +- unmask_irq(desc); ++ cond_unmask_irq(desc); ++ + out_unlock: + raw_spin_unlock(&desc->lock); + } +@@ -417,6 +438,9 @@ handle_fasteoi_irq(unsigned int irq, struct irq_desc *desc) + preflow_handler(desc); + handle_irq_event(desc); + ++ if (desc->istate & IRQS_ONESHOT) ++ cond_unmask_irq(desc); ++ + out_eoi: + desc->irq_data.chip->irq_eoi(&desc->irq_data); + out_unlock: +@@ -625,7 +649,7 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, + irq_settings_set_noprobe(desc); + irq_settings_set_norequest(desc); + irq_settings_set_nothread(desc); +- irq_startup(desc); ++ irq_startup(desc, true); + } + out: + irq_put_desc_busunlock(desc, flags); +diff --git a/kernel/irq/internals.h b/kernel/irq/internals.h +index a73dd6c..e1a8b64 100644 +--- a/kernel/irq/internals.h ++++ b/kernel/irq/internals.h +@@ -67,7 +67,7 @@ extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, + extern void __disable_irq(struct irq_desc *desc, unsigned int irq, bool susp); + extern void __enable_irq(struct irq_desc *desc, unsigned int irq, bool resume); + +-extern int irq_startup(struct irq_desc *desc); ++extern int irq_startup(struct irq_desc *desc, bool resend); + extern void irq_shutdown(struct irq_desc *desc); + extern void irq_enable(struct irq_desc *desc); + extern void irq_disable(struct irq_desc *desc); +diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c +index 1da999f..cf2d7ae 100644 +--- a/kernel/irq/manage.c ++++ b/kernel/irq/manage.c +@@ -1027,7 +1027,7 @@ __setup_irq(unsigned int irq, struct irq_desc *desc, struct irqaction *new) + desc->istate |= IRQS_ONESHOT; + + if (irq_settings_can_autoenable(desc)) +- irq_startup(desc); ++ irq_startup(desc, true); + else + /* Undo nested disables: */ + desc->depth = 1; +diff --git a/mm/nommu.c b/mm/nommu.c +index b982290..ee7e57e 100644 +--- a/mm/nommu.c ++++ b/mm/nommu.c +@@ -696,9 +696,11 @@ static void add_vma_to_mm(struct mm_struct *mm, struct vm_area_struct *vma) + if (vma->vm_file) { + mapping = vma->vm_file->f_mapping; + ++ mutex_lock(&mapping->i_mmap_mutex); + flush_dcache_mmap_lock(mapping); + vma_prio_tree_insert(vma, &mapping->i_mmap); + flush_dcache_mmap_unlock(mapping); ++ mutex_unlock(&mapping->i_mmap_mutex); + } + + /* add the VMA to the tree */ +@@ -760,9 +762,11 @@ static void delete_vma_from_mm(struct vm_area_struct *vma) + if (vma->vm_file) { + mapping = vma->vm_file->f_mapping; + ++ mutex_lock(&mapping->i_mmap_mutex); + flush_dcache_mmap_lock(mapping); + vma_prio_tree_remove(vma, &mapping->i_mmap); + flush_dcache_mmap_unlock(mapping); ++ mutex_unlock(&mapping->i_mmap_mutex); + } + + /* remove from the MM's tree and list */ +@@ -2052,6 +2056,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size, + high = (size + PAGE_SIZE - 1) >> PAGE_SHIFT; + + down_write(&nommu_region_sem); ++ mutex_lock(&inode->i_mapping->i_mmap_mutex); + + /* search for VMAs that fall within the dead zone */ + vma_prio_tree_foreach(vma, &iter, &inode->i_mapping->i_mmap, +@@ -2059,6 +2064,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size, + /* found one - only interested if it's shared out of 
the page + * cache */ + if (vma->vm_flags & VM_SHARED) { ++ mutex_unlock(&inode->i_mapping->i_mmap_mutex); + up_write(&nommu_region_sem); + return -ETXTBSY; /* not quite true, but near enough */ + } +@@ -2086,6 +2092,7 @@ int nommu_shrink_inode_mappings(struct inode *inode, size_t size, + } + } + ++ mutex_unlock(&inode->i_mapping->i_mmap_mutex); + up_write(&nommu_region_sem); + return 0; + } +diff --git a/net/core/dev.c b/net/core/dev.c +index 5a13edf..c56cacf 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -3565,14 +3565,20 @@ static inline gro_result_t + __napi_gro_receive(struct napi_struct *napi, struct sk_buff *skb) + { + struct sk_buff *p; ++ unsigned int maclen = skb->dev->hard_header_len; + + for (p = napi->gro_list; p; p = p->next) { + unsigned long diffs; + + diffs = (unsigned long)p->dev ^ (unsigned long)skb->dev; + diffs |= p->vlan_tci ^ skb->vlan_tci; +- diffs |= compare_ether_header(skb_mac_header(p), +- skb_gro_mac_header(skb)); ++ if (maclen == ETH_HLEN) ++ diffs |= compare_ether_header(skb_mac_header(p), ++ skb_gro_mac_header(skb)); ++ else if (!diffs) ++ diffs = memcmp(skb_mac_header(p), ++ skb_gro_mac_header(skb), ++ maclen); + NAPI_GRO_CB(p)->same_flow = !diffs; + NAPI_GRO_CB(p)->flush = 0; + } +diff --git a/net/core/netpoll.c b/net/core/netpoll.c +index 5d4d896..ab0633f 100644 +--- a/net/core/netpoll.c ++++ b/net/core/netpoll.c +@@ -194,7 +194,7 @@ static void netpoll_poll_dev(struct net_device *dev) + + poll_napi(dev); + +- if (dev->priv_flags & IFF_SLAVE) { ++ if (dev->flags & IFF_SLAVE) { + if (dev->npinfo) { + struct net_device *bond_dev = dev->master; + struct sk_buff *skb; +diff --git a/net/ipv4/arp.c b/net/ipv4/arp.c +index 96a164a..59a7041 100644 +--- a/net/ipv4/arp.c ++++ b/net/ipv4/arp.c +@@ -867,7 +867,8 @@ static int arp_process(struct sk_buff *skb) + if (addr_type == RTN_UNICAST && + (arp_fwd_proxy(in_dev, dev, rt) || + arp_fwd_pvlan(in_dev, dev, rt, sip, tip) || +- pneigh_lookup(&arp_tbl, net, &tip, dev, 0))) { ++ (rt->dst.dev != dev && ++ pneigh_lookup(&arp_tbl, net, &tip, dev, 0)))) { + n = neigh_event_ns(&arp_tbl, sha, &sip, dev); + if (n) + neigh_release(n); +diff --git a/net/ipv4/ip_options.c b/net/ipv4/ip_options.c +index 1e60f76..42dd1a9 100644 +--- a/net/ipv4/ip_options.c ++++ b/net/ipv4/ip_options.c +@@ -573,8 +573,8 @@ void ip_forward_options(struct sk_buff *skb) + } + if (srrptr + 3 <= srrspace) { + opt->is_changed = 1; +- ip_rt_get_source(&optptr[srrptr-1], skb, rt); + ip_hdr(skb)->daddr = opt->nexthop; ++ ip_rt_get_source(&optptr[srrptr-1], skb, rt); + optptr[2] = srrptr+4; + } else if (net_ratelimit()) + printk(KERN_CRIT "ip_forward(): Argh! Destination lost!\n"); +diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c +index 52b5c2d..53113b9 100644 +--- a/net/ipv4/tcp_input.c ++++ b/net/ipv4/tcp_input.c +@@ -1310,25 +1310,26 @@ static int tcp_match_skb_to_sack(struct sock *sk, struct sk_buff *skb, + return in_sack; + } + +-static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, +- struct tcp_sacktag_state *state, ++/* Mark the given newly-SACKed range as such, adjusting counters and hints. */ ++static u8 tcp_sacktag_one(struct sock *sk, ++ struct tcp_sacktag_state *state, u8 sacked, ++ u32 start_seq, u32 end_seq, + int dup_sack, int pcount) + { + struct tcp_sock *tp = tcp_sk(sk); +- u8 sacked = TCP_SKB_CB(skb)->sacked; + int fack_count = state->fack_count; + + /* Account D-SACK for retransmitted packet. 
*/ + if (dup_sack && (sacked & TCPCB_RETRANS)) { + if (tp->undo_marker && tp->undo_retrans && +- after(TCP_SKB_CB(skb)->end_seq, tp->undo_marker)) ++ after(end_seq, tp->undo_marker)) + tp->undo_retrans--; + if (sacked & TCPCB_SACKED_ACKED) + state->reord = min(fack_count, state->reord); + } + + /* Nothing to do; acked frame is about to be dropped (was ACKed). */ +- if (!after(TCP_SKB_CB(skb)->end_seq, tp->snd_una)) ++ if (!after(end_seq, tp->snd_una)) + return sacked; + + if (!(sacked & TCPCB_SACKED_ACKED)) { +@@ -1347,13 +1348,13 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, + /* New sack for not retransmitted frame, + * which was in hole. It is reordering. + */ +- if (before(TCP_SKB_CB(skb)->seq, ++ if (before(start_seq, + tcp_highest_sack_seq(tp))) + state->reord = min(fack_count, + state->reord); + + /* SACK enhanced F-RTO (RFC4138; Appendix B) */ +- if (!after(TCP_SKB_CB(skb)->end_seq, tp->frto_highmark)) ++ if (!after(end_seq, tp->frto_highmark)) + state->flag |= FLAG_ONLY_ORIG_SACKED; + } + +@@ -1371,8 +1372,7 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, + + /* Lost marker hint past SACKed? Tweak RFC3517 cnt */ + if (!tcp_is_fack(tp) && (tp->lost_skb_hint != NULL) && +- before(TCP_SKB_CB(skb)->seq, +- TCP_SKB_CB(tp->lost_skb_hint)->seq)) ++ before(start_seq, TCP_SKB_CB(tp->lost_skb_hint)->seq)) + tp->lost_cnt_hint += pcount; + + if (fack_count > tp->fackets_out) +@@ -1391,6 +1391,9 @@ static u8 tcp_sacktag_one(const struct sk_buff *skb, struct sock *sk, + return sacked; + } + ++/* Shift newly-SACKed bytes from this skb to the immediately previous ++ * already-SACKed sk_buff. Mark the newly-SACKed bytes as such. ++ */ + static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, + struct tcp_sacktag_state *state, + unsigned int pcount, int shifted, int mss, +@@ -1398,10 +1401,13 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, + { + struct tcp_sock *tp = tcp_sk(sk); + struct sk_buff *prev = tcp_write_queue_prev(sk, skb); ++ u32 start_seq = TCP_SKB_CB(skb)->seq; /* start of newly-SACKed */ ++ u32 end_seq = start_seq + shifted; /* end of newly-SACKed */ + + BUG_ON(!pcount); + +- if (skb == tp->lost_skb_hint) ++ /* Adjust hint for FACK. Non-FACK is handled in tcp_sacktag_one(). */ ++ if (tcp_is_fack(tp) && (skb == tp->lost_skb_hint)) + tp->lost_cnt_hint += pcount; + + TCP_SKB_CB(prev)->end_seq += shifted; +@@ -1427,8 +1433,11 @@ static int tcp_shifted_skb(struct sock *sk, struct sk_buff *skb, + skb_shinfo(skb)->gso_type = 0; + } + +- /* We discard results */ +- tcp_sacktag_one(skb, sk, state, dup_sack, pcount); ++ /* Adjust counters and hints for the newly sacked sequence range but ++ * discard the return value since prev is already marked. ++ */ ++ tcp_sacktag_one(sk, state, TCP_SKB_CB(skb)->sacked, ++ start_seq, end_seq, dup_sack, pcount); + + /* Difference in this won't matter, both ACKed by the same cumul. 
ACK */ + TCP_SKB_CB(prev)->sacked |= (TCP_SKB_CB(skb)->sacked & TCPCB_EVER_RETRANS); +@@ -1667,10 +1676,14 @@ static struct sk_buff *tcp_sacktag_walk(struct sk_buff *skb, struct sock *sk, + break; + + if (in_sack) { +- TCP_SKB_CB(skb)->sacked = tcp_sacktag_one(skb, sk, +- state, +- dup_sack, +- tcp_skb_pcount(skb)); ++ TCP_SKB_CB(skb)->sacked = ++ tcp_sacktag_one(sk, ++ state, ++ TCP_SKB_CB(skb)->sacked, ++ TCP_SKB_CB(skb)->seq, ++ TCP_SKB_CB(skb)->end_seq, ++ dup_sack, ++ tcp_skb_pcount(skb)); + + if (!before(TCP_SKB_CB(skb)->seq, + tcp_highest_sack_seq(tp))) +diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c +index c89e354..eb90aa8 100644 +--- a/net/ipv4/tcp_ipv4.c ++++ b/net/ipv4/tcp_ipv4.c +@@ -650,6 +650,11 @@ static void tcp_v4_send_reset(struct sock *sk, struct sk_buff *skb) + arg.iov[0].iov_len, IPPROTO_TCP, 0); + arg.csumoffset = offsetof(struct tcphdr, check) / 2; + arg.flags = (sk && inet_sk(sk)->transparent) ? IP_REPLY_ARG_NOSRCCHECK : 0; ++ /* When socket is gone, all binding information is lost. ++ * routing might fail in this case. using iif for oif to ++ * make sure we can deliver it ++ */ ++ arg.bound_dev_if = sk ? sk->sk_bound_dev_if : inet_iif(skb); + + net = dev_net(skb_dst(skb)->dev); + arg.tos = ip_hdr(skb)->tos; +diff --git a/net/mac80211/main.c b/net/mac80211/main.c +index a7536fd..7d9b21d 100644 +--- a/net/mac80211/main.c ++++ b/net/mac80211/main.c +@@ -885,6 +885,8 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + wiphy_debug(local->hw.wiphy, "Failed to initialize wep: %d\n", + result); + ++ ieee80211_led_init(local); ++ + rtnl_lock(); + + result = ieee80211_init_rate_ctrl_alg(local, +@@ -906,8 +908,6 @@ int ieee80211_register_hw(struct ieee80211_hw *hw) + + rtnl_unlock(); + +- ieee80211_led_init(local); +- + local->network_latency_notifier.notifier_call = + ieee80211_max_network_latency; + result = pm_qos_add_notifier(PM_QOS_NETWORK_LATENCY, +diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c +index 093cc32..6dc7d7d 100644 +--- a/net/netfilter/ipvs/ip_vs_core.c ++++ b/net/netfilter/ipvs/ip_vs_core.c +@@ -232,6 +232,7 @@ ip_vs_sched_persist(struct ip_vs_service *svc, + __be16 dport = 0; /* destination port to forward */ + unsigned int flags; + struct ip_vs_conn_param param; ++ const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; + union nf_inet_addr snet; /* source network of the client, + after masking */ + +@@ -267,7 +268,6 @@ ip_vs_sched_persist(struct ip_vs_service *svc, + { + int protocol = iph.protocol; + const union nf_inet_addr *vaddr = &iph.daddr; +- const union nf_inet_addr fwmark = { .ip = htonl(svc->fwmark) }; + __be16 vport = 0; + + if (dst_port == svc->port) { +diff --git a/net/sched/sch_choke.c b/net/sched/sch_choke.c +index 3422b25..081ffb9 100644 +--- a/net/sched/sch_choke.c ++++ b/net/sched/sch_choke.c +@@ -225,8 +225,7 @@ struct choke_skb_cb { + + static inline struct choke_skb_cb *choke_skb_cb(const struct sk_buff *skb) + { +- BUILD_BUG_ON(sizeof(skb->cb) < +- sizeof(struct qdisc_skb_cb) + sizeof(struct choke_skb_cb)); ++ qdisc_cb_private_validate(skb, sizeof(struct choke_skb_cb)); + return (struct choke_skb_cb *)qdisc_skb_cb(skb)->data; + } + +diff --git a/net/sched/sch_netem.c b/net/sched/sch_netem.c +index a4ab207..7801b15 100644 +--- a/net/sched/sch_netem.c ++++ b/net/sched/sch_netem.c +@@ -118,8 +118,7 @@ struct netem_skb_cb { + + static inline struct netem_skb_cb *netem_skb_cb(struct sk_buff *skb) + { +- BUILD_BUG_ON(sizeof(skb->cb) < +- sizeof(struct qdisc_skb_cb) + sizeof(struct 
netem_skb_cb)); ++ qdisc_cb_private_validate(skb, sizeof(struct netem_skb_cb)); + return (struct netem_skb_cb *)qdisc_skb_cb(skb)->data; + } + +@@ -383,8 +382,8 @@ static int netem_enqueue(struct sk_buff *skb, struct Qdisc *sch) + q->counter = 0; + + __skb_queue_head(&q->qdisc->q, skb); +- q->qdisc->qstats.backlog += qdisc_pkt_len(skb); +- q->qdisc->qstats.requeues++; ++ sch->qstats.backlog += qdisc_pkt_len(skb); ++ sch->qstats.requeues++; + ret = NET_XMIT_SUCCESS; + } + +diff --git a/net/sched/sch_sfb.c b/net/sched/sch_sfb.c +index e83c272..17859ea 100644 +--- a/net/sched/sch_sfb.c ++++ b/net/sched/sch_sfb.c +@@ -93,8 +93,7 @@ struct sfb_skb_cb { + + static inline struct sfb_skb_cb *sfb_skb_cb(const struct sk_buff *skb) + { +- BUILD_BUG_ON(sizeof(skb->cb) < +- sizeof(struct qdisc_skb_cb) + sizeof(struct sfb_skb_cb)); ++ qdisc_cb_private_validate(skb, sizeof(struct sfb_skb_cb)); + return (struct sfb_skb_cb *)qdisc_skb_cb(skb)->data; + } + +diff --git a/scripts/package/builddeb b/scripts/package/builddeb +index f6cbc3d..3c6c0b1 100644 +--- a/scripts/package/builddeb ++++ b/scripts/package/builddeb +@@ -238,14 +238,14 @@ EOF + fi + + # Build header package +-(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > /tmp/files$$) +-(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> /tmp/files$$) +-(cd $objtree; find .config Module.symvers include scripts -type f >> /tmp/objfiles$$) ++(cd $srctree; find . -name Makefile -o -name Kconfig\* -o -name \*.pl > "$objtree/debian/hdrsrcfiles") ++(cd $srctree; find arch/$SRCARCH/include include scripts -type f >> "$objtree/debian/hdrsrcfiles") ++(cd $objtree; find .config Module.symvers include scripts -type f >> "$objtree/debian/hdrobjfiles") + destdir=$kernel_headers_dir/usr/src/linux-headers-$version + mkdir -p "$destdir" +-(cd $srctree; tar -c -f - -T /tmp/files$$) | (cd $destdir; tar -xf -) +-(cd $objtree; tar -c -f - -T /tmp/objfiles$$) | (cd $destdir; tar -xf -) +-rm -f /tmp/files$$ /tmp/objfiles$$ ++(cd $srctree; tar -c -f - -T "$objtree/debian/hdrsrcfiles") | (cd $destdir; tar -xf -) ++(cd $objtree; tar -c -f - -T "$objtree/debian/hdrobjfiles") | (cd $destdir; tar -xf -) ++rm -f "$objtree/debian/hdrsrcfiles" "$objtree/debian/hdrobjfiles" + arch=$(dpkg --print-architecture) + + cat <<EOF >> debian/control +diff --git a/security/tomoyo/.gitignore b/security/tomoyo/.gitignore +new file mode 100644 +index 0000000..5caf1a6 +--- /dev/null ++++ b/security/tomoyo/.gitignore +@@ -0,0 +1,2 @@ ++builtin-policy.h ++policy/ +diff --git a/sound/pci/hda/patch_conexant.c b/sound/pci/hda/patch_conexant.c +index 7072251..08bad5b 100644 +--- a/sound/pci/hda/patch_conexant.c ++++ b/sound/pci/hda/patch_conexant.c +@@ -1899,6 +1899,10 @@ static void cxt5051_init_mic_port(struct hda_codec *codec, hda_nid_t nid, + snd_hda_codec_write(codec, nid, 0, + AC_VERB_SET_UNSOLICITED_ENABLE, + AC_USRSP_EN | event); ++} ++ ++static void cxt5051_init_mic_jack(struct hda_codec *codec, hda_nid_t nid) ++{ + snd_hda_input_jack_add(codec, nid, SND_JACK_MICROPHONE, NULL); + snd_hda_input_jack_report(codec, nid); + } +@@ -1916,7 +1920,6 @@ static int cxt5051_init(struct hda_codec *codec) + struct conexant_spec *spec = codec->spec; + + conexant_init(codec); +- conexant_init_jacks(codec); + + if (spec->auto_mic & AUTO_MIC_PORTB) + cxt5051_init_mic_port(codec, 0x17, CXT5051_PORTB_EVENT); +@@ -2037,6 +2040,12 @@ static int patch_cxt5051(struct hda_codec *codec) + if (spec->beep_amp) + snd_hda_attach_beep_device(codec, spec->beep_amp); + ++ 
conexant_init_jacks(codec); ++ if (spec->auto_mic & AUTO_MIC_PORTB) ++ cxt5051_init_mic_jack(codec, 0x17); ++ if (spec->auto_mic & AUTO_MIC_PORTC) ++ cxt5051_init_mic_jack(codec, 0x18); ++ + return 0; + } + +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 9c197d4..c4c8d78 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -79,6 +79,8 @@ enum { + ALC_AUTOMUTE_MIXER, /* mute/unmute mixer widget AMP */ + }; + ++#define MAX_VOL_NIDS 0x40 ++ + struct alc_spec { + /* codec parameterization */ + const struct snd_kcontrol_new *mixers[5]; /* mixer arrays */ +@@ -117,8 +119,8 @@ struct alc_spec { + const hda_nid_t *capsrc_nids; + hda_nid_t dig_in_nid; /* digital-in NID; optional */ + hda_nid_t mixer_nid; /* analog-mixer NID */ +- DECLARE_BITMAP(vol_ctls, 0x20 << 1); +- DECLARE_BITMAP(sw_ctls, 0x20 << 1); ++ DECLARE_BITMAP(vol_ctls, MAX_VOL_NIDS << 1); ++ DECLARE_BITMAP(sw_ctls, MAX_VOL_NIDS << 1); + + /* capture setup for dynamic dual-adc switch */ + hda_nid_t cur_adc; +@@ -3068,7 +3070,10 @@ static int alc_auto_fill_dac_nids(struct hda_codec *codec) + static inline unsigned int get_ctl_pos(unsigned int data) + { + hda_nid_t nid = get_amp_nid_(data); +- unsigned int dir = get_amp_direction_(data); ++ unsigned int dir; ++ if (snd_BUG_ON(nid >= MAX_VOL_NIDS)) ++ return 0; ++ dir = get_amp_direction_(data); + return (nid << 1) | dir; + } + +@@ -4224,12 +4229,20 @@ static void alc889_fixup_dac_route(struct hda_codec *codec, + const struct alc_fixup *fix, int action) + { + if (action == ALC_FIXUP_ACT_PRE_PROBE) { ++ /* fake the connections during parsing the tree */ + hda_nid_t conn1[2] = { 0x0c, 0x0d }; + hda_nid_t conn2[2] = { 0x0e, 0x0f }; + snd_hda_override_conn_list(codec, 0x14, 2, conn1); + snd_hda_override_conn_list(codec, 0x15, 2, conn1); + snd_hda_override_conn_list(codec, 0x18, 2, conn2); + snd_hda_override_conn_list(codec, 0x1a, 2, conn2); ++ } else if (action == ALC_FIXUP_ACT_PROBE) { ++ /* restore the connections */ ++ hda_nid_t conn[5] = { 0x0c, 0x0d, 0x0e, 0x0f, 0x26 }; ++ snd_hda_override_conn_list(codec, 0x14, 5, conn); ++ snd_hda_override_conn_list(codec, 0x15, 5, conn); ++ snd_hda_override_conn_list(codec, 0x18, 5, conn); ++ snd_hda_override_conn_list(codec, 0x1a, 5, conn); + } + } + +diff --git a/sound/soc/codecs/wm8962.c b/sound/soc/codecs/wm8962.c +index d795294..07dd7eb 100644 +--- a/sound/soc/codecs/wm8962.c ++++ b/sound/soc/codecs/wm8962.c +@@ -2559,7 +2559,7 @@ static int dsp2_event(struct snd_soc_dapm_widget *w, + return 0; + } + +-static const char *st_text[] = { "None", "Right", "Left" }; ++static const char *st_text[] = { "None", "Left", "Right" }; + + static const struct soc_enum str_enum = + SOC_ENUM_SINGLE(WM8962_DAC_DSP_MIXING_1, 2, 3, st_text); |