author    | Anthony G. Basile <blueness@gentoo.org> | 2012-12-09 21:33:36 -0500
committer | Anthony G. Basile <blueness@gentoo.org> | 2012-12-09 22:39:47 -0500
commit    | d994308b9ee141c7fef9a62d37ef87707494f854 (patch)
tree      | 8d59eb597e13a737ef5173b291fdbc0109e063d6
parent    | Grsec/PaX: 2.9.1-{2.6.32.60,3.2.34,3.6.9}-201212041903 (diff)
download  | hardened-patchset-d994308b9ee141c7fef9a62d37ef87707494f854.tar.gz
          | hardened-patchset-d994308b9ee141c7fef9a62d37ef87707494f854.tar.bz2
          | hardened-patchset-d994308b9ee141c7fef9a62d37ef87707494f854.zip
Grsec/PaX: 2.9.1-{2.6.32.60,3.2.35,3.6.9}-201212071641
-rw-r--r-- | 2.6.32/0000_README | 2
-rw-r--r-- | 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201212071640.patch (renamed from 2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201212041903.patch) | 59
-rw-r--r-- | 3.2.35/0000_README (renamed from 3.2.34/0000_README) | 6
-rw-r--r-- | 3.2.35/1021_linux-3.2.22.patch (renamed from 3.2.34/1021_linux-3.2.22.patch) | 0
-rw-r--r-- | 3.2.35/1022_linux-3.2.23.patch (renamed from 3.2.34/1022_linux-3.2.23.patch) | 0
-rw-r--r-- | 3.2.35/1023_linux-3.2.24.patch (renamed from 3.2.34/1023_linux-3.2.24.patch) | 0
-rw-r--r-- | 3.2.35/1024_linux-3.2.25.patch (renamed from 3.2.34/1024_linux-3.2.25.patch) | 0
-rw-r--r-- | 3.2.35/1025_linux-3.2.26.patch (renamed from 3.2.34/1025_linux-3.2.26.patch) | 0
-rw-r--r-- | 3.2.35/1026_linux-3.2.27.patch (renamed from 3.2.34/1026_linux-3.2.27.patch) | 0
-rw-r--r-- | 3.2.35/1027_linux-3.2.28.patch (renamed from 3.2.34/1027_linux-3.2.28.patch) | 0
-rw-r--r-- | 3.2.35/1028_linux-3.2.29.patch (renamed from 3.2.34/1028_linux-3.2.29.patch) | 0
-rw-r--r-- | 3.2.35/1029_linux-3.2.30.patch (renamed from 3.2.34/1029_linux-3.2.30.patch) | 0
-rw-r--r-- | 3.2.35/1030_linux-3.2.31.patch (renamed from 3.2.34/1030_linux-3.2.31.patch) | 0
-rw-r--r-- | 3.2.35/1031_linux-3.2.32.patch (renamed from 3.2.34/1031_linux-3.2.32.patch) | 0
-rw-r--r-- | 3.2.35/1032_linux-3.2.33.patch (renamed from 3.2.34/1032_linux-3.2.33.patch) | 0
-rw-r--r-- | 3.2.35/1033_linux-3.2.34.patch (renamed from 3.2.34/1033_linux-3.2.34.patch) | 0
-rw-r--r-- | 3.2.35/1034_linux-3.2.35.patch | 3014
-rw-r--r-- | 3.2.35/4420_grsecurity-2.9.1-3.2.35-201212071641.patch (renamed from 3.2.34/4420_grsecurity-2.9.1-3.2.34-201212041903.patch) | 574
-rw-r--r-- | 3.2.35/4425-tmpfs-user-namespace.patch (renamed from 3.2.34/4425-tmpfs-user-namespace.patch) | 7
-rw-r--r-- | 3.2.35/4430_grsec-remove-localversion-grsec.patch (renamed from 3.2.34/4430_grsec-remove-localversion-grsec.patch) | 0
-rw-r--r-- | 3.2.35/4435_grsec-mute-warnings.patch (renamed from 3.2.34/4435_grsec-mute-warnings.patch) | 0
-rw-r--r-- | 3.2.35/4440_grsec-remove-protected-paths.patch (renamed from 3.2.34/4440_grsec-remove-protected-paths.patch) | 0
-rw-r--r-- | 3.2.35/4450_grsec-kconfig-default-gids.patch (renamed from 3.2.34/4450_grsec-kconfig-default-gids.patch) | 12
-rw-r--r-- | 3.2.35/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.2.34/4465_selinux-avc_audit-log-curr_ip.patch) | 2
-rw-r--r-- | 3.2.35/4470_disable-compat_vdso.patch (renamed from 3.2.34/4470_disable-compat_vdso.patch) | 0
-rw-r--r-- | 3.6.9/0000_README | 2
-rw-r--r-- | 3.6.9/4420_grsecurity-2.9.1-3.6.9-201212071641.patch (renamed from 3.6.9/4420_grsecurity-2.9.1-3.6.9-201212041903.patch) | 181
-rw-r--r-- | 3.6.9/4425-tmpfs-user-namespace.patch | 7
-rw-r--r-- | 3.6.9/4450_grsec-kconfig-default-gids.patch | 12
-rw-r--r-- | 3.6.9/4465_selinux-avc_audit-log-curr_ip.patch | 2
30 files changed, 3475 insertions, 405 deletions
diff --git a/2.6.32/0000_README b/2.6.32/0000_README index 640e15f..1443d3a 100644 --- a/2.6.32/0000_README +++ b/2.6.32/0000_README @@ -34,7 +34,7 @@ Patch: 1059_linux-2.6.32.60.patch From: http://www.kernel.org Desc: Linux 2.6.32.59 -Patch: 4420_grsecurity-2.9.1-2.6.32.60-201212041903.patch +Patch: 4420_grsecurity-2.9.1-2.6.32.60-201212071640.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201212041903.patch b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201212071640.patch index 585b0cc..06f7fb7 100644 --- a/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201212041903.patch +++ b/2.6.32/4420_grsecurity-2.9.1-2.6.32.60-201212071640.patch @@ -76791,7 +76791,7 @@ index ff57421..f65f88a 100644 out_free_fd: diff --git a/fs/exec.c b/fs/exec.c -index 86fafc6..ab06586 100644 +index 86fafc6..ddb5122 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -56,12 +56,33 @@ @@ -77568,7 +77568,7 @@ index 86fafc6..ab06586 100644 audit_core_dumps(signr); + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) -+ gr_handle_brute_attach(current, mm->flags); ++ gr_handle_brute_attach(mm->flags); + binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) @@ -92700,10 +92700,10 @@ index 0000000..78f8733 +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 -index 0000000..c648492 +index 0000000..d450a74 --- /dev/null +++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,206 @@ +@@ -0,0 +1,219 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/delay.h> @@ -92781,6 +92781,7 @@ index 0000000..c648492 + +#ifdef CONFIG_GRKERNSEC_BRUTE +#define GR_USER_BAN_TIME (15 * 60) ++#define GR_DAEMON_BRUTE_TIME (30 * 60) + +static int __get_dumpable(unsigned long mm_flags) +{ @@ -92791,10 +92792,12 @@ index 0000000..c648492 +} +#endif + -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags) ++void gr_handle_brute_attach(unsigned long mm_flags) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct task_struct *p = current; + uid_t uid = 0; ++ int daemon = 0; + + if (!grsec_enable_brute) + return; @@ -92802,9 +92805,11 @@ index 0000000..c648492 + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) ++ if (p->real_parent && p->real_parent->exec_file == p->exec_file) { ++ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; -+ else { ++ daemon = 1; ++ } else { + const struct cred *cred = __task_cred(p), *cred2; + struct task_struct *tsk, *tsk2; + @@ -92836,6 +92841,8 @@ index 0000000..c648492 + + if (uid) + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60); ++ else if (daemon) ++ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); +#endif + return; +} @@ -92843,8 +92850,14 @@ index 0000000..c648492 +void gr_handle_brute_check(void) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE -+ if (current->brute) -+ msleep(30 * 1000); ++ struct task_struct *p = current; ++ ++ if (unlikely(p->brute)) { ++ if (!grsec_enable_brute) ++ p->brute = 0; ++ else if (time_before(get_seconds(), p->brute_expires)) ++ msleep(30 * 1000); ++ } +#endif + return; +} @@ -96667,10 +96680,10 @@ index 0000000..3322652 +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 -index 0000000..ac88734 +index 0000000..18863d1 --- /dev/null +++ b/include/linux/grmsg.h -@@ -0,0 +1,110 
@@ +@@ -0,0 +1,111 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " @@ -96781,9 +96794,10 @@ index 0000000..ac88734 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " ++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 -index 0000000..0e2a522 +index 0000000..6e2f8bc --- /dev/null +++ b/include/linux/grsecurity.h @@ -0,0 +1,226 @@ @@ -96809,7 +96823,7 @@ index 0000000..0e2a522 +#error "CONFIG_PAX enabled, but no PaX options are enabled." +#endif + -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags); ++void gr_handle_brute_attach(unsigned long mm_flags); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); +int gr_process_user_ban(void); @@ -98401,7 +98415,7 @@ index 3392c59..a746428 100644 #if defined(CONFIG_RFKILL) || defined(CONFIG_RFKILL_MODULE) /** diff --git a/include/linux/sched.h b/include/linux/sched.h -index 71849bf..903514a 100644 +index 71849bf..42936d2 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,6 +101,7 @@ struct bio; @@ -98529,7 +98543,7 @@ index 71849bf..903514a 100644 struct io_context *io_context; unsigned long ptrace_message; -@@ -1519,6 +1544,27 @@ struct task_struct { +@@ -1519,6 +1544,28 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; @@ -98546,6 +98560,7 @@ index 71849bf..903514a 100644 + struct acl_subject_label *acl; + struct acl_role_label *role; + struct file *exec_file; ++ unsigned long brute_expires; + u16 acl_role_id; + /* is this the task that authenticated to the special role */ + u8 acl_sp_role; @@ -98557,7 +98572,7 @@ index 71849bf..903514a 100644 #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored adress in ret_stack */ int curr_ret_stack; -@@ -1542,6 +1588,57 @@ struct task_struct { +@@ -1542,6 +1589,57 @@ struct task_struct { #endif /* CONFIG_TRACING */ }; @@ -98615,7 +98630,7 @@ index 71849bf..903514a 100644 /* Future-safe accessor for struct task_struct's cpus_allowed. */ #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) -@@ -1740,7 +1837,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * +@@ -1740,7 +1838,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t * #define PF_DUMPCORE 0x00000200 /* dumped core */ #define PF_SIGNALED 0x00000400 /* killed by a signal */ #define PF_MEMALLOC 0x00000800 /* Allocating memory */ @@ -98624,7 +98639,7 @@ index 71849bf..903514a 100644 #define PF_USED_MATH 0x00002000 /* if unset the fpu must be initialized before use */ #define PF_FREEZING 0x00004000 /* freeze in progress. 
do not account to load */ #define PF_NOFREEZE 0x00008000 /* this thread should not be frozen */ -@@ -1978,7 +2075,9 @@ void yield(void); +@@ -1978,7 +2076,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -98634,7 +98649,7 @@ index 71849bf..903514a 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2011,6 +2110,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2011,6 +2111,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -98642,7 +98657,7 @@ index 71849bf..903514a 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2155,7 +2255,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2155,7 +2256,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -98651,7 +98666,7 @@ index 71849bf..903514a 100644 extern void daemonize(const char *, ...); extern int allow_signal(int); -@@ -2284,9 +2384,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2284,9 +2385,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -98663,7 +98678,7 @@ index 71849bf..903514a 100644 return (obj >= stack) && (obj < (stack + THREAD_SIZE)); } -@@ -2616,6 +2716,23 @@ static inline unsigned long rlimit_max(unsigned int limit) +@@ -2616,6 +2717,23 @@ static inline unsigned long rlimit_max(unsigned int limit) return task_rlimit_max(current, limit); } diff --git a/3.2.34/0000_README b/3.2.35/0000_README index a6e67e7..d60b72b 100644 --- a/3.2.34/0000_README +++ b/3.2.35/0000_README @@ -54,7 +54,11 @@ Patch: 1033_linux-3.2.34.patch From: http://www.kernel.org Desc: Linux 3.2.34 -Patch: 4420_grsecurity-2.9.1-3.2.34-201212041903.patch +Patch: 1034_linux-3.2.35.patch +From: http://www.kernel.org +Desc: Linux 3.2.35 + +Patch: 4420_grsecurity-2.9.1-3.2.35-201212071641.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.2.34/1021_linux-3.2.22.patch b/3.2.35/1021_linux-3.2.22.patch index e6ad93a..e6ad93a 100644 --- a/3.2.34/1021_linux-3.2.22.patch +++ b/3.2.35/1021_linux-3.2.22.patch diff --git a/3.2.34/1022_linux-3.2.23.patch b/3.2.35/1022_linux-3.2.23.patch index 3d796d0..3d796d0 100644 --- a/3.2.34/1022_linux-3.2.23.patch +++ b/3.2.35/1022_linux-3.2.23.patch diff --git a/3.2.34/1023_linux-3.2.24.patch b/3.2.35/1023_linux-3.2.24.patch index 4692eb4..4692eb4 100644 --- a/3.2.34/1023_linux-3.2.24.patch +++ b/3.2.35/1023_linux-3.2.24.patch diff --git a/3.2.34/1024_linux-3.2.25.patch b/3.2.35/1024_linux-3.2.25.patch index e95c213..e95c213 100644 --- a/3.2.34/1024_linux-3.2.25.patch +++ b/3.2.35/1024_linux-3.2.25.patch diff --git a/3.2.34/1025_linux-3.2.26.patch b/3.2.35/1025_linux-3.2.26.patch index 44065b9..44065b9 100644 --- a/3.2.34/1025_linux-3.2.26.patch +++ b/3.2.35/1025_linux-3.2.26.patch diff --git a/3.2.34/1026_linux-3.2.27.patch b/3.2.35/1026_linux-3.2.27.patch index 5878eb4..5878eb4 100644 --- a/3.2.34/1026_linux-3.2.27.patch +++ b/3.2.35/1026_linux-3.2.27.patch diff --git a/3.2.34/1027_linux-3.2.28.patch b/3.2.35/1027_linux-3.2.28.patch index 4dbba4b..4dbba4b 100644 --- a/3.2.34/1027_linux-3.2.28.patch +++ b/3.2.35/1027_linux-3.2.28.patch diff --git a/3.2.34/1028_linux-3.2.29.patch b/3.2.35/1028_linux-3.2.29.patch index 3c65179..3c65179 100644 --- a/3.2.34/1028_linux-3.2.29.patch +++ b/3.2.35/1028_linux-3.2.29.patch diff --git 
a/3.2.34/1029_linux-3.2.30.patch b/3.2.35/1029_linux-3.2.30.patch index 86aea4b..86aea4b 100644 --- a/3.2.34/1029_linux-3.2.30.patch +++ b/3.2.35/1029_linux-3.2.30.patch diff --git a/3.2.34/1030_linux-3.2.31.patch b/3.2.35/1030_linux-3.2.31.patch index c6accf5..c6accf5 100644 --- a/3.2.34/1030_linux-3.2.31.patch +++ b/3.2.35/1030_linux-3.2.31.patch diff --git a/3.2.34/1031_linux-3.2.32.patch b/3.2.35/1031_linux-3.2.32.patch index 247fc0b..247fc0b 100644 --- a/3.2.34/1031_linux-3.2.32.patch +++ b/3.2.35/1031_linux-3.2.32.patch diff --git a/3.2.34/1032_linux-3.2.33.patch b/3.2.35/1032_linux-3.2.33.patch index c32fb75..c32fb75 100644 --- a/3.2.34/1032_linux-3.2.33.patch +++ b/3.2.35/1032_linux-3.2.33.patch diff --git a/3.2.34/1033_linux-3.2.34.patch b/3.2.35/1033_linux-3.2.34.patch index d647b38..d647b38 100644 --- a/3.2.34/1033_linux-3.2.34.patch +++ b/3.2.35/1033_linux-3.2.34.patch diff --git a/3.2.35/1034_linux-3.2.35.patch b/3.2.35/1034_linux-3.2.35.patch new file mode 100644 index 0000000..76a9c19 --- /dev/null +++ b/3.2.35/1034_linux-3.2.35.patch @@ -0,0 +1,3014 @@ +diff --git a/Documentation/cgroups/memory.txt b/Documentation/cgroups/memory.txt +index cc0ebc5..fd129f6 100644 +--- a/Documentation/cgroups/memory.txt ++++ b/Documentation/cgroups/memory.txt +@@ -440,6 +440,10 @@ Note: + 5.3 swappiness + + Similar to /proc/sys/vm/swappiness, but affecting a hierarchy of groups only. ++Please note that unlike the global swappiness, memcg knob set to 0 ++really prevents from any swapping even if there is a swap storage ++available. This might lead to memcg OOM killer if there are no file ++pages to reclaim. + + Following cgroups' swappiness can't be changed. + - root cgroup (uses /proc/sys/vm/swappiness). +diff --git a/Documentation/dvb/get_dvb_firmware b/Documentation/dvb/get_dvb_firmware +index e67be7a..48d25ea 100755 +--- a/Documentation/dvb/get_dvb_firmware ++++ b/Documentation/dvb/get_dvb_firmware +@@ -115,7 +115,7 @@ sub tda10045 { + + sub tda10046 { + my $sourcefile = "TT_PCI_2.19h_28_11_2006.zip"; +- my $url = "http://www.tt-download.com/download/updates/219/$sourcefile"; ++ my $url = "http://technotrend.com.ua/download/software/219/$sourcefile"; + my $hash = "6a7e1e2f2644b162ff0502367553c72d"; + my $outfile = "dvb-fe-tda10046.fw"; + my $tmpdir = tempdir(DIR => "/tmp", CLEANUP => 1); +diff --git a/Makefile b/Makefile +index 14ebacf..d985af0 100644 +--- a/Makefile ++++ b/Makefile +@@ -1,6 +1,6 @@ + VERSION = 3 + PATCHLEVEL = 2 +-SUBLEVEL = 34 ++SUBLEVEL = 35 + EXTRAVERSION = + NAME = Saber-toothed Squirrel + +diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig +index 9fdc151..27bcd12 100644 +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -542,6 +542,7 @@ config ARCH_KIRKWOOD + bool "Marvell Kirkwood" + select CPU_FEROCEON + select PCI ++ select PCI_QUIRKS + select ARCH_REQUIRE_GPIOLIB + select GENERIC_CLOCKEVENTS + select PLAT_ORION +diff --git a/arch/arm/mach-dove/include/mach/pm.h b/arch/arm/mach-dove/include/mach/pm.h +index 3ad9f94..11799c3 100644 +--- a/arch/arm/mach-dove/include/mach/pm.h ++++ b/arch/arm/mach-dove/include/mach/pm.h +@@ -45,7 +45,7 @@ static inline int pmu_to_irq(int pin) + + static inline int irq_to_pmu(int irq) + { +- if (IRQ_DOVE_PMU_START < irq && irq < NR_IRQS) ++ if (IRQ_DOVE_PMU_START <= irq && irq < NR_IRQS) + return irq - IRQ_DOVE_PMU_START; + + return -EINVAL; +diff --git a/arch/arm/mach-dove/irq.c b/arch/arm/mach-dove/irq.c +index f07fd16..9f2fd10 100644 +--- a/arch/arm/mach-dove/irq.c ++++ b/arch/arm/mach-dove/irq.c +@@ -61,8 +61,20 @@ static void 
pmu_irq_ack(struct irq_data *d) + int pin = irq_to_pmu(d->irq); + u32 u; + ++ /* ++ * The PMU mask register is not RW0C: it is RW. This means that ++ * the bits take whatever value is written to them; if you write ++ * a '1', you will set the interrupt. ++ * ++ * Unfortunately this means there is NO race free way to clear ++ * these interrupts. ++ * ++ * So, let's structure the code so that the window is as small as ++ * possible. ++ */ + u = ~(1 << (pin & 31)); +- writel(u, PMU_INTERRUPT_CAUSE); ++ u &= readl_relaxed(PMU_INTERRUPT_CAUSE); ++ writel_relaxed(u, PMU_INTERRUPT_CAUSE); + } + + static struct irq_chip pmu_irq_chip = { +diff --git a/arch/arm/mach-kirkwood/pcie.c b/arch/arm/mach-kirkwood/pcie.c +index 74b992d..a881c54 100644 +--- a/arch/arm/mach-kirkwood/pcie.c ++++ b/arch/arm/mach-kirkwood/pcie.c +@@ -213,14 +213,19 @@ static int __init kirkwood_pcie_setup(int nr, struct pci_sys_data *sys) + return 1; + } + ++/* ++ * The root complex has a hardwired class of PCI_CLASS_MEMORY_OTHER, when it ++ * is operating as a root complex this needs to be switched to ++ * PCI_CLASS_BRIDGE_HOST or Linux will errantly try to process the BAR's on ++ * the device. Decoding setup is handled by the orion code. ++ */ + static void __devinit rc_pci_fixup(struct pci_dev *dev) + { +- /* +- * Prevent enumeration of root complex. +- */ + if (dev->bus->parent == NULL && dev->devfn == 0) { + int i; + ++ dev->class &= 0xff; ++ dev->class |= PCI_CLASS_BRIDGE_HOST << 8; + for (i = 0; i < DEVICE_COUNT_RESOURCE; i++) { + dev->resource[i].start = 0; + dev->resource[i].end = 0; +diff --git a/arch/m68k/include/asm/signal.h b/arch/m68k/include/asm/signal.h +index 60e8866..93fe83e 100644 +--- a/arch/m68k/include/asm/signal.h ++++ b/arch/m68k/include/asm/signal.h +@@ -156,7 +156,7 @@ typedef struct sigaltstack { + static inline void sigaddset(sigset_t *set, int _sig) + { + asm ("bfset %0{%1,#1}" +- : "+od" (*set) ++ : "+o" (*set) + : "id" ((_sig - 1) ^ 31) + : "cc"); + } +@@ -164,7 +164,7 @@ static inline void sigaddset(sigset_t *set, int _sig) + static inline void sigdelset(sigset_t *set, int _sig) + { + asm ("bfclr %0{%1,#1}" +- : "+od" (*set) ++ : "+o" (*set) + : "id" ((_sig - 1) ^ 31) + : "cc"); + } +@@ -180,7 +180,7 @@ static inline int __gen_sigismember(sigset_t *set, int _sig) + int ret; + asm ("bfextu %1{%2,#1},%0" + : "=d" (ret) +- : "od" (*set), "id" ((_sig-1) ^ 31) ++ : "o" (*set), "id" ((_sig-1) ^ 31) + : "cc"); + return ret; + } +diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c +index e141324..d0ea054 100644 +--- a/arch/parisc/kernel/signal32.c ++++ b/arch/parisc/kernel/signal32.c +@@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) + { + compat_sigset_t s; + +- if (sz != sizeof *set) panic("put_sigset32()"); ++ if (sz != sizeof *set) ++ return -EINVAL; + sigset_64to32(&s, set); + + return copy_to_user(up, &s, sizeof s); +@@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) + compat_sigset_t s; + int r; + +- if (sz != sizeof *set) panic("put_sigset32()"); ++ if (sz != sizeof *set) ++ return -EINVAL; + + if ((r = copy_from_user(&s, up, sz)) == 0) { + sigset_32to64(set, &s); +diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c +index c9b9322..7ea75d1 100644 +--- a/arch/parisc/kernel/sys_parisc.c ++++ b/arch/parisc/kernel/sys_parisc.c +@@ -73,6 +73,8 @@ static unsigned long get_shared_area(struct address_space *mapping, + struct vm_area_struct *vma; + int offset = mapping ? 
get_offset(mapping) : 0; + ++ offset = (offset + (pgoff << PAGE_SHIFT)) & 0x3FF000; ++ + addr = DCACHE_ALIGN(addr - offset) + offset; + + for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) { +diff --git a/arch/s390/include/asm/compat.h b/arch/s390/include/asm/compat.h +index 234f1d8..2e0a15b 100644 +--- a/arch/s390/include/asm/compat.h ++++ b/arch/s390/include/asm/compat.h +@@ -20,7 +20,7 @@ + #define PSW32_MASK_CC 0x00003000UL + #define PSW32_MASK_PM 0x00000f00UL + +-#define PSW32_MASK_USER 0x00003F00UL ++#define PSW32_MASK_USER 0x0000FF00UL + + #define PSW32_ADDR_AMODE 0x80000000UL + #define PSW32_ADDR_INSN 0x7FFFFFFFUL +diff --git a/arch/s390/include/asm/ptrace.h b/arch/s390/include/asm/ptrace.h +index a658463..a5b4c48 100644 +--- a/arch/s390/include/asm/ptrace.h ++++ b/arch/s390/include/asm/ptrace.h +@@ -240,7 +240,7 @@ typedef struct + #define PSW_MASK_EA 0x00000000UL + #define PSW_MASK_BA 0x00000000UL + +-#define PSW_MASK_USER 0x00003F00UL ++#define PSW_MASK_USER 0x0000FF00UL + + #define PSW_ADDR_AMODE 0x80000000UL + #define PSW_ADDR_INSN 0x7FFFFFFFUL +@@ -269,7 +269,7 @@ typedef struct + #define PSW_MASK_EA 0x0000000100000000UL + #define PSW_MASK_BA 0x0000000080000000UL + +-#define PSW_MASK_USER 0x00003F0180000000UL ++#define PSW_MASK_USER 0x0000FF0180000000UL + + #define PSW_ADDR_AMODE 0x0000000000000000UL + #define PSW_ADDR_INSN 0xFFFFFFFFFFFFFFFFUL +diff --git a/arch/s390/kernel/compat_signal.c b/arch/s390/kernel/compat_signal.c +index 4f68c81..9fdd05d 100644 +--- a/arch/s390/kernel/compat_signal.c ++++ b/arch/s390/kernel/compat_signal.c +@@ -312,6 +312,10 @@ static int restore_sigregs32(struct pt_regs *regs,_sigregs32 __user *sregs) + regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | + (__u64)(regs32.psw.mask & PSW32_MASK_USER) << 32 | + (__u64)(regs32.psw.addr & PSW32_ADDR_AMODE); ++ /* Check for invalid user address space control. */ ++ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) ++ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | ++ (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (__u64)(regs32.psw.addr & PSW32_ADDR_INSN); + for (i = 0; i < NUM_GPRS; i++) + regs->gprs[i] = (__u64) regs32.gprs[i]; +@@ -493,7 +497,10 @@ static int setup_frame32(int sig, struct k_sigaction *ka, + + /* Set up registers for signal handler */ + regs->gprs[15] = (__force __u64) frame; +- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ ++ /* Force 31 bit amode and default user address space control. */ ++ regs->psw.mask = PSW_MASK_BA | ++ (psw_user_bits & PSW_MASK_ASC) | ++ (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (__force __u64) ka->sa.sa_handler; + + regs->gprs[2] = map_signal(sig); +@@ -557,7 +564,10 @@ static int setup_rt_frame32(int sig, struct k_sigaction *ka, siginfo_t *info, + + /* Set up registers for signal handler */ + regs->gprs[15] = (__force __u64) frame; +- regs->psw.mask |= PSW_MASK_BA; /* force amode 31 */ ++ /* Force 31 bit amode and default user address space control. */ ++ regs->psw.mask = PSW_MASK_BA | ++ (psw_user_bits & PSW_MASK_ASC) | ++ (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (__u64) ka->sa.sa_handler; + + regs->gprs[2] = map_signal(sig); +diff --git a/arch/s390/kernel/signal.c b/arch/s390/kernel/signal.c +index 5086553..d54d475 100644 +--- a/arch/s390/kernel/signal.c ++++ b/arch/s390/kernel/signal.c +@@ -147,6 +147,10 @@ static int restore_sigregs(struct pt_regs *regs, _sigregs __user *sregs) + /* Use regs->psw.mask instead of psw_user_bits to preserve PER bit. 
*/ + regs->psw.mask = (regs->psw.mask & ~PSW_MASK_USER) | + (user_sregs.regs.psw.mask & PSW_MASK_USER); ++ /* Check for invalid user address space control. */ ++ if ((regs->psw.mask & PSW_MASK_ASC) >= (psw_kernel_bits & PSW_MASK_ASC)) ++ regs->psw.mask = (psw_user_bits & PSW_MASK_ASC) | ++ (regs->psw.mask & ~PSW_MASK_ASC); + /* Check for invalid amode */ + if (regs->psw.mask & PSW_MASK_EA) + regs->psw.mask |= PSW_MASK_BA; +@@ -293,7 +297,10 @@ static int setup_frame(int sig, struct k_sigaction *ka, + + /* Set up registers for signal handler */ + regs->gprs[15] = (unsigned long) frame; +- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ ++ /* Force default amode and default user address space control. */ ++ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | ++ (psw_user_bits & PSW_MASK_ASC) | ++ (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; + + regs->gprs[2] = map_signal(sig); +@@ -362,7 +369,10 @@ static int setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info, + + /* Set up registers for signal handler */ + regs->gprs[15] = (unsigned long) frame; +- regs->psw.mask |= PSW_MASK_EA | PSW_MASK_BA; /* 64 bit amode */ ++ /* Force default amode and default user address space control. */ ++ regs->psw.mask = PSW_MASK_EA | PSW_MASK_BA | ++ (psw_user_bits & PSW_MASK_ASC) | ++ (regs->psw.mask & ~PSW_MASK_ASC); + regs->psw.addr = (unsigned long) ka->sa.sa_handler | PSW_ADDR_AMODE; + + regs->gprs[2] = map_signal(sig); +diff --git a/arch/s390/mm/gup.c b/arch/s390/mm/gup.c +index 65cb06e..4ccf9f5 100644 +--- a/arch/s390/mm/gup.c ++++ b/arch/s390/mm/gup.c +@@ -183,7 +183,7 @@ int get_user_pages_fast(unsigned long start, int nr_pages, int write, + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; +- if (end < start) ++ if ((end < start) || (end > TASK_SIZE)) + goto slow_irqon; + + /* +diff --git a/arch/sparc/kernel/signal_64.c b/arch/sparc/kernel/signal_64.c +index f0836cd..d58d3ed 100644 +--- a/arch/sparc/kernel/signal_64.c ++++ b/arch/sparc/kernel/signal_64.c +@@ -307,9 +307,7 @@ void do_rt_sigreturn(struct pt_regs *regs) + err |= restore_fpu_state(regs, fpu_save); + + err |= __copy_from_user(&set, &sf->mask, sizeof(sigset_t)); +- err |= do_sigaltstack(&sf->stack, NULL, (unsigned long)sf); +- +- if (err) ++ if (err || do_sigaltstack(&sf->stack, NULL, (unsigned long)sf) == -EFAULT) + goto segv; + + err |= __get_user(rwin_save, &sf->rwin_save); +diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h +index 3566454..3b96fd4 100644 +--- a/arch/x86/include/asm/ptrace.h ++++ b/arch/x86/include/asm/ptrace.h +@@ -206,21 +206,14 @@ static inline bool user_64bit_mode(struct pt_regs *regs) + } + #endif + +-/* +- * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode +- * when it traps. The previous stack will be directly underneath the saved +- * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. +- * +- * This is valid only for kernel mode traps. 
+- */ +-static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +-{ + #ifdef CONFIG_X86_32 +- return (unsigned long)(®s->sp); ++extern unsigned long kernel_stack_pointer(struct pt_regs *regs); + #else ++static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) ++{ + return regs->sp; +-#endif + } ++#endif + + #define GET_IP(regs) ((regs)->ip) + #define GET_FP(regs) ((regs)->bp) +diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c +index ff8557e..f07becc 100644 +--- a/arch/x86/kernel/cpu/amd.c ++++ b/arch/x86/kernel/cpu/amd.c +@@ -587,6 +587,20 @@ static void __cpuinit init_amd(struct cpuinfo_x86 *c) + } + } + ++ /* ++ * The way access filter has a performance penalty on some workloads. ++ * Disable it on the affected CPUs. ++ */ ++ if ((c->x86 == 0x15) && ++ (c->x86_model >= 0x02) && (c->x86_model < 0x20)) { ++ u64 val; ++ ++ if (!rdmsrl_safe(0xc0011021, &val) && !(val & 0x1E)) { ++ val |= 0x1E; ++ checking_wrmsrl(0xc0011021, val); ++ } ++ } ++ + cpu_detect_cache_sizes(c); + + /* Multi core CPU? */ +diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c +index 787e06c..ce04b58 100644 +--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c ++++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c +@@ -323,17 +323,6 @@ device_initcall(thermal_throttle_init_device); + + #endif /* CONFIG_SYSFS */ + +-/* +- * Set up the most two significant bit to notify mce log that this thermal +- * event type. +- * This is a temp solution. May be changed in the future with mce log +- * infrasture. +- */ +-#define CORE_THROTTLED (0) +-#define CORE_POWER_LIMIT ((__u64)1 << 62) +-#define PACKAGE_THROTTLED ((__u64)2 << 62) +-#define PACKAGE_POWER_LIMIT ((__u64)3 << 62) +- + static void notify_thresholds(__u64 msr_val) + { + /* check whether the interrupt handler is defined; +@@ -363,27 +352,23 @@ static void intel_thermal_interrupt(void) + if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT, + THERMAL_THROTTLING_EVENT, + CORE_LEVEL) != 0) +- mce_log_therm_throt_event(CORE_THROTTLED | msr_val); ++ mce_log_therm_throt_event(msr_val); + + if (this_cpu_has(X86_FEATURE_PLN)) +- if (therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, ++ therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT, + POWER_LIMIT_EVENT, +- CORE_LEVEL) != 0) +- mce_log_therm_throt_event(CORE_POWER_LIMIT | msr_val); ++ CORE_LEVEL); + + if (this_cpu_has(X86_FEATURE_PTS)) { + rdmsrl(MSR_IA32_PACKAGE_THERM_STATUS, msr_val); +- if (therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, ++ therm_throt_process(msr_val & PACKAGE_THERM_STATUS_PROCHOT, + THERMAL_THROTTLING_EVENT, +- PACKAGE_LEVEL) != 0) +- mce_log_therm_throt_event(PACKAGE_THROTTLED | msr_val); ++ PACKAGE_LEVEL); + if (this_cpu_has(X86_FEATURE_PLN)) +- if (therm_throt_process(msr_val & ++ therm_throt_process(msr_val & + PACKAGE_THERM_STATUS_POWER_LIMIT, + POWER_LIMIT_EVENT, +- PACKAGE_LEVEL) != 0) +- mce_log_therm_throt_event(PACKAGE_POWER_LIMIT +- | msr_val); ++ PACKAGE_LEVEL); + } + } + +diff --git a/arch/x86/kernel/microcode_amd.c b/arch/x86/kernel/microcode_amd.c +index ac52c15..1ef962b 100644 +--- a/arch/x86/kernel/microcode_amd.c ++++ b/arch/x86/kernel/microcode_amd.c +@@ -163,6 +163,7 @@ static unsigned int verify_ucode_size(int cpu, const u8 *buf, unsigned int size) + #define F1XH_MPB_MAX_SIZE 2048 + #define F14H_MPB_MAX_SIZE 1824 + #define F15H_MPB_MAX_SIZE 4096 ++#define F16H_MPB_MAX_SIZE 3458 + + switch (c->x86) { + case 0x14: +@@ -171,6 +172,9 @@ static unsigned int verify_ucode_size(int cpu, const 
u8 *buf, unsigned int size) + case 0x15: + max_size = F15H_MPB_MAX_SIZE; + break; ++ case 0x16: ++ max_size = F16H_MPB_MAX_SIZE; ++ break; + default: + max_size = F1XH_MPB_MAX_SIZE; + break; +diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c +index 8252879..2dc4121 100644 +--- a/arch/x86/kernel/ptrace.c ++++ b/arch/x86/kernel/ptrace.c +@@ -21,6 +21,7 @@ + #include <linux/signal.h> + #include <linux/perf_event.h> + #include <linux/hw_breakpoint.h> ++#include <linux/module.h> + + #include <asm/uaccess.h> + #include <asm/pgtable.h> +@@ -164,6 +165,35 @@ static inline bool invalid_selector(u16 value) + + #define FLAG_MASK FLAG_MASK_32 + ++/* ++ * X86_32 CPUs don't save ss and esp if the CPU is already in kernel mode ++ * when it traps. The previous stack will be directly underneath the saved ++ * registers, and 'sp/ss' won't even have been saved. Thus the '®s->sp'. ++ * ++ * Now, if the stack is empty, '®s->sp' is out of range. In this ++ * case we try to take the previous stack. To always return a non-null ++ * stack pointer we fall back to regs as stack if no previous stack ++ * exists. ++ * ++ * This is valid only for kernel mode traps. ++ */ ++unsigned long kernel_stack_pointer(struct pt_regs *regs) ++{ ++ unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); ++ unsigned long sp = (unsigned long)®s->sp; ++ struct thread_info *tinfo; ++ ++ if (context == (sp & ~(THREAD_SIZE - 1))) ++ return sp; ++ ++ tinfo = (struct thread_info *)context; ++ if (tinfo->previous_esp) ++ return tinfo->previous_esp; ++ ++ return (unsigned long)regs; ++} ++EXPORT_SYMBOL_GPL(kernel_stack_pointer); ++ + static unsigned long *pt_regs_access(struct pt_regs *regs, unsigned long regno) + { + BUILD_BUG_ON(offsetof(struct pt_regs, bx) != 0); +diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c +index cf0ef98..0d403aa 100644 +--- a/arch/x86/kernel/setup.c ++++ b/arch/x86/kernel/setup.c +@@ -937,8 +937,21 @@ void __init setup_arch(char **cmdline_p) + + #ifdef CONFIG_X86_64 + if (max_pfn > max_low_pfn) { +- max_pfn_mapped = init_memory_mapping(1UL<<32, +- max_pfn<<PAGE_SHIFT); ++ int i; ++ for (i = 0; i < e820.nr_map; i++) { ++ struct e820entry *ei = &e820.map[i]; ++ ++ if (ei->addr + ei->size <= 1UL << 32) ++ continue; ++ ++ if (ei->type == E820_RESERVED) ++ continue; ++ ++ max_pfn_mapped = init_memory_mapping( ++ ei->addr < 1UL << 32 ? 1UL << 32 : ei->addr, ++ ei->addr + ei->size); ++ } ++ + /* can we preseve max_low_pfn ?*/ + max_low_pfn = max_pfn; + } +diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c +index 87488b9..34a7f40 100644 +--- a/arch/x86/mm/init.c ++++ b/arch/x86/mm/init.c +@@ -28,36 +28,50 @@ int direct_gbpages + #endif + ; + +-static void __init find_early_table_space(unsigned long end, int use_pse, +- int use_gbpages) ++struct map_range { ++ unsigned long start; ++ unsigned long end; ++ unsigned page_size_mask; ++}; ++ ++/* ++ * First calculate space needed for kernel direct mapping page tables to cover ++ * mr[0].start to mr[nr_range - 1].end, while accounting for possible 2M and 1GB ++ * pages. Then find enough contiguous space for those page tables. 
++ */ ++static void __init find_early_table_space(struct map_range *mr, int nr_range) + { +- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; ++ int i; ++ unsigned long puds = 0, pmds = 0, ptes = 0, tables; ++ unsigned long start = 0, good_end; + phys_addr_t base; + +- puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; +- tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); ++ for (i = 0; i < nr_range; i++) { ++ unsigned long range, extra; + +- if (use_gbpages) { +- unsigned long extra; ++ range = mr[i].end - mr[i].start; ++ puds += (range + PUD_SIZE - 1) >> PUD_SHIFT; + +- extra = end - ((end>>PUD_SHIFT) << PUD_SHIFT); +- pmds = (extra + PMD_SIZE - 1) >> PMD_SHIFT; +- } else +- pmds = (end + PMD_SIZE - 1) >> PMD_SHIFT; +- +- tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); ++ if (mr[i].page_size_mask & (1 << PG_LEVEL_1G)) { ++ extra = range - ((range >> PUD_SHIFT) << PUD_SHIFT); ++ pmds += (extra + PMD_SIZE - 1) >> PMD_SHIFT; ++ } else { ++ pmds += (range + PMD_SIZE - 1) >> PMD_SHIFT; ++ } + +- if (use_pse) { +- unsigned long extra; +- +- extra = end - ((end>>PMD_SHIFT) << PMD_SHIFT); ++ if (mr[i].page_size_mask & (1 << PG_LEVEL_2M)) { ++ extra = range - ((range >> PMD_SHIFT) << PMD_SHIFT); + #ifdef CONFIG_X86_32 +- extra += PMD_SIZE; ++ extra += PMD_SIZE; + #endif +- ptes = (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; +- } else +- ptes = (end + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ ptes += (extra + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ } else { ++ ptes += (range + PAGE_SIZE - 1) >> PAGE_SHIFT; ++ } ++ } + ++ tables = roundup(puds * sizeof(pud_t), PAGE_SIZE); ++ tables += roundup(pmds * sizeof(pmd_t), PAGE_SIZE); + tables += roundup(ptes * sizeof(pte_t), PAGE_SIZE); + + #ifdef CONFIG_X86_32 +@@ -75,7 +89,8 @@ static void __init find_early_table_space(unsigned long end, int use_pse, + pgt_buf_top = pgt_buf_start + (tables >> PAGE_SHIFT); + + printk(KERN_DEBUG "kernel direct mapping tables up to %lx @ %lx-%lx\n", +- end, pgt_buf_start << PAGE_SHIFT, pgt_buf_top << PAGE_SHIFT); ++ mr[nr_range - 1].end, pgt_buf_start << PAGE_SHIFT, ++ pgt_buf_top << PAGE_SHIFT); + } + + void __init native_pagetable_reserve(u64 start, u64 end) +@@ -83,12 +98,6 @@ void __init native_pagetable_reserve(u64 start, u64 end) + memblock_x86_reserve_range(start, end, "PGTABLE"); + } + +-struct map_range { +- unsigned long start; +- unsigned long end; +- unsigned page_size_mask; +-}; +- + #ifdef CONFIG_X86_32 + #define NR_RANGE_MR 3 + #else /* CONFIG_X86_64 */ +@@ -260,7 +269,7 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + * nodes are discovered. + */ + if (!after_bootmem) +- find_early_table_space(end, use_pse, use_gbpages); ++ find_early_table_space(mr, nr_range); + + for (i = 0; i < nr_range; i++) + ret = kernel_physical_mapping_init(mr[i].start, mr[i].end, +diff --git a/block/blk-exec.c b/block/blk-exec.c +index 6053285..ac2c6e7 100644 +--- a/block/blk-exec.c ++++ b/block/blk-exec.c +@@ -49,6 +49,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, + rq_end_io_fn *done) + { + int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK; ++ bool is_pm_resume; + + if (unlikely(blk_queue_dead(q))) { + rq->errors = -ENXIO; +@@ -59,12 +60,18 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk, + + rq->rq_disk = bd_disk; + rq->end_io = done; ++ /* ++ * need to check this before __blk_run_queue(), because rq can ++ * be freed before that returns. 
++ */ ++ is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME; ++ + WARN_ON(irqs_disabled()); + spin_lock_irq(q->queue_lock); + __elv_add_request(q, rq, where); + __blk_run_queue(q); + /* the queue is stopped so it won't be run */ +- if (rq->cmd_type == REQ_TYPE_PM_RESUME) ++ if (is_pm_resume) + q->request_fn(q); + spin_unlock_irq(q->queue_lock); + } +diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h +index c364358..791df46 100644 +--- a/drivers/gpu/drm/i915/i915_drv.h ++++ b/drivers/gpu/drm/i915/i915_drv.h +@@ -374,6 +374,8 @@ typedef struct drm_i915_private { + unsigned int lvds_use_ssc:1; + unsigned int display_clock_mode:1; + int lvds_ssc_freq; ++ unsigned int bios_lvds_val; /* initial [PCH_]LVDS reg val in VBIOS */ ++ unsigned int lvds_val; /* used for checking LVDS channel mode */ + struct { + int rate; + int lanes; +diff --git a/drivers/gpu/drm/i915/intel_bios.c b/drivers/gpu/drm/i915/intel_bios.c +index 22efb08..87bb87b 100644 +--- a/drivers/gpu/drm/i915/intel_bios.c ++++ b/drivers/gpu/drm/i915/intel_bios.c +@@ -174,6 +174,28 @@ get_lvds_dvo_timing(const struct bdb_lvds_lfp_data *lvds_lfp_data, + return (struct lvds_dvo_timing *)(entry + dvo_timing_offset); + } + ++/* get lvds_fp_timing entry ++ * this function may return NULL if the corresponding entry is invalid ++ */ ++static const struct lvds_fp_timing * ++get_lvds_fp_timing(const struct bdb_header *bdb, ++ const struct bdb_lvds_lfp_data *data, ++ const struct bdb_lvds_lfp_data_ptrs *ptrs, ++ int index) ++{ ++ size_t data_ofs = (const u8 *)data - (const u8 *)bdb; ++ u16 data_size = ((const u16 *)data)[-1]; /* stored in header */ ++ size_t ofs; ++ ++ if (index >= ARRAY_SIZE(ptrs->ptr)) ++ return NULL; ++ ofs = ptrs->ptr[index].fp_timing_offset; ++ if (ofs < data_ofs || ++ ofs + sizeof(struct lvds_fp_timing) > data_ofs + data_size) ++ return NULL; ++ return (const struct lvds_fp_timing *)((const u8 *)bdb + ofs); ++} ++ + /* Try to find integrated panel data */ + static void + parse_lfp_panel_data(struct drm_i915_private *dev_priv, +@@ -183,6 +205,7 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, + const struct bdb_lvds_lfp_data *lvds_lfp_data; + const struct bdb_lvds_lfp_data_ptrs *lvds_lfp_data_ptrs; + const struct lvds_dvo_timing *panel_dvo_timing; ++ const struct lvds_fp_timing *fp_timing; + struct drm_display_mode *panel_fixed_mode; + int i, downclock; + +@@ -244,6 +267,19 @@ parse_lfp_panel_data(struct drm_i915_private *dev_priv, + "Normal Clock %dKHz, downclock %dKHz\n", + panel_fixed_mode->clock, 10*downclock); + } ++ ++ fp_timing = get_lvds_fp_timing(bdb, lvds_lfp_data, ++ lvds_lfp_data_ptrs, ++ lvds_options->panel_type); ++ if (fp_timing) { ++ /* check the resolution, just to be sure */ ++ if (fp_timing->x_res == panel_fixed_mode->hdisplay && ++ fp_timing->y_res == panel_fixed_mode->vdisplay) { ++ dev_priv->bios_lvds_val = fp_timing->lvds_reg_val; ++ DRM_DEBUG_KMS("VBT initial LVDS value %x\n", ++ dev_priv->bios_lvds_val); ++ } ++ } + } + + /* Try to find sdvo panel data */ +diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c +index adac0dd..fdae61f 100644 +--- a/drivers/gpu/drm/i915/intel_display.c ++++ b/drivers/gpu/drm/i915/intel_display.c +@@ -356,6 +356,27 @@ static const intel_limit_t intel_limits_ironlake_display_port = { + .find_pll = intel_find_pll_ironlake_dp, + }; + ++static bool is_dual_link_lvds(struct drm_i915_private *dev_priv, ++ unsigned int reg) ++{ ++ unsigned int val; ++ ++ if (dev_priv->lvds_val) ++ val = dev_priv->lvds_val; ++ 
else { ++ /* BIOS should set the proper LVDS register value at boot, but ++ * in reality, it doesn't set the value when the lid is closed; ++ * we need to check "the value to be set" in VBT when LVDS ++ * register is uninitialized. ++ */ ++ val = I915_READ(reg); ++ if (!(val & ~LVDS_DETECTED)) ++ val = dev_priv->bios_lvds_val; ++ dev_priv->lvds_val = val; ++ } ++ return (val & LVDS_CLKB_POWER_MASK) == LVDS_CLKB_POWER_UP; ++} ++ + static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, + int refclk) + { +@@ -364,8 +385,7 @@ static const intel_limit_t *intel_ironlake_limit(struct drm_crtc *crtc, + const intel_limit_t *limit; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { +- if ((I915_READ(PCH_LVDS) & LVDS_CLKB_POWER_MASK) == +- LVDS_CLKB_POWER_UP) { ++ if (is_dual_link_lvds(dev_priv, PCH_LVDS)) { + /* LVDS dual channel */ + if (refclk == 100000) + limit = &intel_limits_ironlake_dual_lvds_100m; +@@ -393,8 +413,7 @@ static const intel_limit_t *intel_g4x_limit(struct drm_crtc *crtc) + const intel_limit_t *limit; + + if (intel_pipe_has_type(crtc, INTEL_OUTPUT_LVDS)) { +- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == +- LVDS_CLKB_POWER_UP) ++ if (is_dual_link_lvds(dev_priv, LVDS)) + /* LVDS with dual channel */ + limit = &intel_limits_g4x_dual_channel_lvds; + else +@@ -531,8 +550,7 @@ intel_find_best_PLL(const intel_limit_t *limit, struct drm_crtc *crtc, + * reliably set up different single/dual channel state, if we + * even can. + */ +- if ((I915_READ(LVDS) & LVDS_CLKB_POWER_MASK) == +- LVDS_CLKB_POWER_UP) ++ if (is_dual_link_lvds(dev_priv, LVDS)) + clock.p2 = limit->p2.p2_fast; + else + clock.p2 = limit->p2.p2_slow; +diff --git a/drivers/gpu/drm/i915/intel_sdvo.c b/drivers/gpu/drm/i915/intel_sdvo.c +index 3f4afba..9e24670 100644 +--- a/drivers/gpu/drm/i915/intel_sdvo.c ++++ b/drivers/gpu/drm/i915/intel_sdvo.c +@@ -2264,6 +2264,18 @@ intel_sdvo_output_setup(struct intel_sdvo *intel_sdvo, uint16_t flags) + return true; + } + ++static void intel_sdvo_output_cleanup(struct intel_sdvo *intel_sdvo) ++{ ++ struct drm_device *dev = intel_sdvo->base.base.dev; ++ struct drm_connector *connector, *tmp; ++ ++ list_for_each_entry_safe(connector, tmp, ++ &dev->mode_config.connector_list, head) { ++ if (intel_attached_encoder(connector) == &intel_sdvo->base) ++ intel_sdvo_destroy(connector); ++ } ++} ++ + static bool intel_sdvo_tv_create_property(struct intel_sdvo *intel_sdvo, + struct intel_sdvo_connector *intel_sdvo_connector, + int type) +@@ -2596,7 +2608,8 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + intel_sdvo->caps.output_flags) != true) { + DRM_DEBUG_KMS("SDVO output failed to setup on SDVO%c\n", + IS_SDVOB(sdvo_reg) ? 'B' : 'C'); +- goto err; ++ /* Output_setup can leave behind connectors! */ ++ goto err_output; + } + + /* Only enable the hotplug irq if we need it, to work around noisy +@@ -2609,12 +2622,12 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + + /* Set the input timing to the screen. Assume always input 0. */ + if (!intel_sdvo_set_target_input(intel_sdvo)) +- goto err; ++ goto err_output; + + if (!intel_sdvo_get_input_pixel_clock_range(intel_sdvo, + &intel_sdvo->pixel_clock_min, + &intel_sdvo->pixel_clock_max)) +- goto err; ++ goto err_output; + + DRM_DEBUG_KMS("%s device VID/DID: %02X:%02X.%02X, " + "clock range %dMHz - %dMHz, " +@@ -2634,6 +2647,9 @@ bool intel_sdvo_init(struct drm_device *dev, int sdvo_reg) + (SDVO_OUTPUT_TMDS1 | SDVO_OUTPUT_RGB1) ? 
'Y' : 'N'); + return true; + ++err_output: ++ intel_sdvo_output_cleanup(intel_sdvo); ++ + err: + drm_encoder_cleanup(&intel_encoder->base); + i2c_del_adapter(&intel_sdvo->ddc); +diff --git a/drivers/gpu/drm/radeon/atombios_encoders.c b/drivers/gpu/drm/radeon/atombios_encoders.c +index 382e141..aca4755 100644 +--- a/drivers/gpu/drm/radeon/atombios_encoders.c ++++ b/drivers/gpu/drm/radeon/atombios_encoders.c +@@ -1386,7 +1386,7 @@ radeon_atom_encoder_dpms_dig(struct drm_encoder *encoder, int mode) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_SETUP, 0, 0); + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE, 0, 0); + /* some early dce3.2 boards have a bug in their transmitter control table */ +- if ((rdev->family != CHIP_RV710) || (rdev->family != CHIP_RV730)) ++ if ((rdev->family != CHIP_RV710) && (rdev->family != CHIP_RV730)) + atombios_dig_transmitter_setup(encoder, ATOM_TRANSMITTER_ACTION_ENABLE_OUTPUT, 0, 0); + } + if (ENCODER_MODE_IS_DP(atombios_get_encoder_mode(encoder)) && connector) { +diff --git a/drivers/gpu/drm/radeon/evergreen.c b/drivers/gpu/drm/radeon/evergreen.c +index ca94e23..b919b11 100644 +--- a/drivers/gpu/drm/radeon/evergreen.c ++++ b/drivers/gpu/drm/radeon/evergreen.c +@@ -1122,6 +1122,8 @@ void evergreen_mc_stop(struct radeon_device *rdev, struct evergreen_mc_save *sav + break; + udelay(1); + } ++ } else { ++ save->crtc_enabled[i] = false; + } + } + +diff --git a/drivers/gpu/drm/radeon/radeon_agp.c b/drivers/gpu/drm/radeon/radeon_agp.c +index bd2f33e..bc6b64f 100644 +--- a/drivers/gpu/drm/radeon/radeon_agp.c ++++ b/drivers/gpu/drm/radeon/radeon_agp.c +@@ -70,9 +70,12 @@ static struct radeon_agpmode_quirk radeon_agpmode_quirk_list[] = { + /* Intel 82830 830 Chipset Host Bridge / Mobility M6 LY Needs AGPMode 2 (fdo #17360)*/ + { PCI_VENDOR_ID_INTEL, 0x3575, PCI_VENDOR_ID_ATI, 0x4c59, + PCI_VENDOR_ID_DELL, 0x00e3, 2}, +- /* Intel 82852/82855 host bridge / Mobility FireGL 9000 R250 Needs AGPMode 1 (lp #296617) */ ++ /* Intel 82852/82855 host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 (lp #296617) */ + { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4c66, + PCI_VENDOR_ID_DELL, 0x0149, 1}, ++ /* Intel 82855PM host bridge / Mobility FireGL 9000 RV250 Needs AGPMode 1 for suspend/resume */ ++ { PCI_VENDOR_ID_INTEL, 0x3340, PCI_VENDOR_ID_ATI, 0x4c66, ++ PCI_VENDOR_ID_IBM, 0x0531, 1}, + /* Intel 82852/82855 host bridge / Mobility 9600 M10 RV350 Needs AGPMode 1 (deb #467460) */ + { PCI_VENDOR_ID_INTEL, 0x3580, PCI_VENDOR_ID_ATI, 0x4e50, + 0x1025, 0x0061, 1}, +diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c +index 727e93d..9e4313e 100644 +--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c ++++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c +@@ -708,7 +708,10 @@ int ttm_get_pages(struct list_head *pages, int flags, + /* clear the pages coming from the pool if requested */ + if (flags & TTM_PAGE_FLAG_ZERO_ALLOC) { + list_for_each_entry(p, pages, lru) { +- clear_page(page_address(p)); ++ if (PageHighMem(p)) ++ clear_highpage(p); ++ else ++ clear_page(page_address(p)); + } + } + +diff --git a/drivers/hid/hid-ids.h b/drivers/hid/hid-ids.h +index ab75a4e..652f230 100644 +--- a/drivers/hid/hid-ids.h ++++ b/drivers/hid/hid-ids.h +@@ -277,6 +277,9 @@ + #define USB_VENDOR_ID_EZKEY 0x0518 + #define USB_DEVICE_ID_BTC_8193 0x0002 + ++#define USB_VENDOR_ID_FREESCALE 0x15A2 ++#define USB_DEVICE_ID_FREESCALE_MX28 0x004F ++ + #define USB_VENDOR_ID_GAMERON 0x0810 + #define USB_DEVICE_ID_GAMERON_DUAL_PSX_ADAPTOR 0x0001 + 
#define USB_DEVICE_ID_GAMERON_DUAL_PCS_ADAPTOR 0x0002 +diff --git a/drivers/hid/usbhid/hid-quirks.c b/drivers/hid/usbhid/hid-quirks.c +index afb73af..aec3fa3 100644 +--- a/drivers/hid/usbhid/hid-quirks.c ++++ b/drivers/hid/usbhid/hid-quirks.c +@@ -68,6 +68,7 @@ static const struct hid_blacklist { + { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_AXIS_295, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_DMI, USB_DEVICE_ID_DMI_ENC, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_ELO, USB_DEVICE_ID_ELO_TS2700, HID_QUIRK_NOGET }, ++ { USB_VENDOR_ID_FREESCALE, USB_DEVICE_ID_FREESCALE_MX28, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_MGE, USB_DEVICE_ID_MGE_UPS, HID_QUIRK_NOGET }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN, HID_QUIRK_NO_INIT_REPORTS }, + { USB_VENDOR_ID_PIXART, USB_DEVICE_ID_PIXART_OPTICAL_TOUCH_SCREEN1, HID_QUIRK_NO_INIT_REPORTS }, +diff --git a/drivers/idle/intel_idle.c b/drivers/idle/intel_idle.c +index 5b39216..3f28290 100644 +--- a/drivers/idle/intel_idle.c ++++ b/drivers/idle/intel_idle.c +@@ -163,6 +163,38 @@ static struct cpuidle_state snb_cstates[MWAIT_MAX_NUM_CSTATES] = { + .enter = &intel_idle }, + }; + ++static struct cpuidle_state ivb_cstates[MWAIT_MAX_NUM_CSTATES] = { ++ { /* MWAIT C0 */ }, ++ { /* MWAIT C1 */ ++ .name = "C1-IVB", ++ .desc = "MWAIT 0x00", ++ .flags = CPUIDLE_FLAG_TIME_VALID, ++ .exit_latency = 1, ++ .target_residency = 1, ++ .enter = &intel_idle }, ++ { /* MWAIT C2 */ ++ .name = "C3-IVB", ++ .desc = "MWAIT 0x10", ++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 59, ++ .target_residency = 156, ++ .enter = &intel_idle }, ++ { /* MWAIT C3 */ ++ .name = "C6-IVB", ++ .desc = "MWAIT 0x20", ++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 80, ++ .target_residency = 300, ++ .enter = &intel_idle }, ++ { /* MWAIT C4 */ ++ .name = "C7-IVB", ++ .desc = "MWAIT 0x30", ++ .flags = CPUIDLE_FLAG_TIME_VALID | CPUIDLE_FLAG_TLB_FLUSHED, ++ .exit_latency = 87, ++ .target_residency = 300, ++ .enter = &intel_idle }, ++}; ++ + static struct cpuidle_state atom_cstates[MWAIT_MAX_NUM_CSTATES] = { + { /* MWAIT C0 */ }, + { /* MWAIT C1 */ +@@ -386,6 +418,11 @@ static int intel_idle_probe(void) + cpuidle_state_table = snb_cstates; + break; + ++ case 0x3A: /* IVB */ ++ case 0x3E: /* IVB Xeon */ ++ cpuidle_state_table = ivb_cstates; ++ break; ++ + default: + pr_debug(PREFIX "does not run on family %d model %d\n", + boot_cpu_data.x86, boot_cpu_data.x86_model); +diff --git a/drivers/input/mouse/bcm5974.c b/drivers/input/mouse/bcm5974.c +index ec58f48..1512bd8 100644 +--- a/drivers/input/mouse/bcm5974.c ++++ b/drivers/input/mouse/bcm5974.c +@@ -453,6 +453,9 @@ static void setup_events_to_report(struct input_dev *input_dev, + __set_bit(BTN_TOOL_QUADTAP, input_dev->keybit); + __set_bit(BTN_LEFT, input_dev->keybit); + ++ if (cfg->caps & HAS_INTEGRATED_BUTTON) ++ __set_bit(INPUT_PROP_BUTTONPAD, input_dev->propbit); ++ + input_set_events_per_packet(input_dev, 60); + } + +diff --git a/drivers/input/serio/i8042.c b/drivers/input/serio/i8042.c +index d37a48e..8656441 100644 +--- a/drivers/input/serio/i8042.c ++++ b/drivers/input/serio/i8042.c +@@ -991,7 +991,7 @@ static int i8042_controller_init(void) + * Reset the controller and reset CRT to the original value set by BIOS. + */ + +-static void i8042_controller_reset(void) ++static void i8042_controller_reset(bool force_reset) + { + i8042_flush(); + +@@ -1016,7 +1016,7 @@ static void i8042_controller_reset(void) + * Reset the controller if requested. 
+ */ + +- if (i8042_reset) ++ if (i8042_reset || force_reset) + i8042_controller_selftest(); + + /* +@@ -1139,9 +1139,9 @@ static int i8042_controller_resume(bool force_reset) + * upsetting it. + */ + +-static int i8042_pm_reset(struct device *dev) ++static int i8042_pm_suspend(struct device *dev) + { +- i8042_controller_reset(); ++ i8042_controller_reset(true); + + return 0; + } +@@ -1163,13 +1163,20 @@ static int i8042_pm_thaw(struct device *dev) + return 0; + } + ++static int i8042_pm_reset(struct device *dev) ++{ ++ i8042_controller_reset(false); ++ ++ return 0; ++} ++ + static int i8042_pm_restore(struct device *dev) + { + return i8042_controller_resume(false); + } + + static const struct dev_pm_ops i8042_pm_ops = { +- .suspend = i8042_pm_reset, ++ .suspend = i8042_pm_suspend, + .resume = i8042_pm_resume, + .thaw = i8042_pm_thaw, + .poweroff = i8042_pm_reset, +@@ -1185,7 +1192,7 @@ static const struct dev_pm_ops i8042_pm_ops = { + + static void i8042_shutdown(struct platform_device *dev) + { +- i8042_controller_reset(); ++ i8042_controller_reset(false); + } + + static int __init i8042_create_kbd_port(void) +@@ -1424,7 +1431,7 @@ static int __init i8042_probe(struct platform_device *dev) + out_fail: + i8042_free_aux_ports(); /* in case KBD failed but AUX not */ + i8042_free_irqs(); +- i8042_controller_reset(); ++ i8042_controller_reset(false); + i8042_platform_device = NULL; + + return error; +@@ -1434,7 +1441,7 @@ static int __devexit i8042_remove(struct platform_device *dev) + { + i8042_unregister_ports(); + i8042_free_irqs(); +- i8042_controller_reset(); ++ i8042_controller_reset(false); + i8042_platform_device = NULL; + + return 0; +diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c +index 3913f47..492aa52 100644 +--- a/drivers/isdn/gigaset/bas-gigaset.c ++++ b/drivers/isdn/gigaset/bas-gigaset.c +@@ -616,7 +616,13 @@ static void int_in_work(struct work_struct *work) + if (rc == 0) + /* success, resubmit interrupt read URB */ + rc = usb_submit_urb(urb, GFP_ATOMIC); +- if (rc != 0 && rc != -ENODEV) { ++ ++ switch (rc) { ++ case 0: /* success */ ++ case -ENODEV: /* device gone */ ++ case -EINVAL: /* URB already resubmitted, or terminal badness */ ++ break; ++ default: /* failure: try to recover by resetting the device */ + dev_err(cs->dev, "clear halt failed: %s\n", get_usb_rcmsg(rc)); + rc = usb_lock_device_for_reset(ucs->udev, ucs->interface); + if (rc == 0) { +@@ -2437,7 +2443,9 @@ static void gigaset_disconnect(struct usb_interface *interface) + } + + /* gigaset_suspend +- * This function is called before the USB connection is suspended. ++ * This function is called before the USB connection is suspended ++ * or before the USB device is reset. ++ * In the latter case, message == PMSG_ON. 
+ */ + static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) + { +@@ -2493,7 +2501,12 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message) + del_timer_sync(&ucs->timer_atrdy); + del_timer_sync(&ucs->timer_cmd_in); + del_timer_sync(&ucs->timer_int_in); +- cancel_work_sync(&ucs->int_in_wq); ++ ++ /* don't try to cancel int_in_wq from within reset as it ++ * might be the one requesting the reset ++ */ ++ if (message.event != PM_EVENT_ON) ++ cancel_work_sync(&ucs->int_in_wq); + + gig_dbg(DEBUG_SUSPEND, "suspend complete"); + return 0; +diff --git a/drivers/md/dm.c b/drivers/md/dm.c +index 502dcf7..8953630 100644 +--- a/drivers/md/dm.c ++++ b/drivers/md/dm.c +@@ -755,8 +755,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue) + if (!md_in_flight(md)) + wake_up(&md->wait); + ++ /* ++ * Run this off this callpath, as drivers could invoke end_io while ++ * inside their request_fn (and holding the queue lock). Calling ++ * back into ->request_fn() could deadlock attempting to grab the ++ * queue lock again. ++ */ + if (run_queue) +- blk_run_queue(md->queue); ++ blk_run_queue_async(md->queue); + + /* + * dm_put() must be at the end of this function. See the comment above +diff --git a/drivers/md/md.c b/drivers/md/md.c +index 2887f22..145e378e 100644 +--- a/drivers/md/md.c ++++ b/drivers/md/md.c +@@ -1801,10 +1801,10 @@ retry: + memset(bbp, 0xff, PAGE_SIZE); + + for (i = 0 ; i < bb->count ; i++) { +- u64 internal_bb = *p++; ++ u64 internal_bb = p[i]; + u64 store_bb = ((BB_OFFSET(internal_bb) << 10) + | BB_LEN(internal_bb)); +- *bbp++ = cpu_to_le64(store_bb); ++ bbp[i] = cpu_to_le64(store_bb); + } + bb->changed = 0; + if (read_seqretry(&bb->lock, seq)) +@@ -7650,9 +7650,9 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, + sector_t *first_bad, int *bad_sectors) + { + int hi; +- int lo = 0; ++ int lo; + u64 *p = bb->page; +- int rv = 0; ++ int rv; + sector_t target = s + sectors; + unsigned seq; + +@@ -7667,7 +7667,8 @@ int md_is_badblock(struct badblocks *bb, sector_t s, int sectors, + + retry: + seq = read_seqbegin(&bb->lock); +- ++ lo = 0; ++ rv = 0; + hi = bb->count; + + /* Binary search between lo and hi for 'target' +diff --git a/drivers/mtd/devices/slram.c b/drivers/mtd/devices/slram.c +index e585263..f38c348 100644 +--- a/drivers/mtd/devices/slram.c ++++ b/drivers/mtd/devices/slram.c +@@ -266,7 +266,7 @@ static int parse_cmdline(char *devname, char *szstart, char *szlength) + + if (*(szlength) != '+') { + devlength = simple_strtoul(szlength, &buffer, 0); +- devlength = handle_unit(devlength, buffer) - devstart; ++ devlength = handle_unit(devlength, buffer); + if (devlength < devstart) + goto err_out; + +diff --git a/drivers/mtd/ofpart.c b/drivers/mtd/ofpart.c +index 64be8f0..d9127e2 100644 +--- a/drivers/mtd/ofpart.c ++++ b/drivers/mtd/ofpart.c +@@ -121,7 +121,7 @@ static int parse_ofoldpart_partitions(struct mtd_info *master, + nr_parts = plen / sizeof(part[0]); + + *pparts = kzalloc(nr_parts * sizeof(*(*pparts)), GFP_KERNEL); +- if (!pparts) ++ if (!*pparts) + return -ENOMEM; + + names = of_get_property(dp, "partition-names", &plen); +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +index 4ae26a7..7720721 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +@@ -356,6 +356,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) + case 
IXGBE_DEV_ID_82599_SFP_FCOE: + case IXGBE_DEV_ID_82599_SFP_EM: + case IXGBE_DEV_ID_82599_SFP_SF2: ++ case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599EN_SFP: + media_type = ixgbe_media_type_fiber; + break; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index f1365fe..2c14e85 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -3157,6 +3157,7 @@ static s32 ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + + switch (hw->device_id) { + case IXGBE_DEV_ID_X540T: ++ case IXGBE_DEV_ID_X540T1: + return 0; + case IXGBE_DEV_ID_82599_T3_LOM: + return 0; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index 8ef92d1..cc96a5a 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -106,6 +106,8 @@ static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, + {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, + /* required last entry */ + {0, } + }; +@@ -7611,6 +7613,7 @@ static int __devinit ixgbe_probe(struct pci_dev *pdev, + adapter->wol = IXGBE_WUFC_MAG; + break; + case IXGBE_DEV_ID_X540T: ++ case IXGBE_DEV_ID_X540T1: + /* Check eeprom to see if it is enabled */ + hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); + wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +index 6c5cca8..f00d6d5 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +@@ -65,6 +65,8 @@ + #define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C + #define IXGBE_DEV_ID_82599_LS 0x154F + #define IXGBE_DEV_ID_X540T 0x1528 ++#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A ++#define IXGBE_DEV_ID_X540T1 0x1560 + + /* VF Device IDs */ + #define IXGBE_DEV_ID_82599_VF 0x10ED +diff --git a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +index 0515862..858a762 100644 +--- a/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c ++++ b/drivers/net/wireless/brcm80211/brcmsmac/ampdu.c +@@ -1072,9 +1072,9 @@ brcms_c_ampdu_dotxstatus_complete(struct ampdu_info *ampdu, struct scb *scb, + IEEE80211_TX_STAT_AMPDU_NO_BACK; + skb_pull(p, D11_PHY_HDR_LEN); + skb_pull(p, D11_TXH_LEN); +- wiphy_err(wiphy, "%s: BA Timeout, seq %d, in_" +- "transit %d\n", "AMPDU status", seq, +- ini->tx_in_transit); ++ BCMMSG(wiphy, ++ "BA Timeout, seq %d, in_transit %d\n", ++ seq, ini->tx_in_transit); + ieee80211_tx_status_irqsafe(wlc->pub->ieee_hw, + p); + } +diff --git a/drivers/net/wireless/mwifiex/cmdevt.c b/drivers/net/wireless/mwifiex/cmdevt.c +index 6e0a3ea..5a25dd2 100644 +--- a/drivers/net/wireless/mwifiex/cmdevt.c ++++ b/drivers/net/wireless/mwifiex/cmdevt.c +@@ -816,9 +816,6 @@ mwifiex_cmd_timeout_func(unsigned long function_context) + return; + } + cmd_node = adapter->curr_cmd; +- if (cmd_node->wait_q_enabled) +- adapter->cmd_wait_q.status = -ETIMEDOUT; +- + if (cmd_node) { + adapter->dbg.timeout_cmd_id = + adapter->dbg.last_cmd_id[adapter->dbg.last_cmd_index]; +@@ -863,6 +860,14 @@ mwifiex_cmd_timeout_func(unsigned long 
function_context) + + dev_err(adapter->dev, "ps_mode=%d ps_state=%d\n", + adapter->ps_mode, adapter->ps_state); ++ ++ if (cmd_node->wait_q_enabled) { ++ adapter->cmd_wait_q.status = -ETIMEDOUT; ++ wake_up_interruptible(&adapter->cmd_wait_q.wait); ++ mwifiex_cancel_pending_ioctl(adapter); ++ /* reset cmd_sent flag to unblock new commands */ ++ adapter->cmd_sent = false; ++ } + } + if (adapter->hw_status == MWIFIEX_HW_STATUS_INITIALIZING) + mwifiex_init_fw_complete(adapter); +diff --git a/drivers/net/wireless/mwifiex/sdio.c b/drivers/net/wireless/mwifiex/sdio.c +index 283171b..3579a68 100644 +--- a/drivers/net/wireless/mwifiex/sdio.c ++++ b/drivers/net/wireless/mwifiex/sdio.c +@@ -162,7 +162,6 @@ static int mwifiex_sdio_suspend(struct device *dev) + struct sdio_mmc_card *card; + struct mwifiex_adapter *adapter; + mmc_pm_flag_t pm_flag = 0; +- int hs_actived = 0; + int i; + int ret = 0; + +@@ -189,12 +188,14 @@ static int mwifiex_sdio_suspend(struct device *dev) + adapter = card->adapter; + + /* Enable the Host Sleep */ +- hs_actived = mwifiex_enable_hs(adapter); +- if (hs_actived) { +- pr_debug("cmd: suspend with MMC_PM_KEEP_POWER\n"); +- ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); ++ if (!mwifiex_enable_hs(adapter)) { ++ dev_err(adapter->dev, "cmd: failed to suspend\n"); ++ return -EFAULT; + } + ++ dev_dbg(adapter->dev, "cmd: suspend with MMC_PM_KEEP_POWER\n"); ++ ret = sdio_set_host_pm_flags(func, MMC_PM_KEEP_POWER); ++ + /* Indicate device suspended */ + adapter->is_suspended = true; + +diff --git a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +index 0302148..a99be2d0 100644 +--- a/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c ++++ b/drivers/net/wireless/rtlwifi/rtl8192cu/sw.c +@@ -307,6 +307,7 @@ static struct usb_device_id rtl8192c_usb_ids[] = { + /*=== Customer ID ===*/ + /****** 8188CU ********/ + {RTL_USB_DEVICE(0x050d, 0x1102, rtl92cu_hal_cfg)}, /*Belkin - Edimax*/ ++ {RTL_USB_DEVICE(0x050d, 0x11f2, rtl92cu_hal_cfg)}, /*Belkin - ISY*/ + {RTL_USB_DEVICE(0x06f8, 0xe033, rtl92cu_hal_cfg)}, /*Hercules - Edimax*/ + {RTL_USB_DEVICE(0x07b8, 0x8188, rtl92cu_hal_cfg)}, /*Abocom - Abocom*/ + {RTL_USB_DEVICE(0x07b8, 0x8189, rtl92cu_hal_cfg)}, /*Funai - Abocom*/ +diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c +index 86b69f85..9d932f4 100644 +--- a/drivers/pci/setup-bus.c ++++ b/drivers/pci/setup-bus.c +@@ -612,7 +612,7 @@ static void pbus_size_io(struct pci_bus *bus, resource_size_t min_size, + if (children_add_size > add_size) + add_size = children_add_size; + size1 = (!realloc_head || (realloc_head && !add_size)) ? size0 : +- calculate_iosize(size, min_size+add_size, size1, ++ calculate_iosize(size, min_size, add_size + size1, + resource_size(b_res), 4096); + if (!size0 && !size1) { + if (b_res->start || b_res->end) +@@ -726,7 +726,7 @@ static int pbus_size_mem(struct pci_bus *bus, unsigned long mask, + if (children_add_size > add_size) + add_size = children_add_size; + size1 = (!realloc_head || (realloc_head && !add_size)) ? 
size0 : +- calculate_memsize(size, min_size+add_size, 0, ++ calculate_memsize(size, min_size, add_size, + resource_size(b_res), min_align); + if (!size0 && !size1) { + if (b_res->start || b_res->end) +diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c +index 5717509b..8b25f9c 100644 +--- a/drivers/pci/setup-res.c ++++ b/drivers/pci/setup-res.c +@@ -233,11 +233,12 @@ int pci_reassign_resource(struct pci_dev *dev, int resno, resource_size_t addsiz + return -EINVAL; + } + +- new_size = resource_size(res) + addsize + min_align; ++ /* already aligned with min_align */ ++ new_size = resource_size(res) + addsize; + ret = _pci_assign_resource(dev, resno, new_size, min_align); + if (!ret) { + res->flags &= ~IORESOURCE_STARTALIGN; +- dev_info(&dev->dev, "BAR %d: assigned %pR\n", resno, res); ++ dev_info(&dev->dev, "BAR %d: reassigned %pR\n", resno, res); + if (resno < PCI_BRIDGE_RESOURCES) + pci_update_resource(dev, resno); + } +diff --git a/drivers/platform/x86/acer-wmi.c b/drivers/platform/x86/acer-wmi.c +index 110e4af..7d47434 100644 +--- a/drivers/platform/x86/acer-wmi.c ++++ b/drivers/platform/x86/acer-wmi.c +@@ -105,6 +105,7 @@ static const struct key_entry acer_wmi_keymap[] = { + {KE_KEY, 0x22, {KEY_PROG2} }, /* Arcade */ + {KE_KEY, 0x23, {KEY_PROG3} }, /* P_Key */ + {KE_KEY, 0x24, {KEY_PROG4} }, /* Social networking_Key */ ++ {KE_KEY, 0x29, {KEY_PROG3} }, /* P_Key for TM8372 */ + {KE_IGNORE, 0x41, {KEY_MUTE} }, + {KE_IGNORE, 0x42, {KEY_PREVIOUSSONG} }, + {KE_IGNORE, 0x43, {KEY_NEXTSONG} }, +diff --git a/drivers/scsi/isci/request.c b/drivers/scsi/isci/request.c +index 192cb48..f3cbecc 100644 +--- a/drivers/scsi/isci/request.c ++++ b/drivers/scsi/isci/request.c +@@ -1849,7 +1849,7 @@ sci_io_request_frame_handler(struct isci_request *ireq, + frame_index, + (void **)&frame_buffer); + +- sci_controller_copy_sata_response(&ireq->stp.req, ++ sci_controller_copy_sata_response(&ireq->stp.rsp, + frame_header, + frame_buffer); + +diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c +index 0c6fb19..7de9993 100644 +--- a/drivers/usb/host/xhci-ring.c ++++ b/drivers/usb/host/xhci-ring.c +@@ -1934,7 +1934,6 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td, + } + break; + case COMP_SHORT_TX: +- xhci_warn(xhci, "WARN: short transfer on control ep\n"); + if (td->urb->transfer_flags & URB_SHORT_NOT_OK) + *status = -EREMOTEIO; + else +@@ -2291,7 +2290,7 @@ static int handle_tx_event(struct xhci_hcd *xhci, + xhci_dbg(xhci, "Stopped on No-op or Link TRB\n"); + break; + case COMP_STALL: +- xhci_warn(xhci, "WARN: Stalled endpoint\n"); ++ xhci_dbg(xhci, "Stalled endpoint\n"); + ep->ep_state |= EP_HALTED; + status = -EPIPE; + break; +@@ -2301,11 +2300,11 @@ static int handle_tx_event(struct xhci_hcd *xhci, + break; + case COMP_SPLIT_ERR: + case COMP_TX_ERR: +- xhci_warn(xhci, "WARN: transfer error on endpoint\n"); ++ xhci_dbg(xhci, "Transfer error on endpoint\n"); + status = -EPROTO; + break; + case COMP_BABBLE: +- xhci_warn(xhci, "WARN: babble error on endpoint\n"); ++ xhci_dbg(xhci, "Babble error on endpoint\n"); + status = -EOVERFLOW; + break; + case COMP_DB_ERR: +diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c +index 5a23f4d..dab05d1 100644 +--- a/drivers/usb/host/xhci.c ++++ b/drivers/usb/host/xhci.c +@@ -206,14 +206,14 @@ static int xhci_setup_msi(struct xhci_hcd *xhci) + + ret = pci_enable_msi(pdev); + if (ret) { +- xhci_err(xhci, "failed to allocate MSI entry\n"); ++ xhci_dbg(xhci, "failed to allocate MSI entry\n"); + return ret; + } + + ret 
= request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq, + 0, "xhci_hcd", xhci_to_hcd(xhci)); + if (ret) { +- xhci_err(xhci, "disable MSI interrupt\n"); ++ xhci_dbg(xhci, "disable MSI interrupt\n"); + pci_disable_msi(pdev); + } + +@@ -276,7 +276,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci) + + ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count); + if (ret) { +- xhci_err(xhci, "Failed to enable MSI-X\n"); ++ xhci_dbg(xhci, "Failed to enable MSI-X\n"); + goto free_entries; + } + +@@ -292,7 +292,7 @@ static int xhci_setup_msix(struct xhci_hcd *xhci) + return ret; + + disable_msix: +- xhci_err(xhci, "disable MSI-X interrupt\n"); ++ xhci_dbg(xhci, "disable MSI-X interrupt\n"); + xhci_free_irq(xhci); + pci_disable_msix(pdev); + free_entries: +diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c +index c334670..a5f875d 100644 +--- a/drivers/usb/serial/option.c ++++ b/drivers/usb/serial/option.c +@@ -157,6 +157,7 @@ static void option_instat_callback(struct urb *urb); + #define NOVATELWIRELESS_PRODUCT_EVDO_EMBEDDED_HIGHSPEED 0x8001 + #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_FULLSPEED 0x9000 + #define NOVATELWIRELESS_PRODUCT_HSPA_EMBEDDED_HIGHSPEED 0x9001 ++#define NOVATELWIRELESS_PRODUCT_E362 0x9010 + #define NOVATELWIRELESS_PRODUCT_G1 0xA001 + #define NOVATELWIRELESS_PRODUCT_G1_M 0xA002 + #define NOVATELWIRELESS_PRODUCT_G2 0xA010 +@@ -192,6 +193,9 @@ static void option_instat_callback(struct urb *urb); + #define DELL_PRODUCT_5730_MINICARD_TELUS 0x8181 + #define DELL_PRODUCT_5730_MINICARD_VZW 0x8182 + ++#define DELL_PRODUCT_5800_MINICARD_VZW 0x8195 /* Novatel E362 */ ++#define DELL_PRODUCT_5800_V2_MINICARD_VZW 0x8196 /* Novatel E362 */ ++ + #define KYOCERA_VENDOR_ID 0x0c88 + #define KYOCERA_PRODUCT_KPC650 0x17da + #define KYOCERA_PRODUCT_KPC680 0x180a +@@ -282,6 +286,7 @@ static void option_instat_callback(struct urb *urb); + /* ALCATEL PRODUCTS */ + #define ALCATEL_VENDOR_ID 0x1bbb + #define ALCATEL_PRODUCT_X060S_X200 0x0000 ++#define ALCATEL_PRODUCT_X220_X500D 0x0017 + + #define PIRELLI_VENDOR_ID 0x1266 + #define PIRELLI_PRODUCT_C100_1 0x1002 +@@ -705,6 +710,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_G2) }, + /* Novatel Ovation MC551 a.k.a. 
Verizon USB551L */ + { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_MC551, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(NOVATELWIRELESS_VENDOR_ID, NOVATELWIRELESS_PRODUCT_E362, 0xff, 0xff, 0xff) }, + + { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01) }, + { USB_DEVICE(AMOI_VENDOR_ID, AMOI_PRODUCT_H01A) }, +@@ -727,6 +733,8 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_SPRINT) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_TELUS) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ + { USB_DEVICE(DELL_VENDOR_ID, DELL_PRODUCT_5730_MINICARD_VZW) }, /* Dell Wireless 5730 Mobile Broadband EVDO/HSPA Mini-Card */ ++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_MINICARD_VZW, 0xff, 0xff, 0xff) }, ++ { USB_DEVICE_AND_INTERFACE_INFO(DELL_VENDOR_ID, DELL_PRODUCT_5800_V2_MINICARD_VZW, 0xff, 0xff, 0xff) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_E100A) }, /* ADU-E100, ADU-310 */ + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_500A) }, + { USB_DEVICE(ANYDATA_VENDOR_ID, ANYDATA_PRODUCT_ADU_620UW) }, +@@ -1156,6 +1164,7 @@ static const struct usb_device_id option_ids[] = { + { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X060S_X200), + .driver_info = (kernel_ulong_t)&alcatel_x200_blacklist + }, ++ { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_X220_X500D) }, + { USB_DEVICE(AIRPLUS_VENDOR_ID, AIRPLUS_PRODUCT_MCD650) }, + { USB_DEVICE(TLAYTECH_VENDOR_ID, TLAYTECH_PRODUCT_TEU800) }, + { USB_DEVICE(LONGCHEER_VENDOR_ID, FOUR_G_SYSTEMS_PRODUCT_W14), +diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c +index 8bea45c..e5206de 100644 +--- a/drivers/usb/serial/usb-serial.c ++++ b/drivers/usb/serial/usb-serial.c +@@ -764,7 +764,7 @@ int usb_serial_probe(struct usb_interface *interface, + + if (retval) { + dbg("sub driver rejected device"); +- kfree(serial); ++ usb_serial_put(serial); + module_put(type->driver.owner); + return retval; + } +@@ -836,7 +836,7 @@ int usb_serial_probe(struct usb_interface *interface, + */ + if (num_bulk_in == 0 || num_bulk_out == 0) { + dev_info(&interface->dev, "PL-2303 hack: descriptors matched but endpoints did not\n"); +- kfree(serial); ++ usb_serial_put(serial); + module_put(type->driver.owner); + return -ENODEV; + } +@@ -850,7 +850,7 @@ int usb_serial_probe(struct usb_interface *interface, + if (num_ports == 0) { + dev_err(&interface->dev, + "Generic device with no bulk out, not allowed.\n"); +- kfree(serial); ++ usb_serial_put(serial); + module_put(type->driver.owner); + return -EIO; + } +diff --git a/drivers/watchdog/iTCO_wdt.c b/drivers/watchdog/iTCO_wdt.c +index 99796c5..bdf401b 100644 +--- a/drivers/watchdog/iTCO_wdt.c ++++ b/drivers/watchdog/iTCO_wdt.c +@@ -36,6 +36,7 @@ + * document number TBD : Patsburg (PBG) + * document number TBD : DH89xxCC + * document number TBD : Panther Point ++ * document number TBD : Lynx Point + */ + + /* +@@ -126,6 +127,7 @@ enum iTCO_chipsets { + TCO_PBG, /* Patsburg */ + TCO_DH89XXCC, /* DH89xxCC */ + TCO_PPT, /* Panther Point */ ++ TCO_LPT, /* Lynx Point */ + }; + + static struct { +@@ -189,6 +191,7 @@ static struct { + {"Patsburg", 2}, + {"DH89xxCC", 2}, + {"Panther Point", 2}, ++ {"Lynx Point", 2}, + {NULL, 0} + }; + +@@ -331,6 +334,38 @@ static DEFINE_PCI_DEVICE_TABLE(iTCO_wdt_pci_tbl) = { + { PCI_VDEVICE(INTEL, 0x1e5d), TCO_PPT}, + { PCI_VDEVICE(INTEL, 0x1e5e), TCO_PPT}, + { 
PCI_VDEVICE(INTEL, 0x1e5f), TCO_PPT}, ++ { PCI_VDEVICE(INTEL, 0x8c40), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c41), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c42), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c43), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c44), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c45), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c46), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c47), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c48), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c49), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c4a), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c4b), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c4c), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c4d), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c4e), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c4f), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c50), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c51), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c52), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c53), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c54), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c55), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c56), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c57), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c58), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c59), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c5a), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c5b), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c5c), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c5d), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c5e), TCO_LPT}, ++ { PCI_VDEVICE(INTEL, 0x8c5f), TCO_LPT}, + { 0, }, /* End of list */ + }; + MODULE_DEVICE_TABLE(pci, iTCO_wdt_pci_tbl); +diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c +index 0301be6..465e49a 100644 +--- a/fs/gfs2/lops.c ++++ b/fs/gfs2/lops.c +@@ -165,16 +165,14 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) + struct gfs2_meta_header *mh; + struct gfs2_trans *tr; + +- lock_buffer(bd->bd_bh); +- gfs2_log_lock(sdp); + if (!list_empty(&bd->bd_list_tr)) +- goto out; ++ return; + tr = current->journal_info; + tr->tr_touched = 1; + tr->tr_num_buf++; + list_add(&bd->bd_list_tr, &tr->tr_list_buf); + if (!list_empty(&le->le_list)) +- goto out; ++ return; + set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); + set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); + gfs2_meta_check(sdp, bd->bd_bh); +@@ -185,9 +183,6 @@ static void buf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) + sdp->sd_log_num_buf++; + list_add(&le->le_list, &sdp->sd_log_le_buf); + tr->tr_num_buf_new++; +-out: +- gfs2_log_unlock(sdp); +- unlock_buffer(bd->bd_bh); + } + + static void buf_lo_before_commit(struct gfs2_sbd *sdp) +@@ -518,11 +513,9 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) + struct address_space *mapping = bd->bd_bh->b_page->mapping; + struct gfs2_inode *ip = GFS2_I(mapping->host); + +- lock_buffer(bd->bd_bh); +- gfs2_log_lock(sdp); + if (tr) { + if (!list_empty(&bd->bd_list_tr)) +- goto out; ++ return; + tr->tr_touched = 1; + if (gfs2_is_jdata(ip)) { + tr->tr_num_buf++; +@@ -530,7 +523,7 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) + } + } + if (!list_empty(&le->le_list)) +- goto out; ++ return; + + set_bit(GLF_LFLUSH, &bd->bd_gl->gl_flags); + set_bit(GLF_DIRTY, &bd->bd_gl->gl_flags); +@@ -542,9 +535,6 @@ static void databuf_lo_add(struct gfs2_sbd *sdp, struct gfs2_log_element *le) + } else { + list_add_tail(&le->le_list, &sdp->sd_log_le_ordered); + } +-out: +- gfs2_log_unlock(sdp); +- unlock_buffer(bd->bd_bh); + } + + static void gfs2_check_magic(struct buffer_head *bh) +diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c +index 86ac75d..6ab2a77 100644 +--- a/fs/gfs2/trans.c ++++ 
b/fs/gfs2/trans.c +@@ -145,14 +145,22 @@ void gfs2_trans_add_bh(struct gfs2_glock *gl, struct buffer_head *bh, int meta) + struct gfs2_sbd *sdp = gl->gl_sbd; + struct gfs2_bufdata *bd; + ++ lock_buffer(bh); ++ gfs2_log_lock(sdp); + bd = bh->b_private; + if (bd) + gfs2_assert(sdp, bd->bd_gl == gl); + else { ++ gfs2_log_unlock(sdp); ++ unlock_buffer(bh); + gfs2_attach_bufdata(gl, bh, meta); + bd = bh->b_private; ++ lock_buffer(bh); ++ gfs2_log_lock(sdp); + } + lops_add(sdp, &bd->bd_le); ++ gfs2_log_unlock(sdp); ++ unlock_buffer(bh); + } + + void gfs2_trans_add_revoke(struct gfs2_sbd *sdp, struct gfs2_bufdata *bd) +diff --git a/fs/jbd/transaction.c b/fs/jbd/transaction.c +index edac004..7c86b37 100644 +--- a/fs/jbd/transaction.c ++++ b/fs/jbd/transaction.c +@@ -1957,7 +1957,9 @@ retry: + spin_unlock(&journal->j_list_lock); + jbd_unlock_bh_state(bh); + spin_unlock(&journal->j_state_lock); ++ unlock_buffer(bh); + log_wait_commit(journal, tid); ++ lock_buffer(bh); + goto retry; + } + /* +diff --git a/fs/jffs2/file.c b/fs/jffs2/file.c +index 61e6723..0095a70 100644 +--- a/fs/jffs2/file.c ++++ b/fs/jffs2/file.c +@@ -135,33 +135,39 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + struct page *pg; + struct inode *inode = mapping->host; + struct jffs2_inode_info *f = JFFS2_INODE_INFO(inode); ++ struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); ++ struct jffs2_raw_inode ri; ++ uint32_t alloc_len = 0; + pgoff_t index = pos >> PAGE_CACHE_SHIFT; + uint32_t pageofs = index << PAGE_CACHE_SHIFT; + int ret = 0; + ++ D1(printk(KERN_DEBUG "%s()\n", __func__)); ++ ++ if (pageofs > inode->i_size) { ++ ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, ++ ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); ++ if (ret) ++ return ret; ++ } ++ ++ mutex_lock(&f->sem); + pg = grab_cache_page_write_begin(mapping, index, flags); +- if (!pg) ++ if (!pg) { ++ if (alloc_len) ++ jffs2_complete_reservation(c); ++ mutex_unlock(&f->sem); + return -ENOMEM; ++ } + *pagep = pg; + +- D1(printk(KERN_DEBUG "jffs2_write_begin()\n")); +- +- if (pageofs > inode->i_size) { ++ if (alloc_len) { + /* Make new hole frag from old EOF to new page */ +- struct jffs2_sb_info *c = JFFS2_SB_INFO(inode->i_sb); +- struct jffs2_raw_inode ri; + struct jffs2_full_dnode *fn; +- uint32_t alloc_len; + + D1(printk(KERN_DEBUG "Writing new hole frag 0x%x-0x%x between current EOF and new page\n", + (unsigned int)inode->i_size, pageofs)); + +- ret = jffs2_reserve_space(c, sizeof(ri), &alloc_len, +- ALLOC_NORMAL, JFFS2_SUMMARY_INODE_SIZE); +- if (ret) +- goto out_page; +- +- mutex_lock(&f->sem); + memset(&ri, 0, sizeof(ri)); + + ri.magic = cpu_to_je16(JFFS2_MAGIC_BITMASK); +@@ -188,7 +194,6 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + if (IS_ERR(fn)) { + ret = PTR_ERR(fn); + jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); + goto out_page; + } + ret = jffs2_add_full_dnode_to_inode(c, f, fn); +@@ -202,12 +207,10 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + jffs2_mark_node_obsolete(c, fn->raw); + jffs2_free_full_dnode(fn); + jffs2_complete_reservation(c); +- mutex_unlock(&f->sem); + goto out_page; + } + jffs2_complete_reservation(c); + inode->i_size = pageofs; +- mutex_unlock(&f->sem); + } + + /* +@@ -216,18 +219,18 @@ static int jffs2_write_begin(struct file *filp, struct address_space *mapping, + * case of a short-copy. 
+ */ + if (!PageUptodate(pg)) { +- mutex_lock(&f->sem); + ret = jffs2_do_readpage_nolock(inode, pg); +- mutex_unlock(&f->sem); + if (ret) + goto out_page; + } ++ mutex_unlock(&f->sem); + D1(printk(KERN_DEBUG "end write_begin(). pg->flags %lx\n", pg->flags)); + return ret; + + out_page: + unlock_page(pg); + page_cache_release(pg); ++ mutex_unlock(&f->sem); + return ret; + } + +diff --git a/fs/reiserfs/inode.c b/fs/reiserfs/inode.c +index 5809abb..fe677c0 100644 +--- a/fs/reiserfs/inode.c ++++ b/fs/reiserfs/inode.c +@@ -1788,8 +1788,9 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, + + BUG_ON(!th->t_trans_id); + +- dquot_initialize(inode); ++ reiserfs_write_unlock(inode->i_sb); + err = dquot_alloc_inode(inode); ++ reiserfs_write_lock(inode->i_sb); + if (err) + goto out_end_trans; + if (!dir->i_nlink) { +@@ -1985,8 +1986,10 @@ int reiserfs_new_inode(struct reiserfs_transaction_handle *th, + + out_end_trans: + journal_end(th, th->t_super, th->t_blocks_allocated); ++ reiserfs_write_unlock(inode->i_sb); + /* Drop can be outside and it needs more credits so it's better to have it outside */ + dquot_drop(inode); ++ reiserfs_write_lock(inode->i_sb); + inode->i_flags |= S_NOQUOTA; + make_bad_inode(inode); + +@@ -3109,10 +3112,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) + /* must be turned off for recursive notify_change calls */ + ia_valid = attr->ia_valid &= ~(ATTR_KILL_SUID|ATTR_KILL_SGID); + +- depth = reiserfs_write_lock_once(inode->i_sb); + if (is_quota_modification(inode, attr)) + dquot_initialize(inode); +- ++ depth = reiserfs_write_lock_once(inode->i_sb); + if (attr->ia_valid & ATTR_SIZE) { + /* version 2 items will be caught by the s_maxbytes check + ** done for us in vmtruncate +@@ -3176,7 +3178,9 @@ int reiserfs_setattr(struct dentry *dentry, struct iattr *attr) + error = journal_begin(&th, inode->i_sb, jbegin_count); + if (error) + goto out; ++ reiserfs_write_unlock_once(inode->i_sb, depth); + error = dquot_transfer(inode, attr); ++ depth = reiserfs_write_lock_once(inode->i_sb); + if (error) { + journal_end(&th, inode->i_sb, jbegin_count); + goto out; +diff --git a/fs/reiserfs/stree.c b/fs/reiserfs/stree.c +index 313d39d..3ae9926 100644 +--- a/fs/reiserfs/stree.c ++++ b/fs/reiserfs/stree.c +@@ -1968,7 +1968,9 @@ int reiserfs_paste_into_item(struct reiserfs_transaction_handle *th, struct tree + key2type(&(key->on_disk_key))); + #endif + ++ reiserfs_write_unlock(inode->i_sb); + retval = dquot_alloc_space_nodirty(inode, pasted_size); ++ reiserfs_write_lock(inode->i_sb); + if (retval) { + pathrelse(search_path); + return retval; +@@ -2061,9 +2063,11 @@ int reiserfs_insert_item(struct reiserfs_transaction_handle *th, + "reiserquota insert_item(): allocating %u id=%u type=%c", + quota_bytes, inode->i_uid, head2type(ih)); + #endif ++ reiserfs_write_unlock(inode->i_sb); + /* We can't dirty inode here. It would be immediately written but + * appropriate stat item isn't inserted yet... 
*/ + retval = dquot_alloc_space_nodirty(inode, quota_bytes); ++ reiserfs_write_lock(inode->i_sb); + if (retval) { + pathrelse(path); + return retval; +diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c +index 5e3527b..569498a 100644 +--- a/fs/reiserfs/super.c ++++ b/fs/reiserfs/super.c +@@ -254,7 +254,9 @@ static int finish_unfinished(struct super_block *s) + retval = remove_save_link_only(s, &save_link_key, 0); + continue; + } ++ reiserfs_write_unlock(s); + dquot_initialize(inode); ++ reiserfs_write_lock(s); + + if (truncate && S_ISDIR(inode->i_mode)) { + /* We got a truncate request for a dir which is impossible. +@@ -1207,7 +1209,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + kfree(qf_names[i]); + #endif + err = -EINVAL; +- goto out_err; ++ goto out_unlock; + } + #ifdef CONFIG_QUOTA + handle_quota_files(s, qf_names, &qfmt); +@@ -1250,7 +1252,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + if (blocks) { + err = reiserfs_resize(s, blocks); + if (err != 0) +- goto out_err; ++ goto out_unlock; + } + + if (*mount_flags & MS_RDONLY) { +@@ -1260,9 +1262,15 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + /* it is read-only already */ + goto out_ok; + ++ /* ++ * Drop write lock. Quota will retake it when needed and lock ++ * ordering requires calling dquot_suspend() without it. ++ */ ++ reiserfs_write_unlock(s); + err = dquot_suspend(s, -1); + if (err < 0) + goto out_err; ++ reiserfs_write_lock(s); + + /* try to remount file system with read-only permissions */ + if (sb_umount_state(rs) == REISERFS_VALID_FS +@@ -1272,7 +1280,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + + err = journal_begin(&th, s, 10); + if (err) +- goto out_err; ++ goto out_unlock; + + /* Mounting a rw partition read-only. */ + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); +@@ -1287,7 +1295,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + + if (reiserfs_is_journal_aborted(journal)) { + err = journal->j_errno; +- goto out_err; ++ goto out_unlock; + } + + handle_data_mode(s, mount_options); +@@ -1296,7 +1304,7 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + s->s_flags &= ~MS_RDONLY; /* now it is safe to call journal_begin */ + err = journal_begin(&th, s, 10); + if (err) +- goto out_err; ++ goto out_unlock; + + /* Mount a partition which is read-only, read-write */ + reiserfs_prepare_for_journal(s, SB_BUFFER_WITH_SB(s), 1); +@@ -1313,11 +1321,17 @@ static int reiserfs_remount(struct super_block *s, int *mount_flags, char *arg) + SB_JOURNAL(s)->j_must_wait = 1; + err = journal_end(&th, s, 10); + if (err) +- goto out_err; ++ goto out_unlock; + s->s_dirt = 0; + + if (!(*mount_flags & MS_RDONLY)) { ++ /* ++ * Drop write lock. Quota will retake it when needed and lock ++ * ordering requires calling dquot_resume() without it. 
++ */ ++ reiserfs_write_unlock(s); + dquot_resume(s, -1); ++ reiserfs_write_lock(s); + finish_unfinished(s); + reiserfs_xattr_init(s, *mount_flags); + } +@@ -1327,9 +1341,10 @@ out_ok: + reiserfs_write_unlock(s); + return 0; + ++out_unlock: ++ reiserfs_write_unlock(s); + out_err: + kfree(new_opts); +- reiserfs_write_unlock(s); + return err; + } + +@@ -1953,13 +1968,15 @@ static int reiserfs_write_dquot(struct dquot *dquot) + REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); + if (ret) + goto out; ++ reiserfs_write_unlock(dquot->dq_sb); + ret = dquot_commit(dquot); ++ reiserfs_write_lock(dquot->dq_sb); + err = + journal_end(&th, dquot->dq_sb, + REISERFS_QUOTA_TRANS_BLOCKS(dquot->dq_sb)); + if (!ret && err) + ret = err; +- out: ++out: + reiserfs_write_unlock(dquot->dq_sb); + return ret; + } +@@ -1975,13 +1992,15 @@ static int reiserfs_acquire_dquot(struct dquot *dquot) + REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); + if (ret) + goto out; ++ reiserfs_write_unlock(dquot->dq_sb); + ret = dquot_acquire(dquot); ++ reiserfs_write_lock(dquot->dq_sb); + err = + journal_end(&th, dquot->dq_sb, + REISERFS_QUOTA_INIT_BLOCKS(dquot->dq_sb)); + if (!ret && err) + ret = err; +- out: ++out: + reiserfs_write_unlock(dquot->dq_sb); + return ret; + } +@@ -1995,19 +2014,21 @@ static int reiserfs_release_dquot(struct dquot *dquot) + ret = + journal_begin(&th, dquot->dq_sb, + REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); ++ reiserfs_write_unlock(dquot->dq_sb); + if (ret) { + /* Release dquot anyway to avoid endless cycle in dqput() */ + dquot_release(dquot); + goto out; + } + ret = dquot_release(dquot); ++ reiserfs_write_lock(dquot->dq_sb); + err = + journal_end(&th, dquot->dq_sb, + REISERFS_QUOTA_DEL_BLOCKS(dquot->dq_sb)); + if (!ret && err) + ret = err; +- out: + reiserfs_write_unlock(dquot->dq_sb); ++out: + return ret; + } + +@@ -2032,11 +2053,13 @@ static int reiserfs_write_info(struct super_block *sb, int type) + ret = journal_begin(&th, sb, 2); + if (ret) + goto out; ++ reiserfs_write_unlock(sb); + ret = dquot_commit_info(sb, type); ++ reiserfs_write_lock(sb); + err = journal_end(&th, sb, 2); + if (!ret && err) + ret = err; +- out: ++out: + reiserfs_write_unlock(sb); + return ret; + } +@@ -2060,8 +2083,11 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, + struct inode *inode; + struct reiserfs_transaction_handle th; + +- if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) +- return -EINVAL; ++ reiserfs_write_lock(sb); ++ if (!(REISERFS_SB(sb)->s_mount_opt & (1 << REISERFS_QUOTA))) { ++ err = -EINVAL; ++ goto out; ++ } + + /* Quotafile not on the same filesystem? */ + if (path->mnt->mnt_sb != sb) { +@@ -2103,8 +2129,10 @@ static int reiserfs_quota_on(struct super_block *sb, int type, int format_id, + if (err) + goto out; + } +- err = dquot_quota_on(sb, type, format_id, path); ++ reiserfs_write_unlock(sb); ++ return dquot_quota_on(sb, type, format_id, path); + out: ++ reiserfs_write_unlock(sb); + return err; + } + +@@ -2178,7 +2206,9 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, + tocopy = sb->s_blocksize - offset < towrite ? 
+ sb->s_blocksize - offset : towrite; + tmp_bh.b_state = 0; ++ reiserfs_write_lock(sb); + err = reiserfs_get_block(inode, blk, &tmp_bh, GET_BLOCK_CREATE); ++ reiserfs_write_unlock(sb); + if (err) + goto out; + if (offset || tocopy != sb->s_blocksize) +@@ -2194,10 +2224,12 @@ static ssize_t reiserfs_quota_write(struct super_block *sb, int type, + flush_dcache_page(bh->b_page); + set_buffer_uptodate(bh); + unlock_buffer(bh); ++ reiserfs_write_lock(sb); + reiserfs_prepare_for_journal(sb, bh, 1); + journal_mark_dirty(current->journal_info, sb, bh); + if (!journal_quota) + reiserfs_add_ordered_list(inode, bh); ++ reiserfs_write_unlock(sb); + brelse(bh); + offset = 0; + towrite -= tocopy; +diff --git a/fs/ubifs/find.c b/fs/ubifs/find.c +index 2559d17..5dc48ca 100644 +--- a/fs/ubifs/find.c ++++ b/fs/ubifs/find.c +@@ -681,8 +681,16 @@ int ubifs_find_free_leb_for_idx(struct ubifs_info *c) + if (!lprops) { + lprops = ubifs_fast_find_freeable(c); + if (!lprops) { +- ubifs_assert(c->freeable_cnt == 0); +- if (c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { ++ /* ++ * The first condition means the following: go scan the ++ * LPT if there are uncategorized lprops, which means ++ * there may be freeable LEBs there (UBIFS does not ++ * store the information about freeable LEBs in the ++ * master node). ++ */ ++ if (c->in_a_category_cnt != c->main_lebs || ++ c->lst.empty_lebs - c->lst.taken_empty_lebs > 0) { ++ ubifs_assert(c->freeable_cnt == 0); + lprops = scan_for_leb_for_idx(c); + if (IS_ERR(lprops)) { + err = PTR_ERR(lprops); +diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c +index f8a181e..ea9d491 100644 +--- a/fs/ubifs/lprops.c ++++ b/fs/ubifs/lprops.c +@@ -300,8 +300,11 @@ void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops, + default: + ubifs_assert(0); + } ++ + lprops->flags &= ~LPROPS_CAT_MASK; + lprops->flags |= cat; ++ c->in_a_category_cnt += 1; ++ ubifs_assert(c->in_a_category_cnt <= c->main_lebs); + } + + /** +@@ -334,6 +337,9 @@ static void ubifs_remove_from_cat(struct ubifs_info *c, + default: + ubifs_assert(0); + } ++ ++ c->in_a_category_cnt -= 1; ++ ubifs_assert(c->in_a_category_cnt >= 0); + } + + /** +diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h +index 27f2255..8bbc99e 100644 +--- a/fs/ubifs/ubifs.h ++++ b/fs/ubifs/ubifs.h +@@ -1187,6 +1187,8 @@ struct ubifs_debug_info; + * @freeable_list: list of freeable non-index LEBs (free + dirty == @leb_size) + * @frdi_idx_list: list of freeable index LEBs (free + dirty == @leb_size) + * @freeable_cnt: number of freeable LEBs in @freeable_list ++ * @in_a_category_cnt: count of lprops which are in a certain category, which ++ * basically meants that they were loaded from the flash + * + * @ltab_lnum: LEB number of LPT's own lprops table + * @ltab_offs: offset of LPT's own lprops table +@@ -1416,6 +1418,7 @@ struct ubifs_info { + struct list_head freeable_list; + struct list_head frdi_idx_list; + int freeable_cnt; ++ int in_a_category_cnt; + + int ltab_lnum; + int ltab_offs; +diff --git a/fs/xfs/xfs_buf.c b/fs/xfs/xfs_buf.c +index cf0ac05..2f5a8f7 100644 +--- a/fs/xfs/xfs_buf.c ++++ b/fs/xfs/xfs_buf.c +@@ -1167,9 +1167,14 @@ xfs_buf_bio_end_io( + { + xfs_buf_t *bp = (xfs_buf_t *)bio->bi_private; + +- xfs_buf_ioerror(bp, -error); ++ /* ++ * don't overwrite existing errors - otherwise we can lose errors on ++ * buffers that require multiple bios to complete. 
++ */ ++ if (!bp->b_error) ++ xfs_buf_ioerror(bp, -error); + +- if (!error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) ++ if (!bp->b_error && xfs_buf_is_vmapped(bp) && (bp->b_flags & XBF_READ)) + invalidate_kernel_vmap_range(bp->b_addr, xfs_buf_vmap_len(bp)); + + _xfs_buf_ioend(bp, 1); +@@ -1245,6 +1250,11 @@ next_chunk: + if (size) + goto next_chunk; + } else { ++ /* ++ * This is guaranteed not to be the last io reference count ++ * because the caller (xfs_buf_iorequest) holds a count itself. ++ */ ++ atomic_dec(&bp->b_io_remaining); + xfs_buf_ioerror(bp, EIO); + bio_put(bio); + } +diff --git a/include/linux/ptp_clock_kernel.h b/include/linux/ptp_clock_kernel.h +index dd2e44f..9d709d1 100644 +--- a/include/linux/ptp_clock_kernel.h ++++ b/include/linux/ptp_clock_kernel.h +@@ -50,7 +50,8 @@ struct ptp_clock_request { + * clock operations + * + * @adjfreq: Adjusts the frequency of the hardware clock. +- * parameter delta: Desired period change in parts per billion. ++ * parameter delta: Desired frequency offset from nominal frequency ++ * in parts per billion + * + * @adjtime: Shifts the time of the hardware clock. + * parameter delta: Desired change in nanoseconds. +diff --git a/kernel/futex.c b/kernel/futex.c +index 80fb1c6..77bccfc 100644 +--- a/kernel/futex.c ++++ b/kernel/futex.c +@@ -716,7 +716,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, + struct futex_pi_state **ps, + struct task_struct *task, int set_waiters) + { +- int lock_taken, ret, ownerdied = 0; ++ int lock_taken, ret, force_take = 0; + u32 uval, newval, curval, vpid = task_pid_vnr(task); + + retry: +@@ -755,17 +755,15 @@ retry: + newval = curval | FUTEX_WAITERS; + + /* +- * There are two cases, where a futex might have no owner (the +- * owner TID is 0): OWNER_DIED. We take over the futex in this +- * case. We also do an unconditional take over, when the owner +- * of the futex died. +- * +- * This is safe as we are protected by the hash bucket lock ! ++ * Should we force take the futex? See below. + */ +- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { +- /* Keep the OWNER_DIED bit */ ++ if (unlikely(force_take)) { ++ /* ++ * Keep the OWNER_DIED and the WAITERS bit and set the ++ * new TID value. ++ */ + newval = (curval & ~FUTEX_TID_MASK) | vpid; +- ownerdied = 0; ++ force_take = 0; + lock_taken = 1; + } + +@@ -775,7 +773,7 @@ retry: + goto retry; + + /* +- * We took the lock due to owner died take over. ++ * We took the lock due to forced take over. + */ + if (unlikely(lock_taken)) + return 1; +@@ -790,20 +788,25 @@ retry: + switch (ret) { + case -ESRCH: + /* +- * No owner found for this futex. Check if the +- * OWNER_DIED bit is set to figure out whether +- * this is a robust futex or not. ++ * We failed to find an owner for this ++ * futex. So we have no pi_state to block ++ * on. This can happen in two cases: ++ * ++ * 1) The owner died ++ * 2) A stale FUTEX_WAITERS bit ++ * ++ * Re-read the futex value. + */ + if (get_futex_value_locked(&curval, uaddr)) + return -EFAULT; + + /* +- * We simply start over in case of a robust +- * futex. The code above will take the futex +- * and return happy. ++ * If the owner died or we have a stale ++ * WAITERS bit the owner TID in the user space ++ * futex is 0. 
+ */ +- if (curval & FUTEX_OWNER_DIED) { +- ownerdied = 1; ++ if (!(curval & FUTEX_TID_MASK)) { ++ force_take = 1; + goto retry; + } + default: +@@ -840,6 +843,9 @@ static void wake_futex(struct futex_q *q) + { + struct task_struct *p = q->task; + ++ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) ++ return; ++ + /* + * We set q->lock_ptr = NULL _before_ we wake up the task. If + * a non-futex wake up happens on another CPU then the task +@@ -1075,6 +1081,10 @@ retry_private: + + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex (&this->key, &key1)) { ++ if (this->pi_state || this->rt_waiter) { ++ ret = -EINVAL; ++ goto out_unlock; ++ } + wake_futex(this); + if (++ret >= nr_wake) + break; +@@ -1087,6 +1097,10 @@ retry_private: + op_ret = 0; + plist_for_each_entry_safe(this, next, head, list) { + if (match_futex (&this->key, &key2)) { ++ if (this->pi_state || this->rt_waiter) { ++ ret = -EINVAL; ++ goto out_unlock; ++ } + wake_futex(this); + if (++op_ret >= nr_wake2) + break; +@@ -1095,6 +1109,7 @@ retry_private: + ret += op_ret; + } + ++out_unlock: + double_unlock_hb(hb1, hb2); + out_put_keys: + put_futex_key(&key2); +@@ -1384,9 +1399,13 @@ retry_private: + /* + * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always + * be paired with each other and no other futex ops. ++ * ++ * We should never be requeueing a futex_q with a pi_state, ++ * which is awaiting a futex_unlock_pi(). + */ + if ((requeue_pi && !this->rt_waiter) || +- (!requeue_pi && this->rt_waiter)) { ++ (!requeue_pi && this->rt_waiter) || ++ this->pi_state) { + ret = -EINVAL; + break; + } +diff --git a/kernel/watchdog.c b/kernel/watchdog.c +index 1d7bca7..a8bc4d9 100644 +--- a/kernel/watchdog.c ++++ b/kernel/watchdog.c +@@ -113,7 +113,7 @@ static unsigned long get_timestamp(int this_cpu) + return cpu_clock(this_cpu) >> 30LL; /* 2^30 ~= 10^9 */ + } + +-static unsigned long get_sample_period(void) ++static u64 get_sample_period(void) + { + /* + * convert watchdog_thresh from seconds to ns +@@ -121,7 +121,7 @@ static unsigned long get_sample_period(void) + * increment before the hardlockup detector generates + * a warning + */ +- return get_softlockup_thresh() * (NSEC_PER_SEC / 5); ++ return get_softlockup_thresh() * ((u64)NSEC_PER_SEC / 5); + } + + /* Commands for resetting the watchdog */ +diff --git a/kernel/workqueue.c b/kernel/workqueue.c +index 43a19c5..d551d5f 100644 +--- a/kernel/workqueue.c ++++ b/kernel/workqueue.c +@@ -2052,8 +2052,10 @@ static int rescuer_thread(void *__wq) + repeat: + set_current_state(TASK_INTERRUPTIBLE); + +- if (kthread_should_stop()) ++ if (kthread_should_stop()) { ++ __set_current_state(TASK_RUNNING); + return 0; ++ } + + /* + * See whether any cpu is asking for help. Unbounded +diff --git a/mm/memcontrol.c b/mm/memcontrol.c +index c8425b1..d027a24 100644 +--- a/mm/memcontrol.c ++++ b/mm/memcontrol.c +@@ -1457,17 +1457,26 @@ static int mem_cgroup_count_children(struct mem_cgroup *memcg) + u64 mem_cgroup_get_limit(struct mem_cgroup *memcg) + { + u64 limit; +- u64 memsw; + + limit = res_counter_read_u64(&memcg->res, RES_LIMIT); +- limit += total_swap_pages << PAGE_SHIFT; + +- memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); + /* +- * If memsw is finite and limits the amount of swap space available +- * to this memcg, return that limit. 
++ * Do not consider swap space if we cannot swap due to swappiness + */ +- return min(limit, memsw); ++ if (mem_cgroup_swappiness(memcg)) { ++ u64 memsw; ++ ++ limit += total_swap_pages << PAGE_SHIFT; ++ memsw = res_counter_read_u64(&memcg->memsw, RES_LIMIT); ++ ++ /* ++ * If memsw is finite and limits the amount of swap space ++ * available to this memcg, return that limit. ++ */ ++ limit = min(limit, memsw); ++ } ++ ++ return limit; + } + + /* +diff --git a/mm/memory-failure.c b/mm/memory-failure.c +index 5bd5bb1..1b03878 100644 +--- a/mm/memory-failure.c ++++ b/mm/memory-failure.c +@@ -1475,9 +1475,17 @@ int soft_offline_page(struct page *page, int flags) + { + int ret; + unsigned long pfn = page_to_pfn(page); ++ struct page *hpage = compound_trans_head(page); + + if (PageHuge(page)) + return soft_offline_huge_page(page, flags); ++ if (PageTransHuge(hpage)) { ++ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { ++ pr_info("soft offline: %#lx: failed to split THP\n", ++ pfn); ++ return -EBUSY; ++ } ++ } + + ret = get_any_page(page, pfn, flags); + if (ret < 0) +diff --git a/mm/shmem.c b/mm/shmem.c +index 126ca35..2d46e23 100644 +--- a/mm/shmem.c ++++ b/mm/shmem.c +@@ -595,7 +595,7 @@ static void shmem_evict_inode(struct inode *inode) + kfree(xattr->name); + kfree(xattr); + } +- BUG_ON(inode->i_blocks); ++ WARN_ON(inode->i_blocks); + shmem_free_inode(inode->i_sb); + end_writeback(inode); + } +diff --git a/mm/sparse.c b/mm/sparse.c +index bf7d3cc..42935b5 100644 +--- a/mm/sparse.c ++++ b/mm/sparse.c +@@ -622,7 +622,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) + { + return; /* XXX: Not implemented yet */ + } +-static void free_map_bootmem(struct page *page, unsigned long nr_pages) ++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) + { + } + #else +@@ -663,10 +663,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) + get_order(sizeof(struct page) * nr_pages)); + } + +-static void free_map_bootmem(struct page *page, unsigned long nr_pages) ++static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) + { + unsigned long maps_section_nr, removing_section_nr, i; + unsigned long magic; ++ struct page *page = virt_to_page(memmap); + + for (i = 0; i < nr_pages; i++, page++) { + magic = (unsigned long) page->lru.next; +@@ -715,13 +716,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap) + */ + + if (memmap) { +- struct page *memmap_page; +- memmap_page = virt_to_page(memmap); +- + nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) + >> PAGE_SHIFT; + +- free_map_bootmem(memmap_page, nr_pages); ++ free_map_bootmem(memmap, nr_pages); + } + } + +diff --git a/mm/vmscan.c b/mm/vmscan.c +index 313381c..1e4ee1a 100644 +--- a/mm/vmscan.c ++++ b/mm/vmscan.c +@@ -2492,6 +2492,19 @@ unsigned long try_to_free_mem_cgroup_pages(struct mem_cgroup *mem_cont, + } + #endif + ++static bool zone_balanced(struct zone *zone, int order, ++ unsigned long balance_gap, int classzone_idx) ++{ ++ if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) + ++ balance_gap, classzone_idx, 0)) ++ return false; ++ ++ if (COMPACTION_BUILD && order && !compaction_suitable(zone, order)) ++ return false; ++ ++ return true; ++} ++ + /* + * pgdat_balanced is used when checking if a node is balanced for high-order + * allocations. 
Only zones that meet watermarks and are in a zone allowed +@@ -2551,8 +2564,7 @@ static bool sleeping_prematurely(pg_data_t *pgdat, int order, long remaining, + continue; + } + +- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone), +- i, 0)) ++ if (!zone_balanced(zone, order, 0, i)) + all_zones_ok = false; + else + balanced += zone->present_pages; +@@ -2655,8 +2667,7 @@ loop_again: + shrink_active_list(SWAP_CLUSTER_MAX, zone, + &sc, priority, 0); + +- if (!zone_watermark_ok_safe(zone, order, +- high_wmark_pages(zone), 0, 0)) { ++ if (!zone_balanced(zone, order, 0, 0)) { + end_zone = i; + break; + } else { +@@ -2717,9 +2728,8 @@ loop_again: + (zone->present_pages + + KSWAPD_ZONE_BALANCE_GAP_RATIO-1) / + KSWAPD_ZONE_BALANCE_GAP_RATIO); +- if (!zone_watermark_ok_safe(zone, order, +- high_wmark_pages(zone) + balance_gap, +- end_zone, 0)) { ++ if (!zone_balanced(zone, order, ++ balance_gap, end_zone)) { + shrink_zone(priority, zone, &sc); + + reclaim_state->reclaimed_slab = 0; +@@ -2746,8 +2756,7 @@ loop_again: + continue; + } + +- if (!zone_watermark_ok_safe(zone, order, +- high_wmark_pages(zone), end_zone, 0)) { ++ if (!zone_balanced(zone, order, 0, end_zone)) { + all_zones_ok = 0; + /* + * We are still under min water mark. This +diff --git a/net/can/bcm.c b/net/can/bcm.c +index 151b773..3910c1f 100644 +--- a/net/can/bcm.c ++++ b/net/can/bcm.c +@@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, + op->sk = sk; + op->ifindex = ifindex; + ++ /* ifindex for timeout events w/o previous frame reception */ ++ op->rx_ifindex = ifindex; ++ + /* initialize uninitialized (kzalloc) structure */ + hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + op->timer.function = bcm_rx_timeout_handler; +diff --git a/net/core/dev.c b/net/core/dev.c +index 480be72..2aac4ec 100644 +--- a/net/core/dev.c ++++ b/net/core/dev.c +@@ -2829,8 +2829,10 @@ static int get_rps_cpu(struct net_device *dev, struct sk_buff *skb, + if (unlikely(tcpu != next_cpu) && + (tcpu == RPS_NO_CPU || !cpu_online(tcpu) || + ((int)(per_cpu(softnet_data, tcpu).input_queue_head - +- rflow->last_qtail)) >= 0)) ++ rflow->last_qtail)) >= 0)) { ++ tcpu = next_cpu; + rflow = set_rps_cpu(dev, skb, rflow, next_cpu); ++ } + + if (tcpu != RPS_NO_CPU && cpu_online(tcpu)) { + *rflowp = rflow; +diff --git a/net/core/dev_addr_lists.c b/net/core/dev_addr_lists.c +index 277faef..0387da0 100644 +--- a/net/core/dev_addr_lists.c ++++ b/net/core/dev_addr_lists.c +@@ -308,7 +308,8 @@ int dev_addr_del(struct net_device *dev, unsigned char *addr, + */ + ha = list_first_entry(&dev->dev_addrs.list, + struct netdev_hw_addr, list); +- if (ha->addr == dev->dev_addr && ha->refcount == 1) ++ if (!memcmp(ha->addr, addr, dev->addr_len) && ++ ha->type == addr_type && ha->refcount == 1) + return -ENOENT; + + err = __hw_addr_del(&dev->dev_addrs, addr, dev->addr_len, +diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c +index 09ff51b..0106d25 100644 +--- a/net/ipv4/ip_sockglue.c ++++ b/net/ipv4/ip_sockglue.c +@@ -468,18 +468,27 @@ static int do_ip_setsockopt(struct sock *sk, int level, + struct inet_sock *inet = inet_sk(sk); + int val = 0, err; + +- if (((1<<optname) & ((1<<IP_PKTINFO) | (1<<IP_RECVTTL) | +- (1<<IP_RECVOPTS) | (1<<IP_RECVTOS) | +- (1<<IP_RETOPTS) | (1<<IP_TOS) | +- (1<<IP_TTL) | (1<<IP_HDRINCL) | +- (1<<IP_MTU_DISCOVER) | (1<<IP_RECVERR) | +- (1<<IP_ROUTER_ALERT) | (1<<IP_FREEBIND) | +- (1<<IP_PASSSEC) | (1<<IP_TRANSPARENT) | +- (1<<IP_MINTTL) | (1<<IP_NODEFRAG))) || +- optname == 
IP_MULTICAST_TTL || +- optname == IP_MULTICAST_ALL || +- optname == IP_MULTICAST_LOOP || +- optname == IP_RECVORIGDSTADDR) { ++ switch (optname) { ++ case IP_PKTINFO: ++ case IP_RECVTTL: ++ case IP_RECVOPTS: ++ case IP_RECVTOS: ++ case IP_RETOPTS: ++ case IP_TOS: ++ case IP_TTL: ++ case IP_HDRINCL: ++ case IP_MTU_DISCOVER: ++ case IP_RECVERR: ++ case IP_ROUTER_ALERT: ++ case IP_FREEBIND: ++ case IP_PASSSEC: ++ case IP_TRANSPARENT: ++ case IP_MINTTL: ++ case IP_NODEFRAG: ++ case IP_MULTICAST_TTL: ++ case IP_MULTICAST_ALL: ++ case IP_MULTICAST_LOOP: ++ case IP_RECVORIGDSTADDR: + if (optlen >= sizeof(int)) { + if (get_user(val, (int __user *) optval)) + return -EFAULT; +diff --git a/net/ipv4/netfilter/nf_nat_standalone.c b/net/ipv4/netfilter/nf_nat_standalone.c +index 9290048..e32b542 100644 +--- a/net/ipv4/netfilter/nf_nat_standalone.c ++++ b/net/ipv4/netfilter/nf_nat_standalone.c +@@ -194,7 +194,8 @@ nf_nat_out(unsigned int hooknum, + + if ((ct->tuplehash[dir].tuple.src.u3.ip != + ct->tuplehash[!dir].tuple.dst.u3.ip) || +- (ct->tuplehash[dir].tuple.src.u.all != ++ (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && ++ ct->tuplehash[dir].tuple.src.u.all != + ct->tuplehash[!dir].tuple.dst.u.all) + ) + return ip_xfrm_me_harder(skb) == 0 ? ret : NF_DROP; +@@ -230,7 +231,8 @@ nf_nat_local_fn(unsigned int hooknum, + ret = NF_DROP; + } + #ifdef CONFIG_XFRM +- else if (ct->tuplehash[dir].tuple.dst.u.all != ++ else if (ct->tuplehash[dir].tuple.dst.protonum != IPPROTO_ICMP && ++ ct->tuplehash[dir].tuple.dst.u.all != + ct->tuplehash[!dir].tuple.src.u.all) + if (ip_xfrm_me_harder(skb)) + ret = NF_DROP; +diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c +index 26cb08c..b204df8 100644 +--- a/net/ipv6/ipv6_sockglue.c ++++ b/net/ipv6/ipv6_sockglue.c +@@ -798,6 +798,7 @@ pref_skip_coa: + if (val < 0 || val > 255) + goto e_inval; + np->min_hopcount = val; ++ retv = 0; + break; + case IPV6_DONTFRAG: + np->dontfrag = valbool; +diff --git a/net/mac80211/ibss.c b/net/mac80211/ibss.c +index 8c7364b..9e20cb8 100644 +--- a/net/mac80211/ibss.c ++++ b/net/mac80211/ibss.c +@@ -965,10 +965,6 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) + + mutex_lock(&sdata->u.ibss.mtx); + +- sdata->u.ibss.state = IEEE80211_IBSS_MLME_SEARCH; +- memset(sdata->u.ibss.bssid, 0, ETH_ALEN); +- sdata->u.ibss.ssid_len = 0; +- + active_ibss = ieee80211_sta_active_ibss(sdata); + + if (!active_ibss && !is_zero_ether_addr(ifibss->bssid)) { +@@ -989,6 +985,10 @@ int ieee80211_ibss_leave(struct ieee80211_sub_if_data *sdata) + } + } + ++ ifibss->state = IEEE80211_IBSS_MLME_SEARCH; ++ memset(ifibss->bssid, 0, ETH_ALEN); ++ ifibss->ssid_len = 0; ++ + sta_info_flush(sdata->local, sdata); + + /* remove beacon */ +diff --git a/net/mac80211/sta_info.c b/net/mac80211/sta_info.c +index 1fdd8ff..1c775f0 100644 +--- a/net/mac80211/sta_info.c ++++ b/net/mac80211/sta_info.c +@@ -1129,6 +1129,7 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) + struct ieee80211_local *local = sdata->local; + struct sk_buff_head pending; + int filtered = 0, buffered = 0, ac; ++ unsigned long flags; + + clear_sta_flag(sta, WLAN_STA_SP); + +@@ -1144,12 +1145,16 @@ void ieee80211_sta_ps_deliver_wakeup(struct sta_info *sta) + for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { + int count = skb_queue_len(&pending), tmp; + ++ spin_lock_irqsave(&sta->tx_filtered[ac].lock, flags); + skb_queue_splice_tail_init(&sta->tx_filtered[ac], &pending); ++ spin_unlock_irqrestore(&sta->tx_filtered[ac].lock, flags); + tmp = skb_queue_len(&pending); + 
filtered += tmp - count; + count = tmp; + ++ spin_lock_irqsave(&sta->ps_tx_buf[ac].lock, flags); + skb_queue_splice_tail_init(&sta->ps_tx_buf[ac], &pending); ++ spin_unlock_irqrestore(&sta->ps_tx_buf[ac].lock, flags); + tmp = skb_queue_len(&pending); + buffered += tmp - count; + } +diff --git a/net/netfilter/nf_conntrack_proto_tcp.c b/net/netfilter/nf_conntrack_proto_tcp.c +index 8235b86..57ad466 100644 +--- a/net/netfilter/nf_conntrack_proto_tcp.c ++++ b/net/netfilter/nf_conntrack_proto_tcp.c +@@ -159,21 +159,18 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = { + * sCL -> sSS + */ + /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ +-/*synack*/ { sIV, sIV, sIG, sIG, sIG, sIG, sIG, sIG, sIG, sSR }, ++/*synack*/ { sIV, sIV, sSR, sIV, sIV, sIV, sIV, sIV, sIV, sSR }, + /* + * sNO -> sIV Too late and no reason to do anything + * sSS -> sIV Client can't send SYN and then SYN/ACK + * sS2 -> sSR SYN/ACK sent to SYN2 in simultaneous open +- * sSR -> sIG +- * sES -> sIG Error: SYNs in window outside the SYN_SENT state +- * are errors. Receiver will reply with RST +- * and close the connection. +- * Or we are not in sync and hold a dead connection. +- * sFW -> sIG +- * sCW -> sIG +- * sLA -> sIG +- * sTW -> sIG +- * sCL -> sIG ++ * sSR -> sSR Late retransmitted SYN/ACK in simultaneous open ++ * sES -> sIV Invalid SYN/ACK packets sent by the client ++ * sFW -> sIV ++ * sCW -> sIV ++ * sLA -> sIV ++ * sTW -> sIV ++ * sCL -> sIV + */ + /* sNO, sSS, sSR, sES, sFW, sCW, sLA, sTW, sCL, sS2 */ + /*fin*/ { sIV, sIV, sFW, sFW, sLA, sLA, sLA, sTW, sCL, sIV }, +@@ -628,15 +625,9 @@ static bool tcp_in_window(const struct nf_conn *ct, + ack = sack = receiver->td_end; + } + +- if (seq == end +- && (!tcph->rst +- || (seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT))) ++ if (tcph->rst && seq == 0 && state->state == TCP_CONNTRACK_SYN_SENT) + /* +- * Packets contains no data: we assume it is valid +- * and check the ack value only. +- * However RST segments are always validated by their +- * SEQ number, except when seq == 0 (reset sent answering +- * SYN. ++ * RST sent answering SYN. + */ + seq = end = sender->td_end; + +diff --git a/net/wireless/reg.c b/net/wireless/reg.c +index fa39731..0b08905 100644 +--- a/net/wireless/reg.c ++++ b/net/wireless/reg.c +@@ -125,9 +125,8 @@ static const struct ieee80211_regdomain world_regdom = { + .reg_rules = { + /* IEEE 802.11b/g, channels 1..11 */ + REG_RULE(2412-10, 2462+10, 40, 6, 20, 0), +- /* IEEE 802.11b/g, channels 12..13. No HT40 +- * channel fits here. */ +- REG_RULE(2467-10, 2472+10, 20, 6, 20, ++ /* IEEE 802.11b/g, channels 12..13. 
*/ ++ REG_RULE(2467-10, 2472+10, 40, 6, 20, + NL80211_RRF_PASSIVE_SCAN | + NL80211_RRF_NO_IBSS), + /* IEEE 802.11 channel 14 - Only JP enables +diff --git a/security/device_cgroup.c b/security/device_cgroup.c +index 4450fbe..92e24bb 100644 +--- a/security/device_cgroup.c ++++ b/security/device_cgroup.c +@@ -202,8 +202,8 @@ static void devcgroup_destroy(struct cgroup_subsys *ss, + + dev_cgroup = cgroup_to_devcgroup(cgroup); + list_for_each_entry_safe(wh, tmp, &dev_cgroup->whitelist, list) { +- list_del(&wh->list); +- kfree(wh); ++ list_del_rcu(&wh->list); ++ kfree_rcu(wh, rcu); + } + kfree(dev_cgroup); + } +@@ -278,7 +278,7 @@ static int may_access_whitelist(struct dev_cgroup *c, + { + struct dev_whitelist_item *whitem; + +- list_for_each_entry(whitem, &c->whitelist, list) { ++ list_for_each_entry_rcu(whitem, &c->whitelist, list) { + if (whitem->type & DEV_ALL) + return 1; + if ((refwh->type & DEV_BLOCK) && !(whitem->type & DEV_BLOCK)) +diff --git a/security/selinux/netnode.c b/security/selinux/netnode.c +index 3bf46ab..46a5b81 100644 +--- a/security/selinux/netnode.c ++++ b/security/selinux/netnode.c +@@ -174,7 +174,8 @@ static void sel_netnode_insert(struct sel_netnode *node) + if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) { + struct sel_netnode *tail; + tail = list_entry( +- rcu_dereference(sel_netnode_hash[idx].list.prev), ++ rcu_dereference_protected(sel_netnode_hash[idx].list.prev, ++ lockdep_is_held(&sel_netnode_lock)), + struct sel_netnode, list); + list_del_rcu(&tail->list); + kfree_rcu(tail, rcu); +diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c +index 3ce2da2..1a09fbf 100644 +--- a/sound/pci/hda/patch_realtek.c ++++ b/sound/pci/hda/patch_realtek.c +@@ -6026,6 +6026,9 @@ static const struct hda_codec_preset snd_hda_preset_realtek[] = { + { .id = 0x10ec0276, .name = "ALC276", .patch = patch_alc269 }, + { .id = 0x10ec0280, .name = "ALC280", .patch = patch_alc269 }, + { .id = 0x10ec0282, .name = "ALC282", .patch = patch_alc269 }, ++ { .id = 0x10ec0283, .name = "ALC283", .patch = patch_alc269 }, ++ { .id = 0x10ec0290, .name = "ALC290", .patch = patch_alc269 }, ++ { .id = 0x10ec0292, .name = "ALC292", .patch = patch_alc269 }, + { .id = 0x10ec0861, .rev = 0x100340, .name = "ALC660", + .patch = patch_alc861 }, + { .id = 0x10ec0660, .name = "ALC660-VD", .patch = patch_alc861vd }, +diff --git a/sound/soc/codecs/wm8978.c b/sound/soc/codecs/wm8978.c +index 41ca4d9..f81b185 100644 +--- a/sound/soc/codecs/wm8978.c ++++ b/sound/soc/codecs/wm8978.c +@@ -749,7 +749,7 @@ static int wm8978_hw_params(struct snd_pcm_substream *substream, + wm8978->mclk_idx = -1; + f_sel = wm8978->f_mclk; + } else { +- if (!wm8978->f_pllout) { ++ if (!wm8978->f_opclk) { + /* We only enter here, if OPCLK is not used */ + int ret = wm8978_configure_pll(codec); + if (ret < 0) +diff --git a/sound/soc/soc-dapm.c b/sound/soc/soc-dapm.c +index 0dc441c..b516488 100644 +--- a/sound/soc/soc-dapm.c ++++ b/sound/soc/soc-dapm.c +@@ -3009,7 +3009,7 @@ void snd_soc_dapm_shutdown(struct snd_soc_card *card) + { + struct snd_soc_codec *codec; + +- list_for_each_entry(codec, &card->codec_dev_list, list) { ++ list_for_each_entry(codec, &card->codec_dev_list, card_list) { + soc_dapm_shutdown_codec(&codec->dapm); + if (codec->dapm.bias_level == SND_SOC_BIAS_STANDBY) + snd_soc_dapm_set_bias_level(&codec->dapm, +diff --git a/sound/usb/midi.c b/sound/usb/midi.c +index c83f614..eeefbce 100644 +--- a/sound/usb/midi.c ++++ b/sound/usb/midi.c +@@ -148,6 +148,7 @@ struct snd_usb_midi_out_endpoint { + 
struct snd_usb_midi_out_endpoint* ep; + struct snd_rawmidi_substream *substream; + int active; ++ bool autopm_reference; + uint8_t cable; /* cable number << 4 */ + uint8_t state; + #define STATE_UNKNOWN 0 +@@ -1076,7 +1077,8 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) + return -ENXIO; + } + err = usb_autopm_get_interface(umidi->iface); +- if (err < 0) ++ port->autopm_reference = err >= 0; ++ if (err < 0 && err != -EACCES) + return -EIO; + substream->runtime->private_data = port; + port->state = STATE_UNKNOWN; +@@ -1087,9 +1089,11 @@ static int snd_usbmidi_output_open(struct snd_rawmidi_substream *substream) + static int snd_usbmidi_output_close(struct snd_rawmidi_substream *substream) + { + struct snd_usb_midi* umidi = substream->rmidi->private_data; ++ struct usbmidi_out_port *port = substream->runtime->private_data; + + substream_open(substream, 0); +- usb_autopm_put_interface(umidi->iface); ++ if (port->autopm_reference) ++ usb_autopm_put_interface(umidi->iface); + return 0; + } + diff --git a/3.2.34/4420_grsecurity-2.9.1-3.2.34-201212041903.patch b/3.2.35/4420_grsecurity-2.9.1-3.2.35-201212071641.patch index 3205faf..23da6f5 100644 --- a/3.2.34/4420_grsecurity-2.9.1-3.2.34-201212041903.patch +++ b/3.2.35/4420_grsecurity-2.9.1-3.2.35-201212071641.patch @@ -255,7 +255,7 @@ index 88fd7f5..b318a78 100644 ============================================================== diff --git a/Makefile b/Makefile -index 14ebacf..f7e0add 100644 +index d985af0..163e574 100644 --- a/Makefile +++ b/Makefile @@ -245,8 +245,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \ @@ -3515,32 +3515,8 @@ index 5e34ccf..672bc9c 100644 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", me->arch.unwind_section, table, end, gp); -diff --git a/arch/parisc/kernel/signal32.c b/arch/parisc/kernel/signal32.c -index e141324..d0ea054 100644 ---- a/arch/parisc/kernel/signal32.c -+++ b/arch/parisc/kernel/signal32.c -@@ -67,7 +67,8 @@ put_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) - { - compat_sigset_t s; - -- if (sz != sizeof *set) panic("put_sigset32()"); -+ if (sz != sizeof *set) -+ return -EINVAL; - sigset_64to32(&s, set); - - return copy_to_user(up, &s, sizeof s); -@@ -79,7 +80,8 @@ get_sigset32(compat_sigset_t __user *up, sigset_t *set, size_t sz) - compat_sigset_t s; - int r; - -- if (sz != sizeof *set) panic("put_sigset32()"); -+ if (sz != sizeof *set) -+ return -EINVAL; - - if ((r = copy_from_user(&s, up, sz)) == 0) { - sigset_32to64(set, &s); diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c -index c9b9322..02d8940 100644 +index 7ea75d1..7b64ef5 100644 --- a/arch/parisc/kernel/sys_parisc.c +++ b/arch/parisc/kernel/sys_parisc.c @@ -43,7 +43,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len) @@ -3552,7 +3528,7 @@ index c9b9322..02d8940 100644 return addr; addr = vma->vm_end; } -@@ -79,7 +79,7 @@ static unsigned long get_shared_area(struct address_space *mapping, +@@ -81,7 +81,7 @@ static unsigned long get_shared_area(struct address_space *mapping, /* At this point: (!vma || addr < vma->vm_end). 
*/ if (TASK_SIZE - len < addr) return -ENOMEM; @@ -3561,7 +3537,7 @@ index c9b9322..02d8940 100644 return addr; addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; if (addr < vma->vm_end) /* handle wraparound */ -@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, +@@ -100,7 +100,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, if (flags & MAP_FIXED) return addr; if (!addr) @@ -12116,7 +12092,7 @@ index f7c89e2..07d412d 100644 /* Get/set a process' ability to use the timestamp counter instruction */ diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h -index 3566454..4bdfb8c 100644 +index 3b96fd4..8790004 100644 --- a/arch/x86/include/asm/ptrace.h +++ b/arch/x86/include/asm/ptrace.h @@ -156,28 +156,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs) @@ -14234,10 +14210,10 @@ index 25f24dc..4094a7f 100644 obj-y += proc.o capflags.o powerflags.o common.o obj-y += vmware.o hypervisor.o sched.o mshyperv.o diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c -index ff8557e..8b1007b 100644 +index f07becc..b17b101 100644 --- a/arch/x86/kernel/cpu/amd.c +++ b/arch/x86/kernel/cpu/amd.c -@@ -680,7 +680,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, +@@ -694,7 +694,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c, unsigned int size) { /* AMD errata T13 (order #21922) */ @@ -18909,10 +18885,29 @@ index 6a364a6..b147d11 100644 ip = *(u64 *)(fp+8); if (!in_sched_functions(ip)) diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c -index 8252879..bc7cda1 100644 +index 2dc4121..3c2bbcd 100644 --- a/arch/x86/kernel/ptrace.c +++ b/arch/x86/kernel/ptrace.c -@@ -822,7 +822,7 @@ long arch_ptrace(struct task_struct *child, long request, +@@ -181,14 +181,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs) + { + unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1); + unsigned long sp = (unsigned long)®s->sp; +- struct thread_info *tinfo; + +- if (context == (sp & ~(THREAD_SIZE - 1))) ++ if (context == ((sp + 8) & ~(THREAD_SIZE - 1))) + return sp; + +- tinfo = (struct thread_info *)context; +- if (tinfo->previous_esp) +- return tinfo->previous_esp; ++ sp = *(unsigned long *)context; ++ if (sp) ++ return sp; + + return (unsigned long)regs; + } +@@ -852,7 +851,7 @@ long arch_ptrace(struct task_struct *child, long request, unsigned long addr, unsigned long data) { int ret; @@ -18921,7 +18916,7 @@ index 8252879..bc7cda1 100644 switch (request) { /* read the word at location addr in the USER area. 
*/ -@@ -907,14 +907,14 @@ long arch_ptrace(struct task_struct *child, long request, +@@ -937,14 +936,14 @@ long arch_ptrace(struct task_struct *child, long request, if ((int) addr < 0) return -EIO; ret = do_get_thread_area(child, addr, @@ -18938,7 +18933,7 @@ index 8252879..bc7cda1 100644 break; #endif -@@ -1331,7 +1331,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, +@@ -1361,7 +1360,7 @@ static void fill_sigtrap_info(struct task_struct *tsk, memset(info, 0, sizeof(*info)); info->si_signo = SIGTRAP; info->si_code = si_code; @@ -18947,7 +18942,7 @@ index 8252879..bc7cda1 100644 } void user_single_step_siginfo(struct task_struct *tsk, -@@ -1360,6 +1360,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, +@@ -1390,6 +1389,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs, # define IS_IA32 0 #endif @@ -18958,7 +18953,7 @@ index 8252879..bc7cda1 100644 /* * We must return the syscall number to actually look up in the table. * This can be -1L to skip running any syscall at all. -@@ -1368,6 +1372,11 @@ long syscall_trace_enter(struct pt_regs *regs) +@@ -1398,6 +1401,11 @@ long syscall_trace_enter(struct pt_regs *regs) { long ret = 0; @@ -18970,7 +18965,7 @@ index 8252879..bc7cda1 100644 /* * If we stepped into a sysenter/syscall insn, it trapped in * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP. -@@ -1413,6 +1422,11 @@ void syscall_trace_leave(struct pt_regs *regs) +@@ -1443,6 +1451,11 @@ void syscall_trace_leave(struct pt_regs *regs) { bool step; @@ -19180,7 +19175,7 @@ index 7a6f3b3..bed145d7 100644 1: diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c -index cf0ef98..e3f780b 100644 +index 0d403aa..42a31fa 100644 --- a/arch/x86/kernel/setup.c +++ b/arch/x86/kernel/setup.c @@ -447,7 +447,7 @@ static void __init parse_setup_data(void) @@ -24891,7 +24886,7 @@ index df7d12c..abafe9e 100644 } if (mm->get_unmapped_area == arch_get_unmapped_area) diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c -index 87488b9..ec24280 100644 +index 34a7f40..579f383 100644 --- a/arch/x86/mm/init.c +++ b/arch/x86/mm/init.c @@ -3,6 +3,7 @@ @@ -24911,16 +24906,16 @@ index 87488b9..ec24280 100644 unsigned long __initdata pgt_buf_start; unsigned long __meminitdata pgt_buf_end; -@@ -31,7 +34,7 @@ int direct_gbpages - static void __init find_early_table_space(unsigned long end, int use_pse, - int use_gbpages) +@@ -43,7 +46,7 @@ static void __init find_early_table_space(struct map_range *mr, int nr_range) { -- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end; -+ unsigned long puds, pmds, ptes, tables, start = 0x100000, good_end = end; + int i; + unsigned long puds = 0, pmds = 0, ptes = 0, tables; +- unsigned long start = 0, good_end; ++ unsigned long start = 0x100000, good_end; phys_addr_t base; - puds = (end + PUD_SIZE - 1) >> PUD_SHIFT; -@@ -310,10 +313,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, + for (i = 0; i < nr_range; i++) { +@@ -319,10 +322,40 @@ unsigned long __init_refok init_memory_mapping(unsigned long start, * Access has to be given to non-kernel-ram areas as well, these contain the PCI * mmio resources as well as potential bios/acpi data regions. 
*/ @@ -24962,7 +24957,7 @@ index 87488b9..ec24280 100644 if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) return 0; if (!page_is_ram(pagenr)) -@@ -370,8 +403,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) +@@ -379,8 +412,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) #endif } @@ -31310,7 +31305,7 @@ index ca67338..0003ba7 100644 return can_switch; } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h -index c364358..317c8de 100644 +index 791df46..88fc1f9 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -229,7 +229,7 @@ struct drm_i915_display_funcs { @@ -31331,7 +31326,7 @@ index c364358..317c8de 100644 /* protects the irq masks */ spinlock_t irq_lock; -@@ -894,7 +894,7 @@ struct drm_i915_gem_object { +@@ -896,7 +896,7 @@ struct drm_i915_gem_object { * will be page flipped away on the next vblank. When it * reaches 0, dev_priv->pending_flip_queue will be woken up. */ @@ -31340,7 +31335,7 @@ index c364358..317c8de 100644 }; #define to_intel_bo(x) container_of(x, struct drm_i915_gem_object, base) -@@ -1271,7 +1271,7 @@ extern int intel_setup_gmbus(struct drm_device *dev); +@@ -1273,7 +1273,7 @@ extern int intel_setup_gmbus(struct drm_device *dev); extern void intel_teardown_gmbus(struct drm_device *dev); extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed); extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit); @@ -31424,10 +31419,10 @@ index 2812d7b..c35ade7 100644 INIT_WORK(&dev_priv->hotplug_work, i915_hotplug_work_func); INIT_WORK(&dev_priv->error_work, i915_error_work_func); diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c -index adac0dd..bb551dd 100644 +index fdae61f..6aecf8b 100644 --- a/drivers/gpu/drm/i915/intel_display.c +++ b/drivers/gpu/drm/i915/intel_display.c -@@ -2196,7 +2196,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb) +@@ -2214,7 +2214,7 @@ intel_finish_fb(struct drm_framebuffer *old_fb) wait_event(dev_priv->pending_flip_queue, atomic_read(&dev_priv->mm.wedged) || @@ -31436,7 +31431,7 @@ index adac0dd..bb551dd 100644 /* Big Hammer, we also need to ensure that any pending * MI_WAIT_FOR_EVENT inside a user batch buffer on the -@@ -6971,8 +6971,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, +@@ -6989,8 +6989,7 @@ static void do_intel_finish_page_flip(struct drm_device *dev, obj = work->old_fb_obj; @@ -31446,7 +31441,7 @@ index adac0dd..bb551dd 100644 wake_up(&dev_priv->pending_flip_queue); schedule_work(&work->work); -@@ -7167,7 +7166,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev, +@@ -7185,7 +7184,13 @@ static int intel_gen6_queue_flip(struct drm_device *dev, OUT_RING(fb->pitch | obj->tiling_mode); OUT_RING(obj->gtt_offset); @@ -31461,7 +31456,7 @@ index adac0dd..bb551dd 100644 pipesrc = I915_READ(PIPESRC(intel_crtc->pipe)) & 0x0fff0fff; OUT_RING(pf | pipesrc); ADVANCE_LP_RING(); -@@ -7299,7 +7304,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, +@@ -7317,7 +7322,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, /* Block clients from rendering to the new back buffer until * the flip occurs and the object is no longer visible. 
*/ @@ -31470,7 +31465,7 @@ index adac0dd..bb551dd 100644 ret = dev_priv->display.queue_flip(dev, crtc, fb, obj); if (ret) -@@ -7313,7 +7318,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, +@@ -7331,7 +7336,7 @@ static int intel_crtc_page_flip(struct drm_crtc *crtc, return 0; cleanup_pending: @@ -31953,7 +31948,7 @@ index a9049ed..501f284 100644 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full && rdev->pm.k8_bandwidth.full) diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c -index 727e93d..1565650 100644 +index 9e4313e..46fad36 100644 --- a/drivers/gpu/drm/ttm/ttm_page_alloc.c +++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c @@ -398,9 +398,9 @@ static int ttm_pool_get_num_unused_pages(void) @@ -34301,7 +34296,7 @@ index 237571a..fb6d19b 100644 pmd->bl_info.value_type.inc = data_block_inc; pmd->bl_info.value_type.dec = data_block_dec; diff --git a/drivers/md/dm.c b/drivers/md/dm.c -index 502dcf7..27f0a1e 100644 +index 8953630..29b12d9 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -177,9 +177,9 @@ struct mapped_device { @@ -34316,7 +34311,7 @@ index 502dcf7..27f0a1e 100644 struct list_head uevent_list; spinlock_t uevent_lock; /* Protect access to uevent_list */ -@@ -1865,8 +1865,8 @@ static struct mapped_device *alloc_dev(int minor) +@@ -1871,8 +1871,8 @@ static struct mapped_device *alloc_dev(int minor) rwlock_init(&md->map_lock); atomic_set(&md->holders, 1); atomic_set(&md->open_count, 0); @@ -34327,7 +34322,7 @@ index 502dcf7..27f0a1e 100644 INIT_LIST_HEAD(&md->uevent_list); spin_lock_init(&md->uevent_lock); -@@ -2000,7 +2000,7 @@ static void event_callback(void *context) +@@ -2006,7 +2006,7 @@ static void event_callback(void *context) dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj); @@ -34336,7 +34331,7 @@ index 502dcf7..27f0a1e 100644 wake_up(&md->eventq); } -@@ -2642,18 +2642,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, +@@ -2648,18 +2648,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action, uint32_t dm_next_uevent_seq(struct mapped_device *md) { @@ -34359,7 +34354,7 @@ index 502dcf7..27f0a1e 100644 void dm_uevent_add(struct mapped_device *md, struct list_head *elist) diff --git a/drivers/md/md.c b/drivers/md/md.c -index 2887f22..1c9aaa4 100644 +index 145e378e..f65704c 100644 --- a/drivers/md/md.c +++ b/drivers/md/md.c @@ -278,10 +278,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio); @@ -36051,10 +36046,10 @@ index d7ed58f..64cde36 100644 u32 timeout; u32 usec_delay; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -index 6c5cca8..de8ef63 100644 +index f00d6d5..81dcf1d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h -@@ -2708,6 +2708,7 @@ struct ixgbe_eeprom_operations { +@@ -2710,6 +2710,7 @@ struct ixgbe_eeprom_operations { s32 (*update_checksum)(struct ixgbe_hw *); u16 (*calc_checksum)(struct ixgbe_hw *); }; @@ -36062,7 +36057,7 @@ index 6c5cca8..de8ef63 100644 struct ixgbe_mac_operations { s32 (*init_hw)(struct ixgbe_hw *); -@@ -2769,6 +2770,7 @@ struct ixgbe_mac_operations { +@@ -2771,6 +2772,7 @@ struct ixgbe_mac_operations { /* Manageability interface */ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); }; @@ -36070,7 +36065,7 @@ index 6c5cca8..de8ef63 100644 struct ixgbe_phy_operations { s32 (*identify)(struct ixgbe_hw *); -@@ -2788,9 +2790,10 @@ struct ixgbe_phy_operations { +@@ -2790,9 +2792,10 @@ struct ixgbe_phy_operations { 
s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); s32 (*check_overtemp)(struct ixgbe_hw *); }; @@ -36082,7 +36077,7 @@ index 6c5cca8..de8ef63 100644 enum ixgbe_eeprom_type type; u32 semaphore_delay; u16 word_size; -@@ -2800,7 +2803,7 @@ struct ixgbe_eeprom_info { +@@ -2802,7 +2805,7 @@ struct ixgbe_eeprom_info { #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 struct ixgbe_mac_info { @@ -36091,7 +36086,7 @@ index 6c5cca8..de8ef63 100644 enum ixgbe_mac_type type; u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; -@@ -2828,7 +2831,7 @@ struct ixgbe_mac_info { +@@ -2830,7 +2833,7 @@ struct ixgbe_mac_info { }; struct ixgbe_phy_info { @@ -36100,7 +36095,7 @@ index 6c5cca8..de8ef63 100644 struct mdio_if_info mdio; enum ixgbe_phy_type type; u32 id; -@@ -2856,6 +2859,7 @@ struct ixgbe_mbx_operations { +@@ -2858,6 +2861,7 @@ struct ixgbe_mbx_operations { s32 (*check_for_ack)(struct ixgbe_hw *, u16); s32 (*check_for_rst)(struct ixgbe_hw *, u16); }; @@ -36108,7 +36103,7 @@ index 6c5cca8..de8ef63 100644 struct ixgbe_mbx_stats { u32 msgs_tx; -@@ -2867,7 +2871,7 @@ struct ixgbe_mbx_stats { +@@ -2869,7 +2873,7 @@ struct ixgbe_mbx_stats { }; struct ixgbe_mbx_info { @@ -45429,7 +45424,7 @@ index a6f3763..f38ed00 100644 out_free_fd: diff --git a/fs/exec.c b/fs/exec.c -index 121ccae..d0404a2 100644 +index 121ccae..c3cc8b9 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,12 +55,33 @@ @@ -46268,7 +46263,7 @@ index 121ccae..d0404a2 100644 audit_core_dumps(signr); + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) -+ gr_handle_brute_attach(current, cprm.mm_flags); ++ gr_handle_brute_attach(cprm.mm_flags); + binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) @@ -60514,10 +60509,10 @@ index 0000000..f7f29aa +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 -index 0000000..7a5b2de +index 0000000..1080a03 --- /dev/null +++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,207 @@ +@@ -0,0 +1,220 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/delay.h> @@ -60595,6 +60590,7 @@ index 0000000..7a5b2de + +#ifdef CONFIG_GRKERNSEC_BRUTE +#define GR_USER_BAN_TIME (15 * 60) ++#define GR_DAEMON_BRUTE_TIME (30 * 60) + +static int __get_dumpable(unsigned long mm_flags) +{ @@ -60605,10 +60601,12 @@ index 0000000..7a5b2de +} +#endif + -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags) ++void gr_handle_brute_attach(unsigned long mm_flags) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct task_struct *p = current; + uid_t uid = 0; ++ int daemon = 0; + + if (!grsec_enable_brute) + return; @@ -60616,9 +60614,11 @@ index 0000000..7a5b2de + rcu_read_lock(); + read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) ++ if (p->real_parent && p->real_parent->exec_file == p->exec_file) { ++ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; -+ else { ++ daemon = 1; ++ } else { + const struct cred *cred = __task_cred(p), *cred2; + struct task_struct *tsk, *tsk2; + @@ -60650,6 +60650,8 @@ index 0000000..7a5b2de + + if (uid) + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", uid, GR_USER_BAN_TIME / 60); ++ else if (daemon) ++ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); + +#endif + return; @@ -60658,8 +60660,14 @@ index 0000000..7a5b2de +void gr_handle_brute_check(void) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE -+ if (current->brute) -+ msleep(30 
* 1000); ++ struct task_struct *p = current; ++ ++ if (unlikely(p->brute)) { ++ if (!grsec_enable_brute) ++ p->brute = 0; ++ else if (time_before(get_seconds(), p->brute_expires)) ++ msleep(30 * 1000); ++ } +#endif + return; +} @@ -63732,10 +63740,10 @@ index 0000000..c9292f7 +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 -index 0000000..54f4e85 +index 0000000..2bd4c8d --- /dev/null +++ b/include/linux/grmsg.h -@@ -0,0 +1,110 @@ +@@ -0,0 +1,111 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " @@ -63846,9 +63854,10 @@ index 0000000..54f4e85 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " ++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 -index 0000000..90ccf22 +index 0000000..88c3d04 --- /dev/null +++ b/include/linux/grsecurity.h @@ -0,0 +1,236 @@ @@ -63887,7 +63896,7 @@ index 0000000..90ccf22 + } ptr; +}; + -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags); ++void gr_handle_brute_attach(unsigned long mm_flags); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); +int gr_process_user_ban(void); @@ -64481,6 +64490,40 @@ index 6bea2c2..a27245a 100644 /* * Kernel text modification mutex, used for code patching. 
Users of this lock +diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h +index 3e8f2f7..f85c5ab 100644 +--- a/include/linux/mempolicy.h ++++ b/include/linux/mempolicy.h +@@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) + __mpol_put(pol); + } + +-extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, +- struct mempolicy *frompol); +-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, +- struct mempolicy *frompol) +-{ +- if (!frompol) +- return frompol; +- return __mpol_cond_copy(tompol, frompol); +-} +- + extern struct mempolicy *__mpol_dup(struct mempolicy *pol); + static inline struct mempolicy *mpol_dup(struct mempolicy *pol) + { +@@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) + { + } + +-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, +- struct mempolicy *from) +-{ +- return from; +-} +- + static inline void mpol_get(struct mempolicy *pol) + { + } diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 9970337..9444122 100644 --- a/include/linux/mfd/abx500.h @@ -65376,7 +65419,7 @@ index 2148b12..519b820 100644 static inline void anon_vma_merge(struct vm_area_struct *vma, diff --git a/include/linux/sched.h b/include/linux/sched.h -index 1e86bb4..37d6860 100644 +index 1e86bb4..ab37e2e 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,6 +101,7 @@ struct bio_list; @@ -65497,7 +65540,7 @@ index 1e86bb4..37d6860 100644 #ifdef CONFIG_DEBUG_MUTEXES /* mutex deadlock detection */ struct mutex_waiter *blocked_on; -@@ -1544,6 +1575,27 @@ struct task_struct { +@@ -1544,6 +1575,28 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; @@ -65514,6 +65557,7 @@ index 1e86bb4..37d6860 100644 + struct acl_subject_label *acl; + struct acl_role_label *role; + struct file *exec_file; ++ unsigned long brute_expires; + u16 acl_role_id; + /* is this the task that authenticated to the special role */ + u8 acl_sp_role; @@ -65525,7 +65569,7 @@ index 1e86bb4..37d6860 100644 #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack */ int curr_ret_stack; -@@ -1578,6 +1630,51 @@ struct task_struct { +@@ -1578,6 +1631,51 @@ struct task_struct { #endif }; @@ -65577,7 +65621,7 @@ index 1e86bb4..37d6860 100644 /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -@@ -2093,7 +2190,9 @@ void yield(void); +@@ -2093,7 +2191,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -65587,7 +65631,7 @@ index 1e86bb4..37d6860 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2126,6 +2225,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2126,6 +2226,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -65595,7 +65639,7 @@ index 1e86bb4..37d6860 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2247,6 +2347,12 @@ static inline void mmdrop(struct mm_struct * mm) +@@ -2247,6 +2348,12 @@ static inline void mmdrop(struct mm_struct * mm) extern void mmput(struct mm_struct *); /* Grab a reference to a task's mm, if it is not already going away */ extern struct mm_struct *get_task_mm(struct task_struct *task); @@ -65608,7 +65652,7 @@ index 1e86bb4..37d6860 100644 /* Remove the current tasks stale references to the old mm_struct */ extern void mm_release(struct task_struct *, struct mm_struct *); /* Allocate a new mm structure and copy contents from tsk->mm */ -@@ -2263,7 +2369,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2263,7 +2370,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -65617,7 +65661,7 @@ index 1e86bb4..37d6860 100644 extern void daemonize(const char *, ...); extern int allow_signal(int); -@@ -2428,9 +2534,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2428,9 +2535,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -68937,7 +68981,7 @@ index 222457a..de637ca 100644 else new_fs = fs; diff --git a/kernel/futex.c b/kernel/futex.c -index 80fb1c6..f2b5e1f 100644 +index 77bccfc..f2b5e1f 100644 --- a/kernel/futex.c +++ b/kernel/futex.c @@ -54,6 +54,7 @@ @@ -68960,139 +69004,7 @@ index 80fb1c6..f2b5e1f 100644 /* * The futex address must be "naturally" aligned. */ -@@ -716,7 +722,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, - struct futex_pi_state **ps, - struct task_struct *task, int set_waiters) - { -- int lock_taken, ret, ownerdied = 0; -+ int lock_taken, ret, force_take = 0; - u32 uval, newval, curval, vpid = task_pid_vnr(task); - - retry: -@@ -755,17 +761,15 @@ retry: - newval = curval | FUTEX_WAITERS; - - /* -- * There are two cases, where a futex might have no owner (the -- * owner TID is 0): OWNER_DIED. We take over the futex in this -- * case. We also do an unconditional take over, when the owner -- * of the futex died. -- * -- * This is safe as we are protected by the hash bucket lock ! -+ * Should we force take the futex? See below. - */ -- if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { -- /* Keep the OWNER_DIED bit */ -+ if (unlikely(force_take)) { -+ /* -+ * Keep the OWNER_DIED and the WAITERS bit and set the -+ * new TID value. -+ */ - newval = (curval & ~FUTEX_TID_MASK) | vpid; -- ownerdied = 0; -+ force_take = 0; - lock_taken = 1; - } - -@@ -775,7 +779,7 @@ retry: - goto retry; - - /* -- * We took the lock due to owner died take over. -+ * We took the lock due to forced take over. - */ - if (unlikely(lock_taken)) - return 1; -@@ -790,20 +794,25 @@ retry: - switch (ret) { - case -ESRCH: - /* -- * No owner found for this futex. 
Check if the -- * OWNER_DIED bit is set to figure out whether -- * this is a robust futex or not. -+ * We failed to find an owner for this -+ * futex. So we have no pi_state to block -+ * on. This can happen in two cases: -+ * -+ * 1) The owner died -+ * 2) A stale FUTEX_WAITERS bit -+ * -+ * Re-read the futex value. - */ - if (get_futex_value_locked(&curval, uaddr)) - return -EFAULT; - - /* -- * We simply start over in case of a robust -- * futex. The code above will take the futex -- * and return happy. -+ * If the owner died or we have a stale -+ * WAITERS bit the owner TID in the user space -+ * futex is 0. - */ -- if (curval & FUTEX_OWNER_DIED) { -- ownerdied = 1; -+ if (!(curval & FUTEX_TID_MASK)) { -+ force_take = 1; - goto retry; - } - default: -@@ -840,6 +849,9 @@ static void wake_futex(struct futex_q *q) - { - struct task_struct *p = q->task; - -+ if (WARN(q->pi_state || q->rt_waiter, "refusing to wake PI futex\n")) -+ return; -+ - /* - * We set q->lock_ptr = NULL _before_ we wake up the task. If - * a non-futex wake up happens on another CPU then the task -@@ -1075,6 +1087,10 @@ retry_private: - - plist_for_each_entry_safe(this, next, head, list) { - if (match_futex (&this->key, &key1)) { -+ if (this->pi_state || this->rt_waiter) { -+ ret = -EINVAL; -+ goto out_unlock; -+ } - wake_futex(this); - if (++ret >= nr_wake) - break; -@@ -1087,6 +1103,10 @@ retry_private: - op_ret = 0; - plist_for_each_entry_safe(this, next, head, list) { - if (match_futex (&this->key, &key2)) { -+ if (this->pi_state || this->rt_waiter) { -+ ret = -EINVAL; -+ goto out_unlock; -+ } - wake_futex(this); - if (++op_ret >= nr_wake2) - break; -@@ -1095,6 +1115,7 @@ retry_private: - ret += op_ret; - } - -+out_unlock: - double_unlock_hb(hb1, hb2); - out_put_keys: - put_futex_key(&key2); -@@ -1384,9 +1405,13 @@ retry_private: - /* - * FUTEX_WAIT_REQEUE_PI and FUTEX_CMP_REQUEUE_PI should always - * be paired with each other and no other futex ops. -+ * -+ * We should never be requeueing a futex_q with a pi_state, -+ * which is awaiting a futex_unlock_pi(). - */ - if ((requeue_pi && !this->rt_waiter) || -- (!requeue_pi && this->rt_waiter)) { -+ (!requeue_pi && this->rt_waiter) || -+ this->pi_state) { - ret = -EINVAL; - break; - } -@@ -2724,6 +2749,7 @@ static int __init futex_init(void) +@@ -2743,6 +2749,7 @@ static int __init futex_init(void) { u32 curval; int i; @@ -69100,7 +69012,7 @@ index 80fb1c6..f2b5e1f 100644 /* * This will fail and we want it. Some arch implementations do -@@ -2735,8 +2761,11 @@ static int __init futex_init(void) +@@ -2754,8 +2761,11 @@ static int __init futex_init(void) * implementation, the non-functional ones will return * -ENOSYS. */ @@ -72777,22 +72689,10 @@ index 209b379..7f76423 100644 put_task_struct(tsk); } diff --git a/kernel/workqueue.c b/kernel/workqueue.c -index 43a19c5..a6ca577 100644 +index d551d5f..a6ca577 100644 --- a/kernel/workqueue.c +++ b/kernel/workqueue.c -@@ -2052,8 +2052,10 @@ static int rescuer_thread(void *__wq) - repeat: - set_current_state(TASK_INTERRUPTIBLE); - -- if (kthread_should_stop()) -+ if (kthread_should_stop()) { -+ __set_current_state(TASK_RUNNING); - return 0; -+ } - - /* - * See whether any cpu is asking for help. 
Unbounded -@@ -3468,7 +3470,7 @@ static int __cpuinit trustee_thread(void *__gcwq) +@@ -3470,7 +3470,7 @@ static int __cpuinit trustee_thread(void *__gcwq) */ worker_flags |= WORKER_REBIND; worker_flags &= ~WORKER_ROGUE; @@ -73531,7 +73431,7 @@ index 23d3a6b..e10d35a 100644 if (end == start) goto out; diff --git a/mm/memory-failure.c b/mm/memory-failure.c -index 5bd5bb1..de5405e 100644 +index 1b03878..de5405e 100644 --- a/mm/memory-failure.c +++ b/mm/memory-failure.c @@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0; @@ -73606,25 +73506,7 @@ index 5bd5bb1..de5405e 100644 set_page_hwpoison_huge_page(hpage); dequeue_hwpoisoned_huge_page(hpage); /* keep elevated page count for bad page */ -@@ -1475,9 +1475,17 @@ int soft_offline_page(struct page *page, int flags) - { - int ret; - unsigned long pfn = page_to_pfn(page); -+ struct page *hpage = compound_trans_head(page); - - if (PageHuge(page)) - return soft_offline_huge_page(page, flags); -+ if (PageTransHuge(hpage)) { -+ if (PageAnon(hpage) && unlikely(split_huge_page(hpage))) { -+ pr_info("soft offline: %#lx: failed to split THP\n", -+ pfn); -+ return -EBUSY; -+ } -+ } - - ret = get_any_page(page, pfn, flags); - if (ret < 0) -@@ -1573,7 +1581,7 @@ int soft_offline_page(struct page *page, int flags) +@@ -1581,7 +1581,7 @@ int soft_offline_page(struct page *page, int flags) return ret; done: @@ -74314,7 +74196,7 @@ index 9ad7d1e..09d87b7 100644 writeback_set_ratelimit(); diff --git a/mm/mempolicy.c b/mm/mempolicy.c -index 4c82c21..d7fdbbd 100644 +index 4c82c21..16ca616 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, @@ -74388,6 +74270,35 @@ index 4c82c21..d7fdbbd 100644 rcu_read_unlock(); err = -EPERM; goto out; +@@ -1999,28 +2031,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) + return new; + } + +-/* +- * If *frompol needs [has] an extra ref, copy *frompol to *tompol , +- * eliminate the * MPOL_F_* flags that require conditional ref and +- * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly +- * after return. Use the returned value. +- * +- * Allows use of a mempolicy for, e.g., multiple allocations with a single +- * policy lookup, even if the policy needs/has extra ref on lookup. +- * shmem_readahead needs this. 
+- */ +-struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, +- struct mempolicy *frompol) +-{ +- if (!mpol_needs_cond_ref(frompol)) +- return frompol; +- +- *tompol = *frompol; +- tompol->flags &= ~MPOL_F_SHARED; /* copy doesn't need unref */ +- __mpol_put(frompol); +- return tompol; +-} +- + /* Slow path of a mempolicy comparison */ + int __mpol_equal(struct mempolicy *a, struct mempolicy *b) + { diff --git a/mm/migrate.c b/mm/migrate.c index 180d97f..c75ef28 100644 --- a/mm/migrate.c @@ -76489,7 +76400,7 @@ index 8685697..b490361 100644 struct anon_vma_chain *avc; struct anon_vma *anon_vma; diff --git a/mm/shmem.c b/mm/shmem.c -index 126ca35..a89cd4c 100644 +index 2d46e23..5118865 100644 --- a/mm/shmem.c +++ b/mm/shmem.c @@ -31,7 +31,7 @@ @@ -76510,7 +76421,59 @@ index 126ca35..a89cd4c 100644 struct shmem_xattr { struct list_head list; /* anchored by shmem_inode_info->xattr_list */ -@@ -1803,6 +1803,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = { +@@ -798,24 +798,28 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo) + static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp, + struct shmem_inode_info *info, pgoff_t index) + { +- struct mempolicy mpol, *spol; + struct vm_area_struct pvma; +- +- spol = mpol_cond_copy(&mpol, +- mpol_shared_policy_lookup(&info->policy, index)); ++ struct page *page; + + /* Create a pseudo vma that just contains the policy */ + pvma.vm_start = 0; + pvma.vm_pgoff = index; + pvma.vm_ops = NULL; +- pvma.vm_policy = spol; +- return swapin_readahead(swap, gfp, &pvma, 0); ++ pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); ++ ++ page = swapin_readahead(swap, gfp, &pvma, 0); ++ ++ /* Drop reference taken by mpol_shared_policy_lookup() */ ++ mpol_cond_put(pvma.vm_policy); ++ ++ return page; + } + + static struct page *shmem_alloc_page(gfp_t gfp, + struct shmem_inode_info *info, pgoff_t index) + { + struct vm_area_struct pvma; ++ struct page *page; + + /* Create a pseudo vma that just contains the policy */ + pvma.vm_start = 0; +@@ -823,10 +827,12 @@ static struct page *shmem_alloc_page(gfp_t gfp, + pvma.vm_ops = NULL; + pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index); + +- /* +- * alloc_page_vma() will drop the shared policy reference +- */ +- return alloc_page_vma(gfp, &pvma, 0); ++ page = alloc_page_vma(gfp, &pvma, 0); ++ ++ /* Drop reference taken by mpol_shared_policy_lookup() */ ++ mpol_cond_put(pvma.vm_policy); ++ ++ return page; + } + #else /* !CONFIG_NUMA */ + #ifdef CONFIG_TMPFS +@@ -1803,6 +1809,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = { static int shmem_xattr_validate(const char *name) { struct { const char *prefix; size_t len; } arr[] = { @@ -76522,7 +76485,7 @@ index 126ca35..a89cd4c 100644 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } }; -@@ -1856,6 +1861,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name, +@@ -1856,6 +1867,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name, if (err) return err; @@ -76538,7 +76501,7 @@ index 126ca35..a89cd4c 100644 if (size == 0) value = ""; /* empty EA, do not remove */ -@@ -2183,8 +2197,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) +@@ -2183,8 +2203,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent) int err = -ENOMEM; /* Round up to L1_CACHE_BYTES to resist false sharing */ @@ -77428,47 +77391,6 @@ index 1b7e22a..3fcd4f3 100644 } return pgd; } -diff --git 
a/mm/sparse.c b/mm/sparse.c -index bf7d3cc..42935b5 100644 ---- a/mm/sparse.c -+++ b/mm/sparse.c -@@ -622,7 +622,7 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) - { - return; /* XXX: Not implemented yet */ - } --static void free_map_bootmem(struct page *page, unsigned long nr_pages) -+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) - { - } - #else -@@ -663,10 +663,11 @@ static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages) - get_order(sizeof(struct page) * nr_pages)); - } - --static void free_map_bootmem(struct page *page, unsigned long nr_pages) -+static void free_map_bootmem(struct page *memmap, unsigned long nr_pages) - { - unsigned long maps_section_nr, removing_section_nr, i; - unsigned long magic; -+ struct page *page = virt_to_page(memmap); - - for (i = 0; i < nr_pages; i++, page++) { - magic = (unsigned long) page->lru.next; -@@ -715,13 +716,10 @@ static void free_section_usemap(struct page *memmap, unsigned long *usemap) - */ - - if (memmap) { -- struct page *memmap_page; -- memmap_page = virt_to_page(memmap); -- - nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page)) - >> PAGE_SHIFT; - -- free_map_bootmem(memmap_page, nr_pages); -+ free_map_bootmem(memmap, nr_pages); - } - } - diff --git a/mm/swap.c b/mm/swap.c index 55b266d..a532537 100644 --- a/mm/swap.c @@ -78480,20 +78402,6 @@ index 8656909..a2ae45d 100644 } /* Update statistics. */ -diff --git a/net/can/bcm.c b/net/can/bcm.c -index 151b773..3910c1f 100644 ---- a/net/can/bcm.c -+++ b/net/can/bcm.c -@@ -1084,6 +1084,9 @@ static int bcm_rx_setup(struct bcm_msg_head *msg_head, struct msghdr *msg, - op->sk = sk; - op->ifindex = ifindex; - -+ /* ifindex for timeout events w/o previous frame reception */ -+ op->rx_ifindex = ifindex; -+ - /* initialize uninitialized (kzalloc) structure */ - hrtimer_init(&op->timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); - op->timer.function = bcm_rx_timeout_handler; diff --git a/net/can/gw.c b/net/can/gw.c index 3d79b12..8de85fa 100644 --- a/net/can/gw.c @@ -78661,7 +78569,7 @@ index 68bbf9f..5ef0d12 100644 return err; diff --git a/net/core/dev.c b/net/core/dev.c -index 480be72..cd4758c 100644 +index 2aac4ec..2f9871c 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -1142,10 +1142,14 @@ void dev_load(struct net *net, const char *name) @@ -78706,7 +78614,7 @@ index 480be72..cd4758c 100644 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb) -@@ -2964,7 +2968,7 @@ enqueue: +@@ -2966,7 +2970,7 @@ enqueue: local_irq_restore(flags); @@ -78715,7 +78623,7 @@ index 480be72..cd4758c 100644 kfree_skb(skb); return NET_RX_DROP; } -@@ -3038,7 +3042,7 @@ int netif_rx_ni(struct sk_buff *skb) +@@ -3040,7 +3044,7 @@ int netif_rx_ni(struct sk_buff *skb) } EXPORT_SYMBOL(netif_rx_ni); @@ -78724,7 +78632,7 @@ index 480be72..cd4758c 100644 { struct softnet_data *sd = &__get_cpu_var(softnet_data); -@@ -3330,7 +3334,7 @@ ncls: +@@ -3332,7 +3336,7 @@ ncls: if (pt_prev) { ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev); } else { @@ -78733,7 +78641,7 @@ index 480be72..cd4758c 100644 kfree_skb(skb); /* Jamal, now you will not able to escape explaining * me how you were going to use this. 
:-) -@@ -3895,7 +3899,7 @@ void netif_napi_del(struct napi_struct *napi) +@@ -3897,7 +3901,7 @@ void netif_napi_del(struct napi_struct *napi) } EXPORT_SYMBOL(netif_napi_del); @@ -78742,7 +78650,7 @@ index 480be72..cd4758c 100644 { struct softnet_data *sd = &__get_cpu_var(softnet_data); unsigned long time_limit = jiffies + 2; -@@ -4365,8 +4369,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v) +@@ -4367,8 +4371,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v) else seq_printf(seq, "%04x", ntohs(pt->type)); @@ -78756,7 +78664,7 @@ index 480be72..cd4758c 100644 } return 0; -@@ -5923,7 +5932,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, +@@ -5925,7 +5934,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev, } else { netdev_stats_to_stats64(storage, &dev->stats); } @@ -79184,10 +79092,10 @@ index fdaabf2..0ec3205 100644 rc = qp->q.fragments && (end - start) > max; diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c -index 09ff51b..d3968eb 100644 +index 0106d25..cc0b33e 100644 --- a/net/ipv4/ip_sockglue.c +++ b/net/ipv4/ip_sockglue.c -@@ -1111,7 +1111,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, +@@ -1120,7 +1120,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, len = min_t(unsigned int, len, opt->optlen); if (put_user(len, optlen)) return -EFAULT; @@ -79197,7 +79105,7 @@ index 09ff51b..d3968eb 100644 return -EFAULT; return 0; } -@@ -1239,7 +1240,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, +@@ -1248,7 +1249,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname, if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; @@ -79821,10 +79729,10 @@ index 1567fb1..29af910 100644 dst = NULL; } diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c -index 26cb08c..8af9877 100644 +index b204df8..8f274f4 100644 --- a/net/ipv6/ipv6_sockglue.c +++ b/net/ipv6/ipv6_sockglue.c -@@ -960,7 +960,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, +@@ -961,7 +961,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname, if (sk->sk_type != SOCK_STREAM) return -ENOPROTOOPT; diff --git a/3.2.34/4425-tmpfs-user-namespace.patch b/3.2.35/4425-tmpfs-user-namespace.patch index a7d2649..e919350 100644 --- a/3.2.34/4425-tmpfs-user-namespace.patch +++ b/3.2.35/4425-tmpfs-user-namespace.patch @@ -13,12 +13,11 @@ Signed-off-by: Anthony G. 
Basile <blueness@gentoo.org> --- diff --git a/mm/shmem.c b/mm/shmem.c -index 67afba5..697a181 100644 --- a/mm/shmem.c +++ b/mm/shmem.c -@@ -1804,7 +1804,8 @@ static int shmem_xattr_validate(const char *name) - { - struct { const char *prefix; size_t len; } arr[] = { +@@ -1815,7 +1815,8 @@ + #endif + { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN }, - { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN } + { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }, diff --git a/3.2.34/4430_grsec-remove-localversion-grsec.patch b/3.2.35/4430_grsec-remove-localversion-grsec.patch index 31cf878..31cf878 100644 --- a/3.2.34/4430_grsec-remove-localversion-grsec.patch +++ b/3.2.35/4430_grsec-remove-localversion-grsec.patch diff --git a/3.2.34/4435_grsec-mute-warnings.patch b/3.2.35/4435_grsec-mute-warnings.patch index e85abd6..e85abd6 100644 --- a/3.2.34/4435_grsec-mute-warnings.patch +++ b/3.2.35/4435_grsec-mute-warnings.patch diff --git a/3.2.34/4440_grsec-remove-protected-paths.patch b/3.2.35/4440_grsec-remove-protected-paths.patch index 637934a..637934a 100644 --- a/3.2.34/4440_grsec-remove-protected-paths.patch +++ b/3.2.35/4440_grsec-remove-protected-paths.patch diff --git a/3.2.34/4450_grsec-kconfig-default-gids.patch b/3.2.35/4450_grsec-kconfig-default-gids.patch index d4b0b7e..5c5b013 100644 --- a/3.2.34/4450_grsec-kconfig-default-gids.patch +++ b/3.2.35/4450_grsec-kconfig-default-gids.patch @@ -16,7 +16,7 @@ from shooting themselves in the foot. diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig --- a/grsecurity/Kconfig 2012-10-13 09:51:35.000000000 -0400 +++ b/grsecurity/Kconfig 2012-10-13 09:52:32.000000000 -0400 -@@ -522,7 +522,7 @@ +@@ -539,7 +539,7 @@ config GRKERNSEC_AUDIT_GID int "GID for auditing" depends on GRKERNSEC_AUDIT_GROUP @@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig config GRKERNSEC_EXECLOG bool "Exec logging" -@@ -742,7 +742,7 @@ +@@ -759,7 +759,7 @@ config GRKERNSEC_TPE_UNTRUSTED_GID int "GID for TPE-untrusted users" depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT @@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig help Setting this GID determines what group TPE restrictions will be *enabled* for. If the sysctl option is enabled, a sysctl option -@@ -751,7 +751,7 @@ +@@ -768,7 +768,7 @@ config GRKERNSEC_TPE_TRUSTED_GID int "GID for TPE-trusted users" depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT @@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig help Setting this GID determines what group TPE restrictions will be *disabled* for. If the sysctl option is enabled, a sysctl option -@@ -826,7 +826,7 @@ +@@ -843,7 +843,7 @@ config GRKERNSEC_SOCKET_ALL_GID int "GID to deny all sockets for" depends on GRKERNSEC_SOCKET_ALL @@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig help Here you can choose the GID to disable socket access for. Remember to add the users you want socket access disabled for to the GID -@@ -847,7 +847,7 @@ +@@ -864,7 +864,7 @@ config GRKERNSEC_SOCKET_CLIENT_GID int "GID to deny client sockets for" depends on GRKERNSEC_SOCKET_CLIENT @@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig help Here you can choose the GID to disable client socket access for. 
Remember to add the users you want client socket access disabled for to -@@ -865,7 +865,7 @@ +@@ -882,7 +882,7 @@ config GRKERNSEC_SOCKET_SERVER_GID int "GID to deny server sockets for" depends on GRKERNSEC_SOCKET_SERVER diff --git a/3.2.34/4465_selinux-avc_audit-log-curr_ip.patch b/3.2.35/4465_selinux-avc_audit-log-curr_ip.patch index 3ea7bcc..439ddca 100644 --- a/3.2.34/4465_selinux-avc_audit-log-curr_ip.patch +++ b/3.2.35/4465_selinux-avc_audit-log-curr_ip.patch @@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org> diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig --- a/grsecurity/Kconfig 2011-04-17 19:25:54.000000000 -0400 +++ b/grsecurity/Kconfig 2011-04-17 19:32:53.000000000 -0400 -@@ -924,6 +924,27 @@ +@@ -941,6 +941,27 @@ menu "Logging Options" depends on GRKERNSEC diff --git a/3.2.34/4470_disable-compat_vdso.patch b/3.2.35/4470_disable-compat_vdso.patch index 4742d01..4742d01 100644 --- a/3.2.34/4470_disable-compat_vdso.patch +++ b/3.2.35/4470_disable-compat_vdso.patch diff --git a/3.6.9/0000_README b/3.6.9/0000_README index 8e5aecd..a275502 100644 --- a/3.6.9/0000_README +++ b/3.6.9/0000_README @@ -6,7 +6,7 @@ Patch: 1008_linux-3.6.9.patch From: http://www.kernel.org Desc: Linux 3.6.9 -Patch: 4420_grsecurity-2.9.1-3.6.9-201212041903.patch +Patch: 4420_grsecurity-2.9.1-3.6.9-201212071641.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/3.6.9/4420_grsecurity-2.9.1-3.6.9-201212041903.patch b/3.6.9/4420_grsecurity-2.9.1-3.6.9-201212071641.patch index af317c3..9724617 100644 --- a/3.6.9/4420_grsecurity-2.9.1-3.6.9-201212041903.patch +++ b/3.6.9/4420_grsecurity-2.9.1-3.6.9-201212071641.patch @@ -28237,7 +28237,7 @@ index f9b983a..887b9d8 100644 return 0; } diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c -index 89b30f3..7964211 100644 +index 89b30f3..7964211d4 100644 --- a/drivers/atm/ambassador.c +++ b/drivers/atm/ambassador.c @@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) { @@ -45262,7 +45262,7 @@ index b2a34a1..162fa69 100644 return rc; } diff --git a/fs/exec.c b/fs/exec.c -index fab2c6d..6a13dff 100644 +index fab2c6d..9b2fad8 100644 --- a/fs/exec.c +++ b/fs/exec.c @@ -55,6 +55,15 @@ @@ -46010,7 +46010,7 @@ index fab2c6d..6a13dff 100644 audit_core_dumps(signr); + if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL) -+ gr_handle_brute_attach(current, cprm.mm_flags); ++ gr_handle_brute_attach(cprm.mm_flags); + binfmt = mm->binfmt; if (!binfmt || !binfmt->core_dump) @@ -59887,10 +59887,10 @@ index 0000000..f7f29aa +} diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c new file mode 100644 -index 0000000..b4ac94c +index 0000000..5c00416 --- /dev/null +++ b/grsecurity/grsec_sig.c -@@ -0,0 +1,209 @@ +@@ -0,0 +1,222 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/delay.h> @@ -59968,6 +59968,7 @@ index 0000000..b4ac94c + +#ifdef CONFIG_GRKERNSEC_BRUTE +#define GR_USER_BAN_TIME (15 * 60) ++#define GR_DAEMON_BRUTE_TIME (30 * 60) + +static int __get_dumpable(unsigned long mm_flags) +{ @@ -59978,10 +59979,12 @@ index 0000000..b4ac94c +} +#endif + -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags) ++void gr_handle_brute_attach(unsigned long mm_flags) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE ++ struct task_struct *p = current; + kuid_t uid = GLOBAL_ROOT_UID; ++ int daemon = 0; + + if (!grsec_enable_brute) + return; @@ -59989,9 +59992,11 @@ index 0000000..b4ac94c + rcu_read_lock(); + 
read_lock(&tasklist_lock); + read_lock(&grsec_exec_file_lock); -+ if (p->real_parent && p->real_parent->exec_file == p->exec_file) ++ if (p->real_parent && p->real_parent->exec_file == p->exec_file) { ++ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME; + p->real_parent->brute = 1; -+ else { ++ daemon = 1; ++ } else { + const struct cred *cred = __task_cred(p), *cred2; + struct task_struct *tsk, *tsk2; + @@ -60024,6 +60029,8 @@ index 0000000..b4ac94c + if (!uid_eq(uid, GLOBAL_ROOT_UID)) + printk(KERN_ALERT "grsec: bruteforce prevention initiated against uid %u, banning for %d minutes\n", + from_kuid_munged(&init_user_ns, uid), GR_USER_BAN_TIME / 60); ++ else if (daemon) ++ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG); + +#endif + return; @@ -60032,8 +60039,14 @@ index 0000000..b4ac94c +void gr_handle_brute_check(void) +{ +#ifdef CONFIG_GRKERNSEC_BRUTE -+ if (current->brute) -+ msleep(30 * 1000); ++ struct task_struct *p = current; ++ ++ if (unlikely(p->brute)) { ++ if (!grsec_enable_brute) ++ p->brute = 0; ++ else if (time_before(get_seconds(), p->brute_expires)) ++ msleep(30 * 1000); ++ } +#endif + return; +} @@ -63091,10 +63104,10 @@ index 0000000..c9292f7 +#endif diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h new file mode 100644 -index 0000000..54f4e85 +index 0000000..2bd4c8d --- /dev/null +++ b/include/linux/grmsg.h -@@ -0,0 +1,110 @@ +@@ -0,0 +1,111 @@ +#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" +#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" +#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " @@ -63205,9 +63218,10 @@ index 0000000..54f4e85 +#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by " +#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by " +#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by " ++#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for " diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h new file mode 100644 -index 0000000..187b3ed +index 0000000..28d8b19 --- /dev/null +++ b/include/linux/grsecurity.h @@ -0,0 +1,239 @@ @@ -63246,7 +63260,7 @@ index 0000000..187b3ed + } ptr; +}; + -+void gr_handle_brute_attach(struct task_struct *p, unsigned long mm_flags); ++void gr_handle_brute_attach(unsigned long mm_flags); +void gr_handle_brute_check(void); +void gr_handle_kernel_exploit(void); +int gr_process_user_ban(void); @@ -63829,6 +63843,40 @@ index ff9a9f8..c715deb 100644 /* * Kernel text modification mutex, used for code patching. 
Users of this lock +diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h +index df08254..5f6c835 100644 +--- a/include/linux/mempolicy.h ++++ b/include/linux/mempolicy.h +@@ -137,16 +137,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) + __mpol_put(pol); + } + +-extern struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol, +- struct mempolicy *frompol); +-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *tompol, +- struct mempolicy *frompol) +-{ +- if (!frompol) +- return frompol; +- return __mpol_cond_copy(tompol, frompol); +-} +- + extern struct mempolicy *__mpol_dup(struct mempolicy *pol); + static inline struct mempolicy *mpol_dup(struct mempolicy *pol) + { +@@ -270,12 +260,6 @@ static inline void mpol_cond_put(struct mempolicy *pol) + { + } + +-static inline struct mempolicy *mpol_cond_copy(struct mempolicy *to, +- struct mempolicy *from) +-{ +- return from; +-} +- + static inline void mpol_get(struct mempolicy *pol) + { + } diff --git a/include/linux/mfd/abx500.h b/include/linux/mfd/abx500.h index 1318ca6..7521340 100644 --- a/include/linux/mfd/abx500.h @@ -64657,7 +64705,7 @@ index 3fce545..b4fed6e 100644 static inline void anon_vma_merge(struct vm_area_struct *vma, struct vm_area_struct *next) diff --git a/include/linux/sched.h b/include/linux/sched.h -index 23bddac..5976055 100644 +index 23bddac..e08f087 100644 --- a/include/linux/sched.h +++ b/include/linux/sched.h @@ -101,6 +101,7 @@ struct bio_list; @@ -64768,7 +64816,7 @@ index 23bddac..5976055 100644 #ifdef CONFIG_FUTEX struct robust_list_head __user *robust_list; #ifdef CONFIG_COMPAT -@@ -1589,8 +1616,74 @@ struct task_struct { +@@ -1589,8 +1616,75 @@ struct task_struct { #ifdef CONFIG_UPROBES struct uprobe_task *utask; #endif @@ -64785,6 +64833,7 @@ index 23bddac..5976055 100644 + struct acl_subject_label *acl; + struct acl_role_label *role; + struct file *exec_file; ++ unsigned long brute_expires; + u16 acl_role_id; + /* is this the task that authenticated to the special role */ + u8 acl_sp_role; @@ -64843,7 +64892,7 @@ index 23bddac..5976055 100644 /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) -@@ -2125,7 +2218,9 @@ void yield(void); +@@ -2125,7 +2219,9 @@ void yield(void); extern struct exec_domain default_exec_domain; union thread_union { @@ -64853,7 +64902,7 @@ index 23bddac..5976055 100644 unsigned long stack[THREAD_SIZE/sizeof(long)]; }; -@@ -2158,6 +2253,7 @@ extern struct pid_namespace init_pid_ns; +@@ -2158,6 +2254,7 @@ extern struct pid_namespace init_pid_ns; */ extern struct task_struct *find_task_by_vpid(pid_t nr); @@ -64861,7 +64910,7 @@ index 23bddac..5976055 100644 extern struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns); -@@ -2314,7 +2410,7 @@ extern void __cleanup_sighand(struct sighand_struct *); +@@ -2314,7 +2411,7 @@ extern void __cleanup_sighand(struct sighand_struct *); extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -64870,7 +64919,7 @@ index 23bddac..5976055 100644 extern void daemonize(const char *, ...); extern int allow_signal(int); -@@ -2515,9 +2611,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) +@@ -2515,9 +2612,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p) #endif @@ -73238,7 +73287,7 @@ index 5736170..8e04800 100644 return 0; } diff --git a/mm/mempolicy.c b/mm/mempolicy.c -index 3d64b36..c6ab69c 100644 +index 3d64b36..5c2d5d3 100644 --- a/mm/mempolicy.c +++ b/mm/mempolicy.c @@ -655,6 +655,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start, @@ -73317,6 +73366,35 @@ index 3d64b36..c6ab69c 100644 err = do_migrate_pages(mm, old, new, capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE); +@@ -2035,28 +2069,6 @@ struct mempolicy *__mpol_dup(struct mempolicy *old) + return new; + } + +-/* +- * If *frompol needs [has] an extra ref, copy *frompol to *tompol , +- * eliminate the * MPOL_F_* flags that require conditional ref and +- * [NOTE!!!] drop the extra ref. Not safe to reference *frompol directly +- * after return. Use the returned value. +- * +- * Allows use of a mempolicy for, e.g., multiple allocations with a single +- * policy lookup, even if the policy needs/has extra ref on lookup. +- * shmem_readahead needs this. 
+- */
+-struct mempolicy *__mpol_cond_copy(struct mempolicy *tompol,
+-						struct mempolicy *frompol)
+-{
+-	if (!mpol_needs_cond_ref(frompol))
+-		return frompol;
+-
+-	*tompol = *frompol;
+-	tompol->flags &= ~MPOL_F_SHARED;	/* copy doesn't need unref */
+-	__mpol_put(frompol);
+-	return tompol;
+-}
+-
+ /* Slow path of a mempolicy comparison */
+ bool __mpol_equal(struct mempolicy *a, struct mempolicy *b)
+ {
 diff --git a/mm/migrate.c b/mm/migrate.c
 index 77ed2d7..317d528 100644
 --- a/mm/migrate.c
@@ -75265,7 +75343,7 @@ index aa95e59..b681a63 100644
 	struct anon_vma_chain *avc;
 	struct anon_vma *anon_vma;
 diff --git a/mm/shmem.c b/mm/shmem.c
-index 31e1506..dbf3647 100644
+index 31e1506..7b42549 100644
 --- a/mm/shmem.c
 +++ b/mm/shmem.c
 @@ -31,7 +31,7 @@
@@ -75286,7 +75364,60 @@ index 31e1506..dbf3647 100644
 
 struct shmem_xattr {
 	struct list_head list;	/* anchored by shmem_inode_info->xattr_list */
+@@ -921,25 +921,29 @@ static struct mempolicy *shmem_get_sbmpol(struct shmem_sb_info *sbinfo)
+ static struct page *shmem_swapin(swp_entry_t swap, gfp_t gfp,
+ 			struct shmem_inode_info *info, pgoff_t index)
+ {
+-	struct mempolicy mpol, *spol;
+ 	struct vm_area_struct pvma;
+-
+-	spol = mpol_cond_copy(&mpol,
+-				mpol_shared_policy_lookup(&info->policy, index));
++	struct page *page;
+ 
+ 	/* Create a pseudo vma that just contains the policy */
+ 	pvma.vm_start = 0;
+ 	/* Bias interleave by inode number to distribute better across nodes */
+ 	pvma.vm_pgoff = index + info->vfs_inode.i_ino;
+ 	pvma.vm_ops = NULL;
+-	pvma.vm_policy = spol;
+-	return swapin_readahead(swap, gfp, &pvma, 0);
++	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
++
++	page = swapin_readahead(swap, gfp, &pvma, 0);
++
++	/* Drop reference taken by mpol_shared_policy_lookup() */
++	mpol_cond_put(pvma.vm_policy);
++
++	return page;
+ }
+ 
+ static struct page *shmem_alloc_page(gfp_t gfp,
+ 			struct shmem_inode_info *info, pgoff_t index)
+ {
+ 	struct vm_area_struct pvma;
++	struct page *page;
+ 
+ 	/* Create a pseudo vma that just contains the policy */
+ 	pvma.vm_start = 0;
+@@ -948,10 +952,12 @@ static struct page *shmem_alloc_page(gfp_t gfp,
+ 	pvma.vm_ops = NULL;
+ 	pvma.vm_policy = mpol_shared_policy_lookup(&info->policy, index);
+ 
+-	/*
+-	 * alloc_page_vma() will drop the shared policy reference
+-	 */
+-	return alloc_page_vma(gfp, &pvma, 0);
++	page = alloc_page_vma(gfp, &pvma, 0);
++
++	/* Drop reference taken by mpol_shared_policy_lookup() */
++	mpol_cond_put(pvma.vm_policy);
++
++	return page;
+ }
+ #else /* !CONFIG_NUMA */
+ #ifdef CONFIG_TMPFS
-@@ -2219,6 +2219,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
+@@ -2219,6 +2225,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
 static int shmem_xattr_validate(const char *name)
 {
 	struct { const char *prefix; size_t len; } arr[] = {
@@ -75298,7 +75429,7 @@ index 31e1506..dbf3647 100644
 	{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
 	{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
 };
-@@ -2272,6 +2277,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
+@@ -2272,6 +2283,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
 	if (err)
 		return err;
@@ -75314,7 +75445,7 @@ index 31e1506..dbf3647 100644
 	if (size == 0)
 		value = "";	/* empty EA, do not remove */
-@@ -2606,8 +2620,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
+@@ -2606,8 +2626,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
 	int err = -ENOMEM;
 
 	/* Round up to L1_CACHE_BYTES to resist false sharing */
diff --git a/3.6.9/4425-tmpfs-user-namespace.patch b/3.6.9/4425-tmpfs-user-namespace.patch
index b48d735..f5f4f20 100644
--- a/3.6.9/4425-tmpfs-user-namespace.patch
+++ b/3.6.9/4425-tmpfs-user-namespace.patch
@@ -13,12 +13,11 @@ Signed-off-by: Anthony G. Basile <blueness@gentoo.org>
 ---
 diff --git a/mm/shmem.c b/mm/shmem.c
-index 67afba5..697a181 100644
 --- a/mm/shmem.c
 +++ b/mm/shmem.c
-@@ -2208,7 +2208,8 @@ static int shmem_xattr_validate(const char *name)
- {
- 	struct { const char *prefix; size_t len; } arr[] = {
+@@ -2231,7 +2231,8 @@
+ #endif
+ 	{ XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
 -	{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
 +	{ XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN },
diff --git a/3.6.9/4450_grsec-kconfig-default-gids.patch b/3.6.9/4450_grsec-kconfig-default-gids.patch
index d4b0b7e..5c5b013 100644
--- a/3.6.9/4450_grsec-kconfig-default-gids.patch
+++ b/3.6.9/4450_grsec-kconfig-default-gids.patch
@@ -16,7 +16,7 @@ from shooting themselves in the foot.
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2012-10-13 09:51:35.000000000 -0400
 +++ b/grsecurity/Kconfig	2012-10-13 09:52:32.000000000 -0400
-@@ -522,7 +522,7 @@
+@@ -539,7 +539,7 @@
 config GRKERNSEC_AUDIT_GID
 	int "GID for auditing"
 	depends on GRKERNSEC_AUDIT_GROUP
@@ -25,7 +25,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 
 config GRKERNSEC_EXECLOG
 	bool "Exec logging"
-@@ -742,7 +742,7 @@
+@@ -759,7 +759,7 @@
 config GRKERNSEC_TPE_UNTRUSTED_GID
 	int "GID for TPE-untrusted users"
 	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
@@ -34,7 +34,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 	help
 	  Setting this GID determines what group TPE restrictions will be
 	  *enabled* for. If the sysctl option is enabled, a sysctl option
-@@ -751,7 +751,7 @@
+@@ -768,7 +768,7 @@
 config GRKERNSEC_TPE_TRUSTED_GID
 	int "GID for TPE-trusted users"
 	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
@@ -43,7 +43,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 	help
 	  Setting this GID determines what group TPE restrictions will be
 	  *disabled* for. If the sysctl option is enabled, a sysctl option
-@@ -826,7 +826,7 @@
+@@ -843,7 +843,7 @@
 config GRKERNSEC_SOCKET_ALL_GID
 	int "GID to deny all sockets for"
 	depends on GRKERNSEC_SOCKET_ALL
@@ -52,7 +52,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 	help
 	  Here you can choose the GID to disable socket access for. Remember to
 	  add the users you want socket access disabled for to the GID
-@@ -847,7 +847,7 @@
+@@ -864,7 +864,7 @@
 config GRKERNSEC_SOCKET_CLIENT_GID
 	int "GID to deny client sockets for"
 	depends on GRKERNSEC_SOCKET_CLIENT
@@ -61,7 +61,7 @@ diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 	help
 	  Here you can choose the GID to disable client socket access for.
 	  Remember to add the users you want client socket access disabled for to
-@@ -865,7 +865,7 @@
+@@ -882,7 +882,7 @@
 config GRKERNSEC_SOCKET_SERVER_GID
 	int "GID to deny server sockets for"
 	depends on GRKERNSEC_SOCKET_SERVER
diff --git a/3.6.9/4465_selinux-avc_audit-log-curr_ip.patch b/3.6.9/4465_selinux-avc_audit-log-curr_ip.patch
index 4fb50f4..217480f 100644
--- a/3.6.9/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.6.9/4465_selinux-avc_audit-log-curr_ip.patch
@@ -28,7 +28,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
 diff -Naur a/grsecurity/Kconfig b/grsecurity/Kconfig
 --- a/grsecurity/Kconfig	2011-04-17 19:25:54.000000000 -0400
 +++ b/grsecurity/Kconfig	2011-04-17 19:32:53.000000000 -0400
-@@ -924,6 +924,27 @@
+@@ -941,6 +941,27 @@
 menu "Logging Options"
 	depends on GRKERNSEC