author     Anthony G. Basile <blueness@gentoo.org>	2012-12-26 11:12:15 -0500
committer  Anthony G. Basile <blueness@gentoo.org>	2012-12-26 11:12:15 -0500
commit     d92b42aa041e4af32620d59d11a277a259ca74fd (patch)
tree       37e3d9f25e8ba2ace7ae9104f1534d5d72daede9
parent     Grsec/PaX: 2.9.1-{2.6.32.60,3.2.35,3.7.0}-201212151422 (diff)
Grsec/PaX: 2.9.1-3.7.1-201212171734
-rw-r--r--	3.7.1/0000_README (renamed from 3.7.0/0000_README)	|   2
-rw-r--r--	3.7.1/4420_grsecurity-2.9.1-3.7.1-201212171734.patch (renamed from 3.7.0/4420_grsecurity-2.9.1-3.7.0-201212151422.patch)	| 133
-rw-r--r--	3.7.1/4425_grsec_remove_EI_PAX.patch (new)	|  19
-rw-r--r--	3.7.1/4430_grsec-remove-localversion-grsec.patch (renamed from 3.7.0/4430_grsec-remove-localversion-grsec.patch)	|   0
-rw-r--r--	3.7.1/4435_grsec-mute-warnings.patch (renamed from 3.7.0/4435_grsec-mute-warnings.patch)	|   0
-rw-r--r--	3.7.1/4440_grsec-remove-protected-paths.patch (renamed from 3.7.0/4440_grsec-remove-protected-paths.patch)	|   0
-rw-r--r--	3.7.1/4450_grsec-kconfig-default-gids.patch (renamed from 3.7.0/4450_grsec-kconfig-default-gids.patch)	|   0
-rw-r--r--	3.7.1/4465_selinux-avc_audit-log-curr_ip.patch (renamed from 3.7.0/4465_selinux-avc_audit-log-curr_ip.patch)	|   0
-rw-r--r--	3.7.1/4470_disable-compat_vdso.patch (renamed from 3.7.0/4470_disable-compat_vdso.patch)	|   0
9 files changed, 87 insertions, 67 deletions
diff --git a/3.7.0/0000_README b/3.7.1/0000_README
index c9d0060..84caa16 100644
--- a/3.7.0/0000_README
+++ b/3.7.1/0000_README
@@ -2,7 +2,7 @@ README
 -----------------------------------------------------------------------------
 Individual Patch Descriptions:
 -----------------------------------------------------------------------------
-Patch:	4420_grsecurity-2.9.1-3.7.0-201212151422.patch
+Patch:	4420_grsecurity-2.9.1-3.7.1-201212171734.patch
 From:	http://www.grsecurity.net
 Desc:	hardened-sources base patch from upstream grsecurity
diff --git a/3.7.0/4420_grsecurity-2.9.1-3.7.0-201212151422.patch b/3.7.1/4420_grsecurity-2.9.1-3.7.1-201212171734.patch
index aaefb83..18a4557 100644
--- a/3.7.0/4420_grsecurity-2.9.1-3.7.0-201212151422.patch
+++ b/3.7.1/4420_grsecurity-2.9.1-3.7.1-201212171734.patch
@@ -251,7 +251,7 @@ index 9776f06..18b1856 100644
 	pcd.		[PARIDE]
 
 diff --git a/Makefile b/Makefile
-index 540f7b2..c823fc5 100644
+index fbf84a4..339f6de 100644
 --- a/Makefile
 +++ b/Makefile
 @@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
@@ -20650,7 +20650,7 @@ index d017df3..61ae42e 100644
 	local_irq_disable();
 
 diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
-index f858159..491d386 100644
+index f858159..4ab7dba 100644
 --- a/arch/x86/kvm/vmx.c
 +++ b/arch/x86/kvm/vmx.c
 @@ -1332,7 +1332,11 @@ static void reload_tss(void)
@@ -20701,15 +20701,16 @@ index f858159..491d386 100644
 	vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS);  /* 22.2.4 */
 
 #ifdef CONFIG_X86_64
-@@ -3734,6 +3748,7 @@ static void vmx_set_constant_host_state(void)
+@@ -3733,7 +3747,7 @@ static void vmx_set_constant_host_state(void)
+	native_store_idt(&dt);
 	vmcs_writel(HOST_IDTR_BASE, dt.address);   /* 22.2.4 */
 
-	vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
+	vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
 
 	rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
 	vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
@@ -20722,7 +20723,7 @@ index f858159..491d386 100644
 		"jmp 2f \n\t"
 		"1: " __ex(ASM_VMX_VMRESUME) "\n\t"
 		"2: "
-@@ -6279,6 +6294,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6279,6 +6293,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/* Save guest registers, load host registers, keep flags */
 	"mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
 	"pop %0 \n\t"
@@ -20734,7 +20735,7 @@ index f858159..491d386 100644
 #endif
 	[cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
 	[wordsize]"i"(sizeof(ulong))
-@@ -6331,6 +6352,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6331,6 +6351,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	: "cc", "memory"
 #ifdef CONFIG_X86_64
 	, "rax", "rbx", "rdi", "rsi"
@@ -20743,7 +20744,7 @@ index f858159..491d386 100644
 	if (debugctlmsr)
 		update_debugctlmsr(debugctlmsr);
 
-@@ -6344,7 +6370,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6344,7 +6369,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	/*
 	 * The sysexit path does not restore ds/es, so we must set them to
 	 * a reasonable value ourselves.
-@@ -6353,8 +6379,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
+@@ -6353,8 +6378,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
 	 * may be executed in interrupt context, which saves and restore segments
 	 * around it, nullifying its effect.
 	 */
@@ -51614,7 +51615,7 @@ index 0000000..1b9afa9
 +endif
 
 diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
 new file mode 100644
-index 0000000..4428c82
+index 0000000..b1810d9
 --- /dev/null
 +++ b/grsecurity/gracl.c
 @@ -0,0 +1,4056 @@
@@ -52007,7 +52008,7 @@ index 0000000..4428c82
 +struct acl_subject_label *
 +lookup_subject_map(const struct acl_subject_label *userp)
 +{
-+	unsigned int index = shash(userp, subj_map_set.s_size);
++	unsigned int index = gr_shash(userp, subj_map_set.s_size);
 +	struct subject_map *match;
 +
 +	match = subj_map_set.s_hash[index];
@@ -52024,7 +52025,7 @@ index 0000000..4428c82
 +static void
 +insert_subj_map_entry(struct subject_map *subjmap)
 +{
-+	unsigned int index = shash(subjmap->user, subj_map_set.s_size);
++	unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
 +	struct subject_map **curr;
 +
 +	subjmap->prev = NULL;
@@ -52043,7 +52044,7 @@ index 0000000..4428c82
 +lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
 +		      const gid_t gid)
 +{
-+	unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++	unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
 +	struct acl_role_label *match;
 +	struct role_allowed_ip *ipp;
 +	unsigned int x;
@@ -52066,7 +52067,7 @@ index 0000000..4428c82
 +found:
 +	if (match == NULL) {
 +	  try_group:
-+		index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++		index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
 +		match = acl_role_set.r_hash[index];
 +
 +		while (match) {
@@ -52112,7 +52113,7 @@ index 0000000..4428c82
 +lookup_acl_subj_label(const ino_t ino, const dev_t dev,
 +		      const struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -52132,7 +52133,7 @@ index 0000000..4428c82
 +lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
 +			      const struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(ino, dev, role->subj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -52152,7 +52153,7 @@ index 0000000..4428c82
 +lookup_acl_obj_label(const ino_t ino, const dev_t dev,
 +		     const struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -52172,7 +52173,7 @@ index 0000000..4428c82
 +lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
 +			    const struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(ino, dev, subj->obj_hash_size);
++	unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -52246,7 +52247,7 @@ index 0000000..4428c82
 +static struct inodev_entry *
 +lookup_inodev_entry(const ino_t ino, const dev_t dev)
 +{
-+	unsigned int index = fhash(ino, dev, inodev_set.i_size);
++	unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
 +	struct inodev_entry *match;
 +
 +	match = inodev_set.i_hash[index];
@@ -52260,7 +52261,7 @@ index 0000000..4428c82
 +static void
 +insert_inodev_entry(struct inodev_entry *entry)
 +{
-+	unsigned int index = fhash(entry->nentry->inode, entry->nentry->device,
++	unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
 +				inodev_set.i_size);
 +	struct inodev_entry **curr;
 +
@@ -52280,7 +52281,7 @@ index 0000000..4428c82
 +__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
 +{
 +	unsigned int index =
-+		rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++		gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
 +	struct acl_role_label **curr;
 +	struct acl_role_label *tmp, *tmp2;
 +
@@ -52413,7 +52414,7 @@ index 0000000..4428c82
 +			struct acl_subject_label *subj)
 +{
 +	unsigned int index =
-+		fhash(obj->inode, obj->device, subj->obj_hash_size);
++		gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
 +	struct acl_object_label **curr;
 +
 +
@@ -52433,7 +52434,7 @@ index 0000000..4428c82
 +insert_acl_subj_label(struct acl_subject_label *obj,
 +		      struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size);
++	unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
 +	struct acl_subject_label **curr;
 +
 +	obj->prev = NULL;
@@ -54297,7 +54298,7 @@ index 0000000..4428c82
 +	    const ino_t newinode, const dev_t newdevice,
 +	    struct acl_subject_label *subj)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
 +	struct acl_object_label *match;
 +
 +	match = subj->obj_hash[index];
@@ -54336,7 +54337,7 @@ index 0000000..4428c82
 +	    const ino_t newinode, const dev_t newdevice,
 +	    struct acl_role_label *role)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
 +	struct acl_subject_label *match;
 +
 +	match = role->subj_hash[index];
@@ -54374,7 +54375,7 @@ index 0000000..4428c82
 +update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
 +		    const ino_t newinode, const dev_t newdevice)
 +{
-+	unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size);
++	unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
 +	struct inodev_entry *match;
 +
 +	match = inodev_set.i_hash[index];
@@ -61755,7 +61756,7 @@ index d0a7967..63c4c47 100644
 {
 diff --git a/include/linux/gracl.h b/include/linux/gracl.h
 new file mode 100644
-index 0000000..c938b1f
+index 0000000..ebe6d72
 --- /dev/null
 +++ b/include/linux/gracl.h
 @@ -0,0 +1,319 @@
@@ -62019,25 +62020,25 @@ index 0000000..c938b1f
 +   Shift/add algorithm with modulus of table size and an XOR*/
 +
 +static __inline__ unsigned int
-+rhash(const uid_t uid, const __u16 type, const unsigned int sz)
++gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
 +{
 +	return ((((uid + type) << (16 + type)) ^ uid) % sz);
 +}
 +
 + static __inline__ unsigned int
-+shash(const struct acl_subject_label *userp, const unsigned int sz)
++gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
 +{
 +	return ((const unsigned long)userp % sz);
 +}
 +
 +static __inline__ unsigned int
-+fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
++gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
 +{
 +	return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
 +}
 +
 +static __inline__ unsigned int
-+nhash(const char *name, const __u16 len, const unsigned int sz)
++gr_nhash(const char *name, const __u16 len, const unsigned int sz)
 +{
 +	return full_name_hash((const unsigned char *)name, len) % sz;
 +}
@@ -69684,7 +69685,7 @@ index aaa7b9f..055ff1e 100644
 	for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
 		per_cpu(rcu_torture_count, cpu)[i] = 0;
 diff --git a/kernel/rcutree.c b/kernel/rcutree.c
-index 74df86b..e0702bb 100644
+index 2682295..0f2297e 100644
 --- a/kernel/rcutree.c
 +++ b/kernel/rcutree.c
 @@ -348,9 +348,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
@@ -69791,7 +69792,7 @@ index 74df86b..e0702bb 100644
 	}
 
 /*
-@@ -1830,7 +1830,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
+@@ -1831,7 +1831,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
 	}
 	smp_mb(); /* List handling before counting for rcu_barrier(). */
 	rdp->qlen_lazy -= count_lazy;
-
+
 	rdp->n_cbs_invoked += count;
 
 	/* Reinstate batch limit if we have worked down the excess. */
-@@ -2023,7 +2023,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
+@@ -2024,7 +2024,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
 /*
  * Do RCU core processing for the current CPU.
  */
-
+
 {
 	struct rcu_state *rsp;
 
-@@ -2135,7 +2135,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
+@@ -2136,7 +2136,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
 		local_irq_restore(flags);
 		return;
 	}
-
+
 	if (lazy)
 		rdp->qlen_lazy++;
 	else
-@@ -2249,8 +2249,8 @@ void synchronize_rcu_bh(void)
+@@ -2250,8 +2250,8 @@ void synchronize_rcu_bh(void)
 }
 EXPORT_SYMBOL_GPL(synchronize_rcu_bh);
-
-
+
+
 static int synchronize_sched_expedited_cpu_stop(void *data)
 {
-@@ -2311,7 +2311,7 @@ void synchronize_sched_expedited(void)
+@@ -2312,7 +2312,7 @@ void synchronize_sched_expedited(void)
 	int firstsnap, s, snap, trycount = 0;
 
 	/* Note that atomic_inc_return() implies full memory barrier. */
-
+
 	get_online_cpus();
 	WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
-@@ -2333,7 +2333,7 @@ void synchronize_sched_expedited(void)
+@@ -2334,7 +2334,7 @@ void synchronize_sched_expedited(void)
 	}
 
 	/* Check to see if someone else did our work for us. */
-
+
 	if (UINT_CMP_GE((unsigned)s, (unsigned)firstsnap)) {
 		smp_mb(); /* ensure test happens before caller kfree */
 		return;
-@@ -2348,7 +2348,7 @@ void synchronize_sched_expedited(void)
+@@ -2349,7 +2349,7 @@ void synchronize_sched_expedited(void)
 	 * grace period works for us.
 	 */
 	get_online_cpus();
-
+
 	smp_mb(); /* ensure read is before try_stop_cpus(). */
 }
-@@ -2359,12 +2359,12 @@ void synchronize_sched_expedited(void)
+@@ -2360,12 +2360,12 @@ void synchronize_sched_expedited(void)
 	 * than we did beat us to the punch.
 	 */
 	do {
-
-
+
+
 	put_online_cpus();
 }
-@@ -2538,7 +2538,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
+@@ -2539,7 +2539,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	 * ACCESS_ONCE() to prevent the compiler from speculating
 	 * the increment to precede the early-exit check.
 	 */
-
+
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
 	_rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
 	smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
-@@ -2580,7 +2580,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
+@@ -2581,7 +2581,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 	/* Increment ->n_barrier_done to prevent duplicate work. */
 	smp_mb(); /* Keep increment after above mechanism. */
-
+
 	WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
 	_rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
 	smp_mb(); /* Keep increment before caller's subsequent code. */
-@@ -2625,10 +2625,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
+@@ -2626,10 +2626,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
 	rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
 	init_callback_list(rdp);
 	rdp->qlen_lazy = 0;
-
+
 #ifdef CONFIG_RCU_USER_QS
 	WARN_ON_ONCE(rdp->dynticks->in_user);
 #endif
-@@ -2663,8 +2663,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
+@@ -2664,8 +2664,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
 	rdp->blimit = blimit;
 	init_callback_list(rdp);  /* Re-enable callbacks on this CPU. */
 	rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
@@ -71034,7 +71035,7 @@ index c0bd030..62a1927 100644
 		ret = -EIO;
 	bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
 diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
-index 9dcf15d..9bab704 100644
+index 51b7159..7f83cf8 100644
 --- a/kernel/trace/ftrace.c
 +++ b/kernel/trace/ftrace.c
 @@ -1874,12 +1874,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
@@ -71078,7 +71079,7 @@ index 9dcf15d..9bab704 100644
 	start_pg = ftrace_allocate_pages(count);
 	if (!start_pg)
 diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
-index b979426..c54ff13 100644
+index 4cb5e51..e7e05d9 100644
 --- a/kernel/trace/ring_buffer.c
 +++ b/kernel/trace/ring_buffer.c
 @@ -346,9 +346,9 @@ struct buffer_data_page {
@@ -71150,7 +71151,7 @@ index b979426..c54ff13 100644
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 	}
-@@ -1903,7 +1903,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -1905,7 +1905,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
 		 * it is our responsibility to update
 		 * the counters.
 		 */
-
+
 		local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
 
 		/*
-@@ -2053,7 +2053,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2055,7 +2055,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		if (tail == BUF_PAGE_SIZE)
 			tail_page->real_end = 0;
-
+
 		return;
 	}
-@@ -2088,7 +2088,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2090,7 +2090,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 		rb_event_set_padding(event);
 
 		/* Set the write back to the previous setting */
-
+
 		return;
 	}
 
-@@ -2100,7 +2100,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2102,7 +2102,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	/* Set write to end of buffer */
 	length = (tail + length) - BUF_PAGE_SIZE;
-
+
 }
 
 /*
-@@ -2126,7 +2126,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2128,7 +2128,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 	 * about it.
 	 */
 	if (unlikely(next_page == commit_page)) {
-
+
 		goto out_reset;
 	}
-@@ -2180,7 +2180,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2182,7 +2182,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
 			  cpu_buffer->tail_page) &&
 			 (cpu_buffer->commit_page ==
 			  cpu_buffer->reader_page))) {
-
+
 			goto out_reset;
 		}
 	}
-@@ -2228,7 +2228,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2230,7 +2230,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 		length += RB_LEN_TIME_EXTEND;
 
 	tail_page = cpu_buffer->tail_page;
-
+
 	/* set write to only the index of the write */
 	write &= RB_WRITE_MASK;
-@@ -2245,7 +2245,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2247,7 +2247,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
 	kmemcheck_annotate_bitfield(event, bitfield);
 	rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
-
+
 	/*
 	 * If this is the first commit on the page, then update
-@@ -2278,7 +2278,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2280,7 +2280,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 	if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
 		unsigned long write_mask =
-
+
 		unsigned long event_length = rb_event_length(event);
 		/*
 		 * This is on the tail page. It is possible that
-@@ -2288,7 +2288,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2290,7 +2290,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
 		 */
 		old_index += write_mask;
 		new_index += write_mask;
-
+
 		if (index == old_index) {
 			/* update counters */
 			local_sub(event_length, &cpu_buffer->entries_bytes);
-@@ -2627,7 +2627,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2629,7 +2629,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 	/* Do the likely case first */
 	if (likely(bpage->page == (void *)addr)) {
-
+
 		return;
 	}
-@@ -2639,7 +2639,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
+@@ -2641,7 +2641,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
 	start = bpage;
 	do {
 		if (bpage->page == (void *)addr) {
-
+
 			return;
 		}
 		rb_inc_page(cpu_buffer, &bpage);
-@@ -2921,7 +2921,7 @@ static inline unsigned long
+@@ -2923,7 +2923,7 @@ static inline unsigned long
 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
 {
 	return local_read(&cpu_buffer->entries) -
-
+
 }
 
 /**
-@@ -3008,7 +3008,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3011,7 +3011,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-
+
 	return ret;
 }
-@@ -3029,7 +3029,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
+@@ -3032,7 +3032,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
 		return 0;
 
 	cpu_buffer = buffer->buffers[cpu];
-
+
 	return ret;
 }
-@@ -3074,7 +3074,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
+@@ -3077,7 +3077,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
 	/* if you care about this being correct, lock the buffer */
 	for_each_buffer_cpu(buffer, cpu) {
 		cpu_buffer = buffer->buffers[cpu];
-
+
 	}
 
 	return overruns;
-@@ -3250,8 +3250,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3253,8 +3253,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	/*
 	 * Reset the reader page to size zero.
 	 */
-
-
+
+
 	local_set(&cpu_buffer->reader_page->page->commit, 0);
 	cpu_buffer->reader_page->real_end = 0;
-@@ -3283,7 +3283,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3288,7 +3288,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
 	 * want to compare with the last_overrun.
 	 */
 	smp_mb();
-
+
 	/*
 	 * Here's the tricky part.
-@@ -3848,8 +3848,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3858,8 +3858,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	cpu_buffer->head_page
 		= list_entry(cpu_buffer->pages, struct buffer_page, list);
-
-
+
+
 	local_set(&cpu_buffer->head_page->page->commit, 0);
 
 	cpu_buffer->head_page->read = 0;
-@@ -3859,14 +3859,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
+@@ -3869,14 +3869,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
 	INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
 	INIT_LIST_HEAD(&cpu_buffer->new_pages);
-
-
+
+
 	local_set(&cpu_buffer->entries, 0);
 	local_set(&cpu_buffer->committing, 0);
 	local_set(&cpu_buffer->commits, 0);
-@@ -4269,8 +4269,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
+@@ -4279,8 +4279,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
 		rb_init_page(bpage);
 		bpage = reader->page;
 		reader->page = *data_page;
diff --git a/3.7.1/4425_grsec_remove_EI_PAX.patch b/3.7.1/4425_grsec_remove_EI_PAX.patch
new file mode 100644
index 0000000..97e6951
--- /dev/null
+++ b/3.7.1/4425_grsec_remove_EI_PAX.patch
@@ -0,0 +1,19 @@
+From: Anthony G. Basile <blueness@gentoo.org>
+
+Deprecate EI_PAX.
+
+X-Gentoo-Bug: 445600
+X-Gentoo-Bug-URL: https://bugs.gentoo.org/445600
+
+diff -Nuar linux-3.7.1-hardened.orig/security/Kconfig linux-3.7.1-hardened/security/Kconfig
+--- linux-3.7.1-hardened.orig/security/Kconfig	2012-12-26 08:39:29.000000000 -0500
++++ linux-3.7.1-hardened/security/Kconfig	2012-12-26 09:05:44.000000000 -0500
+@@ -263,7 +263,7 @@
+
+ config PAX_EI_PAX
+ 	bool 'Use legacy ELF header marking'
+-	default y if GRKERNSEC_CONFIG_AUTO
++	depends on BROKEN
+ 	help
+ 	  Enabling this option will allow you to control PaX features on
+ 	  a per executable basis via the 'chpax' utility available at
diff --git a/3.7.0/4430_grsec-remove-localversion-grsec.patch b/3.7.1/4430_grsec-remove-localversion-grsec.patch
index 31cf878..31cf878 100644
--- a/3.7.0/4430_grsec-remove-localversion-grsec.patch
+++ b/3.7.1/4430_grsec-remove-localversion-grsec.patch
diff --git a/3.7.0/4435_grsec-mute-warnings.patch b/3.7.1/4435_grsec-mute-warnings.patch
index e1a7a3c..e1a7a3c 100644
--- a/3.7.0/4435_grsec-mute-warnings.patch
+++ b/3.7.1/4435_grsec-mute-warnings.patch
diff --git a/3.7.0/4440_grsec-remove-protected-paths.patch b/3.7.1/4440_grsec-remove-protected-paths.patch
index 637934a..637934a 100644
--- a/3.7.0/4440_grsec-remove-protected-paths.patch
+++ b/3.7.1/4440_grsec-remove-protected-paths.patch
diff --git a/3.7.0/4450_grsec-kconfig-default-gids.patch b/3.7.1/4450_grsec-kconfig-default-gids.patch
index 5c5b013..5c5b013 100644
--- a/3.7.0/4450_grsec-kconfig-default-gids.patch
+++ b/3.7.1/4450_grsec-kconfig-default-gids.patch
diff --git a/3.7.0/4465_selinux-avc_audit-log-curr_ip.patch b/3.7.1/4465_selinux-avc_audit-log-curr_ip.patch
index 217480f..217480f 100644
--- a/3.7.0/4465_selinux-avc_audit-log-curr_ip.patch
+++ b/3.7.1/4465_selinux-avc_audit-log-curr_ip.patch
diff --git a/3.7.0/4470_disable-compat_vdso.patch b/3.7.1/4470_disable-compat_vdso.patch
index d32044a..d32044a 100644
--- a/3.7.0/4470_disable-compat_vdso.patch
+++ b/3.7.1/4470_disable-compat_vdso.patch
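Aside from the version bump and the EI_PAX deprecation, the only systematic change in this revision is the rename of the gracl hash helpers (rhash, shash, fhash, nhash become gr_rhash, gr_shash, gr_fhash, gr_nhash in include/linux/gracl.h, with every caller in grsecurity/gracl.c updated to match). The bodies themselves are unchanged: a shift/add mix reduced modulo the table size, used to pick a bucket in the role, subject, object, and inode/device hash tables. The following minimal userspace sketch reproduces three of the renamed helpers so their behavior can be inspected outside the kernel. The hash expressions are copied from the gracl.h hunk above; the GR_ROLE_USER value, the void pointer parameter of gr_shash (the kernel version takes a struct acl_subject_label pointer), and the demo main() are assumptions for illustration only, not part of the patch.

	/* build: cc -std=c99 -o grhash grhash.c */
	#include <stdio.h>
	#include <stdint.h>
	#include <sys/types.h>   /* uid_t, ino_t, dev_t */

	#define GR_ROLE_USER 0x0002   /* hypothetical value, demo only */

	/* role bucket: shift/add algorithm with modulus of table size and an XOR */
	static inline unsigned int
	gr_rhash(const uid_t uid, const uint16_t type, const unsigned int sz)
	{
		return ((((uid + type) << (16 + type)) ^ uid) % sz);
	}

	/* subject bucket: hashes the userland pointer value itself */
	static inline unsigned int
	gr_shash(const void *userp, const unsigned int sz)
	{
		return ((unsigned long)userp % sz);
	}

	/* file bucket: mixes inode and device numbers into one index */
	static inline unsigned int
	gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
	{
		return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
	}

	int main(void)
	{
		/* bucket indices for a few inode/device pairs in a 256-slot
		 * table, mirroring how subj_hash_size/obj_hash_size are used */
		for (ino_t ino = 1; ino <= 4; ino++)
			printf("gr_fhash(%lu, 8, 256) = %u\n",
			       (unsigned long)ino, gr_fhash(ino, 8, 256));
		printf("gr_rhash(1000, GR_ROLE_USER, 256) = %u\n",
		       gr_rhash(1000, GR_ROLE_USER, 256));
		return 0;
	}

The rename is purely mechanical (the gr_ prefix keeps these inlines out of the way of identically named symbols elsewhere in the kernel), which is why every gracl.c hunk in this diff is a one-line substitution at a call site.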