author     Joshua Brindle <method@gentoo.org>  2004-04-16 04:18:29 +0000
committer  Joshua Brindle <method@gentoo.org>  2004-04-16 04:18:29 +0000
commit     a362f2b8ee6b16f17614a066c865423db4f4d55d (patch)
tree       0016296a477bf6fa6ffc4e947e258b64347c8afa /src/kernel
parent     fix has_version info #47992 (diff)
2.6.5 baby
Diffstat (limited to 'src/kernel')
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/0000_README  60
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/1000_grsecurity-2.0-2.6.5.patch  21344
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/1300_linux-2.6.4-selinux-hooks.patch  137
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/1305_linux-2.6.4-selinux-ipaddr.patch  14
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/1310_linux-2.6.5-extra_sec_ops.patch  63
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/1315_linux-2.6.5-selinux.patch  968
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/2005_modules_off-2.6.3.patch  75
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/2010_tcp-stealth.patch  184
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/2015_tcp-nmap-freak.patch  130
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/3005_netdev-random-core-2.6.3.patch  283
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/3010_netdev-random-drivers-2.6.5.patch  2346
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-4.5/4005_CAN-2004-0109.patch  88
12 files changed, 25692 insertions, 0 deletions
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/0000_README b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/0000_README
new file mode 100644
index 0000000000..18e7799eb8
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/0000_README
@@ -0,0 +1,60 @@
+README
+-------------------------------------------------------------------------------
+This patchset will form the 2.6 series of hardened-sources.
+It includes both SELinux and GRSecurity, as well as enhancements to each.
+Also included are additional hardening features useful in either system
+(note that as of this release, GRSecurity and SELinux can also be used
+in tandem).
+
+
+
+Patchset Numbering Scheme
+-------------------------------------------------------------------------------
+1XXX Base patches
+ 2XX GRSecurity extras
+ 3XX SELinux extras
+2XXX Universal hardening features
+3XXX Netdevrand
+
+Individual Patch Descriptions:
+-------------------------------------------------------------------------------
+Patch: 1000_grsecurity-2.0-2.6.5.patch
+from: Brad Spengler, http://grsecurity.net
+desc: GRSecurity for 2.6.5
+
+Patch: 1300_linux-2.6.4-selinux-hooks.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: PaX hooks for SELinux
+
+Patch: 1305_linux-2.6.4-selinux-ipaddr.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: Support for SELinux to log the IP address from which an abuse originated
+
+Patch: 1310_linux-2.6.5-extra_sec_ops.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: Adds additional secondary security ops to SELinux
+
+Patch: 1315_linux-2.6.5-selinux.patch
+from: NSA
+desc: Adds IPv6 support; changes not yet upstream
+
+Patch: 2005_modules_off-2.6.3.patch
+from: Michal Purzynski <albeiro@zeus.polsl.gliwice.pl>
+desc: Support for disabling module loading via sysctl
+
+Patch: 2010_tcp-stealth.patch
+from: Rediffed from WOLK 2.6 by Brandon Hale <tseng@gentoo.org>
+desc: Stealth TCP features
+
+Patch: 2015_tcp-nmap-freak.patch
+from: Rediffed from WOLK 2.6 by Brandon Hale <tseng@gentoo.org>
+desc: More stealth TCP features, targeted at blocking nmap SYN/FIN scans
+and OS detection
+
+Patch: 3005_netdev-random-core-2.6.3.patch
+from: Michal Purzynski <albeiro@zeus.polsl.gliwice.pl>
+desc: Core functionality for netdev random
+
+Patch: 3010_netdev-random-drivers-2.6.5.patch
+from: Michal Purzynski <albeiro@zeus.polsl.gliwice.pl>
+desc: Patch to allow network drivers to contribute to system entropy
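The 2005_modules_off patch listed above exposes its switch as a sysctl, so once the hardened kernel is booted it can be flipped from userspace like any other /proc/sys node. The sketch below is a minimal illustration only: it assumes a node named kernel/modules_disabled, while the real name is whatever 2005_modules_off-2.6.3.patch registers and may differ.

    /* disable_modules.c -- illustrative sketch; the sysctl path is an assumption */
    #include <stdio.h>
    #include <string.h>
    #include <errno.h>

    int main(void)
    {
        /* Hypothetical sysctl path; check the patch for the real name. */
        const char *node = "/proc/sys/kernel/modules_disabled";
        FILE *f = fopen(node, "w");

        if (!f) {
            fprintf(stderr, "open %s: %s\n", node, strerror(errno));
            return 1;
        }
        fputs("1\n", f);        /* ask the kernel to refuse further module loads */
        return fclose(f) ? 1 : 0;
    }

The same request could be made from /etc/sysctl.conf once the correct node name is confirmed.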
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1000_grsecurity-2.0-2.6.5.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1000_grsecurity-2.0-2.6.5.patch
new file mode 100644
index 0000000000..3953dfbc13
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1000_grsecurity-2.0-2.6.5.patch
@@ -0,0 +1,21344 @@
+diff -urN linux-2.6.5/Makefile linux-2.6.5-new/Makefile
+--- linux-2.6.5/Makefile 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/Makefile 2004-04-14 14:51:28.000000000 -0400
+@@ -424,7 +424,7 @@
+ CFLAGS := $(CPPFLAGS) $(CFLAGS)
+ AFLAGS := $(CPPFLAGS) $(AFLAGS)
+
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ grsecurity/
+
+ SUBDIRS += $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+diff -urN linux-2.6.5/arch/alpha/kernel/osf_sys.c linux-2.6.5-new/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.5/arch/alpha/kernel/osf_sys.c 2004-04-03 22:36:11.000000000 -0500
++++ linux-2.6.5-new/arch/alpha/kernel/osf_sys.c 2004-04-14 09:15:11.000000000 -0400
+@@ -37,6 +37,7 @@
+ #include <linux/namei.h>
+ #include <linux/uio.h>
+ #include <linux/vfs.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/fpu.h>
+ #include <asm/io.h>
+@@ -179,6 +180,11 @@
+ struct file *file = NULL;
+ unsigned long ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ #if 0
+ if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
+ printk("%s: unimplemented OSF mmap flags %04lx\n",
+@@ -189,6 +195,13 @@
+ if (!file)
+ goto out;
+ }
++
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ ret = -EACCES;
++ goto out;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap(file, addr, len, prot, flags, off);
+@@ -1295,6 +1308,10 @@
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1302,8 +1319,16 @@
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++
++ addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ addr += current->mm->delta_mmap;
++#endif
++
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+diff -urN linux-2.6.5/arch/alpha/kernel/ptrace.c linux-2.6.5-new/arch/alpha/kernel/ptrace.c
+--- linux-2.6.5/arch/alpha/kernel/ptrace.c 2004-04-03 22:38:13.000000000 -0500
++++ linux-2.6.5-new/arch/alpha/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/user.h>
+ #include <linux/slab.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -288,6 +289,9 @@
+ if (!child)
+ goto out_notsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out;
+diff -urN linux-2.6.5/arch/alpha/mm/fault.c linux-2.6.5-new/arch/alpha/mm/fault.c
+--- linux-2.6.5/arch/alpha/mm/fault.c 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/arch/alpha/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -56,6 +57,142 @@
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->pc >= current->mm->start_code &&
++ regs->pc < current->mm->end_code)
++ {
++ if (regs->r26 == regs->pc)
++ return 1;
++
++ regs->pc += current->mm->delta_exec;
++ return 4;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long*)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U)== 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -133,8 +270,34 @@
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->pc, (void*)rdusp());
++ do_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -urN linux-2.6.5/arch/i386/Kconfig linux-2.6.5-new/arch/i386/Kconfig
+--- linux-2.6.5/arch/i386/Kconfig 2004-04-03 22:36:25.000000000 -0500
++++ linux-2.6.5-new/arch/i386/Kconfig 2004-04-14 09:15:11.000000000 -0400
+@@ -390,7 +390,7 @@
+
+ config X86_ALIGNMENT_16
+ bool
+- depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
++ depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
+ default y
+
+ config X86_GOOD_APIC
+diff -urN linux-2.6.5/arch/i386/kernel/apm.c linux-2.6.5-new/arch/i386/kernel/apm.c
+--- linux-2.6.5/arch/i386/kernel/apm.c 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/apm.c 2004-04-14 09:15:11.000000000 -0400
+@@ -597,19 +597,40 @@
+ int cpu;
+ struct desc_struct save_desc_40;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr3;
++#endif
++
+ cpus = apm_save_cpus();
+
+ cpu = get_cpu();
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
+ cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
+
++#ifndef CONFIG_PAX_KERNEXEC
+ local_save_flags(flags);
+ APM_DO_CLI;
++#endif
++
+ APM_DO_SAVE_SEGS;
+ apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
+ APM_DO_RESTORE_SEGS;
++
++#ifndef CONFIG_PAX_KERNEXEC
+ local_irq_restore(flags);
++#endif
++
+ cpu_gdt_table[cpu][0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+ apm_restore_cpus(cpus);
+
+@@ -639,20 +660,40 @@
+ int cpu;
+ struct desc_struct save_desc_40;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr3;
++#endif
+
+ cpus = apm_save_cpus();
+
+ cpu = get_cpu();
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
+ cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
+
++#ifndef CONFIG_PAX_KERNEXEC
+ local_save_flags(flags);
+ APM_DO_CLI;
++#endif
++
+ APM_DO_SAVE_SEGS;
+ error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
+ APM_DO_RESTORE_SEGS;
++
++#ifndef CONFIG_PAX_KERNEXEC
+ local_irq_restore(flags);
++#endif
++
+ cpu_gdt_table[smp_processor_id()][0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+ apm_restore_cpus(cpus);
+ return error;
+diff -urN linux-2.6.5/arch/i386/kernel/cpu/common.c linux-2.6.5-new/arch/i386/kernel/cpu/common.c
+--- linux-2.6.5/arch/i386/kernel/cpu/common.c 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/cpu/common.c 2004-04-14 09:15:11.000000000 -0400
+@@ -319,6 +319,10 @@
+ if (this_cpu->c_init)
+ this_cpu->c_init(c);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_NOVSYSCALL)
++ clear_bit(X86_FEATURE_SEP, c->x86_capability);
++#endif
++
+ /* Disable the PN if appropriate */
+ squash_the_stupid_serial_number(c);
+
+@@ -514,7 +518,7 @@
+ set_tss_desc(cpu,t);
+ cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
+ load_TR_desc();
+- load_LDT(&init_mm.context);
++ _load_LDT(&init_mm.context);
+
+ /* Set up doublefault TSS pointer in the GDT */
+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+diff -urN linux-2.6.5/arch/i386/kernel/entry.S linux-2.6.5-new/arch/i386/kernel/entry.S
+--- linux-2.6.5/arch/i386/kernel/entry.S 2004-04-03 22:36:52.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/entry.S 2004-04-14 09:15:11.000000000 -0400
+@@ -272,6 +272,11 @@
+ movl TI_FLAGS(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx
+ jne syscall_exit_work
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ /* if something modifies registers it must also disable sysexit */
+ movl EIP(%esp), %edx
+ movl OLDESP(%esp), %ecx
+@@ -299,6 +304,11 @@
+ movl TI_FLAGS(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx # current->work
+ jne syscall_exit_work
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ restore_all:
+ RESTORE_ALL
+
+@@ -591,7 +601,13 @@
+ jmp error_code
+
+ ENTRY(page_fault)
++#ifdef CONFIG_PAX_PAGEEXEC
++ ALIGN
++ pushl $pax_do_page_fault
++#else
+ pushl $do_page_fault
++#endif
++
+ jmp error_code
+
+ #ifdef CONFIG_X86_MCE
+@@ -606,7 +622,7 @@
+ pushl $do_spurious_interrupt_bug
+ jmp error_code
+
+-.data
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff -urN linux-2.6.5/arch/i386/kernel/head.S linux-2.6.5-new/arch/i386/kernel/head.S
+--- linux-2.6.5/arch/i386/kernel/head.S 2004-04-03 22:36:18.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/head.S 2004-04-14 09:15:11.000000000 -0400
+@@ -49,6 +49,12 @@
+
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -80,9 +86,9 @@
+
+ movl $(pg0 - __PAGE_OFFSET), %edi
+ movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
+- movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */
++ movl $0x067, %eax /* 0x067 = DIRTY+ACCESSED+PRESENT+RW+USER */
+ 10:
+- leal 0x007(%edi),%ecx /* Create PDE entry */
++ leal 0x067(%edi),%ecx /* Create PDE entry */
+ movl %ecx,(%edx) /* Store identity PDE entry */
+ movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
+ addl $4,%edx
+@@ -92,8 +98,8 @@
+ addl $0x1000,%eax
+ loop 11b
+ /* End condition: we must map up to and including INIT_MAP_BEYOND_END */
+- /* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */
+- leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp
++ /* bytes beyond the end of our own page tables; the +0x067 is the attribute bits */
++ leal (INIT_MAP_BEYOND_END+0x067)(%edi),%ebp
+ cmpl %ebp,%eax
+ jb 10b
+ movl %edi,(init_pg_tables_end - __PAGE_OFFSET)
+@@ -152,7 +158,7 @@
+ movl %cr0,%eax
+ orl $0x80000000,%eax
+ movl %eax,%cr0 /* ..and set paging (PG) bit */
+- ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
++ ljmp $__BOOT_CS,$1f + __KERNEL_TEXT_OFFSET /* Clear prefetch and normalize %eip */
+ 1:
+ /* Set up the stack pointer */
+ lss stack_start,%esp
+@@ -304,8 +310,6 @@
+ jmp L6 # main should never return here, but
+ # just in case, we know what happens.
+
+-ready: .byte 0
+-
+ /*
+ * We depend on ET to be correct. This checks for 287/387.
+ */
+@@ -353,13 +357,6 @@
+ jne rp_sidt
+ ret
+
+-ENTRY(stack_start)
+- .long init_thread_union+THREAD_SIZE
+- .long __BOOT_DS
+-
+-/* This is the default interrupt "handler" :-) */
+-int_msg:
+- .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+ ALIGN
+ ignore_int:
+ cld
+@@ -386,6 +383,47 @@
+ iret
+
+ /*
++ * This starts the data section. Note that the above is all
++ * in the text section because it has alignment requirements
++ * that we cannot fulfill any other way except for PaX ;-).
++ */
++.data
++ready: .byte 0
++
++/*
++ * swapper_pg_dir is the main page directory, address 0x00101000
++ *
++ * This is initialized to create an identity-mapping at 0 (for bootup
++ * purposes) and another mapping at virtual address PAGE_OFFSET. The
++ * values put here should be all invalid (zero); the valid
++ * entries are created dynamically at boot time.
++ *
++ * The code creates enough page tables to map 0-_end, the page tables
++ * themselves, plus INIT_MAP_BEYOND_END bytes; see comment at beginning.
++ */
++.section .data.swapper_pg_dir,"a",@progbits
++ENTRY(swapper_pg_dir)
++ .fill 1024,4,0
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(kernexec_pg_dir)
++ .fill 1024,4,0
++#endif
++
++.section .rodata.empty_zero_page,"a",@progbits
++ENTRY(empty_zero_page)
++ .fill 4096,1,0
++
++.section .rodata,"a",@progbits
++ENTRY(stack_start)
++ .long init_thread_union+THREAD_SIZE
++ .long __BOOT_DS
++
++/* This is the default interrupt "handler" :-) */
++int_msg:
++ .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
++
++/*
+ * The IDT and GDT 'descriptors' are a strange 48-bit object
+ * only used by the lidt and lgdt instructions. They are not
+ * like usual segment descriptors - they consist of a 16-bit
+@@ -417,47 +455,14 @@
+ .fill NR_CPUS-1,8,0 # space for the other GDT descriptors
+
+ /*
+- * swapper_pg_dir is the main page directory, address 0x00101000
+- *
+- * This is initialized to create an identity-mapping at 0 (for bootup
+- * purposes) and another mapping at virtual address PAGE_OFFSET. The
+- * values put here should be all invalid (zero); the valid
+- * entries are created dynamically at boot time.
+- *
+- * The code creates enough page tables to map 0-_end, the page tables
+- * themselves, plus INIT_MAP_BEYOND_END bytes; see comment at beginning.
+- */
+-.org 0x1000
+-ENTRY(swapper_pg_dir)
+- .fill 1024,4,0
+-
+-.org 0x2000
+-ENTRY(empty_zero_page)
+- .fill 4096,1,0
+-
+-.org 0x3000
+-/*
+- * Real beginning of normal "text" segment
+- */
+-ENTRY(stext)
+-ENTRY(_stext)
+-
+-/*
+- * This starts the data section. Note that the above is all
+- * in the text section because it has alignment requirements
+- * that we cannot fulfill any other way.
+- */
+-.data
+-
+-/*
+ * The boot_gdt_table must mirror the equivalent in setup.S and is
+ * used only for booting.
+ */
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt_table)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
+
+ /*
+ * The Global Descriptor Table contains 28 quadwords, per-CPU.
+@@ -468,7 +473,13 @@
+ .quad 0x0000000000000000 /* 0x0b reserved */
+ .quad 0x0000000000000000 /* 0x13 reserved */
+ .quad 0x0000000000000000 /* 0x1b reserved */
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++ .quad 0x00cf9b000000ffff /* 0x20 kernel 4GB code at 0x00000000 */
++#else
+ .quad 0x0000000000000000 /* 0x20 unused */
++#endif
++
+ .quad 0x0000000000000000 /* 0x28 unused */
+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
+@@ -477,27 +488,32 @@
+ .quad 0x0000000000000000 /* 0x53 reserved */
+ .quad 0x0000000000000000 /* 0x5b reserved */
+
+- .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
+- .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
+- .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0xc0cf9b400000ffff /* 0x60 kernel 4GB code at 0xc0400000 */
++#else
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++#endif
++
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
+
+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
+
+ /* Segments used for calling PnP BIOS */
+- .quad 0x00c09a0000000000 /* 0x90 32-bit code */
+- .quad 0x00809a0000000000 /* 0x98 16-bit code */
+- .quad 0x0080920000000000 /* 0xa0 16-bit data */
+- .quad 0x0080920000000000 /* 0xa8 16-bit data */
+- .quad 0x0080920000000000 /* 0xb0 16-bit data */
++ .quad 0x00c09b0000000000 /* 0x90 32-bit code */
++ .quad 0x00809b0000000000 /* 0x98 16-bit code */
++ .quad 0x0080930000000000 /* 0xa0 16-bit data */
++ .quad 0x0080930000000000 /* 0xa8 16-bit data */
++ .quad 0x0080930000000000 /* 0xb0 16-bit data */
+ /*
+ * The APM segments have byte granularity and their bases
+ * and limits are set at run time.
+ */
+- .quad 0x00409a0000000000 /* 0xb8 APM CS code */
+- .quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */
+- .quad 0x0040920000000000 /* 0xc8 APM DS data */
++ .quad 0x00409b0000000000 /* 0xb8 APM CS code */
++ .quad 0x00009b0000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0040930000000000 /* 0xc8 APM DS data */
+
+ .quad 0x0000000000000000 /* 0xd0 - unused */
+ .quad 0x0000000000000000 /* 0xd8 - unused */
+diff -urN linux-2.6.5/arch/i386/kernel/ioport.c linux-2.6.5-new/arch/i386/kernel/ioport.c
+--- linux-2.6.5/arch/i386/kernel/ioport.c 2004-04-03 22:37:06.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/ioport.c 2004-04-14 09:06:28.000000000 -0400
+@@ -15,6 +15,7 @@
+ #include <linux/stddef.h>
+ #include <linux/slab.h>
+ #include <linux/thread_info.h>
++#include <linux/grsecurity.h>
+
+ /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+ static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
+@@ -62,9 +63,16 @@
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on) {
++ gr_handle_ioperm();
++#else
+ if (turn_on && !capable(CAP_SYS_RAWIO))
++#endif
+ return -EPERM;
+-
++#ifdef CONFIG_GRKERNSEC_IO
++ }
++#endif
+ /*
+ * If it's the first ioperm() call in this thread's lifetime, set the
+ * IO bitmap up. ioperm() is much less timing critical than clone(),
+@@ -115,8 +123,13 @@
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ gr_handle_iopl();
++ return -EPERM;
++#else
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++#endif
+ }
+ regs->eflags = (regs->eflags &~ 0x3000UL) | (level << 12);
+ /* Make sure we return the long way (not sysenter) */
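The ioperm()/iopl() changes above mean that, with CONFIG_GRKERNSEC_IO enabled, requests to raise I/O-port privileges are refused outright: gr_handle_ioperm()/gr_handle_iopl() are invoked and -EPERM is returned even to a process holding CAP_SYS_RAWIO. A minimal user-space sketch of the kind of call that gets refused (illustrative only, not part of the patch):

    /* ioperm_probe.c -- i386 sketch; needs root on a stock kernel */
    #include <stdio.h>
    #include <sys/io.h>

    int main(void)
    {
        /* Request access to port 0x80; under CONFIG_GRKERNSEC_IO this
           fails with EPERM regardless of capabilities. */
        if (ioperm(0x80, 1, 1) != 0) {
            perror("ioperm");
            return 1;
        }
        outb(0, 0x80);          /* harmless write to the POST-code port */
        return 0;
    }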
+diff -urN linux-2.6.5/arch/i386/kernel/ldt.c linux-2.6.5-new/arch/i386/kernel/ldt.c
+--- linux-2.6.5/arch/i386/kernel/ldt.c 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/ldt.c 2004-04-14 09:15:11.000000000 -0400
+@@ -154,7 +154,7 @@
+ {
+ int err;
+ unsigned long size;
+- void *address;
++ const void *address;
+
+ err = 0;
+ address = &default_ldt[0];
+@@ -211,6 +211,13 @@
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && (ldt_info.contents & 2)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ entry_1 = LDT_entry_a(&ldt_info);
+ entry_2 = LDT_entry_b(&ldt_info);
+ if (oldmode)
+diff -urN linux-2.6.5/arch/i386/kernel/process.c linux-2.6.5-new/arch/i386/kernel/process.c
+--- linux-2.6.5/arch/i386/kernel/process.c 2004-04-03 22:36:10.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/process.c 2004-04-14 09:15:11.000000000 -0400
+@@ -344,7 +344,7 @@
+ struct task_struct *tsk;
+ int err;
+
+- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
++ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info - sizeof(unsigned long))) - 1;
+ struct_cpy(childregs, regs);
+ childregs->eax = 0;
+ childregs->esp = esp;
+@@ -446,9 +446,8 @@
+ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+ {
+ struct pt_regs ptregs;
+-
+- ptregs = *(struct pt_regs *)
+- ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
++
++ ptregs = *(struct pt_regs *)(tsk->thread.esp0 - sizeof(ptregs));
+ ptregs.xcs &= 0xffff;
+ ptregs.xds &= 0xffff;
+ ptregs.xes &= 0xffff;
+@@ -501,10 +500,22 @@
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = init_tss + cpu;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++#endif
++
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+ __unlazy_fpu(prev_p);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_switch_segments(next_p, cpu);
++#endif
++
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ */
+@@ -515,6 +526,10 @@
+ */
+ load_TLS(next, cpu);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ /*
+ * Save away %fs and %gs. No need to save %es and %ds, as
+ * those are always kernel segments while inside the kernel.
+@@ -689,6 +704,10 @@
+ struct desc_struct *desc;
+ int cpu, idx;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++#endif
++
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+ idx = info.entry_number;
+@@ -722,8 +741,17 @@
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ load_TLS(t, cpu);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+
+ return 0;
+@@ -777,3 +805,29 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_RANDKSTACK
++asmlinkage void pax_randomize_kstack(void)
++{
++ struct tss_struct *tss = init_tss + smp_processor_id();
++ unsigned long time;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (!pax_aslr)
++ return;
++#endif
++
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 1;
++#else
++ time &= 0x1FUL;
++ time <<= 2;
++#endif
++
++ tss->esp0 ^= time;
++ current->thread.esp0 = tss->esp0;
++}
++#endif
+diff -urN linux-2.6.5/arch/i386/kernel/ptrace.c linux-2.6.5-new/arch/i386/kernel/ptrace.c
+--- linux-2.6.5/arch/i386/kernel/ptrace.c 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -262,6 +263,9 @@
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+@@ -340,6 +344,17 @@
+ if(addr == (long) &dummy->u_debugreg[5]) break;
+ if(addr < (long) &dummy->u_debugreg[4] &&
+ ((unsigned long) data) >= TASK_SIZE-3) break;
++
++#ifdef CONFIG_GRKERNSEC
++ if(addr >= (long) &dummy->u_debugreg[0] &&
++ addr <= (long) &dummy->u_debugreg[3]){
++ long reg = (addr - (long) &dummy->u_debugreg[0]) >> 2;
++ long type = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 4*reg)) & 3;
++ long align = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 2 + 4*reg)) & 3;
++ if((type & 1) && (data & align))
++ break;
++ }
++#endif
+
+ if(addr == (long) &dummy->u_debugreg[7]) {
+ data &= ~DR_CONTROL_RESERVED;
+diff -urN linux-2.6.5/arch/i386/kernel/reboot.c linux-2.6.5-new/arch/i386/kernel/reboot.c
+--- linux-2.6.5/arch/i386/kernel/reboot.c 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/reboot.c 2004-04-14 09:15:11.000000000 -0400
+@@ -74,18 +74,18 @@
+ doesn't work with at least one type of 486 motherboard. It is easy
+ to stop this code working; hence the copious comments. */
+
+-static unsigned long long
++static const unsigned long long
+ real_mode_gdt_entries [3] =
+ {
+ 0x0000000000000000ULL, /* Null descriptor */
+- 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
+- 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
++ 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
++ 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
+ };
+
+ static struct
+ {
+ unsigned short size __attribute__ ((packed));
+- unsigned long long * base __attribute__ ((packed));
++ const unsigned long long * base __attribute__ ((packed));
+ }
+ real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
+ real_mode_idt = { 0x3ff, 0 },
+diff -urN linux-2.6.5/arch/i386/kernel/setup.c linux-2.6.5-new/arch/i386/kernel/setup.c
+--- linux-2.6.5/arch/i386/kernel/setup.c 2004-04-03 22:37:06.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/setup.c 2004-04-14 09:15:11.000000000 -0400
+@@ -1202,6 +1202,15 @@
+ #endif
+ }
+
++#ifdef CONFIG_PAX_SOFTMODE
++static int __init setup_pax_softmode(char *str)
++{
++ get_option (&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ #include "setup_arch_post.h"
+ /*
+ * Local Variables:
+diff -urN linux-2.6.5/arch/i386/kernel/signal.c linux-2.6.5-new/arch/i386/kernel/signal.c
+--- linux-2.6.5/arch/i386/kernel/signal.c 2004-04-03 22:36:58.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/signal.c 2004-04-14 09:15:11.000000000 -0400
+@@ -371,7 +371,17 @@
+ if (err)
+ goto give_sigsegv;
+
++#ifdef CONFIG_PAX_NOVSYSCALL
++ restorer = frame->retcode;
++#else
+ restorer = &__kernel_sigreturn;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC)
++ restorer -= SEGMEXEC_TASK_SIZE;
++#endif
++#endif
++
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -454,7 +464,18 @@
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. */
++
++#ifdef CONFIG_PAX_NOVSYSCALL
++ restorer = frame->retcode;
++#else
+ restorer = &__kernel_rt_sigreturn;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC)
++ restorer -= SEGMEXEC_TASK_SIZE;
++#endif
++#endif
++
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ err |= __put_user(restorer, &frame->pretcode);
+diff -urN linux-2.6.5/arch/i386/kernel/sys_i386.c linux-2.6.5-new/arch/i386/kernel/sys_i386.c
+--- linux-2.6.5/arch/i386/kernel/sys_i386.c 2004-04-03 22:38:21.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/sys_i386.c 2004-04-14 09:15:11.000000000 -0400
+@@ -19,6 +19,7 @@
+ #include <linux/mman.h>
+ #include <linux/file.h>
+ #include <linux/utsname.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -49,6 +50,11 @@
+ int error = -EBADF;
+ struct file * file = NULL;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+@@ -56,8 +62,14 @@
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ error = -EACCES;
++ goto out;
++ }
++
+ down_write(&current->mm->mmap_sem);
+- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
++ error = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+diff -urN linux-2.6.5/arch/i386/kernel/sysenter.c linux-2.6.5-new/arch/i386/kernel/sysenter.c
+--- linux-2.6.5/arch/i386/kernel/sysenter.c 2004-04-03 22:38:24.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/sysenter.c 2004-04-14 09:15:11.000000000 -0400
+@@ -41,13 +41,15 @@
+ extern const char vsyscall_int80_start, vsyscall_int80_end;
+ extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+
++#ifndef CONFIG_PAX_NOVSYSCALL
+ static int __init sysenter_setup(void)
+ {
+ unsigned long page = get_zeroed_page(GFP_ATOMIC);
+
+ __set_fixmap(FIX_VSYSCALL, __pa(page), PAGE_READONLY);
+
+- if (!boot_cpu_has(X86_FEATURE_SEP)) {
++ if (!boot_cpu_has(X86_FEATURE_SEP))
++ {
+ memcpy((void *) page,
+ &vsyscall_int80_start,
+ &vsyscall_int80_end - &vsyscall_int80_start);
+@@ -63,3 +65,4 @@
+ }
+
+ __initcall(sysenter_setup);
++#endif
+diff -urN linux-2.6.5/arch/i386/kernel/trampoline.S linux-2.6.5-new/arch/i386/kernel/trampoline.S
+--- linux-2.6.5/arch/i386/kernel/trampoline.S 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/trampoline.S 2004-04-14 09:15:11.000000000 -0400
+@@ -58,7 +58,7 @@
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $(startup_32_smp+__KERNEL_TEXT_OFFSET-__PAGE_OFFSET)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
+diff -urN linux-2.6.5/arch/i386/kernel/traps.c linux-2.6.5-new/arch/i386/kernel/traps.c
+--- linux-2.6.5/arch/i386/kernel/traps.c 2004-04-03 22:36:25.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/traps.c 2004-04-14 09:15:11.000000000 -0400
+@@ -59,7 +59,7 @@
+ asmlinkage void lcall7(void);
+ asmlinkage void lcall27(void);
+
+-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++const struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 } };
+
+ /* Do we ignore FPU interrupts ? */
+@@ -70,7 +70,9 @@
+ * F0 0F bug workaround.. We have a special link segment
+ * for this.
+ */
+-struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++__asm__(".section .rodata.idt,\"a\",@progbits");
++struct desc_struct idt_table[256] = { {0, 0}, };
++__asm__(".previous");
+
+ asmlinkage void divide_error(void);
+ asmlinkage void debug(void);
+@@ -97,6 +99,7 @@
+ void show_trace(struct task_struct *task, unsigned long * stack)
+ {
+ unsigned long addr;
++ int i = kstack_depth_to_print;
+
+ if (!stack)
+ stack = (unsigned long*)&stack;
+@@ -105,11 +108,12 @@
+ #ifdef CONFIG_KALLSYMS
+ printk("\n");
+ #endif
+- while (!kstack_end(stack)) {
++ while (i && !kstack_end(stack)) {
+ addr = *stack++;
+ if (kernel_text_address(addr)) {
+ printk(" [<%08lx>] ", addr);
+ print_symbol("%s\n", addr);
++ --i;
+ }
+ }
+ printk("\n");
+@@ -199,14 +203,23 @@
+ show_stack(NULL, (unsigned long*)esp);
+
+ printk("Code: ");
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if(regs->eip < PAGE_OFFSET)
+ goto bad;
++#endif
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if(__get_user(c, &((unsigned char*)regs->eip)[i+__KERNEL_TEXT_OFFSET])) {
++#else
+ if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
+ bad:
++#endif
++
+ printk(" Bad EIP value.");
+ break;
+ }
+@@ -229,8 +242,13 @@
+
+ eip = regs->eip;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ eip += __KERNEL_TEXT_OFFSET;
++#else
+ if (eip < PAGE_OFFSET)
+ goto no_bug;
++#endif
++
+ if (__get_user(ud2, (unsigned short *)eip))
+ goto no_bug;
+ if (ud2 != 0x0b0f)
+@@ -238,7 +256,13 @@
+ if (__get_user(line, (unsigned short *)(eip + 2)))
+ goto bug;
+ if (__get_user(file, (char **)(eip + 4)) ||
++
++#ifdef CONFIG_PAX_KERNEXEC
++ __get_user(c, file + __KERNEL_TEXT_OFFSET))
++#else
+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++#endif
++
+ file = "<bad filename>";
+
+ printk("------------[ cut here ]------------\n");
+@@ -411,8 +435,16 @@
+ return;
+
+ gp_in_kernel:
+- if (!fixup_exception(regs))
++ if (!fixup_exception(regs)) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if ((regs->xcs & 0xFFFF) == __KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
++ }
+ }
+
+ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+@@ -845,7 +877,7 @@
+ _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
+ }
+
+-static void __init set_call_gate(void *a, void *addr)
++static void __init set_call_gate(const void *a, void *addr)
+ {
+ _set_gate(a,12,3,addr,__KERNEL_CS);
+ }
+diff -urN linux-2.6.5/arch/i386/kernel/vmlinux.lds.S linux-2.6.5-new/arch/i386/kernel/vmlinux.lds.S
+--- linux-2.6.5/arch/i386/kernel/vmlinux.lds.S 2004-04-03 22:36:26.000000000 -0500
++++ linux-2.6.5-new/arch/i386/kernel/vmlinux.lds.S 2004-04-14 09:15:11.000000000 -0400
+@@ -2,7 +2,12 @@
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+
++#include <linux/config.h>
++
+ #include <asm-generic/vmlinux.lds.h>
++#include <asm-i386/page.h>
++#include <asm-i386/segment.h>
++
+ #include <asm/thread_info.h>
+
+ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+@@ -11,25 +16,16 @@
+ jiffies = jiffies_64;
+ SECTIONS
+ {
+- . = 0xC0000000 + 0x100000;
+- /* read-only */
+- _text = .; /* Text and read-only data */
+- .text : {
+- *(.text)
+- *(.fixup)
+- *(.gnu.warning)
+- } = 0x9090
+-
+- _etext = .; /* End of text section */
+-
+- . = ALIGN(16); /* Exception table */
+- __start___ex_table = .;
+- __ex_table : { *(__ex_table) }
+- __stop___ex_table = .;
+-
+- RODATA
++ . = __PAGE_OFFSET + 0x100000;
++ .text.startup : {
++ BYTE(0xEA) /* jmp far */
++ LONG(startup_32 + __KERNEL_TEXT_OFFSET - __PAGE_OFFSET)
++ SHORT(__BOOT_CS)
++ }
+
+ /* writeable */
++ . = ALIGN(32);
++ _data = .;
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+@@ -41,25 +37,28 @@
+ . = ALIGN(4096);
+ __nosave_end = .;
+
+- . = ALIGN(4096);
+- .data.page_aligned : { *(.data.idt) }
+-
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+- _edata = .; /* End of data section */
+-
+ . = ALIGN(THREAD_SIZE); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
++ . = ALIGN(4096);
++ .data.page_aligned : { *(.data.swapper_pg_dir) }
++
++ _edata = .; /* End of data section */
++
++ __bss_start = .; /* BSS */
++ .bss : {
++ *(.bss)
++ LONG(0)
++ }
++ . = ALIGN(4);
++ __bss_stop = .;
++
+ /* will be freed after init */
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+- .init.text : {
+- _sinittext = .;
+- *(.init.text)
+- _einittext = .;
+- }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+@@ -100,16 +99,68 @@
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
++
++ /* read-only */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ __init_text_start = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET) : AT (__init_text_start) {
++ _sinittext = .;
++ *(.init.text)
++ _einittext = .;
++ . = ALIGN(4*1024*1024) - 1;
++ BYTE(0)
++ }
++ . = ALIGN(4096);
++ __init_end = . + __KERNEL_TEXT_OFFSET;
++ /* freed after init ends here */
++
++/*
++ * PaX: this must be kept in synch with the KERNEL_CS base
++ * in the GDTs in arch/i386/kernel/head.S
++ */
++ _text = .; /* Text and read-only data */
++ .text : AT (. + __KERNEL_TEXT_OFFSET) {
++#else
++ .init.text : {
++ _sinittext = .;
++ *(.init.text)
++ _einittext = .;
++ }
+ . = ALIGN(4096);
+ __init_end = .;
+ /* freed after init ends here */
+-
+- __bss_start = .; /* BSS */
+- .bss : { *(.bss) }
+- . = ALIGN(4);
+- __bss_stop = .;
+
++ _text = .; /* Text and read-only data */
++ .text : {
++#endif
++
++ *(.text)
++ *(.fixup)
++ *(.gnu.warning)
++ } = 0x9090
++
++ _etext = .; /* End of text section */
++ . += __KERNEL_TEXT_OFFSET;
++ . = ALIGN(16); /* Exception table */
++ __start___ex_table = .;
++ __ex_table : { *(__ex_table) }
++ __stop___ex_table = .;
++
++ . = ALIGN(4096);
++ .rodata.page_aligned : {
++ *(.rodata.empty_zero_page)
++ *(.rodata.idt)
++ }
++
++ RODATA
++
++#ifdef CONFIG_PAX_KERNEXEC
++ _end = ALIGN(4*1024*1024);
++ . = _end ;
++#else
+ _end = . ;
++#endif
+
+ /* This is where the kernel creates the early boot page tables */
+ . = ALIGN(4096);
+diff -urN linux-2.6.5/arch/i386/mm/fault.c linux-2.6.5-new/arch/i386/mm/fault.c
+--- linux-2.6.5/arch/i386/mm/fault.c 2004-04-03 22:36:13.000000000 -0500
++++ linux-2.6.5-new/arch/i386/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -21,6 +21,9 @@
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/highmem.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -199,6 +202,10 @@
+
+ asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+@@ -209,22 +216,31 @@
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static void do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
++#else
+ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
++#endif
+ {
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
++#ifndef CONFIG_PAX_PAGEEXEC
+ unsigned long address;
++#endif
+ unsigned long page;
+ int write;
+ siginfo_t info;
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ /* get the address */
+ __asm__("movl %%cr2,%0":"=r" (address));
+
+ /* It's safe to allow irq's after cr2 has been saved */
+ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
+ local_irq_enable();
++#endif
+
+ tsk = current;
+
+@@ -358,6 +374,34 @@
+ if (is_prefetch(regs, address))
+ return;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++
++#if defined(CONFIG_PAX_EMUTRAMP) || defined(CONFIG_PAX_RANDEXEC)
++ if ((error_code == 4) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ }
++#endif
++
++ if (address >= SEGMEXEC_TASK_SIZE) {
++ pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+@@ -408,6 +452,13 @@
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++
++#ifdef CONFIG_PAX_KERNEXEC
++ else if (init_mm.start_code + __KERNEL_TEXT_OFFSET <= address && address < init_mm.end_code + __KERNEL_TEXT_OFFSET)
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
++ tsk->comm, tsk->pid, tsk->uid, tsk->euid);
++#endif
++
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+@@ -509,3 +560,361 @@
+ return;
+ }
+ }
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++/*
++ * PaX: decide what to do with offenders (regs->eip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ * 3 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ static const unsigned char trans[8] = {6, 1, 2, 0, 13, 5, 3, 4};
++#endif
++
++#if defined(CONFIG_PAX_RANDEXEC) || defined(CONFIG_PAX_EMUTRAMP)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ unsigned long esp_4;
++
++ if (regs->eip >= current->mm->start_code &&
++ regs->eip < current->mm->end_code)
++ {
++ err = get_user(esp_4, (unsigned long*)(regs->esp-4UL));
++ if (err || esp_4 == regs->eip)
++ return 1;
++
++ regs->eip += current->mm->delta_exec;
++ return 3;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov1, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(mov2, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(jmp, (unsigned short *)(regs->eip + 10));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-2));
++ if (err)
++ break;
++
++ if ((mov1 & 0xF8) == 0xB8 &&
++ (mov2 & 0xF8) == 0xB8 &&
++ (mov1 & 0x07) != (mov2 & 0x07) &&
++ (jmp & 0xF8FF) == 0xE0FF &&
++ (mov2 & 0x07) == ((jmp>>8) & 0x07) &&
++ (call & 0xF8FF) == 0xD0FF &&
++ regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
++ {
++ ((unsigned long *)regs)[trans[mov1 & 0x07]] = addr1;
++ ((unsigned long *)regs)[trans[mov2 & 0x07]] = addr2;
++ regs->eip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-2));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ (call & 0xF8FF) == 0xD0FF &&
++ regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
++ {
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #3 */
++ unsigned char mov, jmp;
++ char offset;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-3));
++ err |= get_user(offset, (char *)(ret-1));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x55FF)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->ebp + (unsigned long)(long)offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #4 */
++ unsigned char mov, jmp, sib;
++ char offset;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-4));
++ err |= get_user(sib, (unsigned char *)(ret-2));
++ err |= get_user(offset, (char *)(ret-1));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x54FF &&
++ sib == 0x24)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->esp + 4 + (unsigned long)(long)offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #5 */
++ unsigned char mov, jmp, sib;
++ unsigned long addr1, addr2, ret, offset;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-7));
++ err |= get_user(sib, (unsigned char *)(ret-5));
++ err |= get_user(offset, (unsigned long *)(ret-4));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x94FF &&
++ sib == 0x24)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->esp + 4 + offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1; /* PaX in action */
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP: ");
++ for (i = 0; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (unsigned long*)sp+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/* PaX: called with the page_table_lock spinlock held */
++static inline pte_t * pax_get_pte(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd || !pgd_present(*pgd))
++ return 0;
++ pmd = pmd_offset(pgd, address);
++ if (!pmd || !pmd_present(*pmd))
++ return 0;
++ return pte_offset_map(pmd, address);
++}
++
++/*
++ * PaX: handle the extra page faults or pass it down to the original handler
++ */
++asmlinkage void pax_do_page_fault(struct pt_regs *regs, unsigned long error_code)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long address;
++ pte_t *pte;
++ unsigned char pte_mask;
++ int ret;
++
++ __asm__("movl %%cr2,%0":"=r" (address));
++
++ /* It's safe to allow irq's after cr2 has been saved */
++ if (regs->eflags & (X86_EFLAGS_IF|VM_MASK))
++ local_irq_enable();
++
++ if (unlikely((error_code & 5) != 5 ||
++ address >= TASK_SIZE ||
++ !(current->flags & PF_PAX_PAGEEXEC)))
++ return do_page_fault(regs, error_code, address);
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely((error_code == 5) && (regs->eip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ ret = pax_handle_fetch_fault(regs);
++ switch (ret) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ case 1:
++ default:
++ pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
++ do_exit(SIGKILL);
++ }
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1));
++
++ spin_lock(&mm->page_table_lock);
++ pte = pax_get_pte(mm, address);
++ if (unlikely(!pte || !(pte_val(*pte) & _PAGE_PRESENT) || pte_exec(*pte))) {
++ pte_unmap(pte);
++ spin_unlock(&mm->page_table_lock);
++ do_page_fault(regs, error_code, address);
++ return;
++ }
++
++ if (unlikely((error_code == 7) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap(pte);
++ spin_unlock(&mm->page_table_lock);
++ do_page_fault(regs, error_code, address);
++ return;
++ }
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++ "orb %2,%1\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg %0\n"
++#endif
++ "testb $0,%0\n"
++ "xorb %3,%1\n"
++ :
++ : "m" (*(char*)address), "m" (*(char*)pte), "q" (pte_mask), "i" (_PAGE_USER)
++ : "memory", "cc");
++ pte_unmap(pte);
++ spin_unlock(&mm->page_table_lock);
++ return;
++}
++#endif
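The additions above are the enforcement half of PAGEEXEC on i386: an instruction fetch from a page without execute rights arrives as error_code 5 with regs->eip equal to the faulting address, pax_handle_fetch_fault() checks the trampoline/RANDEXEC special cases, and everything else is reported via pax_report_fault() and killed. A minimal user-space sketch of the behaviour being blocked (illustrative only, not part of the patch):

    /* pageexec_demo.c -- execute from a PROT_READ|PROT_WRITE mapping */
    #define _GNU_SOURCE
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static int forty_two(void) { return 42; }   /* code to copy around */

    int main(void)
    {
        void *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (buf == MAP_FAILED) { perror("mmap"); return 1; }

        memcpy(buf, (void *)forty_two, 64);     /* 64 bytes is a guess */

        /* On a stock i386 kernel (no hardware NX) this usually works;
           with CONFIG_PAX_PAGEEXEC the fetch faults into
           pax_do_page_fault() and the task is terminated. */
        int (*fn)(void) = (int (*)(void))buf;
        printf("%d\n", fn());
        return 0;
    }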
+diff -urN linux-2.6.5/arch/i386/mm/init.c linux-2.6.5-new/arch/i386/mm/init.c
+--- linux-2.6.5/arch/i386/mm/init.c 2004-04-03 22:37:39.000000000 -0500
++++ linux-2.6.5-new/arch/i386/mm/init.c 2004-04-14 09:15:11.000000000 -0400
+@@ -40,6 +40,7 @@
+ #include <asm/tlb.h>
+ #include <asm/tlbflush.h>
+ #include <asm/sections.h>
++#include <asm/desc.h>
+
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+ unsigned long highstart_pfn, highend_pfn;
+@@ -394,6 +395,10 @@
+ #endif
+ __flush_tlb_all();
+
++#ifdef CONFIG_PAX_KERNEXEC
++ memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir));
++#endif
++
+ kmap_init();
+ zone_sizes_init();
+ }
+@@ -488,7 +493,7 @@
+ set_highmem_pages_init(bad_ppro);
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_data;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+@@ -587,6 +592,42 @@
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ /* PaX: limit KERNEL_CS to actual size */
++ {
++ unsigned long limit;
++ int cpu;
++ pgd_t *pgd;
++ pmd_t *pmd;
++
++ limit = (unsigned long)&_etext >> PAGE_SHIFT;
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].a = (cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
++ cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].b = (cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);
++
++#ifdef CONFIG_PCI_BIOS
++ printk(KERN_INFO "PAX: warning, PCI BIOS might still be in use, keeping flat KERNEL_CS.\n");
++#endif
++
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
++ pgd = pgd_offset_k(addr);
++ pmd = pmd_offset(pgd, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_GLOBAL));
++ }
++ memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir));
++ for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
++ pgd = pgd_offset_k(addr);
++ pmd = pmd_offset(pgd, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++ flush_tlb_all();
++ }
++#endif
++
+ }
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+diff -urN linux-2.6.5/arch/i386/pci/pcbios.c linux-2.6.5-new/arch/i386/pci/pcbios.c
+--- linux-2.6.5/arch/i386/pci/pcbios.c 2004-04-03 22:36:17.000000000 -0500
++++ linux-2.6.5-new/arch/i386/pci/pcbios.c 2004-04-14 09:15:11.000000000 -0400
+@@ -6,7 +6,7 @@
+ #include <linux/init.h>
+ #include "pci.h"
+ #include "pci-functions.h"
+-
++#include <asm/desc.h>
+
+ /* BIOS32 signature: "_32_" */
+ #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
+@@ -33,6 +33,12 @@
+ * and the PCI BIOS specification.
+ */
+
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++#define __FLAT_KERNEL_CS 0x20
++#else
++#define __FLAT_KERNEL_CS __KERNEL_CS
++#endif
++
+ union bios32 {
+ struct {
+ unsigned long signature; /* _32_ */
+@@ -55,7 +61,7 @@
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect = { 0, __FLAT_KERNEL_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+@@ -96,7 +102,9 @@
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect = { 0, __FLAT_KERNEL_CS };
++
++#undef __FLAT_KERNEL_CS
+
+ static int pci_bios_present;
+
+diff -urN linux-2.6.5/arch/ia64/ia32/binfmt_elf32.c linux-2.6.5-new/arch/ia64/ia32/binfmt_elf32.c
+--- linux-2.6.5/arch/ia64/ia32/binfmt_elf32.c 2004-04-03 22:38:16.000000000 -0500
++++ linux-2.6.5-new/arch/ia64/ia32/binfmt_elf32.c 2004-04-14 09:15:11.000000000 -0400
+@@ -41,6 +41,17 @@
+ #undef SET_PERSONALITY
+ #define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#endif
++
+ /* Ugly but avoids duplication */
+ #include "../../../fs/binfmt_elf.c"
+
+@@ -178,7 +189,7 @@
+ mpnt->vm_mm = current->mm;
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = IA32_STACK_TOP;
+- mpnt->vm_page_prot = PAGE_COPY;
++ mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
+ mpnt->vm_flags = VM_STACK_FLAGS;
+ mpnt->vm_ops = NULL;
+ mpnt->vm_pgoff = 0;
+diff -urN linux-2.6.5/arch/ia64/ia32/ia32priv.h linux-2.6.5-new/arch/ia64/ia32/ia32priv.h
+--- linux-2.6.5/arch/ia64/ia32/ia32priv.h 2004-04-03 22:38:22.000000000 -0500
++++ linux-2.6.5-new/arch/ia64/ia32/ia32priv.h 2004-04-14 09:15:11.000000000 -0400
+@@ -295,7 +295,14 @@
+ #define ELF_ARCH EM_386
+
+ #define IA32_PAGE_OFFSET 0xc0000000
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
+
+ /*
+ * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
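
Together, the binfmt_elf32.c and ia32priv.h hunks above randomize the IA-32 stack top on ia64: IA32_STACK_TOP drops from the fixed page offset by the per-mm delta_stack. The composition of delta_stack is not shown in these hunks; the sketch below assumes the usual PaX form of LEN random bits shifted up by LSB, using the PER_LINUX32 values from the PAX_DELTA_STACK_* macros:

#include <stdio.h>
#include <stdlib.h>

#define IA32_PAGE_OFFSET 0xC0000000UL
#define IA32_PAGE_SHIFT  12
#define STACK_LEN_BITS   16   /* PAX_DELTA_STACK_LEN for PER_LINUX32 above */

static unsigned long randomized_stack_top(unsigned long random_bits)
{
    unsigned long delta_stack =
        (random_bits & ((1UL << STACK_LEN_BITS) - 1)) << IA32_PAGE_SHIFT;

    return IA32_PAGE_OFFSET - delta_stack;
}

int main(void)
{
    printf("stack top: %#lx\n", randomized_stack_top((unsigned long)rand()));
    return 0;
}
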
+diff -urN linux-2.6.5/arch/ia64/ia32/sys_ia32.c linux-2.6.5-new/arch/ia64/ia32/sys_ia32.c
+--- linux-2.6.5/arch/ia64/ia32/sys_ia32.c 2004-04-03 22:37:24.000000000 -0500
++++ linux-2.6.5-new/arch/ia64/ia32/sys_ia32.c 2004-04-14 09:15:11.000000000 -0400
+@@ -475,6 +475,11 @@
+
+ flags = a.flags;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(a.fd);
+@@ -496,6 +501,11 @@
+ struct file *file = NULL;
+ unsigned long retval;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -urN linux-2.6.5/arch/ia64/kernel/ptrace.c linux-2.6.5-new/arch/ia64/kernel/ptrace.c
+--- linux-2.6.5/arch/ia64/kernel/ptrace.c 2004-04-03 22:38:22.000000000 -0500
++++ linux-2.6.5-new/arch/ia64/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -17,6 +17,7 @@
+ #include <linux/smp_lock.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -1314,6 +1315,9 @@
+ if (pid == 1) /* no messing around with init! */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.5/arch/ia64/kernel/sys_ia64.c linux-2.6.5-new/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.5/arch/ia64/kernel/sys_ia64.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/arch/ia64/kernel/sys_ia64.c 2004-04-14 09:15:11.000000000 -0400
+@@ -18,6 +18,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/highuid.h>
+ #include <linux/hugetlb.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/shmparam.h>
+ #include <asm/uaccess.h>
+@@ -38,6 +39,13 @@
+ if (REGION_NUMBER(addr) == REGION_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && addr && filp)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -58,6 +66,13 @@
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+ if (start_addr != TASK_UNMAPPED_BASE) {
+ /* Start a new search --- just in case we missed some holes. */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ addr = TASK_UNMAPPED_BASE + mm->delta_mmap;
++ else
++#endif
++
+ addr = TASK_UNMAPPED_BASE;
+ goto full_search;
+ }
+@@ -185,6 +200,11 @@
+ unsigned long roff;
+ struct file *file = 0;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+@@ -216,6 +236,11 @@
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ addr = -EACCES;
++ goto out;
++ }
++
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.5/arch/ia64/mm/fault.c linux-2.6.5-new/arch/ia64/mm/fault.c
+--- linux-2.6.5/arch/ia64/mm/fault.c 2004-04-03 22:38:20.000000000 -0500
++++ linux-2.6.5-new/arch/ia64/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -9,6 +9,7 @@
+ #include <linux/mm.h>
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
++#include <linux/binfmts.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -70,6 +71,54 @@
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->cr_iip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_RANDEXEC
++ int err;
++
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->cr_iip >= current->mm->start_code &&
++ regs->cr_iip < current->mm->end_code)
++ {
++#if 0
++ /* PaX: this needs fixing */
++ if (regs->b0 == regs->cr_iip)
++ return 1;
++#endif
++ regs->cr_iip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -125,9 +174,31 @@
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
+ | (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->cr_iip, (void*)regs->r12);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urN linux-2.6.5/arch/mips/kernel/binfmt_elfn32.c linux-2.6.5-new/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.5/arch/mips/kernel/binfmt_elfn32.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/arch/mips/kernel/binfmt_elfn32.c 2004-04-14 09:15:11.000000000 -0400
+@@ -50,6 +50,17 @@
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/config.h>
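
The PAX_DELTA_* macros above only supply a shift position (LSB) and a bit count (LEN) per architecture; the deltas themselves are derived elsewhere in the patch. Assuming the usual composition (LEN random bits placed at bit LSB), the 32-bit MIPS case works out as below; the helper name and the use of rand() are illustrative only:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT 12

static unsigned long pax_delta(unsigned long random, int lsb, int len)
{
    return (random & ((1UL << len) - 1)) << lsb;
}

int main(void)
{
    /* MF_32BIT_ADDR task: PAX_DELTA_MMAP_LSB = PAGE_SHIFT, LEN = 27-PAGE_SHIFT */
    unsigned long delta_mmap = pax_delta((unsigned long)rand(),
                                         PAGE_SHIFT, 27 - PAGE_SHIFT);

    printf("delta_mmap = %#lx (max %#lx)\n",
           delta_mmap, ((1UL << (27 - PAGE_SHIFT)) - 1) << PAGE_SHIFT);
    return 0;
}
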
+diff -urN linux-2.6.5/arch/mips/kernel/binfmt_elfo32.c linux-2.6.5-new/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.5/arch/mips/kernel/binfmt_elfo32.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/arch/mips/kernel/binfmt_elfo32.c 2004-04-14 09:15:11.000000000 -0400
+@@ -52,6 +52,17 @@
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/config.h>
+diff -urN linux-2.6.5/arch/mips/kernel/syscall.c linux-2.6.5-new/arch/mips/kernel/syscall.c
+--- linux-2.6.5/arch/mips/kernel/syscall.c 2004-04-03 22:37:41.000000000 -0500
++++ linux-2.6.5-new/arch/mips/kernel/syscall.c 2004-04-14 09:15:11.000000000 -0400
+@@ -79,6 +79,11 @@
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -89,6 +94,13 @@
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ addr = TASK_UNMAPPED_BASE;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
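
The two RANDMMAP hunks above change arch_get_unmapped_area the same way as on the other architectures in this patch: for a randomized task, a file-backed mapping ignores the caller's address hint and the search starts at TASK_UNMAPPED_BASE plus the per-mm delta_mmap. A compressed sketch of that decision, with an illustrative base address:

#include <stdio.h>

#define TASK_UNMAPPED_BASE 0x40000000UL   /* illustrative value */

static unsigned long search_base(int pax_randmmap, int file_backed,
                                 unsigned long hint, unsigned long delta_mmap)
{
    if (pax_randmmap && (file_backed || !hint))
        return TASK_UNMAPPED_BASE + delta_mmap;   /* randomized start */
    if (hint)
        return hint;                              /* honour the hint */
    return TASK_UNMAPPED_BASE;
}

int main(void)
{
    printf("%#lx\n", search_base(1, 1, 0x50000000UL, 0x01234000UL));
    printf("%#lx\n", search_base(0, 1, 0x50000000UL, 0x01234000UL));
    return 0;
}
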
+diff -urN linux-2.6.5/arch/mips/mm/fault.c linux-2.6.5-new/arch/mips/mm/fault.c
+--- linux-2.6.5/arch/mips/mm/fault.c 2004-04-03 22:36:53.000000000 -0500
++++ linux-2.6.5-new/arch/mips/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -28,6 +28,24 @@
+ #include <asm/uaccess.h>
+ #include <asm/ptrace.h>
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -urN linux-2.6.5/arch/parisc/kernel/ptrace.c linux-2.6.5-new/arch/parisc/kernel/ptrace.c
+--- linux-2.6.5/arch/parisc/kernel/ptrace.c 2004-04-03 22:36:19.000000000 -0500
++++ linux-2.6.5-new/arch/parisc/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -17,6 +17,7 @@
+ #include <linux/personality.h>
+ #include <linux/security.h>
+ #include <linux/compat.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -114,6 +115,9 @@
+ if (pid == 1) /* no messing around with init! */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.5/arch/parisc/kernel/sys_parisc.c linux-2.6.5-new/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.5/arch/parisc/kernel/sys_parisc.c 2004-04-03 22:38:12.000000000 -0500
++++ linux-2.6.5-new/arch/parisc/kernel/sys_parisc.c 2004-04-14 09:15:11.000000000 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/shm.h>
+ #include <linux/smp_lock.h>
+ #include <linux/syscalls.h>
++#include <linux/grsecurity.h>
+
+ int sys_pipe(int *fildes)
+ {
+@@ -114,6 +115,13 @@
+ {
+ if (len > TASK_SIZE)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -131,12 +139,23 @@
+ {
+ struct file * file = NULL;
+ unsigned long error = -EBADF;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ return -EACCES;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.5/arch/parisc/kernel/sys_parisc32.c linux-2.6.5-new/arch/parisc/kernel/sys_parisc32.c
+--- linux-2.6.5/arch/parisc/kernel/sys_parisc32.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/arch/parisc/kernel/sys_parisc32.c 2004-04-14 09:17:18.000000000 -0400
+@@ -48,6 +48,8 @@
+ #include <linux/ptrace.h>
+ #include <linux/swap.h>
+ #include <linux/syscalls.h>
++#include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/types.h>
+ #include <asm/uaccess.h>
+@@ -171,6 +173,11 @@
+ struct file *file;
+ int retval;
+ int i;
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+
+ file = open_exec(filename);
+
+@@ -178,7 +185,26 @@
+ if (IS_ERR(file))
+ return retval;
+
++	gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ allow_write_access(file);
++ fput(file);
++ return -EAGAIN;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ allow_write_access(file);
++ fput(file);
++ return -EACCES;
++ }
++
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+ DBG(("do_execve32(%s, %p, %p, %p)\n", filename, argv, envp, regs));
+@@ -209,11 +235,24 @@
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+ if (retval < 0)
+ goto out;
+
+ bprm.exec = bprm.p;
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
+ retval = copy_strings32(bprm.envc, envp, &bprm);
+ if (retval < 0)
+ goto out;
+@@ -222,10 +261,32 @@
+ if (retval < 0)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
++
+ retval = search_binary_handler(&bprm,regs);
+- if (retval >= 0)
++ if (retval >= 0) {
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+ /* execve success */
+ return retval;
++ }
++
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
+
+ out:
+ /* Something went wrong, return the inode and free the argument pages*/
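
Among the grsecurity hooks added to the compat execve above, the RANDUSTACK line adjusts bprm.p, the pointer under which the argument and environment strings are copied. What it subtracts is a random, pointer-aligned offset strictly below PAGE_SIZE; the sketch below reproduces only that expression, with user-space stand-ins for the constants:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE     4096UL
#define PAGE_MASK     (~(PAGE_SIZE - 1))
#define MAX_ARG_PAGES 32

int main(void)
{
    unsigned long p = PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *);
    unsigned long r = (unsigned long)rand();   /* stands in for pax_get_random_long() */

    /* keep pointer alignment, keep the offset inside one page */
    p -= (r & ~(sizeof(void *) - 1)) & ~PAGE_MASK;

    printf("randomized bprm.p = %#lx (offset %lu bytes)\n",
           p, PAGE_SIZE * MAX_ARG_PAGES - sizeof(void *) - p);
    return 0;
}
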
+diff -urN linux-2.6.5/arch/parisc/kernel/traps.c linux-2.6.5-new/arch/parisc/kernel/traps.c
+--- linux-2.6.5/arch/parisc/kernel/traps.c 2004-04-03 22:38:27.000000000 -0500
++++ linux-2.6.5-new/arch/parisc/kernel/traps.c 2004-04-14 09:15:11.000000000 -0400
+@@ -661,9 +661,7 @@
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -urN linux-2.6.5/arch/parisc/mm/fault.c linux-2.6.5-new/arch/parisc/mm/fault.c
+--- linux-2.6.5/arch/parisc/mm/fault.c 2004-04-03 22:38:13.000000000 -0500
++++ linux-2.6.5-new/arch/parisc/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -16,6 +16,8 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
++#include <linux/binfmts.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -54,7 +56,7 @@
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -140,6 +142,139 @@
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUTRAMP)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (instruction_pointer(regs) >= current->mm->start_code &&
++ instruction_pointer(regs) < current->mm->end_code)
++ {
++#if 0
++ /* PaX: this needs fixing */
++ if ((regs->gr[2] & ~3UL) == instruction_pointer(regs))
++ return 1;
++#endif
++ regs->iaoq[0] += current->mm->delta_exec;
++ if ((regs->iaoq[1] & ~3UL) >= current->mm->start_code &&
++ (regs->iaoq[1] & ~3UL) < current->mm->end_code)
++ regs->iaoq[1] += current->mm->delta_exec;
++ return 4;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int*)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int*)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int*)addr);
++ err |= get_user(bv, (unsigned int*)(addr+4));
++ err |= get_user(ldw2, (unsigned int*)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int*)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int*)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void do_page_fault(struct pt_regs *regs, unsigned long code,
+ unsigned long address)
+ {
+@@ -165,8 +300,38 @@
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)instruction_pointer(regs), (void*)regs->gr[30]);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
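
The parisc EMUPLT code above works purely by pattern matching: it fetches a few instruction words at the faulting PC and compares them with the fixed encodings the linker emits for an unpatched PLT slot before redirecting the registers to the resolver. A user-space sketch of the recognition step, reusing only the two opcode constants from the hunk and none of the kernel plumbing:

#include <stdio.h>

static int looks_like_unpatched_parisc_plt(const unsigned int *pc)
{
    /* "bl" into the stub followed by the characteristic "depwi" */
    return pc[0] == 0xEA9F1FDDU && pc[1] == 0xD6801C1EU;
}

int main(void)
{
    unsigned int stub[2]  = { 0xEA9F1FDDU, 0xD6801C1EU };
    unsigned int other[2] = { 0x08000240U, 0x08000240U };

    printf("stub:  %d\n", looks_like_unpatched_parisc_plt(stub));
    printf("other: %d\n", looks_like_unpatched_parisc_plt(other));
    return 0;
}
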
+diff -urN linux-2.6.5/arch/ppc/kernel/ptrace.c linux-2.6.5-new/arch/ppc/kernel/ptrace.c
+--- linux-2.6.5/arch/ppc/kernel/ptrace.c 2004-04-03 22:36:52.000000000 -0500
++++ linux-2.6.5-new/arch/ppc/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -26,6 +26,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -202,6 +203,9 @@
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.5/arch/ppc/kernel/syscalls.c linux-2.6.5-new/arch/ppc/kernel/syscalls.c
+--- linux-2.6.5/arch/ppc/kernel/syscalls.c 2004-04-03 22:38:18.000000000 -0500
++++ linux-2.6.5-new/arch/ppc/kernel/syscalls.c 2004-04-14 09:15:11.000000000 -0400
+@@ -36,6 +36,7 @@
+ #include <linux/utsname.h>
+ #include <linux/file.h>
+ #include <linux/unistd.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -165,14 +166,25 @@
+ struct file * file = NULL;
+ int ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (!(file = fget(fd)))
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ ret = -EACCES;
++ goto out;
++ }
++
+ down_write(&current->mm->mmap_sem);
+- ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
++ ret = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+diff -urN linux-2.6.5/arch/ppc/mm/fault.c linux-2.6.5-new/arch/ppc/mm/fault.c
+--- linux-2.6.5/arch/ppc/mm/fault.c 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/arch/ppc/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -28,6 +28,11 @@
+ #include <linux/interrupt.h>
+ #include <linux/highmem.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
++#include <linux/unistd.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -56,6 +61,366 @@
+ void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+ extern int get_pteptr(struct mm_struct *mm, unsigned long addr, pte_t **ptep);
+
++#ifdef CONFIG_PAX_EMUSIGRT
++void pax_syscall_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_syscall = 0UL;
++}
++
++static struct page* pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return NOPAGE_OOM;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x44000002U; /* sc */
++ __flush_dcache_icache(kaddr);
++ kunmap(page);
++ if (type)
++ *type = VM_FAULT_MAJOR;
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_syscall_close,
++ nopage: pax_syscall_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ vma->vm_pgoff = 0UL;
++ vma->vm_file = NULL;
++ vma->vm_private_data = NULL;
++ INIT_LIST_HEAD(&vma->shared);
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched GOT trampoline was detected
++ * 3 when patched PLT trampoline was detected
++ * 4 when unpatched PLT trampoline was detected
++ * 5 when legitimate ET_EXEC was detected
++ * 6 when sigreturn trampoline was detected
++ * 7 when rt_sigreturn trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->nip >= current->mm->start_code &&
++ regs->nip < current->mm->end_code)
++ {
++ if (regs->link == regs->nip)
++ return 1;
++
++ regs->nip += current->mm->delta_exec;
++ return 5;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched GOT emulation */
++ unsigned int blrl;
++
++ err = get_user(blrl, (unsigned int*)regs->nip);
++
++ if (!err && blrl == 0x4E800021U) {
++ unsigned long temp = regs->nip;
++
++ regs->nip = regs->link & 0xFFFFFFFCUL;
++ regs->link = temp + 4UL;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int b;
++
++ err = get_user(b, (unsigned int *)regs->nip);
++
++ if (!err && (b & 0xFC000003U) == 0x48000000U) {
++ regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
++ return 3;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation #1 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(rlwinm, (unsigned int*)addr);
++ err |= get_user(add, (unsigned int*)(addr+4));
++ err |= get_user(li2, (unsigned int*)(addr+8));
++ err |= get_user(addis2, (unsigned int*)(addr+12));
++ err |= get_user(mtctr, (unsigned int*)(addr+16));
++ err |= get_user(li3, (unsigned int*)(addr+20));
++ err |= get_user(addis3, (unsigned int*)(addr+24));
++ err |= get_user(bctr, (unsigned int*)(addr+28));
++
++ if (err)
++ break;
++
++ if (rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++
++#if 0
++ do { /* PaX: unpatched PLT emulation #2 */
++ unsigned int lis, lwzu, b, bctr;
++
++ err = get_user(lis, (unsigned int *)regs->nip);
++ err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
++ err |= get_user(b, (unsigned int *)(regs->nip+8));
++ err |= get_user(bctr, (unsigned int *)(regs->nip+12));
++
++ if (err)
++ break;
++
++ if ((lis & 0xFFFF0000U) == 0x39600000U &&
++ (lwzu & 0xU) == 0xU &&
++ (b & 0xFC000003U) == 0x48000000U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int*)addr);
++ err |= get_user(addi, (unsigned int*)(addr+4));
++ err |= get_user(rlwinm, (unsigned int*)(addr+8));
++ err |= get_user(add, (unsigned int*)(addr+12));
++ err |= get_user(li2, (unsigned int*)(addr+16));
++ err |= get_user(addis2, (unsigned int*)(addr+20));
++ err |= get_user(mtctr, (unsigned int*)(addr+24));
++ err |= get_user(li3, (unsigned int*)(addr+28));
++ err |= get_user(addis3, (unsigned int*)(addr+32));
++ err |= get_user(bctr, (unsigned int*)(addr+36));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (addi & 0xFFFF0000U) == 0x396B0000U &&
++ rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] =
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: unpatched PLT emulation #3 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int addis, lwz, mtctr, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int*)addr);
++ err |= get_user(lwz, (unsigned int*)(addr+4));
++ err |= get_user(mtctr, (unsigned int*)(addr+8));
++ err |= get_user(bctr, (unsigned int*)(addr+12));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (lwz & 0xFFFF0000U) == 0x816B0000U &&
++ mtctr == 0x7D6903A6U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int r11;
++
++ addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++
++ err = get_user(r11, (unsigned int*)addr);
++ if (err)
++ break;
++
++ regs->gpr[PT_R11] = r11;
++ regs->ctr = r11;
++ regs->nip = r11;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ do { /* PaX: sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned long call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_syscall);
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->gpr[PT_R0] = __NR_sigreturn;
++ regs->nip = call_syscall;
++ return 6;
++ }
++ } while (0);
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned int call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto rt_emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto rt_emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_syscall);
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++rt_emulate:
++ regs->gpr[PT_R0] = __NR_rt_sigreturn;
++ regs->nip = call_syscall;
++ return 7;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -116,7 +481,7 @@
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (TRAP(regs) == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & 0x02000000;
+ #endif /* CONFIG_4xx */
+@@ -211,15 +576,14 @@
+ } else if (TRAP(regs) == 0x400) {
+ pte_t *ptep;
+
+-#if 0
++#if 1
+ /* It would be nice to actually enforce the VM execute
+ permission on CPUs which can do so, but far too
+ much stuff in userspace doesn't get the permissions
+ right, so we let any page be executed for now. */
+ if (! (vma->vm_flags & VM_EXEC))
+ goto bad_area;
+-#endif
+-
++#else
+ /* Since 4xx supports per-page execute permission,
+ * we lazily flush dcache to icache. */
+ ptep = NULL;
+@@ -240,6 +604,7 @@
+ if (ptep != NULL)
+ pte_unmap(ptep);
+ #endif
++#endif
+ /* a read */
+ } else {
+ /* protection fault */
+@@ -285,6 +650,38 @@
+
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC) {
++ if ((TRAP(regs) == 0x400) && (regs->nip == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ case 4:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 5:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ case 6:
++ case 7:
++ return;
++#endif
++
++ }
++
++ pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[1]);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = code;
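
The ppc patched-PLT case above recovers the target of a "b" instruction with (((b | 0xFC000000) ^ 0x02000000) + 0x02000000), which is a branch-free sign extension of the instruction's 26-bit displacement field. A short check that the expression agrees with an ordinary signed cast; the two sample encodings are made up for the demonstration:

#include <stdio.h>

static long decode_b_displacement(unsigned int insn)
{
    return (long)(int)(((insn | 0xFC000000U) ^ 0x02000000U) + 0x02000000U);
}

int main(void)
{
    unsigned int fwd  = 0x48000010U;   /* b +0x10 */
    unsigned int back = 0x4BFFFFF0U;   /* b -0x10 */

    printf("forward : %+ld\n", decode_b_displacement(fwd));
    printf("backward: %+ld\n", decode_b_displacement(back));
    return 0;
}
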
+diff -urN linux-2.6.5/arch/sparc/kernel/ptrace.c linux-2.6.5-new/arch/sparc/kernel/ptrace.c
+--- linux-2.6.5/arch/sparc/kernel/ptrace.c 2004-04-03 22:38:24.000000000 -0500
++++ linux-2.6.5-new/arch/sparc/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -18,6 +18,7 @@
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -320,6 +321,11 @@
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ pt_error_return(regs, EPERM);
++ goto out_tsk;
++ }
++
+ if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+ || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+ if (ptrace_attach(child)) {
+diff -urN linux-2.6.5/arch/sparc/kernel/sys_sparc.c linux-2.6.5-new/arch/sparc/kernel/sys_sparc.c
+--- linux-2.6.5/arch/sparc/kernel/sys_sparc.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/arch/sparc/kernel/sys_sparc.c 2004-04-14 09:15:11.000000000 -0400
+@@ -21,6 +21,7 @@
+ #include <linux/utsname.h>
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -55,6 +56,13 @@
+ return -ENOMEM;
+ if (ARCH_SUN4C_SUN4 && len > 0x20000000)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -224,6 +232,11 @@
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+@@ -242,6 +255,12 @@
+ if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
+ goto out_putf;
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ retval = -EACCES;
++ goto out;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.5/arch/sparc/kernel/sys_sunos.c linux-2.6.5-new/arch/sparc/kernel/sys_sunos.c
+--- linux-2.6.5/arch/sparc/kernel/sys_sunos.c 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/arch/sparc/kernel/sys_sunos.c 2004-04-14 09:15:11.000000000 -0400
+@@ -71,6 +71,11 @@
+ struct file * file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if(flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+diff -urN linux-2.6.5/arch/sparc/mm/fault.c linux-2.6.5-new/arch/sparc/mm/fault.c
+--- linux-2.6.5/arch/sparc/mm/fault.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/arch/sparc/mm/fault.c 2004-04-14 09:15:11.000000000 -0400
+@@ -21,6 +21,10 @@
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/segment.h>
+@@ -201,6 +205,272 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_emuplt_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return NOPAGE_OOM;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(page);
++ kunmap(page);
++ if (type)
++ *type = VM_FAULT_MAJOR;
++
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_emuplt_close,
++ nopage: pax_emuplt_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ vma->vm_pgoff = 0UL;
++ vma->vm_file = NULL;
++ vma->vm_private_data = NULL;
++ INIT_LIST_HEAD(&vma->shared);
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->pc >= current->mm->start_code &&
++ regs->pc < current->mm->end_code)
++ {
++ if (regs->u_regs[UREG_RETPC] + 8UL == regs->pc)
++ return 1;
++
++ regs->pc += current->mm->delta_exec;
++ if (regs->npc >= current->mm->start_code &&
++ regs->npc < current->mm->end_code)
++ regs->npc += current->mm->delta_exec;
++ return 4;
++ }
++ if (regs->pc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->pc < current->mm->end_code + current->mm->delta_exec)
++ {
++ regs->pc -= current->mm->delta_exec;
++ if (regs->npc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->npc < current->mm->end_code + current->mm->delta_exec)
++ regs->npc -= current->mm->delta_exec;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int*)regs->pc);
++ err |= get_user(sethi2, (unsigned int*)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int*)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + 4 + (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->pc);
++ err |= get_user(jmpl, (unsigned int*)(regs->pc+4));
++ err |= get_user(nop, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->pc);
++ err |= get_user(ba, (unsigned int*)(regs->pc+4));
++ err |= get_user(nop, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int*)addr);
++ err |= get_user(call, (unsigned int*)(addr+4));
++ err |= get_user(nop, (unsigned int*)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_dl_resolve);
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int*)(regs->pc-4));
++ err |= get_user(call, (unsigned int*)regs->pc);
++ err |= get_user(nop, (unsigned int*)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+ {
+@@ -264,6 +534,29 @@
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->pc, (void*)regs->u_regs[UREG_FP]);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
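
The sparc patched-PLT cases above rebuild the branch target from a sethi/jmpl pair: sethi deposits a 22-bit immediate into the upper bits of %g1 and jmpl adds a sign-extended 13-bit offset. The sketch reuses only the bit manipulation from the hunk; the sample instruction words are constructed for the example and are not taken from a real binary:

#include <stdio.h>

static unsigned int plt_target(unsigned int sethi, unsigned int jmpl)
{
    unsigned int addr = (sethi & 0x003FFFFFU) << 10;               /* imm22 */

    addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);  /* simm13 */
    return addr;
}

int main(void)
{
    /* sethi %hi(0x12345400), %g1 ; jmpl %g1 + 0x2a8, %g0 */
    unsigned int sethi = 0x03000000U | (0x12345400U >> 10);
    unsigned int jmpl  = 0x81C06000U | 0x2A8U;

    printf("target = %#x\n", plt_target(sethi, jmpl));   /* 0x123456a8 */
    return 0;
}
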
+diff -urN linux-2.6.5/arch/sparc/mm/init.c linux-2.6.5-new/arch/sparc/mm/init.c
+--- linux-2.6.5/arch/sparc/mm/init.c 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/arch/sparc/mm/init.c 2004-04-14 09:15:11.000000000 -0400
+@@ -337,17 +337,17 @@
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
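
The protection_map edits above are what makes PAGEEXEC bite on sparc: the map is indexed by the low nibble of vm_flags (VM_READ=1, VM_WRITE=2, VM_EXEC=4, VM_SHARED=8), and every entry whose VM_EXEC bit is clear now resolves to a *_NOEXEC protection. A toy lookup with strings standing in for the pgprot values; the final entry is assumed to stay PAGE_SHARED as in the stock kernel, since it falls outside the hunk:

#include <stdio.h>

#define VM_READ   1
#define VM_WRITE  2
#define VM_EXEC   4
#define VM_SHARED 8

int main(void)
{
    static const char *protection_map[16] = {
        "NONE",     "READONLY_NOEXEC", "COPY_NOEXEC",   "COPY_NOEXEC",
        "READONLY", "READONLY",        "COPY",          "COPY",
        "NONE",     "READONLY_NOEXEC", "SHARED_NOEXEC", "SHARED_NOEXEC",
        "READONLY", "READONLY",        "SHARED",        "SHARED",
    };
    unsigned long vm_flags = VM_READ | VM_WRITE | VM_SHARED;  /* rw- shared mapping */

    printf("vm_flags %#lx -> PAGE_%s\n", vm_flags,
           protection_map[vm_flags & 0x0f]);
    return 0;
}
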
+diff -urN linux-2.6.5/arch/sparc/mm/srmmu.c linux-2.6.5-new/arch/sparc/mm/srmmu.c
+--- linux-2.6.5/arch/sparc/mm/srmmu.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/arch/sparc/mm/srmmu.c 2004-04-14 09:15:11.000000000 -0400
+@@ -2138,6 +2138,13 @@
+ BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ BTFIXUPSET_INT(page_shared_noexec, pgprot_val(SRMMU_PAGE_SHARED_NOEXEC));
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+ pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
+diff -urN linux-2.6.5/arch/sparc64/kernel/itlb_base.S linux-2.6.5-new/arch/sparc64/kernel/itlb_base.S
+--- linux-2.6.5/arch/sparc64/kernel/itlb_base.S 2004-04-03 22:36:18.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/kernel/itlb_base.S 2004-04-14 09:15:11.000000000 -0400
+@@ -41,7 +41,9 @@
+ CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
+ ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE
+ 1: brgez,pn %g5, 3f ! Not valid, branch out
+- nop ! Delay-slot
++ and %g5, _PAGE_EXEC, %g4
++ brz,pn %g4, 3f ! Not executable, branch out
++ nop ! Delay-slot
+ 2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
+ retry ! Trap return
+ 3: rdpr %pstate, %g4 ! Move into alternate globals
+@@ -74,8 +76,6 @@
+ nop
+ nop
+ nop
+- nop
+- nop
+ CREATE_VPTE_NOP
+
+ #undef CREATE_VPTE_OFFSET1
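
The itlb_base.S change above adds one test to the sparc64 ITLB miss fast path: besides the valid bit (checked via the sign of the PTE), the entry must now carry _PAGE_EXEC before it is loaded into the instruction TLB, otherwise the miss falls through to the slow path. A C rendering of that control flow; the _PAGE_EXEC value is illustrative, only _PAGE_VALID as the sign bit reflects the real sparc64 layout:

#include <stdio.h>

#define _PAGE_VALID 0x8000000000000000UL   /* sign bit, as tested by brgez */
#define _PAGE_EXEC  0x0000000000001000UL   /* illustrative bit position */

static int itlb_may_load(unsigned long pte)
{
    if (!(pte & _PAGE_VALID))
        return 0;              /* brgez,pn %g5, 3f  -- not valid */
    if (!(pte & _PAGE_EXEC))
        return 0;              /* new: brz,pn %g4, 3f -- not executable */
    return 1;                  /* stxa ... ASI_ITLB_DATA_IN */
}

int main(void)
{
    printf("%d %d\n", itlb_may_load(_PAGE_VALID | _PAGE_EXEC),
                      itlb_may_load(_PAGE_VALID));
    return 0;
}
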
+diff -urN linux-2.6.5/arch/sparc64/kernel/ptrace.c linux-2.6.5-new/arch/sparc64/kernel/ptrace.c
+--- linux-2.6.5/arch/sparc64/kernel/ptrace.c 2004-04-03 22:38:22.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -19,6 +19,7 @@
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/asi.h>
+ #include <asm/pgtable.h>
+@@ -169,6 +170,11 @@
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, (long)request)) {
++ pt_error_return(regs, EPERM);
++ goto out_tsk;
++ }
++
+ if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+ || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+ if (ptrace_attach(child)) {
+diff -urN linux-2.6.5/arch/sparc64/kernel/sys_sparc.c linux-2.6.5-new/arch/sparc64/kernel/sys_sparc.c
+--- linux-2.6.5/arch/sparc64/kernel/sys_sparc.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/kernel/sys_sparc.c 2004-04-14 09:15:11.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/ipc.h>
+ #include <linux/personality.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -71,6 +72,10 @@
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -101,6 +106,13 @@
+ }
+ if (task_size < addr) {
+ if (start_addr != TASK_UNMAPPED_BASE) {
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ start_addr = addr = TASK_UNMAPPED_BASE + mm->delta_mmap;
++ else
++#endif
++
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ goto full_search;
+ }
+@@ -310,11 +322,22 @@
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
++
++ if (gr_handle_mmap(file, prot)) {
++ retval = -EACCES;
++ goto out_putf;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ len = PAGE_ALIGN(len);
+ retval = -EINVAL;
+diff -urN linux-2.6.5/arch/sparc64/kernel/sys_sparc32.c linux-2.6.5-new/arch/sparc64/kernel/sys_sparc32.c
+--- linux-2.6.5/arch/sparc64/kernel/sys_sparc32.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/kernel/sys_sparc32.c 2004-04-14 09:16:57.000000000 -0400
+@@ -54,6 +54,8 @@
+ #include <linux/netfilter_ipv4/ip_tables.h>
+ #include <linux/ptrace.h>
+ #include <linux/highuid.h>
++#include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/types.h>
+ #include <asm/ipc.h>
+@@ -1759,6 +1761,11 @@
+ struct file * file;
+ int retval;
+ int i;
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+
+ sched_balance_exec();
+
+@@ -1768,7 +1775,26 @@
+ if (IS_ERR(file))
+ return retval;
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ allow_write_access(file);
++ fput(file);
++ return -EAGAIN;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ allow_write_access(file);
++ fput(file);
++ return -EACCES;
++ }
++
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
+
+ bprm.file = file;
+@@ -1803,11 +1829,24 @@
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+ if (retval < 0)
+ goto out;
+
+ bprm.exec = bprm.p;
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
+ retval = copy_strings32(bprm.envc, envp, &bprm);
+ if (retval < 0)
+ goto out;
+@@ -1816,13 +1855,34 @@
+ if (retval < 0)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
++
+ retval = search_binary_handler(&bprm, regs);
+ if (retval >= 0) {
+ /* execve success */
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+ security_bprm_free(&bprm);
+ return retval;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ /* Something went wrong, return the inode and free the argument pages*/
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+diff -urN linux-2.6.5/arch/sparc64/kernel/sys_sunos32.c linux-2.6.5-new/arch/sparc64/kernel/sys_sunos32.c
+--- linux-2.6.5/arch/sparc64/kernel/sys_sunos32.c 2004-04-03 22:37:24.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/kernel/sys_sunos32.c 2004-04-14 09:15:11.000000000 -0400
+@@ -75,6 +75,11 @@
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+diff -urN linux-2.6.5/arch/sparc64/mm/fault.c linux-2.6.5-new/arch/sparc64/mm/fault.c
+--- linux-2.6.5/arch/sparc64/mm/fault.c 2004-04-03 22:38:20.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/mm/fault.c 2004-04-14 09:15:12.000000000 -0400
+@@ -18,6 +18,10 @@
+ #include <linux/smp_lock.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -303,6 +307,364 @@
+ unhandled_fault (address, current, regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_EMUPLT
++static void pax_emuplt_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return NOPAGE_OOM;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(page);
++ kunmap(page);
++ if (type)
++ *type = VM_FAULT_MAJOR;
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_emuplt_close,
++ nopage: pax_emuplt_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ vma->vm_pgoff = 0UL;
++ vma->vm_file = NULL;
++ vma->vm_private_data = NULL;
++ INIT_LIST_HEAD(&vma->shared);
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->tpc >= current->mm->start_code &&
++ regs->tpc < current->mm->end_code)
++ {
++ if (regs->u_regs[UREG_RETPC] + 8UL == regs->tpc)
++ return 1;
++
++ regs->tpc += current->mm->delta_exec;
++ if (regs->tnpc >= current->mm->start_code &&
++ regs->tnpc < current->mm->end_code)
++ regs->tnpc += current->mm->delta_exec;
++ return 4;
++ }
++ if (regs->tpc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->tpc < current->mm->end_code + current->mm->delta_exec)
++ {
++ regs->tpc -= current->mm->delta_exec;
++ if (regs->tnpc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->tnpc < current->mm->end_code + current->mm->delta_exec)
++ regs->tnpc -= current->mm->delta_exec;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int*)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + 4 + (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->tpc);
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int mov1, call, mov2;
++
++ err = get_user(mov1, (unsigned int*)regs->tpc);
++ err |= get_user(call, (unsigned int*)(regs->tpc+4));
++ err |= get_user(mov2, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if (mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(or1, (unsigned int*)(regs->tpc+8));
++ err |= get_user(or2, (unsigned int*)(regs->tpc+12));
++ err |= get_user(sllx, (unsigned int*)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020 &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(sllx, (unsigned int*)(regs->tpc+8));
++ err |= get_user(or, (unsigned int*)(regs->tpc+12));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+16));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+20));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020 &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->tpc);
++ err |= get_user(ba, (unsigned int*)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ err = get_user(save, (unsigned int*)addr);
++ err |= get_user(call, (unsigned int*)(addr+4));
++ err |= get_user(nop, (unsigned int*)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_dl_resolve);
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int*)(regs->tpc-4));
++ err |= get_user(call, (unsigned int*)regs->tpc);
++ err |= get_user(nop, (unsigned int*)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -340,8 +702,10 @@
+ goto intr_or_no_mm;
+
+ if (test_thread_flag(TIF_32BIT)) {
+- if (!(regs->tstate & TSTATE_PRIV))
++ if (!(regs->tstate & TSTATE_PRIV)) {
+ regs->tpc &= 0xffffffff;
++ regs->tnpc &= 0xffffffff;
++ }
+ address &= 0xffffffff;
+ }
+
+@@ -350,6 +714,34 @@
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((current->flags & PF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ goto fault_done;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ goto fault_done;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff -urN linux-2.6.5/arch/sparc64/solaris/misc.c linux-2.6.5-new/arch/sparc64/solaris/misc.c
+--- linux-2.6.5/arch/sparc64/solaris/misc.c 2004-04-03 22:38:19.000000000 -0500
++++ linux-2.6.5-new/arch/sparc64/solaris/misc.c 2004-04-14 09:15:12.000000000 -0400
+@@ -56,6 +56,11 @@
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ /* Do we need it here? */
+ set_personality(PER_SVR4);
+ if (flags & MAP_NORESERVE) {
+diff -urN linux-2.6.5/arch/x86_64/ia32/ia32_binfmt.c linux-2.6.5-new/arch/x86_64/ia32/ia32_binfmt.c
+--- linux-2.6.5/arch/x86_64/ia32/ia32_binfmt.c 2004-04-03 22:36:15.000000000 -0500
++++ linux-2.6.5-new/arch/x86_64/ia32/ia32_binfmt.c 2004-04-14 09:15:12.000000000 -0400
+@@ -185,6 +185,17 @@
+ //#include <asm/ia32.h>
+ #include <linux/elf.h>
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (test_thread_flag(TIF_IA32) ? 0x08048000UL : 0x400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#endif
++
+ typedef struct user_i387_ia32_struct elf_fpregset_t;
+ typedef struct user32_fxsr_struct elf_fpxregset_t;
+
+@@ -354,7 +365,13 @@
+ mpnt->vm_mm = mm;
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = IA32_STACK_TOP;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ mpnt->vm_flags = VM_STACK_FLAGS;
++#else
+ mpnt->vm_flags = vm_stack_flags32;
++#endif
++
+ mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
+ PAGE_COPY_EXEC : PAGE_COPY;
+ mpnt->vm_ops = NULL;
+diff -urN linux-2.6.5/arch/x86_64/ia32/sys_ia32.c linux-2.6.5-new/arch/x86_64/ia32/sys_ia32.c
+--- linux-2.6.5/arch/x86_64/ia32/sys_ia32.c 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/arch/x86_64/ia32/sys_ia32.c 2004-04-14 09:15:12.000000000 -0400
+@@ -1235,6 +1235,11 @@
+ unsigned long error;
+ struct file * file = NULL;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -urN linux-2.6.5/arch/x86_64/kernel/ptrace.c linux-2.6.5-new/arch/x86_64/kernel/ptrace.c
+--- linux-2.6.5/arch/x86_64/kernel/ptrace.c 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/arch/x86_64/kernel/ptrace.c 2004-04-14 09:06:28.000000000 -0400
+@@ -16,6 +16,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -212,6 +213,9 @@
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.5/arch/x86_64/kernel/setup64.c linux-2.6.5-new/arch/x86_64/kernel/setup64.c
+--- linux-2.6.5/arch/x86_64/kernel/setup64.c 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/arch/x86_64/kernel/setup64.c 2004-04-14 09:15:12.000000000 -0400
+@@ -42,8 +42,15 @@
+
+ unsigned long __supported_pte_mask = ~0UL;
+ static int do_not_nx __initdata = 0;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++unsigned long vm_stack_flags = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS;
++unsigned long vm_stack_flags32 = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS;
++#else
+ unsigned long vm_stack_flags = __VM_STACK_FLAGS;
+ unsigned long vm_stack_flags32 = __VM_STACK_FLAGS;
++#endif
++
+ unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
+ unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
+ unsigned long vm_force_exec32 = PROT_EXEC;
+diff -urN linux-2.6.5/arch/x86_64/kernel/sys_x86_64.c linux-2.6.5-new/arch/x86_64/kernel/sys_x86_64.c
+--- linux-2.6.5/arch/x86_64/kernel/sys_x86_64.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/arch/x86_64/kernel/sys_x86_64.c 2004-04-14 09:15:12.000000000 -0400
+@@ -48,6 +48,11 @@
+ if (off & ~PAGE_MASK)
+ goto out;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ goto out;
++#endif
++
+ error = -EBADF;
+ file = NULL;
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+@@ -102,6 +107,15 @@
+
+ find_start_end(flags, &begin, &end);
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) {
++ if (begin == 0x40000000)
++ begin += current->mm->delta_mmap & 0x0FFFFFFFU;
++ else
++ begin += current->mm->delta_mmap;
++ }
++#endif
++
+ if (len > end)
+ return -ENOMEM;
+
+diff -urN linux-2.6.5/arch/x86_64/mm/fault.c linux-2.6.5-new/arch/x86_64/mm/fault.c
+--- linux-2.6.5/arch/x86_64/mm/fault.c 2004-04-03 22:36:14.000000000 -0500
++++ linux-2.6.5-new/arch/x86_64/mm/fault.c 2004-04-14 09:15:12.000000000 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/compiler.h>
+ #include <linux/module.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -217,6 +218,63 @@
+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->rip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_RANDEXEC
++ int err;
++
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->rip >= current->mm->start_code &&
++ regs->rip < current->mm->end_code)
++ {
++ if (test_thread_flag(TIF_IA32)) {
++ unsigned int esp_4;
++
++ err = get_user(esp_4, (unsigned int*)(regs->rsp-4UL));
++ if (err || esp_4 == regs->rip)
++ return 1;
++ } else {
++ unsigned long esp_8;
++
++ err = get_user(esp_8, (unsigned long*)(regs->rsp-8UL));
++ if (err || esp_8 == regs->rip)
++ return 1;
++ }
++
++ regs->rip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned char*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int page_fault_trace;
+ int exception_trace = 1;
+
+@@ -302,6 +360,23 @@
+ * we can handle it..
+ */
+ good_area:
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && (error_code & 16) && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->rip, (void*)regs->rsp);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+diff -urN linux-2.6.5/drivers/char/keyboard.c linux-2.6.5-new/drivers/char/keyboard.c
+--- linux-2.6.5/drivers/char/keyboard.c 2004-04-03 22:38:28.000000000 -0500
++++ linux-2.6.5-new/drivers/char/keyboard.c 2004-04-14 09:06:28.000000000 -0400
+@@ -606,6 +606,16 @@
+ kbd->kbdmode == VC_MEDIUMRAW) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++ {
++ void *func = fn_handler[value];
++ if (func == fn_show_state || func == fn_show_ptregs ||
++ func == fn_show_mem)
++ return;
++ }
++#endif
++
+ fn_handler[value](vc, regs);
+ }
+
+diff -urN linux-2.6.5/drivers/char/mem.c linux-2.6.5-new/drivers/char/mem.c
+--- linux-2.6.5/drivers/char/mem.c 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/drivers/char/mem.c 2004-04-14 09:15:12.000000000 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/devfs_fs_kernel.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -39,6 +40,10 @@
+ extern void tapechar_init(void);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern struct file_operations grsec_fops;
++#endif
++
+ /*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+@@ -178,6 +183,12 @@
+
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_mem_write();
++ return -EPERM;
++#endif
++
+ return do_write_mem(__va(p), p, buf, count, ppos);
+ }
+
+@@ -192,6 +203,11 @@
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ if (gr_handle_mem_mmap(offset, vma))
++ return -EPERM;
++#endif
++
+ /* Don't try to swap out physical pages.. */
+ vma->vm_flags |= VM_RESERVED;
+
+@@ -285,6 +301,11 @@
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_kmem_write();
++ return -EPERM;
++#endif
++
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+@@ -411,7 +432,23 @@
+ count = size;
+
+ zap_page_range(vma, addr, count);
+- zeromap_page_range(vma, addr, count, PAGE_COPY);
++ zeromap_page_range(vma, addr, count, vma->vm_page_prot);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ unsigned long addr_m;
++ struct vm_area_struct * vma_m;
++
++ addr_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, addr_m);
++ if (vma_m && vma_m->vm_start == addr_m && (vma_m->vm_flags & VM_MIRROR)) {
++ addr_m = addr + (unsigned long)vma->vm_private_data;
++ zap_page_range(vma_m, addr_m, count);
++ } else
++ printk(KERN_ERR "PAX: VMMIRROR: read_zero bug, %08lx, %08lx\n",
++ addr, vma->vm_start);
++ }
++#endif
+
+ size -= count;
+ buf += count;
+@@ -560,6 +597,16 @@
+
+ static int open_port(struct inode * inode, struct file * filp)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_open_port();
++ return -EPERM;
++#endif
++
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ }
+
+@@ -568,7 +615,6 @@
+ #define full_lseek null_lseek
+ #define write_zero write_null
+ #define read_full read_zero
+-#define open_mem open_port
+ #define open_kmem open_mem
+
+ static struct file_operations mem_fops = {
+@@ -666,6 +712,11 @@
+ case 9:
+ filp->f_op = &urandom_fops;
+ break;
++#ifdef CONFIG_GRKERNSEC
++ case 10:
++ filp->f_op = &grsec_fops;
++ break;
++#endif
+ case 11:
+ filp->f_op = &kmsg_fops;
+ break;
+@@ -697,6 +748,9 @@
+ {7, "full", S_IRUGO | S_IWUGO, &full_fops},
+ {8, "random", S_IRUGO | S_IWUSR, &random_fops},
+ {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
++#ifdef CONFIG_GRKERNSEC
++ {10,"grsec", S_IRUSR | S_IWUGO, &grsec_fops},
++#endif
+ {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
+ };
+
+diff -urN linux-2.6.5/drivers/char/random.c linux-2.6.5-new/drivers/char/random.c
+--- linux-2.6.5/drivers/char/random.c 2004-04-03 22:36:17.000000000 -0500
++++ linux-2.6.5-new/drivers/char/random.c 2004-04-14 09:15:12.000000000 -0400
+@@ -263,9 +263,15 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define DEFAULT_POOL_SIZE 1024
++#define SECONDARY_POOL_SIZE 256
++#define BATCH_ENTROPY_SIZE 512
++#else
+ #define DEFAULT_POOL_SIZE 512
+ #define SECONDARY_POOL_SIZE 128
+ #define BATCH_ENTROPY_SIZE 256
++#endif
+ #define USE_SHA
+
+ /*
+@@ -2361,6 +2367,29 @@
+ return halfMD4Transform(hash, keyptr->secret);
+ }
+
++#ifdef CONFIG_GRKERNSEC
++/* the following function is provided by PaX under the GPL */
++unsigned long get_random_long(void)
++{
++ static time_t rekey_time;
++ static __u32 secret[12];
++ time_t t;
++
++ /*
++ * Pick a random secret every REKEY_INTERVAL seconds
++ */
++ t = get_seconds();
++ if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) {
++ rekey_time = t;
++ get_random_bytes(secret, sizeof(secret));
++ }
++
++ secret[1] = halfMD4Transform(secret+8, secret);
++ secret[0] = halfMD4Transform(secret+8, secret);
++ return *(unsigned long *)secret;
++}
++#endif
++
+ #ifdef CONFIG_SYN_COOKIES
+ /*
+ * Secure SYN cookie computation. This is the algorithm worked out by
+@@ -2460,3 +2489,25 @@
+ return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */
+ }
+ #endif
++
++#ifdef CONFIG_PAX_ASLR
++unsigned long pax_get_random_long(void)
++{
++ static time_t rekey_time;
++ static __u32 secret[12];
++ time_t t;
++
++ /*
++ * Pick a random secret every REKEY_INTERVAL seconds.
++ */
++ t = get_seconds();
++ if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) {
++ rekey_time = t;
++ get_random_bytes(secret, sizeof(secret));
++ }
++
++ secret[1] = halfMD4Transform(secret+8, secret);
++ secret[0] = halfMD4Transform(secret+8, secret);
++ return *(unsigned long *)secret;
++}
++#endif
+diff -urN linux-2.6.5/drivers/char/vt_ioctl.c linux-2.6.5-new/drivers/char/vt_ioctl.c
+--- linux-2.6.5/drivers/char/vt_ioctl.c 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/drivers/char/vt_ioctl.c 2004-04-14 09:06:29.000000000 -0400
+@@ -96,6 +96,12 @@
+ case KDSKBENT:
+ if (!perm)
+ return -EPERM;
++
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ return -EPERM;
++#endif
++
+ if (!i && v == K_NOSUCHMAP) {
+ /* disallocate map */
+ key_map = key_maps[s];
+@@ -232,6 +238,13 @@
+ goto reterr;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG)) {
++ ret = -EPERM;
++ goto reterr;
++ }
++#endif
++
+ q = func_table[i];
+ first_free = funcbufptr + (funcbufsize - funcbufleft);
+ for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
+diff -urN linux-2.6.5/drivers/pci/proc.c linux-2.6.5-new/drivers/pci/proc.c
+--- linux-2.6.5/drivers/pci/proc.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/drivers/pci/proc.c 2004-04-14 09:06:29.000000000 -0400
+@@ -565,7 +565,15 @@
+
+ static void legacy_proc_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ struct proc_dir_entry * entry = create_proc_entry("pci", S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ struct proc_dir_entry * entry = create_proc_entry("pci", S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ struct proc_dir_entry * entry = create_proc_entry("pci", 0, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &proc_pci_operations;
+ }
+@@ -594,7 +602,15 @@
+ {
+ struct proc_dir_entry *entry;
+ struct pci_dev *dev = NULL;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR, proc_bus);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, proc_bus);
++#endif
++#else
+ proc_bus_pci_dir = proc_mkdir("pci", proc_bus);
++#endif
+ entry = create_proc_entry("devices", 0, proc_bus_pci_dir);
+ if (entry)
+ entry->proc_fops = &proc_bus_pci_dev_operations;
+diff -urN linux-2.6.5/drivers/pnp/pnpbios/bioscalls.c linux-2.6.5-new/drivers/pnp/pnpbios/bioscalls.c
+--- linux-2.6.5/drivers/pnp/pnpbios/bioscalls.c 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/drivers/pnp/pnpbios/bioscalls.c 2004-04-14 09:15:12.000000000 -0400
+@@ -79,7 +79,7 @@
+ set_limit(cpu_gdt_table[cpu][(selname) >> 3], size); \
+ } while(0)
+
+-static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
++static struct desc_struct bad_bios_desc = { 0, 0x00409300 };
+
+ /*
+ * At some point we want to use this stack frame pointer to unwind
+@@ -107,6 +107,10 @@
+ struct desc_struct save_desc_40;
+ int cpu;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++#endif
++
+ /*
+ * PnP BIOSes are generally not terribly re-entrant.
+ * Also, don't rely on them to save everything correctly.
+@@ -115,6 +119,11 @@
+ return PNP_FUNCTION_NOT_SUPPORTED;
+
+ cpu = get_cpu();
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
+ cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
+
+@@ -159,6 +168,11 @@
+ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
+ cpu_gdt_table[cpu][0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+diff -urN linux-2.6.5/drivers/scsi/scsi_devinfo.c linux-2.6.5-new/drivers/scsi/scsi_devinfo.c
+--- linux-2.6.5/drivers/scsi/scsi_devinfo.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/drivers/scsi/scsi_devinfo.c 2004-04-14 09:15:12.000000000 -0400
+@@ -27,7 +27,7 @@
+ static const char spaces[] = " "; /* 16 of them */
+ static unsigned scsi_default_dev_flags;
+ static LIST_HEAD(scsi_dev_info_list);
+-static __init char scsi_dev_flags[256];
++static __initdata char scsi_dev_flags[256];
+
+ /*
+ * scsi_static_device_list: deprecated list of devices that require
+diff -urN linux-2.6.5/fs/Kconfig linux-2.6.5-new/fs/Kconfig
+--- linux-2.6.5/fs/Kconfig 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/fs/Kconfig 2004-04-14 09:06:29.000000000 -0400
+@@ -778,6 +778,7 @@
+
+ config PROC_KCORE
+ bool
++ depends on !GRKERNSEC_PROC_ADD
+ default y if !ARM
+
+ config DEVFS_FS
+diff -urN linux-2.6.5/fs/binfmt_aout.c linux-2.6.5-new/fs/binfmt_aout.c
+--- linux-2.6.5/fs/binfmt_aout.c 2004-04-03 22:36:26.000000000 -0500
++++ linux-2.6.5-new/fs/binfmt_aout.c 2004-04-14 09:15:12.000000000 -0400
+@@ -24,6 +24,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/personality.h>
+ #include <linux/init.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -118,10 +119,12 @@
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
+ #ifdef __sparc__
++ gr_learn_resource(current, RLIMIT_CORE, dump.u_dsize+dump.u_ssize, 1);
+ if ((dump.u_dsize+dump.u_ssize) >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_dsize = 0;
+ #else
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_dsize = 0;
+@@ -129,10 +132,12 @@
+
+ /* Make sure we have enough room to write the stack and data areas. */
+ #ifdef __sparc__
++ gr_learn_resource(current, RLIMIT_CORE, dump.u_ssize, 1);
+ if ((dump.u_ssize) >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_ssize = 0;
+ #else
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_ssize+1) * PAGE_SIZE >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_ssize = 0;
+@@ -281,6 +286,8 @@
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
++
++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+@@ -309,10 +316,33 @@
+ (current->mm->start_brk = N_BSSADDR(ex));
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ current->mm->free_area_cache += current->mm->delta_mmap;
++#endif
++
+ current->mm->rss = 0;
+ current->mm->mmap = NULL;
+ compute_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->flags |= PF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->flags |= PF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ #ifdef __sparc__
+ if (N_MAGIC(ex) == NMAGIC) {
+ loff_t pos = fd_offset;
+@@ -399,7 +429,7 @@
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.5/fs/binfmt_elf.c linux-2.6.5-new/fs/binfmt_elf.c
+--- linux-2.6.5/fs/binfmt_elf.c 2004-04-03 22:36:58.000000000 -0500
++++ linux-2.6.5-new/fs/binfmt_elf.c 2004-04-14 15:19:02.000000000 -0400
+@@ -37,11 +37,17 @@
+ #include <linux/pagemap.h>
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
++#include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/pgalloc.h>
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#include <asm/desc.h>
++#endif
++
+ #include <linux/elf.h>
+
+ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
+@@ -85,14 +91,22 @@
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ current->mm->start_brk = current->mm->brk = end;
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+ unsigned long addr = do_brk(start, end - start);
+ if (BAD_ADDR(addr))
+ return addr;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC)
++ addr = do_mmap_pgoff(NULL, ELF_PAGEALIGN(start + current->mm->delta_exec), 0UL, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, start);
++ if (BAD_ADDR(addr))
++ return addr;
++#endif
++
+ }
+- current->mm->start_brk = current->mm->brk = end;
+ return 0;
+ }
+
+@@ -444,6 +458,203 @@
+ return elf_entry;
+ }
+
++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC) {
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++ pax_flags |= PF_PAX_SEGMEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (elf_phdata->p_flags & PF_RANDMMAP)
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (elf_phdata->p_flags & PF_RANDEXEC)
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) {
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++ pax_flags |= PF_PAX_SEGMEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_phdata->p_flags & PF_NORANDEXEC))
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++static int pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) {
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++ pax_flags |= PF_PAX_SEGMEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if ((elf_ex->e_ident[EI_PAX] & EF_PAX_RANDEXEC) && (elf_ex->e_type == ET_EXEC) && (pax_flags & PF_PAX_MPROTECT))
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++static int pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++ pax_flags = pax_parse_ei_pax(elf_ex);
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)) ||
++ ((elf_phdata[i].p_flags & PF_RANDEXEC) && ((elf_phdata[i].p_flags & PF_NORANDEXEC) || elf_ex->e_type == ET_DYN || !(elf_phdata[i].p_flags & PF_MPROTECT))) ||
++ (!(elf_phdata[i].p_flags & PF_NORANDEXEC) && (elf_ex->e_type == ET_DYN || (elf_phdata[i].p_flags & PF_NOMPROTECT))))
++ return -EINVAL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->flags |= pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -476,7 +687,12 @@
+ struct exec interp_ex;
+ char passed_fileno[6];
+ struct files_struct *files;
+-
++
++#ifdef CONFIG_PAX_RANDEXEC
++ unsigned long load_addr_random = 0UL;
++ unsigned long load_bias_random = 0UL;
++#endif
++
+ /* Get the exec-header */
+ elf_ex = *((struct elfhdr *) bprm->buf);
+
+@@ -664,8 +880,44 @@
+ current->mm->end_data = 0;
+ current->mm->end_code = 0;
+ current->mm->mmap = NULL;
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_exec = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
+ current->flags &= ~PF_FORKNOEXEC;
+
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++ if (0 > pax_parse_elf_flags(&elf_ex, elf_phdata)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_flags_func)
++ (pax_set_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (current->flags & PF_PAX_PAGEEXEC)
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->flags & PF_PAX_RANDMMAP) {
++#define pax_delta_mask(delta, lsb, len) (((delta) & ((1UL << (len)) - 1)) << (lsb))
++
++ current->mm->delta_mmap = pax_delta_mask(pax_get_random_long(), PAX_DELTA_MMAP_LSB(current), PAX_DELTA_MMAP_LEN(current));
++ current->mm->delta_exec = pax_delta_mask(pax_get_random_long(), PAX_DELTA_EXEC_LSB(current), PAX_DELTA_EXEC_LEN(current));
++ current->mm->delta_stack = pax_delta_mask(pax_get_random_long(), PAX_DELTA_STACK_LSB(current), PAX_DELTA_STACK_LEN(current));
++ }
++#endif
++
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(elf_ex, ibcs2_interpreter);
+@@ -674,6 +926,12 @@
+ change some of these later */
+ current->mm->rss = 0;
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ current->mm->free_area_cache += current->mm->delta_mmap;
++#endif
++
+ retval = setup_arg_pages(bprm);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+@@ -729,11 +987,85 @@
+ base, as well as whatever program they might try to exec. This
+ is because the brk will follow the loader, and is not movable. */
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if (current->flags & PF_PAX_RANDMMAP) {
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE(current) - vaddr + current->mm->delta_exec);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+- if (BAD_ADDR(error))
+- continue;
++#ifdef CONFIG_PAX_RANDEXEC
++ if ((current->flags & PF_PAX_RANDEXEC) && (elf_ex.e_type == ET_EXEC)) {
++ error = -ENOMEM;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot & ~PROT_EXEC, elf_flags);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ unsigned long addr, len;
++
++ addr = ELF_PAGESTART(load_bias + vaddr);
++ len = elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr);
++ if (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)
++ continue;
++ down_write(&current->mm->mmap_sem);
++ error = do_mmap_pgoff(bprm->file, addr, len, elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT);
++ up_write(&current->mm->mmap_sem);
++ }
++#endif
++
++ if (BAD_ADDR(error))
++ continue;
++
++ /* PaX: mirror at a randomized base */
++ down_write(&current->mm->mmap_sem);
++
++ if (!load_addr_set) {
++ load_addr_random = get_unmapped_area(bprm->file, 0UL, elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT, MAP_PRIVATE);
++ if (BAD_ADDR(load_addr_random)) {
++ up_write(&current->mm->mmap_sem);
++ continue;
++ }
++ load_bias_random = load_addr_random - vaddr;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (elf_prot & PROT_EXEC) {
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), elf_ppnt->p_memsz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), PROT_NONE, MAP_PRIVATE | MAP_FIXED, 0UL);
++ if (!BAD_ADDR(load_addr_random)) {
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr + SEGMEXEC_TASK_SIZE), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++ if (!BAD_ADDR(load_addr_random))
++ load_addr_random -= SEGMEXEC_TASK_SIZE;
++ }
++ } else
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ if (BAD_ADDR(load_addr_random))
++ continue;
++ } else
++#endif
++
++ {
++ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
++ if (BAD_ADDR(error))
++ continue;
++ }
+
+ if (!load_addr_set) {
+ load_addr_set = 1;
+@@ -744,6 +1076,11 @@
+ load_addr += load_bias;
+ reloc_func_desc = load_bias;
+ }
++
++#ifdef CONFIG_PAX_RANDEXEC
++ current->mm->delta_exec = load_addr_random - load_addr;
++#endif
++
+ }
+ k = elf_ppnt->p_vaddr;
+ if (k < start_code) start_code = k;
+@@ -770,6 +1107,16 @@
+ start_data += load_bias;
+ end_data += load_bias;
+
++#ifdef CONFIG_PAX_RANDMMAP
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ elf_brk += pax_delta_mask(pax_get_random_long(), 4, PAGE_SHIFT);
++#undef pax_delta_mask
++#endif
++
+ /* Calling set_brk effectively mmaps the pages that we need
+ * for the bss and break sections. We must do this before
+ * mapping in the interpreter, to make sure it doesn't wind
+@@ -850,6 +1197,26 @@
+ ELF_PLAT_INIT(regs, reloc_func_desc);
+ #endif
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ i = get_cpu();
++
++#ifdef CONFIG_PAX_KERNEXEC
++ {
++ unsigned long flags, cr3;
++
++ pax_open_kernel(flags, cr3);
++#endif
++
++ pax_switch_segments(current, i);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++ }
++#endif
++
++ put_cpu();
++#endif
++
+ start_thread(regs, elf_entry, bprm->p);
+ if (unlikely(current->ptrace & PT_PTRACED)) {
+ if (current->ptrace & PT_TRACE_EXEC)
+@@ -1062,8 +1429,11 @@
+ #undef DUMP_SEEK
+
+ #define DUMP_WRITE(addr, nr) \
++ do { \
++ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
+ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+- goto end_coredump;
++ goto end_coredump; \
++ } while (0);
+ #define DUMP_SEEK(off) \
+ if (!dump_seek(file, (off))) \
+ goto end_coredump;
+diff -urN linux-2.6.5/fs/binfmt_misc.c linux-2.6.5-new/fs/binfmt_misc.c
+--- linux-2.6.5/fs/binfmt_misc.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/fs/binfmt_misc.c 2004-04-14 09:15:12.000000000 -0400
+@@ -108,9 +108,11 @@
+ int retval;
+
+ retval = -ENOEXEC;
+- if (!enabled)
++ if (!enabled || bprm->misc)
+ goto _ret;
+
++ bprm->misc++;
++
+ /* to keep locking time low, we copy the interpreter string */
+ read_lock(&entries_lock);
+ fmt = check_file(bprm);
+diff -urN linux-2.6.5/fs/buffer.c linux-2.6.5-new/fs/buffer.c
+--- linux-2.6.5/fs/buffer.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/fs/buffer.c 2004-04-14 09:06:29.000000000 -0400
+@@ -37,6 +37,7 @@
+ #include <linux/bio.h>
+ #include <linux/notifier.h>
+ #include <linux/cpu.h>
++#include <linux/grsecurity.h>
+ #include <asm/bitops.h>
+
+ static void invalidate_bh_lrus(void);
+@@ -2162,6 +2163,9 @@
+ int err;
+
+ err = -EFBIG;
++
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1);
++
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+ send_sig(SIGXFSZ, current, 0);
+diff -urN linux-2.6.5/fs/dcache.c linux-2.6.5-new/fs/dcache.c
+--- linux-2.6.5/fs/dcache.c 2004-04-03 22:36:24.000000000 -0500
++++ linux-2.6.5-new/fs/dcache.c 2004-04-14 09:06:29.000000000 -0400
+@@ -1256,7 +1256,7 @@
+ *
+ * "buflen" should be positive. Caller holds the dcache_lock.
+ */
+-static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
++char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+ struct dentry *root, struct vfsmount *rootmnt,
+ char *buffer, int buflen)
+ {
+diff -urN linux-2.6.5/fs/exec.c linux-2.6.5-new/fs/exec.c
+--- linux-2.6.5/fs/exec.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/fs/exec.c 2004-04-14 10:10:41.000000000 -0400
+@@ -46,6 +46,8 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/rmap-locking.h>
++#include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+@@ -62,6 +64,20 @@
+ static struct linux_binfmt *formats;
+ static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
+
++#ifdef CONFIG_PAX_SOFTMODE
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++unsigned int pax_aslr=1;
++#endif
++
++unsigned int pax_softmode;
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_flags_func)(struct linux_binprm * bprm);
++EXPORT_SYMBOL(pax_set_flags_func);
++#endif
++
+ int register_binfmt(struct linux_binfmt * fmt)
+ {
+ struct linux_binfmt ** tmp = &formats;
+@@ -303,7 +319,12 @@
+ pte_t * pte;
+ struct pte_chain *pte_chain;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page_count(page) != 1 && (!(tsk->flags & PF_PAX_SEGMEXEC) || page_count(page) != 3))
++#else
+ if (page_count(page) != 1)
++#endif
++
+ printk(KERN_ERR "mem_map disagrees with %p at %08lx\n",
+ page, address);
+
+@@ -322,8 +343,18 @@
+ pte_unmap(pte);
+ goto out;
+ }
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page_count(page) == 1) {
++#endif
++
+ lru_cache_add_active(page);
+ flush_dcache_page(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ }
++#endif
++
+ set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(page, prot))));
+ pte_chain = page_add_rmap(page, pte, pte_chain);
+ pte_unmap(pte);
+@@ -350,6 +381,10 @@
+ int i;
+ long arg_size;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *mpnt_m = NULL;
++#endif
++
+ #ifdef CONFIG_STACK_GROWSUP
+ /* Move the argument and environment strings to the bottom of the
+ * stack space.
+@@ -409,6 +444,16 @@
+ if (!mpnt)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && (VM_STACK_FLAGS & VM_MAYEXEC)) {
++ mpnt_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ if (!mpnt_m) {
++ kmem_cache_free(vm_area_cachep, mpnt);
++ return -ENOMEM;
++ }
++ }
++#endif
++
+ if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
+ kmem_cache_free(vm_area_cachep, mpnt);
+ return -ENOMEM;
+@@ -425,6 +470,13 @@
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = STACK_TOP;
+ #endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC))
++ mpnt->vm_page_prot = protection_map[(VM_STACK_FLAGS | VM_EXEC) & 0x7];
++ else
++#endif
++
+ mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
+ mpnt->vm_flags = VM_STACK_FLAGS;
+ mpnt->vm_ops = NULL;
+@@ -434,6 +486,26 @@
+ mpnt->vm_private_data = (void *) 0;
+ insert_vm_struct(mm, mpnt);
+ mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mpnt_m) {
++ *mpnt_m = *mpnt;
++ INIT_LIST_HEAD(&mpnt_m->shared);
++ if (!(VM_STACK_FLAGS & VM_EXEC)) {
++ mpnt_m->vm_flags &= ~(VM_READ | VM_WRITE | VM_EXEC);
++ mpnt_m->vm_page_prot = PAGE_NONE;
++ }
++ mpnt_m->vm_start += SEGMEXEC_TASK_SIZE;
++ mpnt_m->vm_end += SEGMEXEC_TASK_SIZE;
++ mpnt_m->vm_flags |= VM_MIRROR;
++ mpnt->vm_flags |= VM_MIRROR;
++ mpnt_m->vm_private_data = (void *)(mpnt->vm_start - mpnt_m->vm_start);
++ mpnt->vm_private_data = (void *)(mpnt_m->vm_start - mpnt->vm_start);
++ insert_vm_struct(mm, mpnt_m);
++ current->mm->total_vm = (mpnt_m->vm_end - mpnt_m->vm_start) >> PAGE_SHIFT;
++ }
++#endif
++
+ }
+
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+@@ -442,6 +514,15 @@
+ bprm->page[i] = NULL;
+ put_dirty_page(current, page, stack_base,
+ mpnt->vm_page_prot);
++
++#if defined(CONFIG_PAX_SEGMEXEC) && defined(CONFIG_PAX_MPROTECT)
++ if (mpnt_m) {
++ page_cache_get(page);
++ put_dirty_page(current, page, stack_base + SEGMEXEC_TASK_SIZE,
++ mpnt_m->vm_page_prot);
++ }
++#endif
++
+ }
+ stack_base += PAGE_SIZE;
+ }
+@@ -830,6 +911,30 @@
+ }
+ current->comm[i] = '\0';
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ current->flags &= ~PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ current->flags &= ~PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ current->flags &= ~PF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->flags &= ~PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ current->flags &= ~PF_PAX_RANDEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ current->flags &= ~PF_PAX_SEGMEXEC;
++#endif
++
+ flush_thread();
+
+ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
+@@ -908,6 +1013,9 @@
+ if (retval)
+ return retval;
+
++ if (gr_handle_ptrace_exec(bprm->file->f_dentry, bprm->file->f_vfsmnt))
++ return -EACCES;
++
+ memset(bprm->buf,0,BINPRM_BUF_SIZE);
+ return kernel_read(bprm->file,0,bprm->buf,BINPRM_BUF_SIZE);
+ }
+@@ -946,8 +1054,13 @@
+ }
+ }
+
+- current->suid = current->euid = current->fsuid = bprm->e_uid;
+- current->sgid = current->egid = current->fsgid = bprm->e_gid;
++ if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid))
++ current->suid = current->euid = current->fsuid = bprm->e_uid;
++
++ if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid))
++ current->sgid = current->egid = current->fsgid = bprm->e_gid;
++
++ gr_handle_chroot_caps(current);
+
+ task_unlock(current);
+
+@@ -1091,6 +1204,11 @@
+ struct file *file;
+ int retval;
+ int i;
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+
+ sched_balance_exec();
+
+@@ -1100,13 +1218,39 @@
+ if (IS_ERR(file))
+ return retval;
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ allow_write_access(file);
++ fput(file);
++ return -EAGAIN;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ allow_write_access(file);
++ fput(file);
++ return -EACCES;
++ }
++
++
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+ bprm.file = file;
+ bprm.filename = filename;
+ bprm.interp = filename;
+ bprm.sh_bang = 0;
++ bprm.misc = 0;
+ bprm.loader = 0;
+ bprm.exec = 0;
+ bprm.security = NULL;
+@@ -1135,11 +1279,26 @@
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+ if (retval < 0)
+ goto out;
+
+ bprm.exec = bprm.p;
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(&bprm, argv);
++
+ retval = copy_strings(bprm.envc, envp, &bprm);
+ if (retval < 0)
+ goto out;
+@@ -1148,8 +1307,22 @@
+ if (retval < 0)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
++
+ retval = search_binary_handler(&bprm,regs);
+ if (retval >= 0) {
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+ free_arg_pages(&bprm);
+
+ /* execve success */
+@@ -1157,6 +1330,13 @@
+ return retval;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ /* Something went wrong, return the inode and free the argument pages*/
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+@@ -1314,6 +1494,128 @@
+ *out_ptr = 0;
+ }
+
++int pax_check_flags(unsigned long * flags)
++{
++ int retval = 0;
++
++#if !defined(__i386__) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & PF_PAX_SEGMEXEC)
++ {
++ *flags &= ~PF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & PF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & PF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_RANDEXEC)
++
++#ifdef CONFIG_PAX_RANDEXEC
++ && !(*flags & PF_PAX_MPROTECT)
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_RANDEXEC;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char* buffer_exec = (char*)__get_free_page(GFP_ATOMIC);
++ char* buffer_fault = (char*)__get_free_page(GFP_ATOMIC);
++ char* path_exec=NULL;
++ char* path_fault=NULL;
++ unsigned long start=0UL, end=0UL, offset=0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct* vma, * vma_exec=NULL, * vma_fault=NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec) {
++ path_exec = d_path(vma_exec->vm_file->f_dentry, vma_exec->vm_file->f_vfsmnt, buffer_exec, PAGE_SIZE);
++ if (IS_ERR(path_exec))
++ path_exec = "<path too long>";
++ }
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file) {
++ path_fault = d_path(vma_fault->vm_file->f_dentry, vma_fault->vm_file->f_vfsmnt, buffer_fault, PAGE_SIZE);
++ if (IS_ERR(path_fault))
++ path_fault = "<path too long>";
++ } else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++#ifdef CONFIG_GRKERNSEC
++ if (tsk->curr_ip)
++ printk(KERN_ERR "PAX: execution attempt from %u.%u.%u.%u in: %s, %08lx-%08lx %08lx\n", NIPQUAD(tsk->curr_ip), path_fault, start, end, offset);
++ else
++#endif
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid,
++ tsk->uid, tsk->euid, pc, sp);
++ if (buffer_exec) free_page((unsigned long)buffer_exec);
++ if (buffer_fault) free_page((unsigned long)buffer_fault);
++ pax_report_insns(pc, sp);
++ do_coredump(SIGKILL, SIGKILL, regs);
++}
++#endif
++
+ static void zap_threads (struct mm_struct *mm)
+ {
+ struct task_struct *g, *p;
+@@ -1383,6 +1685,7 @@
+ current->signal->group_exit_code = exit_code;
+ coredump_wait(mm);
+
++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
+ if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
+ goto fail_unlock;
+
+@@ -1402,7 +1705,7 @@
+ goto close_fail;
+ if (!file->f_op->write)
+ goto close_fail;
+- if (do_truncate(file->f_dentry, 0) != 0)
++ if (do_truncate(file->f_dentry, 0, file->f_vfsmnt) != 0)
+ goto close_fail;
+
+ retval = binfmt->core_dump(signr, regs, file);
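
Near the top of the fs/exec.c hunk, CONFIG_PAX_RANDUSTACK lowers bprm.p by (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK, i.e. a random offset that is pointer-aligned and strictly smaller than one page. A standalone sketch of just that arithmetic, assuming a 4096-byte page and stubbing pax_get_random_long() with random() purely for illustration (this is not the kernel code itself):

    /* Illustration only: the random value is rounded down to pointer
     * alignment, then masked to stay within one page, so bprm.p moves
     * down by 0..PAGE_SIZE-sizeof(void *) bytes in sizeof(void *) steps. */
    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SIZE 4096UL                /* assumed page size */
    #define PAGE_MASK (~(PAGE_SIZE - 1))

    static unsigned long pax_get_random_long(void)
    {
            return ((unsigned long)random() << 16) ^ random();  /* stand-in */
    }

    int main(void)
    {
            unsigned long p = 0xbffff000UL;  /* pretend top-of-args pointer */
            unsigned long off = (pax_get_random_long() & ~(sizeof(void *) - 1))
                                & ~PAGE_MASK;

            printf("offset = %lu bytes, bprm.p: %#lx -> %#lx\n", off, p, p - off);
            return 0;
    }
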
+diff -urN linux-2.6.5/fs/fcntl.c linux-2.6.5-new/fs/fcntl.c
+--- linux-2.6.5/fs/fcntl.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/fs/fcntl.c 2004-04-14 09:06:29.000000000 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/poll.h>
+ #include <asm/siginfo.h>
+@@ -86,6 +87,9 @@
+ int error;
+
+ error = -EINVAL;
++
++ gr_learn_resource(current, RLIMIT_NOFILE, orig_start, 0);
++
+ if (orig_start >= current->rlim[RLIMIT_NOFILE].rlim_cur)
+ goto out;
+
+@@ -105,6 +109,9 @@
+ }
+
+ error = -EMFILE;
++
++ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
++
+ if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
+ goto out;
+
+@@ -154,6 +161,8 @@
+ struct file * file, *tofree;
+ struct files_struct * files = current->files;
+
++ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
++
+ spin_lock(&files->file_lock);
+ if (!(file = fcheck(oldfd)))
+ goto out_unlock;
+@@ -485,13 +494,15 @@
+ if (pid > 0) {
+ p = find_task_by_pid(pid);
+ if (p) {
+- send_sigio_to_task(p, fown, fd, band);
++ if (!gr_check_protected_task(p))
++ send_sigio_to_task(p, fown, fd, band);
+ }
+ } else {
+ struct list_head *l;
+ struct pid *pidptr;
+ for_each_task_pid(-pid, PIDTYPE_PGID, p, l, pidptr) {
+- send_sigio_to_task(p, fown, fd, band);
++ if (!gr_check_protected_task(p) && !gr_pid_is_chrooted(p))
++ send_sigio_to_task(p, fown, fd, band);
+ }
+ }
+ read_unlock(&tasklist_lock);
+diff -urN linux-2.6.5/fs/namei.c linux-2.6.5-new/fs/namei.c
+--- linux-2.6.5/fs/namei.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/fs/namei.c 2004-04-14 09:09:23.000000000 -0400
+@@ -26,6 +26,7 @@
+ #include <linux/personality.h>
+ #include <linux/security.h>
+ #include <linux/mount.h>
++#include <linux/grsecurity.h>
+ #include <asm/namei.h>
+ #include <asm/uaccess.h>
+
+@@ -410,6 +411,13 @@
+ err = security_inode_follow_link(dentry, nd);
+ if (err)
+ goto loop;
++
++ if (gr_handle_follow_link(dentry->d_parent->d_inode,
++ dentry->d_inode, dentry, nd->mnt)) {
++ err = -EACCES;
++ goto loop;
++ }
++
+ current->link_count++;
+ current->total_link_count++;
+ touch_atime(nd->mnt, dentry);
+@@ -761,6 +769,10 @@
+ break;
+ }
+ return_base:
++ if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) {
++ path_release(nd);
++ return -ENOENT;
++ }
+ return 0;
+ out_dput:
+ dput(next.dentry);
+@@ -1214,7 +1226,7 @@
+ if (!error) {
+ DQUOT_INIT(inode);
+
+- error = do_truncate(dentry, 0);
++ error = do_truncate(dentry, 0, nd->mnt);
+ }
+ put_write_access(inode);
+ if (error)
+@@ -1265,6 +1277,17 @@
+ error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd);
+ if (error)
+ return error;
++
++ if (gr_handle_rawio(nd->dentry->d_inode)) {
++ error = -EPERM;
++ goto exit;
++ }
++
++ if (!gr_acl_handle_open(nd->dentry, nd->mnt, flag)) {
++ error = -EACCES;
++ goto exit;
++ }
++
+ goto ok;
+ }
+
+@@ -1298,9 +1321,19 @@
+
+ /* Negative dentry, just create the file */
+ if (!dentry->d_inode) {
++ if (!gr_acl_handle_creat(dentry, nd->dentry, nd->mnt, flag, mode)) {
++ error = -EACCES;
++ up(&dir->d_inode->i_sem);
++ goto exit_dput;
++ }
++
+ if (!IS_POSIXACL(dir->d_inode))
+ mode &= ~current->fs->umask;
+ error = vfs_create(dir->d_inode, dentry, mode, nd);
++
++ if (!error)
++ gr_handle_create(dentry, nd->mnt);
++
+ up(&dir->d_inode->i_sem);
+ dput(nd->dentry);
+ nd->dentry = dentry;
+@@ -1315,6 +1348,25 @@
+ /*
+ * It already exists.
+ */
++
++ if (gr_handle_rawio(dentry->d_inode)) {
++ error = -EPERM;
++ up(&dir->d_inode->i_sem);
++ goto exit_dput;
++ }
++
++ if (!gr_acl_handle_open(dentry, nd->mnt, flag)) {
++ up(&dir->d_inode->i_sem);
++ error = -EACCES;
++ goto exit_dput;
++ }
++
++ if (gr_handle_fifo(dentry, nd->mnt, dir, flag, acc_mode)) {
++ up(&dir->d_inode->i_sem);
++ error = -EACCES;
++ goto exit_dput;
++ }
++
+ up(&dir->d_inode->i_sem);
+
+ error = -EEXIST;
+@@ -1368,6 +1420,13 @@
+ error = security_inode_follow_link(dentry, nd);
+ if (error)
+ goto exit_dput;
++
++ if (gr_handle_follow_link(dentry->d_parent->d_inode, dentry->d_inode,
++ dentry, nd->mnt)) {
++ error = -EACCES;
++ goto exit_dput;
++ }
++
+ touch_atime(nd->mnt, dentry);
+ error = dentry->d_inode->i_op->follow_link(dentry, nd);
+ dput(dentry);
+@@ -1475,6 +1534,22 @@
+ if (!IS_POSIXACL(nd.dentry->d_inode))
+ mode &= ~current->fs->umask;
+ if (!IS_ERR(dentry)) {
++ if (gr_handle_chroot_mknod(dentry, nd.mnt, mode)) {
++ error = -EPERM;
++ dput(dentry);
++ up(&nd.dentry->d_inode->i_sem);
++ path_release(&nd);
++ goto out;
++ }
++
++ if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
++ error = -EACCES;
++ dput(dentry);
++ up(&nd.dentry->d_inode->i_sem);
++ path_release(&nd);
++ goto out;
++ }
++
+ switch (mode & S_IFMT) {
+ case 0: case S_IFREG:
+ error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd);
+@@ -1492,6 +1567,10 @@
+ default:
+ error = -EINVAL;
+ }
++
++ if (!error)
++ gr_handle_create(dentry, nd.mnt);
++
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1543,9 +1622,19 @@
+ dentry = lookup_create(&nd, 1);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
++ error = 0;
+ if (!IS_POSIXACL(nd.dentry->d_inode))
+ mode &= ~current->fs->umask;
+- error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
++
++ if (!gr_acl_handle_mkdir(dentry, nd.dentry, nd.mnt))
++ error = -EACCES;
++
++ if (!error)
++ error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
++
++ if (!error)
++ gr_handle_create(dentry, nd.mnt);
++
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1629,6 +1718,8 @@
+ char * name;
+ struct dentry *dentry;
+ struct nameidata nd;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ name = getname(pathname);
+ if(IS_ERR(name))
+@@ -1653,7 +1744,21 @@
+ dentry = lookup_hash(&nd.last, nd.dentry);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+- error = vfs_rmdir(nd.dentry->d_inode, dentry);
++ error = 0;
++ if (dentry->d_inode) {
++ if (dentry->d_inode->i_nlink <= 1) {
++ saved_ino = dentry->d_inode->i_ino;
++ saved_dev = dentry->d_inode->i_sb->s_dev;
++ }
++
++ if (!gr_acl_handle_rmdir(dentry, nd.mnt))
++ error = -EACCES;
++ }
++
++ if (!error)
++ error = vfs_rmdir(nd.dentry->d_inode, dentry);
++ if (!error && (saved_dev || saved_ino))
++ gr_handle_delete(saved_ino, saved_dev);
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1707,6 +1812,8 @@
+ struct dentry *dentry;
+ struct nameidata nd;
+ struct inode *inode = NULL;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ name = getname(pathname);
+ if(IS_ERR(name))
+@@ -1722,13 +1829,26 @@
+ dentry = lookup_hash(&nd.last, nd.dentry);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
++ error = 0;
+ /* Why not before? Because we want correct error value */
+ if (nd.last.name[nd.last.len])
+ goto slashes;
+ inode = dentry->d_inode;
+- if (inode)
++ if (inode) {
++ if (inode->i_nlink <= 1) {
++ saved_ino = inode->i_ino;
++ saved_dev = inode->i_sb->s_dev;
++ }
++
++ if (!gr_acl_handle_unlink(dentry, nd.mnt))
++ error = -EACCES;
++
+ atomic_inc(&inode->i_count);
+- error = vfs_unlink(nd.dentry->d_inode, dentry);
++ }
++ if (!error)
++ error = vfs_unlink(nd.dentry->d_inode, dentry);
++ if (!error && (saved_ino || saved_dev))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit2:
+ dput(dentry);
+ }
+@@ -1792,7 +1912,15 @@
+ dentry = lookup_create(&nd, 0);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+- error = vfs_symlink(nd.dentry->d_inode, dentry, from);
++ error = 0;
++ if (!gr_acl_handle_symlink(dentry, nd.dentry, nd.mnt, from))
++ error = -EACCES;
++
++ if (!error)
++ error = vfs_symlink(nd.dentry->d_inode, dentry, from);
++
++ if (!error)
++ gr_handle_create(dentry, nd.mnt);
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1876,7 +2004,20 @@
+ new_dentry = lookup_create(&nd, 0);
+ error = PTR_ERR(new_dentry);
+ if (!IS_ERR(new_dentry)) {
+- error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
++ error = 0;
++ if (gr_handle_hardlink(old_nd.dentry, old_nd.mnt,
++ old_nd.dentry->d_inode,
++ old_nd.dentry->d_inode->i_mode, to))
++ error = -EPERM;
++ if (!gr_acl_handle_link(new_dentry, nd.dentry, nd.mnt,
++ old_nd.dentry, old_nd.mnt, to))
++ error = -EACCES;
++ if (!error)
++ error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
++
++ if (!error)
++ gr_handle_create(new_dentry, nd.mnt);
++
+ dput(new_dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -2098,8 +2239,16 @@
+ if (new_dentry == trap)
+ goto exit5;
+
+- error = vfs_rename(old_dir->d_inode, old_dentry,
++ error = gr_acl_handle_rename(new_dentry, newnd.dentry, newnd.mnt,
++ old_dentry, old_dir->d_inode, oldnd.mnt,
++ newname);
++
++ if (!error)
++ error = vfs_rename(old_dir->d_inode, old_dentry,
+ new_dir->d_inode, new_dentry);
++ if (!error)
++ gr_handle_rename(old_dir->d_inode, newnd.dentry->d_inode, old_dentry,
++ new_dentry, oldnd.mnt, new_dentry->d_inode ? 1 : 0);
+ exit5:
+ dput(new_dentry);
+ exit4:
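
Two return conventions recur throughout the fs/ hunks in this patch: gr_acl_handle_*() helpers return non-zero when the operation is allowed (callers turn a zero return into -EACCES), while gr_handle_*() helpers return non-zero when the operation must be refused (callers turn a non-zero return into -EPERM). The stubs below are hypothetical stand-ins, not the real grsecurity code; they only show how the two styles compose in a call site such as open_namei():

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical stubs: 1 from an _acl_ check means "allowed",
     * 1 from a gr_handle_ check means "deny". */
    static int gr_acl_handle_open_stub(int policy_allows) { return policy_allows; }
    static int gr_handle_rawio_stub(int is_raw_disk_node) { return is_raw_disk_node; }

    static int open_checks(int policy_allows, int is_raw_disk_node)
    {
            if (gr_handle_rawio_stub(is_raw_disk_node))
                    return -EPERM;           /* deny-style hook */
            if (!gr_acl_handle_open_stub(policy_allows))
                    return -EACCES;          /* allow-style hook */
            return 0;
    }

    int main(void)
    {
            /* prints 0, then -EACCES, then -EPERM */
            printf("%d %d %d\n", open_checks(1, 0), open_checks(0, 0),
                   open_checks(1, 1));
            return 0;
    }
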
+diff -urN linux-2.6.5/fs/namespace.c linux-2.6.5-new/fs/namespace.c
+--- linux-2.6.5/fs/namespace.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/fs/namespace.c 2004-04-14 09:06:29.000000000 -0400
+@@ -21,6 +21,8 @@
+ #include <linux/namei.h>
+ #include <linux/security.h>
+ #include <linux/mount.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+
+ extern int __init init_rootfs(void);
+@@ -334,6 +336,8 @@
+ lock_kernel();
+ retval = do_remount_sb(sb, MS_RDONLY, 0, 0);
+ unlock_kernel();
++
++ gr_log_remount(mnt->mnt_devname, retval);
+ }
+ up_write(&sb->s_umount);
+ return retval;
+@@ -362,6 +366,9 @@
+ if (retval)
+ security_sb_umount_busy(mnt);
+ up_write(&current->namespace->sem);
++
++ gr_log_unmount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -780,6 +787,11 @@
+ if (retval)
+ goto dput_out;
+
++ if (gr_handle_chroot_mount(nd.dentry, nd.mnt, dev_name)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
+ if (flags & MS_REMOUNT)
+ retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
+ data_page);
+@@ -792,6 +804,9 @@
+ dev_name, data_page);
+ dput_out:
+ path_release(&nd);
++
++ gr_log_mount(dev_name, dir_name, retval);
++
+ return retval;
+ }
+
+@@ -1014,6 +1029,9 @@
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++ if (gr_handle_chroot_pivot())
++ return -EPERM;
++
+ lock_kernel();
+
+ error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
+diff -urN linux-2.6.5/fs/open.c linux-2.6.5-new/fs/open.c
+--- linux-2.6.5/fs/open.c 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/fs/open.c 2004-04-14 09:06:29.000000000 -0400
+@@ -22,6 +22,7 @@
+ #include <asm/uaccess.h>
+ #include <linux/fs.h>
+ #include <linux/pagemap.h>
++#include <linux/grsecurity.h>
+
+ int vfs_statfs(struct super_block *sb, struct kstatfs *buf)
+ {
+@@ -180,7 +181,7 @@
+ return error;
+ }
+
+-int do_truncate(struct dentry *dentry, loff_t length)
++int do_truncate(struct dentry *dentry, loff_t length, struct vfsmount *mnt)
+ {
+ int err;
+ struct iattr newattrs;
+@@ -189,6 +190,9 @@
+ if (length < 0)
+ return -EINVAL;
+
++ if (!gr_acl_handle_truncate(dentry, mnt))
++ return -EACCES;
++
+ newattrs.ia_size = length;
+ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+ down(&dentry->d_inode->i_sem);
+@@ -247,7 +251,7 @@
+ error = locks_verify_truncate(inode, NULL, length);
+ if (!error) {
+ DQUOT_INIT(inode);
+- error = do_truncate(nd.dentry, length);
++ error = do_truncate(nd.dentry, length, nd.mnt);
+ }
+ put_write_access(inode);
+
+@@ -299,7 +303,7 @@
+
+ error = locks_verify_truncate(inode, file, length);
+ if (!error)
+- error = do_truncate(dentry, length);
++ error = do_truncate(dentry, length, file->f_vfsmnt);
+ out_putf:
+ fput(file);
+ out:
+@@ -378,6 +382,11 @@
+ (error = permission(inode,MAY_WRITE,&nd)) != 0)
+ goto dput_and_out;
+ }
++ if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) {
++ error = -EACCES;
++ goto dput_and_out;
++ }
++
+ down(&inode->i_sem);
+ error = notify_change(nd.dentry, &newattrs);
+ up(&inode->i_sem);
+@@ -431,6 +440,12 @@
+ (error = permission(inode,MAY_WRITE,&nd)) != 0)
+ goto dput_and_out;
+ }
++
++ if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) {
++ error = -EACCES;
++ goto dput_and_out;
++ }
++
+ down(&inode->i_sem);
+ error = notify_change(nd.dentry, &newattrs);
+ up(&inode->i_sem);
+@@ -492,6 +507,10 @@
+ if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
+ && !special_file(nd.dentry->d_inode->i_mode))
+ res = -EROFS;
++
++ if (!res && !gr_acl_handle_access(nd.dentry, nd.mnt, mode))
++ res = -EACCES;
++
+ path_release(&nd);
+ }
+
+@@ -515,6 +534,8 @@
+ if (error)
+ goto dput_and_out;
+
++ gr_log_chdir(nd.dentry, nd.mnt);
++
+ set_fs_pwd(current->fs, nd.mnt, nd.dentry);
+
+ dput_and_out:
+@@ -545,6 +566,13 @@
+ goto out_putf;
+
+ error = permission(inode, MAY_EXEC, NULL);
++
++ if (!error && !gr_chroot_fchdir(dentry, mnt))
++ error = -EPERM;
++
++ if (!error)
++ gr_log_chdir(dentry, mnt);
++
+ if (!error)
+ set_fs_pwd(current->fs, mnt, dentry);
+ out_putf:
+@@ -570,8 +598,16 @@
+ if (!capable(CAP_SYS_CHROOT))
+ goto dput_and_out;
+
++ if (gr_handle_chroot_chroot(nd.dentry, nd.mnt))
++ goto dput_and_out;
++
+ set_fs_root(current->fs, nd.mnt, nd.dentry);
+ set_fs_altroot();
++
++ gr_handle_chroot_caps(current);
++
++ gr_handle_chroot_chdir(nd.dentry, nd.mnt);
++
+ error = 0;
+ dput_and_out:
+ path_release(&nd);
+@@ -600,9 +636,22 @@
+ err = -EPERM;
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ goto out_putf;
++
++ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
++ err = -EACCES;
++ goto out_putf;
++ }
++
+ down(&inode->i_sem);
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
++ err = -EPERM;
++ up(&inode->i_sem);
++ goto out_putf;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ err = notify_change(dentry, &newattrs);
+@@ -634,9 +683,21 @@
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ goto dput_and_out;
+
++ if (!gr_acl_handle_chmod(nd.dentry, nd.mnt, mode)) {
++ error = -EACCES;
++ goto dput_and_out;
++ }
++
+ down(&inode->i_sem);
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(nd.dentry, nd.mnt, mode)) {
++ error = -EACCES;
++ up(&inode->i_sem);
++ goto dput_and_out;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ error = notify_change(nd.dentry, &newattrs);
+@@ -648,7 +709,7 @@
+ return error;
+ }
+
+-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
++static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
+ {
+ struct inode * inode;
+ int error;
+@@ -665,6 +726,12 @@
+ error = -EPERM;
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ goto out;
++
++ if (!gr_acl_handle_chown(dentry, mnt)) {
++ error = -EACCES;
++ goto out;
++ }
++
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ newattrs.ia_valid |= ATTR_UID;
+@@ -690,7 +757,7 @@
+
+ error = user_path_walk(filename, &nd);
+ if (!error) {
+- error = chown_common(nd.dentry, user, group);
++ error = chown_common(nd.dentry, user, group, nd.mnt);
+ path_release(&nd);
+ }
+ return error;
+@@ -703,7 +770,7 @@
+
+ error = user_path_walk_link(filename, &nd);
+ if (!error) {
+- error = chown_common(nd.dentry, user, group);
++ error = chown_common(nd.dentry, user, group, nd.mnt);
+ path_release(&nd);
+ }
+ return error;
+@@ -717,7 +784,8 @@
+
+ file = fget(fd);
+ if (file) {
+- error = chown_common(file->f_dentry, user, group);
++ error = chown_common(file->f_dentry, user,
++ group, file->f_vfsmnt);
+ fput(file);
+ }
+ return error;
+@@ -839,6 +907,7 @@
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
++ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
+ if (fd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
+ goto out;
+
+diff -urN linux-2.6.5/fs/proc/array.c linux-2.6.5-new/fs/proc/array.c
+--- linux-2.6.5/fs/proc/array.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/fs/proc/array.c 2004-04-14 09:15:12.000000000 -0400
+@@ -271,6 +271,19 @@
+ cap_t(p->cap_effective));
+ }
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline char *task_pax(struct task_struct *p, char *buffer)
++{
++ return buffer + sprintf(buffer, "PaX:\t%c%c%c%c%c%c\n",
++ p->flags & PF_PAX_PAGEEXEC ? 'P' : 'p',
++ p->flags & PF_PAX_EMUTRAMP ? 'E' : 'e',
++ p->flags & PF_PAX_MPROTECT ? 'M' : 'm',
++ p->flags & PF_PAX_RANDMMAP ? 'R' : 'r',
++ p->flags & PF_PAX_RANDEXEC ? 'X' : 'x',
++ p->flags & PF_PAX_SEGMEXEC ? 'S' : 's');
++}
++#endif
++
+ extern char *task_mem(struct mm_struct *, char *);
+ int proc_pid_status(struct task_struct *task, char * buffer)
+ {
+@@ -289,9 +302,20 @@
+ #if defined(CONFIG_ARCH_S390)
+ buffer = task_show_regs(task, buffer);
+ #endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ buffer = task_pax(task, buffer);
++#endif
++
+ return buffer - orig;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS (task->flags & PF_PAX_RANDMMAP || \
++ task->flags & PF_PAX_SEGMEXEC || \
++ task->flags & PF_PAX_RANDEXEC)
++#endif
++
+ extern unsigned long task_vsize(struct mm_struct *);
+ int proc_pid_stat(struct task_struct *task, char * buffer)
+ {
+@@ -326,6 +350,19 @@
+
+ wchan = get_wchan(task);
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (PAX_RAND_FLAGS) {
++ eip = 0;
++ esp = 0;
++ wchan = 0;
++ }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ wchan = 0;
++	eip = 0;
++	esp = 0;
++#endif
++
+ sigemptyset(&sigign);
+ sigemptyset(&sigcatch);
+ read_lock(&tasklist_lock);
+@@ -374,9 +411,15 @@
+ vsize,
+ mm ? mm->rss : 0, /* you might want to shift this left 3 */
+ task->rlim[RLIMIT_RSS].rlim_cur,
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS ? 0 : (mm ? mm->start_code : 0),
++ PAX_RAND_FLAGS ? 0 : (mm ? mm->end_code : 0),
++ PAX_RAND_FLAGS ? 0 : (mm ? mm->start_stack : 0),
++#else
+ mm ? mm->start_code : 0,
+ mm ? mm->end_code : 0,
+ mm ? mm->start_stack : 0,
++#endif
+ esp,
+ eip,
+ /* The signal information here is obsolete.
+@@ -416,3 +459,14 @@
+ return sprintf(buffer,"%d %d %d %d %d %d %d\n",
+ size, resident, shared, text, lib, data, 0);
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct *task, char * buffer)
++{
++ int len;
++
++ len = sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->curr_ip));
++ return len;
++}
++#endif
++
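
The fs/proc/array.c hunks above add a "PaX:" line to /proc/<pid>/status (one letter per PF_PAX_* flag, upper-case when the flag is set) and back the /proc/<pid>/ipaddr entry wired up in fs/proc/base.c below when CONFIG_GRKERNSEC_PROC_IPADDR is enabled. A minimal userspace reader, assuming a kernel built with this patchset (the sample output in the comment is illustrative):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char line[256];
            FILE *f = fopen("/proc/self/status", "r");

            if (!f)
                    return 1;
            while (fgets(line, sizeof(line), f))
                    if (!strncmp(line, "PaX:", 4))
                            fputs(line, stdout);   /* e.g. "PaX:  PeMRxs" */
            fclose(f);

            /* Only present with CONFIG_GRKERNSEC_PROC_IPADDR. */
            f = fopen("/proc/self/ipaddr", "r");
            if (f) {
                    if (fgets(line, sizeof(line), f))
                            fputs(line, stdout);
                    fclose(f);
            }
            return 0;
    }
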
+diff -urN linux-2.6.5/fs/proc/base.c linux-2.6.5-new/fs/proc/base.c
+--- linux-2.6.5/fs/proc/base.c 2004-04-03 22:37:25.000000000 -0500
++++ linux-2.6.5-new/fs/proc/base.c 2004-04-14 09:06:29.000000000 -0400
+@@ -32,6 +32,7 @@
+ #include <linux/mount.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/grsecurity.h>
+
+ /*
+ * For hysterical raisins we keep the same inumbers as in the old procfs.
+@@ -67,6 +68,9 @@
+ PROC_TGID_ATTR_EXEC,
+ PROC_TGID_ATTR_FSCREATE,
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ PROC_TGID_IPADDR,
++#endif
+ PROC_TGID_FD_DIR,
+ PROC_TID_INO,
+ PROC_TID_STATUS,
+@@ -117,6 +121,9 @@
+ E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO),
+ E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO),
+ E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ E(PROC_TGID_IPADDR, "ipaddr", S_IFREG|S_IRUSR),
++#endif
+ #ifdef CONFIG_SECURITY
+ E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
+ #endif
+@@ -181,6 +188,9 @@
+ int proc_pid_status(struct task_struct*,char*);
+ int proc_pid_statm(struct task_struct*,char*);
+ int proc_pid_cpu(struct task_struct*,char*);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct*,char*);
++#endif
+
+ static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+ {
+@@ -281,7 +291,7 @@
+ (task == current || \
+ (task->parent == current && \
+ (task->ptrace & PT_PTRACED) && task->state == TASK_STOPPED && \
+- security_ptrace(current,task) == 0))
++ security_ptrace(current,task) == 0 && !gr_handle_proc_ptrace(task)))
+
+ static int may_ptrace_attach(struct task_struct *task)
+ {
+@@ -296,13 +306,15 @@
+ (current->uid != task->uid) ||
+ (current->gid != task->egid) ||
+ (current->gid != task->sgid) ||
+- (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
++ (current->gid != task->gid)) && !capable_nolog(CAP_SYS_PTRACE))
+ goto out;
+ rmb();
+- if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
++ if (!task->mm->dumpable && !capable_nolog(CAP_SYS_PTRACE))
+ goto out;
+ if (security_ptrace(current, task))
+ goto out;
++ if (gr_handle_proc_ptrace(task))
++ goto out;
+
+ retval = 1;
+ out:
+@@ -449,9 +461,22 @@
+
+ static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
+ {
++ int ret;
++ struct task_struct *task;
++
+ if (vfs_permission(inode, mask) != 0)
+ return -EACCES;
+- return proc_check_root(inode);
++ ret = proc_check_root(inode);
++
++ if (ret)
++ return ret;
++
++ task = proc_task(inode);
++
++ if (!task)
++ return 0;
++
++ return gr_acl_handle_procpidmem(task);
+ }
+
+ extern struct seq_operations proc_pid_maps_op;
+@@ -962,6 +987,9 @@
+ inode->i_uid = task->euid;
+ inode->i_gid = task->egid;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#endif
+ security_task_to_inode(task, inode);
+
+ out:
+@@ -990,7 +1018,9 @@
+ if (pid_alive(task)) {
+ if (proc_type(inode) == PROC_TGID_INO || proc_type(inode) == PROC_TID_INO || task_dumpable(task)) {
+ inode->i_uid = task->euid;
++#ifndef CONFIG_GRKERNSEC_PROC_USERGROUP
+ inode->i_gid = task->egid;
++#endif
+ } else {
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+@@ -1334,6 +1364,12 @@
+ inode->i_fop = &proc_info_file_operations;
+ ei->op.proc_read = proc_pid_status;
+ break;
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ case PROC_TGID_IPADDR:
++ inode->i_fop = &proc_info_file_operations;
++ ei->op.proc_read = proc_pid_ipaddr;
++ break;
++#endif
+ case PROC_TID_STAT:
+ case PROC_TGID_STAT:
+ inode->i_fop = &proc_info_file_operations;
+@@ -1583,6 +1619,22 @@
+ if (!task)
+ goto out;
+
++ if (gr_check_hidden_task(task)) {
++ put_task_struct(task);
++ goto out;
++ }
++
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (current->uid && (task->uid != current->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ ) {
++ put_task_struct(task);
++ goto out;
++ }
++#endif
++
+ inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO);
+
+
+@@ -1590,7 +1642,15 @@
+ put_task_struct(task);
+ goto out;
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP;
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+ inode->i_op = &proc_tgid_base_inode_operations;
+ inode->i_fop = &proc_tgid_base_operations;
+ inode->i_nlink = 3;
+@@ -1674,6 +1734,9 @@
+ static int get_tgid_list(int index, unsigned long version, unsigned int *tgids)
+ {
+ struct task_struct *p;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ struct task_struct *tmp = current;
++#endif
+ int nr_tgids = 0;
+
+ index--;
+@@ -1694,6 +1757,18 @@
+ int tgid = p->pid;
+ if (!pid_alive(p))
+ continue;
++ if (gr_pid_is_chrooted(p))
++ continue;
++ if (gr_check_hidden_task(p))
++ continue;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (tmp->uid && (p->uid != tmp->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ )
++ continue;
++#endif
+ if (--index >= 0)
+ continue;
+ tgids[nr_tgids] = tgid;
+diff -urN linux-2.6.5/fs/proc/inode.c linux-2.6.5-new/fs/proc/inode.c
+--- linux-2.6.5/fs/proc/inode.c 2004-04-03 22:38:14.000000000 -0500
++++ linux-2.6.5-new/fs/proc/inode.c 2004-04-14 09:06:29.000000000 -0400
+@@ -205,7 +205,11 @@
+ if (de->mode) {
+ inode->i_mode = de->mode;
+ inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = de->gid;
++#endif
+ }
+ if (de->size)
+ inode->i_size = de->size;
+diff -urN linux-2.6.5/fs/proc/proc_misc.c linux-2.6.5-new/fs/proc/proc_misc.c
+--- linux-2.6.5/fs/proc/proc_misc.c 2004-04-03 22:36:24.000000000 -0500
++++ linux-2.6.5-new/fs/proc/proc_misc.c 2004-04-14 09:06:29.000000000 -0400
+@@ -654,6 +654,8 @@
+ void __init proc_misc_init(void)
+ {
+ struct proc_dir_entry *entry;
++ int gr_mode = 0;
++
+ static struct {
+ char *name;
+ int (*read_proc)(char*,char**,off_t,int,int*,void*);
+@@ -668,9 +670,13 @@
+ #ifdef CONFIG_STRAM_PROC
+ {"stram", stram_read_proc},
+ #endif
++#ifndef CONFIG_GRKERNSEC_PROC_ADD
+ {"devices", devices_read_proc},
++#endif
+ {"filesystems", filesystems_read_proc},
++#ifndef CONFIG_GRKERNSEC_PROC_ADD
+ {"cmdline", cmdline_read_proc},
++#endif
+ #ifdef CONFIG_SGI_DS1286
+ {"rtc", ds1286_read_proc},
+ #endif
+@@ -681,24 +687,39 @@
+ for (p = simple_ones; p->name; p++)
+ create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);
+
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ gr_mode = S_IRUSR;
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ gr_mode = S_IRUSR | S_IRGRP;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ create_proc_read_entry("devices", gr_mode, NULL, &devices_read_proc, NULL);
++ create_proc_read_entry("cmdline", gr_mode, NULL, &cmdline_read_proc, NULL);
++#endif
++
+ proc_symlink("mounts", NULL, "self/mounts");
+
+ /* And now for trickier ones */
+ entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
+ if (entry)
+ entry->proc_fops = &proc_kmsg_operations;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ create_seq_entry("cpuinfo", gr_mode, &proc_cpuinfo_operations);
++ create_seq_entry("slabinfo",gr_mode,&proc_slabinfo_operations);
++#else
+ create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
++ create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
++#endif
+ create_seq_entry("partitions", 0, &proc_partitions_operations);
+ create_seq_entry("stat", 0, &proc_stat_operations);
+ create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+- create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+ create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
+ create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
+ create_seq_entry("diskstats", 0, &proc_diskstats_operations);
+ #ifdef CONFIG_MODULES
+- create_seq_entry("modules", 0, &proc_modules_operations);
++ create_seq_entry("modules", gr_mode, &proc_modules_operations);
+ #endif
+-#ifdef CONFIG_PROC_KCORE
++#if defined(CONFIG_PROC_KCORE)
+ proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
+ if (proc_root_kcore) {
+ proc_root_kcore->proc_fops = &proc_kcore_operations;
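
With CONFIG_GRKERNSEC_PROC_ADD plus one of the CONFIG_GRKERNSEC_PROC_USER/USERGROUP options, the fs/proc/proc_misc.c hunk above registers devices, cmdline, cpuinfo, slabinfo and modules with the restricted gr_mode instead of world-readable modes. A quick userspace probe of the resulting permissions (run as an unprivileged user; which entries fail depends on the options chosen):

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            const char *files[] = { "/proc/devices", "/proc/cmdline",
                                    "/proc/cpuinfo", "/proc/slabinfo",
                                    "/proc/modules" };
            size_t i;

            for (i = 0; i < sizeof(files) / sizeof(files[0]); i++) {
                    FILE *f = fopen(files[i], "r");

                    printf("%-15s %s\n", files[i],
                           f ? "readable" : strerror(errno));
                    if (f)
                            fclose(f);
            }
            return 0;
    }
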
+diff -urN linux-2.6.5/fs/proc/root.c linux-2.6.5-new/fs/proc/root.c
+--- linux-2.6.5/fs/proc/root.c 2004-04-03 22:37:40.000000000 -0500
++++ linux-2.6.5-new/fs/proc/root.c 2004-04-14 09:06:29.000000000 -0400
+@@ -52,13 +52,26 @@
+ return;
+ }
+ proc_misc_init();
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR, 0);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, 0);
++#else
+ proc_net = proc_mkdir("net", 0);
++#endif
+ #ifdef CONFIG_SYSVIPC
+ proc_mkdir("sysvipc", 0);
+ #endif
+ #ifdef CONFIG_SYSCTL
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_sys_root = proc_mkdir_mode("sys", S_IRUSR | S_IXUSR, 0);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_sys_root = proc_mkdir_mode("sys", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, 0);
++#else
+ proc_sys_root = proc_mkdir("sys", 0);
+ #endif
++#endif
+ #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
+ proc_mkdir("sys/fs", 0);
+ proc_mkdir("sys/fs/binfmt_misc", 0);
+@@ -74,7 +87,15 @@
+ #ifdef CONFIG_PROC_DEVICETREE
+ proc_device_tree_init();
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, 0);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, 0);
++#endif
++#else
+ proc_bus = proc_mkdir("bus", 0);
++#endif
+ }
+
+ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+diff -urN linux-2.6.5/fs/proc/task_mmu.c linux-2.6.5-new/fs/proc/task_mmu.c
+--- linux-2.6.5/fs/proc/task_mmu.c 2004-04-03 22:36:24.000000000 -0500
++++ linux-2.6.5-new/fs/proc/task_mmu.c 2004-04-14 09:15:12.000000000 -0400
+@@ -76,8 +76,17 @@
+ return size;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS (task->flags & PF_PAX_RANDMMAP || \
++ task->flags & PF_PAX_SEGMEXEC || \
++ task->flags & PF_PAX_RANDEXEC)
++#endif
++
+ static int show_map(struct seq_file *m, void *v)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ struct task_struct *task = m->private;
++#endif
+ struct vm_area_struct *map = v;
+ struct file *file = map->vm_file;
+ int flags = map->vm_flags;
+@@ -92,8 +101,14 @@
+ }
+
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS ? 0UL : map->vm_start,
++ PAX_RAND_FLAGS ? 0UL : map->vm_end,
++#else
+ map->vm_start,
+ map->vm_end,
++#endif
++
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+diff -urN linux-2.6.5/fs/readdir.c linux-2.6.5-new/fs/readdir.c
+--- linux-2.6.5/fs/readdir.c 2004-04-03 22:37:06.000000000 -0500
++++ linux-2.6.5-new/fs/readdir.c 2004-04-14 09:06:29.000000000 -0400
+@@ -14,6 +14,8 @@
+ #include <linux/fs.h>
+ #include <linux/dirent.h>
+ #include <linux/security.h>
++#include <linux/namei.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+
+@@ -64,6 +66,7 @@
+ struct readdir_callback {
+ struct old_linux_dirent __user * dirent;
+ int result;
++ struct nameidata nd;
+ };
+
+ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -74,6 +77,10 @@
+
+ if (buf->result)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, (unsigned long)dirent,
+@@ -106,6 +113,9 @@
+ buf.result = 0;
+ buf.dirent = dirent;
+
++ buf.nd.dentry = file->f_dentry;
++ buf.nd.mnt = file->f_vfsmnt;
++
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (error >= 0)
+ error = buf.result;
+@@ -133,6 +143,7 @@
+ struct linux_dirent __user * previous;
+ int count;
+ int error;
++ struct nameidata nd;
+ };
+
+ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -145,6 +156,10 @@
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -192,6 +207,9 @@
+ buf.count = count;
+ buf.error = 0;
+
++ buf.nd.dentry = file->f_dentry;
++ buf.nd.mnt = file->f_vfsmnt;
++
+ error = vfs_readdir(file, filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+@@ -217,6 +235,7 @@
+ struct linux_dirent64 __user * previous;
+ int count;
+ int error;
++ struct nameidata nd;
+ };
+
+ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -229,6 +248,10 @@
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -278,6 +301,9 @@
+ buf.count = count;
+ buf.error = 0;
+
++ buf.nd.mnt = file->f_vfsmnt;
++ buf.nd.dentry = file->f_dentry;
++
+ error = vfs_readdir(file, filldir64, &buf);
+ if (error < 0)
+ goto out_putf;
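
The fs/readdir.c hunks above stash the directory's dentry and vfsmount in each callback buffer so gr_acl_handle_filldir() can hide individual entries: returning 0 from a filldir callback silently skips the entry rather than failing the whole getdents call. The sketch below is only a userspace analogue of that filter-in-the-callback idea, using scandir()'s filter hook and a made-up policy (hide dotfiles); it is not the grsecurity check itself:

    #include <dirent.h>
    #include <stdio.h>
    #include <stdlib.h>

    /* Stand-in policy: the real gr_acl_handle_filldir() consults the
     * RBAC tables; here we just hide names starting with a dot. */
    static int visible(const struct dirent *d)
    {
            return d->d_name[0] != '.';
    }

    int main(void)
    {
            struct dirent **ents;
            int i, n = scandir("/proc", &ents, visible, alphasort);

            if (n < 0)
                    return 1;
            for (i = 0; i < n; i++) {
                    puts(ents[i]->d_name);
                    free(ents[i]);
            }
            free(ents);
            return 0;
    }
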
+diff -urN linux-2.6.5/grsecurity/Kconfig linux-2.6.5-new/grsecurity/Kconfig
+--- linux-2.6.5/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/Kconfig 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,864 @@
++#
++# grsecurity configuration
++#
++
++menu "Grsecurity"
++
++config GRKERNSEC
++ bool "Grsecurity"
++ select CRYPTO
++ select CRYPTO_SHA256
++ help
++ If you say Y here, you will be able to configure many features
++ that will enhance the security of your system. It is highly
++ recommended that you say Y here and read through the help
++ for each option so that you fully understand the features and
++ can evaluate their usefulness for your machine.
++
++choice
++ prompt "Security Level"
++ depends GRKERNSEC
++ default GRKERNSEC_CUSTOM
++
++config GRKERNSEC_LOW
++ bool "Low"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_RANDPID
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_RANDISN
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDID
++ select GRKERNSEC_CHROOT_CHDIR
++ help
++ If you choose this option, several of the grsecurity options will
++ be enabled that will give you greater protection against a number
++ of attacks, while assuring that none of your software will have any
++ conflicts with the additional security measures. If you run a lot
++ of unusual software, or you are having problems with the higher
++ security levels, you should say Y here. With this option, the
++ following features are enabled:
++
++ - Linking Restrictions
++ - FIFO Restrictions
++ - Randomized PIDs
++ - Enforcing RLIMIT_NPROC on execve
++ - Restricted dmesg
++ - Randomized IP IDs
++ - Enforced chdir("/") on chroot
++
++config GRKERNSEC_MEDIUM
++ bool "Medium"
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_NO_ACL_FLAGS
++ select GRKERNSEC_PROC_MEMMAP
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_RANDPID
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDID
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_RANDISN
++ select GRKERNSEC_RANDSRC
++ select GRKERNSEC_RANDRPC
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_USERGROUP
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++
++ help
++ If you say Y here, several features in addition to those included
++ in the low additional security level will be enabled. These
++ features provide even more security to your system, though in rare
++ cases they may be incompatible with very old or poorly written
++ software. If you enable this option, make sure that your auth
++ service (identd) is running as gid 10 (usually group wheel).
++ With this option, the following features (in addition to those
++ provided in the low additional security level) will be enabled:
++
++ - Randomized TCP Source Ports
++ - Failed Fork Logging
++ - Time Change Logging
++ - Signal Logging
++ - Deny Mounts in chroot
++ - Deny Double chrooting
++ - Deny Sysctl Writes in chroot
++ - Deny Mknod in chroot
++ - Deny Access to Abstract AF_UNIX Sockets out of chroot
++ - Deny pivot_root in chroot
++ - Denied Writes of /dev/kmem, /dev/mem, and /dev/port
++ - /proc restrictions with special GID set to 10 (usually wheel)
++ - Address Space Layout Randomization (ASLR)
++
++config GRKERNSEC_HIGH
++ bool "High"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_RANDPID
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDID
++ select GRKERNSEC_RANDSRC
++ select GRKERNSEC_RANDRPC
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT_SHMAT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_FCHDIR
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_CHROOT_CAPS
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_CHROOT_FINDTASK
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_MEMMAP
++ select GRKERNSEC_HIDESYM
++ select GRKERNSEC_PROC_USERGROUP
++ select GRKERNSEC_KMEM
++ select GRKERNSEC_RESLOG
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_RANDISN
++ select GRKERNSEC_PROC_ADD
++ select GRKERNSEC_CHROOT_CHMOD
++ select GRKERNSEC_CHROOT_NICE
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++ select PAX_NOEXEC
++ select PAX_MPROTECT
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_NO_ACL_FLAGS
++ select PAX_KERNEXEC
++ select PAX_RANDKSTACK
++ select PAX_RANDEXEC
++ select PAX_SEGMEXEC
++ select PAX_EMUTRAMP
++ select PAX_EMUSIGRT
++ select GRKERNSEC_AUDIT_MOUNT
++ help
++ If you say Y here, many of the features of grsecurity will be
++ enabled, which will protect you against many kinds of attacks
++ against your system. The heightened security comes at a cost
++ of an increased chance of incompatibilities with rare software
++ on your machine. Since this security level enables PaX, you should
++ view <http://pax.grsecurity.net> and read about the PaX
++ project. While you are there, download chpax and run it on
++ binaries that cause problems with PaX. Also remember that
++ since the /proc restrictions are enabled, you must run your
++ identd as group wheel (GID 10). This security level enables the
++ following features in addition to those listed in the low and
++ medium security levels:
++
++ - Additional /proc Restrictions
++ - Chmod Restrictions in chroot
++ - No Signals, Ptrace, or Viewing of Processes Outside of chroot
++ - Capability Restrictions in chroot
++ - Deny fchdir out of chroot
++ - Priority Restrictions in chroot
++ - Segmentation-based Implementation of PaX
++ - Mprotect Restrictions
++ - Removal of Addresses from /proc/<pid>/[maps|stat]
++ - Kernel Stack Randomization
++ - Mount/Unmount/Remount Logging
++ - Kernel Symbol Hiding
++
++config GRKERNSEC_CUSTOM
++ bool "Custom"
++ help
++ If you say Y here, you will be able to configure every grsecurity
++ option, which allows you to enable many more features that aren't
++ covered in the basic security levels. These additional features
++ include TPE, socket restrictions, and the sysctl system for
++ grsecurity. It is advised that you read through the help for
++ each option to determine its usefulness in your situation.
++
++endchoice
++
++menu "Address Space Protection"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
++ help
++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++ be written to via mmap or otherwise to modify the running kernel.
++ /dev/port will also not be allowed to be opened. If you have module
++ support disabled, enabling this will close up four ways that are
++ currently used to insert malicious code into the running kernel.
++ Even with all these features enabled, we still highly recommend that
++ you use the ACL system, as it is still possible for an attacker to
++ modify the running kernel through privileged I/O granted by ioperm/iopl.
++ If you are not using XFree86, you may be able to stop this additional
++ case by enabling the 'Disable privileged I/O' option. Though nothing
++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
++ but only to video memory, which is the only writing we allow in this
++ case. If /dev/kmem or /dev/mem is mmaped without PROT_WRITE, the mapping
++ cannot later be mprotected with PROT_WRITE.
++ Enabling this feature could make certain apps like VMWare stop working,
++ as they need to write to other locations in /dev/mem.
++ It is highly recommended that you say Y here if you meet all the
++ conditions above.
++
++config GRKERNSEC_IO
++ bool "Disable privileged I/O"
++ depends on X86
++ select RTC
++ help
++ If you say Y here, all ioperm and iopl calls will return an error.
++ Ioperm and iopl can be used to modify the running kernel.
++ Unfortunately, some programs need this access to operate properly,
++ the most notable of which are XFree86 and hwclock. hwclock can be
++ remedied by having RTC support in the kernel, so CONFIG_RTC is
++ enabled if this option is enabled, to ensure that hwclock operates
++ correctly. XFree86 still will not operate correctly with this option
++ enabled, so DO NOT CHOOSE Y IF YOU USE XFree86. If you use XFree86
++ and you still want to protect your kernel against modification,
++ use the ACL system.
++
++config GRKERNSEC_PROC_MEMMAP
++ bool "Remove addresses from /proc/<pid>/[maps|stat]"
++ help
++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++ give no information about the addresses of a task's mappings if
++ PaX features that rely on random addresses are enabled on the task.
++ If you use PaX it is greatly recommended that you say Y here as it
++ closes up a hole that makes the full ASLR useless for suid
++ binaries.
++
++config GRKERNSEC_HIDESYM
++ bool "Hide kernel symbols"
++ help
++ If you say Y here, getting information on loaded modules, and
++ displaying all kernel symbols through a syscall will be restricted
++ to users with CAP_SYS_MODULE. This option is only effective
++ provided the following conditions are met:
++ 1) The kernel using grsecurity is not precompiled by some distribution
++ 2) You are using the ACL system and hiding other files such as your
++ kernel image and System.map
++ 3) You have the additional /proc restrictions enabled, which removes
++ /proc/kcore
++ If the above conditions are met, this option will help provide useful
++ protection against local and remote kernel exploitation of
++ overflows and arbitrary read/write vulnerabilities.
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_ACL_HIDEKERN
++ bool "Hide kernel processes"
++ help
++ If you say Y here, when the RBAC system is enabled via gradm -E,
++ an additional ACL will be passed to the kernel that hides all kernel
++ processes. These processes will only be viewable by the authenticated
++ admin, or processes that have viewing access set.
++
++config GRKERNSEC_ACL_MAXTRIES
++ int "Maximum tries before password lockout"
++ default 3
++ help
++ This option enforces the maximum number of times a user can attempt
++ to authorize themselves with the grsecurity ACL system before being
++ denied the ability to attempt authorization again for a specified time.
++ The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++ int "Time to wait after max password tries, in seconds"
++ default 30
++ help
++ This option specifies the time the user must wait after attempting to
++ authorize to the ACL system with the maximum number of invalid
++ passwords. The higher the number, the harder it will be to brute-force
++ a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++ bool "Proc restrictions"
++ help
++ If you say Y here, the permissions of the /proc filesystem
++ will be altered to enhance system security and privacy. Depending
++ upon the options you choose, you can either restrict users to see
++ only the processes they themselves run, or choose a group that can
++ view all processes and files normally restricted to root if you choose
++ the "restrict to user only" option. NOTE: If you're running identd as
++ a non-root user, you will have to run it as the group you specify here.
++
++config GRKERNSEC_PROC_USER
++ bool "Restrict /proc to user only"
++ depends on GRKERNSEC_PROC
++ help
++ If you say Y here, non-root users will only be able to view their own
++ processes, and will be restricted from viewing network-related
++ information and kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++ bool "Allow special group"
++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++ help
++ If you say Y here, you will be able to select a group that will be
++ able to view all processes, network-related information, and
++ kernel and symbol information. This option is useful if you want
++ to run identd as a non-root user.
++
++config GRKERNSEC_PROC_GID
++ int "GID for special group"
++ depends on GRKERNSEC_PROC_USERGROUP
++ default 1001
++
++config GRKERNSEC_PROC_ADD
++ bool "Additional restrictions"
++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++ help
++ If you say Y here, additional restrictions will be placed on
++ /proc that keep normal users from viewing cpu and device information.
++
++config GRKERNSEC_LINK
++ bool "Linking restrictions"
++ help
++ If you say Y here, /tmp race exploits will be prevented, since users
++ will no longer be able to follow symlinks owned by other users in
++ world-writable +t directories (i.e. /tmp), unless the owner of the
++ symlink is the owner of the directory. Users will also not be
++ able to hardlink to files they do not own. If the sysctl option is
++ enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_FIFO
++ bool "FIFO restrictions"
++ help
++ If you say Y here, users will not be able to write to FIFOs they don't
++ own in world-writable +t directories (i.e. /tmp), unless the owner of
++ the FIFO is also the owner of the directory it's held in. If the sysctl
++ option is enabled, a sysctl option with name "fifo_restrictions" is
++ created.
++
++config GRKERNSEC_CHROOT
++ bool "Chroot jail restrictions"
++ help
++ If you say Y here, you will be able to choose several options that will
++ make breaking out of a chrooted jail much more difficult. If you
++ encounter no software incompatibilities with the following options, it
++ is recommended that you enable each one.
++
++config GRKERNSEC_CHROOT_MOUNT
++ bool "Deny mounts"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ mount or remount filesystems. If the sysctl option is enabled, a
++ sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++ bool "Deny double-chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chroot
++ again outside the chroot. This is a widely used method of breaking
++ out of a chroot jail and should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name
++ "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++ bool "Deny pivot_root in chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to use
++ a function called pivot_root() that was introduced in Linux 2.3.41. It
++ works similarly to chroot in that it changes the root filesystem. This
++ function could be misused in a chrooted process to attempt to break out
++ of the chroot, and therefore should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_pivot" is
++ created.
++
++config GRKERNSEC_CHROOT_CHDIR
++ bool "Enforce chdir(\"/\") on all chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the current working directory of all newly-chrooted
++ applications will be set to the root directory of the chroot.
++ The man page on chroot(2) states:
++ Note that this call does not change the current working
++ directory, so that `.' can be outside the tree rooted at
++ `/'. In particular, the super-user can escape from a
++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++ It is recommended that you say Y here, since it's not known to break
++ any software. If the sysctl option is enabled, a sysctl option with
++ name "chroot_enforce_chdir" is created.
++
++config GRKERNSEC_CHROOT_CHMOD
++ bool "Deny (f)chmod +s"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chmod
++ or fchmod files to make them have suid or sgid bits. This protects
++ against another published method of breaking a chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_chmod" is
++ created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++ bool "Deny fchdir out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, a well-known method of breaking chroots by fchdir'ing
++ to a file descriptor of the chrooting process that points to a directory
++ outside the filesystem will be stopped. If the sysctl option
++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++ bool "Deny mknod"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be allowed to
++ mknod. The problem with using mknod inside a chroot is that it
++ would allow an attacker to create a device entry that is the same
++ as one on the physical root of your system, which could be anything
++ from the console device to a device for your hard drive (which
++ they could then use to wipe the drive or steal data). It is recommended
++ that you say Y here, unless you run into software incompatibilities.
++ If the sysctl option is enabled, a sysctl option with name
++ "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++ bool "Deny shmat() out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to attach
++ to shared memory segments that were created outside of the chroot jail.
++ It is recommended that you say Y here. If the sysctl option is enabled,
++ a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ connect to abstract (meaning not belonging to a filesystem) Unix
++ domain sockets that were bound outside of a chroot. It is recommended
++ that you say Y here. If the sysctl option is enabled, a sysctl option
++ with name "chroot_deny_unix" is created.
++
++config GRKERNSEC_CHROOT_FINDTASK
++ bool "Protect outside processes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ kill, send signals with fcntl, ptrace, capget, setpgid, getpgid,
++ getsid, or view any process outside of the chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_findtask" is
++ created.
++
++config GRKERNSEC_CHROOT_NICE
++ bool "Restrict priority changes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to raise
++ the priority of processes in the chroot, or alter the priority of
++ processes outside the chroot. This provides more security than simply
++ removing CAP_SYS_NICE from the process' capability set. If the
++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++ is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++ bool "Deny sysctl writes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, an attacker in a chroot will not be able to
++ write to sysctl entries, either by sysctl(2) or through a /proc
++ interface. It is strongly recommended that you say Y here. If the
++ sysctl option is enabled, a sysctl option with name
++ "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++ bool "Capability restrictions"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the capabilities on all root processes within a
++ chroot jail will be lowered to stop module insertion, raw i/o,
++ system and net admin tasks, rebooting the system, modifying immutable
++ files, modifying IPC owned by another, and changing the system time.
++ This is left an option because it can break some apps. Disable this
++ if your chrooted apps are having problems performing those kinds of
++ tasks. If the sysctl option is enabled, a sysctl option with
++ name "chroot_caps" is created.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++ bool "Single group for auditing"
++ help
++ If you say Y here, the exec, chdir, (un)mount, and ipc logging features
++ will only operate on a group you specify. This option is recommended
++ if you only want to watch certain users instead of having a large
++ amount of logs from the entire system. If the sysctl option is enabled,
++ a sysctl option with name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++ int "GID for auditing"
++ depends on GRKERNSEC_AUDIT_GROUP
++ default 1007
++
++config GRKERNSEC_EXECLOG
++ bool "Exec logging"
++ help
++ If you say Y here, all execve() calls will be logged (since the
++ other exec*() calls are frontends to execve(), all execution
++ will be logged). Useful for shell-servers that like to keep track
++ of their users. If the sysctl option is enabled, a sysctl option with
++ name "exec_logging" is created.
++ WARNING: This option when enabled will produce a LOT of logs, especially
++ on an active system.
++
++config GRKERNSEC_RESLOG
++ bool "Resource logging"
++ help
++ If you say Y here, all attempts to overstep resource limits will
++ be logged with the resource name, the requested size, and the current
++ limit. It is highly recommended that you say Y here.
++
++config GRKERNSEC_CHROOT_EXECLOG
++ bool "Log execs within chroot"
++ help
++ If you say Y here, all executions inside a chroot jail will be logged
++ to syslog. This can cause a large amount of logs if certain
++ applications (e.g. djb's daemontools) are installed on the system, and
++ is therefore left as an option. If the sysctl option is enabled, a
++ sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++ bool "Chdir logging"
++ help
++ If you say Y here, all chdir() calls will be logged. If the sysctl
++ option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++ bool "(Un)Mount logging"
++ help
++ If you say Y here, all mounts and unmounts will be logged. If the
++ sysctl option is enabled, a sysctl option with name "audit_mount" is
++ created.
++
++config GRKERNSEC_AUDIT_IPC
++ bool "IPC logging"
++ help
++ If you say Y here, creation and removal of message queues, semaphores,
++ and shared memory will be logged. If the sysctl option is enabled, a
++ sysctl option with name "audit_ipc" is created.
++
++config GRKERNSEC_SIGNAL
++ bool "Signal logging"
++ help
++ If you say Y here, certain important signals will be logged, such as
++ SIGSEGV, which will as a result inform you of when an error in a program
++ occurred, which in some cases could mean a possible exploit attempt.
++ If the sysctl option is enabled, a sysctl option with name
++ "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++ bool "Fork failure logging"
++ help
++ If you say Y here, all failed fork() attempts will be logged.
++ This could suggest a fork bomb, or someone attempting to overstep
++ their process limit. If the sysctl option is enabled, a sysctl option
++ with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++ bool "Time change logging"
++ help
++ If you say Y here, any changes of the system clock will be logged.
++ If the sysctl option is enabled, a sysctl option with name
++ "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++ bool "/proc/<pid>/ipaddr support"
++ help
++ If you say Y here, a new entry will be added to each /proc/<pid>
++ directory that contains the IP address of the person using the task.
++ The IP is carried across local TCP and AF_UNIX stream sockets.
++ This information can be useful for IDS/IPSes to perform remote response
++ to a local attack. The entry is readable by only the owner of the
++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
++ the RBAC system), and thus does not create privacy concerns.
++
++config GRKERNSEC_AUDIT_TEXTREL
++ bool 'ELF text relocations logging (READ HELP)'
++ depends on PAX_MPROTECT
++ help
++ If you say Y here, text relocations will be logged with the filename
++ of the offending library or binary. The purpose of the feature is
++ to help Linux distribution developers get rid of libraries and
++ binaries that need text relocations which hinder the future progress
++ of PaX. Only Linux distribution developers should say Y here, and
++ never on a production machine, as this option creates an information
++ leak that could aid an attacker in defeating the randomization of
++ a single memory region. If the sysctl option is enabled, a sysctl
++ option with name "audit_textrel" is created.
++
++endmenu
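The /proc/&lt;pid&gt;/ipaddr entry described above is presumably a plain-text entry, so monitoring tools can read it directly. A minimal userspace reader, assuming the entry exists and the caller owns the target process (the exact output format is not specified here):

#include <stdio.h>

int main(int argc, char **argv)
{
	char path[64], addr[64];
	FILE *f;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
	f = fopen(path, "r");              /* readable only by the task owner */
	if (!f) {
		perror(path);
		return 1;
	}
	if (fgets(addr, sizeof(addr), f))  /* assumed to hold the source IP */
		printf("task %s originated from %s", argv[1], addr);
	fclose(f);
	return 0;
}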
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_EXECVE
++ bool "Enforce RLIMIT_NPROC on execs"
++ help
++ If you say Y here, users with a resource limit on processes will
++ have the value checked during execve() calls. The current system
++ only checks the system limit during fork() calls. If the sysctl option
++ is enabled, a sysctl option with name "execve_limiting" is created.
++
++config GRKERNSEC_DMESG
++ bool "Dmesg(8) restriction"
++ help
++ If you say Y here, non-root users will not be able to use dmesg(8)
++ to view up to the last 4kb of messages in the kernel's log buffer.
++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
++ created.
++
++config GRKERNSEC_RANDPID
++ bool "Randomized PIDs"
++ help
++ If you say Y here, all PIDs created on the system will be
++ pseudo-randomly generated. Combined with the /proc restrictions, this
++ is extremely effective at keeping an attacker from guessing the PIDs
++ of daemons and other processes. PIDs are also used in some cases as
++ part of a naming scheme for temporary files, so this option keeps
++ those filenames from being predicted as well. Code is also used to
++ ensure that PID numbers are not reused too soon. If the sysctl
++ option is enabled, a sysctl option with name "rand_pids" is created.
++
++config GRKERNSEC_TPE
++ bool "Trusted Path Execution (TPE)"
++ help
++ If you say Y here, you will be able to choose a gid to add to the
++ supplementary groups of users you want to mark as "untrusted."
++ These users will not be able to execute any files that are not in
++ root-owned directories writable only by root. If the sysctl option
++ is enabled, a sysctl option with name "tpe" is created.
++
++config GRKERNSEC_TPE_ALL
++ bool "Partially restrict non-root users"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, all non-root users other than the ones in the
++ group specified in the main TPE option will only be allowed to
++ execute files in directories they own that are not group or
++ world-writable, or in directories owned by root and writable only by
++ root. If the sysctl option is enabled, a sysctl option with name
++ "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_GID
++ int "GID for untrusted users"
++ depends on GRKERNSEC_TPE
++ default 1005
++ help
++ Here you can choose the GID to enable trusted path protection for.
++ Remember to add the users you want protection enabled for to the GID
++ specified here. If the sysctl option is enabled, whatever you choose
++ here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "tpe_gid" is created.
++
++endmenu
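The TPE options above boil down to a directory trust test: an untrusted user may only execute files from directories owned by root and writable only by root. The following is an illustrative userspace sketch of that stated rule, not the patch's in-kernel implementation:

#include <stdio.h>
#include <sys/stat.h>

/* trusted per the TPE description: owned by root, no group/other write */
static int dir_is_trusted(const struct stat *st)
{
	return st->st_uid == 0 && !(st->st_mode & (S_IWGRP | S_IWOTH));
}

int main(int argc, char **argv)
{
	struct stat st;

	if (argc != 2 || stat(argv[1], &st) != 0 || !S_ISDIR(st.st_mode)) {
		fprintf(stderr, "usage: %s <directory>\n", argv[0]);
		return 1;
	}
	printf("%s would %sbe a trusted path\n", argv[1],
	       dir_is_trusted(&st) ? "" : "not ");
	return 0;
}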
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_RANDNET
++ bool "Larget entropy pools"
++ help
++ If you say Y here, the entropy pools used for many features of Linux
++ and grsecurity will be doubled in size. Since several grsecurity
++ features use additional randomness, it is recommended that you say Y
++ here. Saying Y here has an effect similar to modifying
++ /proc/sys/kernel/random/poolsize.
++
++config GRKERNSEC_RANDISN
++ bool "Truly random TCP ISN selection"
++ help
++ If you say Y here, Linux's default selection of TCP Initial Sequence
++ Numbers (ISNs) will be replaced with that of OpenBSD. Linux uses
++ an MD4 hash based on the connection plus a time value to create the
++ ISN, while OpenBSD's selection is random. If the sysctl option is
++ enabled, a sysctl option with name "rand_isns" is created.
++
++config GRKERNSEC_RANDID
++ bool "Randomized IP IDs"
++ help
++ If you say Y here, the IP ID field on all outgoing packets
++ will be randomized. This hinders OS fingerprinters and
++ keeps your machine from being used as a bounce for an untraceable
++ portscan. IDs are used for fragmented packets; fragments belonging
++ to the same packet have the same ID. By default Linux only
++ increments the ID value on each packet sent to an individual host.
++ We use a port of the OpenBSD random IP ID code to achieve the
++ randomness while keeping the chance of ID duplicates near zero.
++ If the sysctl option is enabled, a sysctl option with name
++ "rand_ip_ids" is created.
++
++config GRKERNSEC_RANDSRC
++ bool "Randomized TCP source ports"
++ default n if GRKERNSEC_LOW || GRKERNSEC_MID
++ default y if GRKERNSEC_HIGH
++ help
++ If you say Y here, situations where a source port is generated on the
++ fly for the TCP protocol (i.e. with connect()) will be altered so that
++ the source port is chosen at random, instead of by a simple incrementing
++ algorithm. If the sysctl option is enabled, a sysctl option with name
++ "rand_tcp_src_ports" is created.
++
++config GRKERNSEC_RANDRPC
++ bool "Randomized RPC XIDs"
++ help
++ If you say Y here, the method of determining XIDs for RPC requests will
++ be randomized, instead of using Linux's default behavior of simply
++ incrementing the XID. If you want your RPC connections to be more
++ secure, say Y here. If the sysctl option is enabled, a sysctl option
++ with name "rand_rpc" is created.
++
++config GRKERNSEC_SOCKET
++ bool "Socket restrictions"
++ help
++ If you say Y here, you will be able to choose from several options.
++ If you assign a GID on your system and add it to the supplementary
++ groups of users you want to restrict socket access to, this patch
++ will enforce up to three restrictions, based on the option(s) you choose.
++
++config GRKERNSEC_SOCKET_ALL
++ bool "Deny any sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine or run server
++ applications from your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++ int "GID to deny all sockets for"
++ depends on GRKERNSEC_SOCKET_ALL
++ default 1004
++ help
++ Here you can choose the GID to disable socket access for. Remember to
++ add the users you want socket access disabled for to the GID
++ specified here. If the sysctl option is enabled, whatever you choose
++ here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++ bool "Deny client sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine, but will be
++ able to run servers. If this option is enabled, all users in the group
++ you specify will have to use passive mode when initiating ftp transfers
++ from the shell on your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++ int "GID to deny client sockets for"
++ depends on GRKERNSEC_SOCKET_CLIENT
++ default 1003
++ help
++ Here you can choose the GID to disable client socket access for.
++ Remember to add the users you want client socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, whatever you
++ choose here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++ bool "Deny server sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to run server applications from your machine. If the sysctl
++ option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++ int "GID to deny server sockets for"
++ depends on GRKERNSEC_SOCKET_SERVER
++ default 1002
++ help
++ Here you can choose the GID to disable server socket access for.
++ Remember to add the users you want server socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, whatever you
++ choose here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "socket_server_gid" is created.
++
++endmenu
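All of the socket restriction options above key off supplementary group membership: an administrator creates a group, sets its GID in the corresponding option (or sysctl), and adds the users to be restricted. A small userspace illustration of that membership test, using the default GID of the "deny all sockets" option as an assumed value:

#include <stdio.h>
#include <unistd.h>
#include <limits.h>

#define RESTRICTED_GID 1004   /* default of GRKERNSEC_SOCKET_ALL_GID */

int main(void)
{
	gid_t groups[NGROUPS_MAX];
	int i, n = getgroups(NGROUPS_MAX, groups);

	for (i = 0; i < n; i++)
		if (groups[i] == RESTRICTED_GID) {
			printf("this user would be denied socket access\n");
			return 0;
		}
	printf("this user is not in the restricted group\n");
	return 0;
}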
++menu "Sysctl support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++ bool "Sysctl support"
++ help
++ If you say Y here, you will be able to change the options that
++ grsecurity runs with at bootup, without having to recompile your
++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
++ to enable (1) or disable (0) various features. All the sysctl entries
++ are mutable until the "grsec_lock" entry is set to a non-zero value.
++ All features are disabled by default. Please note that this option could
++ reduce the effectiveness of the added security of this patch if an ACL
++ system is not put in place. Your init scripts should be read-only, and
++ root should not have access to adding modules or performing raw i/o
++ operations. All options should be set at startup, and the grsec_lock
++ entry should be set to a non-zero value after all the options are set.
++ *THIS IS EXTREMELY IMPORTANT*
++
++endmenu
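As the help text above stresses, every grsecurity sysctl should be set at startup and then locked. A minimal sketch of the final locking step, assuming the /proc path implied by the description:

#include <stdio.h>

int main(void)
{
	/* write a non-zero value so the other grsecurity entries become immutable */
	FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "w");

	if (!f) {
		perror("grsec_lock");
		return 1;
	}
	fputs("1\n", f);
	fclose(f);
	return 0;
}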
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++ int "Seconds in between log messages (minimum)"
++ default 10
++ help
++ This option allows you to enforce the number of seconds between
++ grsecurity log messages. The default should be suitable for most
++ people; however, if you choose to change it, pick a value small enough
++ to allow informative logs to be produced, but large enough to
++ prevent flooding.
++
++config GRKERNSEC_FLOODBURST
++ int "Number of messages in a burst (maximum)"
++ default 4
++ help
++ This option allows you to choose the maximum number of messages allowed
++ within the flood time interval you chose in a separate option. The
++ default should be suitable for most people; however, if you find that
++ many of your logs are being interpreted as flooding, you may want to
++ raise this value.
++
++endmenu
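The two logging options above implement a simple window-and-burst policy: at most FLOODBURST messages are accepted per FLOODTIME-second window. A standalone model of that policy (illustration only, using the default values):

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10  /* seconds, matches the default above */
#define FLOODBURST  4  /* messages per window, matches the default above */

/* returns non-zero if a message may be logged in the current window */
static int should_log(void)
{
	static time_t window_start;
	static int count;
	time_t now = time(NULL);

	if (now - window_start >= FLOODTIME) {
		window_start = now;
		count = 0;
	}
	return count++ < FLOODBURST;
}

int main(void)
{
	int i, logged = 0;

	for (i = 0; i < 20; i++)
		if (should_log())
			logged++;
	printf("%d of 20 messages logged in one window\n", logged);
	return 0;
}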
++
++endmenu
+diff -urN linux-2.6.5/grsecurity/Makefile linux-2.6.5-new/grsecurity/Makefile
+--- linux-2.6.5/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/Makefile 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,21 @@
++# grsecurity's ACL system was originally written in 2001 by Michael Dalton;
++# during 2001, 2002, and 2003 it was completely redesigned by
++# Brad Spengler
++#
++# All code in this directory and various hooks inserted throughout the kernel
++# are copyright Brad Spengler, and released under the GPL, unless otherwise
++# noted (as in obsd_rand.c)
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++ grsec_mount.o grsec_rand.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
++ grsec_time.o grsec_tpe.o grsec_ipc.o grsec_link.o grsec_textrel.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o obsd_rand.o \
++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++ gracl_learn.o
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
+diff -urN linux-2.6.5/grsecurity/gracl.c linux-2.6.5-new/grsecurity/gracl.c
+--- linux-2.6.5/grsecurity/gracl.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,3340 @@
++/*
++ * grsecurity/gracl.c
++ * Copyright Brad Spengler 2001, 2002, 2003
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/capability.h>
++#include <linux/sysctl.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/percpu.h>
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++static struct acl_role_db acl_role_set;
++static struct acl_role_label *role_list_head;
++static struct name_db name_set;
++static struct name_db inodev_set;
++
++/* for keeping track of userspace pointers used for subjects, so we
++ can share references in the kernel as well
++*/
++static struct acl_subj_map_db subj_map_set;
++
++static struct acl_role_label *default_role;
++
++static u16 acl_sp_role_value;
++
++extern char *gr_shared_page[4];
++static DECLARE_MUTEX(gr_dev_sem);
++rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED;
++
++struct gr_arg *gr_usermode;
++
++static unsigned long gr_status = GR_STATUS_INIT;
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt);
++#endif
++
++extern char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
++ struct dentry *root, struct vfsmount *rootmnt,
++ char *buffer, int buflen);
++
++unsigned char *gr_system_salt;
++unsigned char *gr_system_sum;
++
++static struct sprole_pw **acl_special_roles = NULL;
++static __u16 num_sprole_pws = 0;
++
++static struct acl_role_label *kernel_role = NULL;
++
++/* The following are used to keep a place held in the hash table when we move
++ entries around. They can be replaced during insert. */
++
++static struct acl_subject_label *deleted_subject;
++static struct acl_object_label *deleted_object;
++static struct name_entry *deleted_inodev;
++
++/* for keeping track of the last and final allocated subjects, since
++ nested subject parsing is tricky
++*/
++static struct acl_subject_label *s_last = NULL;
++static struct acl_subject_label *s_final = NULL;
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern void gr_remove_uid(uid_t uid);
++extern int gr_find_uid(uid_t uid);
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++ return (gr_status & GR_READY);
++}
++
++__inline__ int
++gr_acl_tpe_check(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++ if (current->role->roletype & GR_ROLE_TPE)
++ return 1;
++ else
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++ if (inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO) &&
++ ((gr_status & GR_READY)
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ || (grsec_enable_chroot_caps && proc_is_chrooted(current))
++#endif
++ ))
++ return 1;
++ return 0;
++}
++
++
++static __inline__ int
++gr_streq(const char *a, const char *b, const __u16 lena, const __u16 lenb)
++{
++ int i;
++ unsigned long *l1;
++ unsigned long *l2;
++ unsigned char *c1;
++ unsigned char *c2;
++ int num_longs;
++
++ if (likely(lena != lenb))
++ return 0;
++
++ l1 = (unsigned long *)a;
++ l2 = (unsigned long *)b;
++
++ num_longs = lena / sizeof(unsigned long);
++
++ for (i = num_longs; i--; l1++, l2++) {
++ if (unlikely(*l1 != *l2))
++ return 0;
++ }
++
++ c1 = (unsigned char *) l1;
++ c2 = (unsigned char *) l2;
++
++ i = lena - (num_longs * sizeof(unsigned long));
++
++ for (; i--; c1++, c2++) {
++ if (unlikely(*c1 != *c2))
++ return 0;
++ }
++
++ return 1;
++}
++
++static __inline__ char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct dentry *our_dentry;
++ struct vfsmount *our_mount;
++ struct vfsmount *rootmnt;
++ struct dentry *root;
++
++ our_dentry = (struct dentry *) dentry;
++ our_mount = (struct vfsmount *) vfsmnt;
++
++ read_lock(&child_reaper->fs->lock);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ root = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++
++ res = __d_path(our_dentry, our_mount, root, rootmnt, buf, buflen);
++ if (unlikely(IS_ERR(res)))
++ res = strcpy(buf, "<path too long>");
++ dput(root);
++ mntput(rootmnt);
++ return res;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++static __inline__ char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct dentry *our_dentry;
++ struct vfsmount *our_mount;
++ struct vfsmount *rootmnt;
++ struct dentry *root;
++
++ our_dentry = (struct dentry *) dentry;
++ our_mount = (struct vfsmount *) vfsmnt;
++
++ read_lock(&child_reaper->fs->lock);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ root = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++
++ spin_lock(&dcache_lock);
++ res = __d_path(our_dentry, our_mount, root, rootmnt, buf, buflen);
++ spin_unlock(&dcache_lock);
++ if (unlikely(IS_ERR(res)))
++ res = strcpy(buf, "<path too long>");
++ dput(root);
++ mntput(rootmnt);
++ return res;
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++__inline__ __u32
++to_gr_audit(const __u32 reqmode)
++{
++ __u32 retmode = 0;
++
++ retmode |= (reqmode & GR_READ) ? GR_AUDIT_READ : 0;
++ retmode |= (reqmode & GR_WRITE) ? GR_AUDIT_WRITE | GR_AUDIT_APPEND : 0;
++ retmode |= (reqmode & GR_APPEND) ? GR_AUDIT_APPEND : 0;
++ retmode |= (reqmode & GR_EXEC) ? GR_AUDIT_EXEC : 0;
++ retmode |= (reqmode & GR_INHERIT) ? GR_AUDIT_INHERIT : 0;
++ retmode |= (reqmode & GR_FIND) ? GR_AUDIT_FIND : 0;
++ retmode |= (reqmode & GR_SETID) ? GR_AUDIT_SETID : 0;
++ retmode |= (reqmode & GR_CREATE) ? GR_AUDIT_CREATE : 0;
++ retmode |= (reqmode & GR_DELETE) ? GR_AUDIT_DELETE : 0;
++
++ return retmode;
++}
++
++__inline__ struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++ unsigned long index = shash(userp, subj_map_set.s_size);
++ struct subject_map *match;
++ __u8 i = 0;
++
++ match = subj_map_set.s_hash[index];
++
++ while (match && match->user != userp) {
++ index = (index + (1 << i)) % subj_map_set.s_size;
++ match = subj_map_set.s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match)
++ return match->kernel;
++ else
++ return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++ unsigned long index = shash(subjmap->user, subj_map_set.s_size);
++ struct subject_map **curr;
++ __u8 i = 0;
++
++ curr = &subj_map_set.s_hash[index];
++
++ while (*curr) {
++ index = (index + (1 << i)) % subj_map_set.s_size;
++ curr = &subj_map_set.s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = subjmap;
++
++ return;
++}
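The lookup and insert helpers above (and the similar ones that follow) all use the same open-addressing scheme: on a collision the index advances by a power-of-two step (1, 2, 4, ...) modulo the table size, with the shift counter wrapping at 32, and the pre-allocated "deleted" placeholders let removed slots be reused during insert without breaking existing probe chains. A standalone illustration of the probe sequence:

#include <stdio.h>

/* advance one probe step: add 1 << i, wrap i at 32, wrap index at size */
static unsigned long next_slot(unsigned long index, unsigned int *i,
			       unsigned long size)
{
	index = (index + (1UL << *i)) % size;
	*i = (*i + 1) % 32;
	return index;
}

int main(void)
{
	unsigned long index = 3, size = 13;
	unsigned int i = 0, probes;

	for (probes = 0; probes < 6; probes++) {
		printf("probe %u -> slot %lu\n", probes, index);
		index = next_slot(index, &i, size);
	}
	return 0;
}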
++
++__inline__ struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++ const gid_t gid)
++{
++ unsigned long index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++ struct acl_role_label *match;
++ struct role_allowed_ip *ipp;
++ __u8 i = 0;
++
++ match = acl_role_set.r_hash[index];
++
++ while (match
++ && (match->uidgid != uid || !(match->roletype & GR_ROLE_USER))) {
++ index = (index + (1 << i)) % acl_role_set.r_size;
++ match = acl_role_set.r_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match == NULL) {
++ try_group:
++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++ match = acl_role_set.r_hash[index];
++ i = 0;
++
++ while (match && (match->uidgid != gid
++ || !(match->roletype & GR_ROLE_GROUP))) {
++ index = (index + (1 << i)) % acl_role_set.r_size;
++ match = acl_role_set.r_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match == NULL)
++ match = default_role;
++ if (match->allowed_ips == NULL)
++ return match;
++ else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((task->curr_ip & ipp->netmask) ==
++ (ipp->addr & ipp->netmask)))
++ return match;
++ }
++ match = default_role;
++ }
++ } else if (match->allowed_ips == NULL) {
++ return match;
++ } else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((task->curr_ip & ipp->netmask) ==
++ (ipp->addr & ipp->netmask)))
++ return match;
++ }
++ goto try_group;
++ }
++
++ return match;
++}
++
++__inline__ struct acl_subject_label *
++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned long subj_size = role->subj_hash_size;
++ struct acl_subject_label **s_hash = role->subj_hash;
++ unsigned long index = fhash(ino, dev, subj_size);
++ struct acl_subject_label *match;
++ __u8 i = 0;
++
++ match = s_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % subj_size;
++ match = s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_subject) && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static __inline__ struct acl_object_label *
++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned long obj_size = subj->obj_hash_size;
++ struct acl_object_label **o_hash = subj->obj_hash;
++ unsigned long index = fhash(ino, dev, obj_size);
++ struct acl_object_label *match;
++ __u8 i = 0;
++
++ match = o_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % obj_size;
++ match = o_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_object) && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static __inline__ struct acl_object_label *
++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned long obj_size = subj->obj_hash_size;
++ struct acl_object_label **o_hash = subj->obj_hash;
++ unsigned long index = fhash(ino, dev, obj_size);
++ struct acl_object_label *match;
++ __u8 i = 0;
++
++ match = o_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % obj_size;
++ match = o_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_object) && (match->mode & GR_DELETED))
++ return match;
++
++ i = 0;
++ index = fhash(ino, dev, obj_size);
++ match = o_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % obj_size;
++ match = o_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_object) && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static __inline__ struct name_entry *
++lookup_name_entry(const char *name)
++{
++ __u16 len = strlen(name);
++ unsigned long index = nhash(name, len, name_set.n_size);
++ struct name_entry *match;
++ __u8 i = 0;
++
++ match = name_set.n_hash[index];
++
++ while (match && !gr_streq(match->name, name, match->len, len)) {
++ index = (index + (1 << i)) % name_set.n_size;
++ match = name_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ return match;
++}
++
++static __inline__ struct name_entry *
++lookup_inodev_entry(const ino_t ino, const dev_t dev)
++{
++ unsigned long index = fhash(ino, dev, inodev_set.n_size);
++ struct name_entry *match;
++ __u8 i = 0;
++
++ match = inodev_set.n_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev)) {
++ index = (index + (1 << i)) % inodev_set.n_size;
++ match = inodev_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_inodev))
++ return match;
++ else
++ return NULL;
++}
++
++static void
++insert_inodev_entry(struct name_entry *nentry)
++{
++ unsigned long index = fhash(nentry->inode, nentry->device,
++ inodev_set.n_size);
++ struct name_entry **curr;
++ __u8 i = 0;
++
++ curr = &inodev_set.n_hash[index];
++
++ while (*curr && *curr != deleted_inodev) {
++ index = (index + (1 << i)) % inodev_set.n_size;
++ curr = &inodev_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = nentry;
++
++ return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++ unsigned long index =
++ rhash(role->uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++ struct acl_role_label **curr;
++ __u8 i = 0;
++
++ curr = &acl_role_set.r_hash[index];
++
++ while (*curr) {
++ index = (index + (1 << i)) % acl_role_set.r_size;
++ curr = &acl_role_set.r_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = role;
++
++ return;
++}
++
++static int
++insert_name_entry(char *name, const ino_t inode, const dev_t device)
++{
++ struct name_entry **curr;
++ __u8 i = 0;
++ __u16 len = strlen(name);
++ unsigned long index = nhash(name, len, name_set.n_size);
++
++ curr = &name_set.n_hash[index];
++
++ while (*curr && !gr_streq((*curr)->name, name, (*curr)->len, len)) {
++ index = (index + (1 << i)) % name_set.n_size;
++ curr = &name_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (!(*curr)) {
++ struct name_entry *nentry =
++ acl_alloc(sizeof (struct name_entry));
++ if (!nentry)
++ return 0;
++ nentry->name = name;
++ nentry->inode = inode;
++ nentry->device = device;
++ nentry->len = len;
++ *curr = nentry;
++ /* insert us into the table searchable by inode/dev */
++ insert_inodev_entry(nentry);
++ }
++
++ return 1;
++}
++
++static void
++insert_acl_obj_label(struct acl_object_label *obj,
++ struct acl_subject_label *subj)
++{
++ unsigned long index =
++ fhash(obj->inode, obj->device, subj->obj_hash_size);
++ struct acl_object_label **curr;
++ __u8 i = 0;
++
++ curr = &subj->obj_hash[index];
++
++ while (*curr && *curr != deleted_object) {
++ index = (index + (1 << i)) % subj->obj_hash_size;
++ curr = &subj->obj_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = obj;
++
++ return;
++}
++
++static void
++insert_acl_subj_label(struct acl_subject_label *obj,
++ struct acl_role_label *role)
++{
++ unsigned long subj_size = role->subj_hash_size;
++ struct acl_subject_label **s_hash = role->subj_hash;
++ unsigned long index = fhash(obj->inode, obj->device, subj_size);
++ struct acl_subject_label **curr;
++ __u8 i = 0;
++
++ curr = &s_hash[index];
++
++ while (*curr && *curr != deleted_subject) {
++ index = (index + (1 << i)) % subj_size;
++ curr = &s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = obj;
++
++ return;
++}
++
++static void **
++create_table(__u32 * len)
++{
++ unsigned long table_sizes[] = {
++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++ 4194301, 8388593, 16777213, 33554393, 67108859, 134217689,
++ 268435399, 536870909, 1073741789, 2147483647
++ };
++ void *newtable = NULL;
++ unsigned int pwr = 0;
++
++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++ table_sizes[pwr] <= (2 * (*len)))
++ pwr++;
++
++ if (table_sizes[pwr] <= (2 * (*len)))
++ return newtable;
++
++ if ((table_sizes[pwr] * sizeof (void *)) <= PAGE_SIZE)
++ newtable =
++ kmalloc(table_sizes[pwr] * sizeof (void *), GFP_KERNEL);
++ else
++ newtable = vmalloc(table_sizes[pwr] * sizeof (void *));
++
++ *len = table_sizes[pwr];
++
++ return newtable;
++}
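create_table() rounds the requested entry count up to the smallest prime in its list that exceeds twice the count, so tables stay at most about half full, and falls back from kmalloc to vmalloc once the table no longer fits in a single page. A quick userspace check of the sizing rule:

#include <stdio.h>

int main(void)
{
	static const unsigned long sizes[] = {
		7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191
	};
	unsigned long len = 100, want = 2 * len;
	unsigned int i;

	/* pick the first prime strictly greater than 2 * len */
	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
		if (sizes[i] > want) {
			printf("%lu entries -> table of %lu slots\n",
			       len, sizes[i]);
			return 0;
		}
	return 1;
}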
++
++static int
++init_variables(const unsigned long acl_obj_size,
++ const unsigned long acl_glob_size,
++ const unsigned long acl_subj_size,
++ const unsigned long acl_ip_size,
++ const unsigned long acl_role_size,
++ const unsigned long allowed_ip_size,
++ const unsigned long acl_trans_size,
++ const __u16 num_sprole_pws)
++{
++ unsigned long stacksize;
++
++ subj_map_set.s_size = acl_subj_size;
++ acl_role_set.r_size = acl_role_size;
++ name_set.n_size = (acl_obj_size + acl_subj_size);
++ inodev_set.n_size = (acl_obj_size + acl_subj_size);
++
++ if (!gr_init_uidset())
++ return 1;
++
++ /* set up the stack that holds allocation info */
++
++ stacksize = (3 * acl_obj_size) + (3 * acl_role_size) +
++ (6 * acl_subj_size) + acl_ip_size + (2 * acl_trans_size) +
++ allowed_ip_size + (2 * num_sprole_pws) + (2 * acl_glob_size) + 5;
++
++ if (!acl_alloc_stack_init(stacksize))
++ return 1;
++
++ /* create our empty, fake deleted acls */
++ deleted_subject =
++ (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label));
++ deleted_object =
++ (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ deleted_inodev =
++ (struct name_entry *) acl_alloc(sizeof (struct name_entry));
++
++ if (!deleted_subject || !deleted_object || !deleted_inodev)
++ return 1;
++
++ memset(deleted_subject, 0, sizeof (struct acl_subject_label));
++ memset(deleted_object, 0, sizeof (struct acl_object_label));
++ memset(deleted_inodev, 0, sizeof (struct name_entry));
++
++ /* We only want 50% full tables for now */
++
++ subj_map_set.s_hash =
++ (struct subject_map **) create_table(&subj_map_set.s_size);
++ acl_role_set.r_hash =
++ (struct acl_role_label **) create_table(&acl_role_set.r_size);
++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size);
++ inodev_set.n_hash =
++ (struct name_entry **) create_table(&inodev_set.n_size);
++
++ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
++ !name_set.n_hash || !inodev_set.n_hash)
++ return 1;
++
++ memset(subj_map_set.s_hash, 0,
++ sizeof(struct subject_map *) * subj_map_set.s_size);
++ memset(acl_role_set.r_hash, 0,
++ sizeof (struct acl_role_label *) * acl_role_set.r_size);
++ memset(name_set.n_hash, 0,
++ sizeof (struct name_entry *) * name_set.n_size);
++ memset(inodev_set.n_hash, 0,
++ sizeof (struct name_entry *) * inodev_set.n_size);
++
++ return 0;
++}
++
++/* free information not needed after startup
++ currently contains user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++ __u32 i;
++
++ if (subj_map_set.s_hash) {
++ for (i = 0; i < subj_map_set.s_size; i++) {
++ if (subj_map_set.s_hash[i]) {
++ kfree(subj_map_set.s_hash[i]);
++ subj_map_set.s_hash[i] = NULL;
++ }
++ }
++
++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
++ PAGE_SIZE)
++ kfree(subj_map_set.s_hash);
++ else
++ vfree(subj_map_set.s_hash);
++ }
++
++ return;
++}
++
++static void
++free_variables(void)
++{
++ struct acl_subject_label *s;
++ struct acl_role_label *r;
++ struct task_struct *task, *task2;
++
++ gr_clear_learn_entries();
++
++ read_lock(&tasklist_lock);
++ for_each_process(task) {
++ task2 = task;
++ do {
++ task2->acl_sp_role = 0;
++ task2->acl_role_id = 0;
++ task2->acl = NULL;
++ task2->role = NULL;
++ } while ((task2 = next_thread(task2)) != task);
++ }
++ read_unlock(&tasklist_lock);
++
++ /* free all object hash tables */
++
++ if (role_list_head) {
++ for (r = role_list_head; r; r = r->next) {
++ if (!r->subj_hash)
++ break;
++ for (s = r->hash->first; s; s = s->next) {
++ if (!s->obj_hash)
++ break;
++ if ((s->obj_hash_size *
++ sizeof (struct acl_object_label *)) <=
++ PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ }
++ if ((r->subj_hash_size *
++ sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++ kfree(r->subj_hash);
++ else
++ vfree(r->subj_hash);
++ }
++ }
++
++ acl_free_all();
++
++ if (acl_role_set.r_hash) {
++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++ PAGE_SIZE)
++ kfree(acl_role_set.r_hash);
++ else
++ vfree(acl_role_set.r_hash);
++ }
++ if (name_set.n_hash) {
++ if ((name_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(name_set.n_hash);
++ else
++ vfree(name_set.n_hash);
++ }
++
++ if (inodev_set.n_hash) {
++ if ((inodev_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(inodev_set.n_hash);
++ else
++ vfree(inodev_set.n_hash);
++ }
++
++ gr_free_uidset();
++
++ memset(&name_set, 0, sizeof (struct name_db));
++ memset(&inodev_set, 0, sizeof (struct name_db));
++ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++ role_list_head = NULL;
++ default_role = NULL;
++
++ return;
++}
++
++static __u32
++count_user_objs(struct acl_object_label *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++ struct acl_object_label *g_tmp, **guser, *glast = NULL;
++ unsigned int len;
++ char *tmp;
++
++ if (obj->globbed == NULL)
++ return 0;
++
++ guser = &obj->globbed;
++ while (*guser) {
++ g_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ if (g_tmp == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(g_tmp, *guser,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ len = strnlen_user(g_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, g_tmp->filename, len))
++ return -EFAULT;
++
++ g_tmp->filename = tmp;
++
++ if (glast)
++ glast->next = g_tmp;
++ g_tmp->prev = glast;
++ *guser = g_tmp;
++ glast = g_tmp;
++ guser = &((*guser)->next);
++ }
++
++ return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++ struct acl_role_label *role)
++{
++ struct acl_object_label *o_tmp;
++ unsigned int len;
++ int ret;
++ char *tmp;
++
++ while (userp) {
++ if ((o_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ userp = o_tmp->prev;
++
++ len = strnlen_user(o_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, o_tmp->filename, len))
++ return -EFAULT;
++
++ o_tmp->filename = tmp;
++
++ insert_acl_obj_label(o_tmp, subj);
++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++ o_tmp->device))
++ return -ENOMEM;
++
++ ret = copy_user_glob(o_tmp);
++ if (ret)
++ return ret;
++
++ if (o_tmp->nested) {
++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
++ if (IS_ERR(o_tmp->nested))
++ return PTR_ERR(o_tmp->nested);
++
++ s_final = o_tmp->nested;
++ }
++ }
++
++ return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++ struct acl_subject_label s_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ break;
++
++ userp = s_tmp.prev;
++ /* do not count nested subjects against this count, since
++ they are not included in the hash table, but are
++ attached to objects. We have already counted
++ the subjects in userspace for the allocation
++ stack
++ */
++ if (!(s_tmp.mode & GR_NESTED))
++ num++;
++ }
++
++ return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++ ruserip = rolep->allowed_ips;
++
++ while (ruserip) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_allowed_ip *)
++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, ruserip,
++ sizeof (struct role_allowed_ip)))
++ return -EFAULT;
++
++ ruserip = rtmp->prev;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->allowed_ips = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!ruserip)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++ struct role_transition *rusertp, *rtmp = NULL, *rlast;
++ unsigned int len;
++ char *tmp;
++
++ rusertp = rolep->transitions;
++
++ while (rusertp) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_transition *)
++ acl_alloc(sizeof (struct role_transition))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, rusertp,
++ sizeof (struct role_transition)))
++ return -EFAULT;
++
++ rusertp = rtmp->prev;
++
++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, rtmp->rolename, len))
++ return -EFAULT;
++
++ rtmp->rolename = tmp;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->transitions = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!rusertp)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++ unsigned int len;
++ char *tmp;
++ __u32 num_objs;
++ struct acl_ip_label **i_tmp, *i_utmp2;
++ struct gr_hash_struct ghash;
++ struct subject_map *subjmap;
++ unsigned long i_num;
++ int err;
++
++ s_tmp = lookup_subject_map(userp);
++
++ /* we've already copied this subject into the kernel, just return
++ the reference to it, and don't copy it over again
++ */
++ if (s_tmp)
++ return(s_tmp);
++
++
++ if ((s_tmp = (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++ if (subjmap == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap->user = userp;
++ subjmap->kernel = s_tmp;
++ insert_subj_map_entry(subjmap);
++
++ if (copy_from_user(s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ return ERR_PTR(-EFAULT);
++
++ if (!s_last) {
++ s_tmp->prev = NULL;
++ role->hash->first = s_tmp;
++ } else {
++ s_last->next = s_tmp;
++ s_tmp->prev = s_last;
++ }
++
++ s_last = s_tmp;
++
++ len = strnlen_user(s_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return ERR_PTR(-EINVAL);
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user(tmp, s_tmp->filename, len))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->filename = tmp;
++
++ if (!strcmp(s_tmp->filename, "/"))
++ role->root_label = s_tmp;
++
++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
++ return ERR_PTR(-EFAULT);
++
++ /* copy user and group transition tables */
++
++ if (s_tmp->user_trans_num) {
++ uid_t *uidlist;
++
++ uidlist = (uid_t *)acl_alloc(s_tmp->user_trans_num * sizeof(uid_t));
++ if (uidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->user_transitions = uidlist;
++ }
++
++ if (s_tmp->group_trans_num) {
++ gid_t *gidlist;
++
++ gidlist = (gid_t *)acl_alloc(s_tmp->group_trans_num * sizeof(gid_t));
++ if (gidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->group_transitions = gidlist;
++ }
++
++ /* set up object hash table */
++ num_objs = count_user_objs(ghash.first);
++
++ s_tmp->obj_hash_size = num_objs;
++ s_tmp->obj_hash =
++ (struct acl_object_label **)
++ create_table(&(s_tmp->obj_hash_size));
++
++ if (!s_tmp->obj_hash)
++ return ERR_PTR(-ENOMEM);
++
++ memset(s_tmp->obj_hash, 0,
++ s_tmp->obj_hash_size *
++ sizeof (struct acl_object_label *));
++
++ /* copy before adding in objects, since a nested
++ acl could be found and be the final subject
++ copied
++ */
++
++ s_final = s_tmp;
++
++ /* add in objects */
++ err = copy_user_objs(ghash.first, s_tmp, role);
++
++ if (err)
++ return ERR_PTR(err);
++
++ /* set pointer for parent subject */
++ if (s_tmp->parent_subject) {
++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
++
++ if (IS_ERR(s_tmp2))
++ return s_tmp2;
++
++ s_tmp->parent_subject = s_tmp2;
++ }
++
++ /* add in ip acls */
++
++ if (!s_tmp->ip_num) {
++ s_tmp->ips = NULL;
++ goto insert;
++ }
++
++ i_tmp =
++ (struct acl_ip_label **) acl_alloc(s_tmp->ip_num *
++ sizeof (struct
++ acl_ip_label *));
++
++ if (!i_tmp)
++ return ERR_PTR(-ENOMEM);
++
++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++ *(i_tmp + i_num) =
++ (struct acl_ip_label *)
++ acl_alloc(sizeof (struct acl_ip_label));
++ if (!*(i_tmp + i_num))
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user
++ (&i_utmp2, s_tmp->ips + i_num,
++ sizeof (struct acl_ip_label *)))
++ return ERR_PTR(-EFAULT);
++
++ if (copy_from_user
++ (*(i_tmp + i_num), i_utmp2,
++ sizeof (struct acl_ip_label)))
++ return ERR_PTR(-EFAULT);
++ }
++
++ s_tmp->ips = i_tmp;
++
++insert:
++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++ s_tmp->device))
++ return ERR_PTR(-ENOMEM);
++
++ return s_tmp;
++}
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label s_pre;
++ struct acl_subject_label * ret;
++ int err;
++
++ while (userp) {
++ if (copy_from_user(&s_pre, userp,
++ sizeof (struct acl_subject_label)))
++ return -EFAULT;
++
++ /* do not add nested subjects here, add
++ while parsing objects
++ */
++
++ if (s_pre.mode & GR_NESTED) {
++ userp = s_pre.prev;
++ continue;
++ }
++
++ ret = do_copy_user_subj(userp, role);
++
++ err = PTR_ERR(ret);
++ if (IS_ERR(ret))
++ return err;
++
++ insert_acl_subj_label(ret, role);
++
++ userp = s_pre.prev;
++ }
++
++ s_final->next = NULL;
++
++ return 0;
++}
++
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2, *r_last;
++ struct sprole_pw *sptmp;
++ struct gr_hash_struct *ghash;
++ unsigned long r_num;
++ unsigned int len;
++ char *tmp;
++ int err = 0;
++ __u16 i;
++ __u32 num_subjs;
++
++ /* we need a default and kernel role */
++ if (arg->role_db.r_entries < 2)
++ return -EINVAL;
++
++ /* copy special role authentication info from userspace */
++
++ num_sprole_pws = arg->num_sprole_pws;
++ acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *));
++
++ if (!acl_special_roles) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++ if (!sptmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(sptmp, arg->sprole_pws + i,
++ sizeof (struct sprole_pw))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ len =
++ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(tmp, sptmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Copying special role %s\n", tmp);
++#endif
++ sptmp->rolename = tmp;
++ acl_special_roles[i] = sptmp;
++ }
++
++ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++ for (r_num = 0; r_num < arg->role_db.r_entries; r_num++) {
++ r_last = r_tmp;
++
++ r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++ if (!r_tmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(&r_utmp2, r_utmp + r_num,
++ sizeof (struct acl_role_label *))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ if (copy_from_user(r_tmp, r_utmp2,
++ sizeof (struct acl_role_label))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ if (!r_last) {
++ r_tmp->prev = NULL;
++ role_list_head = r_tmp;
++ } else {
++ r_last->next = r_tmp;
++ r_tmp->prev = r_last;
++ }
++
++ if (r_num == (arg->role_db.r_entries - 1))
++ r_tmp->next = NULL;
++
++ len = strnlen_user(r_tmp->rolename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(tmp, r_tmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ r_tmp->rolename = tmp;
++
++ if (!strcmp(r_tmp->rolename, "default")
++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++ default_role = r_tmp;
++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++ kernel_role = r_tmp;
++ }
++
++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ r_tmp->hash = ghash;
++
++ num_subjs = count_user_subjs(r_tmp->hash->first);
++
++ r_tmp->subj_hash_size = num_subjs;
++ r_tmp->subj_hash =
++ (struct acl_subject_label **)
++ create_table(&(r_tmp->subj_hash_size));
++
++ if (!r_tmp->subj_hash) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ err = copy_user_allowedips(r_tmp);
++ if (err)
++ goto cleanup;
++
++ err = copy_user_transitions(r_tmp);
++ if (err)
++ goto cleanup;
++
++ memset(r_tmp->subj_hash, 0,
++ r_tmp->subj_hash_size *
++ sizeof (struct acl_subject_label *));
++
++ s_last = NULL;
++
++ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
++
++ if (err)
++ goto cleanup;
++
++ insert_acl_role_label(r_tmp);
++ }
++
++ goto return_err;
++ cleanup:
++ free_variables();
++ return_err:
++ return err;
++
++}
++
++static int
++gracl_init(struct gr_arg *args)
++{
++ int error = 0;
++
++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
++
++ if (init_variables(args->role_db.o_entries, args->role_db.g_entries,
++ args->role_db.s_entries, args->role_db.i_entries,
++ args->role_db.r_entries, args->role_db.a_entries,
++ args->role_db.t_entries, args->num_sprole_pws)) {
++ security_alert_good(GR_INITF_ACL_MSG, GR_VERSION);
++ error = -ENOMEM;
++ free_variables();
++ goto out;
++ }
++
++ error = copy_user_acl(args);
++ free_init_variables();
++ if (error) {
++ free_variables();
++ goto out;
++ }
++
++ if ((error = gr_set_acls(0))) {
++ free_variables();
++ goto out;
++ }
++
++ gr_status |= GR_READY;
++ out:
++ return error;
++}
++
++static int
++glob_match(char *pattern, char *string)
++{
++ char *p1, *p2;
++
++ p1 = pattern;
++ p2 = string;
++
++ while (*p1 != '\0' && *p2 != '\0' && *p1 != '*') {
++ if (*p1 == *p2 || *p1 == '?') {
++ p1++;
++ p2++;
++ } else
++ break;
++ }
++ if (*p1 == '*') {
++ p1++;
++ while (*p2 != '\0') {
++ if (!glob_match(p1, p2))
++ return 0;
++ else
++ p2++;
++ }
++ }
++
++ if (*p2 == '\0' && *p1 == '*')
++ while (*p1 == '*')
++ p1++;
++
++ if (*p1 == '\0' && *p2 == '\0')
++ return 0;
++ else
++ return 1;
++}
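Note the inverted return convention: glob_match() returns 0 on a match and non-zero otherwise, so callers test with !glob_match(). A small harness showing the intended usage (it assumes the function above is available in the same program; in the patch it is static to gracl.c):

#include <stdio.h>

/* assumed to be the glob_match() defined above */
int glob_match(char *pattern, char *string);

int main(void)
{
	char pattern[] = "/home/*/.ssh/?*";
	char path[]    = "/home/alice/.ssh/id_rsa";

	printf("\"%s\" %s \"%s\"\n", pattern,
	       !glob_match(pattern, path) ? "matches" : "does not match",
	       path);
	return 0;
}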
++
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++ struct dentry *dentry, struct vfsmount *mnt, char **path)
++{
++ struct acl_object_label *tmp;
++
++ if (*path == NULL)
++ *path = gr_to_filename_nolock(dentry, mnt);
++
++ tmp = globbed;
++
++ while (tmp) {
++ if (!glob_match(tmp->filename, *path))
++ return tmp;
++ tmp = tmp->next;
++ }
++
++ return NULL;
++}
++
++static __inline__ struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ struct dentry *curr_dentry,
++ const struct acl_subject_label *subj, char **path)
++{
++ struct acl_subject_label *tmpsubj;
++ struct acl_object_label *retval;
++ struct acl_object_label *retval2;
++
++ tmpsubj = (struct acl_subject_label *) subj;
++ read_lock(&gr_inode_lock);
++ do {
++ retval = lookup_acl_obj_label(curr_dentry->d_inode->i_ino,
++ curr_dentry->d_inode->i_sb->s_dev, tmpsubj);
++ if (retval) {
++ if (retval->globbed) {
++ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
++ (struct vfsmount *)orig_mnt, path);
++ if (retval2)
++ retval = retval2;
++ }
++ break;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++ read_unlock(&gr_inode_lock);
++
++ return retval;
++}
++
++static struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct dentry *root;
++ struct vfsmount *rootmnt;
++ struct acl_object_label *retval;
++ char *path = NULL;
++
++ read_lock(&child_reaper->fs->lock);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ root = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (dentry == root && mnt == rootmnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, root, subj, &path);
++out:
++ spin_unlock(&dcache_lock);
++ dput(root);
++ mntput(rootmnt);
++
++ return retval;
++}
++
++static struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct dentry *root;
++ struct vfsmount *rootmnt;
++ struct acl_object_label *retval;
++
++ read_lock(&child_reaper->fs->lock);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ root = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (dentry == root && mnt == rootmnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, root, subj, &path);
++out:
++ spin_unlock(&dcache_lock);
++ dput(root);
++ mntput(rootmnt);
++
++ return retval;
++}
++
++static struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_role_label *role)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct dentry *root;
++ struct vfsmount *rootmnt;
++ struct acl_subject_label *retval;
++
++ read_lock(&child_reaper->fs->lock);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ root = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (unlikely(dentry == root && mnt == rootmnt))
++ break;
++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ if (unlikely(retval != NULL))
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ if (unlikely(retval != NULL))
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++
++ if (unlikely(retval == NULL)) {
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(root->d_inode->i_ino,
++ root->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ }
++ out:
++ spin_unlock(&dcache_lock);
++ dput(root);
++ mntput(rootmnt);
++
++ return retval;
++}
++
++static __inline__ void
++gr_log_learn(const struct acl_role_label *role, const uid_t uid, const gid_t gid,
++ const struct task_struct *task, const char *pathname,
++ const __u32 mode)
++{
++ security_learn(GR_LEARN_AUDIT_MSG, role->rolename, role->roletype,
++ uid, gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
++ task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
++ 1, 1, pathname, (unsigned long) mode, NIPQUAD(task->curr_ip));
++
++ return;
++}
++
++__u32
++gr_check_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++ struct acl_object_label *obj;
++ __u32 oldmode, newmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (GR_WRITE | GR_CREATE);
++
++ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++ oldmode = obj->mode;
++
++ if (current->acl->mode & GR_LEARN)
++ oldmode |= (GR_WRITE | GR_CREATE);
++ newmode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ oldmode | GR_CREATE | GR_AUDIT_CREATE |
++ GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if ((newmode & oldmode) == oldmode)
++ return newmode;
++ else if (current->acl->mode & GR_LEARN) {
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(old_dentry, old_mnt), oldmode);
++ return (GR_WRITE | GR_CREATE);
++ } else if (newmode & GR_SUPPRESS)
++ return GR_SUPPRESS;
++ else
++ return 0;
++}
++
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++ const struct vfsmount * mnt)
++{
++ __u32 retval = mode;
++ struct acl_subject_label *curracl;
++ struct acl_object_label *currobj;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ curracl = current->acl;
++
++ currobj = chk_obj_label(dentry, mnt, curracl);
++ retval = currobj->mode & mode;
++
++ if (unlikely
++ ((curracl->mode & GR_LEARN) && !(mode & GR_NOPTRACE)
++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ retval = new_mode;
++
++ if (!(mode & GR_NOLEARN))
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(dentry, mnt), new_mode);
++ }
++
++ return retval;
++}
++
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++ const struct vfsmount * mnt, const __u32 mode)
++{
++ struct name_entry *match;
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *curracl;
++ char *path;
++ __u32 retval;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ preempt_disable();
++ path = gr_to_filename(new_dentry, mnt);
++ match = lookup_name_entry(path);
++
++ if (!match)
++ goto check_parent;
++
++ curracl = current->acl;
++
++ read_lock(&gr_inode_lock);
++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++ read_unlock(&gr_inode_lock);
++
++ if (matchpo) {
++ if ((matchpo->mode & mode) !=
++ (mode & ~(GR_AUDITS | GR_SUPPRESS))
++ && curracl->mode & GR_LEARN) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(new_dentry, mnt), new_mode);
++
++ preempt_enable();
++ return new_mode;
++ }
++ preempt_enable();
++ return (matchpo->mode & mode);
++ }
++
++ check_parent:
++ curracl = current->acl;
++
++ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++ retval = matchpo->mode & mode;
++
++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++ && (curracl->mode & GR_LEARN)) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(new_dentry, mnt), new_mode);
++ preempt_enable();
++ return new_mode;
++ }
++
++ preempt_enable();
++ return retval;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (!(task->acl->mode & GR_FIND) && !(current->acl->mode & GR_VIEW))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY) || !task))
++ return 0;
++
++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL))
++ return 1;
++
++ return 0;
++}
++
++__inline__ void
++gr_copy_label(struct task_struct *tsk)
++{
++ tsk->used_accept = 0;
++ tsk->acl_sp_role = 0;
++ tsk->acl_role_id = current->acl_role_id;
++ tsk->acl = current->acl;
++ tsk->role = current->role;
++ tsk->curr_ip = current->curr_ip;
++ if (current->exec_file)
++ get_file(current->exec_file);
++ tsk->exec_file = current->exec_file;
++ tsk->is_writable = current->is_writable;
++ if (unlikely(current->used_accept))
++ current->curr_ip = 0;
++
++ return;
++}
++
++static __inline__ void
++gr_set_proc_res(void)
++{
++ struct acl_subject_label *proc;
++ unsigned short i;
++
++ proc = current->acl;
++
++ if (proc->mode & GR_LEARN)
++ return;
++
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(proc->resmask & (1 << i)))
++ continue;
++
++ current->rlim[i].rlim_cur = proc->res[i].rlim_cur;
++ current->rlim[i].rlim_max = proc->res[i].rlim_max;
++ }
++
++ return;
++}
++
++static __inline__ void
++do_set_role_label(struct task_struct *task, const uid_t uid, const gid_t gid)
++{
++ task->role = lookup_acl_role_label(task, uid, gid);
++
++ return;
++}
++
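++/* returns 0 if the requested real/effective/fs uid transition is permitted
++   by the subject's user transition list, 1 (and logs an alert) otherwise */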
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ uid_t *uidlist;
++ int curuid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ num = current->acl->user_trans_num;
++ uidlist = current->acl->user_transitions;
++
++ if (uidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->user_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ realok = 1;
++ if (effective == curuid)
++ effectiveok = 1;
++ if (fs == curuid)
++ fsok = 1;
++ }
++ } else if (current->acl->user_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ break;
++ if (effective == curuid)
++ break;
++ if (fs == curuid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ security_alert(GR_USRCHANGE_ACL_MSG,
++ realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real, DEFAULTSECARGS);
++ return 1;
++ }
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ gid_t *gidlist;
++ int curgid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ num = current->acl->group_trans_num;
++ gidlist = current->acl->group_transitions;
++
++ if (gidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->group_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ realok = 1;
++ if (effective == curgid)
++ effectiveok = 1;
++ if (fs == curgid)
++ fsok = 1;
++ }
++ } else if (current->acl->group_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ break;
++ if (effective == curgid)
++ break;
++ if (fs == curgid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ security_alert(GR_GRPCHANGE_ACL_MSG,
++ realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real, DEFAULTSECARGS);
++ return 1;
++ }
++}
++
++void
++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
++{
++ struct acl_object_label *obj;
++ struct file *filp;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ filp = task->exec_file;
++
++ /* kernel process, we'll give them the kernel role */
++ if (unlikely(!filp)) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ return;
++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
++ do_set_role_label(task, uid, gid);
++
++ task->acl =
++ chk_subj_label(filp->f_dentry, filp->f_vfsmnt, task->role);
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++
++ gr_set_proc_res();
++
++ return;
++}
++
++void
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct acl_subject_label *newacl;
++ struct acl_object_label *obj;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ newacl = chk_subj_label(dentry, mnt, current->role);
++
++ obj = chk_obj_label(dentry, mnt, current->acl);
++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT)) {
++ if (obj->nested)
++ current->acl = obj->nested;
++ else
++ current->acl = newacl;
++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
++ security_audit(GR_INHERIT_ACL_MSG, current->acl->filename,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++
++ current->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(dentry, mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ current->is_writable = 1;
++ obj = chk_obj_label(dentry, mnt, current->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ current->is_writable = 1;
++
++ gr_set_proc_res();
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", current->comm, current->pid, current->role->rolename, current->acl->filename);
++#endif
++ return;
++}
++
++static __inline__ void
++do_handle_delete(const ino_t ino, const dev_t dev)
++{
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *matchps;
++ struct acl_subject_label *i;
++ struct acl_role_label *role;
++
++ for (role = role_list_head; role; role = role->next) {
++ for (i = role->hash->first; i; i = i->next) {
++ if (unlikely((i->mode & GR_NESTED) &&
++ (i->inode == ino) &&
++ (i->device == dev)))
++ i->mode |= GR_DELETED;
++ if (unlikely((matchpo =
++ lookup_acl_obj_label(ino, dev, i)) != NULL))
++ matchpo->mode |= GR_DELETED;
++ }
++
++ if (unlikely((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL))
++ matchps->mode |= GR_DELETED;
++ }
++
++ return;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ write_lock(&gr_inode_lock);
++ if (unlikely((unsigned long)lookup_inodev_entry(ino, dev)))
++ do_handle_delete(ino, dev);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
++
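++/* when a pathname named in the policy is recreated, re-key the previously
++   deleted object label to the new inode/device and re-insert it into the
++   subject's object hash */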
++static __inline__ void
++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_subject_label *subj)
++{
++ unsigned long index = fhash(oldinode, olddevice, subj->obj_hash_size);
++ struct acl_object_label **match;
++ struct acl_object_label *tmp;
++ __u8 i = 0;
++
++ match = &subj->obj_hash[index];
++
++ while (*match && ((*match)->inode != oldinode ||
++ (*match)->device != olddevice ||
++ !((*match)->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % subj->obj_hash_size;
++ match = &subj->obj_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (*match && ((*match) != deleted_object)
++ && ((*match)->inode == oldinode)
++ && ((*match)->device == olddevice)
++ && ((*match)->mode & GR_DELETED)) {
++ tmp = *match;
++ tmp->inode = newinode;
++ tmp->device = newdevice;
++ tmp->mode &= ~GR_DELETED;
++
++ *match = deleted_object;
++
++ insert_acl_obj_label(tmp, subj);
++ }
++
++ return;
++}
++
++static __inline__ void
++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_role_label *role)
++{
++ struct acl_subject_label **s_hash = role->subj_hash;
++ unsigned long subj_size = role->subj_hash_size;
++ unsigned long index = fhash(oldinode, olddevice, subj_size);
++ struct acl_subject_label **match;
++ struct acl_subject_label *tmp;
++ __u8 i = 0;
++
++ match = &s_hash[index];
++
++ while (*match && ((*match)->inode != oldinode ||
++ (*match)->device != olddevice ||
++ !((*match)->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % subj_size;
++ i = (i + 1) % 32;
++ match = &s_hash[index];
++ }
++
++ if (*match && (*match != deleted_subject)
++ && ((*match)->inode == oldinode)
++ && ((*match)->device == olddevice)
++ && ((*match)->mode & GR_DELETED)) {
++ tmp = *match;
++
++ tmp->inode = newinode;
++ tmp->device = newdevice;
++ tmp->mode &= ~GR_DELETED;
++
++ *match = deleted_subject;
++
++ insert_acl_subj_label(tmp, role);
++ }
++
++ return;
++}
++
++static __inline__ void
++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice)
++{
++ unsigned long index = fhash(oldinode, olddevice, inodev_set.n_size);
++ struct name_entry **match;
++ struct name_entry *tmp;
++ __u8 i = 0;
++
++ match = &inodev_set.n_hash[index];
++
++ while (*match
++ && ((*match)->inode != oldinode
++ || (*match)->device != olddevice)) {
++ index = (index + (1 << i)) % inodev_set.n_size;
++ i = (i + 1) % 32;
++ match = &inodev_set.n_hash[index];
++ }
++
++ if (*match && (*match != deleted_inodev)
++ && ((*match)->inode == oldinode)
++ && ((*match)->device == olddevice)) {
++ tmp = *match;
++
++ tmp->inode = newinode;
++ tmp->device = newdevice;
++
++ *match = deleted_inodev;
++
++ insert_inodev_entry(tmp);
++ }
++
++ return;
++}
++
++static __inline__ void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++ const struct vfsmount *mnt)
++{
++ struct acl_subject_label *i;
++ struct acl_role_label *role;
++
++ for (role = role_list_head; role; role = role->next) {
++ update_acl_subj_label(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++
++ for (i = role->hash->first; i; i = i->next) {
++ if (unlikely((i->mode & GR_NESTED) &&
++ (i->inode == dentry->d_inode->i_ino) &&
++ (i->device == dentry->d_inode->i_sb->s_dev))) {
++ i->inode = dentry->d_inode->i_ino;
++ i->device = dentry->d_inode->i_sb->s_dev;
++ }
++ update_acl_obj_label(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, i);
++ }
++ }
++
++ update_inodev_entry(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev);
++
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename(dentry, mnt));
++ preempt_enable();
++
++ if (unlikely((unsigned long)matchn)) {
++ write_lock(&gr_inode_lock);
++ do_handle_create(matchn, dentry, mnt);
++ write_unlock(&gr_inode_lock);
++ }
++
++ return;
++}
++
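++/* keep the inode/device-keyed label tables consistent across a rename:
++   handle deletion for inodes losing their last link and re-key any label
++   configured for the destination pathname */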
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename(new_dentry, mnt));
++ preempt_enable();
++
++ /* we wouldn't have to check d_inode if it weren't for
++ NFS silly-renaming
++ */
++
++ write_lock(&gr_inode_lock);
++ if (unlikely(replace && new_dentry->d_inode)) {
++ if (unlikely(lookup_inodev_entry(new_dentry->d_inode->i_ino,
++ new_dentry->d_inode->i_sb->s_dev) &&
++ (old_dentry->d_inode->i_nlink <= 1)))
++ do_handle_delete(new_dentry->d_inode->i_ino,
++ new_dentry->d_inode->i_sb->s_dev);
++ }
++
++ if (unlikely(lookup_inodev_entry(old_dentry->d_inode->i_ino,
++ old_dentry->d_inode->i_sb->s_dev) &&
++ (old_dentry->d_inode->i_nlink <= 1)))
++ do_handle_delete(old_dentry->d_inode->i_ino,
++ old_dentry->d_inode->i_sb->s_dev);
++
++ if (unlikely((unsigned long)matchn))
++ do_handle_create(matchn, old_dentry, mnt);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
++
++static int
++lookup_special_role_auth(const char *rolename, unsigned char **salt,
++ unsigned char **sum)
++{
++ struct acl_role_label *r;
++ struct role_transition *trans;
++ __u16 i;
++ int found = 0;
++
++ /* check transition table */
++
++ for (trans = current->role->transitions; trans; trans = trans->next) {
++ if (!strcmp(rolename, trans->rolename)) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found)
++ return 0;
++
++ /* handle special roles that do not require authentication */
++
++ for (r = role_list_head; r; r = r->next) {
++ if (!strcmp(rolename, r->rolename)
++ && (r->roletype & GR_ROLE_NOPW)) {
++ *salt = NULL;
++ *sum = NULL;
++ return 1;
++ }
++ }
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
++ *salt = acl_special_roles[i]->salt;
++ *sum = acl_special_roles[i]->sum;
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static void
++assign_special_role(char *rolename)
++{
++ struct acl_object_label *obj;
++ struct acl_role_label *r;
++ struct acl_role_label *assigned = NULL;
++ struct task_struct *tsk;
++ struct file *filp;
++
++ for (r = role_list_head; r; r = r->next)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL))
++ assigned = r;
++
++ if (!assigned)
++ return;
++
++ tsk = current->parent;
++ filp = tsk->exec_file;
++
++ if (tsk && filp) {
++ tsk->is_writable = 0;
++
++ acl_sp_role_value = (acl_sp_role_value % 65535) + 1;
++ tsk->acl_sp_role = 1;
++ tsk->acl_role_id = acl_sp_role_value;
++ tsk->role = assigned;
++ tsk->acl =
++ chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role);
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
++#endif
++ }
++
++ return;
++}
++
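++/* write handler for the grsecurity policy interface: copies a struct gr_arg
++   from userland, enforces password authentication and lockout, and
++   dispatches on the requested mode (status/shutdown/enable/reload/etc.) */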
++ssize_t
++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
++{
++ struct gr_arg *arg;
++ unsigned char *sprole_salt;
++ unsigned char *sprole_sum;
++ int error = sizeof (struct gr_arg);
++ int error2 = 0;
++
++ down(&gr_dev_sem);
++
++ arg = (struct gr_arg *) buf;
++
++ if (count != sizeof (struct gr_arg)) {
++ security_alert_good(GR_DEV_ACL_MSG, count,
++ (int) sizeof (struct gr_arg));
++ error = -EINVAL;
++ goto out;
++ }
++
++ if ((gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
++ && time_before_eq(gr_auth_expires, get_seconds())) {
++ gr_auth_expires = 0;
++ gr_auth_attempts = 0;
++ }
++
++ if (copy_from_user(gr_usermode, arg, sizeof (struct gr_arg))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if (gr_usermode->mode != SPROLE && time_after(gr_auth_expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++	/* if a non-root user is trying to do anything other than use a
++	   special role, do not attempt authentication and do not count
++	   the attempt towards the authentication lockout
++	 */
++
++ if (gr_usermode->mode != SPROLE && current->uid) {
++ error = -EPERM;
++ goto out;
++ }
++
++ /* ensure pw and special role name are null terminated */
++
++ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++	/* Okay.
++	 * We have enough of the argument structure (we have yet
++	 * to copy_from_user the tables themselves).  Copy the tables
++	 * only if we need them, i.e. for loading operations. */
++
++ switch (gr_usermode->mode) {
++ case STATUS:
++ if (gr_status & GR_READY)
++ error = 1;
++ else
++ error = 2;
++ goto out;
++ case SHUTDOWN:
++ if ((gr_status & GR_READY)
++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ gr_status &= ~GR_READY;
++ security_alert_good(GR_SHUTS_ACL_MSG, DEFAULTSECARGS);
++ free_variables();
++ memset(gr_usermode, 0, sizeof (struct gr_arg));
++ memset(gr_system_salt, 0, GR_SALT_LEN);
++ memset(gr_system_sum, 0, GR_SHA_LEN);
++ } else if (gr_status & GR_READY) {
++ security_alert(GR_SHUTF_ACL_MSG, DEFAULTSECARGS);
++ error = -EPERM;
++ } else {
++ security_alert_good(GR_SHUTI_ACL_MSG, DEFAULTSECARGS);
++ error = -EAGAIN;
++ }
++ break;
++ case ENABLE:
++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
++ security_alert_good(GR_ENABLE_ACL_MSG, GR_VERSION);
++ else {
++ if (gr_status & GR_READY)
++ error = -EAGAIN;
++ else
++ error = error2;
++ security_alert(GR_ENABLEF_ACL_MSG, GR_VERSION,
++ DEFAULTSECARGS);
++ }
++ break;
++ case RELOAD:
++ if (!(gr_status & GR_READY)) {
++ security_alert_good(GR_RELOADI_ACL_MSG);
++ error = -EAGAIN;
++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ lock_kernel();
++ gr_status &= ~GR_READY;
++ free_variables();
++ if (!(error2 = gracl_init(gr_usermode))) {
++ unlock_kernel();
++ security_alert_good(GR_RELOAD_ACL_MSG,
++ GR_VERSION);
++ } else {
++ unlock_kernel();
++ error = error2;
++ security_alert(GR_RELOADF_ACL_MSG, GR_VERSION,
++ DEFAULTSECARGS);
++ }
++ } else {
++ security_alert(GR_RELOADF_ACL_MSG, GR_VERSION,
++ DEFAULTSECARGS);
++ error = -EPERM;
++ }
++ break;
++ case SEGVMOD:
++ if (unlikely(!(gr_status & GR_READY))) {
++ security_alert_good(GR_SEGVMODI_ACL_MSG,
++ DEFAULTSECARGS);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ security_alert_good(GR_SEGVMODS_ACL_MSG,
++ DEFAULTSECARGS);
++ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
++ struct acl_subject_label *segvacl;
++ segvacl =
++ lookup_acl_subj_label(gr_usermode->segv_inode,
++ gr_usermode->segv_device,
++ current->role);
++ if (segvacl) {
++ segvacl->crashes = 0;
++ segvacl->expires = 0;
++ }
++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
++ gr_remove_uid(gr_usermode->segv_uid);
++ }
++ } else {
++ security_alert(GR_SEGVMODF_ACL_MSG, DEFAULTSECARGS);
++ error = -EPERM;
++ }
++ break;
++ case SPROLE:
++ if (unlikely(!(gr_status & GR_READY))) {
++ security_alert_good(GR_SPROLEI_ACL_MSG, DEFAULTSECARGS);
++ error = -EAGAIN;
++ break;
++ }
++
++ if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
++ && time_before_eq(current->role->expires, get_seconds())) {
++ current->role->expires = 0;
++ current->role->auth_attempts = 0;
++ }
++
++ if (time_after(current->role->expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if (lookup_special_role_auth
++ (gr_usermode->sp_role, &sprole_salt, &sprole_sum)
++ && ((!sprole_salt && !sprole_sum)
++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++ assign_special_role(gr_usermode->sp_role);
++ security_alert_good(GR_SPROLES_ACL_MSG,
++ (current->parent) ? current->
++ parent->role->rolename : "",
++ acl_sp_role_value, DEFAULTSECARGS);
++ } else {
++ security_alert(GR_SPROLEF_ACL_MSG, gr_usermode->sp_role,
++ DEFAULTSECARGS);
++ error = -EPERM;
++ current->role->auth_attempts++;
++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
++ current->role->expires =
++ get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++ security_alert(GR_MAXROLEPW_ACL_MSG,
++ CONFIG_GRKERNSEC_ACL_MAXTRIES,
++ gr_usermode->sp_role, DEFAULTSECARGS);
++ }
++
++ goto out;
++ }
++ break;
++ case UNSPROLE:
++ if (unlikely(!(gr_status & GR_READY))) {
++ security_alert_good(GR_UNSPROLEI_ACL_MSG, DEFAULTSECARGS);
++ error = -EAGAIN;
++ break;
++ }
++
++ if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
++ && time_before_eq(current->role->expires, get_seconds())) {
++ current->role->expires = 0;
++ current->role->auth_attempts = 0;
++ }
++
++ if (time_after(current->role->expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if ((current->role->roletype & GR_ROLE_SPECIAL) &&
++ lookup_special_role_auth
++ (current->role->rolename, &sprole_salt, &sprole_sum)
++ && ((!sprole_salt && !sprole_sum)
++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++ security_alert_good(GR_UNSPROLES_ACL_MSG,
++ (current->parent) ? current->
++ parent->role->rolename : "",
++ (current->parent) ? current->
++ parent->acl_role_id : 0, DEFAULTSECARGS);
++ gr_set_acls(1);
++ if (current->parent)
++ current->parent->acl_sp_role = 0;
++ } else {
++ security_alert(GR_UNSPROLEF_ACL_MSG, current->role->rolename,
++ DEFAULTSECARGS);
++ error = -EPERM;
++ current->role->auth_attempts++;
++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
++ current->role->expires =
++ get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++ security_alert(GR_MAXROLEPW_ACL_MSG,
++ CONFIG_GRKERNSEC_ACL_MAXTRIES,
++ current->role->rolename, DEFAULTSECARGS);
++ }
++
++ goto out;
++ }
++ break;
++ default:
++ security_alert(GR_INVMODE_ACL_MSG, gr_usermode->mode,
++ DEFAULTSECARGS);
++ error = -EINVAL;
++ break;
++ }
++
++ if (error != -EPERM)
++ goto out;
++
++ gr_auth_attempts++;
++
++ if (gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
++ security_alert(GR_MAXPW_ACL_MSG, CONFIG_GRKERNSEC_ACL_MAXTRIES);
++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++ }
++
++ out:
++ up(&gr_dev_sem);
++ return error;
++}
++
++int
++gr_set_acls(const int type)
++{
++ struct acl_object_label *obj;
++ struct task_struct *task, *task2;
++ struct file *filp;
++ unsigned short i;
++
++ read_lock(&tasklist_lock);
++ for_each_process(task2) {
++ task = task2;
++ do {
++			/* check to see if we're called from the exit handler;
++			   if so, only replace ACLs that have inherited the
++			   admin ACL */
++
++ if (type && (task->role != current->role ||
++ task->acl_role_id != current->acl_role_id))
++ continue;
++
++ task->acl_role_id = 0;
++
++ if ((filp = task->exec_file)) {
++ do_set_role_label(task, task->uid, task->gid);
++
++ task->acl =
++ chk_subj_label(filp->f_dentry, filp->f_vfsmnt,
++ task->role);
++ if (task->acl) {
++ struct acl_subject_label *curr;
++ curr = task->acl;
++
++ task->is_writable = 0;
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ if (!(curr->mode & GR_LEARN))
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(curr->resmask & (1 << i)))
++ continue;
++
++ task->rlim[i].rlim_cur =
++ curr->res[i].rlim_cur;
++ task->rlim[i].rlim_max =
++ curr->res[i].rlim_max;
++ }
++ } else {
++ read_unlock(&tasklist_lock);
++ security_alert_good(GR_DEFACL_MSG, task->comm,
++ task->pid);
++ return 1;
++ }
++ } else {
++ // it's a kernel process
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++ task->acl->mode &= ~GR_FIND;
++#endif
++ }
++ } while ((task = next_thread(task)) != task2);
++ }
++ read_unlock(&tasklist_lock);
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_learn_resource);
++
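++/* in learning mode, when a process exceeds a resource limit covered by its
++   subject, raise the recorded limit by a per-resource bump and emit a
++   learning log entry; otherwise just log the overrun if RESLOG is enabled */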
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ struct acl_subject_label *acl;
++
++ if (unlikely((gr_status & GR_READY) &&
++ task->acl && (task->acl->mode & GR_LEARN)))
++ goto skip_reslog;
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ gr_log_resource(task, res, wanted, gt);
++#endif
++ skip_reslog:
++
++ if (unlikely(!(gr_status & GR_READY) || !wanted))
++ return;
++
++ acl = task->acl;
++
++ if (likely(!acl || !(acl->mode & GR_LEARN) ||
++ !(acl->resmask & (1 << (unsigned short) res))))
++ return;
++
++ if (wanted >= acl->res[res].rlim_cur) {
++ unsigned long res_add;
++
++ res_add = wanted;
++ switch (res) {
++ case RLIMIT_CPU:
++ res_add += GR_RLIM_CPU_BUMP;
++ break;
++ case RLIMIT_FSIZE:
++ res_add += GR_RLIM_FSIZE_BUMP;
++ break;
++ case RLIMIT_DATA:
++ res_add += GR_RLIM_DATA_BUMP;
++ break;
++ case RLIMIT_STACK:
++ res_add += GR_RLIM_STACK_BUMP;
++ break;
++ case RLIMIT_CORE:
++ res_add += GR_RLIM_CORE_BUMP;
++ break;
++ case RLIMIT_RSS:
++ res_add += GR_RLIM_RSS_BUMP;
++ break;
++ case RLIMIT_NPROC:
++ res_add += GR_RLIM_NPROC_BUMP;
++ break;
++ case RLIMIT_NOFILE:
++ res_add += GR_RLIM_NOFILE_BUMP;
++ break;
++ case RLIMIT_MEMLOCK:
++ res_add += GR_RLIM_MEMLOCK_BUMP;
++ break;
++ case RLIMIT_AS:
++ res_add += GR_RLIM_AS_BUMP;
++ break;
++ case RLIMIT_LOCKS:
++ res_add += GR_RLIM_LOCKS_BUMP;
++ break;
++ }
++
++ acl->res[res].rlim_cur = res_add;
++
++ if (wanted > acl->res[res].rlim_max)
++ acl->res[res].rlim_max = res_add;
++
++ security_learn(GR_LEARN_AUDIT_MSG, current->role->rolename,
++ current->role->roletype, acl->filename,
++ acl->res[res].rlim_cur, acl->res[res].rlim_max,
++ "", (unsigned long) res);
++ }
++
++ return;
++}
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_flags(struct linux_binprm *bprm)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *proc;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ proc = task->acl;
++
++ if (proc->mode & GR_PAXPAGE)
++ task->flags &= ~PF_PAX_PAGEEXEC;
++ if (proc->mode & GR_PAXSEGM)
++ task->flags &= ~PF_PAX_SEGMEXEC;
++ if (proc->mode & GR_PAXGCC)
++ task->flags |= PF_PAX_EMUTRAMP;
++ if (proc->mode & GR_PAXMPROTECT)
++ task->flags &= ~PF_PAX_MPROTECT;
++ if (proc->mode & GR_PAXRANDMMAP)
++ task->flags &= ~PF_PAX_RANDMMAP;
++ if (proc->mode & GR_PAXRANDEXEC)
++ task->flags |= PF_PAX_RANDEXEC;
++
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++extern struct proc_dir_entry *proc_sys_root;
++
++
++/* the following function is called under the BKL */
++
++__u32
++gr_handle_sysctl(const struct ctl_table *table, const void *oldval,
++ const void *newval)
++{
++ struct proc_dir_entry *tmp;
++ struct nameidata nd;
++ const char *proc_sys = "/proc/sys";
++ char *path;
++ struct acl_object_label *obj;
++ unsigned short len = 0, pos = 0, depth = 0, i;
++ __u32 err = 0;
++ __u32 mode = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 1;
++
++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++
++ if (oldval)
++ mode |= GR_READ;
++ if (newval)
++ mode |= GR_WRITE;
++
++ /* convert the requested sysctl entry into a pathname */
++
++ for (tmp = table->de; tmp != proc_sys_root; tmp = tmp->parent) {
++ len += strlen(tmp->name);
++ len++;
++ depth++;
++ }
++
++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE)
++ return 0; /* deny */
++
++ memset(path, 0, PAGE_SIZE);
++
++ memcpy(path, proc_sys, strlen(proc_sys));
++
++ pos += strlen(proc_sys);
++
++ for (; depth > 0; depth--) {
++ path[pos] = '/';
++ pos++;
++ for (i = 1, tmp = table->de; tmp != proc_sys_root;
++ tmp = tmp->parent) {
++ if (depth == i) {
++ memcpy(path + pos, tmp->name,
++ strlen(tmp->name));
++ pos += strlen(tmp->name);
++ }
++ i++;
++ }
++ }
++
++ err = path_lookup(path, LOOKUP_FOLLOW, &nd);
++
++ if (err)
++ goto out;
++
++ obj = chk_obj_label(nd.dentry, nd.mnt, current->acl);
++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
++
++ if (unlikely((current->acl->mode & GR_LEARN) && ((err & mode) != mode))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ err = new_mode;
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, path, new_mode);
++ } else if ((err & mode) != mode && !(err & GR_SUPPRESS)) {
++ security_alert(GR_SYSCTL_ACL_MSG, "denied", path,
++ (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "",
++ DEFAULTSECARGS);
++ err = 0;
++ } else if ((err & mode) != mode) {
++ err = 0;
++ } else if (((err & mode) == mode) && (err & GR_AUDITS)) {
++ security_audit(GR_SYSCTL_ACL_MSG, "successful",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "",
++ DEFAULTSECARGS);
++ }
++
++ path_release(&nd);
++
++ out:
++ return err;
++}
++#endif
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ struct file *filp;
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ filp = task->exec_file;
++
++ read_lock(&tasklist_lock);
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->parent;
++ }
++ read_unlock(&tasklist_lock);
++
++ if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE))
++ return 1;
++
++ retmode = gr_search_file(filp->f_dentry, GR_NOPTRACE, filp->f_vfsmnt);
++
++ if (retmode & GR_NOPTRACE)
++ return 1;
++
++ if (!(current->acl->mode & GR_OVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++ && (current->acl != task->acl || (current->acl != current->role->root_label
++ && current->pid != task->pid)))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ struct file *filp;
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ filp = task->exec_file;
++
++ if (task->acl->mode & GR_NOPTRACE) {
++ security_alert(GR_PTRACE_ACL_MSG, filp ?
++ gr_to_filename(filp->f_dentry, filp->f_vfsmnt)
++ : "(none)", task->comm, task->pid,
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ read_lock(&tasklist_lock);
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->parent;
++ }
++ read_unlock(&tasklist_lock);
++
++ if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE)) {
++ security_alert(GR_PTRACE_ACL_MSG, filp ?
++ gr_to_filename(filp->f_dentry, filp->f_vfsmnt)
++ : "(none)", task->comm, task->pid,
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ if (unlikely(!filp))
++ return 0;
++
++ retmode = gr_search_file(filp->f_dentry, GR_PTRACERD | GR_NOPTRACE, filp->f_vfsmnt);
++
++ if (retmode & GR_NOPTRACE) {
++ security_alert(GR_PTRACE_ACL_MSG, gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
++ task->comm, task->pid, DEFAULTSECARGS);
++ return 1;
++ }
++
++ if (retmode & GR_PTRACERD) {
++ switch (request) {
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA)
++ case PTRACE_SETREGS:
++ case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_X86
++ case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++ case PTRACE_SETVRREGS:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++ } else if (!(current->acl->mode & GR_OVERRIDE) &&
++ !(current->role->roletype & GR_ROLE_GOD)
++ && (current->acl != task->acl
++ || (current->acl != current->role->root_label
++ && current->pid != task->pid))) {
++ security_alert(GR_PTRACE_ACL_MSG,
++ gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
++ task->comm, task->pid, DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 0;
++}
++
++int
++gr_handle_ptrace_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ __u32 retmode;
++ struct acl_subject_label *subj;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (unlikely
++ ((current->ptrace & PT_PTRACED)
++ && !(current->acl->mode & GR_OVERRIDE)))
++ retmode = gr_search_file(dentry, GR_PTRACERD, mnt);
++ else
++ return 0;
++
++ subj = chk_subj_label(dentry, mnt, current->role);
++
++ if (!(retmode & GR_PTRACERD) &&
++ !(current->role->roletype & GR_ROLE_GOD) &&
++ (current->acl != subj)) {
++ security_alert(GR_PTRACE_EXEC_ACL_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 0;
++}
++
++int
++gr_handle_mmap(const struct file *filp, const unsigned long prot)
++{
++ struct acl_object_label *obj, *obj2;
++
++ if (unlikely(!(gr_status & GR_READY) ||
++ (current->acl->mode & GR_OVERRIDE) || !filp ||
++ !(prot & PROT_EXEC)))
++ return 0;
++
++ if (unlikely(current->is_writable))
++ return 0;
++
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt,
++ current->role->root_label);
++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++ security_alert(GR_WRITLIB_ACL_MSG,
++ gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ mode =
++ gr_search_file(file->f_dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_vfsmnt);
++
++ if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) {
++ security_alert(GR_MMAP_ACL_MSG, "denied",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ security_audit(GR_MMAP_ACL_MSG, "successful",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ mode =
++ gr_search_file(file->f_dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_vfsmnt);
++
++ if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) {
++ security_alert(GR_MPROTECT_ACL_MSG, "denied",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ security_audit(GR_MPROTECT_ACL_MSG, "successful",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ u64 runtime64;
++ unsigned long runtime;
++ unsigned long cputime;
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ char cur_tty[64] = { 0 };
++ char parent_tty[64] = { 0 };
++
++ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++ !(task->acl->mode & GR_PROCACCT)))
++ return;
++
++ runtime64 = get_jiffies_64() - task->start_time;
++ do_div(runtime64, HZ);
++ runtime = (unsigned long)runtime64;
++ wday = runtime / (3600 * 24);
++ runtime -= wday * (3600 * 24);
++ whr = runtime / 3600;
++ runtime -= whr * 3600;
++ wmin = runtime / 60;
++ runtime -= wmin * 60;
++ wsec = runtime;
++
++ cputime = (task->utime + task->stime) / HZ;
++ cday = cputime / (3600 * 24);
++ cputime -= cday * (3600 * 24);
++ chr = cputime / 3600;
++ cputime -= chr * 3600;
++ cmin = cputime / 60;
++ cputime -= cmin * 60;
++ csec = cputime;
++
++ security_audit(GR_ACL_PROCACCT_MSG, gr_task_fullpath(task), task->comm,
++ task->pid, NIPQUAD(task->curr_ip), tty_name(task->tty,
++ cur_tty),
++ task->uid, task->euid, task->gid, task->egid, wday, whr,
++ wmin, wsec, cday, chr, cmin, csec,
++ (task->flags & PF_SIGNALED) ? "killed by signal" : "exited",
++ code, gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid,
++ NIPQUAD(task->parent->curr_ip),
++ tty_name(task->parent->tty, parent_tty),
++ task->parent->uid, task->parent->euid, task->parent->gid,
++ task->parent->egid);
++
++ return;
++}
++
++EXPORT_SYMBOL(gr_set_kernel_label);
++
++void gr_set_kernel_label(struct task_struct *task)
++{
++ if (gr_status & GR_READY) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ }
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/gracl_alloc.c linux-2.6.5-new/grsecurity/gracl_alloc.c
+--- linux-2.6.5/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_alloc.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,93 @@
++/* stack-based acl allocation tracking (c) Brad Spengler 2002,2003 */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static unsigned long alloc_stack_next = 1;
++static unsigned long alloc_stack_size = 1;
++static void **alloc_stack;
++
++static __inline__ int
++alloc_pop(void)
++{
++ if (alloc_stack_next == 1)
++ return 0;
++
++ kfree(alloc_stack[alloc_stack_next - 2]);
++
++ alloc_stack_next--;
++
++ return 1;
++}
++
++static __inline__ void
++alloc_push(void *buf)
++{
++ if (alloc_stack_next >= alloc_stack_size)
++ BUG();
++
++ alloc_stack[alloc_stack_next - 1] = buf;
++
++ alloc_stack_next++;
++
++ return;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++ void *ret;
++
++ if (len > PAGE_SIZE)
++ BUG();
++
++ ret = kmalloc(len, GFP_KERNEL);
++
++ if (ret)
++ alloc_push(ret);
++
++ return ret;
++}
++
++void
++acl_free_all(void)
++{
++ if (gr_acl_is_enabled() || !alloc_stack)
++ return;
++
++ while (alloc_pop()) ;
++
++ if (alloc_stack) {
++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++ kfree(alloc_stack);
++ else
++ vfree(alloc_stack);
++ }
++
++ alloc_stack = NULL;
++ alloc_stack_size = 1;
++ alloc_stack_next = 1;
++
++ return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++ if ((size * sizeof (void *)) <= PAGE_SIZE)
++ alloc_stack =
++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++ else
++ alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++ alloc_stack_size = size;
++
++ if (!alloc_stack)
++ return 0;
++ else
++ return 1;
++}
+diff -urN linux-2.6.5/grsecurity/gracl_cap.c linux-2.6.5-new/grsecurity/gracl_cap.c
+--- linux-2.6.5/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_cap.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,115 @@
++/* capability handling routines, (c) Brad Spengler 2002,2003 */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/capability.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static const char *captab_log[29] = {
++ "CAP_CHOWN",
++ "CAP_DAC_OVERRIDE",
++ "CAP_DAC_READ_SEARCH",
++ "CAP_FOWNER",
++ "CAP_FSETID",
++ "CAP_KILL",
++ "CAP_SETGID",
++ "CAP_SETUID",
++ "CAP_SETPCAP",
++ "CAP_LINUX_IMMUTABLE",
++ "CAP_NET_BIND_SERVICE",
++ "CAP_NET_BROADCAST",
++ "CAP_NET_ADMIN",
++ "CAP_NET_RAW",
++ "CAP_IPC_LOCK",
++ "CAP_IPC_OWNER",
++ "CAP_SYS_MODULE",
++ "CAP_SYS_RAWIO",
++ "CAP_SYS_CHROOT",
++ "CAP_SYS_PTRACE",
++ "CAP_SYS_PACCT",
++ "CAP_SYS_ADMIN",
++ "CAP_SYS_BOOT",
++ "CAP_SYS_NICE",
++ "CAP_SYS_RESOURCE",
++ "CAP_SYS_TIME",
++ "CAP_SYS_TTY_CONFIG",
++ "CAP_MKNOD",
++ "CAP_LEASE"
++};
++
++EXPORT_SYMBOL(gr_task_is_capable);
++
++int
++gr_task_is_capable(struct task_struct *task, const int cap)
++{
++ struct acl_subject_label *curracl;
++ __u32 cap_drop = 0, cap_mask = 0;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = task->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++ cap_drop |= curracl->cap_lower & (cap_mask & ~curracl->cap_mask);
++ cap_mask |= curracl->cap_mask;
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ curracl = task->acl;
++
++ if ((curracl->mode & GR_LEARN)
++ && cap_raised(task->cap_effective, cap)) {
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, task->uid,
++ task->gid, task->exec_file ?
++ gr_to_filename(task->exec_file->f_dentry,
++ task->exec_file->f_vfsmnt) : curracl->filename,
++ curracl->filename, 0UL,
++ 0UL, "", (unsigned long) cap, NIPQUAD(task->curr_ip));
++ return 1;
++ }
++
++ if ((cap >= 0) && (cap < 29) && cap_raised(task->cap_effective, cap))
++ security_alert(GR_CAP_ACL_MSG, captab_log[cap],
++ gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid,
++ task->gid, task->egid, gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid, task->parent->uid,
++ task->parent->euid, task->parent->gid, task->parent->egid);
++
++ return 0;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++ struct acl_subject_label *curracl;
++ __u32 cap_drop = 0, cap_mask = 0;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = current->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++ cap_drop |= curracl->cap_lower & (cap_mask & ~curracl->cap_mask);
++ cap_mask |= curracl->cap_mask;
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ return 0;
++}
++
+diff -urN linux-2.6.5/grsecurity/gracl_fs.c linux-2.6.5-new/grsecurity/gracl_fs.c
+--- linux-2.6.5/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_fs.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,460 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return GR_FIND;
++
++ mode =
++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++ security_audit(GR_HIDDEN_ACL_MSG, "successful",
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return mode;
++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++ security_alert(GR_HIDDEN_ACL_MSG, "denied",
++ gr_to_filename(dentry, mnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely(!(mode & GR_FIND)))
++ return 0;
++
++ return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 reqmode = GR_FIND;
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return reqmode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ else if (unlikely(fmode & FMODE_WRITE))
++ reqmode |= GR_WRITE;
++ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ security_audit(GR_OPEN_ACL_MSG, "successful",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" :
++ reqmode & GR_APPEND ? " appending" : "",
++ DEFAULTSECARGS);
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ security_alert(GR_OPEN_ACL_MSG, "denied",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "", DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
++ reqmode |= GR_SETID;
++
++ mode =
++ gr_check_create(dentry, p_dentry, p_mnt,
++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ security_audit(GR_CREATE_ACL_MSG, "successful",
++ gr_to_filename(dentry, p_mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" :
++ reqmode & GR_APPEND ? " appending" : "",
++ DEFAULTSECARGS);
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ security_alert(GR_CREATE_ACL_MSG, "denied",
++ gr_to_filename(dentry, p_mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "", DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 mode, reqmode = GR_FIND;
++
++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
++ reqmode |= GR_EXEC;
++ if (fmode & S_IWOTH)
++ reqmode |= GR_WRITE;
++ if (fmode & S_IROTH)
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ security_audit(GR_ACCESS_ACL_MSG, "successful",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "",
++ DEFAULTSECARGS);
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ security_alert(GR_ACCESS_ACL_MSG, "denied",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "",
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++#define generic_fs_handler(dentry, mnt, reqmode, fmt) \
++{ \
++ __u32 mode; \
++ \
++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); \
++ \
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { \
++ security_audit(fmt, "successful", \
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS); \
++ return mode; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { \
++ security_alert(fmt, "denied", gr_to_filename(dentry, mnt), \
++ DEFAULTSECARGS); \
++ return 0; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode))) \
++ return 0; \
++ \
++ return (reqmode); \
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_FCHMOD_ACL_MSG);
++ } else {
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_CHMOD_ACL_MSG);
++ } else {
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++ GR_UNIXCONNECT_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_filldir(const struct dentry *dentry, const struct vfsmount *mnt,
++ const ino_t ino)
++{
++ if (likely((unsigned long)(dentry->d_inode))) {
++ struct dentry d = *dentry;
++ struct inode inode = *(dentry->d_inode);
++
++ inode.i_ino = ino;
++ d.d_inode = &inode;
++
++ if (unlikely(!gr_search_file(&d, GR_FIND | GR_NOLEARN, mnt)))
++ return 0;
++ }
++
++ return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++ old_mnt);
++
++ if (unlikely(((mode & needmode) == needmode) && mode & GR_AUDITS)) {
++ security_audit(GR_LINK_ACL_MSG, "successful",
++ gr_to_filename(old_dentry, old_mnt), to,
++ DEFAULTSECARGS);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ security_alert(GR_LINK_ACL_MSG, "denied",
++ gr_to_filename(old_dentry, old_mnt), to,
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_CREATE | GR_AUDIT_CREATE |
++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++ security_audit(GR_SYMLINK_ACL_MSG, "successful",
++ from, gr_to_filename(new_dentry, parent_mnt),
++ DEFAULTSECARGS);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ security_alert(GR_SYMLINK_ACL_MSG, "denied",
++ from, gr_to_filename(new_dentry, parent_mnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
++#define generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, reqmode, fmt) \
++{ \
++ __u32 mode; \
++ \
++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); \
++ \
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { \
++ security_audit(fmt, "successful", \
++ gr_to_filename(new_dentry, parent_mnt), \
++ DEFAULTSECARGS); \
++ return mode; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { \
++ security_alert(fmt, "denied", \
++ gr_to_filename(new_dentry, parent_mnt), \
++ DEFAULTSECARGS); \
++ return 0; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode))) \
++ return 0; \
++ \
++ return (reqmode); \
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ if (unlikely(mode & (S_ISUID | S_ISGID)))
++ reqmode |= GR_SETID;
++
++ generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt)
++{
++ generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname)
++{
++ __u32 comp1, comp2;
++ int error = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (!new_dentry->d_inode) {
++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, old_mnt);
++ } else {
++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++ GR_CREATE | GR_DELETE |
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, parent_mnt);
++ comp2 =
++ gr_search_file(old_dentry,
++ GR_READ | GR_WRITE | GR_AUDIT_READ |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++ }
++
++ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++ security_audit(GR_RENAME_ACL_MSG, "successful",
++ gr_to_filename(old_dentry, old_mnt),
++ newname, DEFAULTSECARGS);
++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++ && !(comp2 & GR_SUPPRESS)) {
++ security_alert(GR_RENAME_ACL_MSG, "denied",
++ gr_to_filename(old_dentry, old_mnt), newname,
++ DEFAULTSECARGS);
++ error = -EACCES;
++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++ error = -EACCES;
++
++ return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ u16 id;
++ char *rolename;
++
++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
++ id = current->acl_role_id;
++ rolename = current->role->rolename;
++ gr_set_acls(1);
++ security_alert_good(GR_SPROLEL_ACL_MSG,
++ rolename, id, DEFAULTSECARGS);
++ }
++
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (task->acl->mode & GR_PROTPROCFD)
++ return -EACCES;
++
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/gracl_ip.c linux-2.6.5-new/grsecurity/gracl_ip.c
+--- linux-2.6.5/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_ip.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,236 @@
++/*
++ * grsecurity/gracl_ip.c
++ * Copyright Brad Spengler 2002, 2003
++ *
++ */
++
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/smp_lock.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++#define GR_BIND 0x01
++#define GR_CONNECT 0x02
++
++static const char * gr_protocols[256] = {
++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++ };
++
++static const char * gr_socktypes[11] = {
++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
++ "unknown:7", "unknown:8", "unknown:9", "packet"
++ };
++
++__inline__ const char *
++gr_proto_to_name(unsigned char proto)
++{
++ return gr_protocols[proto];
++}
++
++__inline__ const char *
++gr_socktype_to_name(unsigned char type)
++{
++ return gr_socktypes[type];
++}
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ goto exit;
++
++ if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
++ || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
++ goto exit; // let the kernel handle it
++
++ curr = current->acl;
++
++ if (!curr->ips)
++ goto exit;
++
++ if ((curr->ip_type & (1 << type)) &&
++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
++ goto exit;
++
++ if (curr->mode & GR_LEARN) {
++		/* we don't place acls on raw sockets, and sometimes
++ dgram/ip sockets are opened for ioctl and not
++ bind/connect, so we'll fake a bind learn log */
++ if (type == SOCK_RAW || type == SOCK_PACKET) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, current->uid,
++ current->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_dentry,
++ current->exec_file->f_vfsmnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(fakeip), 0, type,
++ protocol, GR_CONNECT, NIPQUAD(current->curr_ip));
++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, current->uid,
++ current->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_dentry,
++ current->exec_file->f_vfsmnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(fakeip), 0, type,
++ protocol, GR_BIND, NIPQUAD(current->curr_ip));
++ }
++ /* we'll log when they use connect or bind */
++ goto exit;
++ }
++
++ security_alert(GR_SOCK_MSG, "inet", gr_socktype_to_name(type),
++ gr_proto_to_name(protocol), DEFAULTSECARGS);
++
++ return 0;
++ exit:
++ return 1;
++}
++
++static __inline__ int
++gr_search_connectbind(const int mode, const struct sock *sk,
++ const struct sockaddr_in *addr, const int type)
++{
++ struct acl_subject_label *curr;
++ struct acl_ip_label *ip;
++ unsigned long i;
++ __u32 ip_addr = 0;
++ __u16 ip_port = 0;
++
++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++ return 1;
++
++ curr = current->acl;
++
++ if (!curr->ips)
++ return 1;
++
++ ip_addr = addr->sin_addr.s_addr;
++ ip_port = ntohs(addr->sin_port);
++
++ for (i = 0; i < curr->ip_num; i++) {
++ ip = *(curr->ips + i);
++ if ((ip->mode & mode) &&
++ (ip_port >= ip->low) &&
++ (ip_port <= ip->high) &&
++ ((ntohl(ip_addr) & ip->netmask) ==
++ (ntohl(ip->addr) & ip->netmask))
++		    && (ip->proto[sk->sk_protocol / 32] &
++			(1 << (sk->sk_protocol % 32)))
++ && (ip->type & (1 << type)))
++ return 1;
++ }
++
++ if (curr->mode & GR_LEARN) {
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, current->uid,
++ current->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_dentry,
++ current->exec_file->f_vfsmnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(ip_addr), ip_port, type,
++ sk->sk_protocol, mode, NIPQUAD(current->curr_ip));
++ return 1;
++ }
++
++ if (mode == GR_BIND)
++ security_alert(GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port,
++ gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol),
++ DEFAULTSECARGS);
++ else if (mode == GR_CONNECT)
++ security_alert(GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port,
++ gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol),
++ DEFAULTSECARGS);
++
++ return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_CONNECT, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_BIND, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ if (addr)
++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++ else {
++ struct sockaddr_in sin;
++ const struct inet_opt *inet = inet_sk(sk);
++
++ sin.sin_addr.s_addr = inet->daddr;
++ sin.sin_port = inet->dport;
++
++ return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
++ }
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ struct sockaddr_in sin;
++
++ if (unlikely(skb->len < sizeof (struct udphdr)))
++ return 1; // skip this packet
++
++ sin.sin_addr.s_addr = skb->nh.iph->saddr;
++ sin.sin_port = skb->h.uh->source;
++
++ return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
++}
+diff -urN linux-2.6.5/grsecurity/gracl_learn.c linux-2.6.5-new/grsecurity/gracl_learn.c
+--- linux-2.6.5/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_learn.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,204 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/smp_lock.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char * buf,
++ size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static spinlock_t gr_learn_lock = SPIN_LOCK_UNLOCKED;
++static DECLARE_MUTEX(gr_learn_user_sem);
++
++/* we need to maintain two buffers, so that the kernel context of grlearn
++ uses a semaphore around the userspace copying, and the other kernel contexts
++ use a spinlock when copying into the buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
++static ssize_t
++read_learn(struct file *file, char * buf, size_t count, loff_t * ppos)
++{
++ DECLARE_WAITQUEUE(wait, current);
++ ssize_t retval = 0;
++
++ add_wait_queue(&learn_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++ do {
++ down(&gr_learn_user_sem);
++ spin_lock(&gr_learn_lock);
++ if (learn_buffer_len)
++ break;
++ spin_unlock(&gr_learn_lock);
++ up(&gr_learn_user_sem);
++ if (file->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto out;
++ }
++ if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ goto out;
++ }
++
++ schedule();
++ } while (1);
++
++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++ learn_buffer_user_len = learn_buffer_len;
++ retval = learn_buffer_len;
++ learn_buffer_len = 0;
++
++ spin_unlock(&gr_learn_lock);
++
++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++ retval = -EFAULT;
++
++ up(&gr_learn_user_sem);
++out:
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&learn_wait, &wait);
++ return retval;
++}
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++ poll_wait(file, &learn_wait, wait);
++
++ if (learn_buffer_len)
++ return (POLLIN | POLLRDNORM);
++
++ return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++ char *tmp;
++
++ down(&gr_learn_user_sem);
++ if (learn_buffer != NULL) {
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++		vfree(tmp);
++ }
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ up(&gr_learn_user_sem);
++
++ return;
++}
++
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++ va_list args;
++ unsigned int len;
++
++ if (!gr_learn_attached)
++ return;
++
++ spin_lock(&gr_learn_lock);
++
++ /* leave a gap at the end so we know when it's "full" but don't have to
++ compute the exact length of the string we're trying to append
++ */
++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++ return;
++ }
++ if (learn_buffer == NULL) {
++ spin_unlock(&gr_learn_lock);
++ return;
++ }
++
++ va_start(args, fmt);
++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++ va_end(args);
++
++ learn_buffer_len += len + 1;
++
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++
++ return;
++}
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ && gr_learn_attached)
++ return -EBUSY;
++ if (file->f_mode & FMODE_READ) {
++ down(&gr_learn_user_sem);
++ if (learn_buffer == NULL)
++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer_user == NULL)
++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer == NULL)
++ return -ENOMEM;
++ if (learn_buffer_user == NULL)
++ return -ENOMEM;
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 1;
++ up(&gr_learn_user_sem);
++ }
++ return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++ char *tmp;
++
++ if (file->f_mode & FMODE_READ) {
++ down(&gr_learn_user_sem);
++ if (learn_buffer != NULL) {
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ vfree(tmp);
++ }
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 0;
++ up(&gr_learn_user_sem);
++ }
++
++ return 0;
++}
++
++struct file_operations grsec_fops = {
++ read: read_learn,
++ write: write_grsec_handler,
++ open: open_learn,
++ release: close_learn,
++ poll: poll_learn,
++};
+diff -urN linux-2.6.5/grsecurity/gracl_res.c linux-2.6.5-new/grsecurity/gracl_res.c
+--- linux-2.6.5/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_res.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,50 @@
++/* resource handling routines (c) Brad Spengler 2002, 2003 */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
++static const char *restab_log[11] = {
++ "RLIMIT_CPU",
++ "RLIMIT_FSIZE",
++ "RLIMIT_DATA",
++ "RLIMIT_STACK",
++ "RLIMIT_CORE",
++ "RLIMIT_RSS",
++ "RLIMIT_NPROC",
++ "RLIMIT_NOFILE",
++ "RLIMIT_MEMLOCK",
++ "RLIMIT_AS",
++ "RLIMIT_LOCKS"
++};
++
++__inline__ void
++gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ if (unlikely(res == RLIMIT_NPROC &&
++ (cap_raised(task->cap_effective, CAP_SYS_ADMIN) ||
++ cap_raised(task->cap_effective, CAP_SYS_RESOURCE))))
++ return;
++
++ preempt_disable();
++
++ if (unlikely(((gt && wanted > task->rlim[res].rlim_cur) ||
++ (!gt && wanted >= task->rlim[res].rlim_cur)) &&
++ task->rlim[res].rlim_cur != RLIM_INFINITY))
++ security_alert(GR_RESOURCE_MSG, wanted, restab_log[res],
++ task->rlim[res].rlim_cur,
++ gr_task_fullpath(task), task->comm,
++ task->pid, task->uid, task->euid,
++ task->gid, task->egid,
++ gr_parent_task_fullpath(task),
++ task->parent->comm,
++ task->parent->pid, task->parent->uid,
++ task->parent->euid, task->parent->gid,
++ task->parent->egid);
++
++ preempt_enable_no_resched();
++
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/gracl_segv.c linux-2.6.5-new/grsecurity/gracl_segv.c
+--- linux-2.6.5/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_segv.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,330 @@
++/*
++ * grsecurity/gracl_segv.c
++ * Copyright Brad Spengler 2002, 2003
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static rwlock_t gr_uid_lock = RW_LOCK_UNLOCKED;
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
++ struct acl_role_label *role);
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
++int
++gr_init_uidset(void)
++{
++ uid_set =
++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++ uid_used = 0;
++
++ return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++ if (uid_set)
++ kfree(uid_set);
++
++ return;
++}
++
++int
++gr_find_uid(const uid_t uid)
++{
++ struct crash_uid *tmp = uid_set;
++ uid_t buid;
++ int low = 0, high = uid_used - 1, mid;
++
++ while (high >= low) {
++ mid = (low + high) >> 1;
++ buid = tmp[mid].uid;
++ if (buid == uid)
++ return mid;
++ if (buid > uid)
++ high = mid - 1;
++ if (buid < uid)
++ low = mid + 1;
++ }
++
++ return -1;
++}
++
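++/* keep uid_set sorted by uid so gr_find_uid() can binary-search it */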
++static __inline__ void
++gr_insertsort(void)
++{
++ unsigned short i, j;
++ struct crash_uid index;
++
++ for (i = 1; i < uid_used; i++) {
++ index = uid_set[i];
++ j = i;
++ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++ uid_set[j] = uid_set[j - 1];
++ j--;
++ }
++ uid_set[j] = index;
++ }
++
++ return;
++}
++
++static __inline__ void
++gr_insert_uid(const uid_t uid, const unsigned long expires)
++{
++ int loc;
++
++ if (uid_used == GR_UIDTABLE_MAX)
++ return;
++
++ loc = gr_find_uid(uid);
++
++ if (loc >= 0) {
++ uid_set[loc].expires = expires;
++ return;
++ }
++
++ uid_set[uid_used].uid = uid;
++ uid_set[uid_used].expires = expires;
++ uid_used++;
++
++ gr_insertsort();
++
++ return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++ unsigned short i;
++
++ for (i = loc + 1; i < uid_used; i++)
++		uid_set[i - 1] = uid_set[i];
++
++ uid_used--;
++
++ return;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ int loc;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_uid_lock);
++ loc = gr_find_uid(uid);
++ read_unlock(&gr_uid_lock);
++
++ if (loc < 0)
++ return 0;
++
++ write_lock(&gr_uid_lock);
++ if (time_before_eq(uid_set[loc].expires, get_seconds()))
++ gr_remove_uid(loc);
++ else {
++ write_unlock(&gr_uid_lock);
++ return 1;
++ }
++
++ write_unlock(&gr_uid_lock);
++ return 0;
++}
++
++static __inline__ int
++proc_is_setxid(const struct task_struct *task)
++{
++ if (task->uid != task->euid || task->uid != task->suid ||
++ task->uid != task->fsuid)
++ return 1;
++ if (task->gid != task->egid || task->gid != task->sgid ||
++ task->gid != task->fsgid)
++ return 1;
++
++ return 0;
++}
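++
++/*
++ * deliver sig to t even if it is currently blocked or ignored: reset the
++ * handler to SIG_DFL, unblock the signal, then send it via
++ * specific_send_sig_info() with a (void *)1L siginfo marker.
++ */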
++static __inline__ int
++gr_fake_force_sig(int sig, struct task_struct *t)
++{
++ unsigned long int flags;
++ int ret;
++
++ spin_lock_irqsave(&t->sighand->siglock, flags);
++ if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
++ t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
++ sigdelset(&t->blocked, sig);
++ recalc_sigpending_tsk(t);
++ }
++ ret = specific_send_sig_info(sig, (void*)1L, t);
++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++ return ret;
++}
++
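++/*
++ * crash rate limiting: count crashes per subject within the window given
++ * by the GR_CRASH_RES resource; once the limit is exceeded, ban the
++ * offending uid for setxid tasks or kill every task running the same
++ * subject binary.
++ */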
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ struct acl_subject_label *curr;
++ struct acl_subject_label *curr2;
++ struct task_struct *tsk, *tsk2;
++
++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++ return;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curr = task->acl;
++
++ if (!(curr->resmask & (1 << GR_CRASH_RES)))
++ return;
++
++ if (time_before_eq(curr->expires, get_seconds())) {
++ curr->expires = 0;
++ curr->crashes = 0;
++ }
++
++ curr->crashes++;
++
++ if (!curr->expires)
++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds())) {
++ if (task->uid && proc_is_setxid(task)) {
++ security_alert(GR_SEGVSTART_ACL_MSG,
++ gr_task_fullpath(task), task->comm,
++ task->pid, task->uid, task->euid,
++ task->gid, task->egid,
++ gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid,
++ task->parent->uid, task->parent->euid,
++ task->parent->gid, task->parent->egid,
++ task->uid,
++ curr->res[GR_CRASH_RES].rlim_max);
++ write_lock(&gr_uid_lock);
++ gr_insert_uid(task->uid, curr->expires);
++ write_unlock(&gr_uid_lock);
++ curr->expires = 0;
++ curr->crashes = 0;
++ read_lock(&tasklist_lock);
++ for_each_process(tsk) {
++ tsk2 = tsk;
++ do {
++ if (tsk2 != task && tsk2->uid == task->uid)
++ gr_fake_force_sig(SIGKILL, tsk2);
++ } while ((tsk2 = next_thread(tsk2)) != tsk);
++ }
++ read_unlock(&tasklist_lock);
++ } else {
++ security_alert(GR_SEGVNOSUID_ACL_MSG,
++ gr_task_fullpath(task), task->comm,
++ task->pid, task->uid, task->euid,
++ task->gid, task->egid,
++ gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid,
++ task->parent->uid, task->parent->euid,
++ task->parent->gid, task->parent->egid,
++ curr->res[GR_CRASH_RES].rlim_max);
++ read_lock(&tasklist_lock);
++ for_each_process(tsk) {
++ tsk2 = tsk;
++ do {
++ if (likely(tsk2 != task)) {
++ curr2 = tsk2->acl;
++
++ if (curr2->device == curr->device &&
++ curr2->inode == curr->inode)
++ gr_fake_force_sig(SIGKILL, tsk2);
++ }
++ } while ((tsk2 = next_thread(tsk2)) != tsk);
++ }
++ read_unlock(&tasklist_lock);
++ }
++ }
++
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_inode_lock);
++ curr = lookup_acl_subj_label(filp->f_dentry->d_inode->i_ino,
++ filp->f_dentry->d_inode->i_sb->s_dev,
++ current->role);
++ read_unlock(&gr_inode_lock);
++
++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
++ (!curr->crashes && !curr->expires))
++ return 0;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds()))
++ return 1;
++ else if (time_before_eq(curr->expires, get_seconds())) {
++ curr->crashes = 0;
++ curr->expires = 0;
++ }
++
++ return 0;
++}
++
++void
++gr_handle_alertkill(void)
++{
++ struct acl_subject_label *curracl;
++ __u32 curr_ip;
++ struct task_struct *task, *task2;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curracl = current->acl;
++ curr_ip = current->curr_ip;
++
++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip &&
++ (curr_ip != 0xffffffff)) {
++ read_lock(&tasklist_lock);
++ for_each_process(task) {
++ task2 = task;
++ do {
++ if (task2->curr_ip == curr_ip)
++ gr_fake_force_sig(SIGKILL, task2);
++ } while ((task2 = next_thread(task2)) != task);
++ }
++ read_unlock(&tasklist_lock);
++ } else if (curracl->mode & GR_KILLPROC)
++ gr_fake_force_sig(SIGKILL, current);
++
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/gracl_shm.c linux-2.6.5-new/grsecurity/gracl_shm.c
+--- linux-2.6.5/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/gracl_shm.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,36 @@
++/* shared memory handling routines, (c) Brad Spengler 2002, 2003 */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ struct task_struct *task;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ task = find_task_by_pid(shm_cprid);
++
++ if (unlikely(!task))
++ task = find_task_by_pid(shm_lapid);
++
++ if (unlikely(task && ((task->start_time < shm_createtime) ||
++ (task->pid == shm_lapid)) &&
++ (task->acl->mode & GR_PROTSHM) &&
++ (task->acl != current->acl))) {
++ security_alert(GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid,
++ DEFAULTSECARGS);
++ return 0;
++ }
++
++ return 1;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_chdir.c linux-2.6.5-new/grsecurity/grsec_chdir.c
+--- linux-2.6.5/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_chdir.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,20 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ if ((grsec_enable_chdir && grsec_enable_group &&
++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++ !grsec_enable_group)) {
++ security_audit(GR_CHDIR_AUDIT_MSG, gr_to_filename(dentry, mnt),
++ DEFAULTSECARGS);
++ }
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_chroot.c linux-2.6.5-new/grsecurity/grsec_chroot.c
+--- linux-2.6.5/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_chroot.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,348 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ struct pid *spid = NULL;
++
++ if (unlikely(!grsec_enable_chroot_unix))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ spid = find_pid(PIDTYPE_PID, pid);
++ if (spid) {
++ struct task_struct *p;
++ p = pid_task(spid->task_list.next, PIDTYPE_PID);
++ if (unlikely(!have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ security_alert(GR_UNIX_CHROOT_MSG, DEFAULTSECARGS);
++ return 0;
++ }
++ }
++ read_unlock(&tasklist_lock);
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++ security_alert(GR_NICE_CHROOT_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && (!have_same_root(p, current)
++ || (have_same_root(p, current)
++ && (niceval < task_nice(p))
++ && proc_is_chrooted(current)))) {
++ security_alert(GR_PRIORITY_CHROOT_MSG, p->comm, p->pid,
++ DEFAULTSECARGS);
++ return -ESRCH;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_capset(const struct task_struct *target)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ !have_same_root(current, target)) {
++ security_alert(GR_CAPSET_CHROOT_MSG, target->comm, target->pid,
++ DEFAULTSECARGS);
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++int
++gr_pid_is_chrooted(const struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ if (!grsec_enable_chroot_findtask || (current->pid <= 1))
++ return 0;
++
++ if (p && p->fs && p->fs->root && p->fs->root->d_inode &&
++ child_reaper && child_reaper->fs && child_reaper->fs->root &&
++ child_reaper->fs->root->d_inode && current && current->fs &&
++ current->fs->root && current->fs->root->d_inode) {
++ if (proc_is_chrooted(current) && !have_same_root(current, p))
++ return 1;
++ }
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
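++/*
++ * walk (u_dentry, u_mnt) upward; returns 0 if init's real root is reached
++ * first (the path lies outside the caller's chroot), 1 otherwise.
++ */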
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++ struct dentry *dentry = (struct dentry *)u_dentry;
++ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
++ struct dentry *realroot;
++ struct vfsmount *realrootmnt;
++ struct dentry *currentroot;
++ struct vfsmount *currentmnt;
++
++ read_lock(&child_reaper->fs->lock);
++ realrootmnt = mntget(child_reaper->fs->rootmnt);
++ realroot = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++
++ read_lock(&current->fs->lock);
++ currentmnt = mntget(current->fs->rootmnt);
++ currentroot = dget(current->fs->root);
++ read_unlock(&current->fs->lock);
++
++ spin_lock(&dcache_lock);
++ for (;;) {
++ if (unlikely((dentry == realroot && mnt == realrootmnt)
++ || (dentry == currentroot && mnt == currentmnt)))
++ break;
++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
++ if (mnt->mnt_parent == mnt)
++ break;
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++ dentry = dentry->d_parent;
++ }
++ spin_unlock(&dcache_lock);
++
++ dput(currentroot);
++ mntput(currentmnt);
++
++ if (dentry == realroot && mnt == realrootmnt) {
++ /* access is outside of chroot */
++ dput(realroot);
++ mntput(realrootmnt);
++ return 0;
++ }
++
++ dput(realroot);
++ mntput(realrootmnt);
++ return 1;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++ security_alert(GR_CHROOT_FCHDIR_MSG,
++ gr_to_filename(u_dentry, u_mnt),
++ DEFAULTSECARGS);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ struct pid *pid = NULL;
++ u64 starttime64;
++ time_t starttime;
++
++ if (unlikely(!grsec_enable_chroot_shmat))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ pid = find_pid(PIDTYPE_PID, shm_cprid);
++ if (pid) {
++ struct task_struct *p;
++ p = pid_task(pid->task_list.next, PIDTYPE_PID);
++ starttime64 = p->start_time;
++ do_div(starttime64, HZ);
++ starttime = (time_t) starttime64;
++ if (unlikely(!have_same_root(current, p) &&
++ time_before((unsigned long)starttime, (unsigned long)shm_createtime))) {
++ read_unlock(&tasklist_lock);
++ security_alert(GR_SHMAT_CHROOT_MSG, DEFAULTSECARGS);
++ return 0;
++ }
++ } else {
++ pid = find_pid(PIDTYPE_PID, shm_lapid);
++ if (pid) {
++ struct task_struct *p;
++ p = pid_task(pid->task_list.next, PIDTYPE_PID);
++ if (unlikely(!have_same_root(current, p))) {
++ read_unlock(&tasklist_lock);
++ security_alert(GR_SHMAT_CHROOT_MSG, DEFAULTSECARGS);
++ return 0;
++ }
++ }
++ }
++
++ read_unlock(&tasklist_lock);
++#endif
++ return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++ security_audit(GR_EXEC_CHROOT_MSG, gr_to_filename(dentry, mnt),
++ DEFAULTSECARGS);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
++ proc_is_chrooted(current)) {
++ security_alert(GR_MKNOD_CHROOT_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++ security_alert(GR_MOUNT_CHROOT_MSG, dev_name,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++ security_alert(GR_PIVOT_CHROOT_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++ !gr_is_outside_chroot(dentry, mnt)) {
++ security_alert(GR_CHROOT_CHROOT_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_caps(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
++ task->cap_permitted =
++ cap_drop(task->cap_permitted, GR_CHROOT_CAPS);
++ task->cap_inheritable =
++ cap_drop(task->cap_inheritable, GR_CHROOT_CAPS);
++ task->cap_effective =
++ cap_drop(task->cap_effective, GR_CHROOT_CAPS);
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
++ && (op & 002))
++ return -EACCES;
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ if (grsec_enable_chroot_chdir)
++ set_fs_pwd(current->fs, mnt, dentry);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ if (grsec_enable_chroot_chmod &&
++ ((mode & S_ISUID) || (mode & S_ISGID)) &&
++ proc_is_chrooted(current)) {
++ security_alert(GR_CHMOD_CHROOT_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_disabled.c linux-2.6.5-new/grsecurity/grsec_disabled.c
+--- linux-2.6.5/grsecurity/grsec_disabled.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_disabled.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,406 @@
++/*
++ * when grsecurity is disabled, compile all external functions into nothing
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/config.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++__inline__ void
++pax_set_flags(struct linux_binprm *bprm)
++{
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__inline__ __u32
++gr_handle_sysctl(const struct ctl_table * table, __u32 mode)
++{
++ return mode;
++}
++#endif
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_rawio(const struct inode *inode)
++{
++ return 0;
++}
++
++__inline__ void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ return;
++}
++
++__inline__ int
++gr_handle_ptrace_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_mmap(const struct file *filp, const unsigned long prot)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ return 0;
++}
++
++__inline__ void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ return;
++}
++
++__inline__ int
++gr_set_acls(const int type)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_protected_task(const struct task_struct *task)
++{
++ return 0;
++}
++
++__inline__ void
++gr_copy_label(struct task_struct *tsk)
++{
++ return;
++}
++
++__inline__ void
++gr_set_pax_flags(struct task_struct *task)
++{
++ return;
++}
++
++__inline__ void
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++__inline__ void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ return;
++}
++
++__inline__ void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++__inline__ void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ return;
++}
++
++__inline__ int
++gr_check_crash_exec(const struct file *filp)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_crash_uid(const uid_t uid)
++{
++ return 0;
++}
++
++__inline__ void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ return;
++}
++
++__inline__ int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_connectbind(const int mode, const struct socket *sock,
++ const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ int
++gr_task_is_capable(struct task_struct *task, const int cap)
++{
++ return 1;
++}
++
++__inline__ int
++gr_is_capable_nolog(const int cap)
++{
++ return 1;
++}
++
++__inline__ void
++gr_handle_alertkill(void)
++{
++ return;
++}
++
++__inline__ __u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++ unsigned int *vm_flags)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_access(const struct dentry * dentry,
++ const struct vfsmount * mnt, const int fmode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ void
++grsecurity_init(void)
++{
++ return;
++}
++
++__inline__ __u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ return 1;
++}
++
++__inline__ int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct inode *old_parent_inode,
++ const struct vfsmount *old_mnt, const char *newname)
++{
++ return 0;
++}
++
++__inline__ __u32
++gr_acl_handle_filldir(const struct dentry * dentry,
++ const struct vfsmount * mnt, const ino_t ino)
++{
++ return 1;
++}
++
++__inline__ int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ return 1;
++}
++
++__inline__ void
++gr_acl_handle_exit(void)
++{
++ return;
++}
++
++__inline__ int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ return 1;
++}
++
++__inline__ void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++ return;
++}
++
++__inline__ int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ return 0;
++}
++
++__inline__ int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ void
++gr_set_kernel_label(struct task_struct *task)
++{
++ return;
++}
++
++EXPORT_SYMBOL(gr_task_is_capable);
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++
+diff -urN linux-2.6.5/grsecurity/grsec_exec.c linux-2.6.5-new/grsecurity/grsec_exec.c
+--- linux-2.6.5/grsecurity/grsec_exec.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_exec.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,71 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++
++#include <asm/uaccess.h>
++
++int
++gr_handle_nproc(void)
++{
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ if (grsec_enable_execve && current->user &&
++ (atomic_read(&current->user->processes) >
++ current->rlim[RLIMIT_NPROC].rlim_cur) &&
++ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
++ security_alert(GR_NPROC_MSG, DEFAULTSECARGS);
++ return -EAGAIN;
++ }
++#endif
++ return 0;
++}
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, char **argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char grarg[64] = { 0 };
++ __u8 execlen = 0;
++ unsigned int i;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ if (unlikely(!argv))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 62; i++) {
++ char *p;
++ __u8 len;
++
++ if (get_user(p, argv + i))
++ goto log;
++ if (!p)
++ goto log;
++ len = strnlen_user(p, 62 - execlen);
++ if (len > 62 - execlen)
++ len = 62 - execlen;
++ else if (len > 0)
++ len--;
++ if (copy_from_user(grarg + execlen, p, len))
++ goto log;
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ security_audit(GR_EXEC_AUDIT_MSG, gr_to_filename(bprm->file->f_dentry,
++ bprm->file->f_vfsmnt),
++ grarg, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_fifo.c linux-2.6.5-new/grsecurity/grsec_fifo.c
+--- linux-2.6.5/grsecurity/grsec_fifo.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_fifo.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++ (current->fsuid != dentry->d_inode->i_uid)) {
++ if (!vfs_permission(dentry->d_inode, acc_mode))
++ security_alert(GR_FIFO_MSG, gr_to_filename(dentry, mnt),
++ dentry->d_inode->i_uid,
++ dentry->d_inode->i_gid, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_fork.c linux-2.6.5-new/grsecurity/grsec_fork.c
+--- linux-2.6.5/grsecurity/grsec_fork.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_fork.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,14 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ if (grsec_enable_forkfail)
++ security_alert(GR_FAILFORK_MSG, retval, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_init.c linux-2.6.5-new/grsecurity/grsec_init.c
+--- linux-2.6.5/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_init.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,227 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp_lock.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_fifo;
++int grsec_enable_execve;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_time;
++int grsec_enable_audit_textrel;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_audit_ipc;
++int grsec_enable_mount;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_tpe_all;
++int grsec_enable_randpid;
++int grsec_enable_randid;
++int grsec_enable_randisn;
++int grsec_enable_randsrc;
++int grsec_enable_randrpc;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_lock;
++
++spinlock_t grsec_alert_lock = SPIN_LOCK_UNLOCKED;
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++spinlock_t grsec_alertgood_lock = SPIN_LOCK_UNLOCKED;
++unsigned long grsec_alertgood_wtime = 0;
++unsigned long grsec_alertgood_fyet = 0;
++
++spinlock_t grsec_audit_lock = SPIN_LOCK_UNLOCKED;
++
++char *gr_shared_page[4];
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++extern struct task_struct **gr_conn_table;
++extern const unsigned int gr_conn_table_size;
++
++void
++grsecurity_init(void)
++{
++ int j;
++ /* create the per-cpu shared pages */
++
++ preempt_disable();
++ for (j = 0; j < 4; j++) {
++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(char *));
++ if (gr_shared_page[j] == NULL) {
++ panic("Unable to allocate grsecurity shared page");
++ return;
++ }
++ }
++ preempt_enable();
++
++ /* create hash tables for ip tagging */
++
++ gr_conn_table = (struct task_struct **) vmalloc(gr_conn_table_size * sizeof(struct task_struct *));
++ if (gr_conn_table == NULL) {
++ panic("Unable to allocate grsecurity IP tagging table");
++ return;
++ }
++ memset(gr_conn_table, 0, gr_conn_table_size * sizeof(struct task_struct *));
++
++ /* allocate memory for authentication structure */
++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++ panic("Unable to allocate grsecurity authentication structure");
++ return;
++ }
++
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++ grsec_lock = 1;
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ grsec_enable_audit_textrel = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ grsec_enable_group = 1;
++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ grsec_enable_audit_ipc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ grsec_enable_execve = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ grsec_enable_tpe = 1;
++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDPID
++ grsec_enable_randpid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDID
++ grsec_enable_randid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ grsec_enable_randisn = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ grsec_enable_randsrc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++ grsec_enable_randrpc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ grsec_enable_socket_all = 1;
++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ grsec_enable_socket_client = 1;
++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ grsec_enable_socket_server = 1;
++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_ipc.c linux-2.6.5-new/grsecurity/grsec_ipc.c
+--- linux-2.6.5/grsecurity/grsec_ipc.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_ipc.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,81 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/ipc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_msgget(const int ret, const int msgflg)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
++ !grsec_enable_group)) && (ret >= 0)
++ && (msgflg & IPC_CREAT))
++ security_audit(GR_MSGQ_AUDIT_MSG, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_msgrm(const uid_t uid, const uid_t cuid)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) ||
++ (grsec_enable_audit_ipc && !grsec_enable_group))
++ security_audit(GR_MSGQR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_semget(const int err, const int semflg)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
++ !grsec_enable_group)) && (err >= 0)
++ && (semflg & IPC_CREAT))
++ security_audit(GR_SEM_AUDIT_MSG, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_semrm(const uid_t uid, const uid_t cuid)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) ||
++ (grsec_enable_audit_ipc && !grsec_enable_group))
++ security_audit(GR_SEMR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_shmget(const int err, const int shmflg, const size_t size)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
++ !grsec_enable_group)) && (err >= 0)
++ && (shmflg & IPC_CREAT))
++ security_audit(GR_SHM_AUDIT_MSG, size, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_shmrm(const uid_t uid, const uid_t cuid)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) ||
++ (grsec_enable_audit_ipc && !grsec_enable_group))
++ security_audit(GR_SHMR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_link.c linux-2.6.5-new/grsecurity/grsec_link.c
+--- linux-2.6.5/grsecurity/grsec_link.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_link.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,41 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++ (parent->i_mode & S_IWOTH) && (current->fsuid != inode->i_uid)) {
++ security_alert(GR_SYMLINK_MSG, gr_to_filename(dentry, mnt),
++ inode->i_uid, inode->i_gid, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ if (grsec_enable_link && current->fsuid != inode->i_uid &&
++ (!S_ISREG(mode) || (mode & S_ISUID) ||
++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
++ (vfs_permission(inode, MAY_READ | MAY_WRITE))) &&
++ !capable(CAP_FOWNER) && current->uid) {
++ security_alert(GR_HARDLINK_MSG, gr_to_filename(dentry, mnt),
++ inode->i_uid, inode->i_gid, to, DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_mem.c linux-2.6.5-new/grsecurity/grsec_mem.c
+--- linux-2.6.5/grsecurity/grsec_mem.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_mem.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,54 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++ security_alert(GR_IOPERM_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_iopl(void)
++{
++ security_alert(GR_IOPL_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_mem_write(void)
++{
++ security_alert(GR_MEM_WRITE_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_kmem_write(void)
++{
++ security_alert(GR_KMEM_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_open_port(void)
++{
++ security_alert(GR_PORT_OPEN_MSG, DEFAULTSECARGS);
++ return;
++}
++
++int
++gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
++{
++ if (offset < __pa(high_memory) && (vma->vm_flags & VM_WRITE) &&
++ !(offset == 0xf0000 && ((vma->vm_end - vma->vm_start) <= 0x10000)) &&
++ !(offset == 0xa0000 && ((vma->vm_end - vma->vm_start) <= 0x20000))) {
++ security_alert(GR_MEM_MMAP_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ } else if (offset < __pa(high_memory))
++ vma->vm_flags &= ~VM_MAYWRITE;
++
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_mount.c linux-2.6.5-new/grsecurity/grsec_mount.c
+--- linux-2.6.5/grsecurity/grsec_mount.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_mount.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,34 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ security_audit(GR_REMOUNT_AUDIT_MSG, devname ? devname : "none", DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ security_audit(GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none", DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_mount(const char *from, const char *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ security_audit(GR_MOUNT_AUDIT_MSG, from, to, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_rand.c linux-2.6.5-new/grsecurity/grsec_rand.c
+--- linux-2.6.5/grsecurity/grsec_rand.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_rand.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,22 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern int pid_max;
++
++int
++gr_random_pid(void)
++{
++#ifdef CONFIG_GRKERNSEC_RANDPID
++ int pid;
++
++ if (grsec_enable_randpid && current->fs->root) {
++
++ pid = 1 + (get_random_long() % pid_max);
++ return pid;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_sig.c linux-2.6.5-new/grsecurity/grsec_sig.c
+--- linux-2.6.5/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_sig.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,48 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_signal(const int sig, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++ (sig == SIGABRT) || (sig == SIGBUS))) {
++ if (t->pid == current->pid) {
++ security_alert_good(GR_UNISIGLOG_MSG, sig,
++ DEFAULTSECARGS);
++ } else {
++ security_alert_good(GR_DUALSIGLOG_MSG, sig,
++ gr_task_fullpath0(t), t->comm,
++ t->pid, t->uid, t->euid, t->gid,
++ t->egid, gr_parent_task_fullpath0(t),
++ t->parent->comm,
++ t->parent->pid, t->parent->uid,
++ t->parent->euid, t->parent->gid,
++ t->parent->egid, DEFAULTSECARGS);
++ }
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (current->pid > 1 && gr_check_protected_task(p)) {
++ security_alert(GR_SIG_ACL_MSG, sig, gr_task_fullpath0(p),
++ p->comm, p->pid, p->uid,
++ p->euid, p->gid, p->egid,
++ gr_parent_task_fullpath0(p), p->parent->comm,
++ p->parent->pid, p->parent->uid,
++ p->parent->euid, p->parent->gid,
++ p->parent->egid, DEFAULTSECARGS);
++ return -EPERM;
++ } else if (gr_pid_is_chrooted(p)) {
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_sock.c linux-2.6.5-new/grsecurity/grsec_sock.c
+--- linux-2.6.5/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_sock.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,256 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++#if defined(CONFIG_IP_NF_MATCH_STEALTH_MODULE)
++extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
++EXPORT_SYMBOL(udp_v4_lookup);
++#endif
++#if defined(CONFIG_GRKERNSEC_RANDID)
++EXPORT_SYMBOL(ip_randomid);
++#endif
++#if defined(CONFIG_GRKERNSEC_RANDSRC) || defined(CONFIG_GRKERNSEC_RANDRPC)
++EXPORT_SYMBOL(get_random_long);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDISN
++EXPORT_SYMBOL(ip_randomisn);
++EXPORT_SYMBOL(grsec_enable_randisn);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDID
++EXPORT_SYMBOL(grsec_enable_randid);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++EXPORT_SYMBOL(grsec_enable_randsrc);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++EXPORT_SYMBOL(grsec_enable_randrpc);
++#endif
++
++EXPORT_SYMBOL(gr_cap_rtnetlink);
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++struct task_struct **gr_conn_table;
++const unsigned int gr_conn_table_size = 65521;
++struct task_struct *deleted_conn = (struct task_struct *)~0;
++spinlock_t gr_conn_table_lock = SPIN_LOCK_UNLOCKED;
++
++extern __inline__ const char * gr_socktype_to_name(unsigned char type);
++extern __inline__ const char * gr_proto_to_name(unsigned char proto);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct task_struct *task, __u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ if (unlikely(task != deleted_conn && task->gr_saddr == saddr &&
++ task->gr_daddr == daddr && task->gr_sport == sport &&
++ task->gr_dport == dport))
++ return 1;
++ else
++ return 0;
++}
++
++void gr_add_to_task_ip_table(struct task_struct *task)
++{
++ unsigned int index;
++
++ if (unlikely(gr_conn_table == NULL))
++ return;
++
++ if (!thread_group_leader(task))
++ task = task->group_leader;
++
++ index = conn_hash(task->gr_saddr, task->gr_daddr,
++ task->gr_sport, task->gr_dport,
++ gr_conn_table_size);
++
++ spin_lock(&gr_conn_table_lock);
++
++ while (gr_conn_table[index] && gr_conn_table[index] != deleted_conn) {
++ index = (index + 1) % gr_conn_table_size;
++ }
++
++ gr_conn_table[index] = task;
++
++ spin_unlock(&gr_conn_table_lock);
++
++ return;
++}
++
++void gr_del_task_from_ip_table_nolock(struct task_struct *task)
++{
++ unsigned int index;
++
++ if (unlikely(gr_conn_table == NULL))
++ return;
++
++ if (!thread_group_leader(task))
++ task = task->group_leader;
++
++ index = conn_hash(task->gr_saddr, task->gr_daddr,
++ task->gr_sport, task->gr_dport,
++ gr_conn_table_size);
++
++ while (gr_conn_table[index] && !conn_match(gr_conn_table[index],
++ task->gr_saddr, task->gr_daddr, task->gr_sport,
++ task->gr_dport)) {
++ index = (index + 1) % gr_conn_table_size;
++ }
++
++ if (gr_conn_table[index]) {
++ if (gr_conn_table[(index + 1) % gr_conn_table_size])
++ gr_conn_table[index] = deleted_conn;
++ else
++ gr_conn_table[index] = NULL;
++ }
++
++ return;
++}
++
++struct task_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ unsigned int index;
++
++ if (unlikely(gr_conn_table == NULL))
++ return NULL;
++
++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++ while (gr_conn_table[index] && !conn_match(gr_conn_table[index],
++ saddr, daddr, sport, dport)) {
++ index = (index + 1) % gr_conn_table_size;
++ }
++
++ return gr_conn_table[index];
++}
++
++#endif
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ spin_lock(&gr_conn_table_lock);
++ if (!thread_group_leader(task))
++ gr_del_task_from_ip_table_nolock(task->group_leader);
++ else
++ gr_del_task_from_ip_table_nolock(task);
++ spin_unlock(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct task_struct *p;
++ struct task_struct *set;
++ const struct inet_opt *inet = inet_sk(sk);
++
++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++ return;
++
++ set = current;
++ if (!thread_group_leader(set))
++ set = set->group_leader;
++
++ spin_lock(&gr_conn_table_lock);
++ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
++ inet->dport, inet->sport);
++ if (unlikely(p != NULL)) {
++ set->curr_ip = p->curr_ip;
++ set->used_accept = 1;
++ gr_del_task_from_ip_table_nolock(p);
++ spin_unlock(&gr_conn_table_lock);
++ return;
++ }
++ spin_unlock(&gr_conn_table_lock);
++
++ set->curr_ip = inet->daddr;
++ set->used_accept = 1;
++#endif
++ return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++ (family != AF_UNIX) && (family != AF_LOCAL)) {
++ security_alert(GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol),
++ DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ security_alert(GR_BIND_MSG, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ security_alert(GR_CONNECT_MSG, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++__u32
++gr_cap_rtnetlink(void)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_acl_is_enabled())
++ return current->cap_effective;
++ else
++ return (current->cap_effective & ~(current->acl->cap_lower));
++#else
++ return current->cap_effective;
++#endif
++}
+diff -urN linux-2.6.5/grsecurity/grsec_sysctl.c linux-2.6.5-new/grsecurity/grsec_sysctl.c
+--- linux-2.6.5/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_sysctl.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,453 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & 002)) {
++ security_alert(GR_SYSCTL_MSG, name, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++enum {GS_LINK=1, GS_FIFO, GS_EXECVE, GS_EXECLOG, GS_SIGNAL,
++GS_FORKFAIL, GS_TIME, GS_CHROOT_SHMAT, GS_CHROOT_UNIX, GS_CHROOT_MNT,
++GS_CHROOT_FCHDIR, GS_CHROOT_DBL, GS_CHROOT_PVT, GS_CHROOT_CD, GS_CHROOT_CM,
++GS_CHROOT_MK, GS_CHROOT_NI, GS_CHROOT_EXECLOG, GS_CHROOT_CAPS,
++GS_CHROOT_SYSCTL, GS_TPE, GS_TPE_GID, GS_TPE_ALL, GS_SIDCAPS,
++GS_RANDPID, GS_RANDID, GS_RANDSRC, GS_RANDISN,
++GS_SOCKET_ALL, GS_SOCKET_ALL_GID, GS_SOCKET_CLIENT,
++GS_SOCKET_CLIENT_GID, GS_SOCKET_SERVER, GS_SOCKET_SERVER_GID, GS_TTY, GS_TTYS,
++GS_PTY, GS_GROUP, GS_GID, GS_ACHDIR, GS_AMOUNT, GS_AIPC, GS_DMSG, GS_RANDRPC,
++GS_TEXTREL, GS_FINDTASK, GS_LOCK};
++
++
++ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_LINK
++ {
++ .ctl_name = GS_LINK,
++ .procname = "linking_restrictions",
++ .data = &grsec_enable_link,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ {
++ .ctl_name = GS_FIFO,
++ .procname = "fifo_restrictions",
++ .data = &grsec_enable_fifo,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ {
++ .ctl_name = GS_EXECVE,
++ .procname = "execve_limiting",
++ .data = &grsec_enable_execve,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ {
++ .ctl_name = GS_EXECLOG,
++ .procname = "exec_logging",
++ .data = &grsec_enable_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ {
++ .ctl_name = GS_SIGNAL,
++ .procname = "signal_logging",
++ .data = &grsec_enable_signal,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ {
++ .ctl_name = GS_FORKFAIL,
++ .procname = "forkfail_logging",
++ .data = &grsec_enable_forkfail,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ {
++ .ctl_name = GS_TIME,
++ .procname = "timechange_logging",
++ .data = &grsec_enable_time,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ {
++ .ctl_name = GS_CHROOT_SHMAT,
++ .procname = "chroot_deny_shmat",
++ .data = &grsec_enable_chroot_shmat,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ {
++ .ctl_name = GS_CHROOT_UNIX,
++ .procname = "chroot_deny_unix",
++ .data = &grsec_enable_chroot_unix,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ {
++ .ctl_name = GS_CHROOT_MNT,
++ .procname = "chroot_deny_mount",
++ .data = &grsec_enable_chroot_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ {
++ .ctl_name = GS_CHROOT_FCHDIR,
++ .procname = "chroot_deny_fchdir",
++ .data = &grsec_enable_chroot_fchdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ {
++ .ctl_name = GS_CHROOT_DBL,
++ .procname = "chroot_deny_chroot",
++ .data = &grsec_enable_chroot_double,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ {
++ .ctl_name = GS_CHROOT_PVT,
++ .procname = "chroot_deny_pivot",
++ .data = &grsec_enable_chroot_pivot,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ {
++ .ctl_name = GS_CHROOT_CD,
++ .procname = "chroot_enforce_chdir",
++ .data = &grsec_enable_chroot_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ {
++ .ctl_name = GS_CHROOT_CM,
++ .procname = "chroot_deny_chmod",
++ .data = &grsec_enable_chroot_chmod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ {
++ .ctl_name = GS_CHROOT_MK,
++ .procname = "chroot_deny_mknod",
++ .data = &grsec_enable_chroot_mknod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ {
++ .ctl_name = GS_CHROOT_NI,
++ .procname = "chroot_restrict_nice",
++ .data = &grsec_enable_chroot_nice,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ {
++ .ctl_name = GS_CHROOT_EXECLOG,
++ .procname = "chroot_execlog",
++ .data = &grsec_enable_chroot_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ {
++ .ctl_name = GS_CHROOT_CAPS,
++ .procname = "chroot_caps",
++ .data = &grsec_enable_chroot_caps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ {
++ .ctl_name = GS_CHROOT_SYSCTL,
++ .procname = "chroot_deny_sysctl",
++ .data = &grsec_enable_chroot_sysctl,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ {
++ .ctl_name = GS_TPE,
++ .procname = "tpe",
++ .data = &grsec_enable_tpe,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_TPE_GID,
++ .procname = "tpe_gid",
++ .data = &grsec_tpe_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ {
++ .ctl_name = GS_TPE_ALL,
++ .procname = "tpe_restrict_all",
++ .data = &grsec_enable_tpe_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDPID
++ {
++ .ctl_name = GS_RANDPID,
++ .procname = "rand_pids",
++ .data = &grsec_enable_randpid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDID
++ {
++ .ctl_name = GS_RANDID,
++ .procname = "rand_ip_ids",
++ .data = &grsec_enable_randid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ {
++ .ctl_name = GS_RANDSRC,
++ .procname = "rand_tcp_src_ports",
++ .data = &grsec_enable_randsrc,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ {
++ .ctl_name = GS_RANDISN,
++ .procname = "rand_isns",
++ .data = &grsec_enable_randisn,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ {
++ .ctl_name = GS_SOCKET_ALL,
++ .procname = "socket_all",
++ .data = &grsec_enable_socket_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_SOCKET_ALL_GID,
++ .procname = "socket_all_gid",
++ .data = &grsec_socket_all_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ {
++ .ctl_name = GS_SOCKET_CLIENT,
++ .procname = "socket_client",
++ .data = &grsec_enable_socket_client,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_SOCKET_CLIENT_GID,
++ .procname = "socket_client_gid",
++ .data = &grsec_socket_client_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ {
++ .ctl_name = GS_SOCKET_SERVER,
++ .procname = "socket_server",
++ .data = &grsec_enable_socket_server,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_SOCKET_SERVER_GID,
++ .procname = "socket_server_gid",
++ .data = &grsec_socket_server_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ {
++ .ctl_name = GS_GROUP,
++ .procname = "audit_group",
++ .data = &grsec_enable_group,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_GID,
++ .procname = "audit_gid",
++ .data = &grsec_audit_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ {
++ .ctl_name = GS_ACHDIR,
++ .procname = "audit_chdir",
++ .data = &grsec_enable_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ {
++ .ctl_name = GS_AMOUNT,
++ .procname = "audit_mount",
++ .data = &grsec_enable_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ {
++ .ctl_name = GS_AIPC,
++ .procname = "audit_ipc",
++ .data = &grsec_enable_audit_ipc,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ {
++ .ctl_name = GS_TEXTREL,
++ .procname = "audit_textrel",
++ .data = &grsec_enable_audit_textrel,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ {
++ .ctl_name = GS_DMSG,
++ .procname = "dmesg",
++ .data = &grsec_enable_dmesg,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++ {
++ .ctl_name = GS_RANDRPC,
++ .procname = "rand_rpc",
++ .data = &grsec_enable_randrpc,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ {
++ .ctl_name = GS_FINDTASK,
++ .procname = "chroot_findtask",
++ .data = &grsec_enable_chroot_findtask,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++ {
++ .ctl_name = GS_LOCK,
++ .procname = "grsec_lock",
++ .data = &grsec_lock,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ { .ctl_name = 0 }
++};
++#endif
+diff -urN linux-2.6.5/grsecurity/grsec_textrel.c linux-2.6.5-new/grsecurity/grsec_textrel.c
+--- linux-2.6.5/grsecurity/grsec_textrel.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_textrel.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ if (grsec_enable_audit_textrel)
++ security_audit(GR_TEXTREL_AUDIT_MSG, vma->vm_file ?
++ gr_to_filename(vma->vm_file->f_dentry, vma->vm_file->f_vfsmnt)
++ : "<anonymous mapping>", vma->vm_start,
++ vma->vm_pgoff, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_time.c linux-2.6.5-new/grsecurity/grsec_time.c
+--- linux-2.6.5/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_time.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,13 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++ if (grsec_enable_time)
++ security_alert_good(GR_TIME_MSG, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.5/grsecurity/grsec_tpe.c linux-2.6.5-new/grsecurity/grsec_tpe.c
+--- linux-2.6.5/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsec_tpe.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,35 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct inode *inode = file->f_dentry->d_parent->d_inode;
++
++ if (current->uid && ((grsec_enable_tpe && in_group_p(grsec_tpe_gid)) || gr_acl_tpe_check()) &&
++ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
++ (inode->i_mode & S_IWOTH))))) {
++ security_alert(GR_EXEC_TPE_MSG,
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ }
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ if (current->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
++ ((inode->i_uid && (inode->i_uid != current->uid)) ||
++ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
++ security_alert(GR_EXEC_TPE_MSG,
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ }
++#endif
++#endif
++ return 1;
++}
+diff -urN linux-2.6.5/grsecurity/grsum.c linux-2.6.5-new/grsecurity/grsum.c
+--- linux-2.6.5/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/grsum.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,59 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <asm/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++ char *p;
++ struct crypto_tfm *tfm;
++ unsigned char temp_sum[GR_SHA_LEN];
++ struct scatterlist sg[2];
++ volatile int retval = 0;
++ volatile int dummy = 0;
++ unsigned int i;
++
++ tfm = crypto_alloc_tfm("sha256", 0);
++ if (tfm == NULL) {
++ /* should never happen, since sha256 should be built in */
++ return 1;
++ }
++
++ crypto_digest_init(tfm);
++
++ p = salt;
++ sg[0].page = virt_to_page(p);
++ sg[0].offset = ((long) p & ~PAGE_MASK);
++ sg[0].length = GR_SALT_LEN;
++
++ crypto_digest_update(tfm, sg, 1);
++
++ p = entry->pw;
++ sg[0].page = virt_to_page(p);
++ sg[0].offset = ((long) p & ~PAGE_MASK);
++ sg[0].length = strlen(entry->pw);
++
++ crypto_digest_update(tfm, sg, 1);
++
++ crypto_digest_final(tfm, temp_sum);
++
++ memset(entry->pw, 0, GR_PW_LEN);
++
++ for (i = 0; i < GR_SHA_LEN; i++)
++ if (sum[i] != temp_sum[i])
++ retval = 1;
++ else
++ dummy = 1; // waste a cycle
++
++ crypto_free_tfm(tfm);
++
++ return retval;
++}
+diff -urN linux-2.6.5/grsecurity/obsd_rand.c linux-2.6.5-new/grsecurity/obsd_rand.c
+--- linux-2.6.5/grsecurity/obsd_rand.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/grsecurity/obsd_rand.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,186 @@
++
++/*
++ * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
++ *
++ * Version 1.89, last modified 19-Sep-99
++ *
++ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
++ * All rights reserved.
++ *
++ * Copyright 1998 Niels Provos <provos@citi.umich.edu>
++ * All rights reserved.
++ * Theo de Raadt <deraadt@openbsd.org> came up with the idea of using
++ * such a mathematical system to generate more random (yet non-repeating)
++ * ids to solve the resolver/named problem. But Niels designed the
++ * actual system based on the constraints.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer,
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/time.h>
++#include <linux/timer.h>
++#include <linux/smp_lock.h>
++#include <linux/random.h>
++#include <linux/grsecurity.h>
++
++#define RU_OUT 180
++#define RU_MAX 30000
++#define RU_GEN 2
++#define RU_N 32749
++#define RU_AGEN 7
++#define RU_M 31104
++#define PFAC_N 3
++const static __u16 pfacts[PFAC_N] = { 2, 3, 2729 };
++
++static __u16 ru_x;
++static __u16 ru_seed, ru_seed2;
++static __u16 ru_a, ru_b;
++static __u16 ru_g;
++static __u16 ru_counter = 0;
++static __u16 ru_msb = 0;
++static unsigned long ru_reseed = 0;
++static __u32 tmp;
++
++#define TCP_RNDISS_ROUNDS 15
++#define TCP_RNDISS_OUT 7200
++#define TCP_RNDISS_MAX 30000
++
++static __u8 tcp_rndiss_sbox[128];
++static __u16 tcp_rndiss_msb;
++static __u16 tcp_rndiss_cnt;
++static unsigned long tcp_rndiss_reseed;
++
++static __u16 pmod(__u16, __u16, __u16);
++static void ip_initid(void);
++__u16 ip_randomid(void);
++
++static __u16
++pmod(__u16 gen, __u16 exp, __u16 mod)
++{
++ __u16 s, t, u;
++
++ s = 1;
++ t = gen;
++ u = exp;
++
++ while (u) {
++ if (u & 1)
++ s = (s * t) % mod;
++ u >>= 1;
++ t = (t * t) % mod;
++ }
++ return (s);
++}
++
++static void
++ip_initid(void)
++{
++ __u16 j, i;
++ int noprime = 1;
++
++ ru_x = ((tmp = get_random_long()) & 0xFFFF) % RU_M;
++
++ ru_seed = (tmp >> 16) & 0x7FFF;
++ ru_seed2 = get_random_long() & 0x7FFF;
++
++ ru_b = ((tmp = get_random_long()) & 0xfffe) | 1;
++ ru_a = pmod(RU_AGEN, (tmp >> 16) & 0xfffe, RU_M);
++ while (ru_b % 3 == 0)
++ ru_b += 2;
++
++ j = (tmp = get_random_long()) % RU_N;
++ tmp = tmp >> 16;
++
++ while (noprime) {
++ for (i = 0; i < PFAC_N; i++)
++ if (j % pfacts[i] == 0)
++ break;
++
++ if (i >= PFAC_N)
++ noprime = 0;
++ else
++ j = (j + 1) % RU_N;
++ }
++
++ ru_g = pmod(RU_GEN, j, RU_N);
++ ru_counter = 0;
++
++ ru_reseed = xtime.tv_sec + RU_OUT;
++ ru_msb = ru_msb == 0x8000 ? 0 : 0x8000;
++}
++
++__u16
++ip_randomid(void)
++{
++ int i, n;
++
++ if (ru_counter >= RU_MAX || time_after(get_seconds(), ru_reseed))
++ ip_initid();
++
++ if (!tmp)
++ tmp = get_random_long();
++
++ n = tmp & 0x3;
++ tmp = tmp >> 2;
++ if (ru_counter + n >= RU_MAX)
++ ip_initid();
++ for (i = 0; i <= n; i++)
++ ru_x = (ru_a * ru_x + ru_b) % RU_M;
++ ru_counter += i;
++
++ return ((ru_seed ^ pmod(ru_g, ru_seed2 ^ ru_x, RU_N)) | ru_msb);
++}
++
++__u16
++tcp_rndiss_encrypt(__u16 val)
++{
++ __u16 sum = 0, i;
++
++ for (i = 0; i < TCP_RNDISS_ROUNDS; i++) {
++ sum += 0x79b9;
++ val ^= ((__u16) tcp_rndiss_sbox[(val ^ sum) & 0x7f]) << 7;
++ val = ((val & 0xff) << 7) | (val >> 8);
++ }
++
++ return val;
++}
++
++static void
++tcp_rndiss_init(void)
++{
++ get_random_bytes(tcp_rndiss_sbox, sizeof (tcp_rndiss_sbox));
++ tcp_rndiss_reseed = get_seconds() + TCP_RNDISS_OUT;
++ tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 0 : 0x8000;
++ tcp_rndiss_cnt = 0;
++}
++
++__u32
++ip_randomisn(void)
++{
++ if (tcp_rndiss_cnt >= TCP_RNDISS_MAX ||
++ time_after(get_seconds(), tcp_rndiss_reseed))
++ tcp_rndiss_init();
++
++ return (((tcp_rndiss_encrypt(tcp_rndiss_cnt++) |
++ tcp_rndiss_msb) << 16) | (get_random_long() & 0x7fff));
++}
+diff -urN linux-2.6.5/include/asm-alpha/a.out.h linux-2.6.5-new/include/asm-alpha/a.out.h
+--- linux-2.6.5/include/asm-alpha/a.out.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/asm-alpha/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -98,7 +98,7 @@
+ set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000 \
+ ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
+
+-#define STACK_TOP \
++#define __STACK_TOP \
+ (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+
+ #endif
+diff -urN linux-2.6.5/include/asm-alpha/elf.h linux-2.6.5-new/include/asm-alpha/elf.h
+--- linux-2.6.5/include/asm-alpha/elf.h 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/include/asm-alpha/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -84,6 +84,17 @@
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -urN linux-2.6.5/include/asm-alpha/mman.h linux-2.6.5-new/include/asm-alpha/mman.h
+--- linux-2.6.5/include/asm-alpha/mman.h 2004-04-03 22:36:14.000000000 -0500
++++ linux-2.6.5-new/include/asm-alpha/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -29,6 +29,10 @@
+ #define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x40000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x20000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_SYNC 2 /* synchronous memory sync */
+ #define MS_INVALIDATE 4 /* invalidate the caches */
+diff -urN linux-2.6.5/include/asm-alpha/page.h linux-2.6.5-new/include/asm-alpha/page.h
+--- linux-2.6.5/include/asm-alpha/page.h 2004-04-03 22:37:40.000000000 -0500
++++ linux-2.6.5-new/include/asm-alpha/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -98,6 +98,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _ALPHA_PAGE_H */
+diff -urN linux-2.6.5/include/asm-alpha/pgtable.h linux-2.6.5-new/include/asm-alpha/pgtable.h
+--- linux-2.6.5/include/asm-alpha/pgtable.h 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/include/asm-alpha/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -96,6 +96,17 @@
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -urN linux-2.6.5/include/asm-i386/a.out.h linux-2.6.5-new/include/asm-i386/a.out.h
+--- linux-2.6.5/include/asm-i386/a.out.h 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -19,7 +19,11 @@
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP TASK_SIZE
++#ifdef CONFIG_PAX_SEGMEXEC
++#define __STACK_TOP ((current->flags & PF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE)
++#else
++#define __STACK_TOP TASK_SIZE
++#endif
+
+ #endif
+
+diff -urN linux-2.6.5/include/asm-i386/desc.h linux-2.6.5-new/include/asm-i386/desc.h
+--- linux-2.6.5/include/asm-i386/desc.h 2004-04-03 22:36:12.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/desc.h 2004-04-14 09:15:12.000000000 -0400
+@@ -8,11 +8,19 @@
+
+ #include <linux/preempt.h>
+ #include <linux/smp.h>
++#include <linux/sched.h>
+
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
+
+ extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
++static inline void pax_switch_segments(struct task_struct * tsk, int cpu)
++{
++ cpu_gdt_table[cpu][GDT_ENTRY_DEFAULT_USER_CS].b = tsk->flags & PF_PAX_SEGMEXEC ? 0x60c9fb00U : 0x00cffb00U;
++}
++
+ struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+@@ -28,7 +36,7 @@
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+-extern struct desc_struct default_ldt[];
++extern const struct desc_struct default_ldt[];
+ extern void set_intr_gate(unsigned int irq, void * addr);
+
+ #define _set_tssldt_desc(n,addr,limit,type) \
+@@ -42,16 +50,50 @@
+ "rorl $16,%%eax" \
+ : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
+ {
+ _set_tssldt_desc(&cpu_gdt_table[cpu][entry], (int)addr, 235, 0x89);
+ }
+
+ #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++static inline void __set_ldt_desc(unsigned int cpu, const void *addr, unsigned int size)
++{
++ _set_tssldt_desc(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++}
++
++#define pax_open_kernel(flags, cr3) \
++do { \
++ typecheck(unsigned long,flags); \
++ typecheck(unsigned long,cr3); \
++ local_irq_save(flags); \
++ asm("movl %%cr3,%0":"=r" (cr3)); \
++ load_cr3(kernexec_pg_dir); \
++} while(0)
++
++#define pax_close_kernel(flags, cr3) \
++do { \
++ typecheck(unsigned long,flags); \
++ typecheck(unsigned long,cr3); \
++ asm("movl %0,%%cr3": :"r" (cr3)); \
++ local_irq_restore(flags); \
++} while(0)
++
++static inline void set_ldt_desc(unsigned int cpu, const void *addr, unsigned int size)
+ {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++
++ pax_open_kernel(flags, cr3);
++#endif
++
+ _set_tssldt_desc(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ }
+
+ #define LDT_entry_a(info) \
+@@ -67,7 +109,7 @@
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+- 0x7000)
++ 0x7100)
+
+ #define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+@@ -104,7 +146,7 @@
+ */
+ static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+ {
+- void *segments = pc->ldt;
++ const void *segments = pc->ldt;
+ int count = pc->size;
+
+ if (likely(!count)) {
+@@ -123,6 +165,22 @@
+ put_cpu();
+ }
+
++static inline void _load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ const void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count)) {
++ segments = &default_ldt[0];
++ count = 5;
++ }
++
++ __set_ldt_desc(cpu, segments, count);
++ load_LDT_desc();
++ put_cpu();
++}
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+diff -urN linux-2.6.5/include/asm-i386/elf.h linux-2.6.5-new/include/asm-i386/elf.h
+--- linux-2.6.5/include/asm-i386/elf.h 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/elf.h 2004-04-14 13:57:42.000000000 -0400
+@@ -70,7 +70,22 @@
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3*2:TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x08048000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->flags & PF_PAX_SEGMEXEC ? 15 : 16)
++#endif
+
+ /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different) */
+@@ -129,9 +144,16 @@
+
+ #define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL))
+ #define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE)
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define VSYSCALL_ENTRY ((current->flags & PF_PAX_SEGMEXEC) ? (unsigned long) &__kernel_vsyscall - SEGMEXEC_TASK_SIZE : (unsigned long) &__kernel_vsyscall)
++#else
+ #define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall)
++#endif
++
+ extern void __kernel_vsyscall;
+
++#ifndef CONFIG_PAX_NOVSYSCALL
+ #define ARCH_DLINFO \
+ do { \
+ NEW_AUX_ENT(AT_SYSINFO, VSYSCALL_ENTRY); \
+@@ -185,3 +207,5 @@
+ #endif
+
+ #endif
++
++#endif
+diff -urN linux-2.6.5/include/asm-i386/mach-default/apm.h linux-2.6.5-new/include/asm-i386/mach-default/apm.h
+--- linux-2.6.5/include/asm-i386/mach-default/apm.h 2004-04-03 22:36:26.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/mach-default/apm.h 2004-04-14 09:15:12.000000000 -0400
+@@ -36,7 +36,7 @@
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -60,7 +60,7 @@
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urN linux-2.6.5/include/asm-i386/mach-pc9800/apm.h linux-2.6.5-new/include/asm-i386/mach-pc9800/apm.h
+--- linux-2.6.5/include/asm-i386/mach-pc9800/apm.h 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/mach-pc9800/apm.h 2004-04-14 09:15:12.000000000 -0400
+@@ -39,7 +39,7 @@
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+ "pushfl\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -64,7 +64,7 @@
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+ "pushfl\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urN linux-2.6.5/include/asm-i386/mman.h linux-2.6.5-new/include/asm-i386/mman.h
+--- linux-2.6.5/include/asm-i386/mman.h 2004-04-03 22:36:24.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -23,6 +23,10 @@
+ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define MAP_MIRROR 0x20000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.5/include/asm-i386/module.h linux-2.6.5-new/include/asm-i386/module.h
+--- linux-2.6.5/include/asm-i386/module.h 2004-04-03 22:36:24.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/module.h 2004-04-14 09:06:29.000000000 -0400
+@@ -60,6 +60,12 @@
+ #define MODULE_REGPARM ""
+ #endif
+
+-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM
++#ifdef CONFIG_GRKERNSEC
++#define MODULE_GRSEC "GRSECURITY "
++#else
++#define MODULE_GRSEC ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_GRSEC
+
+ #endif /* _ASM_I386_MODULE_H */
+diff -urN linux-2.6.5/include/asm-i386/page.h linux-2.6.5-new/include/asm-i386/page.h
+--- linux-2.6.5/include/asm-i386/page.h 2004-04-03 22:36:25.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -120,6 +120,19 @@
+ #define __PAGE_OFFSET (0xC0000000UL)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef __ASSEMBLY__
++#define __KERNEL_TEXT_OFFSET (0xC0400000)
++#else
++#define __KERNEL_TEXT_OFFSET (0xC0400000UL)
++#endif
++#else
++#ifdef __ASSEMBLY__
++#define __KERNEL_TEXT_OFFSET (0)
++#else
++#define __KERNEL_TEXT_OFFSET (0x0UL)
++#endif
++#endif
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+@@ -139,6 +152,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _I386_PAGE_H */
+diff -urN linux-2.6.5/include/asm-i386/pgalloc.h linux-2.6.5-new/include/asm-i386/pgalloc.h
+--- linux-2.6.5/include/asm-i386/pgalloc.h 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/pgalloc.h 2004-04-14 09:15:12.000000000 -0400
+@@ -8,7 +8,7 @@
+ #include <linux/mm.h> /* for struct page */
+
+ #define pmd_populate_kernel(mm, pmd, pte) \
+- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
+ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+ {
+diff -urN linux-2.6.5/include/asm-i386/pgtable.h linux-2.6.5-new/include/asm-i386/pgtable.h
+--- linux-2.6.5/include/asm-i386/pgtable.h 2004-04-03 22:37:38.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -32,6 +32,11 @@
+ #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+ extern unsigned long empty_zero_page[1024];
+ extern pgd_t swapper_pg_dir[1024];
++
++#ifdef CONFIG_PAX_KERNEXEC
++extern pgd_t kernexec_pg_dir[1024];
++#endif
++
+ extern kmem_cache_t *pgd_cache;
+ extern kmem_cache_t *pmd_cache;
+ extern spinlock_t pgd_lock;
+@@ -130,6 +135,16 @@
+ #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define _PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+@@ -149,18 +164,18 @@
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+diff -urN linux-2.6.5/include/asm-i386/processor.h linux-2.6.5-new/include/asm-i386/processor.h
+--- linux-2.6.5/include/asm-i386/processor.h 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/processor.h 2004-04-14 09:15:12.000000000 -0400
+@@ -296,10 +296,19 @@
+ */
+ #define TASK_SIZE (PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE ((PAGE_OFFSET) / 2)
++#endif
++
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3:TASK_SIZE/3))
++#else
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
++#endif
+
+ /*
+ * Size of io_bitmap, covering ports 0 to 0x3ff.
+@@ -624,7 +633,7 @@
+ extern inline void prefetch(const void *x)
+ {
+ alternative_input(ASM_NOP4,
+- "prefetchnta (%1)",
++ "prefetchnta (%2)",
+ X86_FEATURE_XMM,
+ "r" (x));
+ }
+@@ -638,7 +647,7 @@
+ extern inline void prefetchw(const void *x)
+ {
+ alternative_input(ASM_NOP4,
+- "prefetchw (%1)",
++ "prefetchw (%2)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+ }
+diff -urN linux-2.6.5/include/asm-i386/system.h linux-2.6.5-new/include/asm-i386/system.h
+--- linux-2.6.5/include/asm-i386/system.h 2004-04-03 22:36:13.000000000 -0500
++++ linux-2.6.5-new/include/asm-i386/system.h 2004-04-14 09:15:12.000000000 -0400
+@@ -5,6 +5,7 @@
+ #include <linux/kernel.h>
+ #include <asm/segment.h>
+ #include <asm/cpufeature.h>
++#include <asm/page.h>
+ #include <linux/bitops.h> /* for LOCK_PREFIX */
+
+ #ifdef __KERNEL__
+@@ -301,7 +302,7 @@
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+- " .long 661b\n" /* label */ \
++ " .long 661b + %c1\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+@@ -309,7 +310,7 @@
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+- ".previous" :: "i" (feature) : "memory")
++ ".previous" :: "i" (feature), "i" (__KERNEL_TEXT_OFFSET) : "memory")
+
+ /*
+ * Alternative inline assembly with input.
+@@ -325,7 +326,7 @@
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+- " .long 661b\n" /* label */ \
++ " .long 661b + %c1\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+@@ -333,7 +334,7 @@
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+- ".previous" :: "i" (feature), input)
++ ".previous" :: "i" (feature), "i" (__KERNEL_TEXT_OFFSET), input)
+
+ /*
+ * Force strict CPU ordering.
+diff -urN linux-2.6.5/include/asm-ia64/elf.h linux-2.6.5-new/include/asm-ia64/elf.h
+--- linux-2.6.5/include/asm-ia64/elf.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/asm-ia64/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -162,6 +162,16 @@
+ typedef struct ia64_fpreg elf_fpreg_t;
+ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#endif
+
+
+ struct pt_regs; /* forward declaration... */
+diff -urN linux-2.6.5/include/asm-ia64/mman.h linux-2.6.5-new/include/asm-ia64/mman.h
+--- linux-2.6.5/include/asm-ia64/mman.h 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/include/asm-ia64/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -31,6 +31,10 @@
+ #define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x40000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.5/include/asm-ia64/page.h linux-2.6.5-new/include/asm-ia64/page.h
+--- linux-2.6.5/include/asm-ia64/page.h 2004-04-03 22:36:19.000000000 -0500
++++ linux-2.6.5-new/include/asm-ia64/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -187,4 +187,13 @@
+ (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
+ ? VM_EXEC : 0))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* _ASM_IA64_PAGE_H */
+diff -urN linux-2.6.5/include/asm-ia64/pgtable.h linux-2.6.5-new/include/asm-ia64/pgtable.h
+--- linux-2.6.5/include/asm-ia64/pgtable.h 2004-04-03 22:36:54.000000000 -0500
++++ linux-2.6.5-new/include/asm-ia64/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -120,6 +120,17 @@
+ #define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -urN linux-2.6.5/include/asm-ia64/ustack.h linux-2.6.5-new/include/asm-ia64/ustack.h
+--- linux-2.6.5/include/asm-ia64/ustack.h 2004-04-03 22:38:21.000000000 -0500
++++ linux-2.6.5-new/include/asm-ia64/ustack.h 2004-04-14 09:15:12.000000000 -0400
+@@ -11,6 +11,6 @@
+ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
+ /* Make a default stack size of 2GB */
+ #define DEFAULT_USER_STACK_SIZE (1UL << 31)
+-#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
++#define __STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+
+ #endif /* _ASM_IA64_USTACK_H */
+diff -urN linux-2.6.5/include/asm-mips/a.out.h linux-2.6.5-new/include/asm-mips/a.out.h
+--- linux-2.6.5/include/asm-mips/a.out.h 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/include/asm-mips/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -36,10 +36,10 @@
+ #ifdef __KERNEL__
+
+ #ifdef CONFIG_MIPS32
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+ #endif
+ #ifdef CONFIG_MIPS64
+-#define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
++#define __STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
+ #endif
+
+ #endif
+diff -urN linux-2.6.5/include/asm-mips/elf.h linux-2.6.5-new/include/asm-mips/elf.h
+--- linux-2.6.5/include/asm-mips/elf.h 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/include/asm-mips/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -273,4 +273,15 @@
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #endif /* _ASM_ELF_H */
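
The PAX_DELTA_*_LSB/LEN pairs only describe where and how many random bits go into each randomized base (mmap, ET_EXEC and stack). A sketch of how such a pair is typically consumed, assuming the usual PaX approach of masking a random value to LEN bits and shifting it up by LSB; the helper name and its standalone form are illustrative, not code taken from this patch:

/* Illustrative: with lsb = PAGE_SHIFT and len = 16 this yields a
 * page-aligned offset carrying 16 bits of entropy. */
static unsigned long pax_delta(unsigned long random, unsigned int lsb, unsigned int len)
{
        return (random & ((1UL << len) - 1)) << lsb;
}
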
+diff -urN linux-2.6.5/include/asm-mips/page.h linux-2.6.5-new/include/asm-mips/page.h
+--- linux-2.6.5/include/asm-mips/page.h 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/include/asm-mips/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -130,6 +130,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
+ #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+
+diff -urN linux-2.6.5/include/asm-parisc/a.out.h linux-2.6.5-new/include/asm-parisc/a.out.h
+--- linux-2.6.5/include/asm-parisc/a.out.h 2004-04-03 22:38:18.000000000 -0500
++++ linux-2.6.5-new/include/asm-parisc/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -22,7 +22,7 @@
+ /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
+ * prumpf */
+
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+ #endif
+
+diff -urN linux-2.6.5/include/asm-parisc/elf.h linux-2.6.5-new/include/asm-parisc/elf.h
+--- linux-2.6.5/include/asm-parisc/elf.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/asm-parisc/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -337,6 +337,17 @@
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -urN linux-2.6.5/include/asm-parisc/mman.h linux-2.6.5-new/include/asm-parisc/mman.h
+--- linux-2.6.5/include/asm-parisc/mman.h 2004-04-03 22:37:24.000000000 -0500
++++ linux-2.6.5-new/include/asm-parisc/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -23,6 +23,10 @@
+ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x20000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_SYNC 1 /* synchronous memory sync */
+ #define MS_ASYNC 2 /* sync memory asynchronously */
+ #define MS_INVALIDATE 4 /* invalidate the caches */
+diff -urN linux-2.6.5/include/asm-parisc/page.h linux-2.6.5-new/include/asm-parisc/page.h
+--- linux-2.6.5/include/asm-parisc/page.h 2004-04-03 22:36:26.000000000 -0500
++++ linux-2.6.5-new/include/asm-parisc/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -113,6 +113,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _PARISC_PAGE_H */
+diff -urN linux-2.6.5/include/asm-parisc/pgtable.h linux-2.6.5-new/include/asm-parisc/pgtable.h
+--- linux-2.6.5/include/asm-parisc/pgtable.h 2004-04-03 22:38:13.000000000 -0500
++++ linux-2.6.5-new/include/asm-parisc/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -179,6 +179,17 @@
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -urN linux-2.6.5/include/asm-ppc/a.out.h linux-2.6.5-new/include/asm-ppc/a.out.h
+--- linux-2.6.5/include/asm-ppc/a.out.h 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/include/asm-ppc/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -2,7 +2,7 @@
+ #define __PPC_A_OUT_H__
+
+ /* grabbed from the intel stuff */
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+
+ struct exec
+diff -urN linux-2.6.5/include/asm-ppc/elf.h linux-2.6.5-new/include/asm-ppc/elf.h
+--- linux-2.6.5/include/asm-ppc/elf.h 2004-04-03 22:36:13.000000000 -0500
++++ linux-2.6.5-new/include/asm-ppc/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -87,6 +87,17 @@
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 15
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 15
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 15
++#endif
++
+ #define USE_ELF_CORE_DUMP
+ #define ELF_EXEC_PAGESIZE 4096
+
+diff -urN linux-2.6.5/include/asm-ppc/mman.h linux-2.6.5-new/include/asm-ppc/mman.h
+--- linux-2.6.5/include/asm-ppc/mman.h 2004-04-03 22:38:28.000000000 -0500
++++ linux-2.6.5-new/include/asm-ppc/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -24,6 +24,10 @@
+ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0200
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.5/include/asm-ppc/page.h linux-2.6.5-new/include/asm-ppc/page.h
+--- linux-2.6.5/include/asm-ppc/page.h 2004-04-03 22:36:53.000000000 -0500
++++ linux-2.6.5-new/include/asm-ppc/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -162,5 +162,14 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _PPC_PAGE_H */
+diff -urN linux-2.6.5/include/asm-ppc/pgtable.h linux-2.6.5-new/include/asm-ppc/pgtable.h
+--- linux-2.6.5/include/asm-ppc/pgtable.h 2004-04-03 22:36:26.000000000 -0500
++++ linux-2.6.5-new/include/asm-ppc/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -349,11 +349,21 @@
+
+ #define PAGE_NONE __pgprot(_PAGE_BASE)
+ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
++
++#if defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x)
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
+
+ #define PAGE_KERNEL __pgprot(_PAGE_RAM)
+ #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
+@@ -365,21 +375,21 @@
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY_X
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY_X
+-#define __P100 PAGE_READONLY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
++#define __P100 PAGE_READONLY_X
+ #define __P101 PAGE_READONLY_X
+-#define __P110 PAGE_COPY
++#define __P110 PAGE_COPY_X
+ #define __P111 PAGE_COPY_X
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY_X
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED_X
+-#define __S100 PAGE_READONLY
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
++#define __S100 PAGE_READONLY_X
+ #define __S101 PAGE_READONLY_X
+-#define __S110 PAGE_SHARED
++#define __S110 PAGE_SHARED_X
+ #define __S111 PAGE_SHARED_X
+
+ #ifndef __ASSEMBLY__
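
The rewritten __Pxxx/__Sxxx entries feed the 16-entry protection map that the mm code indexes with a mapping's read/write/exec/shared bits; routing the entries without execute permission to the *_NOEXEC (guarded) protections is what makes PROT_EXEC-less mappings genuinely non-executable under PAGEEXEC. A sketch of that indexing, assuming the standard protection_map layout; the stub type and function are illustrative, not part of this hunk:

typedef unsigned long pgprot_stub_t;        /* stand-in for pgprot_t */
static pgprot_stub_t protection_map[16];    /* __P000..__P111, then __S000..__S111 */

/* Illustrative: READ=1, WRITE=2, EXEC=4, SHARED=8, so index 3 (__P011) is a
 * private read+write mapping and, with the hunk above, now resolves to
 * PAGE_COPY_NOEXEC instead of an implicitly executable protection. */
static pgprot_stub_t prot_for(unsigned int prot_bits)
{
        return protection_map[prot_bits & 0x0f];
}
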
+diff -urN linux-2.6.5/include/asm-sparc/a.out.h linux-2.6.5-new/include/asm-sparc/a.out.h
+--- linux-2.6.5/include/asm-sparc/a.out.h 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -91,7 +91,7 @@
+
+ #include <asm/page.h>
+
+-#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
++#define __STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
+
+ #endif /* __KERNEL__ */
+
+diff -urN linux-2.6.5/include/asm-sparc/elf.h linux-2.6.5-new/include/asm-sparc/elf.h
+--- linux-2.6.5/include/asm-sparc/elf.h 2004-04-03 22:37:43.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -145,6 +145,17 @@
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -urN linux-2.6.5/include/asm-sparc/mman.h linux-2.6.5-new/include/asm-sparc/mman.h
+--- linux-2.6.5/include/asm-sparc/mman.h 2004-04-03 22:38:20.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -27,6 +27,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.5/include/asm-sparc/page.h linux-2.6.5-new/include/asm-sparc/page.h
+--- linux-2.6.5/include/asm-sparc/page.h 2004-04-03 22:36:53.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -176,6 +176,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _SPARC_PAGE_H */
+diff -urN linux-2.6.5/include/asm-sparc/pgtable.h linux-2.6.5-new/include/asm-sparc/pgtable.h
+--- linux-2.6.5/include/asm-sparc/pgtable.h 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -109,6 +109,13 @@
+ BTFIXUPDEF_INT(page_shared)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift)
+@@ -130,6 +137,16 @@
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(BTFIXUP_INT(page_shared_noexec))
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -urN linux-2.6.5/include/asm-sparc/pgtsrmmu.h linux-2.6.5-new/include/asm-sparc/pgtsrmmu.h
+--- linux-2.6.5/include/asm-sparc/pgtsrmmu.h 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/pgtsrmmu.h 2004-04-14 09:15:12.000000000 -0400
+@@ -103,6 +103,16 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -urN linux-2.6.5/include/asm-sparc/uaccess.h linux-2.6.5-new/include/asm-sparc/uaccess.h
+--- linux-2.6.5/include/asm-sparc/uaccess.h 2004-04-03 22:38:14.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc/uaccess.h 2004-04-14 09:15:12.000000000 -0400
+@@ -41,7 +41,7 @@
+ * No one can read/write anything from userland in the kernel space by setting
+ * large size and address near to PAGE_OFFSET - a fault will break his intentions.
+ */
+-#define __user_ok(addr,size) ((addr) < STACK_TOP)
++#define __user_ok(addr,size) ((addr) < __STACK_TOP)
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
+ #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+diff -urN linux-2.6.5/include/asm-sparc64/a.out.h linux-2.6.5-new/include/asm-sparc64/a.out.h
+--- linux-2.6.5/include/asm-sparc64/a.out.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc64/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -95,7 +95,7 @@
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
++#define __STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
+
+ #endif
+
+diff -urN linux-2.6.5/include/asm-sparc64/elf.h linux-2.6.5-new/include/asm-sparc64/elf.h
+--- linux-2.6.5/include/asm-sparc64/elf.h 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc64/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -140,6 +140,16 @@
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) (PAGE_SHIFT + 1)
++#define PAX_DELTA_MMAP_LEN(tsk) (test_thread_flag(TIF_32BIT) ? 14 : 28 )
++#define PAX_DELTA_EXEC_LSB(tsk) (PAGE_SHIFT + 1)
++#define PAX_DELTA_EXEC_LEN(tsk) (test_thread_flag(TIF_32BIT) ? 14 : 28 )
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (test_thread_flag(TIF_32BIT) ? 15 : 29 )
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -urN linux-2.6.5/include/asm-sparc64/mman.h linux-2.6.5-new/include/asm-sparc64/mman.h
+--- linux-2.6.5/include/asm-sparc64/mman.h 2004-04-03 22:37:06.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc64/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -27,6 +27,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.5/include/asm-sparc64/page.h linux-2.6.5-new/include/asm-sparc64/page.h
+--- linux-2.6.5/include/asm-sparc64/page.h 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc64/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -174,6 +174,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* !(__KERNEL__) */
+
+ #endif /* !(_SPARC64_PAGE_H) */
+diff -urN linux-2.6.5/include/asm-sparc64/pgtable.h linux-2.6.5-new/include/asm-sparc64/pgtable.h
+--- linux-2.6.5/include/asm-sparc64/pgtable.h 2004-04-03 22:36:14.000000000 -0500
++++ linux-2.6.5-new/include/asm-sparc64/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -124,7 +124,8 @@
+
+ /* Here are the SpitFire software bits we use in the TTE's. */
+ #define _PAGE_FILE 0x0000000000001000 /* Pagecache page */
+-#define _PAGE_MODIFIED 0x0000000000000800 /* Modified Page (ie. dirty) */
++#define _PAGE_MODIFIED 0x0000000000001000 /* Modified Page (ie. dirty) */
++#define _PAGE_EXEC 0x0000000000000800 /* Executable SW Bit */
+ #define _PAGE_ACCESSED 0x0000000000000400 /* Accessed Page (ie. referenced) */
+ #define _PAGE_READ 0x0000000000000200 /* Readable SW Bit */
+ #define _PAGE_WRITE 0x0000000000000100 /* Writable SW Bit */
+@@ -160,34 +161,48 @@
+
+ /* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
+ #define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS | _PAGE_WRITE)
++ __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
+
+ #define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS)
++ __ACCESS_BITS | _PAGE_EXEC)
+
+ #define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS)
++ __ACCESS_BITS | _PAGE_EXEC)
+
+ #define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)
++ __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS | \
++ _PAGE_EXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS | _PAGE_WRITE)
++# define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS)
++# define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
+
+ #define _PFN_MASK _PAGE_PADDR
+
+ #define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E)
+
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+diff -urN linux-2.6.5/include/asm-x86_64/a.out.h linux-2.6.5-new/include/asm-x86_64/a.out.h
+--- linux-2.6.5/include/asm-x86_64/a.out.h 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/include/asm-x86_64/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -21,7 +21,7 @@
+
+ #ifdef __KERNEL__
+ #include <linux/thread_info.h>
+-#define STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
++#define __STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
+ #endif
+
+ #endif /* __A_OUT_GNU_H__ */
+diff -urN linux-2.6.5/include/asm-x86_64/elf.h linux-2.6.5-new/include/asm-x86_64/elf.h
+--- linux-2.6.5/include/asm-x86_64/elf.h 2004-04-03 22:37:44.000000000 -0500
++++ linux-2.6.5-new/include/asm-x86_64/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -89,6 +89,17 @@
+
+ #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (test_thread_flag(TIF_IA32) ? 0x08048000UL : 0x400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#endif
++
+ /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different). Assumes current is the process
+ getting dumped. */
+diff -urN linux-2.6.5/include/asm-x86_64/mman.h linux-2.6.5-new/include/asm-x86_64/mman.h
+--- linux-2.6.5/include/asm-x86_64/mman.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/asm-x86_64/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -24,6 +24,10 @@
+ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x8000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.5/include/asm-x86_64/page.h linux-2.6.5-new/include/asm-x86_64/page.h
+--- linux-2.6.5/include/asm-x86_64/page.h 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/include/asm-x86_64/page.h 2004-04-14 09:15:12.000000000 -0400
+@@ -127,6 +127,16 @@
+
+ #define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define VM_DATA_DEFAULT_FLAGS __VM_DATA_DEFAULT_FLAGS
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#else
+ #define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+@@ -137,6 +147,8 @@
+ #define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
+
++#endif
++
+ #define CONFIG_ARCH_GATE_AREA 1
+
+ #ifndef __ASSEMBLY__
+diff -urN linux-2.6.5/include/asm-x86_64/pgalloc.h linux-2.6.5-new/include/asm-x86_64/pgalloc.h
+--- linux-2.6.5/include/asm-x86_64/pgalloc.h 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/include/asm-x86_64/pgalloc.h 2004-04-14 09:15:12.000000000 -0400
+@@ -8,7 +8,7 @@
+ #include <linux/mm.h>
+
+ #define pmd_populate_kernel(mm, pmd, pte) \
+- set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
++ set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(pte)))
+ #define pgd_populate(mm, pgd, pmd) \
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pmd)))
+
+diff -urN linux-2.6.5/include/asm-x86_64/pgtable.h linux-2.6.5-new/include/asm-x86_64/pgtable.h
+--- linux-2.6.5/include/asm-x86_64/pgtable.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/asm-x86_64/pgtable.h 2004-04-14 09:15:12.000000000 -0400
+@@ -170,6 +170,10 @@
+ #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+ #define __PAGE_KERNEL_EXECUTABLE \
+diff -urN linux-2.6.5/include/linux/a.out.h linux-2.6.5-new/include/linux/a.out.h
+--- linux-2.6.5/include/linux/a.out.h 2004-04-03 22:37:24.000000000 -0500
++++ linux-2.6.5-new/include/linux/a.out.h 2004-04-14 09:15:12.000000000 -0400
+@@ -7,6 +7,16 @@
+
+ #include <asm/a.out.h>
+
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __DELTA_STACK (current->mm->delta_stack)
++#else
++#define __DELTA_STACK 0UL
++#endif
++
++#ifndef STACK_TOP
++#define STACK_TOP (__STACK_TOP - __DELTA_STACK)
++#endif
++
+ #endif /* __STRUCT_EXEC_OVERRIDE__ */
+
+ /* these go in the N_MACHTYPE field */
+@@ -37,6 +47,14 @@
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++#define F_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
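
Renaming every architecture's STACK_TOP to __STACK_TOP makes this header the single place where the visible stack top is computed: under CONFIG_PAX_RANDUSTACK a per-mm delta_stack chosen at exec() time is subtracted, so each process sees a slightly different stack ceiling. A condensed sketch of the effect; the helper is illustrative and the patch derives delta_stack elsewhere:

/* Illustrative: STACK_TOP as seen by the process under RANDUSTACK. */
static unsigned long effective_stack_top(unsigned long arch_stack_top,
                                         unsigned long delta_stack)
{
        return arch_stack_top - delta_stack;
}
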
+diff -urN linux-2.6.5/include/linux/binfmts.h linux-2.6.5-new/include/linux/binfmts.h
+--- linux-2.6.5/include/linux/binfmts.h 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/include/linux/binfmts.h 2004-04-14 09:15:12.000000000 -0400
+@@ -36,6 +36,7 @@
+ of the time same as filename, but could be
+ different for binfmt_{misc,script} */
+ unsigned long loader, exec;
++ int misc;
+ };
+
+ /*
+@@ -65,5 +66,8 @@
+ extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
+ extern int set_binfmt(struct linux_binfmt *new);
+
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++void pax_report_insns(void *pc, void *sp);
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_BINFMTS_H */
+diff -urN linux-2.6.5/include/linux/elf.h linux-2.6.5-new/include/linux/elf.h
+--- linux-2.6.5/include/linux/elf.h 2004-04-03 22:36:25.000000000 -0500
++++ linux-2.6.5-new/include/linux/elf.h 2004-04-14 09:15:12.000000000 -0400
+@@ -35,6 +35,17 @@
+ #define PT_HIPROC 0x7fffffff
+ #define PT_GNU_EH_FRAME 0x6474e550
+
++#define PT_GNU_STACK (PT_LOOS + 0x474e551)
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++#define EF_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ /* These constants define the different elf file types */
+ #define ET_NONE 0
+ #define ET_REL 1
+@@ -121,6 +132,8 @@
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_LOPROC 0x70000000
+ #define DT_HIPROC 0x7fffffff
+
+@@ -271,6 +284,19 @@
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1 << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1 << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1 << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1 << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1 << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1 << 9) /* Disable MPROTECT */
++#define PF_RANDEXEC (1 << 10) /* Enable RANDEXEC */
++#define PF_NORANDEXEC (1 << 11) /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1 << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1 << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1 << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1 << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -363,6 +389,8 @@
+ #define EI_OSABI 7
+ #define EI_PAD 8
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -419,6 +447,7 @@
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -426,6 +455,7 @@
+ #define elfhdr elf64_hdr
+ #define elf_phdr elf64_phdr
+ #define elf_note elf64_note
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
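
PT_PAX_FLAGS introduces a dedicated program header whose p_flags field carries the per-binary PF_PAX_*/PF_NO* enable/disable pairs defined above, letting a binary override the system defaults. A hedged sketch of how a loader could extract those flags from the program header table; the helper itself is illustrative, while the field names follow the elf_phdr layout in this header:

/* Illustrative scan of the program header table for the PaX flags entry. */
static unsigned long find_pax_flags(const struct elf_phdr *phdr, unsigned int count)
{
        unsigned int i;

        for (i = 0; i < count; i++)
                if (phdr[i].p_type == PT_PAX_FLAGS)
                        return phdr[i].p_flags;  /* e.g. PF_PAGEEXEC | PF_NOEMUTRAMP */
        return 0UL;
}
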
+diff -urN linux-2.6.5/include/linux/fs.h linux-2.6.5-new/include/linux/fs.h
+--- linux-2.6.5/include/linux/fs.h 2004-04-03 22:36:52.000000000 -0500
++++ linux-2.6.5-new/include/linux/fs.h 2004-04-14 09:06:29.000000000 -0400
+@@ -1134,7 +1134,7 @@
+
+ /* fs/open.c */
+
+-extern int do_truncate(struct dentry *, loff_t start);
++extern int do_truncate(struct dentry *, loff_t start, struct vfsmount *);
+ extern struct file *filp_open(const char *, int, int);
+ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
+ extern int filp_close(struct file *, fl_owner_t id);
+diff -urN linux-2.6.5/include/linux/gracl.h linux-2.6.5-new/include/linux/gracl.h
+--- linux-2.6.5/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/include/linux/gracl.h 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,246 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++#endif
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* * * * * * * * * * * * * * * * * * * * *
++ * grsecurity ACL System
++ * Main header file
++ * Purpose: define most gracl data structures
++ * * * * * * * * * * * * * * * * * * * * */
++
++/* Major status information */
++
++#define GR_VERSION "grsecurity 2.0"
++
++enum {
++
++ SHUTDOWN = 0,
++ ENABLE = 1,
++ SPROLE = 2,
++ RELOAD = 3,
++ SEGVMOD = 4,
++ STATUS = 5,
++ UNSPROLE = 6
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++ GR_PW_LEN = 128,
++ GR_SALT_LEN = 16,
++ GR_SHA_LEN = 32,
++};
++
++enum {
++ GR_SPROLE_LEN = 64,
++};
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++ unsigned char *rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++ ino_t inode;
++ dev_t device;
++ char *name;
++ __u16 len;
++};
++
++struct acl_role_db {
++ struct acl_role_label **r_hash;
++ __u32 r_size;
++};
++
++struct name_db {
++ struct name_entry **n_hash;
++ __u32 n_size;
++};
++
++struct crash_uid {
++ uid_t uid;
++ unsigned long expires;
++};
++
++struct gr_hash_struct {
++ void **table;
++ void **nametable;
++ void *first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++struct acl_subject_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++ __u32 cap_mask;
++ __u32 cap_lower;
++
++ struct rlimit res[RLIM_NLIMITS + 1];
++ __u16 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ uid_t *user_transitions;
++ gid_t *group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ struct acl_ip_label **ips;
++ __u32 ip_num;
++
++ __u32 crashes;
++ unsigned long expires;
++
++ struct acl_subject_label *parent_subject;
++ struct gr_hash_struct *hash;
++ struct acl_ip_label *ip_object;
++ struct acl_subject_label *prev;
++ struct acl_subject_label *next;
++
++ struct acl_object_label **obj_hash;
++ __u32 obj_hash_size;
++};
++
++struct role_allowed_ip {
++ __u32 addr;
++ __u32 netmask;
++
++ struct role_allowed_ip *prev;
++ struct role_allowed_ip *next;
++};
++
++struct role_transition {
++ char *rolename;
++
++ struct role_transition *prev;
++ struct role_transition *next;
++};
++
++struct acl_role_label {
++ char *rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ unsigned long expires;
++
++ struct acl_subject_label *root_label;
++ struct gr_hash_struct *hash;
++
++ struct acl_role_label *prev;
++ struct acl_role_label *next;
++
++ struct role_transition *transitions;
++ struct role_allowed_ip *allowed_ips;
++ struct acl_subject_label **subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++ struct acl_role_label **r_table;
++ __u32 r_entries; /* number of entries in table */
++ __u32 s_entries; /* total number of subject acls */
++ __u32 i_entries; /* total number of ip acls */
++ __u32 o_entries; /* Total number of object acls */
++ __u32 g_entries; /* total number of globbed objects */
++ __u32 a_entries; /* total number of allowed ips */
++ __u32 t_entries; /* total number of transitions */
++};
++
++struct acl_object_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++
++ struct acl_subject_label *nested;
++ struct acl_object_label *globbed;
++
++ /* next two structures not used */
++
++ struct acl_object_label *prev;
++ struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ /* next two structures not used */
++
++ struct acl_ip_label *prev;
++ struct acl_ip_label *next;
++};
++
++struct gr_arg {
++ struct user_acl_role_db role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ struct sprole_pw *sprole_pws;
++ dev_t segv_device;
++ ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct subject_map {
++ struct acl_subject_label *user;
++ struct acl_subject_label *kernel;
++};
++
++struct acl_subj_map_db {
++ struct subject_map **s_hash;
++ __u32 s_size;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler
++ Makes good use of the low bits of the inode. Generally 0-1 times
++ in loop for successful match. 0-3 for unsuccessful match.
++ Shift/add algorithm with modulus of table size and an XOR*/
++
++static __inline__ unsigned long
++rhash(const uid_t uid, const __u16 type, const unsigned long sz)
++{
++ return (((uid << type) + (uid ^ type)) % sz);
++}
++
++static __inline__ unsigned long
++shash(const struct acl_subject_label *userp, const unsigned long sz)
++{
++ return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned long
++fhash(const ino_t ino, const dev_t dev, const unsigned long sz)
++{
++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
++}
++
++static __inline__ unsigned long
++nhash(const char *name, const __u16 len, const unsigned long sz)
++{
++ return full_name_hash(name, len) % sz;
++}
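
The inline hashes above select buckets in the role, subject, object and name tables: rhash() keys on a uid/gid plus role type, fhash() mixes inode and device numbers, and nhash() reuses the VFS full_name_hash(). A small sketch of fhash() in use for an object lookup; collision handling is omitted and the wrapper is illustrative, not code from this patch:

/* Illustrative: pick the obj_hash bucket for a file identified by inode+device. */
static struct acl_object_label *obj_bucket(const struct acl_subject_label *subj,
                                           const ino_t ino, const dev_t dev)
{
        return subj->obj_hash[fhash(ino, dev, subj->obj_hash_size)];
}
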
+diff -urN linux-2.6.5/include/linux/gralloc.h linux-2.6.5-new/include/linux/gralloc.h
+--- linux-2.6.5/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/include/linux/gralloc.h 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,8 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++
++#endif
+diff -urN linux-2.6.5/include/linux/grdefs.h linux-2.6.5-new/include/linux/grdefs.h
+--- linux-2.6.5/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/include/linux/grdefs.h 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,116 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++ GR_READY = 0x01,
++ GR_STATUS_INIT = 0x00 // disabled state
++};
++
++/* Begin ACL declarations */
++
++/* Role flags */
++
++enum {
++ GR_ROLE_USER = 0x0001,
++ GR_ROLE_GROUP = 0x0002,
++ GR_ROLE_DEFAULT = 0x0004,
++ GR_ROLE_SPECIAL = 0x0008,
++ GR_ROLE_AUTH = 0x0010,
++ GR_ROLE_NOPW = 0x0020,
++ GR_ROLE_GOD = 0x0040,
++ GR_ROLE_LEARN = 0x0080,
++ GR_ROLE_TPE = 0x0100
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++ GR_DELETED = 0x00000080
++};
++
++/* ACL Object-only mode flags */
++enum {
++ GR_READ = 0x00000001,
++ GR_APPEND = 0x00000002,
++ GR_WRITE = 0x00000004,
++ GR_EXEC = 0x00000008,
++ GR_FIND = 0x00000010,
++ GR_INHERIT = 0x00000040,
++ GR_PTRACERD = 0x00000100,
++ GR_SETID = 0x00000200,
++ GR_CREATE = 0x00000400,
++ GR_DELETE = 0x00000800,
++ GR_NOPTRACE = 0x00001000,
++ GR_AUDIT_READ = 0x00002000,
++ GR_AUDIT_APPEND = 0x00004000,
++ GR_AUDIT_WRITE = 0x00008000,
++ GR_AUDIT_EXEC = 0x00010000,
++ GR_AUDIT_FIND = 0x00020000,
++ GR_AUDIT_INHERIT= 0x00040000,
++ GR_AUDIT_SETID = 0x00080000,
++ GR_AUDIT_CREATE = 0x00100000,
++ GR_AUDIT_DELETE = 0x00200000,
++ GR_SUPPRESS = 0x00400000,
++ GR_NOLEARN = 0x00800000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE)
++
++/* ACL subject-only mode flags */
++enum {
++ GR_KILL = 0x00000001,
++ GR_VIEW = 0x00000002,
++ GR_PROTECTED = 0x00000100,
++ GR_LEARN = 0x00000200,
++ GR_OVERRIDE = 0x00000400,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_DUMMY = 0x00000800,
++ GR_PAXPAGE = 0x00001000,
++ GR_PAXSEGM = 0x00002000,
++ GR_PAXGCC = 0x00004000,
++ GR_PAXRANDMMAP = 0x00008000,
++ GR_PAXRANDEXEC = 0x00010000,
++ GR_PAXMPROTECT = 0x00020000,
++ GR_PROTSHM = 0x00040000,
++ GR_KILLPROC = 0x00080000,
++ GR_KILLIPPROC = 0x00100000,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_NOTROJAN = 0x00200000,
++ GR_PROTPROCFD = 0x00400000,
++ GR_PROCACCT = 0x00800000,
++ GR_RELAXPTRACE = 0x01000000,
++ GR_NESTED = 0x02000000
++};
++
++enum {
++ GR_ID_USER = 0x01,
++ GR_ID_GROUP = 0x02,
++};
++
++enum {
++ GR_ID_ALLOW = 0x01,
++ GR_ID_DENY = 0x02,
++};
++
++#define GR_CRASH_RES 11
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++ GR_RLIM_CPU_BUMP = 60,
++ GR_RLIM_FSIZE_BUMP = 50000,
++ GR_RLIM_DATA_BUMP = 10000,
++ GR_RLIM_STACK_BUMP = 1000,
++ GR_RLIM_CORE_BUMP = 10000,
++ GR_RLIM_RSS_BUMP = 500000,
++ GR_RLIM_NPROC_BUMP = 1,
++ GR_RLIM_NOFILE_BUMP = 5,
++ GR_RLIM_MEMLOCK_BUMP = 50000,
++ GR_RLIM_AS_BUMP = 500000,
++ GR_RLIM_LOCKS_BUMP = 2
++};
++
++#endif
+diff -urN linux-2.6.5/include/linux/grinternal.h linux-2.6.5-new/include/linux/grinternal.h
+--- linux-2.6.5/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/include/linux/grinternal.h 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,201 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++extern void gr_add_learn_entry(const char *fmt, ...);
++extern __u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++ const struct vfsmount *mnt);
++extern __u32 gr_check_create(const struct dentry *new_dentry,
++ const struct dentry *parent,
++ const struct vfsmount *mnt, const __u32 mode);
++extern int gr_check_protected_task(const struct task_struct *task);
++extern __u32 to_gr_audit(const __u32 reqmode);
++extern int gr_set_acls(const int type);
++
++extern void gr_handle_alertkill(void);
++extern char *gr_to_filename(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern char *gr_to_filename1(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern char *gr_to_filename2(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern char *gr_to_filename3(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_forkbomb;
++extern int grsec_forkbomb_gid;
++extern int grsec_forkbomb_sec;
++extern int grsec_forkbomb_max;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_findtask;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_tpe;
++extern int grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_sidcaps;
++extern int grsec_enable_randpid;
++extern int grsec_enable_socket_all;
++extern int grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern int grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern int grsec_socket_server_gid;
++extern int grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_audit_ipc;
++extern int grsec_enable_audit_textrel;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_lock;
++
++extern struct task_struct *child_reaper;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_alertgood_lock;
++extern unsigned long grsec_alertgood_wtime;
++extern unsigned long grsec_alertgood_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++#define gr_task_fullpath(tsk) (tsk->exec_file ? \
++ gr_to_filename2(tsk->exec_file->f_dentry, \
++ tsk->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \
++ gr_to_filename3(tsk->parent->exec_file->f_dentry, \
++ tsk->parent->exec_file->f_vfsmnt) : "/")
++
++#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
++ gr_to_filename(tsk->exec_file->f_dentry, \
++ tsk->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? \
++ gr_to_filename1(tsk->parent->exec_file->f_dentry, \
++ tsk->parent->exec_file->f_vfsmnt) : "/")
++
++#define proc_is_chrooted(tsk_a) ((tsk_a->pid > 1) && \
++ ((tsk_a->fs->root->d_inode->i_sb->s_dev != \
++ child_reaper->fs->root->d_inode->i_sb->s_dev) || \
++ (tsk_a->fs->root->d_inode->i_ino != \
++ child_reaper->fs->root->d_inode->i_ino)))
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs->root->d_inode->i_sb->s_dev == \
++ tsk_b->fs->root->d_inode->i_sb->s_dev) && \
++ (tsk_a->fs->root->d_inode->i_ino == \
++ tsk_b->fs->root->d_inode->i_ino))
++
++#define DEFAULTSECARGS gr_task_fullpath(current), current->comm, \
++ current->pid, current->uid, \
++ current->euid, current->gid, current->egid, \
++ gr_parent_task_fullpath(current), \
++ current->parent->comm, current->parent->pid, \
++ current->parent->uid, current->parent->euid, \
++ current->parent->gid, current->parent->egid
++
++#define GR_CHROOT_CAPS ( \
++ CAP_TO_MASK(CAP_FOWNER) | \
++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++ CAP_TO_MASK(CAP_IPC_OWNER))
++
++#define security_alert_good(normal_msg,args...) \
++({ \
++ spin_lock(&grsec_alertgood_lock); \
++ \
++ if (!grsec_alertgood_wtime || get_seconds() - grsec_alertgood_wtime > CONFIG_GRKERNSEC_FLOODTIME) { \
++ grsec_alertgood_wtime = get_seconds(); grsec_alertgood_fyet = 0; \
++ if (current->curr_ip) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else \
++ printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
++ } else if((get_seconds() - grsec_alertgood_wtime < CONFIG_GRKERNSEC_FLOODTIME) && (grsec_alertgood_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { \
++ grsec_alertgood_fyet++; \
++ if (current->curr_ip) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else \
++ printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
++ } else if (grsec_alertgood_fyet == CONFIG_GRKERNSEC_FLOODBURST) { \
++ grsec_alertgood_wtime = get_seconds(); grsec_alertgood_fyet++; \
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for " \
++ "%d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); \
++ } \
++ \
++ spin_unlock(&grsec_alertgood_lock); \
++})
++
++#define security_alert(normal_msg,args...) \
++({ \
++ spin_lock(&grsec_alert_lock); \
++ \
++ if (!grsec_alert_wtime || get_seconds() - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME) { \
++ grsec_alert_wtime = get_seconds(); grsec_alert_fyet = 0; \
++ if (current->curr_ip) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else \
++ printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
++ } else if((get_seconds() - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { \
++ grsec_alert_fyet++; \
++ if (current->curr_ip) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else \
++ printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { \
++ grsec_alert_wtime = get_seconds(); grsec_alert_fyet++; \
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for " \
++ "%d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); \
++ } \
++ \
++ gr_handle_alertkill(); \
++ spin_unlock(&grsec_alert_lock); \
++})
++
++#define security_audit(normal_msg,args...) \
++({ \
++ spin_lock(&grsec_audit_lock); \
++ if (current->curr_ip) \
++ printk(KERN_INFO "grsec: From %u.%u.%u.%u: " normal_msg "\n", \
++ NIPQUAD(current->curr_ip) , ## args); \
++ else \
++ printk(KERN_INFO "grsec: " normal_msg "\n", ## args); \
++ spin_unlock(&grsec_audit_lock); \
++})
++
++#define security_learn(normal_msg,args...) \
++({ \
++ preempt_disable(); \
++ gr_add_learn_entry(normal_msg "\n", ## args); \
++ preempt_enable(); \
++})
++
++#endif
++
++#endif
+diff -urN linux-2.6.5/include/linux/grmsg.h linux-2.6.5-new/include/linux/grmsg.h
+--- linux-2.6.5/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/include/linux/grmsg.h 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,108 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d, parent %.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " DEFAULTSECMSG
++#define GR_IOPERM_MSG "denied use of ioperm() by " DEFAULTSECMSG
++#define GR_IOPL_MSG "denied use of iopl() by " DEFAULTSECMSG
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " DEFAULTSECMSG
++#define GR_UNIX_CHROOT_MSG "denied connect to abstract AF_UNIX socket outside of chroot by " DEFAULTSECMSG
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " DEFAULTSECMSG
++#define GR_KMEM_MSG "attempted write to /dev/kmem by " DEFAULTSECMSG
++#define GR_PORT_OPEN_MSG "attempted open of /dev/port by " DEFAULTSECMSG
++#define GR_MEM_WRITE_MSG "attempted write of /dev/mem by " DEFAULTSECMSG
++#define GR_MEM_MMAP_MSG "attempted mmap write of /dev/[k]mem by " DEFAULTSECMSG
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " DEFAULTSECMSG
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " DEFAULTSECMSG
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " DEFAULTSECMSG
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " DEFAULTSECMSG
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " DEFAULTSECMSG
++#define GR_MKNOD_CHROOT_MSG "refused attempt to mknod %.950s from chroot by " DEFAULTSECMSG
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " DEFAULTSECMSG
++#define GR_UNIXCONNECT_ACL_MSG "%s connect to the unix domain socket %.950s by " DEFAULTSECMSG
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " DEFAULTSECMSG
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " DEFAULTSECMSG
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " DEFAULTSECMSG
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " DEFAULTSECMSG
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " DEFAULTSECMSG
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " DEFAULTSECMSG
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " DEFAULTSECMSG
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " DEFAULTSECMSG
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " DEFAULTSECMSG
++#define GR_NPROC_MSG "attempt to overstep process limit by " DEFAULTSECMSG
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by " DEFAULTSECMSG
++#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " DEFAULTSECMSG
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied attempt to mount %.30s as %.930s from chroot by " DEFAULTSECMSG
++#define GR_PIVOT_CHROOT_MSG "denied attempt to pivot_root from chroot by " DEFAULTSECMSG
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " DEFAULTSECMSG
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " DEFAULTSECMSG
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " DEFAULTSECMSG
++#define GR_CHROOT_CHROOT_MSG "denied attempt to double chroot to %.950s by " DEFAULTSECMSG
++#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " DEFAULTSECMSG
++#define GR_CHMOD_CHROOT_MSG "denied attempt to chmod +s %.950s by " DEFAULTSECMSG
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " DEFAULTSECMSG
++#define GR_CHROOT_FCHDIR_MSG "attempted fchdir outside of chroot to %.950s by " DEFAULTSECMSG
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " DEFAULTSECMSG
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " DEFAULTSECMSG
++#define GR_INITF_ACL_MSG "init_variables() failed %s"
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: being fed garbage %d bytes sent %d required"
++#define GR_SHUTS_ACL_MSG "shutdown auth success for " DEFAULTSECMSG
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for " DEFAULTSECMSG
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " DEFAULTSECMSG
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " DEFAULTSECMSG
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " DEFAULTSECMSG
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " DEFAULTSECMSG
++#define GR_ENABLE_ACL_MSG "Loaded %s"
++#define GR_ENABLEF_ACL_MSG "Unable to load %s for " DEFAULTSECMSG " RBAC system may already be enabled."
++#define GR_RELOADI_ACL_MSG "Ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "Reloaded %s"
++#define GR_RELOADF_ACL_MSG "Failed reload of %s for " DEFAULTSECMSG
++#define GR_SPROLEI_ACL_MSG "Ignoring change to special role for disabled RBAC system for " DEFAULTSECMSG
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " DEFAULTSECMSG
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " DEFAULTSECMSG
++#define GR_SPROLEF_ACL_MSG "special role %s failure for " DEFAULTSECMSG
++#define GR_UNSPROLEI_ACL_MSG "Ignoring unauth of special role for disabled RBAC system for " DEFAULTSECMSG
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " DEFAULTSECMSG
++#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for " DEFAULTSECMSG
++#define GR_INVMODE_ACL_MSG "Invalid mode %d by " DEFAULTSECMSG
++#define GR_MAXPW_ACL_MSG "Maximum pw attempts reached (%d), locking password authentication"
++#define GR_MAXROLEPW_ACL_MSG "Maximum pw attempts reached (%d) trying to auth to special role %s, locking auth for role of " DEFAULTSECMSG
++#define GR_PRIORITY_CHROOT_MSG "attempted priority change of process (%.16s:%d) by " DEFAULTSECMSG
++#define GR_CAPSET_CHROOT_MSG "denied capset of (%.16s:%d) within chroot by " DEFAULTSECMSG
++#define GR_FAILFORK_MSG "failed fork with errno %d by " DEFAULTSECMSG
++#define GR_NICE_CHROOT_MSG "attempted priority change by " DEFAULTSECMSG
++#define GR_UNISIGLOG_MSG "signal %d sent to " DEFAULTSECMSG
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " DEFAULTSECMSG
++#define GR_SIG_ACL_MSG "Attempted send of signal %d to protected task " DEFAULTSECMSG " by " DEFAULTSECMSG
++#define GR_SYSCTL_MSG "attempt to modify grsecurity sysctl value : %.32s by " DEFAULTSECMSG
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " DEFAULTSECMSG
++#define GR_TIME_MSG "time set by " DEFAULTSECMSG
++#define GR_DEFACL_MSG "Fatal: Unable to find ACL for (%.16s:%d)"
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " DEFAULTSECMSG
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " DEFAULTSECMSG
++#define GR_SOCK_MSG "attempted socket(%.16s,%.16s,%.16s) by " DEFAULTSECMSG
++#define GR_SOCK2_MSG "attempted socket(%d,%.16s,%.16s) by " DEFAULTSECMSG
++#define GR_BIND_MSG "attempted bind() by " DEFAULTSECMSG
++#define GR_CONNECT_MSG "attempted connect by " DEFAULTSECMSG
++#define GR_BIND_ACL_MSG "attempted bind to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " DEFAULTSECMSG
++#define GR_CONNECT_ACL_MSG "attempted connect to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " DEFAULTSECMSG
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " DEFAULTSECMSG
++#define GR_CAP_ACL_MSG "use of %s denied for " DEFAULTSECMSG
++#define GR_USRCHANGE_ACL_MSG "change to uid %d denied for " DEFAULTSECMSG
++#define GR_GRPCHANGE_ACL_MSG "change to gid %d denied for " DEFAULTSECMSG
++#define GR_REMOUNT_AUDIT_MSG "remount of %.30s by " DEFAULTSECMSG
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.30s by " DEFAULTSECMSG
++#define GR_MOUNT_AUDIT_MSG "mount %.30s to %.64s by " DEFAULTSECMSG
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " DEFAULTSECMSG
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.63s) by " DEFAULTSECMSG
++#define GR_MSGQ_AUDIT_MSG "message queue created by " DEFAULTSECMSG
++#define GR_MSGQR_AUDIT_MSG "message queue of uid:%d euid:%d removed by " DEFAULTSECMSG
++#define GR_SEM_AUDIT_MSG "semaphore created by " DEFAULTSECMSG
++#define GR_SEMR_AUDIT_MSG "semaphore of uid:%d euid:%d removed by " DEFAULTSECMSG
++#define GR_SHM_AUDIT_MSG "shared memory of size %d created by " DEFAULTSECMSG
++#define GR_SHMR_AUDIT_MSG "shared memory of uid:%d euid:%d removed by " DEFAULTSECMSG
++#define GR_RESOURCE_MSG "attempted resource overstep by requesting %lu for %.16s against limit %lu by " DEFAULTSECMSG
++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " DEFAULTSECMSG
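
Each of these strings is built around DEFAULTSECMSG, whose conversions line up one-for-one with the DEFAULTSECARGS list from grinternal.h (the acting task's path, name, pid and ids, then the same for its parent), so call sites pass the pair together. A hedged usage sketch; the wrapper function is invented for illustration and only the macro and message names come from the headers above:

/* Illustrative call site: rate-limited alert with full process context. */
static void example_log_time_set(void)
{
        security_alert(GR_TIME_MSG, DEFAULTSECARGS);
}
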
+diff -urN linux-2.6.5/include/linux/grsecurity.h linux-2.6.5-new/include/linux/grsecurity.h
+--- linux-2.6.5/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/include/linux/grsecurity.h 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,188 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/binfmts.h>
++
++extern int gr_check_user_change(int real, int effective, int fs);
++extern int gr_check_group_change(int real, int effective, int fs);
++
++extern void gr_add_to_task_ip_table(struct task_struct *p);
++extern void gr_del_task_from_ip_table(struct task_struct *p);
++
++extern int gr_pid_is_chrooted(const struct task_struct *p);
++extern int gr_handle_chroot_nice(void);
++extern int gr_handle_chroot_sysctl(const int op);
++extern int gr_handle_chroot_capset(const struct task_struct *target);
++extern int gr_handle_chroot_setpriority(struct task_struct *p,
++ const int niceval);
++extern int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++extern int gr_handle_chroot_chroot(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_handle_chroot_caps(struct task_struct *task);
++extern void gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt);
++extern int gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++extern int gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++extern int gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const char *dev_name);
++extern int gr_handle_chroot_pivot(void);
++extern int gr_handle_chroot_unix(const pid_t pid);
++
++extern int gr_handle_rawio(const struct inode *inode);
++extern int gr_handle_nproc(void);
++
++extern void gr_handle_ioperm(void);
++extern void gr_handle_iopl(void);
++
++extern int gr_tpe_allow(const struct file *file);
++
++extern int gr_random_pid(void);
++
++extern void gr_log_forkfail(const int retval);
++extern void gr_log_timechange(void);
++extern void gr_log_signal(const int sig, const struct task_struct *t);
++extern void gr_log_chdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_log_chroot_exec(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
++extern void gr_log_remount(const char *devname, const int retval);
++extern void gr_log_unmount(const char *devname, const int retval);
++extern void gr_log_mount(const char *from, const char *to, const int retval);
++extern void gr_log_msgget(const int ret, const int msgflg);
++extern void gr_log_msgrm(const uid_t uid, const uid_t cuid);
++extern void gr_log_semget(const int err, const int semflg);
++extern void gr_log_semrm(const uid_t uid, const uid_t cuid);
++extern void gr_log_shmget(const int err, const int shmflg, const size_t size);
++extern void gr_log_shmrm(const uid_t uid, const uid_t cuid);
++extern void gr_log_textrel(struct vm_area_struct *vma);
++
++extern int gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_handle_fifo(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag,
++ const int acc_mode);
++extern int gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode,
++ const int mode, const char *to);
++
++extern int gr_task_is_capable(struct task_struct *task, const int cap);
++extern int gr_is_capable_nolog(const int cap);
++extern void gr_learn_resource(const struct task_struct *task, const int limit,
++ const unsigned long wanted, const int gt);
++extern void gr_copy_label(struct task_struct *tsk);
++extern void gr_handle_crash(struct task_struct *task, const int sig);
++extern int gr_handle_signal(const struct task_struct *p, const int sig);
++extern int gr_check_crash_uid(const uid_t uid);
++extern int gr_check_protected_task(const struct task_struct *task);
++extern int gr_acl_handle_mmap(const struct file *file,
++ const unsigned long prot);
++extern int gr_acl_handle_mprotect(const struct file *file,
++ const unsigned long prot);
++extern int gr_check_hidden_task(const struct task_struct *tsk);
++extern __u32 gr_acl_handle_truncate(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_utime(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_access(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++extern __u32 gr_acl_handle_fchmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++extern __u32 gr_acl_handle_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++extern __u32 gr_acl_handle_chown(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_handle_ptrace_exec(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_handle_ptrace(struct task_struct *task, const long request);
++extern int gr_handle_proc_ptrace(struct task_struct *task);
++extern int gr_handle_mmap(const struct file *filp, const unsigned long prot);
++extern __u32 gr_acl_handle_execve(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_check_crash_exec(const struct file *filp);
++extern int gr_acl_is_enabled(void);
++extern void gr_set_kernel_label(struct task_struct *task);
++extern void gr_set_role_label(struct task_struct *task, const uid_t uid,
++ const gid_t gid);
++extern void gr_set_proc_label(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_open(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++extern __u32 gr_acl_handle_creat(const struct dentry *dentry,
++ const struct dentry *p_dentry,
++ const struct vfsmount *p_mnt, const int fmode,
++ const int imode);
++extern void gr_handle_create(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const int mode);
++extern __u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt);
++extern __u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_handle_delete(const ino_t ino, const dev_t dev);
++extern __u32 gr_acl_handle_unlink(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const char *from);
++extern __u32 gr_acl_handle_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt, const char *to);
++extern int gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname);
++extern void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace);
++extern __u32 gr_check_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt);
++extern __u32 gr_acl_handle_filldir(const struct dentry *dentry,
++ const struct vfsmount *mnt, const ino_t ino);
++extern __u32 gr_acl_handle_unix(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_acl_handle_exit(void);
++extern void gr_acl_handle_psacct(struct task_struct *task, const long code);
++extern int gr_acl_handle_procpidmem(const struct task_struct *task);
++extern __u32 gr_cap_rtnetlink(void);
++
++#ifdef CONFIG_GRKERNSEC
++extern void gr_handle_mem_write(void);
++extern void gr_handle_kmem_write(void);
++extern void gr_handle_open_port(void);
++extern int gr_handle_mem_mmap(const unsigned long offset,
++ struct vm_area_struct *vma);
++
++extern __u16 ip_randomid(void);
++extern __u32 ip_randomisn(void);
++extern unsigned long get_random_long(void);
++
++extern int grsec_enable_dmesg;
++extern int grsec_enable_randid;
++extern int grsec_enable_randisn;
++extern int grsec_enable_randsrc;
++extern int grsec_enable_randrpc;
++#endif
++
++#endif
+diff -urN linux-2.6.5/include/linux/mm.h linux-2.6.5-new/include/linux/mm.h
+--- linux-2.6.5/include/linux/mm.h 2004-04-03 22:36:15.000000000 -0500
++++ linux-2.6.5-new/include/linux/mm.h 2004-04-14 09:15:12.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+ #include <asm/atomic.h>
++#include <asm/mman.h>
+
+ #ifndef MM_VM_SIZE
+ #define MM_VM_SIZE(mm) TASK_SIZE
+@@ -115,6 +116,18 @@
+ /* It makes sense to apply VM_ACCOUNT to this vma. */
+ #define VM_MAYACCT(vma) (!!((vma)->vm_flags & VM_HUGETLB))
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define VM_MIRROR 0x01000000 /* vma is mirroring another */
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++#define VM_MAYNOTWRITE 0x02000000 /* vma cannot be granted VM_WRITE any more */
++#endif
++
++#ifdef __VM_STACK_FLAGS
++#define VM_STACK_DEFAULT_FLAGS (0x00000033 | __VM_STACK_FLAGS)
++#endif
++
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+ #endif
+@@ -549,21 +562,48 @@
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+
++extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++
+ static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+ {
+ unsigned long ret = -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || (addr && addr > SEGMEXEC_TASK_SIZE-len)))
++ goto out;
++#endif
++
+ if ((offset + PAGE_ALIGN(len)) < offset)
+ goto out;
+ if (!(offset & ~PAGE_MASK))
+ ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && ret < TASK_SIZE && ((flag & MAP_TYPE) == MAP_PRIVATE)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && (!(current->flags & PF_PAX_MPROTECT) || ((prot & PROT_EXEC) && file && !(prot & PROT_WRITE)))
++#endif
++
++ )
++ {
++ unsigned long ret_m;
++ prot = prot & PROT_EXEC ? prot : PROT_NONE;
++ ret_m = do_mmap_pgoff(NULL, ret + SEGMEXEC_TASK_SIZE, 0UL, prot, flag | MAP_MIRROR | MAP_FIXED, ret);
++ if (ret_m >= TASK_SIZE) {
++ do_munmap(current->mm, ret, len);
++ ret = ret_m;
++ }
++ }
++#endif
++
+ out:
+ return ret;
+ }
+
+-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+-
+ extern unsigned long do_brk(unsigned long, unsigned long);
+
+ static inline void
+@@ -579,6 +619,12 @@
+ static inline int
+ can_vma_merge(struct vm_area_struct *vma, unsigned long vm_flags)
+ {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((vma->vm_flags | vm_flags) & VM_MIRROR)
++ return 0;
++#endif
++
+ #ifdef CONFIG_MMU
+ if (!vma->vm_file && vma->vm_flags == vm_flags)
+ return 1;
+diff -urN linux-2.6.5/include/linux/mman.h linux-2.6.5-new/include/linux/mman.h
+--- linux-2.6.5/include/linux/mman.h 2004-04-03 22:36:19.000000000 -0500
++++ linux-2.6.5-new/include/linux/mman.h 2004-04-14 09:15:12.000000000 -0400
+@@ -56,6 +56,11 @@
+ calc_vm_flag_bits(unsigned long flags)
+ {
+ return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ _calc_vm_trans(flags, MAP_MIRROR, VM_MIRROR) |
++#endif
++
+ _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
+ _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
+ _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
+diff -urN linux-2.6.5/include/linux/proc_fs.h linux-2.6.5-new/include/linux/proc_fs.h
+--- linux-2.6.5/include/linux/proc_fs.h 2004-04-03 22:38:28.000000000 -0500
++++ linux-2.6.5-new/include/linux/proc_fs.h 2004-04-14 09:06:29.000000000 -0400
+@@ -226,7 +226,7 @@
+
+ #endif /* CONFIG_PROC_FS */
+
+-#if !defined(CONFIG_PROC_FS)
++#if !defined(CONFIG_PROC_FS) || !defined(CONFIG_PROC_KCORE)
+ static inline void kclist_add(struct kcore_list *new, void *addr, size_t size)
+ {
+ }
+diff -urN linux-2.6.5/include/linux/random.h linux-2.6.5-new/include/linux/random.h
+--- linux-2.6.5/include/linux/random.h 2004-04-03 22:38:24.000000000 -0500
++++ linux-2.6.5-new/include/linux/random.h 2004-04-14 09:15:12.000000000 -0400
+@@ -69,6 +69,8 @@
+
+ extern __u32 secure_ipv6_id(__u32 *daddr);
+
++extern unsigned long pax_get_random_long(void);
++
+ #ifndef MODULE
+ extern struct file_operations random_fops, urandom_fops;
+ #endif
+diff -urN linux-2.6.5/include/linux/sched.h linux-2.6.5-new/include/linux/sched.h
+--- linux-2.6.5/include/linux/sched.h 2004-04-03 22:36:18.000000000 -0500
++++ linux-2.6.5-new/include/linux/sched.h 2004-04-14 09:15:12.000000000 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/percpu.h>
+
+ struct exec_domain;
++struct linux_binprm;
+
+ /*
+ * cloning flags:
+@@ -228,6 +229,21 @@
+ struct kioctx *ioctx_list;
+
+ struct kioctx default_kioctx;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_exec; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ extern int mmlist_nr;
+@@ -493,6 +509,22 @@
+
+ unsigned long ptrace_message;
+ siginfo_t *last_siginfo; /* For ptrace use. */
++
++#ifdef CONFIG_GRKERNSEC
++ /* grsecurity */
++ struct acl_subject_label *acl;
++ struct acl_role_label *role;
++ struct file *exec_file;
++ u32 curr_ip;
++ u32 gr_saddr;
++ u32 gr_daddr;
++ u16 gr_sport;
++ u16 gr_dport;
++ u16 acl_role_id;
++ u8 acl_sp_role:1;
++ u8 used_accept:1;
++ u8 is_writable:1;
++#endif
+ };
+
+ static inline pid_t process_group(struct task_struct *tsk)
+@@ -530,6 +562,29 @@
+ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+ #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
+
++#define PF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define PF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define PF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define PF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++#define PF_PAX_RANDEXEC 0x10000000 /* Randomize ET_EXEC base */
++#define PF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++extern unsigned int pax_aslr;
++#endif
++
++extern unsigned int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_flags(struct linux_binprm * bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_flags_func)(struct linux_binprm * bprm);
++#endif
++
+ #ifdef CONFIG_SMP
+ extern int set_cpus_allowed(task_t *p, cpumask_t new_mask);
+ #else
+@@ -675,14 +730,29 @@
+ : on_sig_stack(sp) ? SS_ONSTACK : 0);
+ }
+
++extern int gr_task_is_capable(struct task_struct *task, const int cap);
++extern int gr_is_capable_nolog(const int cap);
+
+ #ifdef CONFIG_SECURITY
+ /* code is in security.c */
+ extern int capable(int cap);
++static inline int capable_nolog(int cap)
++{
++ return capable(cap);
++}
+ #else
+ static inline int capable(int cap)
+ {
+- if (cap_raised(current->cap_effective, cap)) {
++ if (cap_raised(current->cap_effective, cap) && gr_task_is_capable(current, cap)) {
++ current->flags |= PF_SUPERPRIV;
++ return 1;
++ }
++ return 0;
++}
++
++static inline int capable_nolog(int cap)
++{
++ if (cap_raised(current->cap_effective, cap) && gr_is_capable_nolog(cap)) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
+diff -urN linux-2.6.5/include/linux/shm.h linux-2.6.5-new/include/linux/shm.h
+--- linux-2.6.5/include/linux/shm.h 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/include/linux/shm.h 2004-04-14 09:06:29.000000000 -0400
+@@ -84,6 +84,10 @@
+ time_t shm_ctim;
+ pid_t shm_cprid;
+ pid_t shm_lprid;
++#ifdef CONFIG_GRKERNSEC
++ time_t shm_createtime;
++ pid_t shm_lapid;
++#endif
+ };
+
+ /* shm_mode upper byte flags */
+diff -urN linux-2.6.5/include/linux/sysctl.h linux-2.6.5-new/include/linux/sysctl.h
+--- linux-2.6.5/include/linux/sysctl.h 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/include/linux/sysctl.h 2004-04-14 09:16:31.000000000 -0400
+@@ -131,8 +131,21 @@
+ KERN_PRINTK_RATELIMIT_BURST=61, /* int: tune printk ratelimiting */
+ KERN_PTY=62, /* dir: pty driver */
+ KERN_NGROUPS_MAX=63, /* int: NGROUPS_MAX */
++ KERN_GRSECURITY=68, /* grsecurity */
++
++
++#ifdef CONFIG_PAX_SOFTMODE
++ KERN_PAX=69, /* PaX control */
++#endif
++
+ };
+
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_ASLR=1, /* PaX: disable/enable all randomization features */
++ PAX_SOFTMODE=2 /* PaX: disable/enable soft mode */
++};
++#endif
+
+ /* CTL_VM names: */
+ enum
+diff -urN linux-2.6.5/include/net/ip.h linux-2.6.5-new/include/net/ip.h
+--- linux-2.6.5/include/net/ip.h 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/include/net/ip.h 2004-04-14 09:06:29.000000000 -0400
+@@ -33,6 +33,11 @@
+ #include <net/route.h>
+ #include <net/arp.h>
+
++#ifdef CONFIG_GRKERNSEC_RANDID
++extern int grsec_enable_randid;
++extern __u16 ip_randomid(void);
++#endif
++
+ #ifndef _SNMP_H
+ #include <net/snmp.h>
+ #endif
+@@ -188,6 +193,13 @@
+
+ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+ {
++
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ iph->id = htons(ip_randomid());
++ else
++#endif
++
+ if (iph->frag_off & htons(IP_DF)) {
+ /* This is only to work around buggy Windows95/2000
+ * VJ compression implementations. If the ID field
+diff -urN linux-2.6.5/init/Kconfig linux-2.6.5-new/init/Kconfig
+--- linux-2.6.5/init/Kconfig 2004-04-03 22:37:44.000000000 -0500
++++ linux-2.6.5-new/init/Kconfig 2004-04-14 09:06:29.000000000 -0400
+@@ -193,6 +193,7 @@
+ config KALLSYMS
+ bool "Load all symbols for debugging/kksymoops" if EMBEDDED
+ default y
++ depends on !GRKERNSEC_HIDESYM
+ help
+ Say Y here to let the kernel print out symbolic crash information and
+ symbolic stack backtraces. This increases the size of the kernel
+diff -urN linux-2.6.5/init/do_mounts.c linux-2.6.5-new/init/do_mounts.c
+--- linux-2.6.5/init/do_mounts.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/init/do_mounts.c 2004-04-14 09:06:29.000000000 -0400
+@@ -287,6 +287,7 @@
+ case -EINVAL:
+ continue;
+ }
++
+ /*
+ * Allow the user to distinguish between failed sys_open
+ * and bad superblock on root device.
+diff -urN linux-2.6.5/init/main.c linux-2.6.5-new/init/main.c
+--- linux-2.6.5/init/main.c 2004-04-03 22:36:25.000000000 -0500
++++ linux-2.6.5-new/init/main.c 2004-04-14 09:06:29.000000000 -0400
+@@ -89,6 +89,7 @@
+ extern void free_initmem(void);
+ extern void populate_rootfs(void);
+ extern void driver_init(void);
++extern void grsecurity_init(void);
+
+ #ifdef CONFIG_TC
+ extern void tc_init(void);
+@@ -605,7 +606,7 @@
+ do_basic_setup();
+
+ prepare_namespace();
+-
++ grsecurity_init();
+ /*
+ * Ok, we have completed the initial bootup, and
+ * we're essentially up and running. Get rid of the
+diff -urN linux-2.6.5/ipc/msg.c linux-2.6.5-new/ipc/msg.c
+--- linux-2.6.5/ipc/msg.c 2004-04-03 22:36:13.000000000 -0500
++++ linux-2.6.5-new/ipc/msg.c 2004-04-14 09:06:29.000000000 -0400
+@@ -24,6 +24,7 @@
+ #include <linux/list.h>
+ #include <linux/security.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+ #include "util.h"
+@@ -331,6 +332,9 @@
+ msg_unlock(msq);
+ }
+ up(&msg_ids.sem);
++
++ gr_log_msgget(ret, msgflg);
++
+ return ret;
+ }
+
+@@ -580,6 +584,8 @@
+ break;
+ }
+ case IPC_RMID:
++ gr_log_msgrm(ipcp->uid, ipcp->cuid);
++
+ freeque (msq, msqid);
+ break;
+ }
+diff -urN linux-2.6.5/ipc/sem.c linux-2.6.5-new/ipc/sem.c
+--- linux-2.6.5/ipc/sem.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/ipc/sem.c 2004-04-14 09:06:29.000000000 -0400
+@@ -71,6 +71,7 @@
+ #include <linux/time.h>
+ #include <linux/smp_lock.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include "util.h"
+
+@@ -238,6 +239,9 @@
+ }
+
+ up(&sem_ids.sem);
++
++ gr_log_semget(err, semflg);
++
+ return err;
+ }
+
+@@ -804,6 +808,8 @@
+
+ switch(cmd){
+ case IPC_RMID:
++ gr_log_semrm(ipcp->uid, ipcp->cuid);
++
+ freeary(sma, semid);
+ err = 0;
+ break;
+diff -urN linux-2.6.5/ipc/shm.c linux-2.6.5-new/ipc/shm.c
+--- linux-2.6.5/ipc/shm.c 2004-04-03 22:37:07.000000000 -0500
++++ linux-2.6.5-new/ipc/shm.c 2004-04-14 09:06:29.000000000 -0400
+@@ -26,6 +26,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/shmem_fs.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+
+ #include "util.h"
+@@ -50,6 +51,14 @@
+ static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid,
++ const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime);
++#endif
++
+ size_t shm_ctlmax = SHMMAX;
+ size_t shm_ctlall = SHMALL;
+ int shm_ctlmni = SHMMNI;
+@@ -213,6 +222,9 @@
+ shp->shm_lprid = 0;
+ shp->shm_atim = shp->shm_dtim = 0;
+ shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_createtime = get_seconds();
++#endif
+ shp->shm_segsz = size;
+ shp->shm_nattch = 0;
+ shp->id = shm_buildid(id,shp->shm_perm.seq);
+@@ -267,6 +279,8 @@
+ }
+ up(&shm_ids.sem);
+
++ gr_log_shmget(err, shmflg, size);
++
+ return err;
+ }
+
+@@ -567,6 +581,8 @@
+ if (err)
+ goto out_unlock_up;
+
++ gr_log_shmrm(shp->shm_perm.uid, shp->shm_perm.cuid);
++
+ if (shp->shm_nattch){
+ shp->shm_flags |= SHM_DEST;
+ /* Do not find it any more */
+@@ -705,9 +721,27 @@
+ return err;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++ shp->shm_perm.cuid, shmid)) {
++ shm_unlock(shp);
++ return -EACCES;
++ }
++
++ if (!gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++ shm_unlock(shp);
++ return -EACCES;
++ }
++#endif
++
+ file = shp->shm_file;
+ size = i_size_read(file->f_dentry->d_inode);
+ shp->shm_nattch++;
++
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_lapid = current->pid;
++#endif
++
+ shm_unlock(shp);
+
+ down_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.5/kernel/capability.c linux-2.6.5-new/kernel/capability.c
+--- linux-2.6.5/kernel/capability.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/kernel/capability.c 2004-04-14 09:06:29.000000000 -0400
+@@ -10,6 +10,7 @@
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+
+ unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
+@@ -168,6 +169,11 @@
+ } else
+ target = current;
+
++ if (gr_handle_chroot_capset(target)) {
++ ret = -ESRCH;
++ goto out;
++ }
++
+ ret = -EPERM;
+
+ if (security_capset_check(target, &effective, &inheritable, &permitted))
+diff -urN linux-2.6.5/kernel/configs.c linux-2.6.5-new/kernel/configs.c
+--- linux-2.6.5/kernel/configs.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/kernel/configs.c 2004-04-14 09:06:29.000000000 -0400
+@@ -81,8 +81,16 @@
+ IKCONFIG_VERSION);
+
+ /* create the current config file */
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR, &proc_root);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR | S_IRGRP, &proc_root);
++#endif
++#else
+ entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO,
+ &proc_root);
++#endif
+ if (!entry)
+ return -ENOMEM;
+
+diff -urN linux-2.6.5/kernel/exit.c linux-2.6.5-new/kernel/exit.c
+--- linux-2.6.5/kernel/exit.c 2004-04-03 22:38:13.000000000 -0500
++++ linux-2.6.5-new/kernel/exit.c 2004-04-14 09:06:29.000000000 -0400
+@@ -22,6 +22,7 @@
+ #include <linux/profile.h>
+ #include <linux/mount.h>
+ #include <linux/proc_fs.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -232,6 +233,13 @@
+ {
+ write_lock_irq(&tasklist_lock);
+
++#ifdef CONFIG_GRKERNSEC
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++#endif
++
+ ptrace_unlink(current);
+ /* Reparent to init */
+ REMOVE_LINKS(current);
+@@ -239,6 +247,8 @@
+ current->real_parent = child_reaper;
+ SET_LINKS(current);
+
++ gr_set_kernel_label(current);
++
+ /* Set the exit signal to SIGCHLD so we signal init on exit */
+ current->exit_signal = SIGCHLD;
+
+@@ -333,6 +343,15 @@
+ vsnprintf(current->comm, sizeof(current->comm), name, args);
+ va_end(args);
+
++#ifdef CONFIG_GRKERNSEC
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++#endif
++
++ gr_set_kernel_label(current);
++
+ /*
+ * If we were started as result of loading a module, close all of the
+ * user space pages. We don't need them, and if we didn't close them
+@@ -771,6 +790,11 @@
+ }
+
+ acct_process(code);
++
++ gr_acl_handle_psacct(tsk, code);
++ gr_acl_handle_exit();
++ gr_del_task_from_ip_table(tsk);
++
+ __exit_mm(tsk);
+
+ exit_sem(tsk);
+diff -urN linux-2.6.5/kernel/fork.c linux-2.6.5-new/kernel/fork.c
+--- linux-2.6.5/kernel/fork.c 2004-04-03 22:36:18.000000000 -0500
++++ linux-2.6.5-new/kernel/fork.c 2004-04-14 09:15:12.000000000 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/futex.h>
+ #include <linux/ptrace.h>
+ #include <linux/mount.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -274,7 +275,7 @@
+ mm->locked_vm = 0;
+ mm->mmap = NULL;
+ mm->mmap_cache = NULL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++ mm->free_area_cache = oldmm->free_area_cache;
+ mm->map_count = 0;
+ mm->rss = 0;
+ cpus_clear(mm->cpu_vm_mask);
+@@ -880,6 +881,9 @@
+ goto fork_out;
+
+ retval = -EAGAIN;
++
++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->user->processes), 0);
++
+ if (atomic_read(&p->user->processes) >=
+ p->rlim[RLIMIT_NPROC].rlim_cur) {
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+@@ -968,6 +972,8 @@
+ if (retval)
+ goto bad_fork_cleanup_namespace;
+
++ gr_copy_label(p);
++
+ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+ /*
+ * Clear TID on mm_release()?
+@@ -1101,6 +1107,9 @@
+ free_uid(p->user);
+ bad_fork_free:
+ free_task(p);
++
++ gr_log_forkfail(retval);
++
+ goto fork_out;
+ }
+
+diff -urN linux-2.6.5/kernel/kallsyms.c linux-2.6.5-new/kernel/kallsyms.c
+--- linux-2.6.5/kernel/kallsyms.c 2004-04-03 22:38:21.000000000 -0500
++++ linux-2.6.5-new/kernel/kallsyms.c 2004-04-14 09:06:29.000000000 -0400
+@@ -301,7 +301,15 @@
+ {
+ struct proc_dir_entry *entry;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ entry = create_proc_entry("kallsyms", 0444, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &kallsyms_operations;
+ return 0;
+diff -urN linux-2.6.5/kernel/pid.c linux-2.6.5-new/kernel/pid.c
+--- linux-2.6.5/kernel/pid.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/kernel/pid.c 2004-04-14 09:06:29.000000000 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/init.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/grsecurity.h>
+
+ #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
+ static struct list_head *pid_hash[PIDTYPE_MAX];
+@@ -99,10 +100,12 @@
+
+ int alloc_pidmap(void)
+ {
+- int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
++ int pid = 0, offset, max_steps = PIDMAP_ENTRIES + 1;
+ pidmap_t *map;
+
+- pid = last_pid + 1;
++ pid = gr_random_pid();
++ if (!pid)
++ pid = last_pid + 1;
+ if (pid >= pid_max)
+ pid = RESERVED_PIDS;
+
+@@ -225,10 +228,16 @@
+ task_t *find_task_by_pid(int nr)
+ {
+ struct pid *pid = find_pid(PIDTYPE_PID, nr);
++ struct task_struct *task = NULL;
+
+ if (!pid)
+ return NULL;
+- return pid_task(pid->task_list.next, PIDTYPE_PID);
++ task = pid_task(pid->task_list.next, PIDTYPE_PID);
++
++ if (gr_pid_is_chrooted(task))
++ return NULL;
++
++ return task;
+ }
+
+ EXPORT_SYMBOL(find_task_by_pid);
+diff -urN linux-2.6.5/kernel/printk.c linux-2.6.5-new/kernel/printk.c
+--- linux-2.6.5/kernel/printk.c 2004-04-03 22:38:24.000000000 -0500
++++ linux-2.6.5-new/kernel/printk.c 2004-04-14 09:06:29.000000000 -0400
+@@ -30,6 +30,7 @@
+ #include <linux/smp.h>
+ #include <linux/security.h>
+ #include <linux/bootmem.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+
+@@ -248,6 +249,11 @@
+ char c;
+ int error = 0;
+
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (!capable(CAP_SYS_ADMIN) && grsec_enable_dmesg)
++ return -EPERM;
++#endif
++
+ error = security_syslog(type);
+ if (error)
+ return error;
+diff -urN linux-2.6.5/kernel/resource.c linux-2.6.5-new/kernel/resource.c
+--- linux-2.6.5/kernel/resource.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/kernel/resource.c 2004-04-14 09:06:29.000000000 -0400
+@@ -134,10 +134,27 @@
+ {
+ struct proc_dir_entry *entry;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("ioports", S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("ioports", S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ entry = create_proc_entry("ioports", 0, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &proc_ioports_operations;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("iomem", S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("iomem", S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ entry = create_proc_entry("iomem", 0, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &proc_iomem_operations;
+ return 0;
+diff -urN linux-2.6.5/kernel/sched.c linux-2.6.5-new/kernel/sched.c
+--- linux-2.6.5/kernel/sched.c 2004-04-03 22:37:42.000000000 -0500
++++ linux-2.6.5-new/kernel/sched.c 2004-04-14 09:06:29.000000000 -0400
+@@ -39,6 +39,7 @@
+ #include <linux/cpu.h>
+ #include <linux/percpu.h>
+ #include <linux/kthread.h>
++#include <linux/grsecurity.h>
+
+ #ifdef CONFIG_NUMA
+ #define cpu_to_node_mask(cpu) node_to_cpumask(cpu_to_node(cpu))
+@@ -2038,6 +2039,8 @@
+ return -EPERM;
+ if (increment < -40)
+ increment = -40;
++ if (gr_handle_chroot_nice())
++ return -EPERM;
+ }
+ if (increment > 40)
+ increment = 40;
+diff -urN linux-2.6.5/kernel/signal.c linux-2.6.5-new/kernel/signal.c
+--- linux-2.6.5/kernel/signal.c 2004-04-03 22:36:57.000000000 -0500
++++ linux-2.6.5-new/kernel/signal.c 2004-04-14 09:13:57.000000000 -0400
+@@ -21,6 +21,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/grsecurity.h>
+ #include <asm/param.h>
+ #include <asm/uaccess.h>
+ #include <asm/siginfo.h>
+@@ -750,11 +751,13 @@
+ (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
+
+
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ int ret = 0;
+
++ gr_log_signal(sig, t);
++
+ if (!irqs_disabled())
+ BUG();
+ #ifdef CONFIG_SMP
+@@ -805,6 +808,8 @@
+ ret = specific_send_sig_info(sig, info, t);
+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
++ gr_handle_crash(t, sig);
++
+ return ret;
+ }
+
+@@ -1063,9 +1068,14 @@
+ int err;
+
+ found = 1;
+- err = group_send_sig_info(sig, info, p);
+- if (!retval)
+- retval = err;
++
++ if (gr_handle_signal(p, sig))
++ retval = -EPERM;
++ else {
++ err = group_send_sig_info(sig, info, p);
++ if (!retval)
++ retval = err;
++ }
+ }
+ return found ? retval : -ESRCH;
+ }
+@@ -1123,8 +1133,12 @@
+ read_lock(&tasklist_lock);
+ p = find_task_by_pid(pid);
+ error = -ESRCH;
+- if (p)
+- error = group_send_sig_info(sig, info, p);
++ if (p) {
++ if (gr_handle_signal(p, sig))
++ error = -EPERM;
++ else
++ error = group_send_sig_info(sig, info, p);
++ }
+ read_unlock(&tasklist_lock);
+ return error;
+ }
+@@ -1148,10 +1162,14 @@
+ read_lock(&tasklist_lock);
+ for_each_process(p) {
+ if (p->pid > 1 && p->tgid != current->tgid) {
+- int err = group_send_sig_info(sig, info, p);
+- ++count;
+- if (err != -EPERM)
+- retval = err;
++ if (gr_handle_signal(p, sig))
++ retval = -EPERM;
++ else {
++ int err = group_send_sig_info(sig, info, p);
++ ++count;
++ if (err != -EPERM)
++ retval = err;
++ }
+ }
+ }
+ read_unlock(&tasklist_lock);
+diff -urN linux-2.6.5/kernel/sys.c linux-2.6.5-new/kernel/sys.c
+--- linux-2.6.5/kernel/sys.c 2004-04-03 22:36:18.000000000 -0500
++++ linux-2.6.5-new/kernel/sys.c 2004-04-14 09:06:29.000000000 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/security.h>
+ #include <linux/dcookies.h>
+ #include <linux/suspend.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -279,6 +280,12 @@
+ error = -EACCES;
+ goto out;
+ }
++
++ if (gr_handle_chroot_setpriority(p, niceval)) {
++ error = -ESRCH;
++ goto out;
++ }
++
+ no_nice = security_task_setnice(p, niceval);
+ if (no_nice) {
+ error = no_nice;
+@@ -579,6 +586,9 @@
+ if (rgid != (gid_t) -1 ||
+ (egid != (gid_t) -1 && egid != old_rgid))
+ current->sgid = new_egid;
++
++ gr_set_role_label(current, current->uid, new_rgid);
++
+ current->fsgid = new_egid;
+ current->egid = new_egid;
+ current->gid = new_rgid;
+@@ -606,6 +616,9 @@
+ current->mm->dumpable=0;
+ wmb();
+ }
++
++ gr_set_role_label(current, current->uid, gid);
++
+ current->gid = current->egid = current->sgid = current->fsgid = gid;
+ }
+ else if ((gid == current->gid) || (gid == current->sgid))
+@@ -644,6 +657,9 @@
+ current->mm->dumpable = 0;
+ wmb();
+ }
++
++ gr_set_role_label(current, new_ruid, current->gid);
++
+ current->uid = new_ruid;
+ return 0;
+ }
+@@ -744,6 +760,9 @@
+ } else if ((uid != current->uid) && (uid != new_suid))
+ return -EPERM;
+
++ if (gr_check_crash_uid(uid))
++ return -EPERM;
++
+ if (old_euid != uid)
+ {
+ current->mm->dumpable = 0;
+@@ -843,8 +862,10 @@
+ current->egid = egid;
+ }
+ current->fsgid = current->egid;
+- if (rgid != (gid_t) -1)
++ if (rgid != (gid_t) -1) {
++ gr_set_role_label(current, current->uid, rgid);
+ current->gid = rgid;
++ }
+ if (sgid != (gid_t) -1)
+ current->sgid = sgid;
+ return 0;
+diff -urN linux-2.6.5/kernel/sysctl.c linux-2.6.5-new/kernel/sysctl.c
+--- linux-2.6.5/kernel/sysctl.c 2004-04-03 22:36:18.000000000 -0500
++++ linux-2.6.5-new/kernel/sysctl.c 2004-04-14 09:15:12.000000000 -0400
+@@ -46,6 +46,14 @@
+ #endif
+
+ #if defined(CONFIG_SYSCTL)
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern __u32 gr_handle_sysctl(const ctl_table *table, const void *oldval,
++ const void *newval);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++ const int op);
++extern int gr_handle_chroot_sysctl(const int op);
+
+ /* External variables not in a header file. */
+ extern int panic_timeout;
+@@ -139,6 +147,32 @@
+ #ifdef CONFIG_UNIX98_PTYS
+ extern ctl_table pty_table[];
+ #endif
++extern ctl_table grsecurity_table[];
++
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++ {
++ .ctl_name = PAX_ASLR,
++ .procname = "aslr",
++ .data = &pax_aslr,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++
++ {
++ .ctl_name = PAX_SOFTMODE,
++ .procname = "softmode",
++ .data = &pax_softmode,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ }
++};
++#endif
+
+ /* /proc declarations: */
+
+@@ -615,6 +649,14 @@
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ },
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ {
++ .ctl_name = KERN_GRSECURITY,
++ .procname = "grsecurity",
++ .mode = 0500,
++ .child = grsecurity_table,
++ },
++#endif
+ { .ctl_name = 0 }
+ };
+
+@@ -854,6 +896,16 @@
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {
++ .ctl_name = KERN_PAX,
++ .procname = "pax",
++ .mode = 0500,
++ .child = pax_table,
++ },
++#endif
++
+ { .ctl_name = 0 }
+ };
+
+@@ -938,6 +990,10 @@
+ static inline int ctl_perm(ctl_table *table, int op)
+ {
+ int error;
++ if (table->de && gr_handle_sysctl_mod(table->de->parent->name, table->de->name, op))
++ return -EACCES;
++ if (gr_handle_chroot_sysctl(op))
++ return -EACCES;
+ error = security_sysctl(table, op);
+ if (error)
+ return error;
+@@ -974,6 +1030,10 @@
+ table = table->child;
+ goto repeat;
+ }
++
++ if (!gr_handle_sysctl(table, oldval, newval))
++ return -EACCES;
++
+ error = do_sysctl_strategy(table, name, nlen,
+ oldval, oldlenp,
+ newval, newlen, context);
+diff -urN linux-2.6.5/kernel/time.c linux-2.6.5-new/kernel/time.c
+--- linux-2.6.5/kernel/time.c 2004-04-03 22:36:27.000000000 -0500
++++ linux-2.6.5-new/kernel/time.c 2004-04-14 09:06:29.000000000 -0400
+@@ -28,6 +28,7 @@
+ #include <linux/timex.h>
+ #include <linux/errno.h>
+ #include <linux/smp_lock.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+
+ /*
+@@ -80,6 +81,9 @@
+
+ tv.tv_nsec = 0;
+ do_settimeofday(&tv);
++
++ gr_log_timechange();
++
+ return 0;
+ }
+
+@@ -181,6 +185,8 @@
+ return -EFAULT;
+ }
+
++ gr_log_timechange();
++
+ return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
+ }
+
+diff -urN linux-2.6.5/kernel/timer.c linux-2.6.5-new/kernel/timer.c
+--- linux-2.6.5/kernel/timer.c 2004-04-03 22:37:59.000000000 -0500
++++ linux-2.6.5-new/kernel/timer.c 2004-04-14 09:06:29.000000000 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/time.h>
+ #include <linux/jiffies.h>
+ #include <linux/cpu.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/div64.h>
+@@ -688,6 +689,9 @@
+
+ psecs = (p->utime += user);
+ psecs += (p->stime += system);
++
++ gr_learn_resource(p, RLIMIT_CPU, psecs / HZ, 1);
++
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+ /* Send SIGXCPU every second.. */
+ if (!(psecs % HZ))
+diff -urN linux-2.6.5/mm/filemap.c linux-2.6.5-new/mm/filemap.c
+--- linux-2.6.5/mm/filemap.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/mm/filemap.c 2004-04-14 09:15:12.000000000 -0400
+@@ -27,6 +27,8 @@
+ #include <linux/pagevec.h>
+ #include <linux/blkdev.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
++
+ /*
+ * This is needed for the following functions:
+ * - try_to_release_page
+@@ -1356,6 +1358,12 @@
+
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++#endif
++
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ return 0;
+@@ -1654,6 +1662,7 @@
+ *pos = i_size_read(inode);
+
+ if (limit != RLIM_INFINITY) {
++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
+ if (*pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+diff -urN linux-2.6.5/mm/madvise.c linux-2.6.5-new/mm/madvise.c
+--- linux-2.6.5/mm/madvise.c 2004-04-03 22:36:52.000000000 -0500
++++ linux-2.6.5-new/mm/madvise.c 2004-04-14 09:15:12.000000000 -0400
+@@ -13,8 +13,42 @@
+ * We can potentially split a vm area into separate
+ * areas, each area with its own behavior.
+ */
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static long __madvise_behavior(struct vm_area_struct * vma, unsigned long start,
++ unsigned long end, int behavior);
++
++static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
++ unsigned long end, int behavior)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m, * prev_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + (unsigned long)vma->vm_private_data;
++ end_m = end + (unsigned long)vma->vm_private_data;
++ error = __madvise_behavior(vma_m, start_m, end_m, behavior);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: madvise bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++
++ return __madvise_behavior(vma, start, end, behavior);
++}
++
++static long __madvise_behavior(struct vm_area_struct * vma, unsigned long start,
++ unsigned long end, int behavior)
++#else
+ static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
+ unsigned long end, int behavior)
++#endif
+ {
+ struct mm_struct * mm = vma->vm_mm;
+ int error;
+diff -urN linux-2.6.5/mm/memory.c linux-2.6.5-new/mm/memory.c
+--- linux-2.6.5/mm/memory.c 2004-04-03 22:36:58.000000000 -0500
++++ linux-2.6.5-new/mm/memory.c 2004-04-14 09:15:12.000000000 -0400
+@@ -46,6 +46,7 @@
+ #include <linux/rmap-locking.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgalloc.h>
+ #include <asm/rmap.h>
+@@ -989,6 +990,69 @@
+ update_mmu_cache(vma, address, entry);
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * mm->page_table_lock is held on entry and is not released on exit or inside
++ * to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++struct pte_chain * pax_mirror_fault(struct mm_struct *mm, struct vm_area_struct * vma,
++ unsigned long address, pte_t *pte, struct pte_chain *pte_chain)
++{
++ unsigned long address_m;
++ struct vm_area_struct * vma_m = NULL;
++ pte_t * pte_m, entry_m;
++ struct page * page_m;
++
++ address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, address_m);
++ BUG_ON(!vma_m || vma_m->vm_start != address_m);
++
++ address_m = address + (unsigned long)vma->vm_private_data;
++
++ {
++ pgd_t *pgd_m;
++ pmd_t *pmd_m;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pmd_m = pmd_offset(pgd_m, address_m);
++ pte_m = pte_offset_map_nested(pmd_m, address_m);
++ }
++
++ if (pte_present(*pte_m)) {
++ flush_cache_page(vma_m, address_m);
++ flush_icache_page(vma_m, pte_page(*pte_m));
++ }
++ entry_m = ptep_get_and_clear(pte_m);
++ if (pte_present(entry_m))
++ flush_tlb_page(vma_m, address_m);
++
++ if (pte_none(entry_m)) {
++ ++mm->rss;
++ } else if (pte_present(entry_m)) {
++ page_remove_rmap(pte_page(entry_m), pte_m);
++ page_cache_release(pte_page(entry_m));
++ } else if (!pte_file(entry_m)) {
++ free_swap_and_cache(pte_to_swp_entry(entry_m));
++ ++mm->rss;
++ } else {
++ printk(KERN_ERR "PAX: VMMIRROR: bug in mirror_fault: %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, address_m, vma_m->vm_start);
++ }
++
++ page_m = pte_page(*pte);
++ page_cache_get(page_m);
++ entry_m = mk_pte(page_m, vma_m->vm_page_prot);
++ if (pte_write(*pte))
++ entry_m = pte_mkdirty(pte_mkwrite(entry_m));
++ pte_chain = page_add_rmap(page_m, pte_m, pte_chain);
++ ptep_establish(vma_m, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++ pte_unmap_nested(pte_m);
++ return pte_chain;
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -1017,6 +1081,10 @@
+ struct pte_chain *pte_chain;
+ pte_t entry;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct pte_chain *pte_chain_m = NULL;
++#endif
++
+ if (unlikely(!pfn_valid(pfn))) {
+ /*
+ * This should really halt the system so it can be debugged or
+@@ -1056,6 +1124,13 @@
+ pte_chain = pte_chain_alloc(GFP_KERNEL);
+ if (!pte_chain)
+ goto no_pte_chain;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_m = pte_chain_alloc(GFP_KERNEL);
++ if (!pte_chain_m)
++ goto no_new_page;
++#endif
++
+ new_page = alloc_page(GFP_HIGHUSER);
+ if (!new_page)
+ goto no_new_page;
+@@ -1076,16 +1151,32 @@
+
+ /* Free the old page.. */
+ new_page = old_page;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pte_chain_m = pax_mirror_fault(mm, vma, address, page_table, pte_chain_m);
++#endif
++
+ }
+ pte_unmap(page_table);
+ page_cache_release(new_page);
+ page_cache_release(old_page);
+ spin_unlock(&mm->page_table_lock);
+ pte_chain_free(pte_chain);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_free(pte_chain_m);
++#endif
++
+ return VM_FAULT_MINOR;
+
+ no_new_page:
+ pte_chain_free(pte_chain);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_free(pte_chain_m);
++#endif
++
+ no_pte_chain:
+ page_cache_release(old_page);
+ return VM_FAULT_OOM;
+@@ -1187,6 +1278,7 @@
+
+ do_expand:
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
++ gr_learn_resource(current, RLIMIT_FSIZE, offset, 1);
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+@@ -1246,6 +1338,10 @@
+ int ret = VM_FAULT_MINOR;
+ struct pte_chain *pte_chain = NULL;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct pte_chain *pte_chain_m = NULL;
++#endif
++
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ page = lookup_swap_cache(entry);
+@@ -1279,6 +1375,15 @@
+ ret = VM_FAULT_OOM;
+ goto out;
+ }
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_m = pte_chain_alloc(GFP_KERNEL);
++ if (!pte_chain_m) {
++ ret = -ENOMEM;
++ goto out;
++ }
++#endif
++
+ lock_page(page);
+
+ /*
+@@ -1314,10 +1419,21 @@
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, pte);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pte_chain_m = pax_mirror_fault(mm, vma, address, page_table, pte_chain_m);
++#endif
++
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ out:
+ pte_chain_free(pte_chain);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_free(pte_chain_m);
++#endif
++
+ return ret;
+ }
+
+@@ -1336,13 +1452,38 @@
+ struct pte_chain *pte_chain;
+ int ret;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct pte_chain *pte_chain_m = NULL;
++#endif
++
+ pte_chain = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_m = pte_chain_alloc(GFP_ATOMIC | __GFP_NOWARN);
++#endif
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (!pte_chain || !pte_chain_m) {
++#else
+ if (!pte_chain) {
++#endif
++
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (!pte_chain)
++ pte_chain = pte_chain_alloc(GFP_KERNEL);
++ if (!pte_chain_m)
++ pte_chain_m = pte_chain_alloc(GFP_KERNEL);
++ if (!pte_chain || !pte_chain_m)
++ goto no_mem;
++#else
+ pte_chain = pte_chain_alloc(GFP_KERNEL);
+ if (!pte_chain)
+ goto no_mem;
++#endif
++
+ spin_lock(&mm->page_table_lock);
+ page_table = pte_offset_map(pmd, addr);
+ }
+@@ -1382,10 +1523,16 @@
+ set_pte(page_table, entry);
+ /* ignores ZERO_PAGE */
+ pte_chain = page_add_rmap(page, page_table, pte_chain);
+- pte_unmap(page_table);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, entry);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pte_chain_m = pax_mirror_fault(mm, vma, addr, page_table, pte_chain_m);
++#endif
++
++ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ ret = VM_FAULT_MINOR;
+ goto out;
+@@ -1394,6 +1541,11 @@
+ ret = VM_FAULT_OOM;
+ out:
+ pte_chain_free(pte_chain);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_free(pte_chain_m);
++#endif
++
+ return ret;
+ }
+
+@@ -1420,6 +1572,10 @@
+ int sequence = 0;
+ int ret = VM_FAULT_MINOR;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct pte_chain *pte_chain_m = NULL;
++#endif
++
+ if (!vma->vm_ops || !vma->vm_ops->nopage)
+ return do_anonymous_page(mm, vma, page_table,
+ pmd, write_access, address);
+@@ -1444,6 +1600,12 @@
+ if (!pte_chain)
+ goto oom;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_m = pte_chain_alloc(GFP_KERNEL);
++ if (!pte_chain_m)
++ goto oom;
++#endif
++
+ /*
+ * Should we do an early C-O-W break?
+ */
+@@ -1469,6 +1631,11 @@
+ spin_unlock(&mm->page_table_lock);
+ page_cache_release(new_page);
+ pte_chain_free(pte_chain);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_free(pte_chain_m);
++#endif
++
+ goto retry;
+ }
+ page_table = pte_offset_map(pmd, address);
+@@ -1493,6 +1660,15 @@
+ entry = maybe_mkwrite(pte_mkdirty(entry), vma);
+ set_pte(page_table, entry);
+ pte_chain = page_add_rmap(new_page, page_table, pte_chain);
++
++ /* no need to invalidate: a not-present page shouldn't be cached */
++ update_mmu_cache(vma, address, entry);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pte_chain_m = pax_mirror_fault(mm, vma, address, page_table, pte_chain_m);
++#endif
++
+ pte_unmap(page_table);
+ } else {
+ /* One of our sibling threads was faster, back out. */
+@@ -1502,8 +1678,6 @@
+ goto out;
+ }
+
+- /* no need to invalidate: a not-present page shouldn't be cached */
+- update_mmu_cache(vma, address, entry);
+ spin_unlock(&mm->page_table_lock);
+ goto out;
+ oom:
+@@ -1511,6 +1685,11 @@
+ ret = VM_FAULT_OOM;
+ out:
+ pte_chain_free(pte_chain);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pte_chain_free(pte_chain_m);
++#endif
++
+ return ret;
+ }
+
+@@ -1613,6 +1792,11 @@
+ pgd_t *pgd;
+ pmd_t *pmd;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ unsigned long address_m = 0UL;
++ struct vm_area_struct * vma_m = NULL;
++#endif
++
+ __set_current_state(TASK_RUNNING);
+ pgd = pgd_offset(mm, address);
+
+@@ -1626,6 +1810,48 @@
+ * and the SMP-safe atomic PTE updates.
+ */
+ spin_lock(&mm->page_table_lock);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ pgd_t *pgd_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m;
++
++ address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, address_m);
++
++ /* PaX: sanity checks */
++ if (!vma_m) {
++ spin_unlock(&mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: fault bug, %08lx, %p, %08lx, %p\n",
++ address, vma, address_m, vma_m);
++ return VM_FAULT_SIGBUS;
++ } else if (!(vma_m->vm_flags & VM_MIRROR) ||
++ vma_m->vm_start != address_m ||
++ vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start)
++ {
++ spin_unlock(&mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: fault bug2, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return VM_FAULT_SIGBUS;
++ }
++
++ address_m = address + (unsigned long)vma->vm_private_data;
++ pgd_m = pgd_offset(mm, address_m);
++ pmd_m = pmd_alloc(mm, pgd_m, address_m);
++ if (!pmd_m) {
++ spin_unlock(&mm->page_table_lock);
++ return VM_FAULT_OOM;
++ }
++ pte_m = pte_alloc_map(mm, pmd_m, address_m);
++ if (!pte_m) {
++ spin_unlock(&mm->page_table_lock);
++ return VM_FAULT_OOM;
++ }
++ pte_unmap(pte_m);
++ }
++#endif
++
+ pmd = pmd_alloc(mm, pgd, address);
+
+ if (pmd) {
+diff -urN linux-2.6.5/mm/mlock.c linux-2.6.5-new/mm/mlock.c
+--- linux-2.6.5/mm/mlock.c 2004-04-03 22:36:16.000000000 -0500
++++ linux-2.6.5-new/mm/mlock.c 2004-04-14 09:15:12.000000000 -0400
+@@ -7,11 +7,43 @@
+
+ #include <linux/mman.h>
+ #include <linux/mm.h>
++#include <linux/grsecurity.h>
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static int __mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags);
+
+ static int mlock_fixup(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, unsigned int newflags)
+ {
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(vma->vm_mm, start_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + (unsigned long)vma->vm_private_data;
++ end_m = end + (unsigned long)vma->vm_private_data;
++ error = __mlock_fixup(vma_m, start_m, end_m, newflags);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: mlock bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++ return __mlock_fixup(vma, start, end, newflags);
++}
++
++static int __mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags)
++#else
++static int mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags)
++#endif
++{
+ struct mm_struct * mm = vma->vm_mm;
+ int pages;
+ int ret = 0;
+@@ -65,6 +97,17 @@
+ return -EINVAL;
+ if (end == start)
+ return 0;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma(current->mm, start);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -115,6 +158,7 @@
+ lock_limit >>= PAGE_SHIFT;
+
+ /* check against resource limits */
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
+ if (locked <= lock_limit)
+ error = do_mlock(start, len, 1);
+ up_write(&current->mm->mmap_sem);
+@@ -151,6 +195,16 @@
+ for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
+ unsigned int newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (vma->vm_end > SEGMEXEC_TASK_SIZE)
++ break;
++ } else
++#endif
++
++ if (vma->vm_end > TASK_SIZE)
++ break;
++
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
+@@ -174,6 +228,7 @@
+ lock_limit >>= PAGE_SHIFT;
+
+ ret = -ENOMEM;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
+ if (current->mm->total_vm <= lock_limit)
+ ret = do_mlockall(flags);
+ out:
+diff -urN linux-2.6.5/mm/mmap.c linux-2.6.5-new/mm/mmap.c
+--- linux-2.6.5/mm/mmap.c 2004-04-03 22:37:25.000000000 -0500
++++ linux-2.6.5-new/mm/mmap.c 2004-04-14 10:05:32.000000000 -0400
+@@ -21,6 +21,7 @@
+ #include <linux/profile.h>
+ #include <linux/module.h>
+ #include <linux/mount.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+@@ -121,6 +122,7 @@
+
+ /* Check against rlimit.. */
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
++ gr_learn_resource(current, RLIMIT_DATA, brk - mm->start_data, 1);
+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+ goto out;
+
+@@ -327,6 +329,12 @@
+ static inline int is_mergeable_vma(struct vm_area_struct *vma,
+ struct file *file, unsigned long vm_flags)
+ {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((vma->vm_flags | vm_flags) & VM_MIRROR)
++ return 0;
++#endif
++
+ if (vma->vm_ops && vma->vm_ops->close)
+ return 0;
+ if (vma->vm_file != file)
+@@ -494,6 +502,28 @@
+ int accountable = 1;
+ unsigned long charged = 0;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct * vma_m = NULL;
++
++ if (flags & MAP_MIRROR) {
++ /* PaX: sanity checks, to be removed when proved to be stable */
++ if (file || len || ((flags & MAP_TYPE) != MAP_PRIVATE))
++ return -EINVAL;
++
++ vma_m = find_vma(mm, pgoff);
++
++ if (!vma_m || is_vm_hugetlb_page(vma_m) ||
++ vma_m->vm_start != pgoff ||
++ (vma_m->vm_flags & VM_MIRROR) ||
++ (!(vma_m->vm_flags & VM_WRITE) && (prot & PROT_WRITE)))
++ return -EINVAL;
++
++ file = vma_m->vm_file;
++ pgoff = vma_m->vm_pgoff;
++ len = vma_m->vm_end - vma_m->vm_start;
++ }
++#endif
++
+ if (file) {
+ if (is_file_hugepages(file))
+ accountable = 0;
+@@ -535,6 +565,30 @@
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++ if (file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
++ vm_flags &= ~VM_MAYEXEC;
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->flags & PF_PAX_MPROTECT) {
++ if (!file || (prot & PROT_WRITE))
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++ else
++ vm_flags &= ~VM_MAYWRITE;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (file && (flags & MAP_MIRROR) && (vm_flags & VM_EXEC))
++ vma_m->vm_flags &= ~VM_MAYWRITE;
++#endif
++
++ }
++#endif
++
++ }
++#endif
++
+ if (flags & MAP_LOCKED) {
+ if (!capable(CAP_IPC_LOCK))
+ return -EPERM;
+@@ -544,6 +598,7 @@
+ if (vm_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+@@ -599,6 +654,9 @@
+ if (error)
+ return error;
+
++ if (!gr_acl_handle_mmap(file, prot))
++ return -EACCES;
++
+ /* Clear old maps */
+ error = -ENOMEM;
+ munmap_back:
+@@ -610,6 +668,7 @@
+ }
+
+ /* Check against address space limit. */
++ gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1);
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+@@ -650,6 +709,13 @@
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = vm_flags;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((file || !(current->flags & PF_PAX_PAGEEXEC)) && (vm_flags & (VM_READ|VM_WRITE)))
++ vma->vm_page_prot = protection_map[(vm_flags | VM_EXEC) & 0x0f];
++ else
++#endif
++
+ vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_ops = NULL;
+ vma->vm_pgoff = pgoff;
+@@ -679,6 +745,14 @@
+ goto free_vma;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (flags & MAP_MIRROR) {
++ vma_m->vm_flags |= VM_MIRROR;
++ vma_m->vm_private_data = (void *)(vma->vm_start - vma_m->vm_start);
++ vma->vm_private_data = (void *)(vma_m->vm_start - vma->vm_start);
++ }
++#endif
++
+ /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
+ * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
+ * that memory reservation must be checked; but that reservation
+@@ -759,12 +833,28 @@
+ struct vm_area_struct *vma;
+ unsigned long start_addr;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && len > SEGMEXEC_TASK_SIZE)
++ return -ENOMEM;
++ else
++#endif
++
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE-len < addr)
++ return -ENOMEM;
++#endif
++
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+@@ -780,6 +870,13 @@
+ * some holes.
+ */
+ if (start_addr != TASK_UNMAPPED_BASE) {
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ start_addr = addr = TASK_UNMAPPED_BASE + mm->delta_mmap;
++ else
++#endif
++
+ start_addr = addr = TASK_UNMAPPED_BASE;
+ goto full_search;
+ }
+@@ -939,10 +1036,18 @@
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+-
++
++ gr_learn_resource(current, RLIMIT_STACK, address - vma->vm_start, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma->vm_mm->total_vm + grow) << PAGE_SHIFT, 1);
++ if (vma->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma->vm_mm->locked_vm + grow) << PAGE_SHIFT, 1);
++
+ if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
+ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+- current->rlim[RLIMIT_AS].rlim_cur) {
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma->vm_flags & VM_LOCKED) &&
++ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ vm_unacct_memory(grow);
+ return -ENOMEM;
+@@ -993,10 +1098,62 @@
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+-
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m;
++ unsigned long address_m;
++
++ address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(vma->vm_mm, address_m);
++ if (!vma_m || vma_m->vm_start != address_m ||
++ !(vma_m->vm_flags & VM_MIRROR) ||
++ vma->vm_end - vma->vm_start !=
++ vma_m->vm_end - vma_m->vm_start) {
++ spin_unlock(&vma->vm_mm->page_table_lock);
++ vm_unacct_memory(grow);
++ printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return -ENOMEM;
++ }
++
++ address_m = address + (unsigned long)vma->vm_private_data;
++
++ gr_learn_resource(current, RLIMIT_STACK, vma_m->vm_end - address_m, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT, 1);
++ if (vma_m->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT, 1);
++
++ if (vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur ||
++ ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma_m->vm_flags & VM_LOCKED) &&
++ ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
++ spin_unlock(&vma->vm_mm->page_table_lock);
++ vm_unacct_memory(grow);
++ return -ENOMEM;
++ }
++
++ vma_m->vm_start = address_m;
++ vma_m->vm_pgoff -= grow;
++ vma_m->vm_mm->total_vm += grow;
++ if (vma_m->vm_flags & VM_LOCKED)
++ vma_m->vm_mm->locked_vm += grow;
++ } else
++#endif
++
++ gr_learn_resource(current, RLIMIT_STACK, vma->vm_end - address, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma->vm_mm->total_vm + grow) << PAGE_SHIFT, 1);
++ if (vma->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma->vm_mm->locked_vm + grow) << PAGE_SHIFT, 1);
++
+ if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+- current->rlim[RLIMIT_AS].rlim_cur) {
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma->vm_flags & VM_LOCKED) &&
++ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ vm_unacct_memory(grow);
+ return -ENOMEM;
+@@ -1108,15 +1265,15 @@
+ {
+ size_t len = area->vm_end - area->vm_start;
+
+- area->vm_mm->total_vm -= len >> PAGE_SHIFT;
++ mm->total_vm -= len >> PAGE_SHIFT;
+ if (area->vm_flags & VM_LOCKED)
+- area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
++ mm->locked_vm -= len >> PAGE_SHIFT;
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+- area->vm_start < area->vm_mm->free_area_cache)
+- area->vm_mm->free_area_cache = area->vm_start;
++ area->vm_start < mm->free_area_cache)
++ mm->free_area_cache = area->vm_start;
+
+ remove_shared_vm_struct(area);
+
+@@ -1178,21 +1335,73 @@
+ */
+ static void
+ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+- struct vm_area_struct *prev, unsigned long end)
++ struct vm_area_struct *prev, unsigned long *start, unsigned long *end)
+ {
+ struct vm_area_struct **insertion_point;
+ struct vm_area_struct *tail_vma = NULL;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ unsigned long start_m;
++ struct vm_area_struct *vma_m, *head_vma = vma, *mirrors = NULL, *head_vma_m = NULL;
++#endif
++
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ do {
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((vma->vm_flags & VM_MIRROR) &&
++ vma->vm_start + (unsigned long)vma->vm_private_data >= *start &&
++ vma->vm_start + (unsigned long)vma->vm_private_data < *end)
++ {
++ mm->mmap_cache = NULL; /* Kill the cache. */
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, start_m);
++ if (vma_m && (vma_m->vm_flags & VM_MIRROR) && vma_m->vm_start == start_m) {
++ vma->vm_flags &= ~VM_MIRROR;
++ vma_m->vm_flags &= ~VM_MIRROR;
++ } else
++ printk("PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, vma->vm_start);
++ }
++#endif
++
+ tail_vma = vma;
+ vma = vma->vm_next;
+- } while (vma && vma->vm_start < end);
++ } while (vma && vma->vm_start < *end);
+ *insertion_point = vma;
+ tail_vma->vm_next = NULL;
+ mm->mmap_cache = NULL; /* Kill the cache. */
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ for (; head_vma; head_vma = head_vma->vm_next) {
++ struct vm_area_struct *prev_m;
++
++ if (!(head_vma->vm_flags & VM_MIRROR))
++ continue;
++
++ start_m = head_vma->vm_start + (unsigned long)head_vma->vm_private_data;
++ vma_m = find_vma_prev(mm, start_m, &prev_m);
++ rb_erase(&vma_m->vm_rb, &mm->mm_rb);
++ mm->map_count--;
++ insertion_point = prev_m ? &prev_m->vm_next : &mm->mmap;
++ *insertion_point = vma_m->vm_next;
++ if (head_vma_m) {
++ mirrors->vm_next = vma_m;
++ mirrors = vma_m;
++ } else
++ head_vma_m = mirrors = vma_m;
++ mirrors->vm_next = NULL;
++ if (vma_m->vm_start < *start)
++ *start = vma_m->vm_start;
++ if (vma_m->vm_end > *end)
++ *end = vma_m->vm_end;
++ mm->mmap_cache = NULL; /* Kill the cache. */
++ }
++ if (head_vma_m)
++ tail_vma->vm_next = head_vma_m;
++#endif
++
+ }
+
+ /*
+@@ -1262,6 +1471,10 @@
+ unsigned long end;
+ struct vm_area_struct *mpnt, *prev, *last;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct *mpnt_m = NULL, *last_m;
++#endif
++
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+@@ -1298,6 +1511,20 @@
+ * places tmp vma above, and higher split_vma places tmp vma below.
+ */
+ if (start > mpnt->vm_start) {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (mpnt->vm_flags & VM_MIRROR) {
++ unsigned long start_m = mpnt->vm_start + (unsigned long)mpnt->vm_private_data;
++
++ mpnt_m = find_vma(mm, start_m);
++ if (!mpnt_m || (!mpnt_m->vm_flags & VM_MIRROR) || mpnt_m->vm_start != start_m)
++ return -EINVAL;
++ start_m = start + (unsigned long)mpnt->vm_private_data;
++ if (split_vma(mm, mpnt_m, start_m, 0))
++ return -ENOMEM;
++ }
++#endif
++
+ if (split_vma(mm, mpnt, start, 0))
+ return -ENOMEM;
+ prev = mpnt;
+@@ -1306,6 +1533,20 @@
+ /* Does it split the last one? */
+ last = find_vma(mm, end);
+ if (last && end > last->vm_start) {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (last->vm_flags & VM_MIRROR) {
++ unsigned long end_m = last->vm_start + (unsigned long)last->vm_private_data;
++
++ last_m = find_vma(mm, end_m);
++ if (!last_m || (!last_m->vm_flags & VM_MIRROR) || last_m->vm_start != end_m)
++ return -EINVAL;
++ end_m = end + (unsigned long)last->vm_private_data;
++ if (split_vma(mm, last_m, end_m, 1))
++ return -ENOMEM;
++ }
++#endif
++
+ if (split_vma(mm, last, end, 1))
+ return -ENOMEM;
+ }
+@@ -1315,7 +1556,7 @@
+ * Remove the vma's, and unmap the actual pages
+ */
+ spin_lock(&mm->page_table_lock);
+- detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
++ detach_vmas_to_be_unmapped(mm, mpnt, prev, &start, &end);
+ unmap_region(mm, mpnt, prev, start, end);
+ spin_unlock(&mm->page_table_lock);
+
+@@ -1332,6 +1573,12 @@
+ int ret;
+ struct mm_struct *mm = current->mm;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+@@ -1343,7 +1590,31 @@
+ * anonymous maps. eventually we may be able to do some
+ * brk-specific accounting here.
+ */
++#if defined(CONFIG_PAX_SEGMEXEC) && defined(CONFIG_PAX_MPROTECT)
++unsigned long __do_brk(unsigned long addr, unsigned long len);
++
++unsigned long do_brk(unsigned long addr, unsigned long len)
++{
++ unsigned long ret;
++
++ ret = __do_brk(addr, len);
++ if (ret == addr && (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_MPROTECT)) == PF_PAX_SEGMEXEC) {
++ unsigned long ret_m;
++
++ ret_m = do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr);
++ if (ret_m > TASK_SIZE) {
++ do_munmap(current->mm, addr, len);
++ ret = ret_m;
++ }
++ }
++
++ return ret;
++}
++
++unsigned long __do_brk(unsigned long addr, unsigned long len)
++#else
+ unsigned long do_brk(unsigned long addr, unsigned long len)
++#endif
+ {
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+@@ -1354,6 +1625,13 @@
+ if (!len)
+ return addr;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr)
++ return -EINVAL;
++ } else
++#endif
++
+ if ((addr + len) > TASK_SIZE || (addr + len) < addr)
+ return -EINVAL;
+
+@@ -1363,6 +1641,7 @@
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+@@ -1379,6 +1658,7 @@
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
++ gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1);
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+@@ -1391,6 +1671,18 @@
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->flags & PF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ /* Can we just expand an old anonymous mapping? */
+ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len,
+ flags, NULL, 0))
+@@ -1409,6 +1701,13 @@
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = flags;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE)))
++ vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f];
++ else
++#endif
++
+ vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma->vm_ops = NULL;
+ vma->vm_pgoff = 0;
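Several of the mmap.c hunks above (do_mmap_pgoff, the stack expansion path, detach_vmas_to_be_unmapped and do_munmap) revolve around PaX VM mirrors: two equally sized VMAs mapping the same pages at a fixed distance, each storing the distance to its twin in vm_private_data. Any address in one region translates to its counterpart by adding that stored delta, which is exactly the arithmetic the printk'd VMMIRROR sanity checks verify. A hypothetical user-space sketch of the bookkeeping; struct region and the function names are illustrative, not kernel API.

    #include <stdio.h>

    /* Toy stand-in for the two vm_area_struct fields the patch relies on. */
    struct region {
        unsigned long start;
        unsigned long end;
        unsigned long twin_delta;   /* what the patch keeps in vm_private_data */
    };

    /* Pair two equally sized regions so each can find the other. */
    static void pair_mirrors(struct region *a, struct region *b)
    {
        a->twin_delta = b->start - a->start;  /* unsigned wrap-around is fine */
        b->twin_delta = a->start - b->start;  /* adding the delta undoes it   */
    }

    /* Translate an address inside one region into the mirrored region. */
    static unsigned long mirror_addr(const struct region *r, unsigned long addr)
    {
        return addr + r->twin_delta;
    }

    int main(void)
    {
        struct region lo = { 0x08048000UL, 0x08050000UL, 0 };
        struct region hi = { 0x68048000UL, 0x68050000UL, 0 };

        pair_mirrors(&lo, &hi);
        printf("%#lx mirrors to %#lx\n",
               lo.start + 0x123, mirror_addr(&lo, lo.start + 0x123));
        printf("%#lx mirrors back to %#lx\n",
               hi.start + 0x123, mirror_addr(&hi, hi.start + 0x123));
        return 0;
    }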
+diff -urN linux-2.6.5/mm/mprotect.c linux-2.6.5-new/mm/mprotect.c
+--- linux-2.6.5/mm/mprotect.c 2004-04-03 22:38:28.000000000 -0500
++++ linux-2.6.5-new/mm/mprotect.c 2004-04-14 09:58:52.000000000 -0400
+@@ -16,6 +16,12 @@
+ #include <linux/fs.h>
+ #include <linux/highmem.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#include <linux/fs.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+@@ -150,6 +156,46 @@
+ return 1;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static int __mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
++ unsigned long start, unsigned long end, unsigned int newflags);
++
++static int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m, * prev_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + (unsigned long)vma->vm_private_data;
++ end_m = end + (unsigned long)vma->vm_private_data;
++ if ((current->flags & PF_PAX_SEGMEXEC) && !(newflags & VM_EXEC))
++ error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, vma_m->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
++ else
++ error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, newflags);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: mprotect bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++
++ return __mprotect_fixup(vma, pprev, start, end, newflags);
++}
++
++static int __mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ struct mm_struct * mm = vma->vm_mm;
++ unsigned long charged = 0;
++ pgprot_t newprot;
++ int error;
++#else
+ static int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned int newflags)
+@@ -163,6 +209,7 @@
+ *pprev = vma;
+ return 0;
+ }
++#endif
+
+ /*
+ * If we make a private mapping writable we increase our commit;
+@@ -182,6 +229,12 @@
+ }
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE)))
++ newprot = protection_map[(newflags | VM_EXEC) & 0xf];
++ else
++#endif
++
+ newprot = protection_map[newflags & 0xf];
+
+ if (start == vma->vm_start) {
+@@ -222,6 +275,69 @@
+ return error;
+ }
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static inline void pax_handle_maywrite(struct vm_area_struct * vma, unsigned long start)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p, p_dyn;
++ elf_dyn dyn;
++ unsigned long i, j = 65536UL / sizeof(struct elf_phdr);
++
++#ifndef CONFIG_PAX_NOELFRELOCS
++ if ((vma->vm_start != start) ||
++ !vma->vm_file ||
++ !(vma->vm_flags & VM_MAYEXEC) ||
++ (vma->vm_flags & VM_MAYNOTWRITE))
++#endif
++
++ return;
++
++ if (0 > kernel_read(vma->vm_file, 0UL, (char*)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) ||
++#else
++ elf_h.e_type != ET_DYN ||
++#endif
++
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > j)
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (0 > kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char*)&elf_p, sizeof(elf_p)))
++ return;
++ if (elf_p.p_type == PT_DYNAMIC) {
++ p_dyn = elf_p;
++ j = i;
++ }
++ }
++ if (elf_h.e_phnum <= j)
++ return;
++
++ i = 0UL;
++ do {
++ if (0 > kernel_read(vma->vm_file, p_dyn.p_offset + i*sizeof(dyn), (char*)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE;
++ gr_log_textrel(vma);
++ return;
++ }
++ i++;
++ } while (dyn.d_tag != DT_NULL);
++ return;
++}
++#endif
++
+ asmlinkage long
+ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+ {
+@@ -239,6 +355,17 @@
+ end = start + len;
+ if (end < start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
+ return -EINVAL;
+ if (end == start)
+@@ -271,6 +398,16 @@
+ }
+ }
+
++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
++ error = -EACCES;
++ goto out;
++ }
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE))
++ pax_handle_maywrite(vma, start);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned int newflags;
+ int last = 0;
+@@ -289,6 +426,12 @@
+ goto out;
+ }
+
++#ifdef CONFIG_PAX_MPROTECT
++		/* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
++ if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE))
++ newflags &= ~VM_MAYWRITE;
++#endif
++
+ error = security_file_mprotect(vma, prot);
+ if (error)
+ goto out;
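pax_handle_maywrite() above decides whether a non-PIC ELF object genuinely needs text relocations before granting it a one-time VM_MAYWRITE (paired with VM_MAYNOTWRITE so the grant cannot be repeated): it walks the program headers for PT_DYNAMIC and then scans the dynamic entries for DT_TEXTREL, or DF_TEXTREL inside DT_FLAGS. The same scan can be reproduced from user space with nothing but <elf.h>; this is a simplified 64-bit-only sketch for inspection purposes, not the kernel routine.

    #include <elf.h>
    #include <stdio.h>
    #include <string.h>

    /* 1 if the ELF file needs text relocations, 0 if not, -1 on error. */
    static int needs_textrel(const char *path)
    {
        FILE *f = fopen(path, "rb");
        Elf64_Ehdr eh;
        Elf64_Phdr ph, dyn_ph;
        Elf64_Dyn dyn;
        int found_dyn = 0, rc = 0;
        size_t i;

        memset(&dyn_ph, 0, sizeof(dyn_ph));
        if (!f)
            return -1;
        if (fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG) != 0 ||
            eh.e_ident[EI_CLASS] != ELFCLASS64) {
            fclose(f);
            return -1;
        }
        for (i = 0; i < eh.e_phnum; i++) {       /* find PT_DYNAMIC */
            if (fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET) != 0 ||
                fread(&ph, sizeof(ph), 1, f) != 1) {
                fclose(f);
                return -1;
            }
            if (ph.p_type == PT_DYNAMIC) {
                dyn_ph = ph;
                found_dyn = 1;
            }
        }
        for (i = 0; found_dyn && i * sizeof(dyn) < dyn_ph.p_filesz; i++) {
            if (fseek(f, dyn_ph.p_offset + i * sizeof(dyn), SEEK_SET) != 0 ||
                fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
                break;
            if (dyn.d_tag == DT_TEXTREL ||
                (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
                rc = 1;
                break;
            }
        }
        fclose(f);
        return rc;
    }

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
            return 2;
        }
        printf("%s: %d\n", argv[1], needs_textrel(argv[1]));
        return 0;
    }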
+diff -urN linux-2.6.5/mm/mremap.c linux-2.6.5-new/mm/mremap.c
+--- linux-2.6.5/mm/mremap.c 2004-04-03 22:37:23.000000000 -0500
++++ linux-2.6.5-new/mm/mremap.c 2004-04-14 09:15:12.000000000 -0400
+@@ -329,6 +329,18 @@
+ if (!new_len)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (new_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-new_len ||
++ old_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-old_len)
++ goto out;
++ } else
++#endif
++
++ if (new_len > TASK_SIZE || addr > TASK_SIZE-new_len ||
++ old_len > TASK_SIZE || addr > TASK_SIZE-old_len)
++ goto out;
++
+ /* new_addr is only valid if MREMAP_FIXED is specified */
+ if (flags & MREMAP_FIXED) {
+ if (new_addr & ~PAGE_MASK)
+@@ -336,6 +348,13 @@
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (new_len > SEGMEXEC_TASK_SIZE || new_addr > SEGMEXEC_TASK_SIZE-new_len)
++ goto out;
++ } else
++#endif
++
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+
+@@ -379,6 +398,16 @@
+ ret = -EINVAL;
+ goto out;
+ }
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) &&
++ (vma->vm_flags & VM_MIRROR))
++ {
++ ret = -EINVAL;
++ goto out;
++ }
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto out;
+diff -urN linux-2.6.5/net/ipv4/af_inet.c linux-2.6.5-new/net/ipv4/af_inet.c
+--- linux-2.6.5/net/ipv4/af_inet.c 2004-04-03 22:36:24.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/af_inet.c 2004-04-14 09:06:29.000000000 -0400
+@@ -87,6 +87,7 @@
+ #include <linux/init.h>
+ #include <linux/poll.h>
+ #include <linux/netfilter_ipv4.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/system.h>
+@@ -387,7 +388,12 @@
+ else
+ inet->pmtudisc = IP_PMTUDISC_WANT;
+
+- inet->id = 0;
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ inet->id = htons(ip_randomid());
++ else
++#endif
++ inet->id = 0;
+
+ sock_init_data(sock, sk);
+ sk_set_owner(sk, THIS_MODULE);
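The af_inet.c hunk above seeds inet->id with a random 16-bit value when CONFIG_GRKERNSEC_RANDID is enabled, instead of starting every new socket's IP ID counter at zero; predictable IP IDs are what make idle (zombie) scans and traffic counting possible. ip_randomid() is supplied elsewhere in the grsecurity patch; a user-space approximation of the idea, with rand() standing in for the kernel entropy source, looks like this.

    #include <arpa/inet.h>
    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Toy generator; a kernel would draw from its entropy pool, not rand(). */
    static uint16_t toy_random_ip_id(void)
    {
        return (uint16_t)(rand() & 0xffff);
    }

    int main(void)
    {
        srand((unsigned)time(NULL));
        /* The ID travels big-endian on the wire, hence htons() in the patch. */
        uint16_t id = htons(toy_random_ip_id());
        printf("ip id (network order): 0x%04x\n", id);
        return 0;
    }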
+diff -urN linux-2.6.5/net/ipv4/ip_output.c linux-2.6.5-new/net/ipv4/ip_output.c
+--- linux-2.6.5/net/ipv4/ip_output.c 2004-04-03 22:38:22.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/ip_output.c 2004-04-14 09:06:29.000000000 -0400
+@@ -64,6 +64,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/init.h>
++#include <linux/grsecurity.h>
+
+ #include <net/snmp.h>
+ #include <net/ip.h>
+@@ -1161,6 +1162,12 @@
+ iph->tos = inet->tos;
+ iph->tot_len = htons(skb->len);
+ iph->frag_off = df;
++
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ iph->id = htons(ip_randomid());
++ else
++#endif
+ if (!df) {
+ __ip_select_ident(iph, &rt->u.dst, 0);
+ } else {
+diff -urN linux-2.6.5/net/ipv4/netfilter/Kconfig linux-2.6.5-new/net/ipv4/netfilter/Kconfig
+--- linux-2.6.5/net/ipv4/netfilter/Kconfig 2004-04-03 22:37:45.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/netfilter/Kconfig 2004-04-14 09:06:29.000000000 -0400
+@@ -225,6 +225,21 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_MATCH_STEALTH
++ tristate "stealth match support"
++ depends on IP_NF_IPTABLES
++ help
++	  Enabling this option will drop all SYN packets coming to unserved TCP
++	  ports as well as all packets coming to unserved UDP ports.  If you
++	  are using your system to route any type of packets (e.g. via NAT),
++	  you should put this module at the end of your ruleset, since it only
++	  checks whether the destination port is being listened on by this
++	  machine itself; it does not take into account that a packet might be
++	  destined for a host on your internal network when you are using NAT,
++	  for instance.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+ config IP_NF_MATCH_HELPER
+ tristate "Helper match support"
+ depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+diff -urN linux-2.6.5/net/ipv4/netfilter/Makefile linux-2.6.5-new/net/ipv4/netfilter/Makefile
+--- linux-2.6.5/net/ipv4/netfilter/Makefile 2004-04-03 22:36:27.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/netfilter/Makefile 2004-04-14 09:06:29.000000000 -0400
+@@ -64,6 +64,8 @@
+ obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
+ obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
+
++obj-$(CONFIG_IP_NF_MATCH_STEALTH) += ipt_stealth.o
++
+ obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
+
+ # targets
+diff -urN linux-2.6.5/net/ipv4/netfilter/ipt_stealth.c linux-2.6.5-new/net/ipv4/netfilter/ipt_stealth.c
+--- linux-2.6.5/net/ipv4/netfilter/ipt_stealth.c 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/netfilter/ipt_stealth.c 2004-04-14 09:06:29.000000000 -0400
+@@ -0,0 +1,112 @@
++/* Kernel module to add stealth support.
++ *
++ * Copyright (C) 2002 Brad Spengler <spender@grsecurity.net>
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/net.h>
++#include <linux/sched.h>
++#include <linux/inet.h>
++#include <linux/stddef.h>
++
++#include <net/ip.h>
++#include <net/sock.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++#include <net/route.h>
++#include <net/inet_common.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_LICENSE("GPL");
++
++extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ struct iphdr *ip = skb->nh.iph;
++ struct tcphdr th;
++ struct udphdr uh;
++ struct sock *sk = NULL;
++
++ if (!ip || offset) return 0;
++
++ switch(ip->protocol) {
++ case IPPROTO_TCP:
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &th, sizeof(th)) < 0) {
++ *hotdrop = 1;
++ return 0;
++ }
++ if (!(th.syn && !th.ack)) return 0;
++ sk = tcp_v4_lookup_listener(ip->daddr, ntohs(th.dest), ((struct rtable*)skb->dst)->rt_iif);
++ break;
++ case IPPROTO_UDP:
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &uh, sizeof(uh)) < 0) {
++ *hotdrop = 1;
++ return 0;
++ }
++ sk = udp_v4_lookup(ip->saddr, uh.source, ip->daddr, uh.dest, skb->dev->ifindex);
++ break;
++ default:
++ return 0;
++ }
++
++	if(!sk) // port is not being listened on, match this
++ return 1;
++ else {
++ sock_put(sk);
++ return 0;
++ }
++}
++
++/* Called when user tries to insert an entry of this type. */
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ if (matchsize != IPT_ALIGN(0))
++ return 0;
++
++ if(((ip->proto == IPPROTO_TCP && !(ip->invflags & IPT_INV_PROTO)) ||
++ ((ip->proto == IPPROTO_UDP) && !(ip->invflags & IPT_INV_PROTO)))
++ && (hook_mask & (1 << NF_IP_LOCAL_IN)))
++ return 1;
++
++ printk("stealth: Only works on TCP and UDP for the INPUT chain.\n");
++
++ return 0;
++}
++
++
++static struct ipt_match stealth_match = {
++ .name = "stealth",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = NULL,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&stealth_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&stealth_match);
++}
++
++module_init(init);
++module_exit(fini);
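ipt_stealth.c above is the kernel side of the stealth match offered in the netfilter Kconfig hunk: a packet matches when it is a TCP SYN without ACK, or any UDP datagram, aimed at a local port with no listener, so rules like "iptables -A INPUT -p tcp -m stealth -j DROP" plus the UDP equivalent (assuming the corresponding user-space iptables extension is installed) drop probes silently instead of answering with a RST or an ICMP port-unreachable. The decision itself is small; here it is sketched outside the kernel, where has_listener() is a toy stand-in for tcp_v4_lookup_listener()/udp_v4_lookup().

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    enum proto { PROTO_TCP, PROTO_UDP, PROTO_OTHER };

    /* Toy lookup: pretend only TCP port 22 and UDP port 53 are served here. */
    static bool has_listener(enum proto p, uint16_t dport)
    {
        if (p == PROTO_TCP)
            return dport == 22;
        if (p == PROTO_UDP)
            return dport == 53;
        return false;
    }

    /* True when the packet should match (and typically be dropped). */
    static bool stealth_match(enum proto p, bool syn, bool ack, uint16_t dport)
    {
        switch (p) {
        case PROTO_TCP:
            if (!(syn && !ack))       /* only fresh connection attempts */
                return false;
            return !has_listener(PROTO_TCP, dport);
        case PROTO_UDP:
            return !has_listener(PROTO_UDP, dport);
        default:
            return false;             /* other protocols are left alone */
        }
    }

    int main(void)
    {
        printf("SYN to tcp/22:  %d\n", stealth_match(PROTO_TCP, true, false, 22));
        printf("SYN to tcp/23:  %d\n", stealth_match(PROTO_TCP, true, false, 23));
        printf("UDP to udp/123: %d\n", stealth_match(PROTO_UDP, false, false, 123));
        return 0;
    }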
+diff -urN linux-2.6.5/net/ipv4/tcp_ipv4.c linux-2.6.5-new/net/ipv4/tcp_ipv4.c
+--- linux-2.6.5/net/ipv4/tcp_ipv4.c 2004-04-03 22:36:55.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/tcp_ipv4.c 2004-04-14 09:06:29.000000000 -0400
+@@ -62,6 +62,7 @@
+ #include <linux/jhash.h>
+ #include <linux/init.h>
+ #include <linux/times.h>
++#include <linux/grsecurity.h>
+
+ #include <net/icmp.h>
+ #include <net/tcp.h>
+@@ -224,9 +225,16 @@
+ spin_lock(&tcp_portalloc_lock);
+ rover = tcp_port_rover;
+ do {
+- rover++;
+- if (rover < low || rover > high)
+- rover = low;
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ if (grsec_enable_randsrc && (high > low)) {
++ rover = low + (get_random_long() % (high - low));
++ } else
++#endif
++ {
++ rover++;
++ if (rover < low || rover > high)
++ rover = low;
++ }
+ head = &tcp_bhash[tcp_bhashfn(rover)];
+ spin_lock(&head->lock);
+ tb_for_each(tb, node, &head->chain)
+@@ -537,6 +545,11 @@
+
+ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
+ {
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ if (likely(grsec_enable_randisn))
++ return ip_randomisn();
++ else
++#endif
+ return secure_tcp_sequence_number(skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ skb->h.th->dest,
+@@ -671,10 +684,17 @@
+ rover = tcp_port_rover;
+
+ do {
+- rover++;
+- if ((rover < low) || (rover > high))
+- rover = low;
+- head = &tcp_bhash[tcp_bhashfn(rover)];
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ if (grsec_enable_randsrc && (high > low)) {
++ rover = low + (get_random_long() % (high - low));
++ } else
++#endif
++ {
++ rover++;
++ if ((rover < low) || (rover > high))
++ rover = low;
++ }
++ head = &tcp_bhash[tcp_bhashfn(rover)];
+ spin_lock(&head->lock);
+
+ /* Does not bother with rcv_saddr checks,
+@@ -724,6 +744,15 @@
+ }
+ spin_unlock(&head->lock);
+
++#ifdef CONFIG_GRKERNSEC
++ gr_del_task_from_ip_table(current);
++ current->gr_saddr = inet_sk(sk)->rcv_saddr;
++ current->gr_daddr = inet_sk(sk)->daddr;
++ current->gr_sport = inet_sk(sk)->sport;
++ current->gr_dport = inet_sk(sk)->dport;
++ gr_add_to_task_ip_table(current);
++#endif
++
+ if (tw) {
+ tcp_tw_deschedule(tw);
+ tcp_tw_put(tw);
+@@ -843,13 +872,24 @@
+ tcp_v4_setup_caps(sk, &rt->u.dst);
+ tp->ext2_header_len = rt->u.dst.header_len;
+
+- if (!tp->write_seq)
++ if (!tp->write_seq) {
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ if (likely(grsec_enable_randisn))
++ tp->write_seq = ip_randomisn();
++ else
++#endif
+ tp->write_seq = secure_tcp_sequence_number(inet->saddr,
+ inet->daddr,
+ inet->sport,
+ usin->sin_port);
++ }
+
+- inet->id = tp->write_seq ^ jiffies;
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ inet->id = htons(ip_randomid());
++ else
++#endif
++ inet->id = tp->write_seq ^ jiffies;
+
+ err = tcp_connect(sk);
+ rt = NULL;
+@@ -1593,7 +1633,13 @@
+ if (newinet->opt)
+ newtp->ext_header_len = newinet->opt->optlen;
+ newtp->ext2_header_len = dst->header_len;
+- newinet->id = newtp->write_seq ^ jiffies;
++
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ newinet->id = htons(ip_randomid());
++ else
++#endif
++ newinet->id = newtp->write_seq ^ jiffies;
+
+ tcp_sync_mss(newsk, dst_pmtu(dst));
+ newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
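Both port-selection hunks in tcp_ipv4.c above (tcp_v4_get_port and the connect-time bucket search) replace the sequentially walking rover with a uniform draw from the ephemeral range when grsec_enable_randsrc is set, and tcp_v4_init_sequence likewise returns a random ISN under CONFIG_GRKERNSEC_RANDISN; both changes aim at making local connection parameters unpredictable to off-path attackers. A user-space sketch of the two port strategies, with rand() as a toy replacement for the kernel's get_random_long() and an illustrative 32768-61000 range:

    #include <stdio.h>
    #include <stdlib.h>
    #include <time.h>

    /* Sequential rover, as the stock kernel walks ports. */
    static unsigned sequential_port(unsigned *rover, unsigned low, unsigned high)
    {
        (*rover)++;
        if (*rover < low || *rover > high)
            *rover = low;
        return *rover;
    }

    /* Randomised pick, as the patch does when grsec_enable_randsrc is on. */
    static unsigned random_port(unsigned low, unsigned high)
    {
        return low + ((unsigned)rand() % (high - low));
    }

    int main(void)
    {
        unsigned rover = 0, low = 32768, high = 61000;

        srand((unsigned)time(NULL));
        printf("sequential: %u %u %u\n",
               sequential_port(&rover, low, high),
               sequential_port(&rover, low, high),
               sequential_port(&rover, low, high));
        printf("random:     %u %u %u\n",
               random_port(low, high), random_port(low, high),
               random_port(low, high));
        return 0;
    }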
+diff -urN linux-2.6.5/net/ipv4/udp.c linux-2.6.5-new/net/ipv4/udp.c
+--- linux-2.6.5/net/ipv4/udp.c 2004-04-03 22:36:25.000000000 -0500
++++ linux-2.6.5-new/net/ipv4/udp.c 2004-04-14 09:06:29.000000000 -0400
+@@ -100,6 +100,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
++#include <linux/grsecurity.h>
+ #include <net/sock.h>
+ #include <net/udp.h>
+ #include <net/icmp.h>
+@@ -108,6 +109,12 @@
+ #include <net/checksum.h>
+ #include <net/xfrm.h>
+
++extern int gr_search_udp_recvmsg(const struct sock *sk,
++ const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk,
++ const struct sockaddr_in *addr);
++
++
+ /*
+ * Snmp MIB for the UDP layer
+ */
+@@ -538,9 +545,16 @@
+ dport = usin->sin_port;
+ if (dport == 0)
+ return -EINVAL;
++
++ if (!gr_search_udp_sendmsg(sk, usin))
++ return -EPERM;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
++
++ if (!gr_search_udp_sendmsg(sk, NULL))
++ return -EPERM;
++
+ daddr = inet->daddr;
+ dport = inet->dport;
+ /* Open fast path for connected socket.
+@@ -792,7 +806,12 @@
+ if (!skb)
+ goto out;
+
+- copied = skb->len - sizeof(struct udphdr);
++ if (!gr_search_udp_recvmsg(sk, skb)) {
++ err = -EPERM;
++ goto out_free;
++ }
++
++ copied = skb->len - sizeof(struct udphdr);
+ if (copied > len) {
+ copied = len;
+ msg->msg_flags |= MSG_TRUNC;
+@@ -901,7 +920,12 @@
+ inet->daddr = rt->rt_dst;
+ inet->dport = usin->sin_port;
+ sk->sk_state = TCP_ESTABLISHED;
+- inet->id = jiffies;
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ inet->id = htons(ip_randomid());
++ else
++#endif
++ inet->id = jiffies;
+
+ sk_dst_set(sk, &rt->u.dst);
+ return(0);
+diff -urN linux-2.6.5/net/socket.c linux-2.6.5-new/net/socket.c
+--- linux-2.6.5/net/socket.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/net/socket.c 2004-04-14 09:06:29.000000000 -0400
+@@ -81,6 +81,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/compat.h>
+ #include <linux/kmod.h>
++#include <linux/in.h>
+
+ #ifdef CONFIG_NET_RADIO
+ #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
+@@ -92,6 +93,18 @@
+ #include <net/sock.h>
+ #include <linux/netfilter.h>
+
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++ const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(const struct socket * sock,
++ const struct sockaddr_in * addr);
++extern int gr_search_bind(const struct socket * sock,
++ const struct sockaddr_in * addr);
++extern int gr_search_socket(const int domain, const int type,
++ const int protocol);
++
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, char __user *buf,
+ size_t size, loff_t pos);
+@@ -872,6 +885,7 @@
+ printk(KERN_DEBUG "sock_close: NULL inode\n");
+ return 0;
+ }
++
+ sock_fasync(-1, filp, 0);
+ sock_release(SOCKET_I(inode));
+ return 0;
+@@ -1092,6 +1106,16 @@
+ int retval;
+ struct socket *sock;
+
++ if(!gr_search_socket(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_handle_sock_all(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = sock_create(family, type, protocol, &sock);
+ if (retval < 0)
+ goto out;
+@@ -1187,11 +1211,23 @@
+ {
+ struct socket *sock;
+ char address[MAX_SOCK_ADDR];
++ struct sockaddr *sck;
+ int err;
+
+ if((sock = sockfd_lookup(fd,&err))!=NULL)
+ {
+ if((err=move_addr_to_kernel(umyaddr,addrlen,address))>=0) {
++ sck = (struct sockaddr *)address;
++ if (!gr_search_bind(sock, (struct sockaddr_in *)sck)) {
++ sockfd_put(sock);
++ return -EACCES;
++ }
++
++ if (gr_handle_sock_server(sck)) {
++ sockfd_put(sock);
++ return -EACCES;
++ }
++
+ err = security_socket_bind(sock, (struct sockaddr *)address, addrlen);
+ if (err) {
+ sockfd_put(sock);
+@@ -1294,6 +1330,7 @@
+ goto out_release;
+
+ security_socket_post_accept(sock, newsock);
++ gr_attach_curr_ip(newsock->sk);
+
+ out_put:
+ sockfd_put(sock);
+@@ -1321,6 +1358,7 @@
+ {
+ struct socket *sock;
+ char address[MAX_SOCK_ADDR];
++ struct sockaddr *sck;
+ int err;
+
+ sock = sockfd_lookup(fd, &err);
+@@ -1330,6 +1368,18 @@
+ if (err < 0)
+ goto out_put;
+
++ sck = (struct sockaddr *)address;
++
++ if (!gr_search_connect(sock, (struct sockaddr_in *)sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
++ if (gr_handle_sock_client(sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
+ err = security_socket_connect(sock, (struct sockaddr *)address, addrlen);
+ if (err)
+ goto out_put;
+@@ -1583,6 +1633,7 @@
+ err=sock->ops->shutdown(sock, how);
+ sockfd_put(sock);
+ }
++
+ return err;
+ }
+
+diff -urN linux-2.6.5/net/sunrpc/xprt.c linux-2.6.5-new/net/sunrpc/xprt.c
+--- linux-2.6.5/net/sunrpc/xprt.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/net/sunrpc/xprt.c 2004-04-14 09:11:24.000000000 -0400
+@@ -58,6 +58,7 @@
+ #include <linux/file.h>
+ #include <linux/workqueue.h>
+ #include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <net/sock.h>
+ #include <net/checksum.h>
+@@ -1322,6 +1323,12 @@
+ */
+ static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
+ {
++
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++ if (grsec_enable_randrpc)
++ return (u32) get_random_long();
++#endif
++
+ return xprt->xid++;
+ }
+
+diff -urN linux-2.6.5/net/unix/af_unix.c linux-2.6.5-new/net/unix/af_unix.c
+--- linux-2.6.5/net/unix/af_unix.c 2004-04-03 22:37:36.000000000 -0500
++++ linux-2.6.5-new/net/unix/af_unix.c 2004-04-14 09:06:29.000000000 -0400
+@@ -120,6 +120,7 @@
+ #include <linux/mount.h>
+ #include <net/checksum.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ int sysctl_unix_max_dgram_qlen = 10;
+
+@@ -683,6 +684,11 @@
+ if (err)
+ goto put_fail;
+
++ if (!gr_acl_handle_unix(nd.dentry, nd.mnt)) {
++ err = -EACCES;
++ goto put_fail;
++ }
++
+ err = -ECONNREFUSED;
+ if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
+ goto put_fail;
+@@ -706,6 +712,13 @@
+ if (u) {
+ struct dentry *dentry;
+ dentry = unix_sk(u)->dentry;
++
++ if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
++ err = -EPERM;
++ sock_put(u);
++ goto fail;
++ }
++
+ if (dentry)
+ touch_atime(unix_sk(u)->mnt, dentry);
+ } else
+@@ -805,9 +818,18 @@
+ */
+ mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
++
++ if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
++ err = -EACCES;
++ goto out_mknod_dput;
++ }
++
+ err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
+ if (err)
+ goto out_mknod_dput;
++
++ gr_handle_create(dentry, nd.mnt);
++
+ up(&nd.dentry->d_inode->i_sem);
+ dput(nd.dentry);
+ nd.dentry = dentry;
+@@ -825,6 +847,10 @@
+ goto out_unlock;
+ }
+
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ sk->sk_peercred.pid = current->pid;
++#endif
++
+ list = &unix_socket_table[addr->hash];
+ } else {
+ list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
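The af_unix.c hunks stamp the binding task's pid into sk_peercred.pid when CONFIG_GRKERNSEC_CHROOT_UNIX is enabled, so that gr_handle_chroot_unix() can later refuse connect() attempts from a chrooted task to sockets created by processes outside its jail. The policy function itself lives elsewhere in grsecurity; the record-at-bind / check-at-connect shape is roughly the following purely illustrative sketch, in which the jail is faked as a pid range and none of the names are the real implementation.

    #include <stdbool.h>
    #include <stdio.h>
    #include <sys/types.h>

    /* Toy model of the one field the patch cares about on a unix socket. */
    struct toy_unix_sock {
        pid_t creator_pid;      /* stamped at bind() time by the patch */
    };

    /* Hypothetical policy: a chrooted caller may only reach sockets whose
     * creator falls inside its own (faked, pid-range) jail. */
    static bool chroot_may_connect(bool caller_chrooted,
                                   const struct toy_unix_sock *sk,
                                   pid_t jail_low, pid_t jail_high)
    {
        if (!caller_chrooted)
            return true;
        return sk->creator_pid >= jail_low && sk->creator_pid <= jail_high;
    }

    int main(void)
    {
        struct toy_unix_sock outside = { .creator_pid = 100 };
        struct toy_unix_sock inside  = { .creator_pid = 4242 };

        printf("chrooted -> outside jail: %d\n",
               chroot_may_connect(true, &outside, 4000, 5000));
        printf("chrooted -> inside jail:  %d\n",
               chroot_may_connect(true, &inside, 4000, 5000));
        return 0;
    }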
+diff -urN linux-2.6.5/security/Kconfig linux-2.6.5-new/security/Kconfig
+--- linux-2.6.5/security/Kconfig 2004-04-03 22:37:37.000000000 -0500
++++ linux-2.6.5-new/security/Kconfig 2004-04-14 09:15:12.000000000 -0400
+@@ -4,6 +4,407 @@
+
+ menu "Security options"
+
++source grsecurity/Kconfig
++
++menu "PaX"
++
++config PAX
++ bool "Enable various PaX features"
++ depends on ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || SPARC32 || SPARC64 || X86 || X86_64
++ help
++ This allows you to enable various PaX features. PaX adds
++ intrusion prevention mechanisms to the kernel that reduce
++ the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++ depends on PAX
++
++config PAX_SOFTMODE
++ bool 'Support soft mode'
++ help
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS support as it
++ is the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
++config PAX_EI_PAX
++ bool 'Use legacy ELF header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'chpax' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft-mode, toolchain does not
++ know about the non-standard use of the ELF header) therefore it
++ has been deprecated in favour of PT_PAX_FLAGS support.
++
++ You should enable this option only if your toolchain does not yet
++ support the new control flag location (PT_PAX_FLAGS) or you still
++ have applications not marked by PT_PAX_FLAGS.
++
++ Note that if you enable PT_PAX_FLAGS marking support as well,
++ it will override the legacy EI_PAX marks.
++
++config PAX_PT_PAX_FLAGS
++ bool 'Use ELF program header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'paxctl' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefits of supporting both soft mode and being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ it will be overridden by the PT_PAX_FLAGS marking.
++
++choice
++ prompt 'MAC system integration'
++ default PAX_NO_ACL_FLAGS
++ help
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_flags() itself,
++ - "hook": if your MAC system uses the pax_set_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++config PAX_NO_ACL_FLAGS
++ bool 'none'
++
++config PAX_HAVE_ACL_FLAGS
++ bool 'direct'
++
++config PAX_HOOK_ACL_FLAGS
++ bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++ depends on PAX
++
++config PAX_NOEXEC
++ bool "Enforce non-executable pages"
++ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || SPARC32 || SPARC64 || X86 || X86_64)
++ help
++ By design some architectures do not allow for protecting memory
++ pages against execution or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than that of the attacker, then he can elevate
++ his own privilege level (e.g. get a root shell, write to files for
++	  which he does not have write access, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++ This will also break programs that rely on the old behaviour and
++ expect that dynamically allocated memory via the malloc() family
++ of functions is executable (which it is not). Notable examples
++ are the XFree86 4.x server, the java runtime and wine.
++
++config PAX_PAGEEXEC
++ bool "Paging based non-executable pages"
++ depends on PAX_NOEXEC && !HIGHPTE && (!X86 || X86_64 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUM4 || MK7 || MK8)
++ help
++ This implementation is based on the paging feature of the CPU.
++ On i386 it has a variable performance impact on applications
++ depending on their memory usage pattern. You should carefully
++ test your applications before using this feature in production.
++ On alpha, ia64, parisc, sparc, sparc64 and x86_64 there is no
++ performance impact. On ppc there is a slight performance impact.
++
++config PAX_SEGMEXEC
++ bool "Segmentation based non-executable pages"
++ depends on PAX_NOEXEC && X86 && !X86_64
++ help
++ This implementation is based on the segmentation feature of the
++ CPU and has little performance impact, however applications will
++ be limited to a 1.5 GB address space instead of the normal 3 GB.
++
++config PAX_EMUTRAMP
++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || PPC || X86) && !X86_64
++ default y if PARISC || PPC
++ help
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++ utilities to enable trampoline emulation for the affected programs
++ yet still have the protection provided by the non-executable pages.
++
++ On parisc and ppc you MUST enable this option and EMUSIGRT as
++ well, otherwise your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++ for the affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
++config PAX_EMUSIGRT
++ bool "Automatically emulate sigreturn trampolines"
++ depends on PAX_EMUTRAMP && (PARISC || PPC)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc and ppc you MUST enable this option, otherwise your
++ system will not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++config PAX_MPROTECT
++ bool "Restrict mprotect()"
++ depends on PAX_PAGEEXEC || PAX_SEGMEXEC
++ help
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_NOELFRELOCS
++ bool "Disallow ELF text relocations"
++ depends on PAX_MPROTECT && (IA64 || X86 || X86_64)
++ help
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two venues
++ for this kind of attack: if the attacker can execute already
++ existing code in the attacked task then he can either have it
++ create and mmap() a file containing his code or have it mmap()
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++ then enable this option otherwise be careful as you may not even
++ be able to boot or log on your system (for example, some PAM
++ modules are erroneously compiled as non-PIC by default).
++
++ NOTE: if you are using dynamic ELF executables (as suggested
++ when using ASLR) then you must have made sure that you linked
++ your files using the PIC version of crt1 (the et_dyn.tar.gz package
++ referenced there has already been updated to support this).
++
++config PAX_ETEXECRELOCS
++ bool "Allow ELF ET_EXEC text relocations"
++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++ default y
++ help
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++config PAX_EMUPLT
++ bool "Automatically emulate ELF PLT"
++ depends on PAX_MPROTECT && (ALPHA || PARISC || PPC || SPARC32 || SPARC64)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc, ppc,
++ sparc and sparc64, otherwise your system would not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++config PAX_DLRESOLVE
++ bool
++ depends on PAX_EMUPLT && (SPARC32 || SPARC64)
++ default y
++
++config PAX_SYSCALL
++ bool
++ depends on PAX_PAGEEXEC && PPC
++ default y
++
++config PAX_KERNEXEC
++ bool "Enforce non-executable kernel pages"
++ depends on PAX_NOEXEC && X86 && !X86_64 && !MODULES
++ help
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++endmenu
++
++menu "Address Space Layout Randomization"
++ depends on PAX
++
++config PAX_ASLR
++ bool "Address Space Layout Randomization"
++ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
++ help
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++ the attacked program which allows the kernel to detect such attempts
++ and react on them. PaX itself provides no reaction mechanisms,
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++ a very effective protection.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_RANDKSTACK
++ bool "Randomize kernel stack base"
++ depends on PAX_ASLR && X86_TSC && !X86_64
++ help
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++ to guess it but also prevent him from making use of possible
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++ bool "Randomize user stack base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps where the second
++ one may apply a big amount of shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' or
++ 'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++ bool "Randomize mmap() base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++ becomes a guess game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++config PAX_RANDEXEC
++ bool "Randomize ET_EXEC base"
++ depends on PAX_MPROTECT && PAX_RANDMMAP
++ help
++ By saying Y here the kernel will randomize the base address of normal
++ ET_EXEC ELF executables as well. This is accomplished by mapping the
++ executable in memory in a special way which also allows for detecting
++ attackers who attempt to execute its code for their purposes. Since
++ this special mapping causes performance degradation and the attack
++ detection may create false alarms as well, you should carefully test
++ your executables when this feature is enabled.
++
++ This solution is intended only as a temporary one until you relink
++ your programs as a dynamic ELF file.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++config PAX_NOVSYSCALL
++ bool "Disable the vsyscall page"
++ depends on PAX_ASLR && X86 && !X86_64
++ help
++ The Linux 2.6 kernel introduced a new feature that speeds up or
++ simplifies certain operations, such as system calls or returns
++ from signal handlers.
++
++ Unfortunately the implementation also gives a powerful instrument
++ into the hands of exploit writers: the so-called vsyscall page exists
++ in every task at the same fixed address and it contains machine code
++ that is very useful in performing the return-to-libc style attack.
++
++ Since this exploit technique cannot in general be protected against
++ via kernel solutions, this option will allow you to disable the use
++ of the vsyscall page and revert back to the old behaviour.
++
++endmenu
++
++endmenu
++
+ config SECURITY
+ bool "Enable different security models"
+ help
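The PAX_MPROTECT help text above promises that pages which were not created executable cannot later be made executable. That is visible from user space: an anonymous PROT_READ|PROT_WRITE mapping can normally be flipped to PROT_EXEC with mprotect(), but on a kernel enforcing MPROTECT the mapping is created without VM_MAYEXEC, so the same call is expected to fail with EACCES. A small probe program; the observed behaviour depends on the kernel and on any per-file chpax/paxctl marks, so treat it as a diagnostic rather than a guarantee.

    #include <errno.h>
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    int main(void)
    {
        size_t len = 4096;
        void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED) {
            perror("mmap");
            return 1;
        }
        if (mprotect(p, len, PROT_READ | PROT_EXEC) == 0)
            printf("mprotect(PROT_EXEC) succeeded: no MPROTECT restriction\n");
        else
            printf("mprotect(PROT_EXEC) failed: %s (MPROTECT likely enforced)\n",
                   strerror(errno));
        munmap(p, len);
        return 0;
    }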
+diff -urN linux-2.6.5/security/commoncap.c linux-2.6.5-new/security/commoncap.c
+--- linux-2.6.5/security/commoncap.c 2004-04-03 22:36:56.000000000 -0500
++++ linux-2.6.5-new/security/commoncap.c 2004-04-14 09:06:29.000000000 -0400
+@@ -27,7 +27,7 @@
+ int cap_capable (struct task_struct *tsk, int cap)
+ {
+ /* Derived from include/linux/sched.h:capable. */
+- if (cap_raised (tsk->cap_effective, cap))
++ if (cap_raised (tsk->cap_effective, cap) && gr_task_is_capable(tsk, cap))
+ return 0;
+ else
+ return -EPERM;
+@@ -37,7 +37,7 @@
+ {
+ /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */
+ if (!cap_issubset (child->cap_permitted, current->cap_permitted) &&
+- !capable (CAP_SYS_PTRACE))
++ !capable_nolog (CAP_SYS_PTRACE))
+ return -EPERM;
+ else
+ return 0;
+@@ -338,7 +338,7 @@
+ /*
+ * Leave the last 3% for root
+ */
+- if (!capable(CAP_SYS_ADMIN))
++ if (!capable_nolog(CAP_SYS_ADMIN))
+ free -= free / 32;
+
+ if (free > pages)
+@@ -349,7 +349,7 @@
+ * only call if we're about to fail.
+ */
+ n = nr_free_pages();
+- if (!capable(CAP_SYS_ADMIN))
++ if (!capable_nolog(CAP_SYS_ADMIN))
+ n -= n / 32;
+ free += n;
+
+diff -urN linux-2.6.5/security/security.c linux-2.6.5-new/security/security.c
+--- linux-2.6.5/security/security.c 2004-04-03 22:36:13.000000000 -0500
++++ linux-2.6.5-new/security/security.c 2004-04-14 09:06:29.000000000 -0400
+@@ -206,4 +206,5 @@
+ EXPORT_SYMBOL_GPL(mod_reg_security);
+ EXPORT_SYMBOL_GPL(mod_unreg_security);
+ EXPORT_SYMBOL(capable);
++EXPORT_SYMBOL(capable_nolog);
+ EXPORT_SYMBOL(security_ops);
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1300_linux-2.6.4-selinux-hooks.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1300_linux-2.6.4-selinux-hooks.patch
new file mode 100644
index 0000000000..ce033764f6
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1300_linux-2.6.4-selinux-hooks.patch
@@ -0,0 +1,137 @@
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/hooks.c linux-2.4.24-hardened-r1/security/selinux/hooks.c
+--- linux-2.4.24-hardened-r1.orig/security/selinux/hooks.c 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/hooks.c 2004-02-22 23:46:53.000000000 -0600
+@@ -3190,6 +3190,68 @@
+ return size;
+ }
+
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++static void avc_pax_set_flags(struct linux_binprm * bprm)
++{
++ struct inode_security_struct *isec;
++ unsigned long flags = 0;
++ int rc;
++
++ char *scontext;
++ u32 scontext_len;
++
++ /*
++ * get the security struct from the inode of the file
++ * since the bprm security struct will just point to
++ * the user running the binary
++ */
++ struct inode *inode = bprm->file->f_dentry->d_inode;
++ isec = inode->i_security;
++
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__PAGEEXEC, &isec->avcr,NULL);
++ if (!rc) {
++ flags |= PF_PAX_PAGEEXEC;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__EMUTRAMP, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_EMUTRAMP;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__RANDEXEC, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_RANDEXEC;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__MPROTECT, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_MPROTECT;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__RANDMMAP, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_RANDMMAP;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__SEGMEXEC, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_SEGMEXEC;
++ }
++
++ if (selinux_enforcing) {
++ /* pull all the pax flags in current */
++ current->flags &= ~(PF_PAX_PAGEEXEC | PF_PAX_EMUTRAMP | PF_PAX_MPROTECT | PF_PAX_RANDMMAP | PF_PAX_RANDEXEC | PF_PAX_SEGMEXEC);
++ /* and add ours */
++ current->flags |= flags;
++
++ if (pax_check_flags(&current->flags) < 0) {
++ security_sid_to_context(isec->sid, &scontext, &scontext_len);
++ printk(KERN_WARNING "avc: PaX flags overridden to %lx for %s (%s)\n",
++ current->flags,
++ scontext,
++ bprm->filename);
++ kfree(scontext);
++ }
++ }
++}
++#endif /* CONFIG_PAX_HOOK_ACL_FLAGS */
++
++
+ struct security_operations selinux_ops = {
+ .ptrace = selinux_ptrace,
+ .capget = selinux_capget,
+@@ -3370,6 +3432,11 @@
+ {
+ printk(KERN_INFO "SELinux: Completing initialization.\n");
+
++ #ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++ printk(KERN_INFO "SELinux: Setting PaX callback function.\n");
++ pax_set_flags_func = avc_pax_set_flags;
++ #endif
++
+ /* Set up any superblocks initialized prior to the policy load. */
+ printk(KERN_INFO "SELinux: Setting up existing superblocks.\n");
+ spin_lock(&sb_security_lock);
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/av_perm_to_string.h linux-2.4.24-hardened-r1/security/selinux/include/av_perm_to_string.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/av_perm_to_string.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/av_perm_to_string.h 2004-02-20 16:50:39.000000000 -0600
+@@ -114,6 +120,12 @@
+ { SECCLASS_PASSWD, PASSWD__PASSWD, "passwd" },
+ { SECCLASS_PASSWD, PASSWD__CHFN, "chfn" },
+ { SECCLASS_PASSWD, PASSWD__CHSH, "chsh" },
++ { SECCLASS_PAX, PAX__PAGEEXEC, "pageexec" },
++ { SECCLASS_PAX, PAX__EMUTRAMP, "emutramp" },
++ { SECCLASS_PAX, PAX__MPROTECT, "mprotect" },
++ { SECCLASS_PAX, PAX__RANDMMAP, "randmmap" },
++ { SECCLASS_PAX, PAX__RANDEXEC, "randexec" },
++ { SECCLASS_PAX, PAX__SEGMEXEC, "segmexec" },
+ };
+
+
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/av_permissions.h linux-2.4.24-hardened-r1/security/selinux/include/av_permissions.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/av_permissions.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/av_permissions.h 2004-02-20 16:50:40.000000000 -0600
+@@ -546,5 +554,12 @@
+ #define PASSWD__CHFN 0x00000002UL
+ #define PASSWD__CHSH 0x00000004UL
+
++#define PAX__PAGEEXEC 0x00000001UL
++#define PAX__EMUTRAMP 0x00000002UL
++#define PAX__MPROTECT 0x00000004UL
++#define PAX__RANDMMAP 0x00000008UL
++#define PAX__RANDEXEC 0x00000010UL
++#define PAX__SEGMEXEC 0x00000020UL
++
+
+ /* FLASK */
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/class_to_string.h linux-2.4.24-hardened-r1/security/selinux/include/class_to_string.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/class_to_string.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/class_to_string.h 2004-02-20 16:50:40.000000000 -0600
+@@ -35,5 +35,6 @@
+ "shm",
+ "ipc",
+ "passwd",
++ "pax",
+ };
+
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/flask.h linux-2.4.24-hardened-r1/security/selinux/include/flask.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/flask.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/flask.h 2004-02-20 16:50:41.000000000 -0600
+@@ -35,6 +35,7 @@
+ #define SECCLASS_SHM 28
+ #define SECCLASS_IPC 29
+ #define SECCLASS_PASSWD 30
++#define SECCLASS_PAX 31
+
+ /*
+ * Security identifier indices for initial entities
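A minimal userspace sketch (not part of the patch above) of how avc_pax_set_flags() assembles a flag word out of per-permission checks against the new "pax" security class. The stubbed has_perm() policy is invented for illustration, and for simplicity the sketch reuses the PAX__* permission bits as the flag word; the real hook maps each granted permission to the corresponding PF_PAX_* task flag defined in the grsecurity base patch.

/*
 * Toy model of avc_pax_set_flags(): a successful per-permission check
 * enables the matching PaX feature for the task being exec'd.
 * Standalone userspace code; not kernel code.
 */
#include <stdio.h>

/* permission bit values copied from the av_permissions.h hunk above */
#define PAX__PAGEEXEC 0x00000001UL
#define PAX__EMUTRAMP 0x00000002UL
#define PAX__MPROTECT 0x00000004UL
#define PAX__RANDMMAP 0x00000008UL
#define PAX__RANDEXEC 0x00000010UL
#define PAX__SEGMEXEC 0x00000020UL

/* stand-in for avc_has_perm(): grant everything except emutramp (made up) */
static int has_perm(unsigned long perm)
{
        return (perm == PAX__EMUTRAMP) ? -1 : 0;
}

int main(void)
{
        static const struct { unsigned long perm; const char *name; } table[] = {
                { PAX__PAGEEXEC, "pageexec" }, { PAX__EMUTRAMP, "emutramp" },
                { PAX__MPROTECT, "mprotect" }, { PAX__RANDMMAP, "randmmap" },
                { PAX__RANDEXEC, "randexec" }, { PAX__SEGMEXEC, "segmexec" },
        };
        unsigned long flags = 0;
        unsigned int i;

        /* same shape as the hook: each granted permission turns a feature on */
        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                if (!has_perm(table[i].perm))
                        flags |= table[i].perm;

        for (i = 0; i < sizeof(table) / sizeof(table[0]); i++)
                printf("%-8s %s\n", table[i].name,
                       (flags & table[i].perm) ? "enabled" : "disabled");
        return 0;
}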
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1305_linux-2.6.4-selinux-ipaddr.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1305_linux-2.6.4-selinux-ipaddr.patch
new file mode 100644
index 0000000000..c1a9c2f0a7
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1305_linux-2.6.4-selinux-ipaddr.patch
@@ -0,0 +1,14 @@
+--- linux-2.6.3-openpax/security/selinux/avc.c 2004-02-17 21:58:52.000000000 -0600
++++ linux-2.6.3/security/selinux/avc.c 2004-03-07 18:24:57.000000000 -0600
+@@ -143,6 +143,11 @@
+ char *scontext;
+ u32 scontext_len;
+
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ if (current->curr_ip)
++ printk("ipaddr=%u.%u.%u.%u ", NIPQUAD(current->curr_ip));
++#endif /* CONFIG_GRKERNSEC_PROC_IPADDR */
++
+ rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+ if (rc)
+ printk("ssid=%d", ssid);
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1310_linux-2.6.5-extra_sec_ops.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1310_linux-2.6.5-extra_sec_ops.patch
new file mode 100644
index 0000000000..fee0ff623e
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1310_linux-2.6.5-extra_sec_ops.patch
@@ -0,0 +1,63 @@
+--- linux-2.6.4/security/selinux/hooks.c 2004-04-13 00:51:48.225259424 -0500
++++ linux-2.6.5-hardened/security/selinux/hooks.c 2004-04-13 00:34:15.067464600 -0500
+@@ -1673,6 +1673,11 @@
+
+ static int selinux_bprm_check_security (struct linux_binprm *bprm)
+ {
++ int rc;
++
++ rc = secondary_ops->bprm_check_security(bprm);
++ if (rc)
++ return rc;
+ return 0;
+ }
+
+@@ -2013,6 +2018,11 @@
+
+ static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
+ {
++ int rc;
++
++ rc = secondary_ops->inode_unlink(dir, dentry);
++ if (rc)
++ return rc;
+ return may_link(dir, dentry, MAY_UNLINK);
+ }
+
+@@ -2081,11 +2091,17 @@
+ static int selinux_inode_permission(struct inode *inode, int mask,
+ struct nameidata *nd)
+ {
++ int rc;
++
+ if (!mask) {
+ /* No permission to check. Existence test. */
+ return 0;
+ }
+
++ rc = secondary_ops->inode_permission(inode, mask, nd);
++ if (rc)
++ return rc;
++
+ return inode_has_perm(current, inode,
+ file_mask_to_av(inode->i_mode, mask), NULL, NULL);
+ }
+@@ -2358,6 +2374,7 @@
+ static int selinux_file_mmap(struct file *file, unsigned long prot, unsigned long flags)
+ {
+ u32 av;
++ int rc;
+
+ if (file) {
+ /* read access is always possible with a mapping */
+@@ -2369,6 +2386,10 @@
+
+ if (prot & PROT_EXEC)
+ av |= FILE__EXECUTE;
++
++ rc = secondary_ops->file_mmap(file, prot, flags);
++ if (rc)
++ return rc;
+
+ return file_has_perm(current, file, av);
+ }
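Every hunk in this patch follows the same pattern: the SELinux hook first consults the matching hook of secondary_ops and fails fast on a non-zero return before doing SELinux's own check. Below is a toy userspace model of that stacking idea (not kernel code; the struct name, the write-denying policy, and the mask values are invented for illustration).

/*
 * Toy model of "call the secondary security module first, fail fast,
 * then apply the primary check".  Standalone userspace code.
 */
#include <stdio.h>
#include <errno.h>

struct sec_ops {
        int (*inode_permission)(int mask);
};

/* stand-in for the stacked secondary module (e.g. capabilities) */
static int secondary_inode_permission(int mask)
{
        return (mask & 2) ? -EACCES : 0;        /* deny writes, allow the rest */
}

static struct sec_ops secondary = { .inode_permission = secondary_inode_permission };
static struct sec_ops *secondary_ops = &secondary;

static int primary_inode_permission(int mask)
{
        int rc;

        /* same shape as the patch: if the secondary module objects, stop here */
        rc = secondary_ops->inode_permission(mask);
        if (rc)
                return rc;

        /* ...the primary (SELinux-style) decision would follow here... */
        return 0;
}

int main(void)
{
        printf("read  (mask=1) -> %d\n", primary_inode_permission(1));
        printf("write (mask=2) -> %d\n", primary_inode_permission(2));
        return 0;
}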
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1315_linux-2.6.5-selinux.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1315_linux-2.6.5-selinux.patch
new file mode 100644
index 0000000000..3aed2fe6e5
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/1315_linux-2.6.5-selinux.patch
@@ -0,0 +1,968 @@
+Index: linux-2.6/security/selinux/avc.c
+diff -u linux-2.6/security/selinux/avc.c:1.1.1.4 linux-2.6/security/selinux/avc.c:1.39
+--- linux-2.6/security/selinux/avc.c:1.1.1.4 Wed Feb 18 08:42:04 2004
++++ linux-2.6/security/selinux/avc.c Wed Apr 7 12:18:47 2004
+@@ -22,6 +22,8 @@
+ #include <linux/un.h>
+ #include <net/af_unix.h>
+ #include <linux/ip.h>
++#include <linux/ipv6.h>
++#include <net/ipv6.h>
+ #include "avc.h"
+ #include "avc_ss.h"
+ #include "class_to_string.h"
+@@ -418,6 +420,16 @@
+ return rc;
+ }
+
++static inline void avc_print_ipv6_addr(struct in6_addr *addr, u16 port,
++ char *name1, char *name2)
++{
++ if (!ipv6_addr_any(addr))
++ printk(" %s=%04x:%04x:%04x:%04x:%04x:%04x:%04x:%04x",
++ name1, NIP6(*addr));
++ if (port)
++ printk(" %s=%d", name2, ntohs(port));
++}
++
+ static inline void avc_print_ipv4_addr(u32 addr, u16 port, char *name1, char *name2)
+ {
+ if (addr)
+@@ -602,11 +614,11 @@
+ if (a->u.net.sk) {
+ struct sock *sk = a->u.net.sk;
+ struct unix_sock *u;
+- struct inet_opt *inet;
+
+ switch (sk->sk_family) {
+- case AF_INET:
+- inet = inet_sk(sk);
++ case AF_INET: {
++ struct inet_opt *inet = inet_sk(sk);
++
+ avc_print_ipv4_addr(inet->rcv_saddr,
+ inet->sport,
+ "laddr", "lport");
+@@ -614,6 +626,19 @@
+ inet->dport,
+ "faddr", "fport");
+ break;
++ }
++ case AF_INET6: {
++ struct inet_opt *inet = inet_sk(sk);
++ struct ipv6_pinfo *inet6 = inet6_sk(sk);
++
++ avc_print_ipv6_addr(&inet6->rcv_saddr,
++ inet->sport,
++ "laddr", "lport");
++ avc_print_ipv6_addr(&inet6->daddr,
++ inet->dport,
++ "faddr", "fport");
++ break;
++ }
+ case AF_UNIX:
+ u = unix_sk(sk);
+ if (u->dentry) {
+@@ -639,11 +664,24 @@
+ }
+ }
+
+- avc_print_ipv4_addr(a->u.net.saddr, a->u.net.sport,
+- "saddr", "src");
+- avc_print_ipv4_addr(a->u.net.daddr, a->u.net.dport,
+- "daddr", "dest");
+-
++ switch (a->u.net.family) {
++ case AF_INET:
++ avc_print_ipv4_addr(a->u.net.v4info.saddr,
++ a->u.net.sport,
++ "saddr", "src");
++ avc_print_ipv4_addr(a->u.net.v4info.daddr,
++ a->u.net.dport,
++ "daddr", "dest");
++ break;
++ case AF_INET6:
++ avc_print_ipv6_addr(&a->u.net.v6info.saddr,
++ a->u.net.sport,
++ "saddr", "src");
++ avc_print_ipv6_addr(&a->u.net.v6info.daddr,
++ a->u.net.dport,
++ "daddr", "dest");
++ break;
++ }
+ if (a->u.net.netif)
+ printk(" netif=%s", a->u.net.netif);
+ break;
+Index: linux-2.6/security/selinux/hooks.c
+diff -u linux-2.6/security/selinux/hooks.c:1.1.1.10 linux-2.6/security/selinux/hooks.c:1.106
+--- linux-2.6/security/selinux/hooks.c:1.1.1.10 Mon Apr 5 08:11:22 2004
++++ linux-2.6/security/selinux/hooks.c Wed Apr 7 12:18:47 2004
+@@ -42,6 +42,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/kd.h>
+ #include <linux/netfilter_ipv4.h>
++#include <linux/netfilter_ipv6.h>
+ #include <net/icmp.h>
+ #include <net/ip.h> /* for sysctl_local_port_range[] */
+ #include <net/tcp.h> /* struct or_callable used in sock_rcv_skb */
+@@ -59,6 +60,7 @@
+ #include <net/af_unix.h> /* for Unix socket types */
+ #include <linux/parser.h>
+ #include <linux/nfs_mount.h>
++#include <net/ipv6.h>
+ #include <linux/hugetlb.h>
+
+ #include "avc.h"
+@@ -272,7 +274,7 @@
+
+ static void sk_free_security(struct sock *sk)
+ {
+- struct task_security_struct *ssec = sk->sk_security;
++ struct sk_security_struct *ssec = sk->sk_security;
+
+ if (sk->sk_family != PF_UNIX || ssec->magic != SELINUX_MAGIC)
+ return;
+@@ -2647,35 +2649,32 @@
+
+ #ifdef CONFIG_SECURITY_NETWORK
+
+-static void selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
++/* Returns error only if unable to parse addresses */
++static int selinux_parse_skb_ipv4(struct sk_buff *skb, struct avc_audit_data *ad)
+ {
+- int dlen, ihlen;
+- struct iphdr *iph;
++ int offset, ihlen, ret;
++ struct iphdr iph;
+
+- if (skb->len < sizeof(struct iphdr))
++ offset = skb->nh.raw - skb->data;
++ ret = skb_copy_bits(skb, offset, &iph, sizeof(iph));
++ if (ret)
+ goto out;
+-
+- iph = skb->nh.iph;
+- ihlen = iph->ihl * 4;
+- if (ihlen < sizeof(struct iphdr))
++
++ ihlen = iph.ihl * 4;
++ if (ihlen < sizeof(iph))
+ goto out;
+
+- dlen = skb->len - ihlen;
+- ad->u.net.saddr = iph->saddr;
+- ad->u.net.daddr = iph->daddr;
++ ad->u.net.v4info.saddr = iph.saddr;
++ ad->u.net.v4info.daddr = iph.daddr;
+
+- switch (iph->protocol) {
++ switch (iph.protocol) {
+ case IPPROTO_TCP: {
+- int offset;
+ struct tcphdr tcph;
+
+- if (ntohs(iph->frag_off) & IP_OFFSET)
++ if (ntohs(iph.frag_off) & IP_OFFSET)
+ break;
+-
+- if (dlen < sizeof(tcph))
+- break;
+
+- offset = skb->nh.raw - skb->data + ihlen;
++ offset += ihlen;
+ if (skb_copy_bits(skb, offset, &tcph, sizeof(tcph)) < 0)
+ break;
+
+@@ -2685,16 +2684,12 @@
+ }
+
+ case IPPROTO_UDP: {
+- int offset;
+ struct udphdr udph;
+
+- if (ntohs(iph->frag_off) & IP_OFFSET)
++ if (ntohs(iph.frag_off) & IP_OFFSET)
+ break;
+
+- if (dlen < sizeof(udph))
+- break;
+-
+- offset = skb->nh.raw - skb->data + ihlen;
++ offset += ihlen;
+ if (skb_copy_bits(skb, offset, &udph, sizeof(udph)) < 0)
+ break;
+
+@@ -2707,7 +2702,96 @@
+ break;
+ }
+ out:
+- return;
++ return ret;
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++
++/* Returns error only if unable to parse addresses */
++static int selinux_parse_skb_ipv6(struct sk_buff *skb, struct avc_audit_data *ad)
++{
++ u8 nexthdr;
++ int ret, offset = skb->nh.raw - skb->data;
++ struct ipv6hdr ipv6h;
++
++ offset = skb->nh.raw - skb->data;
++ ret = skb_copy_bits(skb, offset, &ipv6h, sizeof(ipv6h));
++ if (ret)
++ goto out;
++
++ ipv6_addr_copy(&ad->u.net.v6info.saddr, &ipv6h.saddr);
++ ipv6_addr_copy(&ad->u.net.v6info.daddr, &ipv6h.daddr);
++
++ nexthdr = ipv6h.nexthdr;
++ offset += sizeof(ipv6h);
++ offset = ipv6_skip_exthdr(skb, offset, &nexthdr,
++ skb->tail - skb->head - offset);
++ if (offset < 0)
++ goto out;
++
++ switch (nexthdr) {
++ case IPPROTO_TCP: {
++ struct tcphdr tcph;
++
++ if (skb_copy_bits(skb, offset, &tcph, sizeof(tcph)) < 0)
++ break;
++
++ ad->u.net.sport = tcph.source;
++ ad->u.net.dport = tcph.dest;
++ break;
++ }
++
++ case IPPROTO_UDP: {
++ struct udphdr udph;
++
++ if (skb_copy_bits(skb, offset, &udph, sizeof(udph)) < 0)
++ break;
++
++ ad->u.net.sport = udph.source;
++ ad->u.net.dport = udph.dest;
++ break;
++ }
++
++ /* includes fragments */
++ default:
++ break;
++ }
++out:
++ return ret;
++}
++
++#endif /* IPV6 */
++
++static int selinux_parse_skb(struct sk_buff *skb, struct avc_audit_data *ad,
++ char **addrp, int *len, int src)
++{
++ int ret = 0;
++
++ switch (ad->u.net.family) {
++ case PF_INET:
++ ret = selinux_parse_skb_ipv4(skb, ad);
++ if (ret || !addrp)
++ break;
++ *len = 4;
++ *addrp = (char *)(src ? &ad->u.net.v4info.saddr :
++ &ad->u.net.v4info.daddr);
++ break;
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++ case PF_INET6:
++ ret = selinux_parse_skb_ipv6(skb, ad);
++ if (ret || !addrp)
++ break;
++ *len = 16;
++ *addrp = (char *)(src ? &ad->u.net.v6info.saddr :
++ &ad->u.net.v6info.daddr);
++ break;
++#endif /* IPV6 */
++ default:
++ break;
++ }
++
++ return ret;
+ }
+
+ /* socket security operations */
+@@ -2770,6 +2854,7 @@
+
+ static int selinux_socket_bind(struct socket *sock, struct sockaddr *address, int addrlen)
+ {
++ u16 family;
+ int err;
+
+ err = socket_has_perm(current, sock, SOCKET__BIND);
+@@ -2777,20 +2862,35 @@
+ goto out;
+
+ /*
+- * If PF_INET, check name_bind permission for the port.
++ * If PF_INET or PF_INET6, check name_bind permission for the port.
+ */
+- if (sock->sk->sk_family == PF_INET) {
++ family = sock->sk->sk_family;
++ if (family == PF_INET || family == PF_INET6) {
++ char *addrp;
+ struct inode_security_struct *isec;
+ struct task_security_struct *tsec;
+ struct avc_audit_data ad;
+- struct sockaddr_in *addr = (struct sockaddr_in *)address;
+- unsigned short snum = ntohs(addr->sin_port);
++ struct sockaddr_in *addr4 = NULL;
++ struct sockaddr_in6 *addr6 = NULL;
++ unsigned short snum;
+ struct sock *sk = sock->sk;
+- u32 sid, node_perm;
++ u32 sid, node_perm, addrlen;
+
+ tsec = current->security;
+ isec = SOCK_INODE(sock)->i_security;
+
++ if (family == PF_INET) {
++ addr4 = (struct sockaddr_in *)address;
++ snum = ntohs(addr4->sin_port);
++ addrlen = sizeof(addr4->sin_addr.s_addr);
++ addrp = (char *)&addr4->sin_addr.s_addr;
++ } else {
++ addr6 = (struct sockaddr_in6 *)address;
++ snum = ntohs(addr6->sin6_port);
++ addrlen = sizeof(addr6->sin6_addr.s6_addr);
++ addrp = (char *)&addr6->sin6_addr.s6_addr;
++ }
++
+ if (snum&&(snum < max(PROT_SOCK,ip_local_port_range_0) ||
+ snum > ip_local_port_range_1)) {
+ err = security_port_sid(sk->sk_family, sk->sk_type,
+@@ -2820,14 +2920,19 @@
+ break;
+ }
+
+- err = security_node_sid(PF_INET, &addr->sin_addr.s_addr,
+- sizeof(addr->sin_addr.s_addr), &sid);
++ err = security_node_sid(family, addrp, addrlen, &sid);
+ if (err)
+ goto out;
+
+ AVC_AUDIT_DATA_INIT(&ad,NET);
+ ad.u.net.sport = htons(snum);
+- ad.u.net.saddr = addr->sin_addr.s_addr;
++ ad.u.net.family = family;
++
++ if (family == PF_INET)
++ ad.u.net.v4info.saddr = addr4->sin_addr.s_addr;
++ else
++ ipv6_addr_copy(&ad.u.net.v6info.saddr, &addr6->sin6_addr);
++
+ err = avc_has_perm(isec->sid, sid,
+ isec->sclass, node_perm, NULL, &ad);
+ if (err)
+@@ -2967,21 +3072,26 @@
+
+ static int selinux_socket_sock_rcv_skb(struct sock *sk, struct sk_buff *skb)
+ {
+- int err = 0;
++ u16 family;
++ char *addrp;
++ int len, err = 0;
+ u32 netif_perm, node_perm, node_sid, recv_perm = 0;
+ struct socket *sock;
+ struct inode *inode;
+ struct net_device *dev;
+- struct iphdr *iph;
+ struct sel_netif *netif;
+ struct netif_security_struct *nsec;
+ struct inode_security_struct *isec;
+ struct avc_audit_data ad;
+
+- /* Only IPv4 is supported here at this stage */
+- if (sk->sk_family != PF_INET)
++ family = sk->sk_family;
++ if (family != PF_INET && family != PF_INET6)
+ goto out;
+
++ /* Handle mapped IPv4 packets arriving via IPv6 sockets */
++ if (family == PF_INET6 && skb->protocol == ntohs(ETH_P_IP))
++ family = PF_INET;
++
+ sock = sk->sk_socket;
+
+ /* TCP control messages don't always have a socket. */
+@@ -3026,7 +3136,13 @@
+
+ AVC_AUDIT_DATA_INIT(&ad, NET);
+ ad.u.net.netif = dev->name;
+- selinux_parse_skb_ipv4(skb, &ad);
++ ad.u.net.family = family;
++
++ err = selinux_parse_skb(skb, &ad, &addrp, &len, 1);
++ if (err) {
++ sel_netif_put(netif);
++ goto out;
++ }
+
+ err = avc_has_perm(isec->sid, nsec->if_sid, SECCLASS_NETIF,
+ netif_perm, &nsec->avcr, &ad);
+@@ -3035,8 +3151,7 @@
+ goto out;
+
+ /* Fixme: this lookup is inefficient */
+- iph = skb->nh.iph;
+- err = security_node_sid(PF_INET, &iph->saddr, sizeof(iph->saddr), &node_sid);
++ err = security_node_sid(family, addrp, len, &node_sid);
+ if (err)
+ goto out;
+
+@@ -3057,7 +3172,6 @@
+ err = avc_has_perm(isec->sid, port_sid, isec->sclass,
+ recv_perm, NULL, &ad);
+ }
+-
+ out:
+ return err;
+ }
+@@ -3111,18 +3225,20 @@
+ }
+
+ #ifdef CONFIG_NETFILTER
++
+ static unsigned int selinux_ip_postroute_last(unsigned int hooknum,
+ struct sk_buff **pskb,
+ const struct net_device *in,
+ const struct net_device *out,
+- int (*okfn)(struct sk_buff *))
++ int (*okfn)(struct sk_buff *),
++ u16 family)
+ {
+- int err = NF_ACCEPT;
++ char *addrp;
++ int len, err = NF_ACCEPT;
+ u32 netif_perm, node_perm, node_sid, send_perm = 0;
+ struct sock *sk;
+ struct socket *sock;
+ struct inode *inode;
+- struct iphdr *iph;
+ struct sel_netif *netif;
+ struct sk_buff *skb = *pskb;
+ struct netif_security_struct *nsec;
+@@ -3170,9 +3286,17 @@
+ break;
+ }
+
++
+ AVC_AUDIT_DATA_INIT(&ad, NET);
+ ad.u.net.netif = dev->name;
+- selinux_parse_skb_ipv4(skb, &ad);
++ ad.u.net.family = family;
++
++ err = selinux_parse_skb(skb, &ad, &addrp,
++ &len, 0) ? NF_DROP : NF_ACCEPT;
++ if (err != NF_ACCEPT) {
++ sel_netif_put(netif);
++ goto out;
++ }
+
+ err = avc_has_perm(isec->sid, nsec->if_sid, SECCLASS_NETIF,
+ netif_perm, &nsec->avcr, &ad) ? NF_DROP : NF_ACCEPT;
+@@ -3181,8 +3305,7 @@
+ goto out;
+
+ /* Fixme: this lookup is inefficient */
+- iph = skb->nh.iph;
+- err = security_node_sid(PF_INET, &iph->daddr, sizeof(iph->daddr),
++ err = security_node_sid(family, addrp, len,
+ &node_sid) ? NF_DROP : NF_ACCEPT;
+ if (err != NF_ACCEPT)
+ goto out;
+@@ -3212,6 +3335,28 @@
+ return err;
+ }
+
++static unsigned int selinux_ipv4_postroute_last(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return selinux_ip_postroute_last(hooknum, pskb, in, out, okfn, PF_INET);
++}
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++
++static unsigned int selinux_ipv6_postroute_last(unsigned int hooknum,
++ struct sk_buff **pskb,
++ const struct net_device *in,
++ const struct net_device *out,
++ int (*okfn)(struct sk_buff *))
++{
++ return selinux_ip_postroute_last(hooknum, pskb, in, out, okfn, PF_INET6);
++}
++
++#endif /* IPV6 */
++
+ #endif /* CONFIG_NETFILTER */
+
+ #endif /* CONFIG_SECURITY_NETWORK */
+@@ -4025,14 +4170,26 @@
+
+ #if defined(CONFIG_SECURITY_NETWORK) && defined(CONFIG_NETFILTER)
+
+-static struct nf_hook_ops selinux_ip_ops[] = {
+- { .hook = selinux_ip_postroute_last,
+- .owner = THIS_MODULE,
+- .pf = PF_INET,
+- .hooknum = NF_IP_POST_ROUTING,
+- .priority = NF_IP_PRI_SELINUX_LAST, },
++static struct nf_hook_ops selinux_ipv4_op = {
++ .hook = selinux_ipv4_postroute_last,
++ .owner = THIS_MODULE,
++ .pf = PF_INET,
++ .hooknum = NF_IP_POST_ROUTING,
++ .priority = NF_IP_PRI_SELINUX_LAST,
++};
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++
++static struct nf_hook_ops selinux_ipv6_op = {
++ .hook = selinux_ipv6_postroute_last,
++ .owner = THIS_MODULE,
++ .pf = PF_INET6,
++ .hooknum = NF_IP6_POST_ROUTING,
++ .priority = NF_IP6_PRI_SELINUX_LAST,
+ };
+
++#endif /* IPV6 */
++
+ static int __init selinux_nf_ip_init(void)
+ {
+ int err = 0;
+@@ -4042,10 +4199,17 @@
+
+ printk(KERN_INFO "SELinux: Registering netfilter hooks\n");
+
+- err = nf_register_hook(&selinux_ip_ops[0]);
++ err = nf_register_hook(&selinux_ipv4_op);
++ if (err)
++ panic("SELinux: nf_register_hook for IPv4: error %d\n", err);
++
++#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
++
++ err = nf_register_hook(&selinux_ipv6_op);
+ if (err)
+- panic("SELinux: nf_register_hook 0 error %d\n", err);
++ panic("SELinux: nf_register_hook for IPv6: error %d\n", err);
+
++#endif /* IPV6 */
+ out:
+ return err;
+ }
+Index: linux-2.6/security/selinux/selinuxfs.c
+diff -u linux-2.6/security/selinux/selinuxfs.c:1.1.1.7 linux-2.6/security/selinux/selinuxfs.c:1.42
+--- linux-2.6/security/selinux/selinuxfs.c:1.1.1.7 Mon Apr 5 08:11:23 2004
++++ linux-2.6/security/selinux/selinuxfs.c Wed Apr 7 12:18:48 2004
+@@ -164,7 +164,7 @@
+ return -ENOMEM;
+ memset(page, 0, PAGE_SIZE);
+
+- length = scnprintf(page, PAGE_SIZE, "%u", POLICYDB_VERSION);
++ length = scnprintf(page, PAGE_SIZE, "%u", POLICYDB_VERSION_MAX);
+ if (length < 0) {
+ free_page((unsigned long)page);
+ return length;
+Index: linux-2.6/security/selinux/include/avc.h
+diff -u linux-2.6/security/selinux/include/avc.h:1.1.1.3 linux-2.6/security/selinux/include/avc.h:1.14
+--- linux-2.6/security/selinux/include/avc.h:1.1.1.3 Wed Feb 18 08:42:09 2004
++++ linux-2.6/security/selinux/include/avc.h Wed Apr 7 12:18:48 2004
+@@ -12,6 +12,7 @@
+ #include <linux/kdev_t.h>
+ #include <linux/spinlock.h>
+ #include <linux/init.h>
++#include <linux/in6.h>
+ #include <asm/system.h>
+ #include "flask.h"
+ #include "av_permissions.h"
+@@ -65,15 +66,27 @@
+ struct {
+ char *netif;
+ struct sock *sk;
++ u16 family;
+ u16 dport;
+ u16 sport;
+- u32 daddr;
+- u32 saddr;
++ union {
++ struct {
++ u32 daddr;
++ u32 saddr;
++ } v4;
++ struct {
++ struct in6_addr daddr;
++ struct in6_addr saddr;
++ } v6;
++ } fam;
+ } net;
+ int cap;
+ int ipc_id;
+ } u;
+ };
++
++#define v4info fam.v4
++#define v6info fam.v6
+
+ /* Initialize an AVC audit data structure. */
+ #define AVC_AUDIT_DATA_INIT(_d,_t) \
+Index: linux-2.6/security/selinux/include/security.h
+diff -u linux-2.6/security/selinux/include/security.h:1.1.1.5 linux-2.6/security/selinux/include/security.h:1.16
+--- linux-2.6/security/selinux/include/security.h:1.1.1.5 Mon Apr 5 08:11:26 2004
++++ linux-2.6/security/selinux/include/security.h Wed Apr 7 12:18:48 2004
+@@ -15,8 +15,15 @@
+ #define SECCLASS_NULL 0x0000 /* no class */
+
+ #define SELINUX_MAGIC 0xf97cff8c
+-#define POLICYDB_VERSION 16
+-#define POLICYDB_VERSION_COMPAT 15
++
++/* Identify specific policy version changes */
++#define POLICYDB_VERSION_BASE 15
++#define POLICYDB_VERSION_BOOL 16
++#define POLICYDB_VERSION_IPV6 17
++
++/* Range of policy versions we understand*/
++#define POLICYDB_VERSION_MIN POLICYDB_VERSION_BASE
++#define POLICYDB_VERSION_MAX POLICYDB_VERSION_IPV6
+
+ #ifdef CONFIG_SECURITY_SELINUX_BOOTPARAM
+ extern int selinux_enabled;
+Index: linux-2.6/security/selinux/ss/policydb.c
+diff -u linux-2.6/security/selinux/ss/policydb.c:1.1.1.7 linux-2.6/security/selinux/ss/policydb.c:1.30
+--- linux-2.6/security/selinux/ss/policydb.c:1.1.1.7 Mon Apr 5 08:11:24 2004
++++ linux-2.6/security/selinux/ss/policydb.c Wed Apr 7 12:18:49 2004
+@@ -48,6 +48,45 @@
+ 16
+ };
+
++struct policydb_compat_info {
++ int version;
++ int sym_num;
++ int ocon_num;
++};
++
++/* These need to be updated if SYM_NUM or OCON_NUM changes */
++static struct policydb_compat_info policydb_compat[] = {
++ {
++ .version = POLICYDB_VERSION_BASE,
++ .sym_num = SYM_NUM - 1,
++ .ocon_num = OCON_NUM - 1,
++ },
++ {
++ .version = POLICYDB_VERSION_BOOL,
++ .sym_num = SYM_NUM,
++ .ocon_num = OCON_NUM - 1,
++ },
++ {
++ .version = POLICYDB_VERSION_IPV6,
++ .sym_num = SYM_NUM,
++ .ocon_num = OCON_NUM,
++ },
++};
++
++static struct policydb_compat_info *policydb_lookup_compat(int version)
++{
++ int i;
++ struct policydb_compat_info *info = NULL;
++
++ for (i = 0; i < sizeof(policydb_compat)/sizeof(*info); i++) {
++ if (policydb_compat[i].version == version) {
++ info = &policydb_compat[i];
++ break;
++ }
++ }
++ return info;
++}
++
+ /*
+ * Initialize the role table.
+ */
+@@ -1086,9 +1125,10 @@
+ struct role_trans *tr, *ltr;
+ struct ocontext *l, *c, *newc;
+ struct genfs *genfs_p, *genfs, *newgenfs;
+- int i, j, rc, policy_ver, num_syms;
++ int i, j, rc, r_policyvers;
+ u32 *buf, len, len2, config, nprim, nel, nel2;
+ char *policydb_str;
++ struct policydb_compat_info *info;
+
+ config = 0;
+ mls_set_config(config);
+@@ -1151,12 +1191,15 @@
+ for (i = 0; i < 4; i++)
+ buf[i] = le32_to_cpu(buf[i]);
+
+- policy_ver = buf[0];
+- if (policy_ver != POLICYDB_VERSION && policy_ver != POLICYDB_VERSION_COMPAT) {
+- printk(KERN_ERR "security: policydb version %d does not match "
+- "my version %d\n", buf[0], POLICYDB_VERSION);
+- goto bad;
++ r_policyvers = buf[0];
++ if (r_policyvers < POLICYDB_VERSION_MIN ||
++ r_policyvers > POLICYDB_VERSION_MAX) {
++ printk(KERN_ERR "security: policydb version %d does not match "
++ "my version range %d-%d\n",
++ buf[0], POLICYDB_VERSION_MIN, POLICYDB_VERSION_MAX);
++ goto bad;
+ }
++
+ if (buf[1] != config) {
+ printk(KERN_ERR "security: policydb configuration (%s) does "
+ "not match my configuration (%s)\n",
+@@ -1164,30 +1207,27 @@
+ mls_config(config));
+ goto bad;
+ }
++
+
+- if (policy_ver == POLICYDB_VERSION_COMPAT) {
+- if (buf[2] != (SYM_NUM - 1) || buf[3] != OCON_NUM) {
+- printk(KERN_ERR "security: policydb table sizes (%d,%d) do "
+- "not match mine (%d,%d)\n",
+- buf[2], buf[3], SYM_NUM, OCON_NUM);
+- goto bad;
+- }
+- num_syms = SYM_NUM - 1;
+- } else {
+- if (buf[2] != SYM_NUM || buf[3] != OCON_NUM) {
+- printk(KERN_ERR "security: policydb table sizes (%d,%d) do "
+- "not match mine (%d,%d)\n",
+- buf[2], buf[3], SYM_NUM, OCON_NUM);
+- goto bad;
+- }
+- num_syms = SYM_NUM;
++ info = policydb_lookup_compat(r_policyvers);
++ if (!info) {
++ printk(KERN_ERR "security: unable to find policy compat info "
++ "for version %d\n", r_policyvers);
++ goto bad;
++ }
++
++ if (buf[2] != info->sym_num || buf[3] != info->ocon_num) {
++ printk(KERN_ERR "security: policydb table sizes (%d,%d) do "
++ "not match mine (%d,%d)\n", buf[2], buf[3],
++ info->sym_num, info->ocon_num);
++ goto bad;
+ }
+
+ rc = mls_read_nlevels(p, fp);
+ if (rc)
+ goto bad;
+
+- for (i = 0; i < num_syms; i++) {
++ for (i = 0; i < info->sym_num; i++) {
+ buf = next_entry(fp, sizeof(u32)*2);
+ if (!buf) {
+ rc = -EINVAL;
+@@ -1208,7 +1248,7 @@
+ if (rc)
+ goto bad;
+
+- if (policy_ver == POLICYDB_VERSION) {
++ if (r_policyvers >= POLICYDB_VERSION_BOOL) {
+ rc = cond_read_list(p, fp);
+ if (rc)
+ goto bad;
+@@ -1281,7 +1321,7 @@
+ if (rc)
+ goto bad;
+
+- for (i = 0; i < OCON_NUM; i++) {
++ for (i = 0; i < info->ocon_num; i++) {
+ buf = next_entry(fp, sizeof(u32));
+ if (!buf) {
+ rc = -EINVAL;
+@@ -1379,6 +1419,20 @@
+ if (rc)
+ goto bad;
+ break;
++ case OCON_NODE6: {
++ int k;
++
++ buf = next_entry(fp, sizeof(u32) * 8);
++ if (!buf)
++ goto bad;
++ for (k = 0; k < 4; k++)
++ c->u.node6.addr[k] = le32_to_cpu(buf[k]);
++ for (k = 0; k < 4; k++)
++ c->u.node6.mask[k] = le32_to_cpu(buf[k+4]);
++ if (context_read_and_validate(&c->context[0], p, fp))
++ goto bad;
++ break;
++ }
+ }
+ }
+ }
+Index: linux-2.6/security/selinux/ss/policydb.h
+diff -u linux-2.6/security/selinux/ss/policydb.h:1.1.1.3 linux-2.6/security/selinux/ss/policydb.h:1.20
+--- linux-2.6/security/selinux/ss/policydb.h:1.1.1.3 Mon Apr 5 08:11:25 2004
++++ linux-2.6/security/selinux/ss/policydb.h Wed Apr 7 12:18:49 2004
+@@ -138,6 +138,10 @@
+ u32 addr;
+ u32 mask;
+ } node; /* node information */
++ struct {
++ u32 addr[4];
++ u32 mask[4];
++ } node6; /* IPv6 node information */
+ } u;
+ union {
+ u32 sclass; /* security class for genfs */
+@@ -177,7 +181,8 @@
+ #define OCON_NETIF 3 /* network interfaces */
+ #define OCON_NODE 4 /* nodes */
+ #define OCON_FSUSE 5 /* fs_use */
+-#define OCON_NUM 6
++#define OCON_NODE6 6 /* IPv6 nodes */
++#define OCON_NUM 7
+
+ /* The policy database */
+ struct policydb {
+Index: linux-2.6/security/selinux/ss/services.c
+diff -u linux-2.6/security/selinux/ss/services.c:1.1.1.6 linux-2.6/security/selinux/ss/services.c:1.39
+--- linux-2.6/security/selinux/ss/services.c:1.1.1.6 Mon Apr 5 08:11:26 2004
++++ linux-2.6/security/selinux/ss/services.c Wed Apr 7 12:18:49 2004
+@@ -548,32 +548,34 @@
+ return rc;
+ }
+
+-static inline int compute_sid_handle_invalid_context(
++static int compute_sid_handle_invalid_context(
+ struct context *scontext,
+ struct context *tcontext,
+ u16 tclass,
+ struct context *newcontext)
+ {
+- int rc = 0;
++ char *s = NULL, *t = NULL, *n = NULL;
++ u32 slen, tlen, nlen;
+
+- if (selinux_enforcing) {
+- rc = -EACCES;
+- } else {
+- char *s, *t, *n;
+- u32 slen, tlen, nlen;
+-
+- context_struct_to_string(scontext, &s, &slen);
+- context_struct_to_string(tcontext, &t, &tlen);
+- context_struct_to_string(newcontext, &n, &nlen);
+- printk(KERN_ERR "security_compute_sid: invalid context %s", n);
+- printk(" for scontext=%s", s);
+- printk(" tcontext=%s", t);
+- printk(" tclass=%s\n", policydb.p_class_val_to_name[tclass-1]);
+- kfree(s);
+- kfree(t);
+- kfree(n);
+- }
+- return rc;
++ if (context_struct_to_string(scontext, &s, &slen) < 0)
++ goto out;
++ if (context_struct_to_string(tcontext, &t, &tlen) < 0)
++ goto out;
++ if (context_struct_to_string(newcontext, &n, &nlen) < 0)
++ goto out;
++ printk(KERN_WARNING
++ "security_compute_sid: invalid context %s"
++ " for scontext=%s"
++ " tcontext=%s"
++ " tclass=%s",
++ n, s, t, policydb.p_class_val_to_name[tclass-1]);
++out:
++ kfree(s);
++ kfree(t);
++ kfree(n);
++ if (!selinux_enforcing)
++ return 0;
++ return -EACCES;
+ }
+
+ static int security_compute_sid(u32 ssid,
+@@ -1187,6 +1189,18 @@
+ return rc;
+ }
+
++static int match_ipv6_addrmask(u32 *input, u32 *addr, u32 *mask)
++{
++ int i, fail = 0;
++
++ for(i = 0; i < 4; i++)
++ if(addr[i] != (input[i] & mask[i])) {
++ fail = 1;
++ break;
++ }
++
++ return !fail;
++}
+
+ /**
+ * security_node_sid - Obtain the SID for a node (host).
+@@ -1201,22 +1215,47 @@
+ u32 *out_sid)
+ {
+ int rc = 0;
+- u32 addr;
+ struct ocontext *c;
+
+ POLICY_RDLOCK;
+
+- if (domain != AF_INET || addrlen != sizeof(u32)) {
+- *out_sid = SECINITSID_NODE;
+- goto out;
++ switch (domain) {
++ case AF_INET: {
++ u32 addr;
++
++ if (addrlen != sizeof(u32)) {
++ rc = -EINVAL;
++ goto out;
++ }
++
++ addr = *((u32 *)addrp);
++
++ c = policydb.ocontexts[OCON_NODE];
++ while (c) {
++ if (c->u.node.addr == (addr & c->u.node.mask))
++ break;
++ c = c->next;
++ }
++ break;
+ }
+- addr = *((u32 *)addrp);
+
+- c = policydb.ocontexts[OCON_NODE];
+- while (c) {
+- if (c->u.node.addr == (addr & c->u.node.mask))
+- break;
+- c = c->next;
++ case AF_INET6:
++ if (addrlen != sizeof(u64) * 2) {
++ rc = -EINVAL;
++ goto out;
++ }
++ c = policydb.ocontexts[OCON_NODE6];
++ while (c) {
++ if (match_ipv6_addrmask(addrp, c->u.node6.addr,
++ c->u.node6.mask))
++ break;
++ c = c->next;
++ }
++ break;
++
++ default:
++ *out_sid = SECINITSID_NODE;
++ goto out;
+ }
+
+ if (c) {
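Not part of the patch: a standalone sketch of the match_ipv6_addrmask() comparison added to services.c above, showing how an IPv6 node context's address/mask pair is matched one 32-bit word at a time; the fe80::/64 policy entry used here is hypothetical.

/*
 * Userspace replica of the IPv6 node matching added above.
 * The address and mask values are illustrative only.
 */
#include <stdio.h>
#include <arpa/inet.h>

typedef unsigned int u32;

/* every masked input word must equal the stored address word */
static int match_ipv6_addrmask(const u32 *input, const u32 *addr, const u32 *mask)
{
        int i;

        for (i = 0; i < 4; i++)
                if (addr[i] != (input[i] & mask[i]))
                        return 0;
        return 1;
}

int main(void)
{
        u32 input[4], addr[4], mask[4];

        /* does fe80::1 fall inside a hypothetical fe80::/64 node entry? */
        inet_pton(AF_INET6, "fe80::1", input);
        inet_pton(AF_INET6, "fe80::", addr);
        inet_pton(AF_INET6, "ffff:ffff:ffff:ffff::", mask);

        printf("match: %d\n", match_ipv6_addrmask(input, addr, mask));
        return 0;
}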
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2005_modules_off-2.6.3.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2005_modules_off-2.6.3.patch
new file mode 100644
index 0000000000..73d9f8c411
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2005_modules_off-2.6.3.patch
@@ -0,0 +1,75 @@
+diff -uprN -X dontdiff linux-2.6.3-vanilla/fs/proc/proc_misc.c modules_off/linux-2.6.3-modules_off/fs/proc/proc_misc.c
+--- linux-2.6.3-vanilla/fs/proc/proc_misc.c 2004-02-18 04:57:16.000000000 +0100
++++ modules_off/linux-2.6.3-modules_off/fs/proc/proc_misc.c 2004-03-09 19:10:10.016286832 +0100
+@@ -70,6 +70,7 @@ extern int get_locks_status (char *, cha
+ #ifdef CONFIG_SGI_DS1286
+ extern int get_ds1286_status(char *);
+ #endif
++extern int module_loading_enabled;
+
+ static int proc_calc_metrics(char *page, char **start, off_t off,
+ int count, int *eof, int len)
+@@ -334,9 +335,31 @@ static int modules_open(struct inode *in
+ {
+ return seq_open(file, &modules_op);
+ }
++
++static int modules_write(struct file *file, const char __user *buf,
++ size_t count, loff_t *ppos)
++{
++ char tmp[4];
++
++ if (!count)
++ return -EINVAL;
++ if (count > 4)
++ count = 4;
++ if (copy_from_user(&tmp[0], buf, count))
++ return -EFAULT;
++ if (tmp[0]=='o') {
++ printk(KERN_INFO "Disabled module (un)loading\n");
++ module_loading_enabled = 0;
++ }
++
++ return count;
++
++}
++
+ static struct file_operations proc_modules_operations = {
+ .open = modules_open,
+ .read = seq_read,
++ .write = modules_write,
+ .llseek = seq_lseek,
+ .release = seq_release,
+ };
+diff -uprN -X dontdiff linux-2.6.3-vanilla/kernel/module.c modules_off/linux-2.6.3-modules_off/kernel/module.c
+--- linux-2.6.3-vanilla/kernel/module.c 2004-02-18 04:58:48.000000000 +0100
++++ modules_off/linux-2.6.3-modules_off/kernel/module.c 2004-03-09 18:47:10.709973264 +0100
+@@ -53,6 +53,8 @@
+ #define symbol_is(literal, string) \
+ (strcmp(MODULE_SYMBOL_PREFIX literal, (string)) == 0)
+
++int module_loading_enabled = 1;
++
+ /* Protects module list */
+ static spinlock_t modlist_lock = SPIN_LOCK_UNLOCKED;
+
+@@ -667,6 +669,9 @@ sys_delete_module(const char __user *nam
+
+ if (!capable(CAP_SYS_MODULE))
+ return -EPERM;
++
++ if (!module_loading_enabled)
++ return -ENOSYS;
+
+ if (strncpy_from_user(name, name_user, MODULE_NAME_LEN-1) < 0)
+ return -EFAULT;
+@@ -1720,6 +1725,9 @@ sys_init_module(void __user *umod,
+ /* Must have permission */
+ if (!capable(CAP_SYS_MODULE))
+ return -EPERM;
++
++ if (!module_loading_enabled)
++ return -ENOSYS;
+
+ /* Only one module load at a time, please */
+ if (down_interruptible(&module_mutex) != 0)
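With this patch applied, module loading and unloading can be switched off for the rest of the uptime by writing a string whose first byte is 'o' (for example "off", or simply echo off > /proc/modules) to /proc/modules; modules_write() only inspects the first character, and the hunks shown provide no way to turn loading back on. A minimal userspace sketch follows, assuming the procfs entry accepts the write for root (the hunk adds the .write handler but is not shown changing the entry's permissions).

/*
 * Illustrative only: disable module (un)loading via the modules_write()
 * handler added above.  Run as root; assumes the /proc/modules entry
 * permits the write.
 */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
        int fd = open("/proc/modules", O_WRONLY);

        if (fd < 0) {
                perror("open /proc/modules");
                return 1;
        }
        if (write(fd, "off", 3) != 3) {
                perror("write");
                close(fd);
                return 1;
        }
        close(fd);
        puts("module (un)loading disabled until reboot");
        return 0;
}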
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2010_tcp-stealth.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2010_tcp-stealth.patch
new file mode 100644
index 0000000000..0c72334361
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2010_tcp-stealth.patch
@@ -0,0 +1,184 @@
+diff -ruN linux-2.6.4/include/linux/sysctl.h linux-2.6.4-new/include/linux/sysctl.h
+--- linux-2.6.4/include/linux/sysctl.h 2004-03-10 21:55:28.000000000 -0500
++++ linux-2.6.4-new/include/linux/sysctl.h 2004-03-28 15:37:06.829571810 -0500
+@@ -322,6 +322,10 @@
+ NET_IPV4_IPFRAG_SECRET_INTERVAL=94,
+ NET_TCP_WESTWOOD=95,
+ NET_IPV4_IGMP_MAX_MSF=96,
++ NET_IPV4_IP_MASQ_UDP_DLOOSE=97,
++ NET_TCP_STACK_SYNFIN=98,
++ NET_TCP_STACK_BOGUS=99,
++ NET_TCP_STACK_ACK=100,
+ };
+
+ enum {
+diff -ruN linux-2.6.4/net/ipv4/Kconfig linux-2.6.4-new/net/ipv4/Kconfig
+--- linux-2.6.4/net/ipv4/Kconfig 2004-03-10 21:55:37.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/Kconfig 2004-03-28 15:37:06.842568736 -0500
+@@ -343,6 +343,29 @@
+
+ If unsure, say N.
+
++config NET_STEALTH
++ bool "IP: TCP stealth options (enabled per default)"
++ depends on INET
++ default n
++ ---help---
++ If you say Y here, these checks are compiled in and enabled by
++ default; you can disable them at runtime by executing the commands
++
++ echo 0 >/proc/sys/net/ipv4/tcp_ignore_ack
++ echo 0 >/proc/sys/net/ipv4/tcp_ignore_bogus
++ echo 0 >/proc/sys/net/ipv4/tcp_ignore_synfin
++
++ at boot time after the /proc file system has been mounted.
++
++ If security is more important, say Y.
++
++config NET_STEALTH_LOG
++ bool 'Log all dropped packets'
++ depends on NET_STEALTH
++ ---help---
++ This turns on a logging facility that logs all tcp packets with
++ bad flags. If you said Y to "TCP stealth options", say Y too.
++
+ config INET_AH
+ tristate "IP: AH transformation"
+ select XFRM
+diff -ruN linux-2.6.4/net/ipv4/sysctl_net_ipv4.c linux-2.6.4-new/net/ipv4/sysctl_net_ipv4.c
+--- linux-2.6.4/net/ipv4/sysctl_net_ipv4.c 2004-03-10 21:55:37.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/sysctl_net_ipv4.c 2004-03-28 15:37:06.852566370 -0500
+@@ -48,6 +48,11 @@
+ extern int inet_peer_gc_mintime;
+ extern int inet_peer_gc_maxtime;
+
++/* stealth stuff */
++extern int sysctl_tcp_ignore_synfin;
++extern int sysctl_tcp_ignore_bogus;
++extern int sysctl_tcp_ignore_ack;
++
+ #ifdef CONFIG_SYSCTL
+ static int tcp_retr1_max = 255;
+ static int ip_local_port_range_min[] = { 1, 1 };
+@@ -319,6 +324,32 @@
+ .proc_handler = &proc_dointvec
+ },
+ #endif
++#ifdef CONFIG_NET_STEALTH
++ {
++ .ctl_name = NET_TCP_STACK_SYNFIN,
++ .procname = "tcp_ignore_synfin",
++ .data = &sysctl_tcp_ignore_synfin,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .ctl_name = NET_TCP_STACK_BOGUS,
++ .procname = "tcp_ignore_bogus",
++ .data = &sysctl_tcp_ignore_bogus,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .ctl_name = NET_TCP_STACK_ACK,
++ .procname = "tcp_ignore_ack",
++ .data = &sysctl_tcp_ignore_ack,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++#endif
+ {
+ .ctl_name = NET_TCP_TW_RECYCLE,
+ .procname = "tcp_tw_recycle",
+diff -ruN linux-2.6.4/net/ipv4/tcp_input.c linux-2.6.4-new/net/ipv4/tcp_input.c
+--- linux-2.6.4/net/ipv4/tcp_input.c 2004-03-10 21:55:37.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/tcp_input.c 2004-03-28 15:37:06.000000000 -0500
+@@ -75,6 +75,11 @@
+ int sysctl_tcp_timestamps = 1;
+ int sysctl_tcp_window_scaling = 1;
+ int sysctl_tcp_sack = 1;
++#ifdef CONFIG_NET_STEALTH
++int sysctl_tcp_ignore_synfin = 1;
++int sysctl_tcp_ignore_bogus = 1;
++int sysctl_tcp_ignore_ack = 1;
++#endif
+ int sysctl_tcp_fack = 1;
+ int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
+ #ifdef CONFIG_INET_ECN
+diff -ruN linux-2.6.4/net/ipv4/tcp_ipv4.c linux-2.6.4-new/net/ipv4/tcp_ipv4.c
+--- linux-2.6.4/net/ipv4/tcp_ipv4.c 2004-03-10 21:55:25.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/tcp_ipv4.c 2004-03-28 15:37:06.000000000 -0500
+@@ -79,6 +79,12 @@
+ int sysctl_tcp_tw_reuse;
+ int sysctl_tcp_low_latency;
+
++#ifdef CONFIG_NET_STEALTH
++extern int sysctl_tcp_ignore_synfin;
++extern int sysctl_tcp_ignore_bogus;
++extern int sysctl_tcp_ignore_ack;
++#endif
++
+ /* Check TCP sequence numbers in ICMP packets. */
+ #define ICMP_MIN_LENGTH 8
+
+@@ -1763,6 +1769,23 @@
+ tcp_v4_checksum_init(skb) < 0))
+ goto bad_packet;
+
++#ifdef CONFIG_NET_STEALTH
++ if(sysctl_tcp_ignore_synfin) {
++ if(th->fin && th->syn)
++ goto tcp_bad_flags;
++ }
++
++ if(sysctl_tcp_ignore_bogus) {
++ if(!(th->ack || th->syn || th->rst) || th->res1)
++ goto tcp_bad_flags;
++ }
++
++ if(sysctl_tcp_ignore_ack) {
++ if(th->fin && th->psh && th->urg)
++ goto tcp_bad_flags;
++ }
++#endif
++
+ th = skb->h.th;
+ TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+@@ -1804,6 +1827,33 @@
+
+ return ret;
+
++#ifdef CONFIG_NET_STEALTH_LOG
++tcp_bad_flags:
++ printk(KERN_INFO
++ "Packet log: badflag DENY %s PROTO=TCP %d.%d.%d.%d:%d "
++ "%d.%d.%d.%d:%d L=%hu:%u:%u S=0x%2.2hX I=%hu:%u:%u "
++ "T=%hu %c%c%c%c%c%c%c%c%c\n",
++ skb->dev->name, NIPQUAD(skb->nh.iph->saddr), ntohs(th->source),
++ NIPQUAD(skb->nh.iph->daddr), ntohs(th->dest),
++ ntohs(skb->nh.iph->tot_len), skb->len, skb->len - th->doff*4,
++ skb->nh.iph->tos, ntohs(skb->nh.iph->id), ntohl(th->seq),
++ ntohl(th->ack_seq), skb->nh.iph->ttl,
++ th->res1 ? '1' : '.',
++ th->ece ? 'E' : '.',
++ th->cwr ? 'C' : '.',
++ th->ack ? 'A' : '.',
++ th->syn ? 'S' : '.',
++ th->fin ? 'F' : '.',
++ th->rst ? 'R' : '.',
++ th->psh ? 'P' : '.',
++ th->urg ? 'U' : '.' );
++ goto bad_packet;
++#else
++tcp_bad_flags:
++ goto bad_packet;
++
++#endif /* CONFIG_NET_STEALTH_LOG */
++
+ no_tcp_socket:
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto discard_it;
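Not part of the patch: a standalone model of the flag checks inserted into tcp_v4_rcv() above, with all three sysctls assumed at their compiled-in default of 1. The sample combinations mirror classic scan probes and are illustrative only; note that a FIN/PSH/URG probe with none of ACK, SYN, or RST set is already rejected by the tcp_ignore_bogus rule before the third check is reached.

/*
 * Userspace model of the tcp-stealth flag checks, in the same order as
 * the patch: SYN+FIN, then "bogus" (no ACK/SYN/RST or reserved bits),
 * then FIN+PSH+URG.  All three sysctls are treated as enabled.
 */
#include <stdio.h>

#define FIN 0x01
#define SYN 0x02
#define RST 0x04
#define PSH 0x08
#define ACK 0x10
#define URG 0x20

static const char *classify(unsigned int flags, unsigned int res1)
{
        if ((flags & SYN) && (flags & FIN))
                return "dropped: tcp_ignore_synfin";
        if (!(flags & (ACK | SYN | RST)) || res1)
                return "dropped: tcp_ignore_bogus";
        if ((flags & FIN) && (flags & PSH) && (flags & URG))
                return "dropped: tcp_ignore_ack";
        return "passed to normal TCP processing";
}

int main(void)
{
        printf("SYN|FIN          -> %s\n", classify(SYN | FIN, 0));
        printf("no flags (NULL)  -> %s\n", classify(0, 0));
        printf("FIN|PSH|URG      -> %s\n", classify(FIN | PSH | URG, 0));
        printf("FIN|PSH|URG|ACK  -> %s\n", classify(FIN | PSH | URG | ACK, 0));
        printf("plain SYN        -> %s\n", classify(SYN, 0));
        return 0;
}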
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2015_tcp-nmap-freak.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2015_tcp-nmap-freak.patch
new file mode 100644
index 0000000000..6b5ad0775b
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/2015_tcp-nmap-freak.patch
@@ -0,0 +1,130 @@
+diff -ruN linux-2.6.4/include/linux/sysctl.h linux-2.6.4-new/include/linux/sysctl.h
+--- linux-2.6.4/include/linux/sysctl.h 2004-03-28 15:23:41.616059884 -0500
++++ linux-2.6.4-new/include/linux/sysctl.h 2004-03-28 15:23:56.000000000 -0500
+@@ -326,6 +326,8 @@
+ NET_TCP_STACK_SYNFIN=98,
+ NET_TCP_STACK_BOGUS=99,
+ NET_TCP_STACK_ACK=100,
++ NET_IPV4_ICMP_RESTRICT=101,
++ NET_IPV4_TCP_RESTRICT=102,
+ };
+
+ enum {
+diff -ruN linux-2.6.4/net/ipv4/Kconfig linux-2.6.4-new/net/ipv4/Kconfig
+--- linux-2.6.4/net/ipv4/Kconfig 2004-03-28 15:23:41.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/Kconfig 2004-03-28 15:23:56.000000000 -0500
+@@ -366,6 +366,21 @@
+ This turns on a logging facility that logs all tcp packets with
+ bad flags. If you said Y to "TCP stealth options", say Y too.
+
++config IP_NMAP_FREAK
++ bool "IP: NMAP freak (disabled per default)"
++ depends on INET
++ default n
++ ---help---
++ This is a feature to prevent stealth, FIN, and RST scans and to slow
++ down TCP connect scans; it also hides the operating system from
++ fingerprinting.
++
++ You can turn this on (1) and off (0) via /proc:
++
++ echo 1 > /proc/sys/net/ipv4/tcp_restrict
++ echo 1 > /proc/sys/net/ipv4/icmp_restrict
++
++ If unsure, say N.
++
+ config INET_AH
+ tristate "IP: AH transformation"
+ select XFRM
+diff -ruN linux-2.6.4/net/ipv4/icmp.c linux-2.6.4-new/net/ipv4/icmp.c
+--- linux-2.6.4/net/ipv4/icmp.c 2004-03-10 21:55:44.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/icmp.c 2004-03-28 15:23:56.000000000 -0500
+@@ -190,6 +190,10 @@
+ int sysctl_icmp_echo_ignore_all;
+ int sysctl_icmp_echo_ignore_broadcasts;
+
++#ifdef CONFIG_IP_NMAP_FREAK
++int sysctl_icmp_restrict = 0;
++#endif
++
+ /* Control parameter - ignore bogus broadcast responses? */
+ int sysctl_icmp_ignore_bogus_error_responses;
+
+@@ -782,7 +786,12 @@
+ icmp_param.offset = 0;
+ icmp_param.data_len = skb->len;
+ icmp_param.head_len = sizeof(struct icmphdr);
++#ifdef CONFIG_IP_NMAP_FREAK
++ if (!sysctl_icmp_restrict)
++ icmp_reply(&icmp_param, skb);
++#else
+ icmp_reply(&icmp_param, skb);
++#endif
+ }
+ }
+
+diff -ruN linux-2.6.4/net/ipv4/sysctl_net_ipv4.c linux-2.6.4-new/net/ipv4/sysctl_net_ipv4.c
+--- linux-2.6.4/net/ipv4/sysctl_net_ipv4.c 2004-03-28 15:23:41.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/sysctl_net_ipv4.c 2004-03-28 15:23:56.000000000 -0500
+@@ -37,6 +37,11 @@
+ extern int sysctl_icmp_ratelimit;
+ extern int sysctl_icmp_ratemask;
+
++#ifdef CONFIG_IP_NMAP_FREAK
++extern int sysctl_icmp_restrict;
++extern int sysctl_tcp_restrict;
++#endif
++
+ /* From igmp.c */
+ extern int sysctl_igmp_max_memberships;
+ extern int sysctl_igmp_max_msf;
+@@ -350,6 +355,24 @@
+ .proc_handler = &proc_dointvec
+ },
+ #endif
++#ifdef CONFIG_IP_NMAP_FREAK
++ {
++ .ctl_name = NET_IPV4_ICMP_RESTRICT,
++ .procname = "icmp_restrict",
++ .data = &sysctl_icmp_restrict,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .ctl_name = NET_IPV4_TCP_RESTRICT,
++ .procname = "tcp_restrict",
++ .data = &sysctl_tcp_restrict,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++#endif
+ {
+ .ctl_name = NET_TCP_TW_RECYCLE,
+ .procname = "tcp_tw_recycle",
+diff -ruN linux-2.6.4/net/ipv4/tcp_ipv4.c linux-2.6.4-new/net/ipv4/tcp_ipv4.c
+--- linux-2.6.4/net/ipv4/tcp_ipv4.c 2004-03-28 15:23:41.000000000 -0500
++++ linux-2.6.4-new/net/ipv4/tcp_ipv4.c 2004-03-28 15:23:56.000000000 -0500
+@@ -85,6 +85,10 @@
+ extern int sysctl_tcp_ignore_ack;
+ #endif
+
++#ifdef CONFIG_IP_NMAP_FREAK
++int sysctl_tcp_restrict = 0;
++#endif
++
+ /* Check TCP sequence numbers in ICMP packets. */
+ #define ICMP_MIN_LENGTH 8
+
+@@ -1179,6 +1183,11 @@
+ struct tcphdr rth;
+ struct ip_reply_arg arg;
+
++#ifdef CONFIG_IP_NMAP_FREAK
++ if (sysctl_tcp_restrict)
++ return;
++#endif
++
+ /* Never send a reset in response to a reset. */
+ if (th->rst)
+ return;
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/3005_netdev-random-core-2.6.3.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/3005_netdev-random-core-2.6.3.patch
new file mode 100644
index 0000000000..a59339cd4b
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/3005_netdev-random-core-2.6.3.patch
@@ -0,0 +1,283 @@
+diff -uprN -X dontdiff linux-2.6.3/drivers/net/Kconfig linux-2.6.3-netdev-random/drivers/net/Kconfig
+--- linux-2.6.3/drivers/net/Kconfig 2004-02-18 04:59:18.000000000 +0100
++++ linux-2.6.3-netdev-random/drivers/net/Kconfig 2004-02-26 19:14:33.162880272 +0100
+@@ -108,6 +108,22 @@ config TUN
+
+ If you don't know what to use this for, you don't need it.
+
++config NET_RANDOM
++ bool "Allow Net Devices to contribute to /dev/random"
++ depends on NETDEVICES && EXPERIMENTAL
++ ---help---
++ If you say Y here, network device interrupts will contribute to the
++ kernel entropy pool. Normally, block devices and some other devices
++ feed the pool. Some systems, such as those that are headless or diskless,
++ need additional entropy sources. Some people, however, feel that network
++ devices should not contribute to /dev/random because an external attacker
++ could observe incoming packets in an attempt to learn the entropy pool's
++ state. If you say N, no network device will contribute entropy.
++
++ If you believe there is a chance of your network packets being observed
++ and you doubt the security of the entropy pool's one-way hash, do not
++ enable this. If unsure, say N.
++
+ config ETHERTAP
+ tristate "Ethertap network tap"
+ depends on NETDEVICES && EXPERIMENTAL && NETLINK_DEV
+diff -uprN -X dontdiff linux-2.6.2/include/asm-alpha/signal.h linux-2.6.2-netdev-random/include/asm-alpha/signal.h
+--- linux-2.6.2/include/asm-alpha/signal.h 2004-02-04 04:43:49.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-alpha/signal.h 2004-02-16 22:59:59.567750000 +0100
+@@ -121,8 +121,15 @@ typedef unsigned long sigset_t;
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x40000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 1 /* for blocking signals */
+ #define SIG_UNBLOCK 2 /* for unblocking signals */
+ #define SIG_SETMASK 3 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-arm/signal.h linux-2.6.2-netdev-random/include/asm-arm/signal.h
+--- linux-2.6.2/include/asm-arm/signal.h 2004-02-04 04:43:46.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-arm/signal.h 2004-02-16 22:59:59.577748480 +0100
+@@ -126,8 +126,15 @@ typedef unsigned long sigset_t;
+ #define SA_SAMPLE_RANDOM 0x10000000
+ #define SA_IRQNOMASK 0x08000000
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-cris/signal.h linux-2.6.2-netdev-random/include/asm-cris/signal.h
+--- linux-2.6.2/include/asm-cris/signal.h 2004-02-04 04:44:21.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-cris/signal.h 2004-02-16 22:59:59.587746960 +0100
+@@ -120,8 +120,15 @@ typedef unsigned long sigset_t;
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-i386/signal.h linux-2.6.2-netdev-random/include/asm-i386/signal.h
+--- linux-2.6.2/include/asm-i386/signal.h 2004-02-04 04:43:15.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-i386/signal.h 2004-02-16 22:59:59.708728568 +0100
+@@ -121,8 +121,15 @@ typedef unsigned long sigset_t;
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-ia64/signal.h linux-2.6.2-netdev-random/include/asm-ia64/signal.h
+--- linux-2.6.2/include/asm-ia64/signal.h 2004-02-04 04:43:11.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-ia64/signal.h 2004-02-16 22:59:59.729725376 +0100
+@@ -126,6 +126,12 @@
+ #define SA_SHIRQ 0x04000000
+ #define SA_PERCPU_IRQ 0x02000000
+
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 0 /* for blocking signals */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-m68k/signal.h linux-2.6.2-netdev-random/include/asm-m68k/signal.h
+--- linux-2.6.2/include/asm-m68k/signal.h 2004-02-04 04:43:15.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-m68k/signal.h 2004-02-16 22:59:59.739723856 +0100
+@@ -116,8 +116,15 @@ typedef unsigned long sigset_t;
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-mips/signal.h linux-2.6.2-netdev-random/include/asm-mips/signal.h
+--- linux-2.6.2/include/asm-mips/signal.h 2004-02-04 04:43:41.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-mips/signal.h 2004-02-16 22:59:59.748722488 +0100
+@@ -111,6 +111,12 @@ typedef unsigned long old_sigset_t; /*
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x02000000
+
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 1 /* for blocking signals */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-parisc/signal.h linux-2.6.2-netdev-random/include/asm-parisc/signal.h
+--- linux-2.6.2/include/asm-parisc/signal.h 2004-02-04 04:44:55.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-parisc/signal.h 2004-02-16 23:00:00.530603624 +0100
+@@ -100,6 +100,12 @@
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
+
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 0 /* for blocking signals */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-ppc64/signal.h linux-2.6.2-netdev-random/include/asm-ppc64/signal.h
+--- linux-2.6.2/include/asm-ppc64/signal.h 2004-02-04 04:43:43.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-ppc64/signal.h 2004-02-16 23:00:00.575596784 +0100
+@@ -107,8 +107,15 @@ typedef struct {
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-s390/signal.h linux-2.6.2-netdev-random/include/asm-s390/signal.h
+--- linux-2.6.2/include/asm-s390/signal.h 2004-02-04 04:44:05.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-s390/signal.h 2004-02-16 23:00:00.593594048 +0100
+@@ -129,8 +129,15 @@ typedef unsigned long sigset_t;
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-sh/signal.h linux-2.6.2-netdev-random/include/asm-sh/signal.h
+--- linux-2.6.2/include/asm-sh/signal.h 2004-02-04 04:43:56.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-sh/signal.h 2004-02-16 23:00:01.436465912 +0100
+@@ -108,8 +108,15 @@ typedef struct {
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -uprN -X dontdiff linux-2.6.2/include/asm-sparc/signal.h linux-2.6.2-netdev-random/include/asm-sparc/signal.h
+--- linux-2.6.2/include/asm-sparc/signal.h 2004-02-04 04:43:57.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-sparc/signal.h 2004-02-16 23:00:01.447464240 +0100
+@@ -176,8 +176,15 @@ struct sigstack {
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_STATIC_ALLOC 0x80
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ /* Type of a signal handler. */
+ #ifdef __KERNEL__
+ typedef void (*__sighandler_t)(int, int, struct sigcontext *, char *);
+diff -uprN -X dontdiff linux-2.6.2/include/asm-sparc64/signal.h linux-2.6.2-netdev-random/include/asm-sparc64/signal.h
+--- linux-2.6.2/include/asm-sparc64/signal.h 2004-02-04 04:43:20.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-sparc64/signal.h 2004-02-16 23:00:01.465461504 +0100
+@@ -182,8 +182,15 @@ struct sigstack {
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_STATIC_ALLOC 0x80
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ /* Type of a signal handler. */
+ #ifdef __KERNEL__
+ typedef void (*__sighandler_t)(int, struct sigcontext *);
+diff -uprN -X dontdiff linux-2.6.2/include/asm-x86_64/signal.h linux-2.6.2-netdev-random/include/asm-x86_64/signal.h
+--- linux-2.6.2/include/asm-x86_64/signal.h 2004-02-04 04:43:06.000000000 +0100
++++ linux-2.6.2-netdev-random/include/asm-x86_64/signal.h 2004-02-16 23:00:01.484458616 +0100
+@@ -128,8 +128,15 @@ typedef unsigned long sigset_t;
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/3010_netdev-random-drivers-2.6.5.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/3010_netdev-random-drivers-2.6.5.patch
new file mode 100644
index 0000000000..6a7ebb85a7
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/3010_netdev-random-drivers-2.6.5.patch
@@ -0,0 +1,2346 @@
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c501.c linux-2.6.4-hardened-new/drivers/net/3c501.c
+--- linux-2.6.4-hardened/drivers/net/3c501.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c501.c 2004-03-15 03:29:33.749052936 +0100
+@@ -347,7 +347,7 @@ static int el_open(struct net_device *de
+ if (el_debug > 2)
+ printk(KERN_DEBUG "%s: Doing el_open()...", dev->name);
+
+- if ((retval = request_irq(dev->irq, &el_interrupt, 0, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, &el_interrupt, SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ spin_lock_irqsave(&lp->lock, flags);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c503.c linux-2.6.4-hardened-new/drivers/net/3c503.c
+--- linux-2.6.4-hardened/drivers/net/3c503.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c503.c 2004-03-15 03:29:33.760051264 +0100
+@@ -377,7 +377,7 @@ el2_open(struct net_device *dev)
+ outb_p(0x00, E33G_IDCFR);
+ if (*irqp == probe_irq_off(cookie) /* It's a good IRQ line! */
+ && ((retval = request_irq(dev->irq = *irqp,
+- ei_interrupt, 0, dev->name, dev)) == 0))
++ ei_interrupt, SA_NET_RANDOM, dev->name, dev)) == 0))
+ break;
+ }
+ } while (*++irqp);
+@@ -386,7 +386,7 @@ el2_open(struct net_device *dev)
+ return retval;
+ }
+ } else {
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ return retval;
+ }
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c505.c linux-2.6.4-hardened-new/drivers/net/3c505.c
+--- linux-2.6.4-hardened/drivers/net/3c505.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c505.c 2004-03-15 03:29:33.782047920 +0100
+@@ -905,7 +905,7 @@ static int elp_open(struct net_device *d
+ /*
+ * install our interrupt service routine
+ */
+- if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, &elp_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: could not allocate IRQ%d\n", dev->name, dev->irq);
+ return retval;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c507.c linux-2.6.4-hardened-new/drivers/net/3c507.c
+--- linux-2.6.4-hardened/drivers/net/3c507.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c507.c 2004-03-15 03:29:33.794046096 +0100
+@@ -392,7 +392,7 @@ static int __init el16_probe1(struct net
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+- irqval = request_irq(irq, &el16_interrupt, 0, dev->name, dev);
++ irqval = request_irq(irq, &el16_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (irqval) {
+ printk ("unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ retval = -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c509.c linux-2.6.4-hardened-new/drivers/net/3c509.c
+--- linux-2.6.4-hardened/drivers/net/3c509.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c509.c 2004-03-15 03:29:33.834040016 +0100
+@@ -809,7 +809,7 @@ el3_open(struct net_device *dev)
+ outw(RxReset, ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+- i = request_irq(dev->irq, &el3_interrupt, 0, dev->name, dev);
++ i = request_irq(dev->irq, &el3_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c515.c linux-2.6.4-hardened-new/drivers/net/3c515.c
+--- linux-2.6.4-hardened/drivers/net/3c515.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c515.c 2004-03-15 03:29:33.847038040 +0100
+@@ -755,11 +755,11 @@ static int corkscrew_open(struct net_dev
+ /* Corkscrew: Cannot share ISA resources. */
+ if (dev->irq == 0
+ || dev->dma == 0
+- || request_irq(dev->irq, &corkscrew_interrupt, 0,
++ || request_irq(dev->irq, &corkscrew_interrupt, SA_NET_RANDOM,
+ vp->product_name, dev)) return -EAGAIN;
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+- } else if (request_irq(dev->irq, &corkscrew_interrupt, SA_SHIRQ,
++ } else if (request_irq(dev->irq, &corkscrew_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ vp->product_name, dev)) {
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c523.c linux-2.6.4-hardened-new/drivers/net/3c523.c
+--- linux-2.6.4-hardened/drivers/net/3c523.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c523.c 2004-03-15 03:29:33.860036064 +0100
+@@ -288,7 +288,7 @@ static int elmc_open(struct net_device *
+
+ elmc_id_attn586(); /* disable interrupts */
+
+- ret = request_irq(dev->irq, &elmc_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM,
++ ret = request_irq(dev->irq, &elmc_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: couldn't get irq %d\n", dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c527.c linux-2.6.4-hardened-new/drivers/net/3c527.c
+--- linux-2.6.4-hardened/drivers/net/3c527.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c527.c 2004-03-15 03:29:33.922026640 +0100
+@@ -434,7 +434,7 @@ static int __init mc32_probe1(struct net
+ * Grab the IRQ
+ */
+
+- err = request_irq(dev->irq, &mc32_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
++ err = request_irq(dev->irq, &mc32_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (err) {
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/3c59x.c linux-2.6.4-hardened-new/drivers/net/3c59x.c
+--- linux-2.6.4-hardened/drivers/net/3c59x.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/3c59x.c 2004-03-15 03:29:33.947022840 +0100
+@@ -1720,7 +1720,7 @@ vortex_open(struct net_device *dev)
+
+ /* Use the now-standard shared IRQ implementation. */
+ if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
+- &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) {
++ &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
+ goto out;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/7990.c linux-2.6.4-hardened-new/drivers/net/7990.c
+--- linux-2.6.4-hardened/drivers/net/7990.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/7990.c 2004-03-15 03:29:33.958021168 +0100
+@@ -462,7 +462,7 @@ int lance_open (struct net_device *dev)
+ DECLARE_LL;
+
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+- if (request_irq(lp->irq, lance_interrupt, 0, lp->name, dev))
++ if (request_irq(lp->irq, lance_interrupt, SA_NET_RANDOM, lp->name, dev))
+ return -EAGAIN;
+
+ res = lance_reset(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/8139cp.c linux-2.6.4-hardened-new/drivers/net/8139cp.c
+--- linux-2.6.4-hardened/drivers/net/8139cp.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/8139cp.c 2004-03-15 03:29:33.970019344 +0100
+@@ -1166,7 +1166,7 @@ static int cp_open (struct net_device *d
+
+ cp_init_hw(cp);
+
+- rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
++ rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (rc)
+ goto err_out_hw;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/8139too.c linux-2.6.4-hardened-new/drivers/net/8139too.c
+--- linux-2.6.4-hardened/drivers/net/8139too.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/8139too.c 2004-03-15 03:29:33.986016912 +0100
+@@ -1317,7 +1317,7 @@ static int rtl8139_open (struct net_devi
+ int retval;
+ void *ioaddr = tp->mmio_addr;
+
+- retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval)
+ return retval;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/82596.c linux-2.6.4-hardened-new/drivers/net/82596.c
+--- linux-2.6.4-hardened/drivers/net/82596.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/82596.c 2004-03-15 03:29:33.998015088 +0100
+@@ -1005,7 +1005,7 @@ static int i596_open(struct net_device *
+
+ DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+- if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
++ if (request_irq(dev->irq, i596_interrupt, SA_NET_RANDOM, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/a2065.c linux-2.6.4-hardened-new/drivers/net/a2065.c
+--- linux-2.6.4-hardened/drivers/net/a2065.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/a2065.c 2004-03-15 03:29:34.018012048 +0100
+@@ -492,7 +492,7 @@ static int lance_open (struct net_device
+ ll->rdp = LE_C0_STOP;
+
+ /* Install the Interrupt handler */
+- ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, SA_SHIRQ,
++ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret) return ret;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ac3200.c linux-2.6.4-hardened-new/drivers/net/ac3200.c
+--- linux-2.6.4-hardened/drivers/net/ac3200.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ac3200.c 2004-03-15 03:29:34.031010072 +0100
+@@ -203,7 +203,7 @@ static int __init ac_probe1(int ioaddr,
+ printk(", assigning");
+ }
+
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
+ goto out1;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/acenic.c linux-2.6.4-hardened-new/drivers/net/acenic.c
+--- linux-2.6.4-hardened/drivers/net/acenic.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/acenic.c 2004-03-15 03:29:34.054006576 +0100
+@@ -1453,7 +1453,7 @@ static int __init ace_init(struct net_de
+ goto init_error;
+ }
+
+- ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ,
++ ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ecode) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/apne.c linux-2.6.4-hardened-new/drivers/net/apne.c
+--- linux-2.6.4-hardened/drivers/net/apne.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/apne.c 2004-03-15 03:29:34.064005056 +0100
+@@ -310,7 +310,7 @@ static int __init apne_probe1(struct net
+ dev->base_addr = ioaddr;
+
+ /* Install the Interrupt handler */
+- i = request_irq(IRQ_AMIGA_PORTS, apne_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(IRQ_AMIGA_PORTS, apne_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/appletalk/cops.c linux-2.6.4-hardened-new/drivers/net/appletalk/cops.c
+--- linux-2.6.4-hardened/drivers/net/appletalk/cops.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/appletalk/cops.c 2004-03-15 03:29:34.077003080 +0100
+@@ -326,7 +326,7 @@ static int __init cops_probe1(struct net
+
+ /* Reserve any actual interrupt. */
+ if (dev->irq) {
+- retval = request_irq(dev->irq, &cops_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, &cops_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval)
+ goto err_out;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/appletalk/ltpc.c linux-2.6.4-hardened-new/drivers/net/appletalk/ltpc.c
+--- linux-2.6.4-hardened/drivers/net/appletalk/ltpc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/appletalk/ltpc.c 2004-03-15 03:29:34.088001408 +0100
+@@ -1182,7 +1182,7 @@ struct net_device * __init ltpc_probe(vo
+ }
+
+ /* grab it and don't let go :-) */
+- if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0)
++ if (irq && request_irq( irq, &ltpc_interrupt, SA_NET_RANDOM, "ltpc", dev) >= 0)
+ {
+ (void) inb_p(io+7); /* enable interrupts from board */
+ (void) inb_p(io+7); /* and reset irq line */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arcnet/arc-rimi.c linux-2.6.4-hardened-new/drivers/net/arcnet/arc-rimi.c
+--- linux-2.6.4-hardened/drivers/net/arcnet/arc-rimi.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arcnet/arc-rimi.c 2004-03-15 03:29:34.097000040 +0100
+@@ -129,7 +129,7 @@ static int __init arcrimi_found(struct n
+ int err;
+
+ /* reserve the irq */
+- if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
++ if (request_irq(dev->irq, &arcnet_interrupt, SA_NET_RANDOM, "arcnet (RIM I)", dev)) {
+ release_mem_region(dev->mem_start, BUFFER_SIZE);
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arcnet/com20020.c linux-2.6.4-hardened-new/drivers/net/arcnet/com20020.c
+--- linux-2.6.4-hardened/drivers/net/arcnet/com20020.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arcnet/com20020.c 2004-03-15 03:29:34.105998672 +0100
+@@ -195,7 +195,7 @@ int com20020_found(struct net_device *de
+ outb(dev->dev_addr[0], _XREG);
+
+ /* reserve the irq */
+- if (request_irq(dev->irq, &arcnet_interrupt, shared,
++ if (request_irq(dev->irq, &arcnet_interrupt, shared | SA_NET_RANDOM,
+ "arcnet (COM20020)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arcnet/com90io.c linux-2.6.4-hardened-new/drivers/net/arcnet/com90io.c
+--- linux-2.6.4-hardened/drivers/net/arcnet/com90io.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arcnet/com90io.c 2004-03-15 03:29:34.113997456 +0100
+@@ -238,7 +238,7 @@ static int __init com90io_found(struct n
+ int err;
+
+ /* Reserve the irq */
+- if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
++ if (request_irq(dev->irq, &arcnet_interrupt, SA_NET_RANDOM, "arcnet (COM90xx-IO)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arcnet/com90xx.c linux-2.6.4-hardened-new/drivers/net/arcnet/com90xx.c
+--- linux-2.6.4-hardened/drivers/net/arcnet/com90xx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arcnet/com90xx.c 2004-03-15 03:29:34.124995784 +0100
+@@ -445,7 +445,7 @@ static int __init com90xx_found(int ioad
+ goto err_free_dev;
+
+ /* reserve the irq */
+- if (request_irq(airq, &arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
++ if (request_irq(airq, &arcnet_interrupt, SA_NET_RANDOM, "arcnet (90xx)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
+ goto err_release_mem;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ariadne.c linux-2.6.4-hardened-new/drivers/net/ariadne.c
+--- linux-2.6.4-hardened/drivers/net/ariadne.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ariadne.c 2004-03-15 03:29:34.141993200 +0100
+@@ -320,7 +320,7 @@ static int ariadne_open(struct net_devic
+
+ netif_start_queue(dev);
+
+- i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, SA_SHIRQ,
++ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (i) return i;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arm/am79c961a.c linux-2.6.4-hardened-new/drivers/net/arm/am79c961a.c
+--- linux-2.6.4-hardened/drivers/net/arm/am79c961a.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arm/am79c961a.c 2004-03-15 03:29:34.152991528 +0100
+@@ -296,7 +296,7 @@ am79c961_open(struct net_device *dev)
+
+ memset (&priv->stats, 0, sizeof (priv->stats));
+
+- ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
++ ret = request_irq(dev->irq, am79c961_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arm/ether00.c linux-2.6.4-hardened-new/drivers/net/arm/ether00.c
+--- linux-2.6.4-hardened/drivers/net/arm/ether00.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arm/ether00.c 2004-03-15 03:29:34.163989856 +0100
+@@ -707,11 +707,11 @@ static int ether00_open(struct net_devic
+ return -EINVAL;
+
+ /* Install interrupt handlers */
+- result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
++ result=request_irq(dev->irq,ether00_int,SA_NET_RANDOM,"ether00",dev);
+ if(result)
+ goto open_err1;
+
+- result=request_irq(2,ether00_phy_int,0,"ether00_phy",dev);
++ result=request_irq(2,ether00_phy_int,SA_NET_RANDOM,"ether00_phy",dev);
+ if(result)
+ goto open_err2;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arm/ether1.c linux-2.6.4-hardened-new/drivers/net/arm/ether1.c
+--- linux-2.6.4-hardened/drivers/net/arm/ether1.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arm/ether1.c 2004-03-15 03:29:34.173988336 +0100
+@@ -650,7 +650,7 @@ ether1_open (struct net_device *dev)
+ return -EINVAL;
+ }
+
+- if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
++ if (request_irq(dev->irq, ether1_interrupt, SA_NET_RANDOM, "ether1", dev))
+ return -EAGAIN;
+
+ memset (&priv->stats, 0, sizeof (struct net_device_stats));
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arm/ether3.c linux-2.6.4-hardened-new/drivers/net/arm/ether3.c
+--- linux-2.6.4-hardened/drivers/net/arm/ether3.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arm/ether3.c 2004-03-15 03:29:34.183986816 +0100
+@@ -418,7 +418,7 @@ ether3_open(struct net_device *dev)
+ return -EINVAL;
+ }
+
+- if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
++ if (request_irq(dev->irq, ether3_interrupt, SA_NET_RANDOM, "ether3", dev))
+ return -EAGAIN;
+
+ ether3_init_for_open(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/arm/etherh.c linux-2.6.4-hardened-new/drivers/net/arm/etherh.c
+--- linux-2.6.4-hardened/drivers/net/arm/etherh.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/arm/etherh.c 2004-03-15 03:29:34.192985448 +0100
+@@ -440,7 +440,7 @@ etherh_open(struct net_device *dev)
+ return -EINVAL;
+ }
+
+- if (request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))
+ return -EAGAIN;
+
+ /*
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/at1700.c linux-2.6.4-hardened-new/drivers/net/at1700.c
+--- linux-2.6.4-hardened/drivers/net/at1700.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/at1700.c 2004-03-15 03:29:34.201984080 +0100
+@@ -542,7 +542,7 @@ found:
+ lp->jumpered = is_fmv18x;
+ lp->mca_slot = slot;
+ /* Snarf the interrupt vector now. */
+- ret = request_irq(irq, &net_interrupt, 0, dev->name, dev);
++ ret = request_irq(irq, &net_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk (" AT1700 at %#3x is unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/atp.c linux-2.6.4-hardened-new/drivers/net/atp.c
+--- linux-2.6.4-hardened/drivers/net/atp.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/atp.c 2004-03-15 03:29:34.209982864 +0100
+@@ -438,7 +438,7 @@ static int net_open(struct net_device *d
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+- ret = request_irq(dev->irq, &atp_interrupt, 0, dev->name, dev);
++ ret = request_irq(dev->irq, &atp_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/au1000_eth.c linux-2.6.4-hardened-new/drivers/net/au1000_eth.c
+--- linux-2.6.4-hardened/drivers/net/au1000_eth.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/au1000_eth.c 2004-03-15 03:29:34.221981040 +0100
+@@ -942,7 +942,7 @@ static int au1000_open(struct net_device
+ }
+ netif_start_queue(dev);
+
+- if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
++ if ((retval = request_irq(dev->irq, &au1000_interrupt, SA_NET_RANDOM,
+ dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/b44.c linux-2.6.4-hardened-new/drivers/net/b44.c
+--- linux-2.6.4-hardened/drivers/net/b44.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/b44.c 2004-03-15 03:29:34.230979672 +0100
+@@ -1216,7 +1216,7 @@ static int b44_open(struct net_device *d
+ if (err)
+ return err;
+
+- err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
++ err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (err)
+ goto err_out_free;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/bmac.c linux-2.6.4-hardened-new/drivers/net/bmac.c
+--- linux-2.6.4-hardened/drivers/net/bmac.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/bmac.c 2004-03-15 03:29:34.242977848 +0100
+@@ -1352,7 +1352,7 @@ static int __devinit bmac_probe(struct m
+
+ init_timer(&bp->tx_timeout);
+
+- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
++ ret = request_irq(dev->irq, bmac_misc_intr, SA_NET_RANDOM, "BMAC-misc", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
+ goto err_out_iounmap_rx;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/cs89x0.c linux-2.6.4-hardened-new/drivers/net/cs89x0.c
+--- linux-2.6.4-hardened/drivers/net/cs89x0.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/cs89x0.c 2004-03-15 03:29:34.252976328 +0100
+@@ -1134,7 +1134,7 @@ net_open(struct net_device *dev)
+
+ for (i = 2; i < CS8920_NO_INTS; i++) {
+ if ((1 << i) & lp->irq_map) {
+- if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
++ if (request_irq(i, net_interrupt, SA_NET_RANDOM, dev->name, dev) == 0) {
+ dev->irq = i;
+ write_irq(dev, lp->chip_type, i);
+ /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/de600.c linux-2.6.4-hardened-new/drivers/net/de600.c
+--- linux-2.6.4-hardened/drivers/net/de600.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/de600.c 2004-03-15 03:29:34.260975112 +0100
+@@ -134,7 +134,7 @@ static inline u8 de600_read_byte(unsigne
+ static int de600_open(struct net_device *dev)
+ {
+ unsigned long flags;
+- int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
++ int ret = request_irq(DE600_IRQ, de600_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/de620.c linux-2.6.4-hardened-new/drivers/net/de620.c
+--- linux-2.6.4-hardened/drivers/net/de620.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/de620.c 2004-03-15 03:29:34.271973440 +0100
+@@ -444,7 +444,7 @@ de620_get_register(struct net_device *de
+ */
+ static int de620_open(struct net_device *dev)
+ {
+- int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
++ int ret = request_irq(dev->irq, de620_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/declance.c linux-2.6.4-hardened-new/drivers/net/declance.c
+--- linux-2.6.4-hardened/drivers/net/declance.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/declance.c 2004-03-15 03:29:34.280972072 +0100
+@@ -785,7 +785,7 @@ static int lance_open(struct net_device
+ netif_start_queue(dev);
+
+ /* Associate IRQ with lance_interrupt */
+- if (request_irq(dev->irq, &lance_interrupt, 0, "lance", dev)) {
++ if (request_irq(dev->irq, &lance_interrupt, SA_NET_RANDOM, "lance", dev)) {
+ printk("lance: Can't get IRQ %d\n", dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/defxx.c linux-2.6.4-hardened-new/drivers/net/defxx.c
+--- linux-2.6.4-hardened/drivers/net/defxx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/defxx.c 2004-03-15 03:29:34.295969792 +0100
+@@ -1221,7 +1221,7 @@ static int dfx_open(struct net_device *d
+
+ /* Register IRQ - support shared interrupts by passing device ptr */
+
+- ret = request_irq(dev->irq, (void *)dfx_interrupt, SA_SHIRQ, dev->name, dev);
++ ret = request_irq(dev->irq, (void *)dfx_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/depca.c linux-2.6.4-hardened-new/drivers/net/depca.c
+--- linux-2.6.4-hardened/drivers/net/depca.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/depca.c 2004-03-15 03:29:34.305968272 +0100
+@@ -840,7 +840,7 @@ static int depca_open(struct net_device
+
+ depca_dbg_open(dev);
+
+- if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, dev)) {
++ if (request_irq(dev->irq, &depca_interrupt, SA_NET_RANDOM, lp->adapter_name, dev)) {
+ printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/dgrs.c linux-2.6.4-hardened-new/drivers/net/dgrs.c
+--- linux-2.6.4-hardened/drivers/net/dgrs.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/dgrs.c 2004-03-15 03:29:34.320965992 +0100
+@@ -1185,7 +1185,7 @@ dgrs_probe1(struct net_device *dev)
+ if (priv->plxreg)
+ OUTL(dev->base_addr + PLX_LCL2PCI_DOORBELL, 1);
+
+- rc = request_irq(dev->irq, &dgrs_intr, SA_SHIRQ, "RightSwitch", dev);
++ rc = request_irq(dev->irq, &dgrs_intr, SA_SHIRQ | SA_NET_RANDOM, "RightSwitch", dev);
+ if (rc)
+ goto err_out;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/dl2k.c linux-2.6.4-hardened-new/drivers/net/dl2k.c
+--- linux-2.6.4-hardened/drivers/net/dl2k.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/dl2k.c 2004-03-15 03:29:34.329964624 +0100
+@@ -437,7 +437,7 @@ rio_open (struct net_device *dev)
+ int i;
+ u16 macctrl;
+
+- i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/e100.c linux-2.6.4-hardened-new/drivers/net/e100.c
+--- linux-2.6.4-hardened/drivers/net/e100.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/e100.c 2004-03-15 03:32:04.591121456 +0100
+@@ -1643,7 +1643,7 @@ static int e100_up(struct nic *nic)
+ e100_start_receiver(nic);
+ netif_start_queue(nic->netdev);
+ mod_timer(&nic->watchdog, jiffies);
+- if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
++ if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ | SA_NET_RANDOM,
+ nic->netdev->name, nic->netdev)))
+ goto err_no_irq;
+ e100_enable_irq(nic);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/e1000/e1000_main.c linux-2.6.4-hardened-new/drivers/net/e1000/e1000_main.c
+--- linux-2.6.4-hardened/drivers/net/e1000/e1000_main.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/e1000/e1000_main.c 2004-03-15 03:29:40.356048520 +0100
+@@ -277,7 +277,7 @@ e1000_up(struct e1000_adapter *adapter)
+ e1000_alloc_rx_buffers(adapter);
+
+ if((err = request_irq(adapter->pdev->irq, &e1000_intr,
+- SA_SHIRQ | SA_SAMPLE_RANDOM,
++ SA_SHIRQ | SA_NET_RANDOM,
+ netdev->name, netdev)))
+ return err;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/e2100.c linux-2.6.4-hardened-new/drivers/net/e2100.c
+--- linux-2.6.4-hardened/drivers/net/e2100.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/e2100.c 2004-03-15 03:29:40.365047152 +0100
+@@ -283,7 +283,7 @@ e21_open(struct net_device *dev)
+ short ioaddr = dev->base_addr;
+ int retval;
+
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ /* Set the interrupt line and memory base on the hardware. */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/eepro.c linux-2.6.4-hardened-new/drivers/net/eepro.c
+--- linux-2.6.4-hardened/drivers/net/eepro.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/eepro.c 2004-03-15 03:29:40.376045480 +0100
+@@ -964,7 +964,7 @@ static int eepro_open(struct net_device
+ return -EAGAIN;
+ }
+
+- if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) {
++ if (request_irq(dev->irq , &eepro_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/eepro100.c linux-2.6.4-hardened-new/drivers/net/eepro100.c
+--- linux-2.6.4-hardened/drivers/net/eepro100.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/eepro100.c 2004-03-15 03:29:40.391043200 +0100
+@@ -1012,7 +1012,7 @@ speedo_open(struct net_device *dev)
+ sp->in_interrupt = 0;
+
+ /* .. we can safely take handler calls during init. */
+- retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ return retval;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/eexpress.c linux-2.6.4-hardened-new/drivers/net/eexpress.c
+--- linux-2.6.4-hardened/drivers/net/eexpress.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/eexpress.c 2004-03-15 03:29:40.402041528 +0100
+@@ -461,7 +461,7 @@ static int eexp_open(struct net_device *
+ if (!dev->irq || !irqrmap[dev->irq])
+ return -ENXIO;
+
+- ret = request_irq(dev->irq,&eexp_irq,0,dev->name,dev);
++ ret = request_irq(dev->irq,&eexp_irq,SA_NET_RANDOM,dev->name,dev);
+ if (ret) return ret;
+
+ if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/epic100.c linux-2.6.4-hardened-new/drivers/net/epic100.c
+--- linux-2.6.4-hardened/drivers/net/epic100.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/epic100.c 2004-03-15 03:29:40.415039552 +0100
+@@ -677,7 +677,7 @@ static int epic_open(struct net_device *
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+
+- if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ epic_init_ring(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/es3210.c linux-2.6.4-hardened-new/drivers/net/es3210.c
+--- linux-2.6.4-hardened/drivers/net/es3210.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/es3210.c 2004-03-15 03:29:40.424038184 +0100
+@@ -248,7 +248,7 @@ static int __init es_probe1(struct net_d
+ printk(" assigning IRQ %d", dev->irq);
+ }
+
+- if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, "es3210", dev)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ retval = -EAGAIN;
+ goto out;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/eth16i.c linux-2.6.4-hardened-new/drivers/net/eth16i.c
+--- linux-2.6.4-hardened/drivers/net/eth16i.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/eth16i.c 2004-03-15 03:29:40.437036208 +0100
+@@ -538,7 +538,7 @@ static int __init eth16i_probe1(struct n
+
+ /* Try to obtain interrupt vector */
+
+- if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_WARNING "%s: %s at %#3x, but is unusable due conflicting IRQ %d.\n",
+ dev->name, cardname, ioaddr, dev->irq);
+ goto out;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ewrk3.c linux-2.6.4-hardened-new/drivers/net/ewrk3.c
+--- linux-2.6.4-hardened/drivers/net/ewrk3.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ewrk3.c 2004-03-15 03:29:40.447034688 +0100
+@@ -630,7 +630,7 @@ static int ewrk3_open(struct net_device
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+- if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
++ if (request_irq(dev->irq, (void *) ewrk3_interrupt, SA_NET_RANDOM, "ewrk3", dev)) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/fc/iph5526.c linux-2.6.4-hardened-new/drivers/net/fc/iph5526.c
+--- linux-2.6.4-hardened/drivers/net/fc/iph5526.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/fc/iph5526.c 2004-03-15 03:29:40.464032104 +0100
+@@ -3786,7 +3786,7 @@ int iph5526_detect(Scsi_Host_Template *t
+ int irqval = 0;
+ /* Found it, get IRQ.
+ */
+- irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ : 0, fi->name, host);
++ irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ | SA_NET_RANDOM : 0, fi->name, host);
+ if (irqval) {
+ printk("iph5526.c : Unable to get IRQ %d (irqval = %d).\n", pci_irq_line, irqval);
+ scsi_unregister(host);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/fealnx.c linux-2.6.4-hardened-new/drivers/net/fealnx.c
+--- linux-2.6.4-hardened/drivers/net/fealnx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/fealnx.c 2004-03-15 03:29:40.477030128 +0100
+@@ -885,7 +885,7 @@ static int netdev_open(struct net_device
+
+ writel(0x00000001, ioaddr + BCR); /* Reset */
+
+- if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))
+ return -EAGAIN;
+
+ init_ring(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/fec.c linux-2.6.4-hardened-new/drivers/net/fec.c
+--- linux-2.6.4-hardened/drivers/net/fec.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/fec.c 2004-03-15 03:29:40.488028456 +0100
+@@ -1057,13 +1057,13 @@ static void __inline__ fec_request_intrs
+ volatile unsigned long *icrp;
+
+ /* Setup interrupt handlers. */
+- if (request_irq(86, fec_enet_interrupt, 0, "fec(RX)", dev) != 0)
++ if (request_irq(86, fec_enet_interrupt, SA_NET_RANDOM, "fec(RX)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RC) IRQ(86)!\n");
+- if (request_irq(87, fec_enet_interrupt, 0, "fec(TX)", dev) != 0)
++ if (request_irq(87, fec_enet_interrupt, SA_NET_RANDOM, "fec(TX)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RC) IRQ(87)!\n");
+- if (request_irq(88, fec_enet_interrupt, 0, "fec(OTHER)", dev) != 0)
++ if (request_irq(88, fec_enet_interrupt, SA_NET_RANDOM, "fec(OTHER)", dev) != 0)
+ printk("FEC: Could not allocate FEC(OTHER) IRQ(88)!\n");
+- if (request_irq(66, mii_link_interrupt, 0, "fec(MII)", dev) != 0)
++ if (request_irq(66, mii_link_interrupt, SA_NET_RANDOM, "fec(MII)", dev) != 0)
+ printk("FEC: Could not allocate MII IRQ(66)!\n");
+
+ /* Unmask interrupt at ColdFire 5272 SIM */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/fmv18x.c linux-2.6.4-hardened-new/drivers/net/fmv18x.c
+--- linux-2.6.4-hardened/drivers/net/fmv18x.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/fmv18x.c 2004-03-15 03:29:40.499026784 +0100
+@@ -224,7 +224,7 @@ static int __init fmv18x_probe1(struct n
+ }
+
+ /* Snarf the interrupt vector now. */
+- retval = request_irq(dev->irq, &net_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, &net_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk ("FMV-18x found at %#3x, but it's unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/forcedeth.c linux-2.6.4-hardened-new/drivers/net/forcedeth.c
+--- linux-2.6.4-hardened/drivers/net/forcedeth.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/forcedeth.c 2004-03-15 03:29:40.509025264 +0100
+@@ -1319,7 +1319,7 @@ static int nv_open(struct net_device *de
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ pci_push(base);
+
+- ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
++ ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ goto out_drain;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/gt96100eth.c linux-2.6.4-hardened-new/drivers/net/gt96100eth.c
+--- linux-2.6.4-hardened/drivers/net/gt96100eth.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/gt96100eth.c 2004-03-15 03:29:40.524022984 +0100
+@@ -1083,7 +1083,7 @@ gt96100_open(struct net_device *dev)
+ }
+
+ if ((retval = request_irq(dev->irq, &gt96100_interrupt,
+- SA_SHIRQ, dev->name, dev))) {
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))) {
+ err("unable to get IRQ %d\n", dev->irq);
+ MOD_DEC_USE_COUNT;
+ return retval;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hamachi.c linux-2.6.4-hardened-new/drivers/net/hamachi.c
+--- linux-2.6.4-hardened/drivers/net/hamachi.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hamachi.c 2004-03-15 03:29:40.535021312 +0100
+@@ -859,7 +859,7 @@ static int hamachi_open(struct net_devic
+ u32 rx_int_var, tx_int_var;
+ u16 fifo_info;
+
+- i = request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hamradio/baycom_ser_fdx.c linux-2.6.4-hardened-new/drivers/net/hamradio/baycom_ser_fdx.c
+--- linux-2.6.4-hardened/drivers/net/hamradio/baycom_ser_fdx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hamradio/baycom_ser_fdx.c 2004-03-15 03:29:40.543020096 +0100
+@@ -433,7 +433,7 @@ static int ser12_open(struct net_device
+ outb(0, FCR(dev->base_addr)); /* disable FIFOs */
+ outb(0x0d, MCR(dev->base_addr));
+ outb(0, IER(dev->base_addr));
+- if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ,
++ if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM,
+ "baycom_ser_fdx", dev)) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EBUSY;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hamradio/baycom_ser_hdx.c linux-2.6.4-hardened-new/drivers/net/hamradio/baycom_ser_hdx.c
+--- linux-2.6.4-hardened/drivers/net/hamradio/baycom_ser_hdx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hamradio/baycom_ser_hdx.c 2004-03-15 03:29:40.554018424 +0100
+@@ -487,7 +487,7 @@ static int ser12_open(struct net_device
+ outb(0, FCR(dev->base_addr)); /* disable FIFOs */
+ outb(0x0d, MCR(dev->base_addr));
+ outb(0, IER(dev->base_addr));
+- if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ,
++ if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM,
+ "baycom_ser12", dev)) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EBUSY;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hamradio/dmascc.c linux-2.6.4-hardened-new/drivers/net/hamradio/dmascc.c
+--- linux-2.6.4-hardened/drivers/net/hamradio/dmascc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hamradio/dmascc.c 2004-03-15 03:29:40.564016904 +0100
+@@ -730,7 +730,7 @@ static int scc_open(struct net_device *d
+
+ /* Request IRQ if not already used by other channel */
+ if (!info->irq_used) {
+- if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
++ if (request_irq(dev->irq, scc_isr, SA_NET_RANDOM, "dmascc", info)) {
+ return -EAGAIN;
+ }
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hamradio/scc.c linux-2.6.4-hardened-new/drivers/net/hamradio/scc.c
+--- linux-2.6.4-hardened/drivers/net/hamradio/scc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hamradio/scc.c 2004-03-15 03:29:40.575015232 +0100
+@@ -1744,7 +1744,7 @@ static int scc_net_ioctl(struct net_devi
+
+ if (!Ivec[hwcfg.irq].used && hwcfg.irq)
+ {
+- if (request_irq(hwcfg.irq, scc_isr, SA_INTERRUPT, "AX.25 SCC", NULL))
++ if (request_irq(hwcfg.irq, scc_isr, SA_INTERRUPT | SA_NET_RANDOM, "AX.25 SCC", NULL))
+ printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
+ else
+ Ivec[hwcfg.irq].used = 1;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hamradio/yam.c linux-2.6.4-hardened-new/drivers/net/hamradio/yam.c
+--- linux-2.6.4-hardened/drivers/net/hamradio/yam.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hamradio/yam.c 2004-03-15 03:29:40.584013864 +0100
+@@ -885,7 +885,7 @@ static int yam_open(struct net_device *d
+ goto out_release_base;
+ }
+ outb(0, IER(dev->base_addr));
+- if (request_irq(dev->irq, yam_interrupt, SA_INTERRUPT | SA_SHIRQ, dev->name, dev)) {
++ if (request_irq(dev->irq, yam_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
+ ret = -EBUSY;
+ goto out_release_base;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hp-plus.c linux-2.6.4-hardened-new/drivers/net/hp-plus.c
+--- linux-2.6.4-hardened/drivers/net/hp-plus.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hp-plus.c 2004-03-15 03:29:40.595012192 +0100
+@@ -277,7 +277,7 @@ hpp_open(struct net_device *dev)
+ int option_reg;
+ int retval;
+
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ return retval;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hp.c linux-2.6.4-hardened-new/drivers/net/hp.c
+--- linux-2.6.4-hardened/drivers/net/hp.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hp.c 2004-03-15 03:29:40.603010976 +0100
+@@ -176,13 +176,13 @@ static int __init hp_probe1(struct net_d
+ int *irqp = wordmode ? irq_16list : irq_8list;
+ do {
+ int irq = *irqp;
+- if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
++ if (request_irq (irq, NULL, SA_NET_RANDOM, "bogus", NULL) != -EBUSY) {
+ unsigned long cookie = probe_irq_on();
+ /* Twinkle the interrupt, and check if it's seen. */
+ outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+ outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+ if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
+- && request_irq (irq, ei_interrupt, 0, dev->name, dev) == 0) {
++ && request_irq (irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev) == 0) {
+ printk(" selecting IRQ %d.\n", irq);
+ dev->irq = *irqp;
+ break;
+@@ -197,7 +197,7 @@ static int __init hp_probe1(struct net_d
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hp100.c linux-2.6.4-hardened-new/drivers/net/hp100.c
+--- linux-2.6.4-hardened/drivers/net/hp100.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hp100.c 2004-03-15 03:29:40.625007632 +0100
+@@ -1064,7 +1064,7 @@ static int hp100_open(struct net_device
+ /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+ if (request_irq(dev->irq, hp100_interrupt,
+ lp->bus == HP100_BUS_PCI || lp->bus ==
+- HP100_BUS_EISA ? SA_SHIRQ : SA_INTERRUPT,
++ HP100_BUS_EISA ? SA_SHIRQ | SA_NET_RANDOM : SA_INTERRUPT,
+ "hp100", dev)) {
+ printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/hydra.c linux-2.6.4-hardened-new/drivers/net/hydra.c
+--- linux-2.6.4-hardened/drivers/net/hydra.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/hydra.c 2004-03-15 03:29:40.640005352 +0100
+@@ -117,7 +117,7 @@ static int __devinit hydra_init(struct z
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+- if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, "Hydra Ethernet",
++ if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ | SA_NET_RANDOM, "Hydra Ethernet",
+ dev)) {
+ free_netdev(dev);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ibmlana.c linux-2.6.4-hardened-new/drivers/net/ibmlana.c
+--- linux-2.6.4-hardened/drivers/net/ibmlana.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ibmlana.c 2004-03-15 03:29:40.652003528 +0100
+@@ -779,7 +779,7 @@ static int ibmlana_open(struct net_devic
+
+ /* register resources - only necessary for IRQ */
+
+- result = request_irq(priv->realirq, irq_handler, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
++ result = request_irq(priv->realirq, irq_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (result != 0) {
+ printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
+ return result;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ioc3-eth.c linux-2.6.4-hardened-new/drivers/net/ioc3-eth.c
+--- linux-2.6.4-hardened/drivers/net/ioc3-eth.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ioc3-eth.c 2004-03-15 03:29:40.663001856 +0100
+@@ -1333,7 +1333,7 @@ ioc3_open(struct net_device *dev)
+ {
+ struct ioc3_private *ip = dev->priv;
+
+- if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ, ioc3_str, dev)) {
++ if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ | SA_NET_RANDOM, ioc3_str, dev)) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/ali-ircc.c linux-2.6.4-hardened-new/drivers/net/irda/ali-ircc.c
+--- linux-2.6.4-hardened/drivers/net/irda/ali-ircc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/ali-ircc.c 2004-03-15 03:29:40.681998968 +0100
+@@ -1317,7 +1317,7 @@ static int ali_ircc_net_open(struct net_
+ iobase = self->io.fir_base;
+
+ /* Request IRQ and install Interrupt Handler */
+- if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
++ if (request_irq(self->io.irq, ali_ircc_interrupt, SA_NET_RANDOM, dev->name, dev))
+ {
+ WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/au1k_ir.c linux-2.6.4-hardened-new/drivers/net/irda/au1k_ir.c
+--- linux-2.6.4-hardened/drivers/net/irda/au1k_ir.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/au1k_ir.c 2004-03-15 03:29:40.701995928 +0100
+@@ -351,14 +351,14 @@ static int au1k_irda_start(struct net_de
+ }
+
+ if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt,
+- 0, dev->name, dev))) {
++ SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ MOD_DEC_USE_COUNT;
+ return retval;
+ }
+ if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt,
+- 0, dev->name, dev))) {
++ SA_NET_RANDOM, dev->name, dev))) {
+ free_irq(AU1000_IRDA_TX_INT, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/donauboe.c linux-2.6.4-hardened-new/drivers/net/irda/donauboe.c
+--- linux-2.6.4-hardened/drivers/net/irda/donauboe.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/donauboe.c 2004-03-15 03:29:40.713994104 +0100
+@@ -1417,7 +1417,7 @@ toshoboe_net_open (struct net_device *de
+ return 0;
+
+ if (request_irq (self->io.irq, toshoboe_interrupt,
+- SA_SHIRQ | SA_INTERRUPT, dev->name, (void *) self))
++ SA_SHIRQ | SA_INTERRUPT | SA_NET_RANDOM, dev->name, (void *) self))
+ {
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/irport.c linux-2.6.4-hardened-new/drivers/net/irda/irport.c
+--- linux-2.6.4-hardened/drivers/net/irda/irport.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/irport.c 2004-03-15 03:29:40.730991520 +0100
+@@ -902,7 +902,7 @@ int irport_net_open(struct net_device *d
+
+ iobase = self->io.sir_base;
+
+- if (request_irq(self->io.irq, self->interrupt, 0, dev->name,
++ if (request_irq(self->io.irq, self->interrupt, SA_NET_RANDOM, dev->name,
+ (void *) dev)) {
+ IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
+ __FUNCTION__, self->io.irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/nsc-ircc.c linux-2.6.4-hardened-new/drivers/net/irda/nsc-ircc.c
+--- linux-2.6.4-hardened/drivers/net/irda/nsc-ircc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/nsc-ircc.c 2004-03-15 03:29:40.749988632 +0100
+@@ -2008,7 +2008,7 @@ static int nsc_ircc_net_open(struct net_
+
+ iobase = self->io.fir_base;
+
+- if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
++ if (request_irq(self->io.irq, nsc_ircc_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/sa1100_ir.c linux-2.6.4-hardened-new/drivers/net/irda/sa1100_ir.c
+--- linux-2.6.4-hardened/drivers/net/irda/sa1100_ir.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/sa1100_ir.c 2004-03-15 03:29:40.762986656 +0100
+@@ -844,7 +844,7 @@ static int sa1100_irda_start(struct net_
+
+ si->speed = 9600;
+
+- err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
++ err = request_irq(dev->irq, sa1100_irda_irq, SA_NET_RANDOM, dev->name, dev);
+ if (err)
+ goto err_irq;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/smsc-ircc2.c linux-2.6.4-hardened-new/drivers/net/irda/smsc-ircc2.c
+--- linux-2.6.4-hardened/drivers/net/irda/smsc-ircc2.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/smsc-ircc2.c 2004-03-15 03:29:40.798981184 +0100
+@@ -1547,7 +1547,7 @@ static int smsc_ircc_net_open(struct net
+
+ iobase = self->io.fir_base;
+
+- if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
++ if (request_irq(self->io.irq, smsc_ircc_interrupt, SA_NET_RANDOM, dev->name,
+ (void *) dev)) {
+ IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
+ __FUNCTION__, self->io.irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/via-ircc.c linux-2.6.4-hardened-new/drivers/net/irda/via-ircc.c
+--- linux-2.6.4-hardened/drivers/net/irda/via-ircc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/via-ircc.c 2004-03-15 03:29:40.814978752 +0100
+@@ -1467,7 +1467,7 @@ static int via_ircc_net_open(struct net_
+ ASSERT(self != NULL, return 0;);
+ iobase = self->io.fir_base;
+ if (request_irq
+- (self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
++ (self->io.irq, via_ircc_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/vlsi_ir.c linux-2.6.4-hardened-new/drivers/net/irda/vlsi_ir.c
+--- linux-2.6.4-hardened/drivers/net/irda/vlsi_ir.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/vlsi_ir.c 2004-03-15 03:29:40.824977232 +0100
+@@ -1629,7 +1629,7 @@ static int vlsi_open(struct net_device *
+
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+- if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
++ if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ drivername, ndev)) {
+ WARNING("%s: couldn't get IRQ: %d\n", __FUNCTION__, ndev->irq);
+ goto errout_io;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/irda/w83977af_ir.c linux-2.6.4-hardened-new/drivers/net/irda/w83977af_ir.c
+--- linux-2.6.4-hardened/drivers/net/irda/w83977af_ir.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/irda/w83977af_ir.c 2004-03-15 03:29:40.840974800 +0100
+@@ -1197,7 +1197,7 @@ static int w83977af_net_open(struct net_
+
+ iobase = self->io.fir_base;
+
+- if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
++ if (request_irq(self->io.irq, w83977af_interrupt, SA_NET_RANDOM, dev->name,
+ (void *) dev)) {
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/isa-skeleton.c linux-2.6.4-hardened-new/drivers/net/isa-skeleton.c
+--- linux-2.6.4-hardened/drivers/net/isa-skeleton.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/isa-skeleton.c 2004-03-15 03:29:40.858972064 +0100
+@@ -251,7 +251,7 @@ static int __init netcard_probe1(struct
+ dev->irq = 9;
+
+ {
+- int irqval = request_irq(dev->irq, &net_interrupt, 0, cardname, dev);
++ int irqval = request_irq(dev->irq, &net_interrupt, SA_NET_RANDOM, cardname, dev);
+ if (irqval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name, dev->irq, irqval);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ixgb/ixgb_main.c linux-2.6.4-hardened-new/drivers/net/ixgb/ixgb_main.c
+--- linux-2.6.4-hardened/drivers/net/ixgb/ixgb_main.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ixgb/ixgb_main.c 2004-03-15 03:29:40.869970392 +0100
+@@ -211,7 +211,7 @@ ixgb_up(struct ixgb_adapter *adapter)
+
+ IXGB_DBG("ixgb_up\n");
+
+- if (request_irq(netdev->irq, &ixgb_intr, SA_SHIRQ | SA_SAMPLE_RANDOM,
++ if (request_irq(netdev->irq, &ixgb_intr, SA_SHIRQ | SA_NET_RANDOM,
+ netdev->name, netdev)) {
+ IXGB_DBG("%s: request_irq failed\n", netdev->name);
+ return -1;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/lance.c linux-2.6.4-hardened-new/drivers/net/lance.c
+--- linux-2.6.4-hardened/drivers/net/lance.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/lance.c 2004-03-15 03:29:40.879968872 +0100
+@@ -713,7 +713,7 @@ lance_open(struct net_device *dev)
+ int i;
+
+ if (dev->irq == 0 ||
+- request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
++ request_irq(dev->irq, &lance_interrupt, SA_NET_RANDOM, lp->name, dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/lasi_82596.c linux-2.6.4-hardened-new/drivers/net/lasi_82596.c
+--- linux-2.6.4-hardened/drivers/net/lasi_82596.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/lasi_82596.c 2004-03-15 03:29:40.902965376 +0100
+@@ -1016,7 +1016,7 @@ static int i596_open(struct net_device *
+ {
+ DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+- if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
++ if (request_irq(dev->irq, &i596_interrupt, SA_NET_RANDOM, "i82596", dev)) {
+ printk("%s: IRQ %d not free\n", dev->name, dev->irq);
+ goto out;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/lne390.c linux-2.6.4-hardened-new/drivers/net/lne390.c
+--- linux-2.6.4-hardened/drivers/net/lne390.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/lne390.c 2004-03-15 03:29:40.911964008 +0100
+@@ -228,7 +228,7 @@ static int __init lne390_probe1(struct n
+ }
+ printk(" IRQ %d,", dev->irq);
+
+- if ((ret = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return ret;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/lp486e.c linux-2.6.4-hardened-new/drivers/net/lp486e.c
+--- linux-2.6.4-hardened/drivers/net/lp486e.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/lp486e.c 2004-03-15 03:29:40.924962032 +0100
+@@ -849,7 +849,7 @@ static int i596_open(struct net_device *
+ {
+ int i;
+
+- i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return i;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/mac8390.c linux-2.6.4-hardened-new/drivers/net/mac8390.c
+--- linux-2.6.4-hardened/drivers/net/mac8390.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/mac8390.c 2004-03-15 03:29:40.933960664 +0100
+@@ -535,7 +535,7 @@ static int __init mac8390_initdev(struct
+ static int mac8390_open(struct net_device *dev)
+ {
+ ei_open(dev);
+- if (request_irq(dev->irq, ei_interrupt, 0, "8390 Ethernet", dev)) {
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, "8390 Ethernet", dev)) {
+ printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/mac89x0.c linux-2.6.4-hardened-new/drivers/net/mac89x0.c
+--- linux-2.6.4-hardened/drivers/net/mac89x0.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/mac89x0.c 2004-03-15 03:29:40.945958840 +0100
+@@ -335,7 +335,7 @@ net_open(struct net_device *dev)
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);
+
+ /* Grab the interrupt */
+- if (request_irq(dev->irq, &net_interrupt, 0, "cs89x0", dev))
++ if (request_irq(dev->irq, &net_interrupt, SA_NET_RANDOM, "cs89x0", dev))
+ return -EAGAIN;
+
+ /* Set up the IRQ - Apparently magic */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/mace.c linux-2.6.4-hardened-new/drivers/net/mace.c
+--- linux-2.6.4-hardened/drivers/net/mace.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/mace.c 2004-03-15 03:29:40.957957016 +0100
+@@ -241,7 +241,7 @@ static int __devinit mace_probe(struct m
+ */
+ mace_reset(dev);
+
+- rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
++ rc = request_irq(dev->irq, mace_interrupt, SA_NET_RANDOM, "MACE", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
+ goto err_unmap_rx_dma;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/macmace.c linux-2.6.4-hardened-new/drivers/net/macmace.c
+--- linux-2.6.4-hardened/drivers/net/macmace.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/macmace.c 2004-03-15 03:29:40.967955496 +0100
+@@ -319,11 +319,11 @@ static int mace_open(struct net_device *
+ mb->plscc = PORTSEL_AUI;
+ /* mb->utr = RTRD; */
+
+- if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
++ if (request_irq(dev->irq, mace_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+- if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
++ if (request_irq(mp->dma_intr, mace_dma_intr, SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/meth.c linux-2.6.4-hardened-new/drivers/net/meth.c
+--- linux-2.6.4-hardened/drivers/net/meth.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/meth.c 2004-03-15 03:29:40.983953064 +0100
+@@ -361,7 +361,7 @@ int meth_open(struct net_device *dev)
+ METH_DMA_TX_EN|/*METH_DMA_TX_INT_EN|*/
+ METH_DMA_RX_EN|METH_DMA_RX_INT_EN;
+
+- if(request_irq(dev->irq,meth_interrupt,SA_SHIRQ,meth_str,dev)){
++ if(request_irq(dev->irq,meth_interrupt,SA_SHIRQ | SA_NET_RANDOM,meth_str,dev)){
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/myri_sbus.c linux-2.6.4-hardened-new/drivers/net/myri_sbus.c
+--- linux-2.6.4-hardened/drivers/net/myri_sbus.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/myri_sbus.c 2004-03-15 03:29:40.996951088 +0100
+@@ -1063,7 +1063,7 @@ static int __init myri_ether_init(struct
+ /* Register interrupt handler now. */
+ DET(("Requesting MYRIcom IRQ line.\n"));
+ if (request_irq(dev->irq, &myri_interrupt,
+- SA_SHIRQ, "MyriCOM Ethernet", (void *) dev)) {
++ SA_SHIRQ | SA_NET_RANDOM, "MyriCOM Ethernet", (void *) dev)) {
+ printk("MyriCOM: Cannot register interrupt handler.\n");
+ goto err;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/natsemi.c linux-2.6.4-hardened-new/drivers/net/natsemi.c
+--- linux-2.6.4-hardened/drivers/net/natsemi.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/natsemi.c 2004-03-15 03:29:41.012948656 +0100
+@@ -1086,7 +1086,7 @@ static int netdev_open(struct net_device
+ /* Reset the chip, just in case. */
+ natsemi_reset(dev);
+
+- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ if (netif_msg_ifup(np))
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ne.c linux-2.6.4-hardened-new/drivers/net/ne.c
+--- linux-2.6.4-hardened/drivers/net/ne.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ne.c 2004-03-15 03:29:41.031945768 +0100
+@@ -464,7 +464,7 @@ static int __init ne_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
++ ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ne2.c linux-2.6.4-hardened-new/drivers/net/ne2.c
+--- linux-2.6.4-hardened/drivers/net/ne2.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ne2.c 2004-03-15 03:29:41.044943792 +0100
+@@ -470,7 +470,7 @@ static int __init ne2_probe1(struct net_
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n",
+ dev->irq, retval);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ne2k-pci.c linux-2.6.4-hardened-new/drivers/net/ne2k-pci.c
+--- linux-2.6.4-hardened/drivers/net/ne2k-pci.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ne2k-pci.c 2004-03-15 03:29:41.054942272 +0100
+@@ -418,7 +418,7 @@ static int ne2k_pci_set_fdx(struct net_d
+
+ static int ne2k_pci_open(struct net_device *dev)
+ {
+- int ret = request_irq(dev->irq, ei_interrupt, SA_SHIRQ, dev->name, dev);
++ int ret = request_irq(dev->irq, ei_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ne2k_cbus.c linux-2.6.4-hardened-new/drivers/net/ne2k_cbus.c
+--- linux-2.6.4-hardened/drivers/net/ne2k_cbus.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ne2k_cbus.c 2004-03-15 03:29:41.067940296 +0100
+@@ -500,7 +500,7 @@ static int __init ne_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
++ ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out_kfree;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ne3210.c linux-2.6.4-hardened-new/drivers/net/ne3210.c
+--- linux-2.6.4-hardened/drivers/net/ne3210.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ne3210.c 2004-03-15 03:29:41.077938776 +0100
+@@ -140,7 +140,7 @@ static int __init ne3210_eisa_probe (str
+ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
+ printk(".\nne3210.c: using IRQ %d, ", dev->irq);
+
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out2;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ni5010.c linux-2.6.4-hardened-new/drivers/net/ni5010.c
+--- linux-2.6.4-hardened/drivers/net/ni5010.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ni5010.c 2004-03-15 03:29:41.089936952 +0100
+@@ -381,7 +381,7 @@ static int ni5010_open(struct net_device
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
+
+- if (request_irq(dev->irq, &ni5010_interrupt, 0, boardname, dev)) {
++ if (request_irq(dev->irq, &ni5010_interrupt, SA_NET_RANDOM, boardname, dev)) {
+ printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ni52.c linux-2.6.4-hardened-new/drivers/net/ni52.c
+--- linux-2.6.4-hardened/drivers/net/ni52.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ni52.c 2004-03-15 03:29:41.100935280 +0100
+@@ -265,7 +265,7 @@ static int ni52_open(struct net_device *
+ startrecv586(dev);
+ ni_enaint();
+
+- ret = request_irq(dev->irq, &ni52_interrupt,0,dev->name,dev);
++ ret = request_irq(dev->irq, &ni52_interrupt,SA_NET_RANDOM,dev->name,dev);
+ if (ret)
+ {
+ ni_reset586();
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ni65.c linux-2.6.4-hardened-new/drivers/net/ni65.c
+--- linux-2.6.4-hardened/drivers/net/ni65.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ni65.c 2004-03-15 03:29:41.114933152 +0100
+@@ -296,7 +296,7 @@ static void ni65_set_performance(struct
+ static int ni65_open(struct net_device *dev)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+- int irqval = request_irq(dev->irq, &ni65_interrupt,0,
++ int irqval = request_irq(dev->irq, &ni65_interrupt,SA_NET_RANDOM,
+ cards[p->cardno].cardname,dev);
+ if (irqval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/ns83820.c linux-2.6.4-hardened-new/drivers/net/ns83820.c
+--- linux-2.6.4-hardened/drivers/net/ns83820.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/ns83820.c 2004-03-15 03:29:41.126931328 +0100
+@@ -1850,7 +1850,7 @@ static int __devinit ns83820_init_one(st
+ setup_ee_mem_bitbanger(&dev->ee, (long)dev->base + MEAR, 3, 2, 1, 0,
+ 0);
+
+- err = request_irq(pci_dev->irq, ns83820_irq, SA_SHIRQ,
++ err = request_irq(pci_dev->irq, ns83820_irq, SA_SHIRQ | SA_NET_RANDOM,
+ ndev->name, ndev);
+ if (err) {
+ printk(KERN_INFO "ns83820: unable to register irq %d\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/oaknet.c linux-2.6.4-hardened-new/drivers/net/oaknet.c
+--- linux-2.6.4-hardened/drivers/net/oaknet.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/oaknet.c 2004-03-15 03:29:41.138929504 +0100
+@@ -162,7 +162,7 @@ static int __init oaknet_init(void)
+ /* Attempt to get the interrupt line */
+
+ ret = -EAGAIN;
+- if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev)) {
+ printk("%s: unable to request interrupt %d.\n",
+ dev->name, dev->irq);
+ goto out_region;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/pci-skeleton.c linux-2.6.4-hardened-new/drivers/net/pci-skeleton.c
+--- linux-2.6.4-hardened/drivers/net/pci-skeleton.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/pci-skeleton.c 2004-03-15 03:29:41.150927680 +0100
+@@ -1080,7 +1080,7 @@ static int netdrv_open (struct net_devic
+
+ DPRINTK ("ENTER\n");
+
+- retval = request_irq (dev->irq, netdrv_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq (dev->irq, netdrv_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ DPRINTK ("EXIT, returning %d\n", retval);
+ return retval;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/pcnet32.c linux-2.6.4-hardened-new/drivers/net/pcnet32.c
+--- linux-2.6.4-hardened/drivers/net/pcnet32.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/pcnet32.c 2004-03-15 03:29:41.172924336 +0100
+@@ -1142,7 +1142,7 @@ pcnet32_open(struct net_device *dev)
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &pcnet32_interrupt,
+- lp->shared_irq ? SA_SHIRQ : 0, lp->name, (void *)dev)) {
++ lp->shared_irq ? SA_SHIRQ | SA_NET_RANDOM : 0, lp->name, (void *)dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/r8169.c linux-2.6.4-hardened-new/drivers/net/r8169.c
+--- linux-2.6.4-hardened/drivers/net/r8169.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/r8169.c 2004-03-15 03:29:41.182922816 +0100
+@@ -656,7 +656,7 @@ rtl8169_open(struct net_device *dev)
+ u32 TxPhyAddr, RxPhyAddr;
+
+ retval =
+- request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
++ request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ return retval;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/rcpci45.c linux-2.6.4-hardened-new/drivers/net/rcpci45.c
+--- linux-2.6.4-hardened/drivers/net/rcpci45.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/rcpci45.c 2004-03-15 03:29:41.194920992 +0100
+@@ -336,7 +336,7 @@ RCopen (struct net_device *dev)
+ }
+
+ /* Request a shared interrupt line. */
+- error = request_irq (dev->irq, RCinterrupt, SA_SHIRQ, dev->name, dev);
++ error = request_irq (dev->irq, RCinterrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (error) {
+ printk (KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/rrunner.c linux-2.6.4-hardened-new/drivers/net/rrunner.c
+--- linux-2.6.4-hardened/drivers/net/rrunner.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/rrunner.c 2004-03-15 03:29:41.208918864 +0100
+@@ -1201,7 +1201,7 @@ static int rr_open(struct net_device *de
+ readl(&regs->HostCtrl);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+- if (request_irq(dev->irq, rr_interrupt, SA_SHIRQ, dev->name, dev)) {
++ if (request_irq(dev->irq, rr_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+ dev->name, dev->irq);
+ ecode = -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/saa9730.c linux-2.6.4-hardened-new/drivers/net/saa9730.c
+--- linux-2.6.4-hardened/drivers/net/saa9730.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/saa9730.c 2004-03-15 03:29:41.218917344 +0100
+@@ -805,7 +805,7 @@ static int lan_saa9730_open(struct net_d
+ (struct lan_saa9730_private *) dev->priv;
+
+ /* Associate IRQ with lan_saa9730_interrupt */
+- if (request_irq(dev->irq, &lan_saa9730_interrupt, 0, "SAA9730 Eth",
++ if (request_irq(dev->irq, &lan_saa9730_interrupt, SA_NET_RANDOM, "SAA9730 Eth",
+ dev)) {
+ printk("lan_saa9730_open: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sb1000.c linux-2.6.4-hardened-new/drivers/net/sb1000.c
+--- linux-2.6.4-hardened/drivers/net/sb1000.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sb1000.c 2004-03-15 03:29:41.230915520 +0100
+@@ -967,7 +967,7 @@ sb1000_open(struct net_device *dev)
+ lp->rx_frame_id[1] = 0;
+ lp->rx_frame_id[2] = 0;
+ lp->rx_frame_id[3] = 0;
+- if (request_irq(dev->irq, &sb1000_interrupt, 0, "sb1000", dev)) {
++ if (request_irq(dev->irq, &sb1000_interrupt, SA_NET_RANDOM, "sb1000", dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sb1250-mac.c linux-2.6.4-hardened-new/drivers/net/sb1250-mac.c
+--- linux-2.6.4-hardened/drivers/net/sb1250-mac.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sb1250-mac.c 2004-03-15 03:32:43.489208048 +0100
+@@ -2464,7 +2464,7 @@ static int sbmac_open(struct net_device
+ * map/route interrupt
+ */
+
+- if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
++ if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))
+ return -EBUSY;
+
+ /*
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/seeq8005.c linux-2.6.4-hardened-new/drivers/net/seeq8005.c
+--- linux-2.6.4-hardened/drivers/net/seeq8005.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/seeq8005.c 2004-03-15 03:29:41.274908832 +0100
+@@ -323,7 +323,7 @@ static int __init seeq8005_probe1(struct
+
+ #if 0
+ {
+- int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev);
++ int irqval = request_irq(dev->irq, &seeq8005_interrupt, SA_NET_RANDOM, "seeq8005", dev);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sgiseeq.c linux-2.6.4-hardened-new/drivers/net/sgiseeq.c
+--- linux-2.6.4-hardened/drivers/net/sgiseeq.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sgiseeq.c 2004-03-15 03:29:41.284907312 +0100
+@@ -617,7 +617,7 @@ int sgiseeq_init(struct hpc3_regs* regs,
+ goto out;
+ }
+
+- if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
++ if (request_irq(irq, sgiseeq_interrupt, SA_NET_RANDOM, sgiseeqstr, dev)) {
+ printk(KERN_ERR "Seeq8003: Can't get irq %d\n", irq);
+ err = -EAGAIN;
+ goto out1;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sis190.c linux-2.6.4-hardened-new/drivers/net/sis190.c
+--- linux-2.6.4-hardened/drivers/net/sis190.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sis190.c 2004-03-15 03:29:41.296905488 +0100
+@@ -713,7 +713,7 @@ SiS190_open(struct net_device *dev)
+ struct sis190_private *tp = dev->priv;
+ int rc;
+
+- rc = request_irq(dev->irq, SiS190_interrupt, SA_SHIRQ, dev->name, dev);
++ rc = request_irq(dev->irq, SiS190_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (rc)
+ goto out;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sis900.c linux-2.6.4-hardened-new/drivers/net/sis900.c
+--- linux-2.6.4-hardened/drivers/net/sis900.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sis900.c 2004-03-15 03:29:41.308903664 +0100
+@@ -928,7 +928,7 @@ sis900_open(struct net_device *net_dev)
+ pci_read_config_byte(sis_priv->pci_dev, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+
+- ret = request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ, net_dev->name, net_dev);
++ ret = request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ | SA_NET_RANDOM, net_dev->name, net_dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sk98lin/skge.c linux-2.6.4-hardened-new/drivers/net/sk98lin/skge.c
+--- linux-2.6.4-hardened/drivers/net/sk98lin/skge.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sk98lin/skge.c 2004-03-15 03:29:41.335899560 +0100
+@@ -963,9 +963,9 @@ SK_BOOL DualNet;
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ if (pAC->GIni.GIMacsFound == 2) {
+- Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, pAC->Name, dev);
++ Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ | SA_NET_RANDOM, pAC->Name, dev);
+ } else if (pAC->GIni.GIMacsFound == 1) {
+- Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ,
++ Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ | SA_NET_RANDOM,
+ pAC->Name, dev);
+ } else {
+ printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sk_g16.c linux-2.6.4-hardened-new/drivers/net/sk_g16.c
+--- linux-2.6.4-hardened/drivers/net/sk_g16.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sk_g16.c 2004-03-15 03:29:41.346897888 +0100
+@@ -889,7 +889,7 @@ static int SK_open(struct net_device *de
+
+ do
+ {
+- irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16", dev);
++ irqval = request_irq(irqtab[i], &SK_interrupt, SA_NET_RANDOM, "sk_g16", dev);
+ i++;
+ } while (irqval && irqtab[i]);
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sk_mca.c linux-2.6.4-hardened-new/drivers/net/sk_mca.c
+--- linux-2.6.4-hardened/drivers/net/sk_mca.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sk_mca.c 2004-03-15 03:29:41.358896064 +0100
+@@ -829,7 +829,7 @@ static int skmca_open(struct net_device
+ /* register resources - only necessary for IRQ */
+ result =
+ request_irq(priv->realirq, irq_handler,
+- SA_SHIRQ | SA_SAMPLE_RANDOM, "sk_mca", dev);
++ SA_SHIRQ | SA_NET_RANDOM, "sk_mca", dev);
+ if (result != 0) {
+ printk("%s: failed to register irq %d\n", dev->name,
+ dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/skfp/skfddi.c linux-2.6.4-hardened-new/drivers/net/skfp/skfddi.c
+--- linux-2.6.4-hardened/drivers/net/skfp/skfddi.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/skfp/skfddi.c 2004-03-15 03:29:41.370894240 +0100
+@@ -523,7 +523,7 @@ static int skfp_open(struct net_device *
+
+ PRINTK(KERN_INFO "entering skfp_open\n");
+ /* Register IRQ - support shared interrupts by passing device ptr */
+- err = request_irq(dev->irq, (void *) skfp_interrupt, SA_SHIRQ,
++ err = request_irq(dev->irq, (void *) skfp_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (err)
+ return err;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/smc-mca.c linux-2.6.4-hardened-new/drivers/net/smc-mca.c
+--- linux-2.6.4-hardened/drivers/net/smc-mca.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/smc-mca.c 2004-03-15 03:29:41.378893024 +0100
+@@ -346,7 +346,7 @@ static int ultramca_open(struct net_devi
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ int retval;
+
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/smc-ultra.c linux-2.6.4-hardened-new/drivers/net/smc-ultra.c
+--- linux-2.6.4-hardened/drivers/net/smc-ultra.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/smc-ultra.c 2004-03-15 03:29:41.389891352 +0100
+@@ -363,7 +363,7 @@ ultra_open(struct net_device *dev)
+ unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40,
+ 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, };
+
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval)
+ return retval;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/smc-ultra32.c linux-2.6.4-hardened-new/drivers/net/smc-ultra32.c
+--- linux-2.6.4-hardened/drivers/net/smc-ultra32.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/smc-ultra32.c 2004-03-15 03:29:41.399889832 +0100
+@@ -279,7 +279,7 @@ out:
+ static int ultra32_open(struct net_device *dev)
+ {
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
+- int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : SA_SHIRQ;
++ int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : SA_SHIRQ | SA_NET_RANDOM;
+ int retval;
+
+ retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/smc9194.c linux-2.6.4-hardened-new/drivers/net/smc9194.c
+--- linux-2.6.4-hardened/drivers/net/smc9194.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/smc9194.c 2004-03-15 03:29:41.409888312 +0100
+@@ -1001,7 +1001,7 @@ static int __init smc_probe(struct net_d
+ memset(dev->priv, 0, sizeof(struct smc_local));
+
+ /* Grab the IRQ */
+- retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, &smc_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, retval);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sonic.c linux-2.6.4-hardened-new/drivers/net/sonic.c
+--- linux-2.6.4-hardened/drivers/net/sonic.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sonic.c 2004-03-15 03:29:41.419886792 +0100
+@@ -41,8 +41,8 @@ static int sonic_open(struct net_device
+ * covering another bug otherwise corrupting data. This doesn't mean
+ * this glue works ok under all situations.
+ */
+-// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) {
+- if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT,
++// if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_NET_RANDOM, "sonic", dev)) {
++ if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT | SA_NET_RANDOM,
+ "sonic", dev)) {
+ printk("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/starfire.c linux-2.6.4-hardened-new/drivers/net/starfire.c
+--- linux-2.6.4-hardened/drivers/net/starfire.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/starfire.c 2004-03-15 03:29:41.429885272 +0100
+@@ -1111,7 +1111,7 @@ static int netdev_open(struct net_device
+
+ COMPAT_MOD_INC_USE_COUNT;
+
+- retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ COMPAT_MOD_DEC_USE_COUNT;
+ return retval;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/stnic.c linux-2.6.4-hardened-new/drivers/net/stnic.c
+--- linux-2.6.4-hardened/drivers/net/stnic.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/stnic.c 2004-03-15 03:29:41.439883752 +0100
+@@ -127,7 +127,7 @@ static int __init stnic_probe(void)
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- err = request_irq (dev->irq, ei_interrupt, 0, dev->name, dev);
++ err = request_irq (dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (err) {
+ printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sun3_82586.c linux-2.6.4-hardened-new/drivers/net/sun3_82586.c
+--- linux-2.6.4-hardened/drivers/net/sun3_82586.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sun3_82586.c 2004-03-15 03:29:41.447882536 +0100
+@@ -191,7 +191,7 @@ static int sun3_82586_open(struct net_de
+ startrecv586(dev);
+ sun3_enaint();
+
+- ret = request_irq(dev->irq, &sun3_82586_interrupt,0,dev->name,dev);
++ ret = request_irq(dev->irq, &sun3_82586_interrupt,SA_NET_RANDOM,dev->name,dev);
+ if (ret)
+ {
+ sun3_reset586();
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sun3lance.c linux-2.6.4-hardened-new/drivers/net/sun3lance.c
+--- linux-2.6.4-hardened/drivers/net/sun3lance.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sun3lance.c 2004-03-15 03:33:13.810598496 +0100
+@@ -342,7 +342,7 @@ static int __init lance_probe( struct ne
+
+ REGA(CSR0) = CSR0_STOP;
+
+- request_irq(LANCE_IRQ, lance_interrupt, SA_INTERRUPT, "SUN3 Lance", dev);
++ request_irq(LANCE_IRQ, lance_interrupt, SA_INTERRUPT | SA_NET_RANDOM, "SUN3 Lance", dev);
+ dev->irq = (unsigned short)LANCE_IRQ;
+
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sunbmac.c linux-2.6.4-hardened-new/drivers/net/sunbmac.c
+--- linux-2.6.4-hardened/drivers/net/sunbmac.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sunbmac.c 2004-03-15 03:29:41.481877368 +0100
+@@ -905,7 +905,7 @@ static int bigmac_open(struct net_device
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+ int ret;
+
+- ret = request_irq(dev->irq, &bigmac_interrupt, SA_SHIRQ, dev->name, bp);
++ ret = request_irq(dev->irq, &bigmac_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, bp);
+ if (ret) {
+ printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sundance.c linux-2.6.4-hardened-new/drivers/net/sundance.c
+--- linux-2.6.4-hardened/drivers/net/sundance.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sundance.c 2004-03-15 03:29:41.494875392 +0100
+@@ -861,7 +861,7 @@ static int netdev_open(struct net_device
+
+ /* Do we need to reset the chip??? */
+
+- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sungem.c linux-2.6.4-hardened-new/drivers/net/sungem.c
+--- linux-2.6.4-hardened/drivers/net/sungem.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sungem.c 2004-03-15 03:29:41.511872808 +0100
+@@ -2170,7 +2170,7 @@ static int gem_open(struct net_device *d
+ * on the controller
+ */
+ if (request_irq(gp->pdev->irq, gem_interrupt,
+- SA_SHIRQ, dev->name, (void *)dev)) {
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, (void *)dev)) {
+ printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
+
+ spin_lock_irq(&gp->lock);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sunhme.c linux-2.6.4-hardened-new/drivers/net/sunhme.c
+--- linux-2.6.4-hardened/drivers/net/sunhme.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sunhme.c 2004-03-15 03:29:41.533869464 +0100
+@@ -2202,7 +2202,7 @@ static int happy_meal_open(struct net_de
+ */
+ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
+ if (request_irq(dev->irq, &happy_meal_interrupt,
+- SA_SHIRQ, dev->name, (void *)dev)) {
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, (void *)dev)) {
+ HMD(("EAGAIN\n"));
+ #ifdef __sparc__
+ printk(KERN_ERR "happy_meal(SBUS): Can't order irq %s to go.\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sunlance.c linux-2.6.4-hardened-new/drivers/net/sunlance.c
+--- linux-2.6.4-hardened/drivers/net/sunlance.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sunlance.c 2004-03-15 03:29:41.545867640 +0100
+@@ -923,7 +923,7 @@ static int lance_open(struct net_device
+
+ STOP_LANCE(lp);
+
+- if (request_irq(dev->irq, &lance_interrupt, SA_SHIRQ,
++ if (request_irq(dev->irq, &lance_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ lancestr, (void *) dev)) {
+ printk(KERN_ERR "Lance: Can't get irq %s\n", __irq_itoa(dev->irq));
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/sunqe.c linux-2.6.4-hardened-new/drivers/net/sunqe.c
+--- linux-2.6.4-hardened/drivers/net/sunqe.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/sunqe.c 2004-03-15 03:29:41.554866272 +0100
+@@ -889,7 +889,7 @@ static int __init qec_ether_init(struct
+ * for it now.
+ */
+ if (request_irq(sdev->irqs[0], &qec_interrupt,
+- SA_SHIRQ, "QuadEther", (void *) qecp)) {
++ SA_SHIRQ | SA_NET_RANDOM, "QuadEther", (void *) qecp)) {
+ printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
+ res = -EAGAIN;
+ goto out4;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tc35815.c linux-2.6.4-hardened-new/drivers/net/tc35815.c
+--- linux-2.6.4-hardened/drivers/net/tc35815.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tc35815.c 2004-03-15 03:29:41.566864448 +0100
+@@ -879,7 +879,7 @@ tc35815_open(struct net_device *dev)
+ */
+
+ if (dev->irq == 0 ||
+- request_irq(dev->irq, &tc35815_interrupt, SA_SHIRQ, cardname, dev)) {
++ request_irq(dev->irq, &tc35815_interrupt, SA_SHIRQ | SA_NET_RANDOM, cardname, dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tg3.c linux-2.6.4-hardened-new/drivers/net/tg3.c
+--- linux-2.6.4-hardened/drivers/net/tg3.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tg3.c 2004-03-15 03:29:41.595860040 +0100
+@@ -5241,7 +5241,7 @@ static int tg3_open(struct net_device *d
+ return err;
+
+ err = request_irq(dev->irq, tg3_interrupt,
+- SA_SHIRQ, dev->name, dev);
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+
+ if (err) {
+ tg3_free_consistent(tp);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tlan.c linux-2.6.4-hardened-new/drivers/net/tlan.c
+--- linux-2.6.4-hardened/drivers/net/tlan.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tlan.c 2004-03-15 03:29:41.611857608 +0100
+@@ -926,7 +926,7 @@ static int TLan_Open( struct net_device
+ int err;
+
+ priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
+- err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ, TLanSignature, dev );
++ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ | SA_NET_RANDOM, TLanSignature, dev );
+
+ if ( err ) {
+ printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq );
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/3c359.c linux-2.6.4-hardened-new/drivers/net/tokenring/3c359.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/3c359.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/3c359.c 2004-03-15 03:29:41.622855936 +0100
+@@ -575,7 +575,7 @@ static int xl_open(struct net_device *de
+
+ u16 switchsettings, switchsettings_eeprom ;
+
+- if(request_irq(dev->irq, &xl_interrupt, SA_SHIRQ , "3c359", dev)) {
++ if(request_irq(dev->irq, &xl_interrupt, SA_SHIRQ | SA_NET_RANDOM, "3c359", dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/abyss.c linux-2.6.4-hardened-new/drivers/net/tokenring/abyss.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/abyss.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/abyss.c 2004-03-15 03:29:41.632854416 +0100
+@@ -123,7 +123,7 @@ static int __devinit abyss_attach(struct
+ goto err_out_trdev;
+ }
+
+- ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
++ ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret)
+ goto err_out_region;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/ibmtr.c linux-2.6.4-hardened-new/drivers/net/tokenring/ibmtr.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/ibmtr.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/ibmtr.c 2004-03-15 03:29:41.649851832 +0100
+@@ -684,7 +684,7 @@ static int __devinit ibmtr_probe1(struct
+
+ /* The PCMCIA has already got the interrupt line and the io port,
+ so no chance of anybody else getting it - MLP */
+- if (request_irq(dev->irq = irq, &tok_interrupt, 0, "ibmtr", dev) != 0) {
++ if (request_irq(dev->irq = irq, &tok_interrupt, SA_NET_RANDOM, "ibmtr", dev) != 0) {
+ DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
+ irq);
+ iounmap(t_mmio);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/lanstreamer.c linux-2.6.4-hardened-new/drivers/net/tokenring/lanstreamer.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/lanstreamer.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/lanstreamer.c 2004-03-15 03:29:41.661850008 +0100
+@@ -597,7 +597,7 @@ static int streamer_open(struct net_devi
+ rc=streamer_reset(dev);
+ }
+
+- if (request_irq(dev->irq, &streamer_interrupt, SA_SHIRQ, "lanstreamer", dev)) {
++ if (request_irq(dev->irq, &streamer_interrupt, SA_SHIRQ | SA_NET_RANDOM, "lanstreamer", dev)) {
+ return -EAGAIN;
+ }
+ #if STREAMER_DEBUG
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/madgemc.c linux-2.6.4-hardened-new/drivers/net/tokenring/madgemc.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/madgemc.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/madgemc.c 2004-03-15 03:29:41.670848640 +0100
+@@ -333,7 +333,7 @@ static int __init madgemc_probe(void)
+ */
+ outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
+ madgemc_setsifsel(dev, 1);
+- if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
++ if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ "madgemc", dev))
+ goto getout;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/olympic.c linux-2.6.4-hardened-new/drivers/net/tokenring/olympic.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/olympic.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/olympic.c 2004-03-15 03:29:41.681846968 +0100
+@@ -441,7 +441,7 @@ static int olympic_open(struct net_devic
+
+ DECLARE_WAITQUEUE(wait,current) ;
+
+- if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
++ if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ | SA_NET_RANDOM, "olympic", dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/proteon.c linux-2.6.4-hardened-new/drivers/net/tokenring/proteon.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/proteon.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/proteon.c 2004-03-15 03:29:41.704843472 +0100
+@@ -178,7 +178,7 @@ static int __init setup_card(struct net_
+ for(j = 0; irqlist[j] != 0; j++)
+ {
+ dev->irq = irqlist[j];
+- if (!request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (!request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ cardname, dev))
+ break;
+ }
+@@ -200,7 +200,7 @@ static int __init setup_card(struct net_
+ dev->name, dev->irq);
+ goto out3;
+ }
+- if (request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ cardname, dev))
+ {
+ printk(KERN_INFO "%s: Selected IRQ %d not available\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/skisa.c linux-2.6.4-hardened-new/drivers/net/tokenring/skisa.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/skisa.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/skisa.c 2004-03-15 03:29:41.726840128 +0100
+@@ -195,7 +195,7 @@ static int __init setup_card(struct net_
+ for(j = 0; irqlist[j] != 0; j++)
+ {
+ dev->irq = irqlist[j];
+- if (!request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (!request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ isa_cardname, dev))
+ break;
+ }
+@@ -217,7 +217,7 @@ static int __init setup_card(struct net_
+ dev->name, dev->irq);
+ goto out3;
+ }
+- if (request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ isa_cardname, dev))
+ {
+ printk(KERN_INFO "%s: Selected IRQ %d not available\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/smctr.c linux-2.6.4-hardened-new/drivers/net/tokenring/smctr.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/smctr.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/smctr.c 2004-03-15 03:29:41.745837240 +0100
+@@ -532,7 +532,7 @@ static int smctr_chk_mca(struct net_devi
+ dev->irq = 15;
+ break;
+ }
+- if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev)) {
++ if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ | SA_NET_RANDOM, smctr_name, dev)) {
+ release_region(dev->base_addr, SMCTR_IO_EXTENT);
+ return -ENODEV;
+ }
+@@ -1062,7 +1062,7 @@ static int __init smctr_chk_isa(struct n
+ goto out2;
+ }
+
+- if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
++ if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ | SA_NET_RANDOM, smctr_name, dev))
+ goto out2;
+
+ /* Get 58x Rom Base */
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tokenring/tmspci.c linux-2.6.4-hardened-new/drivers/net/tokenring/tmspci.c
+--- linux-2.6.4-hardened/drivers/net/tokenring/tmspci.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tokenring/tmspci.c 2004-03-15 03:29:41.754835872 +0100
+@@ -122,7 +122,7 @@ static int __devinit tms_pci_attach(stru
+ goto err_out_trdev;
+ }
+
+- ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
++ ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret)
+ goto err_out_region;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/de2104x.c linux-2.6.4-hardened-new/drivers/net/tulip/de2104x.c
+--- linux-2.6.4-hardened/drivers/net/tulip/de2104x.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/de2104x.c 2004-03-15 03:29:41.767833896 +0100
+@@ -1387,7 +1387,7 @@ static int de_open (struct net_device *d
+ goto err_out_free;
+ }
+
+- rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
++ rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (rc) {
+ printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
+ dev->name, dev->irq, rc);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/de4x5.c linux-2.6.4-hardened-new/drivers/net/tulip/de4x5.c
+--- linux-2.6.4-hardened/drivers/net/tulip/de4x5.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/de4x5.c 2004-03-15 03:29:41.786831008 +0100
+@@ -1320,10 +1320,10 @@ de4x5_open(struct net_device *dev)
+ lp->state = OPEN;
+ de4x5_dbg_open(dev);
+
+- if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
++ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ lp->adapter_name, dev)) {
+ printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+- if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
++ if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM,
+ lp->adapter_name, dev)) {
+ printk("\n Cannot get IRQ- reconfigure your hardware.\n");
+ disable_ast(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/dmfe.c linux-2.6.4-hardened-new/drivers/net/tulip/dmfe.c
+--- linux-2.6.4-hardened/drivers/net/tulip/dmfe.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/dmfe.c 2004-03-15 03:29:41.796829488 +0100
+@@ -498,7 +498,7 @@ static int dmfe_open(struct DEVICE *dev)
+
+ DMFE_DBUG(0, "dmfe_open", 0);
+
+- ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
++ ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/tulip_core.c linux-2.6.4-hardened-new/drivers/net/tulip/tulip_core.c
+--- linux-2.6.4-hardened/drivers/net/tulip/tulip_core.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/tulip_core.c 2004-03-15 03:29:41.806827968 +0100
+@@ -484,7 +484,7 @@ tulip_open(struct net_device *dev)
+ {
+ int retval;
+
+- if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ tulip_init_ring (dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/winbond-840.c linux-2.6.4-hardened-new/drivers/net/tulip/winbond-840.c
+--- linux-2.6.4-hardened/drivers/net/tulip/winbond-840.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/winbond-840.c 2004-03-15 03:29:41.819825992 +0100
+@@ -693,7 +693,7 @@ static int netdev_open(struct net_device
+ writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+
+ netif_device_detach(dev);
+- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ goto out_err;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/xircom_cb.c linux-2.6.4-hardened-new/drivers/net/tulip/xircom_cb.c
+--- linux-2.6.4-hardened/drivers/net/tulip/xircom_cb.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/xircom_cb.c 2004-03-15 03:29:41.828824624 +0100
+@@ -448,7 +448,7 @@ static int xircom_open(struct net_device
+ int retval;
+ enter("xircom_open");
+ printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+- retval = request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ leave("xircom_open - No IRQ");
+ return retval;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/tulip/xircom_tulip_cb.c linux-2.6.4-hardened-new/drivers/net/tulip/xircom_tulip_cb.c
+--- linux-2.6.4-hardened/drivers/net/tulip/xircom_tulip_cb.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/tulip/xircom_tulip_cb.c 2004-03-15 03:29:41.845822040 +0100
+@@ -806,7 +806,7 @@ xircom_open(struct net_device *dev)
+ {
+ struct xircom_private *tp = dev->priv;
+
+- if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
++ if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))
+ return -EAGAIN;
+
+ xircom_up(dev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/typhoon.c linux-2.6.4-hardened-new/drivers/net/typhoon.c
+--- linux-2.6.4-hardened/drivers/net/typhoon.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/typhoon.c 2004-03-15 03:29:41.860819760 +0100
+@@ -2109,7 +2109,7 @@ typhoon_open(struct net_device *dev)
+ goto out_sleep;
+ }
+
+- err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
++ err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if(err < 0)
+ goto out_sleep;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/via-rhine.c linux-2.6.4-hardened-new/drivers/net/via-rhine.c
+--- linux-2.6.4-hardened/drivers/net/via-rhine.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/via-rhine.c 2004-03-15 03:29:41.871818088 +0100
+@@ -1132,7 +1132,7 @@ static int via_rhine_open(struct net_dev
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+- i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(np->pdev->irq, &via_rhine_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/c101.c linux-2.6.4-hardened-new/drivers/net/wan/c101.c
+--- linux-2.6.4-hardened/drivers/net/wan/c101.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/c101.c 2004-03-15 03:29:41.886815808 +0100
+@@ -325,7 +325,7 @@ static int __init c101_run(unsigned long
+ return -ENOBUFS;
+ }
+
+- if (request_irq(irq, sca_intr, 0, devname, card)) {
++ if (request_irq(irq, sca_intr, SA_NET_RANDOM, devname, card)) {
+ printk(KERN_ERR "c101: could not allocate IRQ\n");
+ c101_destroy_card(card);
+ return(-EBUSY);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/comx-hw-comx.c linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-comx.c
+--- linux-2.6.4-hardened/drivers/net/wan/comx-hw-comx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-comx.c 2004-03-15 03:29:41.899813832 +0100
+@@ -473,7 +473,7 @@ static int COMX_open(struct net_device *
+ if (!request_region(dev->base_addr, hw->io_extent, dev->name)) {
+ return -EAGAIN;
+ }
+- if (request_irq(dev->irq, COMX_interrupt, 0, dev->name,
++ if (request_irq(dev->irq, COMX_interrupt, SA_NET_RANDOM, dev->name,
+ (void *)dev)) {
+ printk(KERN_ERR "comx-hw-comx: unable to obtain irq %d\n", dev->irq);
+ release_region(dev->base_addr, hw->io_extent);
+@@ -585,7 +585,7 @@ static int COMX_close(struct net_device
+ if (ch->twin && (twin_ch = ch->twin->priv) &&
+ (twin_ch->init_status & HW_OPEN)) {
+ /* Pass the irq to the twin */
+- if (request_irq(dev->irq, COMX_interrupt, 0, ch->twin->name,
++ if (request_irq(dev->irq, COMX_interrupt, SA_NET_RANDOM, ch->twin->name,
+ (void *)ch->twin) == 0) {
+ twin_ch->init_status |= IRQ_ALLOCATED;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/comx-hw-locomx.c linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-locomx.c
+--- linux-2.6.4-hardened/drivers/net/wan/comx-hw-locomx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-locomx.c 2004-03-15 03:29:41.907812616 +0100
+@@ -170,7 +170,7 @@ static int LOCOMX_open(struct net_device
+ hw->board.chanA.irqs=&z8530_nop;
+ hw->board.chanB.irqs=&z8530_nop;
+
+- if(request_irq(dev->irq, z8530_interrupt, SA_INTERRUPT,
++ if(request_irq(dev->irq, z8530_interrupt, SA_INTERRUPT | SA_NET_RANDOM,
+ dev->name, &hw->board)) {
+ printk(KERN_ERR "%s: unable to obtain irq %d\n", dev->name,
+ dev->irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/comx-hw-mixcom.c linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-mixcom.c
+--- linux-2.6.4-hardened/drivers/net/wan/comx-hw-mixcom.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-mixcom.c 2004-03-15 03:29:41.917811096 +0100
+@@ -518,7 +518,7 @@ static int MIXCOM_open(struct net_device
+ }
+
+ if(hw->channel==0 && !(ch->init_status & IRQ_ALLOCATED)) {
+- if (request_irq(dev->irq, MIXCOM_interrupt, 0,
++ if (request_irq(dev->irq, MIXCOM_interrupt, SA_NET_RANDOM,
+ dev->name, (void *)dev)) {
+ printk(KERN_ERR "MIXCOM: unable to obtain irq %d\n", dev->irq);
+ ret = -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/comx-hw-munich.c linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-munich.c
+--- linux-2.6.4-hardened/drivers/net/wan/comx-hw-munich.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/comx-hw-munich.c 2004-03-15 03:29:41.929809272 +0100
+@@ -1499,7 +1499,7 @@ static int MUNICH_open(struct net_device
+ board->irq = 0;
+
+ /* (char*) cast to avoid warning about discarding volatile: */
+- if (request_irq(board->pci->irq, MUNICH_interrupt, 0,
++ if (request_irq(board->pci->irq, MUNICH_interrupt, SA_NET_RANDOM,
+ (char *)board->devname, (void *)board))
+ {
+ printk("MUNICH_open: %s: unable to obtain irq %d\n", board->devname,
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/cosa.c linux-2.6.4-hardened-new/drivers/net/wan/cosa.c
+--- linux-2.6.4-hardened/drivers/net/wan/cosa.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/cosa.c 2004-03-15 03:29:41.943807144 +0100
+@@ -543,7 +543,7 @@ static int cosa_probe(int base, int irq,
+ cosa->usage = 0;
+ cosa->nchannels = 2; /* FIXME: how to determine this? */
+
+- if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
++ if (request_irq(cosa->irq, cosa_interrupt, SA_NET_RANDOM, cosa->type, cosa)) {
+ err = -1;
+ goto err_out;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/cycx_main.c linux-2.6.4-hardened-new/drivers/net/wan/cycx_main.c
+--- linux-2.6.4-hardened/drivers/net/wan/cycx_main.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/cycx_main.c 2004-03-15 03:29:41.952805776 +0100
+@@ -214,7 +214,7 @@ static int cycx_wan_setup(struct wan_dev
+ /* Allocate IRQ */
+ irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
+
+- if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
++ if (request_irq(irq, cycx_isr, SA_NET_RANDOM, wandev->name, card)) {
+ printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
+ wandev->name, irq);
+ goto out;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/dscc4.c linux-2.6.4-hardened-new/drivers/net/wan/dscc4.c
+--- linux-2.6.4-hardened/drivers/net/wan/dscc4.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/dscc4.c 2004-03-15 03:29:41.970803040 +0100
+@@ -750,7 +750,7 @@ static int __devinit dscc4_init_one(stru
+
+ priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);
+
+- if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root)){
++ if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ | SA_NET_RANDOM, DRV_NAME, priv->root)){
+ printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
+ goto err_out_free1;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/farsync.c linux-2.6.4-hardened-new/drivers/net/wan/farsync.c
+--- linux-2.6.4-hardened/drivers/net/wan/farsync.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/farsync.c 2004-03-15 03:29:41.987800456 +0100
+@@ -1613,7 +1613,7 @@ fst_add_one ( struct pci_dev *pdev, cons
+ card->state = FST_RESET;
+
+ /* Register the interrupt handler */
+- if ( request_irq ( card->irq, fst_intr, SA_SHIRQ, FST_DEV_NAME, card ))
++ if ( request_irq ( card->irq, fst_intr, SA_SHIRQ | SA_NET_RANDOM, FST_DEV_NAME, card ))
+ {
+
+ printk_err ("Unable to register interrupt %d\n", card->irq );
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/hostess_sv11.c linux-2.6.4-hardened-new/drivers/net/wan/hostess_sv11.c
+--- linux-2.6.4-hardened/drivers/net/wan/hostess_sv11.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/hostess_sv11.c 2004-03-15 03:29:42.006797568 +0100
+@@ -263,7 +263,7 @@ static struct sv11_device *sv11_init(int
+ /* We want a fast IRQ for this device. Actually we'd like an even faster
+ IRQ ;) - This is one driver RtLinux is made for */
+
+- if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "Hostess SV/11", dev)<0)
++ if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT | SA_NET_RANDOM, "Hostess SV/11", dev)<0)
+ {
+ printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
+ goto fail1;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/lmc/lmc_main.c linux-2.6.4-hardened-new/drivers/net/wan/lmc/lmc_main.c
+--- linux-2.6.4-hardened/drivers/net/wan/lmc/lmc_main.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/lmc/lmc_main.c 2004-03-15 03:29:42.018795744 +0100
+@@ -1060,7 +1060,7 @@ static int lmc_open (struct net_device *
+ lmc_softreset (sc);
+
+ /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
+- if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ, dev->name, dev)){
++ if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)){
+ printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
+ lmc_trace(dev, "lmc_open irq failed out");
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/n2.c linux-2.6.4-hardened-new/drivers/net/wan/n2.c
+--- linux-2.6.4-hardened/drivers/net/wan/n2.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/n2.c 2004-03-15 03:29:42.034793312 +0100
+@@ -377,7 +377,7 @@ static int __init n2_run(unsigned long i
+ }
+ card->io = io;
+
+- if (request_irq(irq, &sca_intr, 0, devname, card)) {
++ if (request_irq(irq, &sca_intr, SA_NET_RANDOM, devname, card)) {
+ printk(KERN_ERR "n2: could not allocate IRQ\n");
+ n2_destroy_card(card);
+ return(-EBUSY);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/pc300_drv.c linux-2.6.4-hardened-new/drivers/net/wan/pc300_drv.c
+--- linux-2.6.4-hardened/drivers/net/wan/pc300_drv.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/pc300_drv.c 2004-03-15 03:29:42.058789664 +0100
+@@ -3603,7 +3603,7 @@ cpc_init_one(struct pci_dev *pdev, const
+ }
+
+ /* Allocate IRQ */
+- if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ, "Cyclades-PC300", card)) {
++ if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ | SA_NET_RANDOM, "Cyclades-PC300", card)) {
+ printk ("PC300 found at RAM 0x%08lx, but could not allocate IRQ%d.\n",
+ card->hw.ramphys, card->hw.irq);
+ goto err_io_unmap;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/pci200syn.c linux-2.6.4-hardened-new/drivers/net/wan/pci200syn.c
+--- linux-2.6.4-hardened/drivers/net/wan/pci200syn.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/pci200syn.c 2004-03-15 03:29:42.075787080 +0100
+@@ -395,7 +395,7 @@ static int __devinit pci200_pci_init_one
+ writew(readw(p) | 0x0040, p);
+
+ /* Allocate IRQ */
+- if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
++ if(request_irq(pdev->irq, sca_intr, SA_SHIRQ | SA_NET_RANDOM, devname, card)) {
+ printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
+ pdev->irq);
+ pci200_pci_remove_one(pdev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/sbni.c linux-2.6.4-hardened-new/drivers/net/wan/sbni.c
+--- linux-2.6.4-hardened/drivers/net/wan/sbni.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/sbni.c 2004-03-15 03:29:42.096783888 +0100
+@@ -1187,7 +1187,7 @@ sbni_open( struct net_device *dev )
+ }
+ }
+
+- if( request_irq(dev->irq, sbni_interrupt, SA_SHIRQ, dev->name, dev) ) {
++ if( request_irq(dev->irq, sbni_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev) ) {
+ printk( KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq );
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/sdla.c linux-2.6.4-hardened-new/drivers/net/wan/sdla.c
+--- linux-2.6.4-hardened/drivers/net/wan/sdla.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/sdla.c 2004-03-15 03:33:45.726746504 +0100
+@@ -1460,7 +1460,7 @@ got_type:
+ }
+
+ err = -EAGAIN;
+- if (request_irq(dev->irq, &sdla_isr, 0, dev->name, dev))
++ if (request_irq(dev->irq, &sdla_isr, SA_NET_RANDOM, dev->name, dev))
+ goto fail;
+
+ if (flp->type == SDLA_S507) {
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/sdlamain.c linux-2.6.4-hardened-new/drivers/net/wan/sdlamain.c
+--- linux-2.6.4-hardened/drivers/net/wan/sdlamain.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/sdlamain.c 2004-03-15 03:29:42.149775832 +0100
+@@ -458,7 +458,7 @@ static int setup(struct wan_device* wand
+ /* when using the S514 PCI adapter */
+
+ if(request_irq(irq, sdla_isr,
+- (card->hw.type == SDLA_S514) ? SA_SHIRQ : 0,
++ (card->hw.type == SDLA_S514) ? SA_SHIRQ | SA_NET_RANDOM : 0,
+ wandev->name, card)){
+
+ printk(KERN_INFO "%s: Can't reserve IRQ %d!\n", wandev->name, irq);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/sealevel.c linux-2.6.4-hardened-new/drivers/net/wan/sealevel.c
+--- linux-2.6.4-hardened/drivers/net/wan/sealevel.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/sealevel.c 2004-03-15 03:29:42.160774160 +0100
+@@ -320,7 +320,7 @@ static __init struct slvl_board *slvl_in
+ /* We want a fast IRQ for this device. Actually we'd like an even faster
+ IRQ ;) - This is one driver RtLinux is made for */
+
+- if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "SeaLevel", dev)<0)
++ if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT | SA_NET_RANDOM, "SeaLevel", dev)<0)
+ {
+ printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
+ goto fail1_1;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wan/wanxl.c linux-2.6.4-hardened-new/drivers/net/wan/wanxl.c
+--- linux-2.6.4-hardened/drivers/net/wan/wanxl.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wan/wanxl.c 2004-03-15 03:34:44.767770904 +0100
+@@ -746,7 +746,7 @@ static int __devinit wanxl_pci_init_one(
+ card_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
+
+ /* Allocate IRQ */
+- if (request_irq(pdev->irq, wanxl_intr, SA_SHIRQ, "wanXL", card)) {
++ if (request_irq(pdev->irq, wanxl_intr, SA_SHIRQ | SA_NET_RANDOM, "wanXL", card)) {
+ printk(KERN_WARNING "wanXL %s: could not allocate IRQ%i.\n",
+ card_name(pdev), pdev->irq);
+ wanxl_pci_remove_one(pdev);
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wd.c linux-2.6.4-hardened-new/drivers/net/wd.c
+--- linux-2.6.4-hardened/drivers/net/wd.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wd.c 2004-03-15 03:29:42.204767472 +0100
+@@ -300,7 +300,7 @@ static int __init wd_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- i = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ i = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (i) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return i;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/airo.c linux-2.6.4-hardened-new/drivers/net/wireless/airo.c
+--- linux-2.6.4-hardened/drivers/net/wireless/airo.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/airo.c 2004-03-15 03:29:42.241761848 +0100
+@@ -2730,7 +2730,7 @@ struct net_device *_init_airo_card( unsi
+ if (test_bit(FLAG_MPI,&ai->flags))
+ reset_mpi_card (dev);
+
+- rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev );
++ rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev );
+ if (rc) {
+ printk(KERN_ERR "airo: register interrupt %d failed, rc %d\n", irq, rc );
+ goto err_out_unlink;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/airport.c linux-2.6.4-hardened-new/drivers/net/wireless/airport.c
+--- linux-2.6.4-hardened/drivers/net/wireless/airport.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/airport.c 2004-03-15 03:29:42.251760328 +0100
+@@ -243,7 +243,7 @@ airport_attach(struct macio_dev *mdev, c
+ /* Reset it before we get the interrupt */
+ hermes_init(hw);
+
+- if (request_irq(dev->irq, orinoco_interrupt, 0, "Airport", dev)) {
++ if (request_irq(dev->irq, orinoco_interrupt, SA_NET_RANDOM, "Airport", dev)) {
+ printk(KERN_ERR "airport: Couldn't get IRQ %d\n", dev->irq);
+ goto failed;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/arlan-main.c linux-2.6.4-hardened-new/drivers/net/wireless/arlan-main.c
+--- linux-2.6.4-hardened/drivers/net/wireless/arlan-main.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/arlan-main.c 2004-03-15 03:29:42.265758200 +0100
+@@ -1116,7 +1116,7 @@ static int arlan_open(struct net_device
+
+ ARLAN_DEBUG_ENTRY("arlan_open");
+
+- ret = request_irq(dev->irq, &arlan_interrupt, 0, dev->name, dev);
++ ret = request_irq(dev->irq, &arlan_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ {
+ printk(KERN_ERR "%s: unable to get IRQ %d .\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/atmel.c linux-2.6.4-hardened-new/drivers/net/wireless/atmel.c
+--- linux-2.6.4-hardened/drivers/net/wireless/atmel.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/atmel.c 2004-03-15 03:29:42.282755616 +0100
+@@ -1483,7 +1483,7 @@ struct net_device *init_atmel_card( unsi
+ dev->irq = irq;
+ dev->base_addr = port;
+
+- if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) {
++ if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc );
+ goto err_out_free;
+ }
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/orinoco_pci.c linux-2.6.4-hardened-new/drivers/net/wireless/orinoco_pci.c
+--- linux-2.6.4-hardened/drivers/net/wireless/orinoco_pci.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/orinoco_pci.c 2004-03-15 03:29:42.296753488 +0100
+@@ -229,7 +229,7 @@ static int orinoco_pci_init_one(struct p
+ HERMES_MEM, HERMES_32BIT_REGSPACING);
+ pci_set_drvdata(pdev, dev);
+
+- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
++ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "orinoco_pci: Error allocating IRQ %d.\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/orinoco_plx.c linux-2.6.4-hardened-new/drivers/net/wireless/orinoco_plx.c
+--- linux-2.6.4-hardened/drivers/net/wireless/orinoco_plx.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/orinoco_plx.c 2004-03-15 03:29:42.307751816 +0100
+@@ -242,7 +242,7 @@ static int orinoco_plx_init_one(struct p
+ HERMES_IO, HERMES_16BIT_REGSPACING);
+ pci_set_drvdata(pdev, dev);
+
+- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, dev);
++ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "orinoco_plx: Error allocating IRQ %d.\n", pdev->irq);
+ err = -EBUSY;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/orinoco_tmd.c linux-2.6.4-hardened-new/drivers/net/wireless/orinoco_tmd.c
+--- linux-2.6.4-hardened/drivers/net/wireless/orinoco_tmd.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/orinoco_tmd.c 2004-03-15 03:29:42.317750296 +0100
+@@ -134,7 +134,7 @@ static int orinoco_tmd_init_one(struct p
+ HERMES_IO, HERMES_16BIT_REGSPACING);
+ pci_set_drvdata(pdev, dev);
+
+- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name,
++ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name,
+ dev);
+ if (err) {
+ printk(KERN_ERR "orinoco_tmd: Error allocating IRQ %d.\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/wireless/wavelan.c linux-2.6.4-hardened-new/drivers/net/wireless/wavelan.c
+--- linux-2.6.4-hardened/drivers/net/wireless/wavelan.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/wireless/wavelan.c 2004-03-15 03:29:42.337747256 +0100
+@@ -4022,7 +4022,7 @@ static int wavelan_open(struct net_devic
+ return -ENXIO;
+ }
+
+- if (request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN", dev) != 0)
++ if (request_irq(dev->irq, &wavelan_interrupt, SA_NET_RANDOM, "WaveLAN", dev) != 0)
+ {
+ #ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING "%s: wavelan_open(): invalid IRQ\n",
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/yellowfin.c linux-2.6.4-hardened-new/drivers/net/yellowfin.c
+--- linux-2.6.4-hardened/drivers/net/yellowfin.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/yellowfin.c 2004-03-15 03:29:42.350745280 +0100
+@@ -632,7 +632,7 @@ static int yellowfin_open(struct net_dev
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+- i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ if (yellowfin_debug > 1)
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/znet.c linux-2.6.4-hardened-new/drivers/net/znet.c
+--- linux-2.6.4-hardened/drivers/net/znet.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/znet.c 2004-03-15 03:29:42.365743000 +0100
+@@ -173,7 +173,7 @@ static int znet_request_resources (struc
+ struct znet_private *znet = dev->priv;
+ unsigned long flags;
+
+- if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
++ if (request_irq (dev->irq, &znet_interrupt, SA_NET_RANDOM, "ZNet", dev))
+ goto failed;
+ if (request_dma (znet->rx_dma, "ZNet rx"))
+ goto free_irq;
+diff -uprN -X dontdiff linux-2.6.4-hardened/drivers/net/zorro8390.c linux-2.6.4-hardened-new/drivers/net/zorro8390.c
+--- linux-2.6.4-hardened/drivers/net/zorro8390.c 2004-03-15 03:58:10.000000000 +0100
++++ linux-2.6.4-hardened-new/drivers/net/zorro8390.c 2004-03-15 03:29:42.385739960 +0100
+@@ -198,7 +198,7 @@ static int __devinit zorro8390_init(stru
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+- i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-4.5/4005_CAN-2004-0109.patch b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/4005_CAN-2004-0109.patch
new file mode 100644
index 0000000000..d7726c2e5a
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-4.5/4005_CAN-2004-0109.patch
@@ -0,0 +1,88 @@
+--- linux/fs/isofs/rock.c.orig
++++ linux/fs/isofs/rock.c
+@@ -14,6 +14,7 @@
+ #include <linux/slab.h>
+ #include <linux/pagemap.h>
+ #include <linux/smp_lock.h>
+ #include <linux/buffer_head.h>
++#include <asm/page.h>
+
+ #include "rock.h"
+@@ -419,7 +420,7 @@ int parse_rock_ridge_inode_internal(stru
+ return 0;
+ }
+
+-static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr)
++static char *get_symlink_chunk(char *rpnt, struct rock_ridge *rr, char *plimit)
+ {
+ int slen;
+ int rootflag;
+@@ -431,16 +432,25 @@ static char *get_symlink_chunk(char *rpn
+ rootflag = 0;
+ switch (slp->flags & ~1) {
+ case 0:
++ if (slp->len > plimit - rpnt)
++ return NULL;
+ memcpy(rpnt, slp->text, slp->len);
+ rpnt+=slp->len;
+ break;
++ case 2:
++ if (rpnt >= plimit)
++ return NULL;
++ *rpnt++='.';
++ break;
+ case 4:
++ if (2 > plimit - rpnt)
++ return NULL;
+ *rpnt++='.';
+- /* fallthru */
+- case 2:
+ *rpnt++='.';
+ break;
+ case 8:
++ if (rpnt >= plimit)
++ return NULL;
+ rootflag = 1;
+ *rpnt++='/';
+ break;
+@@ -457,17 +467,23 @@ static char *get_symlink_chunk(char *rpn
+ * If there is another SL record, and this component
+ * record isn't continued, then add a slash.
+ */
+- if ((!rootflag) && (rr->u.SL.flags & 1) && !(oldslp->flags & 1))
++ if ((!rootflag) && (rr->u.SL.flags & 1) &&
++ !(oldslp->flags & 1)) {
++ if (rpnt >= plimit)
++ return NULL;
+ *rpnt++='/';
++ }
+ break;
+ }
+
+ /*
+ * If this component record isn't continued, then append a '/'.
+ */
+- if (!rootflag && !(oldslp->flags & 1))
++ if (!rootflag && !(oldslp->flags & 1)) {
++ if (rpnt >= plimit)
++ return NULL;
+ *rpnt++='/';
+-
++ }
+ }
+ return rpnt;
+ }
+@@ -548,7 +564,10 @@ static int rock_ridge_symlink_readpage(s
+ CHECK_SP(goto out);
+ break;
+ case SIG('S', 'L'):
+- rpnt = get_symlink_chunk(rpnt, rr);
++ rpnt = get_symlink_chunk(rpnt, rr,
++ link + (PAGE_SIZE - 1));
++ if (rpnt == NULL)
++ goto out;
+ break;
+ case SIG('C', 'E'):
+ /* This tells is if there is a continuation record */
+
+
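Illustrative note (not part of the patch files above): the 3010 netdev-random drivers patch applies one mechanical change throughout -- each network driver's request_irq() call gains the SA_NET_RANDOM flag, which the companion 3005 core patch presumably maps to SA_SAMPLE_RANDOM (or to 0 when the feature is configured off) so that network-card interrupts feed the kernel entropy pool. A minimal sketch of that pattern, using a hypothetical driver "foo" purely for illustration; neither foo_open() nor foo_interrupt() is one of the drivers patched above:

    /* hypothetical sketch only, 2.6-era kernel style */
    static int foo_open(struct net_device *dev)
    {
            /* SA_SHIRQ: share the IRQ line with other devices.
             * SA_NET_RANDOM: let this device's interrupt timing contribute
             * entropy (assumed to expand to SA_SAMPLE_RANDOM, or 0, depending
             * on how the 3005 core patch is configured). */
            if (request_irq(dev->irq, &foo_interrupt,
                            SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)) {
                    printk(KERN_WARNING "%s: could not get IRQ %d\n",
                           dev->name, dev->irq);
                    return -EAGAIN;
            }
            return 0;
    }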