author:    Anthony G. Basile <basile@opensource.dyc.edu>  2010-09-19 09:56:35 -0400
committer: Anthony G. Basile <basile@opensource.dyc.edu>  2010-09-19 09:56:35 -0400
commit:    a31c29d8aa9c1cc7d5065710b8f9716bd117bf11 (patch)
tree:      e81d9a8528846cef1358b9af725358598eb88b07
parent:    Fixed compat_alloc_user_space undefined (diff)
download:  hardened-patchset-a31c29d8aa9c1cc7d5065710b8f9716bd117bf11.tar.gz
           hardened-patchset-a31c29d8aa9c1cc7d5065710b8f9716bd117bf11.tar.bz2
           hardened-patchset-a31c29d8aa9c1cc7d5065710b8f9716bd117bf11.zip
Updated Grsec/PaX 20100917
2.2.0-2.6.32.21-201009171945 for 2.6.32.21
2.2.0-2.6.34.7-201009171945 for 2.6.34.7
2.2.0-2.6.35.4-201009172030 for 2.6.35.4
21 files changed, 59537 insertions, 644 deletions
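The recurring change across all three patchsets is mechanical: every arch_get_unmapped_area()/hugetlb_get_unmapped_area() variant swaps the open-coded test `!vma || addr + len <= vma->vm_start` for check_heap_stack_gap(vma, addr, len), backed by a new vm.heap_stack_gap sysctl (registered in the kernel/sysctl.c hunk near the end of this excerpt, declared in the include/linux/sched.h hunk). The helper itself is defined in mm/mmap.c, which this excerpt never reaches, so the sketch below only illustrates the semantics the call sites imply; the VM_GROWSDOWN branch and the 64 KiB default are assumptions, not a quote from the patch:

#include <linux/mm.h>
#include <linux/sched.h>

/* Sketch only -- not the verbatim PaX implementation from mm/mmap.c. */
unsigned long sysctl_heap_stack_gap __read_mostly = 64 * 1024;	/* assumed default */

bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
{
	/* No vma above addr: the candidate range trivially fits. */
	if (!vma)
		return true;

	/* The old test still holds: the range must end at or below the next vma. */
	if (addr + len > vma->vm_start)
		return false;

	/*
	 * The hardening: if the vma above is a downward-growing stack,
	 * keep sysctl_heap_stack_gap bytes of slack so later stack growth
	 * cannot run into the new mapping (the heap/stack clash).
	 */
	if (vma->vm_flags & VM_GROWSDOWN)
		return vma->vm_start - (addr + len) >= sysctl_heap_stack_gap;

	return true;
}

In the top-down paths the patch passes addr - len, so the same test guards the upper edge of a candidate block placed just below addr.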
diff --git a/2.6.32/0000_README b/2.6.32/0000_README index 495f8be..e980fa6 100644 --- a/2.6.32/0000_README +++ b/2.6.32/0000_README @@ -3,7 +3,7 @@ README Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch +Patch: 4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch b/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch index 4ed5e67..653c257 100644 --- a/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009162222.patch +++ b/2.6.32/4420_grsecurity-2.2.0-2.6.32.21-201009171945.patch @@ -50,7 +50,16 @@ diff -urNp linux-2.6.32.21/arch/alpha/kernel/module.c linux-2.6.32.21/arch/alpha for (i = 0; i < n; i++) { diff -urNp linux-2.6.32.21/arch/alpha/kernel/osf_sys.c linux-2.6.32.21/arch/alpha/kernel/osf_sys.c --- linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/alpha/kernel/osf_sys.c 2010-09-17 18:34:04.000000000 -0400 +@@ -1169,7 +1169,7 @@ arch_get_unmapped_area_1(unsigned long a + /* At this point: (!vma || addr < vma->vm_end). */ + if (limit - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + vma = vma->vm_next; @@ -1205,6 +1205,10 @@ arch_get_unmapped_area(struct file *filp merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? */ @@ -446,7 +455,7 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/fault.c linux-2.6.32.21/arch/arm/mm/fault * diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c --- linux-2.6.32.21/arch/arm/mm/mmap.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/arm/mm/mmap.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/arm/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp if (len > TASK_SIZE) return -ENOMEM; @@ -458,7 +467,13 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); -@@ -75,10 +79,10 @@ arch_get_unmapped_area(struct file *filp +@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (len > mm->cached_hole_size) { @@ -472,7 +487,7 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c } full_search: -@@ -94,8 +98,8 @@ full_search: +@@ -94,14 +97,14 @@ full_search: * Start a new search - just in case we missed * some holes. 
*/ @@ -483,6 +498,13 @@ diff -urNp linux-2.6.32.21/arch/arm/mm/mmap.c linux-2.6.32.21/arch/arm/mm/mmap.c mm->cached_hole_size = 0; goto full_search; } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ diff -urNp linux-2.6.32.21/arch/arm/plat-s3c/pm.c linux-2.6.32.21/arch/arm/plat-s3c/pm.c --- linux-2.6.32.21/arch/arm/plat-s3c/pm.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/arm/plat-s3c/pm.c 2010-09-04 15:54:51.000000000 -0400 @@ -618,6 +640,37 @@ diff -urNp linux-2.6.32.21/arch/frv/include/asm/kmap_types.h linux-2.6.32.21/arc KM_TYPE_NR }; +diff -urNp linux-2.6.32.21/arch/frv/mm/elf-fdpic.c linux-2.6.32.21/arch/frv/mm/elf-fdpic.c +--- linux-2.6.32.21/arch/frv/mm/elf-fdpic.c 2010-08-13 16:24:37.000000000 -0400 ++++ linux-2.6.32.21/arch/frv/mm/elf-fdpic.c 2010-09-17 18:34:04.000000000 -0400 +@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + goto success; + } + +@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } +@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } diff -urNp linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c --- linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/ia64/hp/common/hwsw_iommu.c 2010-09-04 15:54:51.000000000 -0400 @@ -1023,7 +1076,7 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/pci-swiotlb.c linux-2.6.32.21/arch/i .map_page = swiotlb_map_page, diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c --- linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c 2010-09-17 18:34:04.000000000 -0400 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; @@ -1038,7 +1091,7 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64 if (!addr) addr = mm->free_area_cache; -@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil +@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). 
*/ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { @@ -1050,6 +1103,12 @@ diff -urNp linux-2.6.32.21/arch/ia64/kernel/sys_ia64.c linux-2.6.32.21/arch/ia64 goto full_search; } return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* Remember the address where we stopped this search: */ + mm->free_area_cache = addr + len; + return addr; diff -urNp linux-2.6.32.21/arch/ia64/kernel/topology.c linux-2.6.32.21/arch/ia64/kernel/topology.c --- linux-2.6.32.21/arch/ia64/kernel/topology.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/ia64/kernel/topology.c 2010-09-04 15:54:51.000000000 -0400 @@ -1126,6 +1185,18 @@ diff -urNp linux-2.6.32.21/arch/ia64/mm/fault.c linux-2.6.32.21/arch/ia64/mm/fau survive: /* * If for any reason at all we couldn't handle the fault, make +diff -urNp linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c +--- linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400 ++++ linux-2.6.32.21/arch/ia64/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400 +@@ -172,7 +172,7 @@ unsigned long hugetlb_get_unmapped_area( + /* At this point: (!vmm || addr < vmm->vm_end). */ + if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) + return -ENOMEM; +- if (!vmm || (addr + len) <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = ALIGN(vmm->vm_end, HPAGE_SIZE); + } diff -urNp linux-2.6.32.21/arch/ia64/mm/init.c linux-2.6.32.21/arch/ia64/mm/init.c --- linux-2.6.32.21/arch/ia64/mm/init.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/ia64/mm/init.c 2010-09-04 15:54:51.000000000 -0400 @@ -1312,8 +1383,8 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/process.c linux-2.6.32.21/arch/mips/ -} diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/kernel/syscall.c --- linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-09-04 15:54:51.000000000 -0400 -@@ -102,6 +102,11 @@ unsigned long arch_get_unmapped_area(str ++++ linux-2.6.32.21/arch/mips/kernel/syscall.c 2010-09-17 18:34:04.000000000 -0400 +@@ -102,17 +102,21 @@ unsigned long arch_get_unmapped_area(str do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; @@ -1325,8 +1396,12 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/ if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); -@@ -112,7 +117,7 @@ unsigned long arch_get_unmapped_area(str - (!vmm || addr + len <= vmm->vm_start)) + else + addr = PAGE_ALIGN(addr); + vmm = find_vma(current->mm, addr); +- if (task_size - len >= addr && +- (!vmm || addr + len <= vmm->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len)) return addr; } - addr = TASK_UNMAPPED_BASE; @@ -1334,6 +1409,15 @@ diff -urNp linux-2.6.32.21/arch/mips/kernel/syscall.c linux-2.6.32.21/arch/mips/ if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else +@@ -122,7 +126,7 @@ unsigned long arch_get_unmapped_area(str + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (task_size - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (do_color_align) diff -urNp linux-2.6.32.21/arch/mips/mm/fault.c linux-2.6.32.21/arch/mips/mm/fault.c --- linux-2.6.32.21/arch/mips/mm/fault.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/mips/mm/fault.c 2010-09-04 15:54:51.000000000 -0400 @@ -1516,7 +1600,25 @@ diff -urNp linux-2.6.32.21/arch/parisc/kernel/module.c linux-2.6.32.21/arch/pari me->arch.unwind_section, table, end, gp); diff -urNp linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c --- linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/parisc/kernel/sys_parisc.c 2010-09-17 18:34:04.000000000 -0400 +@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + } +@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; + if (addr < vma->vm_end) /* handle wraparound */ @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str if (flags & MAP_FIXED) return addr; @@ -2671,8 +2773,38 @@ diff -urNp linux-2.6.32.21/arch/powerpc/mm/mmap_64.c linux-2.6.32.21/arch/powerp } diff -urNp linux-2.6.32.21/arch/powerpc/mm/slice.c linux-2.6.32.21/arch/powerpc/mm/slice.c --- linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-09-04 15:54:51.000000000 -0400 -@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un ++++ linux-2.6.32.21/arch/powerpc/mm/slice.c 2010-09-17 18:34:04.000000000 -0400 +@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_ + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return check_heap_stack_gap(vma, addr, len); + } + +-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) + { + return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, + 1ul << SLICE_LOW_SHIFT); +@@ -256,7 +255,7 @@ full_search: + addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); + continue; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || (addr + len) <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ + if (use_cache) + mm->free_area_cache = addr; +@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un if (fixed && addr > (mm->task_size - len)) return -EINVAL; @@ -3115,6 +3247,56 @@ diff -urNp linux-2.6.32.21/arch/sh/kernel/kgdb.c linux-2.6.32.21/arch/sh/kernel/ /* Breakpoint instruction: trapa #0x3c */ #ifdef CONFIG_CPU_LITTLE_ENDIAN .gdb_bpt_instr = { 0x3c, 0xc3 }, +diff -urNp 
linux-2.6.32.21/arch/sh/mm/mmap.c linux-2.6.32.21/arch/sh/mm/mmap.c +--- linux-2.6.32.21/arch/sh/mm/mmap.c 2010-08-13 16:24:37.000000000 -0400 ++++ linux-2.6.32.21/arch/sh/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400 +@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -106,7 +105,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } diff -urNp linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h --- linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h 2010-08-29 21:08:20.000000000 -0400 +++ linux-2.6.32.21/arch/sparc/include/asm/atomic_64.h 2010-09-15 02:34:10.000000000 -0400 @@ -3669,7 +3851,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/pci_sun4v.c linux-2.6.32.21/arch/sp .map_page = dma_4v_map_page, diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c --- linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 18:34:04.000000000 -0400 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str if (ARCH_SUN4C && len > 0x20000000) return -ENOMEM; @@ -3679,9 +3861,18 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_32.c linux-2.6.32.21/arch if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr); +@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str + } + if (TASK_SIZE - PAGE_SIZE - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c --- linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 18:34:04.000000000 -0400 @@ -125,7 +125,7 @@ unsigned long arch_get_unmapped_area(str /* We do not accept a shared mapping if it 
would violate * cache aliasing constraints. @@ -3702,7 +3893,14 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); -@@ -153,9 +157,9 @@ unsigned long arch_get_unmapped_area(str +@@ -147,15 +151,14 @@ unsigned long arch_get_unmapped_area(str + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; } if (len > mm->cached_hole_size) { @@ -3714,7 +3912,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch mm->cached_hole_size = 0; } -@@ -175,8 +179,8 @@ full_search: +@@ -175,14 +178,14 @@ full_search: vma = find_vma(mm, VA_EXCLUDE_END); } if (unlikely(task_size < addr)) { @@ -3725,7 +3923,14 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch mm->cached_hole_size = 0; goto full_search; } -@@ -216,7 +220,7 @@ arch_get_unmapped_area_topdown(struct fi + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -216,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi /* We do not accept a shared mapping if it would violate * cache aliasing constraints. */ @@ -3734,7 +3939,35 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; -@@ -384,6 +388,12 @@ void arch_pick_mmap_layout(struct mm_str +@@ -237,8 +240,7 @@ arch_get_unmapped_area_topdown(struct fi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -259,7 +261,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -279,7 +281,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -384,6 +386,12 @@ void arch_pick_mmap_layout(struct mm_str current->signal->rlim[RLIMIT_STACK].rlim_cur == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; @@ -3747,7 +3980,7 @@ diff -urNp linux-2.6.32.21/arch/sparc/kernel/sys_sparc_64.c linux-2.6.32.21/arch mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { -@@ -398,6 +408,12 @@ void arch_pick_mmap_layout(struct mm_str +@@ -398,6 +406,12 @@ void arch_pick_mmap_layout(struct mm_str gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); @@ -4033,8 +4266,8 @@ diff -urNp linux-2.6.32.21/arch/sparc/lib/atomic_64.S linux-2.6.32.21/arch/sparc bne,pn %xcc, 2f diff -urNp linux-2.6.32.21/arch/sparc/lib/ksyms.c linux-2.6.32.21/arch/sparc/lib/ksyms.c --- linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-08-13 16:24:37.000000000 
-0400 -+++ linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-09-04 15:54:51.000000000 -0400 -@@ -144,12 +144,15 @@ EXPORT_SYMBOL(__downgrade_write); ++++ linux-2.6.32.21/arch/sparc/lib/ksyms.c 2010-09-17 17:45:39.000000000 -0400 +@@ -144,12 +144,17 @@ EXPORT_SYMBOL(__downgrade_write); /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); @@ -4044,7 +4277,9 @@ diff -urNp linux-2.6.32.21/arch/sparc/lib/ksyms.c linux-2.6.32.21/arch/sparc/lib +EXPORT_SYMBOL(atomic_sub_unchecked); EXPORT_SYMBOL(atomic_sub_ret); EXPORT_SYMBOL(atomic64_add); ++EXPORT_SYMBOL(atomic64_add_unchecked); EXPORT_SYMBOL(atomic64_add_ret); ++EXPORT_SYMBOL(atomic64_add_ret_unchecked); EXPORT_SYMBOL(atomic64_sub); +EXPORT_SYMBOL(atomic64_sub_unchecked); EXPORT_SYMBOL(atomic64_sub_ret); @@ -4969,6 +5204,46 @@ diff -urNp linux-2.6.32.21/arch/sparc/mm/fault_64.c linux-2.6.32.21/arch/sparc/m /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. So in such a case we (carefully) read the +diff -urNp linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c +--- linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400 ++++ linux-2.6.32.21/arch/sparc/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400 +@@ -69,7 +69,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -108,7 +108,7 @@ hugetlb_get_unmapped_area_topdown(struct + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -126,7 +126,7 @@ hugetlb_get_unmapped_area_topdown(struct + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -183,8 +183,7 @@ hugetlb_get_unmapped_area(struct file *f + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) diff -urNp linux-2.6.32.21/arch/sparc/mm/init_32.c linux-2.6.32.21/arch/sparc/mm/init_32.c --- linux-2.6.32.21/arch/sparc/mm/init_32.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/sparc/mm/init_32.c 2010-09-04 15:54:51.000000000 -0400 @@ -9393,7 +9668,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess_64.h linux-2.6.32.21/arc #endif /* _ASM_X86_UACCESS_64_H */ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x86/include/asm/uaccess.h --- linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/include/asm/uaccess.h 2010-09-16 23:14:31.000000000 -0400 @@ -8,12 +8,15 @@ #include <linux/thread_info.h> #include <linux/prefetch.h> @@ -9458,22 +9733,9 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x /* * 
The exception table consists of pairs of addresses: the first is the -@@ -179,17 +213,34 @@ extern int __get_user_bad(void); - __ret_gu; \ - }) - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __put_user_x(size, x, ptr, __ret_pu) \ -+ ({ \ -+ int __dummy; \ -+ asm volatile("call __put_user_" #size : "=a" (__ret_pu), "=c" (__dummy) \ -+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx"); \ -+ }) -+#else - #define __put_user_x(size, x, ptr, __ret_pu) \ +@@ -183,13 +217,21 @@ extern int __get_user_bad(void); asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") -+#endif - +#ifdef CONFIG_X86_32 @@ -9496,7 +9758,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ -@@ -197,15 +248,18 @@ extern int __get_user_bad(void); +@@ -197,15 +239,18 @@ extern int __get_user_bad(void); _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=r" (err) \ @@ -9519,7 +9781,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x #define __put_user_x8(x, ptr, __ret_pu) \ asm volatile("call __put_user_8" : "=a" (__ret_pu) \ -@@ -374,16 +428,18 @@ do { \ +@@ -374,16 +419,18 @@ do { \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ @@ -9541,7 +9803,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x #define __get_user_size_ex(x, ptr, size) \ do { \ -@@ -407,10 +463,12 @@ do { \ +@@ -407,10 +454,12 @@ do { \ } while (0) #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ @@ -9556,7 +9818,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x #define __put_user_nocheck(x, ptr, size) \ ({ \ -@@ -424,13 +482,24 @@ do { \ +@@ -424,13 +473,24 @@ do { \ int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ @@ -9583,7 +9845,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x /* * Tell gcc we read from memory instead of writing: this is because -@@ -438,21 +507,26 @@ struct __large_struct { unsigned long bu +@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu * aliasing issues. 
*/ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ @@ -9614,7 +9876,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x /* * uaccess_try and catch -@@ -530,7 +604,7 @@ struct __large_struct { unsigned long bu +@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu #define get_user_ex(x, ptr) do { \ unsigned long __gue_val; \ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ @@ -9623,7 +9885,7 @@ diff -urNp linux-2.6.32.21/arch/x86/include/asm/uaccess.h linux-2.6.32.21/arch/x } while (0) #ifdef CONFIG_X86_WP_WORKS_OK -@@ -567,6 +641,7 @@ extern struct movsl_mask { +@@ -567,6 +632,7 @@ extern struct movsl_mask { #define ARCH_HAS_NOCACHE_UACCESS 1 @@ -13721,7 +13983,26 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/signal.c linux-2.6.32.21/arch/x86/ker if (current_thread_info()->status & TS_RESTORE_SIGMASK) diff -urNp linux-2.6.32.21/arch/x86/kernel/smpboot.c linux-2.6.32.21/arch/x86/kernel/smpboot.c --- linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-08-29 21:08:20.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/kernel/smpboot.c 2010-09-17 17:44:35.000000000 -0400 +@@ -95,14 +95,14 @@ static DEFINE_PER_CPU(struct task_struct + */ + static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); + +-void cpu_hotplug_driver_lock() ++void cpu_hotplug_driver_lock(void) + { +- mutex_lock(&x86_cpu_hotplug_driver_mutex); ++ mutex_lock(&x86_cpu_hotplug_driver_mutex); + } + +-void cpu_hotplug_driver_unlock() ++void cpu_hotplug_driver_unlock(void) + { +- mutex_unlock(&x86_cpu_hotplug_driver_mutex); ++ mutex_unlock(&x86_cpu_hotplug_driver_mutex); + } + + ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } @@ -748,7 +748,11 @@ do_rest: (unsigned long)task_stack_page(c_idle.idle) - KERNEL_STACK_OFFSET + THREAD_SIZE; @@ -13792,7 +14073,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/syscall_table_32.S linux-2.6.32.21/ar .long sys_exit diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c --- linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c 2010-09-17 18:34:04.000000000 -0400 @@ -24,6 +24,21 @@ #include <asm/syscalls.h> @@ -13815,7 +14096,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 /* * Perform the select(nd, in, out, ex, tv) and mmap() system * calls. 
Linux/i386 didn't use to be able to handle more than -@@ -58,6 +73,205 @@ out: +@@ -58,6 +73,208 @@ out: return err; } @@ -13844,10 +14125,11 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 + + if (addr) { + addr = PAGE_ALIGN(addr); -+ vma = find_vma(mm, addr); -+ if (pax_task_size - len >= addr && -+ (!vma || addr + len <= vma->vm_start)) -+ return addr; ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; @@ -13887,13 +14169,8 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 + } + return -ENOMEM; + } -+ if (!vma || addr + len <= vma->vm_start) { -+ /* -+ * Remember the place where we stopped the search: -+ */ -+ mm->free_area_cache = addr + len; -+ return addr; -+ } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; @@ -13903,6 +14180,12 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 + goto full_search; + } + } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; +} + +unsigned long @@ -13938,10 +14221,11 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); -+ vma = find_vma(mm, addr); -+ if (pax_task_size - len >= addr && -+ (!vma || addr + len <= vma->vm_start)) -+ return addr; ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ @@ -13956,7 +14240,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); -+ if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } @@ -13973,7 +14257,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 + * return with success: + */ + vma = find_vma(mm, addr); -+ if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + @@ -14021,7 +14305,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 struct sel_arg_struct { unsigned long n; -@@ -93,7 +307,7 @@ asmlinkage int sys_ipc(uint call, int fi +@@ -93,7 +310,7 @@ asmlinkage int sys_ipc(uint call, int fi return sys_semtimedop(first, (struct sembuf __user *)ptr, second, NULL); case SEMTIMEDOP: return sys_semtimedop(first, (struct sembuf __user *)ptr, second, @@ -14030,7 +14314,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 case SEMGET: return sys_semget(first, second, third); -@@ -140,7 +354,7 @@ asmlinkage int sys_ipc(uint call, int fi +@@ -140,7 +357,7 @@ asmlinkage int sys_ipc(uint call, int fi ret = do_shmat(first, (char __user *) ptr, second, &raddr); if (ret) return ret; @@ -14041,7 +14325,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_i386_32.c linux-2.6.32.21/arch/x8 if (!segment_eq(get_fs(), get_ds())) diff -urNp 
linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c --- linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c 2010-09-17 18:34:04.000000000 -0400 @@ -32,8 +32,8 @@ out: return error; } @@ -14062,7 +14346,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86 *end = TASK_SIZE; } } -@@ -69,11 +69,15 @@ arch_get_unmapped_area(struct file *filp +@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; @@ -14079,7 +14363,22 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86 if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); -@@ -128,7 +132,7 @@ arch_get_unmapped_area_topdown(struct fi +- if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) +@@ -106,7 +109,7 @@ full_search: + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; @@ -14088,7 +14387,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86 /* requested length too big for entire address space */ if (len > TASK_SIZE) -@@ -141,6 +145,10 @@ arch_get_unmapped_area_topdown(struct fi +@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) goto bottomup; @@ -14099,7 +14398,32 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/sys_x86_64.c linux-2.6.32.21/arch/x86 /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); -@@ -198,13 +206,21 @@ bottomup: + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr-len; + } +@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr; + +@@ -198,13 +204,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. 
*/ @@ -14599,22 +14923,13 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmi_32.c linux-2.6.32.21/arch/x86/ker local_irq_save(flags); diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S --- linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-09-04 15:54:51.000000000 -0400 -@@ -26,6 +26,22 @@ ++++ linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S 2010-09-17 17:29:28.000000000 -0400 +@@ -26,6 +26,13 @@ #include <asm/page_types.h> #include <asm/cache.h> #include <asm/boot.h> +#include <asm/segment.h> + -+#undef PMD_SIZE -+#undef PMD_SHIFT -+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) -+#define PMD_SHIFT 21 -+#else -+#define PMD_SHIFT 22 -+#endif -+#define PMD_SIZE (1 << PMD_SHIFT) -+ +#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) +#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) +#else @@ -14623,7 +14938,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 #undef i386 /* in case the preprocessor is a 32bit one */ -@@ -34,40 +50,55 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF +@@ -34,40 +41,55 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF #ifdef CONFIG_X86_32 OUTPUT_ARCH(i386) ENTRY(phys_startup_32) @@ -14689,7 +15004,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 HEAD_TEXT #ifdef CONFIG_X86_32 . = ALIGN(PAGE_SIZE); -@@ -82,28 +113,69 @@ SECTIONS +@@ -82,28 +104,69 @@ SECTIONS IRQENTRY_TEXT *(.fixup) *(.gnu.warning) @@ -14766,7 +15081,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 PAGE_ALIGNED_DATA(PAGE_SIZE) -@@ -166,12 +238,6 @@ SECTIONS +@@ -166,12 +229,6 @@ SECTIONS } vgetcpu_mode = VVIRT(.vgetcpu_mode); @@ -14779,7 +15094,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { *(.vsyscall_3) } -@@ -187,12 +253,19 @@ SECTIONS +@@ -187,12 +244,19 @@ SECTIONS #endif /* CONFIG_X86_64 */ /* Init code and data - will be freed after init */ @@ -14802,7 +15117,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 /* * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the * output PHDR, so the next output section - .init.text - should -@@ -201,12 +274,27 @@ SECTIONS +@@ -201,12 +265,27 @@ SECTIONS PERCPU_VADDR(0, :percpu) #endif @@ -14818,7 +15133,8 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 + VMLINUX_SYMBOL(_einittext) = .; + . = ALIGN(PAGE_SIZE); + } :text.init -+ + +- INIT_DATA_SECTION(16) + /* + * .exit.text is discard at runtime, not link time, to deal with + * references from .altinstructions and .eh_frame @@ -14828,14 +15144,13 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 + . = ALIGN(16); + } :text.exit + . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); - -- INIT_DATA_SECTION(16) ++ + . = ALIGN(PAGE_SIZE); + INIT_DATA_SECTION(16) :init .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { __x86_cpu_dev_start = .; -@@ -232,19 +320,11 @@ SECTIONS +@@ -232,19 +311,11 @@ SECTIONS *(.altinstr_replacement) } @@ -14856,7 +15171,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 PERCPU(PAGE_SIZE) #endif -@@ -267,12 +347,6 @@ SECTIONS +@@ -267,12 +338,6 @@ SECTIONS . 
= ALIGN(PAGE_SIZE); } @@ -14869,7 +15184,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 /* BSS */ . = ALIGN(PAGE_SIZE); .bss : AT(ADDR(.bss) - LOAD_OFFSET) { -@@ -288,6 +362,7 @@ SECTIONS +@@ -288,6 +353,7 @@ SECTIONS __brk_base = .; . += 64 * 1024; /* 64k alignment slop space */ *(.brk_reservation) /* areas brk users have reserved */ @@ -14877,7 +15192,7 @@ diff -urNp linux-2.6.32.21/arch/x86/kernel/vmlinux.lds.S linux-2.6.32.21/arch/x8 __brk_limit = .; } -@@ -316,13 +391,12 @@ SECTIONS +@@ -316,13 +382,12 @@ SECTIONS * for the boot processor. */ #define INIT_PER_CPU(x) init_per_cpu__##x = per_cpu__##x + __per_cpu_load @@ -18096,7 +18411,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/highmem_32.c linux-2.6.32.21/arch/x86/mm/ } diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm/hugetlbpage.c --- linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/mm/hugetlbpage.c 2010-09-17 18:34:04.000000000 -0400 @@ -267,13 +267,18 @@ static unsigned long hugetlb_get_unmappe struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; @@ -18120,7 +18435,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm } full_search: -@@ -281,13 +286,13 @@ full_search: +@@ -281,26 +286,27 @@ full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ @@ -18137,18 +18452,38 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm mm->cached_hole_size = 0; goto full_search; } -@@ -310,9 +315,8 @@ static unsigned long hugetlb_get_unmappe + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = ALIGN(vma->vm_end, huge_page_size(h)); + } ++ ++ mm->free_area_cache = addr + len; ++ return addr; + } + + static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, +@@ -309,10 +315,9 @@ static unsigned long hugetlb_get_unmappe + { struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev_vma; +- struct vm_area_struct *vma, *prev_vma; - unsigned long base = mm->mmap_base, addr = addr0; ++ struct vm_area_struct *vma; + unsigned long base = mm->mmap_base, addr; unsigned long largest_hole = mm->cached_hole_size; - int first_time = 1; /* don't allow allocations above current base */ if (mm->free_area_cache > base) -@@ -322,7 +326,7 @@ static unsigned long hugetlb_get_unmappe +@@ -322,7 +327,7 @@ static unsigned long hugetlb_get_unmappe largest_hole = 0; mm->free_area_cache = base; } @@ -18157,7 +18492,51 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm /* make sure it can fit in the remaining address space */ if (mm->free_area_cache < len) goto fail; -@@ -364,22 +368,26 @@ try_again: +@@ -330,33 +335,27 @@ try_again: + /* either no address requested or cant fit in requested address hole */ + addr = (mm->free_area_cache - len) & huge_page_mask(h); + do { ++ vma = find_vma(mm, addr); + /* + * Lookup failure means no vma is above this address, + * i.e. 
return with success: +- */ +- if (!(vma = find_vma_prev(mm, addr, &prev_vma))) +- return addr; +- +- /* + * new region fits between prev_vma->vm_end and + * vma->vm_start, use it: + */ +- if (addr + len <= vma->vm_start && +- (!prev_vma || (addr >= prev_vma->vm_end))) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ +- mm->cached_hole_size = largest_hole; +- return (mm->free_area_cache = addr); +- } else { +- /* pull free_area_cache down to the first hole */ +- if (mm->free_area_cache == vma->vm_end) { +- mm->free_area_cache = vma->vm_start; +- mm->cached_hole_size = largest_hole; +- } ++ mm->cached_hole_size = largest_hole; ++ return (mm->free_area_cache = addr); ++ } ++ /* pull free_area_cache down to the first hole */ ++ if (mm->free_area_cache == vma->vm_end) { ++ mm->free_area_cache = vma->vm_start; ++ mm->cached_hole_size = largest_hole; + } + + /* remember the largest hole we saw so far */ + if (addr + largest_hole < vma->vm_start) +- largest_hole = vma->vm_start - addr; ++ largest_hole = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = (vma->vm_start - len) & huge_page_mask(h); +@@ -364,22 +363,26 @@ try_again: fail: /* @@ -18195,7 +18574,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); -@@ -387,6 +395,7 @@ fail: +@@ -387,6 +390,7 @@ fail: /* * Restore the topdown base: */ @@ -18203,7 +18582,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm mm->free_area_cache = base; mm->cached_hole_size = ~0UL; -@@ -400,10 +409,17 @@ hugetlb_get_unmapped_area(struct file *f +@@ -400,10 +404,17 @@ hugetlb_get_unmapped_area(struct file *f struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -18222,15 +18601,16 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/hugetlbpage.c linux-2.6.32.21/arch/x86/mm return -ENOMEM; if (flags & MAP_FIXED) { -@@ -415,7 +431,7 @@ hugetlb_get_unmapped_area(struct file *f +@@ -415,8 +426,7 @@ hugetlb_get_unmapped_area(struct file *f if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && -+ if (pax_task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) +- (!vma || addr + len <= vma->vm_start)) ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } + if (mm->get_unmapped_area == arch_get_unmapped_area) diff -urNp linux-2.6.32.21/arch/x86/mm/init_32.c linux-2.6.32.21/arch/x86/mm/init_32.c --- linux-2.6.32.21/arch/x86/mm/init_32.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/arch/x86/mm/init_32.c 2010-09-04 15:54:51.000000000 -0400 @@ -18602,7 +18982,7 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/init_64.c linux-2.6.32.21/arch/x86/mm/ini return "[vsyscall]"; diff -urNp linux-2.6.32.21/arch/x86/mm/init.c linux-2.6.32.21/arch/x86/mm/init.c --- linux-2.6.32.21/arch/x86/mm/init.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/mm/init.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/mm/init.c 2010-09-16 22:50:17.000000000 -0400 @@ -69,11 +69,7 @@ static void __init find_early_table_spac * cause a hotspot and fill up ZONE_DMA. The page tables * need roughly 0.5KB per GB. 
@@ -18616,6 +18996,15 @@ diff -urNp linux-2.6.32.21/arch/x86/mm/init.c linux-2.6.32.21/arch/x86/mm/init.c e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT, tables, PAGE_SIZE); if (e820_table_start == -1UL) +@@ -147,7 +143,7 @@ unsigned long __init_refok init_memory_m + #endif + + set_nx(); +- if (nx_enabled) ++ if (nx_enabled && cpu_has_nx) + printk(KERN_INFO "NX (Execute Disable) protection: active\n"); + + /* Enable PSE if available */ @@ -331,7 +327,13 @@ unsigned long __init_refok init_memory_m */ int devmem_is_allowed(unsigned long pagenr) @@ -20213,7 +20602,7 @@ diff -urNp linux-2.6.32.21/arch/x86/vdso/vma.c linux-2.6.32.21/arch/x86/vdso/vma -__setup("vdso=", vdso_setup); diff -urNp linux-2.6.32.21/arch/x86/xen/enlighten.c linux-2.6.32.21/arch/x86/xen/enlighten.c --- linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-09-04 15:54:51.000000000 -0400 ++++ linux-2.6.32.21/arch/x86/xen/enlighten.c 2010-09-17 17:30:16.000000000 -0400 @@ -71,8 +71,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; @@ -20241,10 +20630,10 @@ diff -urNp linux-2.6.32.21/arch/x86/xen/enlighten.c linux-2.6.32.21/arch/x86/xen - check_efer(); +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && -+ (cpuid_edx(0x80000001) & (1 << (X86_FEATURE_NX & 31)))) { ++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { + unsigned l, h; + -+#if defined(CONFIG_X86_32) ++#ifdef CONFIG_X86_PAE + nx_enabled = 1; +#endif + __supported_pte_mask |= _PAGE_NX; @@ -31772,19 +32161,6 @@ diff -urNp linux-2.6.32.21/fs/ext4/balloc.c linux-2.6.32.21/fs/ext4/balloc.c if (free_blocks >= (nblocks + dirty_blocks)) return 1; } -diff -urNp linux-2.6.32.21/fs/ext4/ioctl.c linux-2.6.32.21/fs/ext4/ioctl.c ---- linux-2.6.32.21/fs/ext4/ioctl.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/fs/ext4/ioctl.c 2010-09-04 15:54:52.000000000 -0400 -@@ -230,6 +230,9 @@ setversion_out: - struct file *donor_filp; - int err; - -+ /* temporary workaround for bugs in here */ -+ return -EOPNOTSUPP; -+ - if (!(filp->f_mode & FMODE_READ) || - !(filp->f_mode & FMODE_WRITE)) - return -EBADF; diff -urNp linux-2.6.32.21/fs/ext4/namei.c linux-2.6.32.21/fs/ext4/namei.c --- linux-2.6.32.21/fs/ext4/namei.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/fs/ext4/namei.c 2010-09-04 15:54:52.000000000 -0400 @@ -34418,7 +34794,7 @@ diff -urNp linux-2.6.32.21/fs/proc/root.c linux-2.6.32.21/fs/proc/root.c diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c --- linux-2.6.32.21/fs/proc/task_mmu.c 2010-08-29 21:08:16.000000000 -0400 -+++ linux-2.6.32.21/fs/proc/task_mmu.c 2010-09-04 15:54:52.000000000 -0400 ++++ linux-2.6.32.21/fs/proc/task_mmu.c 2010-09-17 18:40:06.000000000 -0400 @@ -46,15 +46,26 @@ void task_mem(struct seq_file *m, struct "VmStk:\t%8lu kB\n" "VmExe:\t%8lu kB\n" @@ -34462,15 +34838,30 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; -@@ -223,13 +240,22 @@ static void show_map_vma(struct seq_file - start += PAGE_SIZE; +@@ -206,7 +223,6 @@ static void show_map_vma(struct seq_file + int flags = vma->vm_flags; + unsigned long ino = 0; + unsigned long long pgoff = 0; +- unsigned long start; + dev_t dev = 0; + int len; +@@ -217,19 +233,23 @@ static void show_map_vma(struct seq_file + pgoff = 
((loff_t)vma->vm_pgoff) << PAGE_SHIFT; + } + +- /* We don't show the stack guard page in /proc/maps */ +- start = vma->vm_start; +- if (vma->vm_flags & VM_GROWSDOWN) +- start += PAGE_SIZE; +- seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", +- start, +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ PAX_RAND_FLAGS(mm) ? 0UL : start, ++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start, + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end, +#else - start, ++ vma->vm_start, vma->vm_end, +#endif flags & VM_READ ? 'r' : '-', @@ -34485,7 +34876,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c MAJOR(dev), MINOR(dev), ino, &len); /* -@@ -238,16 +264,16 @@ static void show_map_vma(struct seq_file +@@ -238,16 +258,16 @@ static void show_map_vma(struct seq_file */ if (file) { pad_len_spaces(m, len); @@ -34507,7 +34898,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c name = "[stack]"; } } else { -@@ -390,9 +416,16 @@ static int show_smap(struct seq_file *m, +@@ -390,9 +410,16 @@ static int show_smap(struct seq_file *m, }; memset(&mss, 0, sizeof mss); @@ -34527,7 +34918,7 @@ diff -urNp linux-2.6.32.21/fs/proc/task_mmu.c linux-2.6.32.21/fs/proc/task_mmu.c show_map_vma(m, vma); -@@ -408,7 +441,11 @@ static int show_smap(struct seq_file *m, +@@ -408,7 +435,11 @@ static int show_smap(struct seq_file *m, "Swap: %8lu kB\n" "KernelPageSize: %8lu kB\n" "MMUPageSize: %8lu kB\n", @@ -41692,8 +42083,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_fork.c linux-2.6.32.21/grsecurity/gr +} diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/grsec_init.c --- linux-2.6.32.21/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.32.21/grsecurity/grsec_init.c 2010-09-04 15:54:52.000000000 -0400 -@@ -0,0 +1,258 @@ ++++ linux-2.6.32.21/grsecurity/grsec_init.c 2010-09-17 19:24:55.000000000 -0400 +@@ -0,0 +1,266 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> @@ -41742,6 +42133,7 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/gr +#endif +int grsec_lastack_retries; +int grsec_enable_tpe_all; ++int grsec_enable_tpe_invert; +int grsec_enable_socket_all; +int grsec_socket_all_gid; +int grsec_enable_socket_client; @@ -41832,6 +42224,13 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_init.c linux-2.6.32.21/grsecurity/gr +#endif +#endif + ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ /* for backward compatibility, tpe_invert always defaults to on if ++ enabled in the kernel ++ */ ++ grsec_enable_tpe_invert = 1; ++#endif ++ +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) +#ifndef CONFIG_GRKERNSEC_SYSCTL + grsec_lock = 1; @@ -42828,8 +43227,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_sock.c linux-2.6.32.21/grsecurity/gr +} diff -urNp linux-2.6.32.21/grsecurity/grsec_sysctl.c linux-2.6.32.21/grsecurity/grsec_sysctl.c --- linux-2.6.32.21/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.32.21/grsecurity/grsec_sysctl.c 2010-09-04 15:54:52.000000000 -0400 -@@ -0,0 +1,459 @@ ++++ linux-2.6.32.21/grsecurity/grsec_sysctl.c 2010-09-17 19:22:27.000000000 -0400 +@@ -0,0 +1,469 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/sysctl.h> @@ -43103,6 +43502,16 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_sysctl.c linux-2.6.32.21/grsecurity/ + .proc_handler = &proc_dointvec, + }, +#endif ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ { ++ .ctl_name = CTL_UNNUMBERED, ++ .procname = "tpe_invert", ++ .data = 
&grsec_enable_tpe_invert, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif +#ifdef CONFIG_GRKERNSEC_TPE_ALL + { + .ctl_name = CTL_UNNUMBERED, @@ -43328,8 +43737,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_time.c linux-2.6.32.21/grsecurity/gr +} diff -urNp linux-2.6.32.21/grsecurity/grsec_tpe.c linux-2.6.32.21/grsecurity/grsec_tpe.c --- linux-2.6.32.21/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.32.21/grsecurity/grsec_tpe.c 2010-09-04 15:54:52.000000000 -0400 -@@ -0,0 +1,38 @@ ++++ linux-2.6.32.21/grsecurity/grsec_tpe.c 2010-09-17 19:28:20.000000000 -0400 +@@ -0,0 +1,39 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/file.h> @@ -43347,7 +43756,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsec_tpe.c linux-2.6.32.21/grsecurity/grs + + if (cred->uid && ((grsec_enable_tpe && +#ifdef CONFIG_GRKERNSEC_TPE_INVERT -+ !in_group_p(grsec_tpe_gid) ++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) || ++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))) +#else + in_group_p(grsec_tpe_gid) +#endif @@ -43435,8 +43845,8 @@ diff -urNp linux-2.6.32.21/grsecurity/grsum.c linux-2.6.32.21/grsecurity/grsum.c +} diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig --- linux-2.6.32.21/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.32.21/grsecurity/Kconfig 2010-09-14 21:34:38.000000000 -0400 -@@ -0,0 +1,987 @@ ++++ linux-2.6.32.21/grsecurity/Kconfig 2010-09-17 19:36:28.000000000 -0400 +@@ -0,0 +1,986 @@ +# +# grecurity configuration +# @@ -43588,7 +43998,7 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig + select PAX_PT_PAX_FLAGS + select PAX_HAVE_ACL_FLAGS + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN) -+ select PAX_MEMORY_UDEREF if (X86_32 && !XEN) ++ select PAX_MEMORY_UDEREF if (X86 && !XEN) + select PAX_RANDKSTACK if (X86_TSC && !X86_64) + select PAX_SEGMEXEC if (X86_32) + select PAX_PAGEEXEC @@ -44197,11 +44607,14 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig + is enabled, a sysctl option with name "tpe" is created. + +config GRKERNSEC_TPE_ALL -+ bool "Partially restrict non-root users" ++ bool "Partially restrict all non-root users" + depends on GRKERNSEC_TPE + help -+ If you say Y here, All non-root users other than the ones in the -+ group specified in the main TPE option will only be allowed to ++ If you say Y here, all non-root users will be covered under ++ a weaker TPE restriction. This is separate from, and in addition to, ++ the main TPE options that you have selected elsewhere. Thus, if a ++ "trusted" GID is chosen, this restriction applies to even that GID. ++ Under this restriction, all non-root users will only be allowed to + execute files in directories they own that are not group or + world-writable, or in directories owned by root and writable only by + root. If the sysctl option is enabled, a sysctl option with name @@ -44214,31 +44627,27 @@ diff -urNp linux-2.6.32.21/grsecurity/Kconfig linux-2.6.32.21/grsecurity/Kconfig + If you say Y here, the group you specify in the TPE configuration will + decide what group TPE restrictions will be *disabled* for. This + option is useful if you want TPE restrictions to be applied to most -+ users on the system. ++ users on the system. If the sysctl option is enabled, a sysctl option ++ with name "tpe_invert" is created. 
Unlike other sysctl options, this ++ entry will default to on for backward-compatibility. + +config GRKERNSEC_TPE_GID + int "GID for untrusted users" + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT + default 1005 + help -+ If you have selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *disabled* for. -+ If you have not selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *enabled* for. -+ If the sysctl option is enabled, a sysctl option with name "tpe_gid" -+ is created. ++ Setting this GID determines what group TPE restrictions will be ++ *enabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. + +config GRKERNSEC_TPE_GID + int "GID for trusted users" + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT + default 1005 + help -+ If you have selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *disabled* for. -+ If you have not selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *enabled* for. -+ If the sysctl option is enabled, a sysctl option with name "tpe_gid" -+ is created. ++ Setting this GID determines what group TPE restrictions will be ++ *disabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. + +endmenu +menu "Network Protections" @@ -46216,7 +46625,7 @@ diff -urNp linux-2.6.32.21/include/linux/grdefs.h linux-2.6.32.21/include/linux/ +#endif diff -urNp linux-2.6.32.21/include/linux/grinternal.h linux-2.6.32.21/include/linux/grinternal.h --- linux-2.6.32.21/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.32.21/include/linux/grinternal.h 2010-09-04 15:54:52.000000000 -0400 ++++ linux-2.6.32.21/include/linux/grinternal.h 2010-09-17 19:39:50.000000000 -0400 @@ -0,0 +1,211 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H @@ -46282,7 +46691,7 @@ diff -urNp linux-2.6.32.21/include/linux/grinternal.h linux-2.6.32.21/include/li +extern int grsec_enable_tpe; +extern int grsec_tpe_gid; +extern int grsec_enable_tpe_all; -+extern int grsec_enable_sidcaps; ++extern int grsec_enable_tpe_invert; +extern int grsec_enable_socket_all; +extern int grsec_socket_all_gid; +extern int grsec_enable_socket_client; @@ -47499,7 +47908,7 @@ diff -urNp linux-2.6.32.21/include/linux/reiserfs_fs_sb.h linux-2.6.32.21/includ on-disk FS format */ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/sched.h --- linux-2.6.32.21/include/linux/sched.h 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/include/linux/sched.h 2010-09-14 18:41:02.000000000 -0400 ++++ linux-2.6.32.21/include/linux/sched.h 2010-09-17 18:34:04.000000000 -0400 @@ -101,6 +101,7 @@ struct bio; struct fs_struct; struct bts_context; @@ -47508,7 +47917,19 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s /* * List of flags we want to share for kernel threads, -@@ -667,6 +668,15 @@ struct signal_struct { +@@ -372,9 +373,11 @@ struct user_namespace; + #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + + extern int sysctl_max_map_count; ++extern unsigned long sysctl_heap_stack_gap; + + #include <linux/aio.h> + ++extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, + unsigned long, unsigned 
long); +@@ -667,6 +670,15 @@ struct signal_struct { struct tty_audit_buf *tty_audit_buf; #endif @@ -47524,7 +47945,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s int oom_adj; /* OOM kill score adjustment (bit shift) */ }; -@@ -1220,7 +1230,7 @@ struct rcu_node; +@@ -1220,7 +1232,7 @@ struct rcu_node; struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ @@ -47533,7 +47954,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s atomic_t usage; unsigned int flags; /* per process flags, defined below */ unsigned int ptrace; -@@ -1332,8 +1342,8 @@ struct task_struct { +@@ -1332,8 +1344,8 @@ struct task_struct { struct list_head thread_group; struct completion *vfork_done; /* for vfork() */ @@ -47544,7 +47965,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; -@@ -1347,16 +1357,6 @@ struct task_struct { +@@ -1347,16 +1359,6 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -47561,7 +47982,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock it with task_lock()) -@@ -1440,6 +1440,15 @@ struct task_struct { +@@ -1440,6 +1442,15 @@ struct task_struct { int hardirq_context; int softirq_context; #endif @@ -47577,7 +47998,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL u64 curr_chain_key; -@@ -1460,6 +1469,9 @@ struct task_struct { +@@ -1460,6 +1471,9 @@ struct task_struct { struct backing_dev_info *backing_dev_info; @@ -47587,7 +48008,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s struct io_context *io_context; unsigned long ptrace_message; -@@ -1523,6 +1535,20 @@ struct task_struct { +@@ -1523,6 +1537,20 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; @@ -47608,7 +48029,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored adress in ret_stack */ int curr_ret_stack; -@@ -1546,6 +1572,52 @@ struct task_struct { +@@ -1546,6 +1574,52 @@ struct task_struct { #endif /* CONFIG_TRACING */ }; @@ -47661,7 +48082,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ #define tsk_cpumask(tsk) (&(tsk)->cpus_allowed) -@@ -2146,7 +2218,7 @@ extern void __cleanup_sighand(struct sig +@@ -2146,7 +2220,7 @@ extern void __cleanup_sighand(struct sig extern void exit_itimers(struct signal_struct *); extern void flush_itimer_signals(void); @@ -47670,7 +48091,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s extern void daemonize(const char *, ...); extern int allow_signal(int); -@@ -2259,8 +2331,8 @@ static inline void unlock_task_sighand(s +@@ -2259,8 +2333,8 @@ static inline void unlock_task_sighand(s #ifndef __HAVE_THREAD_FUNCTIONS @@ -47681,7 +48102,7 @@ diff -urNp linux-2.6.32.21/include/linux/sched.h linux-2.6.32.21/include/linux/s static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) { -@@ -2275,13 +2347,17 @@ static inline unsigned long *end_of_stac +@@ -2275,13 +2349,17 @@ static inline unsigned long *end_of_stac #endif @@ -49315,7 +49736,7 @@ diff -urNp linux-2.6.32.21/kernel/fork.c linux-2.6.32.21/kernel/fork.c new_fs = fs; diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c --- linux-2.6.32.21/kernel/futex.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/kernel/futex.c 2010-09-04 15:54:52.000000000 -0400 ++++ linux-2.6.32.21/kernel/futex.c 2010-09-17 17:43:01.000000000 -0400 @@ -54,6 +54,7 @@ #include <linux/mount.h> #include <linux/pagemap.h> @@ -49345,19 +49766,17 @@ diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c restart->futex.val = val; restart->futex.time = abs_time->tv64; restart->futex.bitset = bitset; -@@ -2376,7 +2382,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi +@@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi { struct robust_list_head __user *head; unsigned long ret; -- const struct cred *cred = current_cred(), *pcred; +#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP -+ const struct cred *cred = current_cred(); -+ const struct cred *pcred; + const struct cred *cred = current_cred(), *pcred; +#endif if (!futex_cmpxchg_enabled) return -ENOSYS; -@@ -2392,11 +2401,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi +@@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi if (!p) goto err_unlock; ret = -EPERM; @@ -49374,7 +49793,7 @@ diff -urNp linux-2.6.32.21/kernel/futex.c linux-2.6.32.21/kernel/futex.c head = p->robust_list; rcu_read_unlock(); } -@@ -2458,7 +2472,7 @@ retry: +@@ -2458,7 +2471,7 @@ retry: */ static inline int fetch_robust_entry(struct robust_list __user **entry, struct robust_list __user * __user *head, @@ -50948,7 +51367,7 @@ diff -urNp linux-2.6.32.21/kernel/sys.c linux-2.6.32.21/kernel/sys.c } diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c --- linux-2.6.32.21/kernel/sysctl.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/kernel/sysctl.c 2010-09-04 15:54:52.000000000 -0400 ++++ linux-2.6.32.21/kernel/sysctl.c 2010-09-17 18:34:04.000000000 -0400 @@ -63,6 +63,13 @@ static int deprecated_sysctl_warning(struct __sysctl_args *args); @@ -51018,7 +51437,21 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c { .ctl_name = CTL_UNNUMBERED, .procname = "sched_child_runs_first", -@@ -1803,6 +1844,8 @@ static int do_sysctl_strategy(struct ctl +@@ -1247,6 +1288,13 @@ static struct ctl_table vm_table[] = { + .mode = 0644, + .proc_handler = &proc_dointvec + }, ++ { ++ .procname = "heap_stack_gap", ++ .data = &sysctl_heap_stack_gap, ++ .maxlen = sizeof(sysctl_heap_stack_gap), ++ .mode = 0644, ++ .proc_handler = proc_doulongvec_minmax, ++ }, + 
#else + { + .ctl_name = CTL_UNNUMBERED, +@@ -1803,6 +1851,8 @@ static int do_sysctl_strategy(struct ctl return 0; } @@ -51027,7 +51460,7 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c static int parse_table(int __user *name, int nlen, void __user *oldval, size_t __user *oldlenp, void __user *newval, size_t newlen, -@@ -1821,7 +1864,7 @@ repeat: +@@ -1821,7 +1871,7 @@ repeat: if (n == table->ctl_name) { int error; if (table->child) { @@ -51036,7 +51469,7 @@ diff -urNp linux-2.6.32.21/kernel/sysctl.c linux-2.6.32.21/kernel/sysctl.c return -EPERM; name++; nlen--; -@@ -1906,6 +1949,33 @@ int sysctl_perm(struct ctl_table_root *r +@@ -1906,6 +1956,33 @@ int sysctl_perm(struct ctl_table_root *r int error; int mode; @@ -51260,25 +51693,6 @@ diff -urNp linux-2.6.32.21/kernel/trace/ftrace.c linux-2.6.32.21/kernel/trace/ft } /* -diff -urNp linux-2.6.32.21/kernel/trace/Kconfig linux-2.6.32.21/kernel/trace/Kconfig ---- linux-2.6.32.21/kernel/trace/Kconfig 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/kernel/trace/Kconfig 2010-09-04 15:54:52.000000000 -0400 -@@ -126,6 +126,7 @@ if FTRACE - config FUNCTION_TRACER - bool "Kernel Function Tracer" - depends on HAVE_FUNCTION_TRACER -+ depends on !PAX_KERNEXEC - select FRAME_POINTER - select KALLSYMS - select GENERIC_TRACER -@@ -343,6 +344,7 @@ config POWER_TRACER - config STACK_TRACER - bool "Trace max stack" - depends on HAVE_FUNCTION_TRACER -+ depends on !PAX_KERNEXEC - select FUNCTION_TRACER - select STACKTRACE - select KALLSYMS diff -urNp linux-2.6.32.21/kernel/trace/ring_buffer.c linux-2.6.32.21/kernel/trace/ring_buffer.c --- linux-2.6.32.21/kernel/trace/ring_buffer.c 2010-08-13 16:24:37.000000000 -0400 +++ linux-2.6.32.21/kernel/trace/ring_buffer.c 2010-09-04 15:54:52.000000000 -0400 @@ -51821,16 +52235,8 @@ diff -urNp linux-2.6.32.21/mm/madvise.c linux-2.6.32.21/mm/madvise.c goto out; diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c --- linux-2.6.32.21/mm/memory.c 2010-08-29 21:08:20.000000000 -0400 -+++ linux-2.6.32.21/mm/memory.c 2010-09-04 15:54:52.000000000 -0400 -@@ -48,6 +48,7 @@ - #include <linux/ksm.h> - #include <linux/rmap.h> - #include <linux/module.h> -+#include <linux/security.h> - #include <linux/delayacct.h> - #include <linux/init.h> - #include <linux/writeback.h> -@@ -187,8 +188,12 @@ static inline void free_pmd_range(struct ++++ linux-2.6.32.21/mm/memory.c 2010-09-17 18:20:06.000000000 -0400 +@@ -187,8 +187,12 @@ static inline void free_pmd_range(struct return; pmd = pmd_offset(pud, start); @@ -51843,7 +52249,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c } static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, -@@ -220,8 +225,12 @@ static inline void free_pud_range(struct +@@ -220,8 +224,12 @@ static inline void free_pud_range(struct return; pud = pud_offset(pgd, start); @@ -51856,7 +52262,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c } /* -@@ -1251,10 +1260,10 @@ int __get_user_pages(struct task_struct +@@ -1251,10 +1259,10 @@ int __get_user_pages(struct task_struct (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); i = 0; @@ -51869,7 +52275,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c if (!vma && in_gate_area(tsk, start)) { unsigned long pg = start & PAGE_MASK; struct vm_area_struct *gate_vma = get_gate_vma(tsk); -@@ -1306,7 +1315,7 @@ int __get_user_pages(struct task_struct +@@ -1306,7 +1314,7 @@ int __get_user_pages(struct task_struct continue; } @@ -51878,7 +52284,7 @@ diff 
-urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c (vma->vm_flags & (VM_IO | VM_PFNMAP)) || !(vm_flags & vma->vm_flags)) return i ? : -EFAULT; -@@ -1381,7 +1390,7 @@ int __get_user_pages(struct task_struct +@@ -1381,7 +1389,7 @@ int __get_user_pages(struct task_struct start += PAGE_SIZE; nr_pages--; } while (nr_pages && start < vma->vm_end); @@ -51887,7 +52293,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c return i; } -@@ -1977,6 +1986,186 @@ static inline void cow_user_page(struct +@@ -1977,6 +1985,186 @@ static inline void cow_user_page(struct copy_user_highpage(dst, src, va, vma); } @@ -52074,7 +52480,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c /* * This routine handles present pages, when users try to write * to a shared page. It is done by copying the page to a new address -@@ -2156,6 +2345,12 @@ gotten: +@@ -2156,6 +2344,12 @@ gotten: */ page_table = pte_offset_map_lock(mm, pmd, address, &ptl); if (likely(pte_same(*page_table, orig_pte))) { @@ -52087,7 +52493,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c if (old_page) { if (!PageAnon(old_page)) { dec_mm_counter(mm, file_rss); -@@ -2207,6 +2402,10 @@ gotten: +@@ -2207,6 +2401,10 @@ gotten: page_remove_rmap(old_page); } @@ -52098,7 +52504,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c /* Free the old page.. */ new_page = old_page; ret |= VM_FAULT_WRITE; -@@ -2604,6 +2803,11 @@ static int do_swap_page(struct mm_struct +@@ -2604,6 +2802,11 @@ static int do_swap_page(struct mm_struct swap_free(entry); if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) try_to_free_swap(page); @@ -52110,7 +52516,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c unlock_page(page); if (flags & FAULT_FLAG_WRITE) { -@@ -2615,6 +2819,11 @@ static int do_swap_page(struct mm_struct +@@ -2615,6 +2818,11 @@ static int do_swap_page(struct mm_struct /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, pte); @@ -52122,7 +52528,41 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c unlock: pte_unmap_unlock(page_table, ptl); out: -@@ -2665,7 +2874,7 @@ static int do_anonymous_page(struct mm_s +@@ -2630,33 +2838,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_downwards()", +- * except we must first make sure that 'address-PAGE_SIZE' +- * doesn't hit another vma. +- * +- * The "find_vma()" will do the right thing even if we wrap +- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- expand_stack(vma, address - PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. 
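(Editor's sketch, not part of the patch.) The hunk above deletes check_stack_guard_page(), which reserved a single guard page under a VM_GROWSDOWN stack at fault time. This revision replaces that scheme with an allocation-time rule: a candidate mapping is refused unless it leaves vm.heap_stack_gap bytes (a new sysctl, 64 KB by default, added by the kernel/sysctl.c and mm/mmap.c hunks elsewhere in this patch) between itself and an adjacent stack. A simplified model of the test follows, ignoring the CONFIG_STACK_GROWSUP cases that the real check_heap_stack_gap() in the mm/mmap.c hunk further down also handles:

	/* Simplified sketch: may [addr, addr+len) be mapped, given the
	 * configured gap? vma is the first mapping ending above addr,
	 * as returned by find_vma(); NULL means nothing to collide with. */
	static bool gap_ok(const struct vm_area_struct *vma,
			   unsigned long addr, unsigned long len)
	{
		if (!vma)
			return true;
		if (addr + len > vma->vm_start)		/* plain overlap */
			return false;
		if (vma->vm_flags & VM_GROWSDOWN)	/* neighbour is a stack */
			return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
		return true;
	}

Since the sysctl entry is created mode 0644 under vm, the gap should be tunable at runtime in the usual way, e.g. sysctl -w vm.heap_stack_gap=131072 for a 128 KB gap.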
+@@ -2665,27 +2846,23 @@ static int do_anonymous_page(struct mm_s unsigned long address, pte_t *page_table, pmd_t *pmd, unsigned int flags) { @@ -52131,7 +52571,31 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c spinlock_t *ptl; pte_t entry; -@@ -2704,6 +2913,11 @@ static int do_anonymous_page(struct mm_s +- pte_unmap(page_table); +- +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; +- +- /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); +- page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ ++ pte_unmap(page_table); ++ + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); +@@ -2704,6 +2881,11 @@ static int do_anonymous_page(struct mm_s if (!pte_none(*page_table)) goto release; @@ -52143,7 +52607,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c inc_mm_counter(mm, anon_rss); page_add_new_anon_rmap(page, vma, address); setpte: -@@ -2711,6 +2925,12 @@ setpte: +@@ -2711,6 +2893,12 @@ setpte: /* No need to invalidate - it was non-present before */ update_mmu_cache(vma, address, entry); @@ -52156,7 +52620,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c unlock: pte_unmap_unlock(page_table, ptl); return 0; -@@ -2853,6 +3073,12 @@ static int __do_fault(struct mm_struct * +@@ -2853,6 +3041,12 @@ static int __do_fault(struct mm_struct * */ /* Only go through if we didn't race with anybody else... */ if (likely(pte_same(*page_table, orig_pte))) { @@ -52169,7 +52633,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c flush_icache_page(vma, page); entry = mk_pte(page, vma->vm_page_prot); if (flags & FAULT_FLAG_WRITE) -@@ -2872,6 +3098,14 @@ static int __do_fault(struct mm_struct * +@@ -2872,6 +3066,14 @@ static int __do_fault(struct mm_struct * /* no need to invalidate: a not-present page won't be cached */ update_mmu_cache(vma, address, entry); @@ -52184,7 +52648,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c } else { if (charged) mem_cgroup_uncharge_page(page); -@@ -3019,6 +3253,12 @@ static inline int handle_pte_fault(struc +@@ -3019,6 +3221,12 @@ static inline int handle_pte_fault(struc if (flags & FAULT_FLAG_WRITE) flush_tlb_page(vma, address); } @@ -52197,7 +52661,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c unlock: pte_unmap_unlock(pte, ptl); return 0; -@@ -3035,6 +3275,10 @@ int handle_mm_fault(struct mm_struct *mm +@@ -3035,6 +3243,10 @@ int handle_mm_fault(struct mm_struct *mm pmd_t *pmd; pte_t *pte; @@ -52208,7 +52672,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c __set_current_state(TASK_RUNNING); count_vm_event(PGFAULT); -@@ -3042,6 +3286,34 @@ int handle_mm_fault(struct mm_struct *mm +@@ -3042,6 +3254,34 @@ int handle_mm_fault(struct mm_struct *mm if (unlikely(is_vm_hugetlb_page(vma))) return hugetlb_fault(mm, vma, address, flags); @@ -52243,7 +52707,7 @@ diff -urNp linux-2.6.32.21/mm/memory.c linux-2.6.32.21/mm/memory.c pgd = pgd_offset(mm, address); pud = pud_alloc(mm, pgd, address); if (!pud) -@@ -3139,7 +3411,7 @@ static int __init gate_vma_init(void) +@@ -3139,7 +3379,7 @@ static int __init gate_vma_init(void) gate_vma.vm_start = 
FIXADDR_USER_START; gate_vma.vm_end = FIXADDR_USER_END; gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; @@ -52386,7 +52850,7 @@ diff -urNp linux-2.6.32.21/mm/migrate.c linux-2.6.32.21/mm/migrate.c goto out; diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c --- linux-2.6.32.21/mm/mlock.c 2010-08-29 21:08:20.000000000 -0400 -+++ linux-2.6.32.21/mm/mlock.c 2010-09-04 15:54:56.000000000 -0400 ++++ linux-2.6.32.21/mm/mlock.c 2010-09-17 18:47:09.000000000 -0400 @@ -13,6 +13,7 @@ #include <linux/pagemap.h> #include <linux/mempolicy.h> @@ -52395,7 +52859,40 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c #include <linux/sched.h> #include <linux/module.h> #include <linux/rmap.h> -@@ -454,6 +455,9 @@ static int do_mlock(unsigned long start, +@@ -138,19 +139,6 @@ void munlock_vma_page(struct page *page) + } + } + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_stack_continue(vma->vm_prev, addr); +-} +- + /** + * __mlock_vma_pages_range() - mlock a range of pages in the vma. + * @vma: target vma +@@ -183,12 +171,6 @@ static long __mlock_vma_pages_range(stru + if (vma->vm_flags & VM_WRITE) + gup_flags |= FOLL_WRITE; + +- /* We don't try to access the guard page of a stack vma */ +- if (stack_guard_page(vma, start)) { +- addr += PAGE_SIZE; +- nr_pages--; +- } +- + while (nr_pages > 0) { + int i; + +@@ -454,6 +436,9 @@ static int do_mlock(unsigned long start, return -EINVAL; if (end == start) return 0; @@ -52405,7 +52902,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c vma = find_vma_prev(current->mm, start, &prev); if (!vma || vma->vm_start > start) return -ENOMEM; -@@ -464,6 +468,11 @@ static int do_mlock(unsigned long start, +@@ -464,6 +449,11 @@ static int do_mlock(unsigned long start, for (nstart = start ; ; ) { unsigned int newflags; @@ -52417,7 +52914,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c /* Here we know that vma->vm_start <= nstart < vma->vm_end. 
*/ newflags = vma->vm_flags | VM_LOCKED; -@@ -513,6 +522,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st +@@ -513,6 +503,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st lock_limit >>= PAGE_SHIFT; /* check against resource limits */ @@ -52425,7 +52922,7 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) error = do_mlock(start, len, 1); up_write(¤t->mm->mmap_sem); -@@ -534,17 +544,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, +@@ -534,17 +525,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, static int do_mlockall(int flags) { struct vm_area_struct * vma, * prev = NULL; @@ -52453,17 +52950,17 @@ diff -urNp linux-2.6.32.21/mm/mlock.c linux-2.6.32.21/mm/mlock.c newflags = vma->vm_flags | VM_LOCKED; if (!(flags & MCL_CURRENT)) newflags &= ~VM_LOCKED; -@@ -576,6 +592,7 @@ SYSCALL_DEFINE1(mlockall, int, flags) +@@ -576,6 +573,7 @@ SYSCALL_DEFINE1(mlockall, int, flags) lock_limit >>= PAGE_SHIFT; ret = -ENOMEM; -+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1); ++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1); if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || capable(CAP_IPC_LOCK)) ret = do_mlockall(flags); diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c --- linux-2.6.32.21/mm/mmap.c 2010-08-29 21:08:20.000000000 -0400 -+++ linux-2.6.32.21/mm/mmap.c 2010-09-04 15:54:52.000000000 -0400 ++++ linux-2.6.32.21/mm/mmap.c 2010-09-17 18:34:04.000000000 -0400 @@ -45,6 +45,16 @@ #define arch_rebalance_pgtables(addr, len) (addr) #endif @@ -52481,7 +52978,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c static void unmap_region(struct mm_struct *mm, struct vm_area_struct *vma, struct vm_area_struct *prev, unsigned long start, unsigned long end); -@@ -70,16 +80,25 @@ static void unmap_region(struct mm_struc +@@ -70,22 +80,32 @@ static void unmap_region(struct mm_struc * x: (no) no x: (no) yes x: (no) yes x: (yes) yes * */ @@ -52509,7 +53006,14 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c } EXPORT_SYMBOL(vm_get_page_prot); -@@ -231,6 +250,7 @@ static struct vm_area_struct *remove_vma + int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; ++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024; + struct percpu_counter vm_committed_as; + + /* +@@ -231,6 +251,7 @@ static struct vm_area_struct *remove_vma struct vm_area_struct *next = vma->vm_next; might_sleep(); @@ -52517,7 +53021,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (vma->vm_ops && vma->vm_ops->close) vma->vm_ops->close(vma); if (vma->vm_file) { -@@ -267,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) +@@ -267,6 +288,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) * not page aligned -Ram Gupta */ rlim = current->signal->rlim[RLIMIT_DATA].rlim_cur; @@ -52525,7 +53029,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + (mm->end_data - mm->start_data) > rlim) goto out; -@@ -704,6 +725,12 @@ static int +@@ -704,6 +726,12 @@ static int can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) { @@ -52538,7 +53042,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (is_mergeable_vma(vma, file, vm_flags) && 
is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { if (vma->vm_pgoff == vm_pgoff) -@@ -723,6 +750,12 @@ static int +@@ -723,6 +751,12 @@ static int can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) { @@ -52551,7 +53055,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (is_mergeable_vma(vma, file, vm_flags) && is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { pgoff_t vm_pglen; -@@ -765,12 +798,19 @@ can_vma_merge_after(struct vm_area_struc +@@ -765,12 +799,19 @@ can_vma_merge_after(struct vm_area_struc struct vm_area_struct *vma_merge(struct mm_struct *mm, struct vm_area_struct *prev, unsigned long addr, unsigned long end, unsigned long vm_flags, @@ -52572,7 +53076,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * We later require that vma->vm_flags == vm_flags, * so this tests vma->vm_flags & VM_SPECIAL, too. -@@ -786,6 +826,15 @@ struct vm_area_struct *vma_merge(struct +@@ -786,6 +827,15 @@ struct vm_area_struct *vma_merge(struct if (next && next->vm_end == end) /* cases 6, 7, 8 */ next = next->vm_next; @@ -52588,7 +53092,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * Can it merge with the predecessor? */ -@@ -805,9 +854,24 @@ struct vm_area_struct *vma_merge(struct +@@ -805,9 +855,24 @@ struct vm_area_struct *vma_merge(struct /* cases 1, 6 */ vma_adjust(prev, prev->vm_start, next->vm_end, prev->vm_pgoff, NULL); @@ -52614,7 +53118,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return prev; } -@@ -818,12 +882,27 @@ struct vm_area_struct *vma_merge(struct +@@ -818,12 +883,27 @@ struct vm_area_struct *vma_merge(struct mpol_equal(policy, vma_policy(next)) && can_vma_merge_before(next, vm_flags, anon_vma, file, pgoff+pglen)) { @@ -52644,7 +53148,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return area; } -@@ -898,14 +977,11 @@ none: +@@ -898,14 +978,11 @@ none: void vm_stat_account(struct mm_struct *mm, unsigned long flags, struct file *file, long pages) { @@ -52660,7 +53164,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c mm->stack_vm += pages; if (flags & (VM_RESERVED|VM_IO)) mm->reserved_vm += pages; -@@ -932,7 +1008,7 @@ unsigned long do_mmap_pgoff(struct file +@@ -932,7 +1009,7 @@ unsigned long do_mmap_pgoff(struct file * (the exception is when the underlying filesystem is noexec * mounted, in which case we dont add PROT_EXEC.) */ @@ -52669,7 +53173,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) prot |= PROT_EXEC; -@@ -958,7 +1034,7 @@ unsigned long do_mmap_pgoff(struct file +@@ -958,7 +1035,7 @@ unsigned long do_mmap_pgoff(struct file /* Obtain the address to map to. we verify (or select) it and ensure * that it represents a valid section of the address space. 
*/ @@ -52678,7 +53182,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (addr & ~PAGE_MASK) return addr; -@@ -969,6 +1045,28 @@ unsigned long do_mmap_pgoff(struct file +@@ -969,6 +1046,28 @@ unsigned long do_mmap_pgoff(struct file vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; @@ -52707,7 +53211,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (flags & MAP_LOCKED) if (!can_do_mlock()) return -EPERM; -@@ -980,6 +1078,7 @@ unsigned long do_mmap_pgoff(struct file +@@ -980,6 +1079,7 @@ unsigned long do_mmap_pgoff(struct file locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; @@ -52715,7 +53219,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (locked > lock_limit && !capable(CAP_IPC_LOCK)) return -EAGAIN; } -@@ -1053,6 +1152,9 @@ unsigned long do_mmap_pgoff(struct file +@@ -1053,6 +1153,9 @@ unsigned long do_mmap_pgoff(struct file if (error) return error; @@ -52725,7 +53229,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return mmap_region(file, addr, len, flags, vm_flags, pgoff); } EXPORT_SYMBOL(do_mmap_pgoff); -@@ -1065,10 +1167,10 @@ EXPORT_SYMBOL(do_mmap_pgoff); +@@ -1065,10 +1168,10 @@ EXPORT_SYMBOL(do_mmap_pgoff); */ int vma_wants_writenotify(struct vm_area_struct *vma) { @@ -52738,7 +53242,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return 0; /* The backer wishes to know when pages are first written to? */ -@@ -1117,14 +1219,24 @@ unsigned long mmap_region(struct file *f +@@ -1117,14 +1220,24 @@ unsigned long mmap_region(struct file *f unsigned long charged = 0; struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; @@ -52765,7 +53269,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c } /* Check against address space limit. 
*/ -@@ -1173,6 +1285,16 @@ munmap_back: +@@ -1173,6 +1286,16 @@ munmap_back: goto unacct_error; } @@ -52782,7 +53286,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c vma->vm_mm = mm; vma->vm_start = addr; vma->vm_end = addr + len; -@@ -1195,6 +1317,19 @@ munmap_back: +@@ -1195,6 +1318,19 @@ munmap_back: error = file->f_op->mmap(file, vma); if (error) goto unmap_and_free_vma; @@ -52802,7 +53306,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (vm_flags & VM_EXECUTABLE) added_exe_file_vma(mm); -@@ -1218,6 +1353,11 @@ munmap_back: +@@ -1218,6 +1354,11 @@ munmap_back: vma_link(mm, vma, prev, rb_link, rb_parent); file = vma->vm_file; @@ -52814,7 +53318,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* Once vma denies write, undo our temporary denial count */ if (correct_wcount) atomic_inc(&inode->i_writecount); -@@ -1226,6 +1366,7 @@ out: +@@ -1226,6 +1367,7 @@ out: mm->total_vm += len >> PAGE_SHIFT; vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); @@ -52822,7 +53326,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (vm_flags & VM_LOCKED) { /* * makes pages present; downgrades, drops, reacquires mmap_sem -@@ -1248,6 +1389,12 @@ unmap_and_free_vma: +@@ -1248,6 +1390,12 @@ unmap_and_free_vma: unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); charged = 0; free_vma: @@ -52835,7 +53339,41 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c kmem_cache_free(vm_area_cachep, vma); unacct_error: if (charged) -@@ -1281,6 +1428,10 @@ arch_get_unmapped_area(struct file *filp +@@ -1255,6 +1403,33 @@ unacct_error: + return error; + } + ++bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len) ++{ ++ if (!vma) { ++#ifdef CONFIG_STACK_GROWSUP ++ if (addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ else ++ vma = find_vma(current->mm, 0); ++ if (vma && (vma->vm_flags & VM_GROWSUP)) ++ return false; ++#endif ++ return true; ++ } ++ ++ if (addr + len > vma->vm_start) ++ return false; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) ++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++#ifdef CONFIG_STACK_GROWSUP ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) ++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; ++#endif ++ ++ return true; ++} ++ + /* Get an address range which is currently unmapped. + * For shmat() with addr=0. + * +@@ -1281,18 +1456,23 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; @@ -52845,9 +53383,15 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c + if (addr) { addr = PAGE_ALIGN(addr); - vma = find_vma(mm, addr); -@@ -1289,10 +1440,10 @@ arch_get_unmapped_area(struct file *filp - return addr; +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } } if (len > mm->cached_hole_size) { - start_addr = addr = mm->free_area_cache; @@ -52860,7 +53404,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c } full_search: -@@ -1303,9 +1454,8 @@ full_search: +@@ -1303,34 +1483,40 @@ full_search: * Start a new search - just in case we missed * some holes. 
*/ @@ -52872,7 +53416,29 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c mm->cached_hole_size = 0; goto full_search; } -@@ -1327,10 +1477,16 @@ full_search: + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- /* +- * Remember the place where we stopped the search: +- */ +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; + } + #endif void arch_unmap_area(struct mm_struct *mm, unsigned long addr) { @@ -52890,7 +53456,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c mm->free_area_cache = addr; mm->cached_hole_size = ~0UL; } -@@ -1348,7 +1504,7 @@ arch_get_unmapped_area_topdown(struct fi +@@ -1348,7 +1534,7 @@ arch_get_unmapped_area_topdown(struct fi { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; @@ -52899,7 +53465,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* requested length too big for entire address space */ if (len > TASK_SIZE) -@@ -1357,6 +1513,10 @@ arch_get_unmapped_area_topdown(struct fi +@@ -1357,13 +1543,18 @@ arch_get_unmapped_area_topdown(struct fi if (flags & MAP_FIXED) return addr; @@ -52910,7 +53476,37 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); -@@ -1414,13 +1574,21 @@ bottomup: +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ +@@ -1378,7 +1569,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -1395,7 +1586,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + +@@ -1414,13 +1605,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. */ @@ -52934,7 +53530,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c mm->cached_hole_size = ~0UL; return addr; -@@ -1429,6 +1597,12 @@ bottomup: +@@ -1429,6 +1628,12 @@ bottomup: void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) { @@ -52947,7 +53543,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * Is this a new hole at the highest possible address? */ -@@ -1436,8 +1610,10 @@ void arch_unmap_area_topdown(struct mm_s +@@ -1436,8 +1641,10 @@ void arch_unmap_area_topdown(struct mm_s mm->free_area_cache = addr; /* dont allow allocations above current base */ @@ -52959,7 +53555,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c } unsigned long -@@ -1545,6 +1721,27 @@ out: +@@ -1545,6 +1752,27 @@ out: return prev ? 
prev->vm_next : vma; } @@ -52987,7 +53583,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * Verify that the stack growth is acceptable and * update accounting. This is shared with both the -@@ -1561,6 +1758,7 @@ static int acct_stack_growth(struct vm_a +@@ -1561,6 +1789,7 @@ static int acct_stack_growth(struct vm_a return -ENOMEM; /* Stack limit test */ @@ -52995,7 +53591,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (size > rlim[RLIMIT_STACK].rlim_cur) return -ENOMEM; -@@ -1570,6 +1768,7 @@ static int acct_stack_growth(struct vm_a +@@ -1570,6 +1799,7 @@ static int acct_stack_growth(struct vm_a unsigned long limit; locked = mm->locked_vm + grow; limit = rlim[RLIMIT_MEMLOCK].rlim_cur >> PAGE_SHIFT; @@ -53003,7 +53599,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (locked > limit && !capable(CAP_IPC_LOCK)) return -ENOMEM; } -@@ -1605,35 +1804,40 @@ static +@@ -1605,35 +1835,42 @@ static #endif int expand_upwards(struct vm_area_struct *vma, unsigned long address) { @@ -53026,7 +53622,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (unlikely(anon_vma_prepare(vma))) return -ENOMEM; + locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN); -+ if (locknext && unlikely(anon_vma_prepare(vma->vm_next))) ++ if (locknext && anon_vma_prepare(vma->vm_next)) + return -ENOMEM; anon_vma_lock(vma); + if (locknext) @@ -53050,11 +53646,13 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* Somebody else might have raced and expanded it already */ - if (address > vma->vm_end) { -+ if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { ++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { unsigned long size, grow; size = address - vma->vm_start; -@@ -1643,6 +1847,8 @@ int expand_upwards(struct vm_area_struct +@@ -1643,6 +1880,8 @@ int expand_upwards(struct vm_area_struct if (!error) vma->vm_end = address; } @@ -53063,25 +53661,25 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c anon_vma_unlock(vma); return error; } -@@ -1654,7 +1860,8 @@ int expand_upwards(struct vm_area_struct +@@ -1654,7 +1893,8 @@ int expand_upwards(struct vm_area_struct static int expand_downwards(struct vm_area_struct *vma, unsigned long address) { - int error; + int error, lockprev = 0; -+ struct vm_area_struct *prev = NULL; ++ struct vm_area_struct *prev; /* * We must make sure the anon_vma is allocated -@@ -1668,6 +1875,15 @@ static int expand_downwards(struct vm_ar +@@ -1668,6 +1908,15 @@ static int expand_downwards(struct vm_ar if (error) return error; ++ prev = vma->vm_prev; +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) -+ find_vma_prev(vma->vm_mm, address, &prev); + lockprev = prev && (prev->vm_flags & VM_GROWSUP); +#endif -+ if (lockprev && unlikely(anon_vma_prepare(prev))) ++ if (lockprev && anon_vma_prepare(prev)) + return -ENOMEM; + if (lockprev) + anon_vma_lock(prev); @@ -53089,12 +53687,14 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c anon_vma_lock(vma); /* -@@ -1677,9 +1893,15 @@ static int expand_downwards(struct vm_ar +@@ -1677,9 +1926,17 @@ static int expand_downwards(struct vm_ar */ /* Somebody else might have raced and expanded it already */ - if (address < vma->vm_start) { -+ if (address < vma->vm_start && (!lockprev || 
prev->vm_end <= address)) { ++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) { unsigned long size, grow; +#ifdef CONFIG_PAX_SEGMEXEC @@ -53106,7 +53706,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c size = vma->vm_end - address; grow = (vma->vm_start - address) >> PAGE_SHIFT; -@@ -1687,9 +1909,20 @@ static int expand_downwards(struct vm_ar +@@ -1687,9 +1944,20 @@ static int expand_downwards(struct vm_ar if (!error) { vma->vm_start = address; vma->vm_pgoff -= grow; @@ -53127,7 +53727,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return error; } -@@ -1765,6 +1998,13 @@ static void remove_vma_list(struct mm_st +@@ -1765,6 +2033,13 @@ static void remove_vma_list(struct mm_st do { long nrpages = vma_pages(vma); @@ -53141,7 +53741,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c mm->total_vm -= nrpages; vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); vma = remove_vma(vma); -@@ -1810,6 +2050,16 @@ detach_vmas_to_be_unmapped(struct mm_str +@@ -1810,6 +2085,16 @@ detach_vmas_to_be_unmapped(struct mm_str insertion_point = (prev ? &prev->vm_next : &mm->mmap); vma->vm_prev = NULL; do { @@ -53158,7 +53758,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c rb_erase(&vma->vm_rb, &mm->mm_rb); mm->map_count--; tail_vma = vma; -@@ -1837,10 +2087,25 @@ int split_vma(struct mm_struct * mm, str +@@ -1837,10 +2122,25 @@ int split_vma(struct mm_struct * mm, str struct mempolicy *pol; struct vm_area_struct *new; @@ -53184,7 +53784,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (mm->map_count >= sysctl_max_map_count) return -ENOMEM; -@@ -1848,6 +2113,16 @@ int split_vma(struct mm_struct * mm, str +@@ -1848,6 +2148,16 @@ int split_vma(struct mm_struct * mm, str if (!new) return -ENOMEM; @@ -53201,7 +53801,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* most fields are the same, copy all, and then fixup */ *new = *vma; -@@ -1858,8 +2133,29 @@ int split_vma(struct mm_struct * mm, str +@@ -1858,8 +2168,29 @@ int split_vma(struct mm_struct * mm, str new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); } @@ -53231,7 +53831,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c kmem_cache_free(vm_area_cachep, new); return PTR_ERR(pol); } -@@ -1880,6 +2176,28 @@ int split_vma(struct mm_struct * mm, str +@@ -1880,6 +2211,28 @@ int split_vma(struct mm_struct * mm, str else vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); @@ -53260,7 +53860,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return 0; } -@@ -1888,11 +2206,30 @@ int split_vma(struct mm_struct * mm, str +@@ -1888,11 +2241,30 @@ int split_vma(struct mm_struct * mm, str * work. This now handles partial unmappings. 
* Jeremy Fitzhardinge <jeremy@goop.org> */ @@ -53291,7 +53891,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) return -EINVAL; -@@ -1956,6 +2293,8 @@ int do_munmap(struct mm_struct *mm, unsi +@@ -1956,6 +2328,8 @@ int do_munmap(struct mm_struct *mm, unsi /* Fix up all other VM information */ remove_vma_list(mm, vma); @@ -53300,7 +53900,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return 0; } -@@ -1968,22 +2307,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a +@@ -1968,22 +2342,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a profile_munmap(addr); @@ -53329,7 +53929,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * this is really a simplified "do_mmap". it only handles * anonymous maps. eventually we may be able to do some -@@ -1997,6 +2332,7 @@ unsigned long do_brk(unsigned long addr, +@@ -1997,6 +2367,7 @@ unsigned long do_brk(unsigned long addr, struct rb_node ** rb_link, * rb_parent; pgoff_t pgoff = addr >> PAGE_SHIFT; int error; @@ -53337,7 +53937,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c len = PAGE_ALIGN(len); if (!len) -@@ -2008,16 +2344,30 @@ unsigned long do_brk(unsigned long addr, +@@ -2008,16 +2379,30 @@ unsigned long do_brk(unsigned long addr, flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; @@ -53369,7 +53969,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c locked += mm->locked_vm; lock_limit = current->signal->rlim[RLIMIT_MEMLOCK].rlim_cur; lock_limit >>= PAGE_SHIFT; -@@ -2034,22 +2384,22 @@ unsigned long do_brk(unsigned long addr, +@@ -2034,22 +2419,22 @@ unsigned long do_brk(unsigned long addr, /* * Clear old maps. this also does some error checking for us */ @@ -53396,7 +53996,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return -ENOMEM; /* Can we just expand an old private anonymous mapping? */ -@@ -2063,7 +2413,7 @@ unsigned long do_brk(unsigned long addr, +@@ -2063,7 +2448,7 @@ unsigned long do_brk(unsigned long addr, */ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); if (!vma) { @@ -53405,7 +54005,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return -ENOMEM; } -@@ -2075,11 +2425,12 @@ unsigned long do_brk(unsigned long addr, +@@ -2075,11 +2460,12 @@ unsigned long do_brk(unsigned long addr, vma->vm_page_prot = vm_get_page_prot(flags); vma_link(mm, vma, prev, rb_link, rb_parent); out: @@ -53420,7 +54020,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return addr; } -@@ -2126,8 +2477,10 @@ void exit_mmap(struct mm_struct *mm) +@@ -2126,8 +2512,10 @@ void exit_mmap(struct mm_struct *mm) * Walk the list again, actually closing and freeing it, * with preemption enabled, without holding any MM locks. 
*/ @@ -53432,7 +54032,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); } -@@ -2141,6 +2494,10 @@ int insert_vm_struct(struct mm_struct * +@@ -2141,6 +2529,10 @@ int insert_vm_struct(struct mm_struct * struct vm_area_struct * __vma, * prev; struct rb_node ** rb_link, * rb_parent; @@ -53443,7 +54043,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * The vm_pgoff of a purely anonymous vma should be irrelevant * until its first write fault, when page's anon_vma and index -@@ -2163,7 +2520,22 @@ int insert_vm_struct(struct mm_struct * +@@ -2163,7 +2555,22 @@ int insert_vm_struct(struct mm_struct * if ((vma->vm_flags & VM_ACCOUNT) && security_vm_enough_memory_mm(mm, vma_pages(vma))) return -ENOMEM; @@ -53466,7 +54066,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c return 0; } -@@ -2181,6 +2553,8 @@ struct vm_area_struct *copy_vma(struct v +@@ -2181,6 +2588,8 @@ struct vm_area_struct *copy_vma(struct v struct rb_node **rb_link, *rb_parent; struct mempolicy *pol; @@ -53475,7 +54075,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * If anonymous vma has not yet been faulted, update new pgoff * to match new location, to increase its chance of merging. -@@ -2224,6 +2598,35 @@ struct vm_area_struct *copy_vma(struct v +@@ -2224,6 +2633,35 @@ struct vm_area_struct *copy_vma(struct v return new_vma; } @@ -53511,7 +54111,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c /* * Return true if the calling process may expand its vm space by the passed * number of pages -@@ -2234,7 +2637,7 @@ int may_expand_vm(struct mm_struct *mm, +@@ -2234,7 +2672,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long lim; lim = current->signal->rlim[RLIMIT_AS].rlim_cur >> PAGE_SHIFT; @@ -53520,7 +54120,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c if (cur + npages > lim) return 0; return 1; -@@ -2303,6 +2706,17 @@ int install_special_mapping(struct mm_st +@@ -2303,6 +2741,17 @@ int install_special_mapping(struct mm_st vma->vm_start = addr; vma->vm_end = addr + len; @@ -53540,7 +54140,7 @@ diff -urNp linux-2.6.32.21/mm/mmap.c linux-2.6.32.21/mm/mmap.c diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c --- linux-2.6.32.21/mm/mprotect.c 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/mm/mprotect.c 2010-09-04 15:54:52.000000000 -0400 ++++ linux-2.6.32.21/mm/mprotect.c 2010-09-17 18:34:04.000000000 -0400 @@ -24,10 +24,16 @@ #include <linux/mmu_notifier.h> #include <linux/migrate.h> @@ -53607,7 +54207,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, unsigned long start, unsigned long end, unsigned long newflags) -@@ -144,6 +192,14 @@ mprotect_fixup(struct vm_area_struct *vm +@@ -144,11 +192,29 @@ mprotect_fixup(struct vm_area_struct *vm int error; int dirty_accountable = 0; @@ -53622,7 +54222,22 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c if (newflags == oldflags) { *pprev = vma; return 0; -@@ -165,6 +221,38 @@ mprotect_fixup(struct vm_area_struct *vm + } + ++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) { ++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next; ++ ++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end) ++ return -ENOMEM; ++ ++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - 
prev->vm_end) ++ return -ENOMEM; ++ } ++ + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we +@@ -165,6 +231,38 @@ mprotect_fixup(struct vm_area_struct *vm } } @@ -53661,7 +54276,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c /* * First try to merge with previous and/or next vma. */ -@@ -195,9 +283,21 @@ success: +@@ -195,9 +293,21 @@ success: * vm_flags and vm_page_prot are protected by the mmap_sem * held in write mode. */ @@ -53684,7 +54299,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c if (vma_wants_writenotify(vma)) { vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); -@@ -238,6 +338,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, +@@ -238,6 +348,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, end = start + len; if (end <= start) return -ENOMEM; @@ -53702,7 +54317,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c if (!arch_validate_prot(prot)) return -EINVAL; -@@ -245,7 +356,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, +@@ -245,7 +366,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, /* * Does the application expect PROT_READ to imply PROT_EXEC: */ @@ -53711,7 +54326,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c prot |= PROT_EXEC; vm_flags = calc_vm_prot_bits(prot); -@@ -277,6 +388,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long, +@@ -277,6 +398,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long, if (start > vma->vm_start) prev = vma; @@ -53728,7 +54343,7 @@ diff -urNp linux-2.6.32.21/mm/mprotect.c linux-2.6.32.21/mm/mprotect.c for (nstart = start ; ; ) { unsigned long newflags; -@@ -301,6 +422,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, +@@ -301,6 +432,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, if (error) goto out; perf_event_mmap(vma); @@ -53843,8 +54458,16 @@ diff -urNp linux-2.6.32.21/mm/mremap.c linux-2.6.32.21/mm/mremap.c if (ret & ~PAGE_MASK) diff -urNp linux-2.6.32.21/mm/nommu.c linux-2.6.32.21/mm/nommu.c --- linux-2.6.32.21/mm/nommu.c 2010-08-29 21:08:20.000000000 -0400 -+++ linux-2.6.32.21/mm/nommu.c 2010-09-04 15:54:52.000000000 -0400 -@@ -761,15 +761,6 @@ struct vm_area_struct *find_vma(struct m ++++ linux-2.6.32.21/mm/nommu.c 2010-09-17 18:34:04.000000000 -0400 +@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; + int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; +-int heap_stack_gap = 0; + + atomic_long_t mmap_pages_allocated; + +@@ -761,15 +760,6 @@ struct vm_area_struct *find_vma(struct m EXPORT_SYMBOL(find_vma); /* @@ -56492,7 +57115,7 @@ diff -urNp linux-2.6.32.21/security/integrity/ima/ima_queue.c linux-2.6.32.21/se return 0; diff -urNp linux-2.6.32.21/security/Kconfig linux-2.6.32.21/security/Kconfig --- linux-2.6.32.21/security/Kconfig 2010-08-13 16:24:37.000000000 -0400 -+++ linux-2.6.32.21/security/Kconfig 2010-09-14 20:52:17.000000000 -0400 ++++ linux-2.6.32.21/security/Kconfig 2010-09-17 17:39:35.000000000 -0400 @@ -4,6 +4,505 @@ menu "Security options" @@ -56516,7 +57139,7 @@ diff -urNp linux-2.6.32.21/security/Kconfig linux-2.6.32.21/security/Kconfig + +config PAX + bool "Enable various PaX features" -+ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86) ++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) + help + This allows you 
to enable various PaX features. PaX adds + intrusion prevention mechanisms to the kernel that reduce diff --git a/2.6.32/4425_grsec-pax-without-grsec.patch b/2.6.32/4425_grsec-pax-without-grsec.patch index 18fa48e..578f33a 100644 --- a/2.6.32/4425_grsec-pax-without-grsec.patch +++ b/2.6.32/4425_grsec-pax-without-grsec.patch @@ -54,7 +54,7 @@ The original version of this patch contained no credits/description. current->comm, task_pid_nr(current), current_uid(), current_euid()); print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); @@ -1838,10 +1842,12 @@ - #ifdef CONFIG_PAX_USERCOPY + void pax_report_leak_to_user(const void *ptr, unsigned long len) { +#ifdef CONFIG_GRKERNSEC @@ -82,11 +82,11 @@ The original version of this patch contained no credits/description. --- a/security/Kconfig +++ b/security/Kconfig @@ -23,7 +23,7 @@ - + config PAX bool "Enable various PaX features" -- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86) -+ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86) +- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) ++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) help This allows you to enable various PaX features. PaX adds intrusion prevention mechanisms to the kernel that reduce diff --git a/2.6.32/4430_grsec-kconfig-default-gids.patch b/2.6.32/4430_grsec-kconfig-default-gids.patch index b7a0413..7ba8aa2 100644 --- a/2.6.32/4430_grsec-kconfig-default-gids.patch +++ b/2.6.32/4430_grsec-kconfig-default-gids.patch @@ -29,25 +29,25 @@ from shooting themselves in the foot. config GRKERNSEC_EXECLOG bool "Exec logging" -@@ -780,7 +780,7 @@ +@@ -785,7 +785,7 @@ config GRKERNSEC_TPE_GID int "GID for untrusted users" depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT - default 1005 + default 100 help - If you have selected the "Invert GID option" above, setting this - GID determines what group TPE restrictions will be *disabled* for. -@@ -792,7 +792,7 @@ + Setting this GID determines what group TPE restrictions will be + *enabled* for. If the sysctl option is enabled, a sysctl option +@@ -794,7 +794,7 @@ config GRKERNSEC_TPE_GID int "GID for trusted users" depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT - default 1005 + default 10 help - If you have selected the "Invert GID option" above, setting this - GID determines what group TPE restrictions will be *disabled* for. -@@ -866,7 +866,7 @@ + Setting this GID determines what group TPE restrictions will be + *disabled* for. If the sysctl option is enabled, a sysctl option +@@ -865,7 +865,7 @@ config GRKERNSEC_SOCKET_ALL_GID int "GID to deny all sockets for" depends on GRKERNSEC_SOCKET_ALL @@ -56,7 +56,7 @@ from shooting themselves in the foot. help Here you can choose the GID to disable socket access for. Remember to add the users you want socket access disabled for to the GID -@@ -887,7 +887,7 @@ +@@ -886,7 +886,7 @@ config GRKERNSEC_SOCKET_CLIENT_GID int "GID to deny client sockets for" depends on GRKERNSEC_SOCKET_CLIENT @@ -65,7 +65,7 @@ from shooting themselves in the foot. help Here you can choose the GID to disable client socket access for. 
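(Editor's sketch, not part of the patch.) The default-GID changes above and below retune the groups that grsecurity's socket restrictions act on: each GRKERNSEC_SOCKET_*_GID option names a group whose members are denied the corresponding class of socket access (all, client-only, or server-only). The enforcement hook itself is outside this excerpt, so purely as an illustration of the GID test the help text describes:

	/* Illustrative only; the patch's actual hook is not shown here,
	 * and it handles the all/client/server cases separately. */
	static int socket_denied_for_current(void)
	{
		if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid))
			return -EACCES;	/* member of the deny-all-sockets group */
		return 0;
	}

The externs grsec_enable_socket_all and grsec_socket_all_gid used here appear in the include/linux/grinternal.h hunk earlier in this patch.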
Remember to add the users you want client socket access disabled for to -@@ -905,7 +905,7 @@ +@@ -904,7 +904,7 @@ config GRKERNSEC_SOCKET_SERVER_GID int "GID to deny server sockets for" depends on GRKERNSEC_SOCKET_SERVER diff --git a/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch b/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch index 9c7f7be..aa2403a 100644 --- a/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch +++ b/2.6.32/4440_selinux-avc_audit-log-curr_ip.patch @@ -21,7 +21,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org> --- a/grsecurity/Kconfig +++ b/grsecurity/Kconfig -@@ -1372,6 +1372,27 @@ +@@ -1371,6 +1371,27 @@ menu "Logging Options" depends on GRKERNSEC diff --git a/2.6.34/0000_README b/2.6.34/0000_README index 596261e..64ae95b 100644 --- a/2.6.34/0000_README +++ b/2.6.34/0000_README @@ -3,7 +3,7 @@ README Individual Patch Descriptions: ----------------------------------------------------------------------------- -Patch: 4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch +Patch: 4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch From: http://www.grsecurity.net Desc: hardened-sources base patch from upstream grsecurity diff --git a/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch b/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch index 26f3ea8..07190cf 100644 --- a/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009162222.patch +++ b/2.6.34/4420_grsecurity-2.2.0-2.6.34.7-201009171945.patch @@ -65,7 +65,16 @@ diff -urNp linux-2.6.34.7/arch/alpha/kernel/module.c linux-2.6.34.7/arch/alpha/k for (i = 0; i < n; i++) { diff -urNp linux-2.6.34.7/arch/alpha/kernel/osf_sys.c linux-2.6.34.7/arch/alpha/kernel/osf_sys.c --- linux-2.6.34.7/arch/alpha/kernel/osf_sys.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/alpha/kernel/osf_sys.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/alpha/kernel/osf_sys.c 2010-09-17 18:52:03.000000000 -0400 +@@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long a + /* At this point: (!vma || addr < vma->vm_end). */ + if (limit - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + vma = vma->vm_next; @@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp merely specific addresses, but regions of memory -- perhaps this feature should be incorporated into all ports? 
*/ @@ -513,7 +522,7 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/fault.c linux-2.6.34.7/arch/arm/mm/fault.c * diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c --- linux-2.6.34.7/arch/arm/mm/mmap.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/arm/mm/mmap.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/arm/mm/mmap.c 2010-09-17 18:52:03.000000000 -0400 @@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp if (len > TASK_SIZE) return -ENOMEM; @@ -525,7 +534,13 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c if (addr) { if (do_align) addr = COLOUR_ALIGN(addr, pgoff); -@@ -75,10 +79,10 @@ arch_get_unmapped_area(struct file *filp +@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } if (len > mm->cached_hole_size) { @@ -539,7 +554,7 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c } full_search: -@@ -94,8 +98,8 @@ full_search: +@@ -94,14 +97,14 @@ full_search: * Start a new search - just in case we missed * some holes. */ @@ -550,6 +565,13 @@ diff -urNp linux-2.6.34.7/arch/arm/mm/mmap.c linux-2.6.34.7/arch/arm/mm/mmap.c mm->cached_hole_size = 0; goto full_search; } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ diff -urNp linux-2.6.34.7/arch/arm/plat-samsung/pm.c linux-2.6.34.7/arch/arm/plat-samsung/pm.c --- linux-2.6.34.7/arch/arm/plat-samsung/pm.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/arm/plat-samsung/pm.c 2010-08-13 18:38:11.000000000 -0400 @@ -706,6 +728,37 @@ diff -urNp linux-2.6.34.7/arch/frv/include/asm/kmap_types.h linux-2.6.34.7/arch/ KM_TYPE_NR }; +diff -urNp linux-2.6.34.7/arch/frv/mm/elf-fdpic.c linux-2.6.34.7/arch/frv/mm/elf-fdpic.c +--- linux-2.6.34.7/arch/frv/mm/elf-fdpic.c 2010-08-13 16:29:15.000000000 -0400 ++++ linux-2.6.34.7/arch/frv/mm/elf-fdpic.c 2010-09-17 18:52:03.000000000 -0400 +@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + goto success; + } + +@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } +@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } diff -urNp linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c --- linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/ia64/hp/common/hwsw_iommu.c 2010-08-13 18:38:11.000000000 -0400 @@ -1075,7 +1128,7 @@ diff -urNp linux-2.6.34.7/arch/ia64/kernel/pci-swiotlb.c linux-2.6.34.7/arch/ia6 .map_page = swiotlb_map_page, diff -urNp linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 
--- linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c 2010-09-17 18:52:03.000000000 -0400 @@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil if (REGION_NUMBER(addr) == RGN_HPAGE) addr = 0; @@ -1090,7 +1143,7 @@ diff -urNp linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c linux-2.6.34.7/arch/ia64/k if (!addr) addr = mm->free_area_cache; -@@ -61,9 +68,9 @@ arch_get_unmapped_area (struct file *fil +@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { @@ -1102,6 +1155,12 @@ diff -urNp linux-2.6.34.7/arch/ia64/kernel/sys_ia64.c linux-2.6.34.7/arch/ia64/k goto full_search; } return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* Remember the address where we stopped this search: */ + mm->free_area_cache = addr + len; + return addr; diff -urNp linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S --- linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/ia64/kernel/vmlinux.lds.S 2010-08-13 18:38:11.000000000 -0400 @@ -1166,6 +1225,18 @@ diff -urNp linux-2.6.34.7/arch/ia64/mm/fault.c linux-2.6.34.7/arch/ia64/mm/fault survive: /* * If for any reason at all we couldn't handle the fault, make +diff -urNp linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c +--- linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c 2010-08-13 16:29:15.000000000 -0400 ++++ linux-2.6.34.7/arch/ia64/mm/hugetlbpage.c 2010-09-17 18:52:03.000000000 -0400 +@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area( + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) + return -ENOMEM; +- if (!vmm || (addr + len) <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = ALIGN(vmm->vm_end, HPAGE_SIZE); + } diff -urNp linux-2.6.34.7/arch/ia64/mm/init.c linux-2.6.34.7/arch/ia64/mm/init.c --- linux-2.6.34.7/arch/ia64/mm/init.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/ia64/mm/init.c 2010-08-13 18:38:11.000000000 -0400 @@ -1487,8 +1558,8 @@ diff -urNp linux-2.6.34.7/arch/mips/kernel/process.c linux-2.6.34.7/arch/mips/ke -} diff -urNp linux-2.6.34.7/arch/mips/kernel/syscall.c linux-2.6.34.7/arch/mips/kernel/syscall.c --- linux-2.6.34.7/arch/mips/kernel/syscall.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/mips/kernel/syscall.c 2010-08-13 18:38:11.000000000 -0400 -@@ -106,6 +106,11 @@ unsigned long arch_get_unmapped_area(str ++++ linux-2.6.34.7/arch/mips/kernel/syscall.c 2010-09-17 18:52:03.000000000 -0400 +@@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(str do_color_align = 0; if (filp || (flags & MAP_SHARED)) do_color_align = 1; @@ -1500,8 +1571,12 @@ diff -urNp linux-2.6.34.7/arch/mips/kernel/syscall.c linux-2.6.34.7/arch/mips/ke if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); -@@ -116,7 +121,7 @@ unsigned long arch_get_unmapped_area(str - (!vmm || addr + len <= vmm->vm_start)) + else + addr = PAGE_ALIGN(addr); + vmm = find_vma(current->mm, addr); +- if (task_size - len >= addr && +- (!vmm || addr + len <= vmm->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len)) return addr; } - addr = TASK_UNMAPPED_BASE; @@ -1509,6 +1584,15 @@ diff -urNp linux-2.6.34.7/arch/mips/kernel/syscall.c linux-2.6.34.7/arch/mips/ke if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); else +@@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(str + /* At this point: (!vmm || addr < vmm->vm_end). */ + if (task_size - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (do_color_align) diff -urNp linux-2.6.34.7/arch/mips/loongson/common/pm.c linux-2.6.34.7/arch/mips/loongson/common/pm.c --- linux-2.6.34.7/arch/mips/loongson/common/pm.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/mips/loongson/common/pm.c 2010-08-13 18:38:11.000000000 -0400 @@ -1703,7 +1787,25 @@ diff -urNp linux-2.6.34.7/arch/parisc/kernel/module.c linux-2.6.34.7/arch/parisc me->arch.unwind_section, table, end, gp); diff -urNp linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c --- linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/parisc/kernel/sys_parisc.c 2010-09-17 18:52:03.000000000 -0400 +@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + } +@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; + if (addr < vma->vm_end) /* handle wraparound */ @@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str if (flags & MAP_FIXED) return addr; @@ -2830,8 +2932,38 @@ diff -urNp linux-2.6.34.7/arch/powerpc/mm/mmap_64.c linux-2.6.34.7/arch/powerpc/ } diff -urNp linux-2.6.34.7/arch/powerpc/mm/slice.c linux-2.6.34.7/arch/powerpc/mm/slice.c --- linux-2.6.34.7/arch/powerpc/mm/slice.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/powerpc/mm/slice.c 2010-08-13 18:38:11.000000000 -0400 -@@ -426,6 +426,11 @@ unsigned long slice_get_unmapped_area(un ++++ linux-2.6.34.7/arch/powerpc/mm/slice.c 2010-09-17 18:52:03.000000000 -0400 +@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_ + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return check_heap_stack_gap(vma, addr, len); + } + +-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) + { + return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, + 1ul << SLICE_LOW_SHIFT); +@@ -256,7 +255,7 @@ full_search: + addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); + continue; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || (addr + len) <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ + if (use_cache) + mm->free_area_cache = addr; +@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un if (fixed && addr > (mm->task_size - len)) return -EINVAL; @@ -3316,6 +3448,56 @@ diff -urNp linux-2.6.34.7/arch/sh/mm/consistent.c linux-2.6.34.7/arch/sh/mm/cons EXPORT_SYMBOL(dma_ops); static int __init dma_init(void) +diff -urNp linux-2.6.34.7/arch/sh/mm/mmap.c linux-2.6.34.7/arch/sh/mm/mmap.c +--- linux-2.6.34.7/arch/sh/mm/mmap.c 2010-08-13 16:29:15.000000000 -0400 ++++ linux-2.6.34.7/arch/sh/mm/mmap.c 2010-09-17 18:52:03.000000000 -0400 +@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -106,7 +105,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -199,7 +197,7 @@ 
arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } diff -urNp linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h --- linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h 2010-08-29 21:16:43.000000000 -0400 +++ linux-2.6.34.7/arch/sparc/include/asm/atomic_64.h 2010-09-15 02:25:59.000000000 -0400 @@ -3873,7 +4055,7 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/pci_sun4v.c linux-2.6.34.7/arch/spar .map_page = dma_4v_map_page, diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c --- linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 18:52:03.000000000 -0400 @@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str if (ARCH_SUN4C && len > 0x20000000) return -ENOMEM; @@ -3883,9 +4065,18 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_32.c linux-2.6.34.7/arch/s if (flags & MAP_SHARED) addr = COLOUR_ALIGN(addr); +@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str + } + if (TASK_SIZE - PAGE_SIZE - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c --- linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 18:52:03.000000000 -0400 @@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str /* We do not accept a shared mapping if it would violate * cache aliasing constraints. @@ -3906,7 +4097,14 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s if (addr) { if (do_color_align) addr = COLOUR_ALIGN(addr, pgoff); -@@ -152,9 +156,9 @@ unsigned long arch_get_unmapped_area(str +@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; } if (len > mm->cached_hole_size) { @@ -3918,7 +4116,7 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s mm->cached_hole_size = 0; } -@@ -174,8 +178,8 @@ full_search: +@@ -174,14 +177,14 @@ full_search: vma = find_vma(mm, VA_EXCLUDE_END); } if (unlikely(task_size < addr)) { @@ -3929,7 +4127,14 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s mm->cached_hole_size = 0; goto full_search; } -@@ -215,7 +219,7 @@ arch_get_unmapped_area_topdown(struct fi + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi /* We do not accept a shared mapping if it would violate * cache aliasing constraints. 
*/ @@ -3938,7 +4143,35 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) return -EINVAL; return addr; -@@ -385,6 +389,12 @@ void arch_pick_mmap_layout(struct mm_str +@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_str gap == RLIM_INFINITY || sysctl_legacy_va_layout) { mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; @@ -3951,7 +4184,7 @@ diff -urNp linux-2.6.34.7/arch/sparc/kernel/sys_sparc_64.c linux-2.6.34.7/arch/s mm->get_unmapped_area = arch_get_unmapped_area; mm->unmap_area = arch_unmap_area; } else { -@@ -397,6 +407,12 @@ void arch_pick_mmap_layout(struct mm_str +@@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_str gap = (task_size / 6 * 5); mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); @@ -4237,8 +4470,8 @@ diff -urNp linux-2.6.34.7/arch/sparc/lib/atomic_64.S linux-2.6.34.7/arch/sparc/l bne,pn %xcc, 2f diff -urNp linux-2.6.34.7/arch/sparc/lib/ksyms.c linux-2.6.34.7/arch/sparc/lib/ksyms.c --- linux-2.6.34.7/arch/sparc/lib/ksyms.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/sparc/lib/ksyms.c 2010-08-13 18:38:11.000000000 -0400 -@@ -142,12 +142,15 @@ EXPORT_SYMBOL(__downgrade_write); ++++ linux-2.6.34.7/arch/sparc/lib/ksyms.c 2010-09-17 18:05:15.000000000 -0400 +@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write); /* Atomic counter implementation. */ EXPORT_SYMBOL(atomic_add); @@ -4248,7 +4481,9 @@ diff -urNp linux-2.6.34.7/arch/sparc/lib/ksyms.c linux-2.6.34.7/arch/sparc/lib/k +EXPORT_SYMBOL(atomic_sub_unchecked); EXPORT_SYMBOL(atomic_sub_ret); EXPORT_SYMBOL(atomic64_add); ++EXPORT_SYMBOL(atomic64_add_unchecked); EXPORT_SYMBOL(atomic64_add_ret); ++EXPORT_SYMBOL(atomic64_add_ret_unchecked); EXPORT_SYMBOL(atomic64_sub); +EXPORT_SYMBOL(atomic64_sub_unchecked); EXPORT_SYMBOL(atomic64_sub_ret); @@ -5175,6 +5410,46 @@ diff -urNp linux-2.6.34.7/arch/sparc/mm/fault_64.c linux-2.6.34.7/arch/sparc/mm/ /* Pure DTLB misses do not tell us whether the fault causing * load/store/atomic was a write or not, it only says that there * was no match. 
So in such a case we (carefully) read the +diff -urNp linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c +--- linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c 2010-08-13 16:29:15.000000000 -0400 ++++ linux-2.6.34.7/arch/sparc/mm/hugetlbpage.c 2010-09-17 18:52:03.000000000 -0400 +@@ -68,7 +68,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) diff -urNp linux-2.6.34.7/arch/sparc/mm/init_32.c linux-2.6.34.7/arch/sparc/mm/init_32.c --- linux-2.6.34.7/arch/sparc/mm/init_32.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/sparc/mm/init_32.c 2010-08-13 18:38:11.000000000 -0400 @@ -9336,7 +9611,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess_64.h linux-2.6.34.7/arch/ #endif /* _ASM_X86_UACCESS_64_H */ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86/include/asm/uaccess.h --- linux-2.6.34.7/arch/x86/include/asm/uaccess.h 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/x86/include/asm/uaccess.h 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/x86/include/asm/uaccess.h 2010-09-17 18:00:42.000000000 -0400 @@ -8,12 +8,15 @@ #include <linux/thread_info.h> #include <linux/prefetch.h> @@ -9401,22 +9676,9 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 /* * The exception table consists of pairs of addresses: the first is the -@@ -179,17 +213,34 @@ extern int __get_user_bad(void); - __ret_gu; \ - }) - -+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) -+#define __put_user_x(size, x, ptr, __ret_pu) \ -+ ({ \ -+ int __dummy; \ -+ asm volatile("call __put_user_" #size : "=a" (__ret_pu), "=c" (__dummy) \ -+ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx"); \ -+ }) -+#else - #define __put_user_x(size, x, ptr, __ret_pu) \ +@@ -183,13 +217,21 @@ extern int __get_user_bad(void); asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") -+#endif - +#ifdef CONFIG_X86_32 @@ -9439,7 +9701,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 ".section .fixup,\"ax\"\n" \ "4: movl %3,%0\n" \ " jmp 3b\n" \ -@@ -197,15 +248,18 @@ extern int __get_user_bad(void); +@@ -197,15 +239,18 @@ extern int __get_user_bad(void); _ASM_EXTABLE(1b, 4b) \ _ASM_EXTABLE(2b, 4b) \ : "=r" (err) \ @@ -9462,7 
+9724,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 #define __put_user_x8(x, ptr, __ret_pu) \ asm volatile("call __put_user_8" : "=a" (__ret_pu) \ -@@ -374,16 +428,18 @@ do { \ +@@ -374,16 +419,18 @@ do { \ } while (0) #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ @@ -9484,7 +9746,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 #define __get_user_size_ex(x, ptr, size) \ do { \ -@@ -407,10 +463,12 @@ do { \ +@@ -407,10 +454,12 @@ do { \ } while (0) #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ @@ -9499,7 +9761,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 #define __put_user_nocheck(x, ptr, size) \ ({ \ -@@ -424,13 +482,24 @@ do { \ +@@ -424,13 +473,24 @@ do { \ int __gu_err; \ unsigned long __gu_val; \ __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ @@ -9526,7 +9788,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 /* * Tell gcc we read from memory instead of writing: this is because -@@ -438,21 +507,26 @@ struct __large_struct { unsigned long bu +@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu * aliasing issues. */ #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ @@ -9557,7 +9819,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 /* * uaccess_try and catch -@@ -530,7 +604,7 @@ struct __large_struct { unsigned long bu +@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu #define get_user_ex(x, ptr) do { \ unsigned long __gue_val; \ __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ @@ -9566,7 +9828,7 @@ diff -urNp linux-2.6.34.7/arch/x86/include/asm/uaccess.h linux-2.6.34.7/arch/x86 } while (0) #ifdef CONFIG_X86_WP_WORKS_OK -@@ -567,6 +641,7 @@ extern struct movsl_mask { +@@ -567,6 +632,7 @@ extern struct movsl_mask { #define ARCH_HAS_NOCACHE_UACCESS 1 @@ -13542,7 +13804,26 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/signal.c linux-2.6.34.7/arch/x86/kerne if (current_thread_info()->status & TS_RESTORE_SIGMASK) diff -urNp linux-2.6.34.7/arch/x86/kernel/smpboot.c linux-2.6.34.7/arch/x86/kernel/smpboot.c --- linux-2.6.34.7/arch/x86/kernel/smpboot.c 2010-08-29 21:16:43.000000000 -0400 -+++ linux-2.6.34.7/arch/x86/kernel/smpboot.c 2010-08-29 21:17:11.000000000 -0400 ++++ linux-2.6.34.7/arch/x86/kernel/smpboot.c 2010-09-17 18:04:17.000000000 -0400 +@@ -98,14 +98,14 @@ static DEFINE_PER_CPU(struct task_struct + */ + static DEFINE_MUTEX(x86_cpu_hotplug_driver_mutex); + +-void cpu_hotplug_driver_lock() ++void cpu_hotplug_driver_lock(void) + { +- mutex_lock(&x86_cpu_hotplug_driver_mutex); ++ mutex_lock(&x86_cpu_hotplug_driver_mutex); + } + +-void cpu_hotplug_driver_unlock() ++void cpu_hotplug_driver_unlock(void) + { +- mutex_unlock(&x86_cpu_hotplug_driver_mutex); ++ mutex_unlock(&x86_cpu_hotplug_driver_mutex); + } + + ssize_t arch_cpu_probe(const char *buf, size_t count) { return -1; } @@ -780,7 +780,11 @@ do_rest: (unsigned long)task_stack_page(c_idle.idle) - KERNEL_STACK_OFFSET + THREAD_SIZE; @@ -13613,8 +13894,8 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/syscall_table_32.S linux-2.6.34.7/arch .long sys_exit diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c --- linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c 2010-08-13 18:38:11.000000000 -0400 -@@ -24,6 +24,221 @@ ++++ 
linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c 2010-09-17 18:52:03.000000000 -0400 +@@ -24,6 +24,224 @@ #include <asm/syscalls.h> @@ -13658,10 +13939,11 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ + + if (addr) { + addr = PAGE_ALIGN(addr); -+ vma = find_vma(mm, addr); -+ if (pax_task_size - len >= addr && -+ (!vma || addr + len <= vma->vm_start)) -+ return addr; ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + if (len > mm->cached_hole_size) { + start_addr = addr = mm->free_area_cache; @@ -13701,13 +13983,8 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ + } + return -ENOMEM; + } -+ if (!vma || addr + len <= vma->vm_start) { -+ /* -+ * Remember the place where we stopped the search: -+ */ -+ mm->free_area_cache = addr + len; -+ return addr; -+ } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; @@ -13717,6 +13994,12 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ + goto full_search; + } + } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; +} + +unsigned long @@ -13752,10 +14035,11 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); -+ vma = find_vma(mm, addr); -+ if (pax_task_size - len >= addr && -+ (!vma || addr + len <= vma->vm_start)) -+ return addr; ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ @@ -13770,7 +14054,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); -+ if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } @@ -13787,7 +14071,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ + * return with success: + */ + vma = find_vma(mm, addr); -+ if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + @@ -13838,7 +14122,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_i386_32.c linux-2.6.34.7/arch/x86/ * end up with proper pt_regs. 
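
The x86 hunks above follow the same pattern as every arch_get_unmapped_area() change in this patch: the open-coded test !vma || addr + len <= vma->vm_start becomes check_heap_stack_gap(), and the bottom-up search is restructured so free_area_cache is updated once, after the loop. The helper's body is not visible in this excerpt. Going by the extern bool check_heap_stack_gap() declaration the patch adds to include/linux/sched.h and the heap_stack_gap sysctl it adds to kernel/sysctl.c, a minimal sketch of the intended semantics follows; the default gap size and the mm/mmap.c placement are assumptions, not taken from the patch, and the real PaX helper may differ in detail.

unsigned long sysctl_heap_stack_gap __read_mostly = 64UL << 10;	/* default assumed */

bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr,
			  unsigned long len)
{
	/* No vma ends above addr: the candidate range collides with nothing. */
	if (!vma)
		return true;

	/* The candidate range must end at or below the start of the next vma... */
	if (addr + len > vma->vm_start)
		return false;

	/* ...and if that vma is a downward-growing stack, it must also be left
	 * at least sysctl_heap_stack_gap bytes of room to grow into. */
	if (vma->vm_flags & VM_GROWSDOWN)
		return vma->vm_start - (addr + len) >= sysctl_heap_stack_gap;

	return true;
}

Note that the call sites keep their own TASK_SIZE/task_size bound checks and always pass the vma returned by find_vma(mm, addr), the first vma whose vm_end lies above addr, so the helper only has to decide whether the candidate range runs into that vma or crowds a stack mapping above it.
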
diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c --- linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c 2010-09-17 18:52:03.000000000 -0400 @@ -32,8 +32,8 @@ out: return error; } @@ -13859,7 +14143,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k *end = TASK_SIZE; } } -@@ -69,11 +69,15 @@ arch_get_unmapped_area(struct file *filp +@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp if (flags & MAP_FIXED) return addr; @@ -13876,7 +14160,22 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k if (addr) { addr = PAGE_ALIGN(addr); vma = find_vma(mm, addr); -@@ -128,7 +132,7 @@ arch_get_unmapped_area_topdown(struct fi +- if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) +@@ -106,7 +109,7 @@ full_search: + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi { struct vm_area_struct *vma; struct mm_struct *mm = current->mm; @@ -13885,7 +14184,7 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k /* requested length too big for entire address space */ if (len > TASK_SIZE) -@@ -141,6 +145,10 @@ arch_get_unmapped_area_topdown(struct fi +@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) goto bottomup; @@ -13896,7 +14195,32 @@ diff -urNp linux-2.6.34.7/arch/x86/kernel/sys_x86_64.c linux-2.6.34.7/arch/x86/k /* requesting a specific address */ if (addr) { addr = PAGE_ALIGN(addr); -@@ -198,13 +206,21 @@ bottomup: + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr-len; + } +@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr; + +@@ -198,13 +204,21 @@ bottomup: * can happen with large stack limits and large mmap() * allocations. 
*/ @@ -17862,7 +18186,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/highmem_32.c linux-2.6.34.7/arch/x86/mm/hi } diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/hugetlbpage.c --- linux-2.6.34.7/arch/x86/mm/hugetlbpage.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/x86/mm/hugetlbpage.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/x86/mm/hugetlbpage.c 2010-09-17 18:52:03.000000000 -0400 @@ -266,13 +266,18 @@ static unsigned long hugetlb_get_unmappe struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; @@ -17886,7 +18210,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h } full_search: -@@ -280,13 +285,13 @@ full_search: +@@ -280,26 +285,27 @@ full_search: for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { /* At this point: (!vma || addr < vma->vm_end). */ @@ -17903,18 +18227,38 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h mm->cached_hole_size = 0; goto full_search; } -@@ -309,9 +314,8 @@ static unsigned long hugetlb_get_unmappe + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = ALIGN(vma->vm_end, huge_page_size(h)); + } ++ ++ mm->free_area_cache = addr + len; ++ return addr; + } + + static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, +@@ -308,10 +314,9 @@ static unsigned long hugetlb_get_unmappe + { struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; - struct vm_area_struct *vma, *prev_vma; +- struct vm_area_struct *vma, *prev_vma; - unsigned long base = mm->mmap_base, addr = addr0; ++ struct vm_area_struct *vma; + unsigned long base = mm->mmap_base, addr; unsigned long largest_hole = mm->cached_hole_size; - int first_time = 1; /* don't allow allocations above current base */ if (mm->free_area_cache > base) -@@ -321,7 +325,7 @@ static unsigned long hugetlb_get_unmappe +@@ -321,7 +326,7 @@ static unsigned long hugetlb_get_unmappe largest_hole = 0; mm->free_area_cache = base; } @@ -17923,7 +18267,51 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h /* make sure it can fit in the remaining address space */ if (mm->free_area_cache < len) goto fail; -@@ -363,22 +367,26 @@ try_again: +@@ -329,33 +334,27 @@ try_again: + /* either no address requested or cant fit in requested address hole */ + addr = (mm->free_area_cache - len) & huge_page_mask(h); + do { ++ vma = find_vma(mm, addr); + /* + * Lookup failure means no vma is above this address, + * i.e. 
return with success: +- */ +- if (!(vma = find_vma_prev(mm, addr, &prev_vma))) +- return addr; +- +- /* + * new region fits between prev_vma->vm_end and + * vma->vm_start, use it: + */ +- if (addr + len <= vma->vm_start && +- (!prev_vma || (addr >= prev_vma->vm_end))) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ +- mm->cached_hole_size = largest_hole; +- return (mm->free_area_cache = addr); +- } else { +- /* pull free_area_cache down to the first hole */ +- if (mm->free_area_cache == vma->vm_end) { +- mm->free_area_cache = vma->vm_start; +- mm->cached_hole_size = largest_hole; +- } ++ mm->cached_hole_size = largest_hole; ++ return (mm->free_area_cache = addr); ++ } ++ /* pull free_area_cache down to the first hole */ ++ if (mm->free_area_cache == vma->vm_end) { ++ mm->free_area_cache = vma->vm_start; ++ mm->cached_hole_size = largest_hole; + } + + /* remember the largest hole we saw so far */ + if (addr + largest_hole < vma->vm_start) +- largest_hole = vma->vm_start - addr; ++ largest_hole = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = (vma->vm_start - len) & huge_page_mask(h); +@@ -363,22 +362,26 @@ try_again: fail: /* @@ -17961,7 +18349,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h mm->cached_hole_size = ~0UL; addr = hugetlb_get_unmapped_area_bottomup(file, addr0, len, pgoff, flags); -@@ -386,6 +394,7 @@ fail: +@@ -386,6 +389,7 @@ fail: /* * Restore the topdown base: */ @@ -17969,7 +18357,7 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h mm->free_area_cache = base; mm->cached_hole_size = ~0UL; -@@ -399,10 +408,17 @@ hugetlb_get_unmapped_area(struct file *f +@@ -399,10 +403,17 @@ hugetlb_get_unmapped_area(struct file *f struct hstate *h = hstate_file(file); struct mm_struct *mm = current->mm; struct vm_area_struct *vma; @@ -17988,15 +18376,16 @@ diff -urNp linux-2.6.34.7/arch/x86/mm/hugetlbpage.c linux-2.6.34.7/arch/x86/mm/h return -ENOMEM; if (flags & MAP_FIXED) { -@@ -414,7 +430,7 @@ hugetlb_get_unmapped_area(struct file *f +@@ -414,8 +425,7 @@ hugetlb_get_unmapped_area(struct file *f if (addr) { addr = ALIGN(addr, huge_page_size(h)); vma = find_vma(mm, addr); - if (TASK_SIZE - len >= addr && -+ if (pax_task_size - len >= addr && - (!vma || addr + len <= vma->vm_start)) +- (!vma || addr + len <= vma->vm_start)) ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) return addr; } + if (mm->get_unmapped_area == arch_get_unmapped_area) diff -urNp linux-2.6.34.7/arch/x86/mm/init_32.c linux-2.6.34.7/arch/x86/mm/init_32.c --- linux-2.6.34.7/arch/x86/mm/init_32.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/arch/x86/mm/init_32.c 2010-08-13 18:38:11.000000000 -0400 @@ -19938,7 +20327,7 @@ diff -urNp linux-2.6.34.7/arch/x86/vdso/vma.c linux-2.6.34.7/arch/x86/vdso/vma.c -__setup("vdso=", vdso_setup); diff -urNp linux-2.6.34.7/arch/x86/xen/enlighten.c linux-2.6.34.7/arch/x86/xen/enlighten.c --- linux-2.6.34.7/arch/x86/xen/enlighten.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/arch/x86/xen/enlighten.c 2010-08-13 18:38:11.000000000 -0400 ++++ linux-2.6.34.7/arch/x86/xen/enlighten.c 2010-09-17 18:02:09.000000000 -0400 @@ -74,8 +74,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); struct shared_info xen_dummy_shared_info; @@ -19964,7 +20353,7 @@ diff -urNp linux-2.6.34.7/arch/x86/xen/enlighten.c linux-2.6.34.7/arch/x86/xen/e - x86_configure_nx(); +#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) 
+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && -+ (cpuid_edx(0x80000001) & (1 << (X86_FEATURE_NX & 31)))) { ++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { + unsigned l, h; + + __supported_pte_mask |= _PAGE_NX; @@ -30530,19 +30919,6 @@ diff -urNp linux-2.6.34.7/fs/ext4/balloc.c linux-2.6.34.7/fs/ext4/balloc.c if (free_blocks >= (nblocks + dirty_blocks)) return 1; } -diff -urNp linux-2.6.34.7/fs/ext4/ioctl.c linux-2.6.34.7/fs/ext4/ioctl.c ---- linux-2.6.34.7/fs/ext4/ioctl.c 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/fs/ext4/ioctl.c 2010-08-13 18:38:12.000000000 -0400 -@@ -230,6 +230,9 @@ setversion_out: - struct file *donor_filp; - int err; - -+ /* temporary workaround for bugs in here */ -+ return -EOPNOTSUPP; -+ - if (!(filp->f_mode & FMODE_READ) || - !(filp->f_mode & FMODE_WRITE)) - return -EBADF; diff -urNp linux-2.6.34.7/fs/ext4/namei.c linux-2.6.34.7/fs/ext4/namei.c --- linux-2.6.34.7/fs/ext4/namei.c 2010-08-13 16:29:15.000000000 -0400 +++ linux-2.6.34.7/fs/ext4/namei.c 2010-08-13 18:38:12.000000000 -0400 @@ -33059,7 +33435,7 @@ diff -urNp linux-2.6.34.7/fs/proc/root.c linux-2.6.34.7/fs/proc/root.c diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c --- linux-2.6.34.7/fs/proc/task_mmu.c 2010-08-29 21:16:40.000000000 -0400 -+++ linux-2.6.34.7/fs/proc/task_mmu.c 2010-08-13 18:39:46.000000000 -0400 ++++ linux-2.6.34.7/fs/proc/task_mmu.c 2010-09-17 18:39:47.000000000 -0400 @@ -49,8 +49,13 @@ void task_mem(struct seq_file *m, struct "VmExe:\t%8lu kB\n" "VmLib:\t%8lu kB\n" @@ -33104,22 +33480,30 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) { struct mm_struct *mm = vma->vm_mm; -@@ -221,19 +238,29 @@ static void show_map_vma(struct seq_file +@@ -210,7 +227,6 @@ static void show_map_vma(struct seq_file + int flags = vma->vm_flags; + unsigned long ino = 0; + unsigned long long pgoff = 0; +- unsigned long start; + dev_t dev = 0; + int len; + +@@ -221,19 +237,24 @@ static void show_map_vma(struct seq_file pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; } -+ - /* We don't show the stack guard page in /proc/maps */ - start = vma->vm_start; - if (vma->vm_flags & VM_GROWSDOWN) - start += PAGE_SIZE; +- /* We don't show the stack guard page in /proc/maps */ +- start = vma->vm_start; +- if (vma->vm_flags & VM_GROWSDOWN) +- start += PAGE_SIZE; seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", +- start, +#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP -+ PAX_RAND_FLAGS(mm) ? 0UL : start, ++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start, + PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end, +#else - start, ++ vma->vm_start, vma->vm_end, +#endif flags & VM_READ ? 
'r' : '-', @@ -33134,7 +33518,7 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c MAJOR(dev), MINOR(dev), ino, &len); /* -@@ -242,16 +269,16 @@ static void show_map_vma(struct seq_file +@@ -242,16 +263,16 @@ static void show_map_vma(struct seq_file */ if (file) { pad_len_spaces(m, len); @@ -33156,7 +33540,7 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c name = "[stack]"; } } else { -@@ -393,11 +420,16 @@ static int show_smap(struct seq_file *m, +@@ -393,11 +414,16 @@ static int show_smap(struct seq_file *m, }; memset(&mss, 0, sizeof mss); @@ -33178,7 +33562,7 @@ diff -urNp linux-2.6.34.7/fs/proc/task_mmu.c linux-2.6.34.7/fs/proc/task_mmu.c show_map_vma(m, vma); seq_printf(m, -@@ -412,7 +444,11 @@ static int show_smap(struct seq_file *m, +@@ -412,7 +438,11 @@ static int show_smap(struct seq_file *m, "Swap: %8lu kB\n" "KernelPageSize: %8lu kB\n" "MMUPageSize: %8lu kB\n", @@ -40282,8 +40666,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_fork.c linux-2.6.34.7/grsecurity/grse +} diff -urNp linux-2.6.34.7/grsecurity/grsec_init.c linux-2.6.34.7/grsecurity/grsec_init.c --- linux-2.6.34.7/grsecurity/grsec_init.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.34.7/grsecurity/grsec_init.c 2010-08-13 18:38:12.000000000 -0400 -@@ -0,0 +1,258 @@ ++++ linux-2.6.34.7/grsecurity/grsec_init.c 2010-09-17 19:44:58.000000000 -0400 +@@ -0,0 +1,266 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/mm.h> @@ -40332,6 +40716,7 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_init.c linux-2.6.34.7/grsecurity/grse +#endif +int grsec_lastack_retries; +int grsec_enable_tpe_all; ++int grsec_enable_tpe_invert; +int grsec_enable_socket_all; +int grsec_socket_all_gid; +int grsec_enable_socket_client; @@ -40422,6 +40807,13 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_init.c linux-2.6.34.7/grsecurity/grse +#endif +#endif + ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ /* for backward compatibility, tpe_invert always defaults to on if ++ enabled in the kernel ++ */ ++ grsec_enable_tpe_invert = 1; ++#endif ++ +#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON) +#ifndef CONFIG_GRKERNSEC_SYSCTL + grsec_lock = 1; @@ -41418,8 +41810,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_sock.c linux-2.6.34.7/grsecurity/grse +} diff -urNp linux-2.6.34.7/grsecurity/grsec_sysctl.c linux-2.6.34.7/grsecurity/grsec_sysctl.c --- linux-2.6.34.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.34.7/grsecurity/grsec_sysctl.c 2010-08-13 18:38:12.000000000 -0400 -@@ -0,0 +1,415 @@ ++++ linux-2.6.34.7/grsecurity/grsec_sysctl.c 2010-09-17 19:45:17.000000000 -0400 +@@ -0,0 +1,424 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/sysctl.h> @@ -41668,6 +42060,15 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_sysctl.c linux-2.6.34.7/grsecurity/gr + .proc_handler = &proc_dointvec, + }, +#endif ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ { ++ .procname = "tpe_invert", ++ .data = &grsec_enable_tpe_invert, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif +#ifdef CONFIG_GRKERNSEC_TPE_ALL + { + .procname = "tpe_restrict_all", @@ -41874,8 +42275,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_time.c linux-2.6.34.7/grsecurity/grse +} diff -urNp linux-2.6.34.7/grsecurity/grsec_tpe.c linux-2.6.34.7/grsecurity/grsec_tpe.c --- linux-2.6.34.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.34.7/grsecurity/grsec_tpe.c 2010-08-13 
18:38:12.000000000 -0400 -@@ -0,0 +1,38 @@ ++++ linux-2.6.34.7/grsecurity/grsec_tpe.c 2010-09-17 19:44:58.000000000 -0400 +@@ -0,0 +1,39 @@ +#include <linux/kernel.h> +#include <linux/sched.h> +#include <linux/file.h> @@ -41893,7 +42294,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsec_tpe.c linux-2.6.34.7/grsecurity/grsec + + if (cred->uid && ((grsec_enable_tpe && +#ifdef CONFIG_GRKERNSEC_TPE_INVERT -+ !in_group_p(grsec_tpe_gid) ++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) || ++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))) +#else + in_group_p(grsec_tpe_gid) +#endif @@ -41981,8 +42383,8 @@ diff -urNp linux-2.6.34.7/grsecurity/grsum.c linux-2.6.34.7/grsecurity/grsum.c +} diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig --- linux-2.6.34.7/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.34.7/grsecurity/Kconfig 2010-09-15 02:12:22.000000000 -0400 -@@ -0,0 +1,987 @@ ++++ linux-2.6.34.7/grsecurity/Kconfig 2010-09-17 19:44:58.000000000 -0400 +@@ -0,0 +1,986 @@ +# +# grecurity configuration +# @@ -42134,7 +42536,7 @@ diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig + select PAX_PT_PAX_FLAGS + select PAX_HAVE_ACL_FLAGS + select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN) -+ select PAX_MEMORY_UDEREF if (X86_32 && !XEN) ++ select PAX_MEMORY_UDEREF if (X86 && !XEN) + select PAX_RANDKSTACK if (X86_TSC && !X86_64) + select PAX_SEGMEXEC if (X86_32) + select PAX_PAGEEXEC @@ -42743,11 +43145,14 @@ diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig + is enabled, a sysctl option with name "tpe" is created. + +config GRKERNSEC_TPE_ALL -+ bool "Partially restrict non-root users" ++ bool "Partially restrict all non-root users" + depends on GRKERNSEC_TPE + help -+ If you say Y here, All non-root users other than the ones in the -+ group specified in the main TPE option will only be allowed to ++ If you say Y here, all non-root users will be covered under ++ a weaker TPE restriction. This is separate from, and in addition to, ++ the main TPE options that you have selected elsewhere. Thus, if a ++ "trusted" GID is chosen, this restriction applies to even that GID. ++ Under this restriction, all non-root users will only be allowed to + execute files in directories they own that are not group or + world-writable, or in directories owned by root and writable only by + root. If the sysctl option is enabled, a sysctl option with name @@ -42760,31 +43165,27 @@ diff -urNp linux-2.6.34.7/grsecurity/Kconfig linux-2.6.34.7/grsecurity/Kconfig + If you say Y here, the group you specify in the TPE configuration will + decide what group TPE restrictions will be *disabled* for. This + option is useful if you want TPE restrictions to be applied to most -+ users on the system. ++ users on the system. If the sysctl option is enabled, a sysctl option ++ with name "tpe_invert" is created. Unlike other sysctl options, this ++ entry will default to on for backward-compatibility. + +config GRKERNSEC_TPE_GID + int "GID for untrusted users" + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT + default 1005 + help -+ If you have selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *disabled* for. -+ If you have not selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *enabled* for. -+ If the sysctl option is enabled, a sysctl option with name "tpe_gid" -+ is created. 
++ Setting this GID determines what group TPE restrictions will be ++ *enabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. + +config GRKERNSEC_TPE_GID + int "GID for trusted users" + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT + default 1005 + help -+ If you have selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *disabled* for. -+ If you have not selected the "Invert GID option" above, setting this -+ GID determines what group TPE restrictions will be *enabled* for. -+ If the sysctl option is enabled, a sysctl option with name "tpe_gid" -+ is created. ++ Setting this GID determines what group TPE restrictions will be ++ *disabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. + +endmenu +menu "Network Protections" @@ -44710,7 +45111,7 @@ diff -urNp linux-2.6.34.7/include/linux/grdefs.h linux-2.6.34.7/include/linux/gr +#endif diff -urNp linux-2.6.34.7/include/linux/grinternal.h linux-2.6.34.7/include/linux/grinternal.h --- linux-2.6.34.7/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500 -+++ linux-2.6.34.7/include/linux/grinternal.h 2010-08-13 18:38:12.000000000 -0400 ++++ linux-2.6.34.7/include/linux/grinternal.h 2010-09-17 19:44:58.000000000 -0400 @@ -0,0 +1,211 @@ +#ifndef __GRINTERNAL_H +#define __GRINTERNAL_H @@ -44776,7 +45177,7 @@ diff -urNp linux-2.6.34.7/include/linux/grinternal.h linux-2.6.34.7/include/linu +extern int grsec_enable_tpe; +extern int grsec_tpe_gid; +extern int grsec_enable_tpe_all; -+extern int grsec_enable_sidcaps; ++extern int grsec_enable_tpe_invert; +extern int grsec_enable_socket_all; +extern int grsec_socket_all_gid; +extern int grsec_enable_socket_client; @@ -45963,7 +46364,7 @@ diff -urNp linux-2.6.34.7/include/linux/rmap.h linux-2.6.34.7/include/linux/rmap diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sched.h --- linux-2.6.34.7/include/linux/sched.h 2010-08-13 16:29:15.000000000 -0400 -+++ linux-2.6.34.7/include/linux/sched.h 2010-09-15 02:12:09.000000000 -0400 ++++ linux-2.6.34.7/include/linux/sched.h 2010-09-17 18:52:03.000000000 -0400 @@ -101,6 +101,7 @@ struct bio_list; struct fs_struct; struct bts_context; @@ -45972,7 +46373,20 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch /* * List of flags we want to share for kernel threads, -@@ -628,6 +629,15 @@ struct signal_struct { +@@ -382,10 +383,12 @@ struct user_namespace; + #define DEFAULT_MAX_MAP_COUNT (USHORT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + + extern int sysctl_max_map_count; ++extern unsigned long sysctl_heap_stack_gap; + + #include <linux/aio.h> + + #ifdef CONFIG_MMU ++extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len); + extern void arch_pick_mmap_layout(struct mm_struct *mm); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, +@@ -628,6 +631,15 @@ struct signal_struct { struct tty_audit_buf *tty_audit_buf; #endif @@ -45988,7 +46402,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch int oom_adj; /* OOM kill score adjustment (bit shift) */ }; -@@ -1169,7 +1179,7 @@ struct rcu_node; +@@ -1169,7 +1181,7 @@ struct rcu_node; struct task_struct { volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ @@ -45997,7 +46411,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch atomic_t usage; unsigned int flags; /* per 
process flags, defined below */ unsigned int ptrace; -@@ -1283,8 +1293,8 @@ struct task_struct { +@@ -1283,8 +1295,8 @@ struct task_struct { struct list_head thread_group; struct completion *vfork_done; /* for vfork() */ @@ -46008,7 +46422,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch cputime_t utime, stime, utimescaled, stimescaled; cputime_t gtime; -@@ -1300,16 +1310,6 @@ struct task_struct { +@@ -1300,16 +1312,6 @@ struct task_struct { struct task_cputime cputime_expires; struct list_head cpu_timers[3]; @@ -46025,7 +46439,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch char comm[TASK_COMM_LEN]; /* executable name excluding path - access with [gs]et_task_comm (which lock it with task_lock()) -@@ -1393,6 +1393,15 @@ struct task_struct { +@@ -1393,6 +1395,15 @@ struct task_struct { int softirqs_enabled; int softirq_context; #endif @@ -46041,7 +46455,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch #ifdef CONFIG_LOCKDEP # define MAX_LOCK_DEPTH 48UL u64 curr_chain_key; -@@ -1413,6 +1422,9 @@ struct task_struct { +@@ -1413,6 +1424,9 @@ struct task_struct { struct backing_dev_info *backing_dev_info; @@ -46051,7 +46465,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch struct io_context *io_context; unsigned long ptrace_message; -@@ -1476,6 +1488,20 @@ struct task_struct { +@@ -1476,6 +1490,20 @@ struct task_struct { unsigned long default_timer_slack_ns; struct list_head *scm_work_list; @@ -46072,7 +46486,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch #ifdef CONFIG_FUNCTION_GRAPH_TRACER /* Index of current stored address in ret_stack */ int curr_ret_stack; -@@ -1507,6 +1533,52 @@ struct task_struct { +@@ -1507,6 +1535,52 @@ struct task_struct { #endif }; @@ -46125,7 +46539,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch /* Future-safe accessor for struct task_struct's cpus_allowed. 
 */
 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-@@ -2108,7 +2180,7 @@ extern void __cleanup_sighand(struct sig
+@@ -2108,7 +2182,7 @@ extern void __cleanup_sighand(struct sig
  extern void exit_itimers(struct signal_struct *);
  extern void flush_itimer_signals(void);
@@ -46134,7 +46548,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
  extern void daemonize(const char *, ...);
  extern int allow_signal(int);
-@@ -2221,8 +2293,8 @@ static inline void unlock_task_sighand(s
+@@ -2221,8 +2295,8 @@ static inline void unlock_task_sighand(s
 
  #ifndef __HAVE_THREAD_FUNCTIONS
@@ -46145,7 +46559,7 @@ diff -urNp linux-2.6.34.7/include/linux/sched.h linux-2.6.34.7/include/linux/sch
  static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org)
  {
-@@ -2237,13 +2309,17 @@ static inline unsigned long *end_of_stac
+@@ -2237,13 +2311,17 @@ static inline unsigned long *end_of_stac
  #endif
@@ -47932,7 +48346,7 @@ diff -urNp linux-2.6.34.7/kernel/fork.c linux-2.6.34.7/kernel/fork.c
 		new_fs = fs;
 diff -urNp linux-2.6.34.7/kernel/futex.c linux-2.6.34.7/kernel/futex.c
 --- linux-2.6.34.7/kernel/futex.c	2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/kernel/futex.c	2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/kernel/futex.c	2010-09-17 17:43:22.000000000 -0400
 @@ -54,6 +54,7 @@
  #include <linux/mount.h>
  #include <linux/pagemap.h>
@@ -47962,19 +48376,17 @@ diff -urNp linux-2.6.34.7/kernel/futex.c linux-2.6.34.7/kernel/futex.c
 	restart->futex.val = val;
 	restart->futex.time = abs_time->tv64;
 	restart->futex.bitset = bitset;
-@@ -2376,7 +2382,10 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
  {
 	struct robust_list_head __user *head;
 	unsigned long ret;
+#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP
 	const struct cred *cred = current_cred(), *pcred;
+#endif
 
 	if (!futex_cmpxchg_enabled)
 		return -ENOSYS;
-@@ -2392,11 +2401,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
+@@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi
 		if (!p)
 			goto err_unlock;
 		ret = -EPERM;
@@ -47991,7 +48403,7 @@ diff -urNp linux-2.6.34.7/kernel/futex.c linux-2.6.34.7/kernel/futex.c
 		head = p->robust_list;
 		rcu_read_unlock();
 	}
-@@ -2458,7 +2472,7 @@ retry:
+@@ -2458,7 +2471,7 @@ retry:
  */
  static inline int fetch_robust_entry(struct robust_list __user **entry,
 				     struct robust_list __user * __user *head,
@@ -49462,7 +49874,7 @@ diff -urNp linux-2.6.34.7/kernel/sys.c linux-2.6.34.7/kernel/sys.c
 }
 diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
 --- linux-2.6.34.7/kernel/sysctl.c	2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/kernel/sysctl.c	2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/kernel/sysctl.c	2010-09-17 18:52:03.000000000 -0400
 @@ -76,6 +76,13 @@
@@ -49529,7 +49941,21 @@ diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
 	{
 		.procname	= "sched_child_runs_first",
 		.data		= &sysctl_sched_child_runs_first,
+@@ -1124,6 +1162,13 @@ static struct ctl_table vm_table[] = {
+ 		.proc_handler	= proc_dointvec_minmax,
+ 		.extra1		= &zero,
+ 	},
++	{
++		.procname	= "heap_stack_gap",
++		.data		= &sysctl_heap_stack_gap,
++		.maxlen		= sizeof(sysctl_heap_stack_gap),
++		.mode		= 0644,
++		.proc_handler	= proc_doulongvec_minmax,
++	},
+ #else
+ 	{
+ 		.procname	= "nr_trim_pages",
+@@ -1630,6 +1675,16 @@ int sysctl_perm(struct ctl_table_root *r
 	int error;
 	int mode;
 
@@ -49546,7 +49972,7 @@ diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
 	error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC));
 	if (error)
 		return error;
-@@ -2138,6 +2186,8 @@ static int __do_proc_dointvec(void *tbl_
+@@ -2138,6 +2193,8 @@ static int __do_proc_dointvec(void *tbl_
 			len = strlen(buf);
 			if (len > left)
 				len = left;
@@ -49555,7 +49981,7 @@ diff -urNp linux-2.6.34.7/kernel/sysctl.c linux-2.6.34.7/kernel/sysctl.c
 			if(copy_to_user(s, buf, len))
 				return -EFAULT;
 			left -= len;
-@@ -2363,6 +2413,8 @@ static int __do_proc_doulongvec_minmax(v
+@@ -2363,6 +2420,8 @@ static int __do_proc_doulongvec_minmax(v
 			len = strlen(buf);
 			if (len > left)
 				len = left;
@@ -49754,25 +50180,6 @@ diff -urNp linux-2.6.34.7/kernel/trace/ftrace.c linux-2.6.34.7/kernel/trace/ftra
 }
 
 /*
-diff -urNp linux-2.6.34.7/kernel/trace/Kconfig linux-2.6.34.7/kernel/trace/Kconfig
---- linux-2.6.34.7/kernel/trace/Kconfig	2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/kernel/trace/Kconfig	2010-08-13 18:38:12.000000000 -0400
-@@ -124,6 +124,7 @@ if FTRACE
- config FUNCTION_TRACER
- 	bool "Kernel Function Tracer"
- 	depends on HAVE_FUNCTION_TRACER
-+	depends on !PAX_KERNEXEC
- 	select FRAME_POINTER
- 	select KALLSYMS
- 	select GENERIC_TRACER
-@@ -353,6 +354,7 @@ config PROFILE_KSYM_TRACER
- config STACK_TRACER
- 	bool "Trace max stack"
- 	depends on HAVE_FUNCTION_TRACER
-+	depends on !PAX_KERNEXEC
- 	select FUNCTION_TRACER
- 	select STACKTRACE
- 	select KALLSYMS
 diff -urNp linux-2.6.34.7/kernel/trace/ring_buffer.c linux-2.6.34.7/kernel/trace/ring_buffer.c
 --- linux-2.6.34.7/kernel/trace/ring_buffer.c	2010-08-29 21:16:43.000000000 -0400
 +++ linux-2.6.34.7/kernel/trace/ring_buffer.c	2010-08-29 21:17:12.000000000 -0400
@@ -50272,16 +50679,8 @@ diff -urNp linux-2.6.34.7/mm/madvise.c linux-2.6.34.7/mm/madvise.c
 		goto out;
 diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 --- linux-2.6.34.7/mm/memory.c	2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/memory.c	2010-08-29 21:17:12.000000000 -0400
-@@ -48,6 +48,7 @@
- #include <linux/ksm.h>
- #include <linux/rmap.h>
- #include <linux/module.h>
-+#include <linux/security.h>
- #include <linux/delayacct.h>
- #include <linux/init.h>
- #include <linux/writeback.h>
-@@ -259,8 +260,12 @@ static inline void free_pmd_range(struct
++++ linux-2.6.34.7/mm/memory.c	2010-09-17 18:41:42.000000000 -0400
+@@ -259,8 +259,12 @@ static inline void free_pmd_range(struct
 		return;
 	pmd = pmd_offset(pud, start);
@@ -50294,7 +50693,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 }
 
 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
-@@ -292,8 +297,12 @@ static inline void free_pud_range(struct
+@@ -292,8 +296,12 @@ static inline void free_pud_range(struct
 		return;
 	pud = pud_offset(pgd, start);
@@ -50307,7 +50706,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 }
 
 /*
-@@ -1354,10 +1363,10 @@ int __get_user_pages(struct task_struct
+@@ -1354,10 +1362,10 @@ int __get_user_pages(struct task_struct
 			(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
 	i = 0;
@@ -50320,7 +50719,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 		if (!vma && in_gate_area(tsk, start)) {
 			unsigned long pg = start & PAGE_MASK;
 			struct vm_area_struct *gate_vma = get_gate_vma(tsk);
-@@ -1409,7 +1418,7 @@ int __get_user_pages(struct task_struct
+@@ -1409,7 +1417,7 @@ int __get_user_pages(struct task_struct
 			continue;
 		}
@@ -50329,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 		    (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
 		    !(vm_flags & vma->vm_flags))
 			return i ? : -EFAULT;
-@@ -1484,7 +1493,7 @@ int __get_user_pages(struct task_struct
+@@ -1484,7 +1492,7 @@ int __get_user_pages(struct task_struct
 			start += PAGE_SIZE;
 			nr_pages--;
 		} while (nr_pages && start < vma->vm_end);
@@ -50338,7 +50737,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	return i;
 }
 
-@@ -2080,6 +2089,186 @@ static inline void cow_user_page(struct
+@@ -2080,6 +2088,186 @@ static inline void cow_user_page(struct
 	copy_user_highpage(dst, src, va, vma);
 }
@@ -50525,7 +50924,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 /*
  * This routine handles present pages, when users try to write
  * to a shared page. It is done by copying the page to a new address
-@@ -2266,6 +2455,12 @@ gotten:
+@@ -2266,6 +2454,12 @@ gotten:
 	 */
 	page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
 	if (likely(pte_same(*page_table, orig_pte))) {
@@ -50538,7 +50937,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 		if (old_page) {
 			if (!PageAnon(old_page)) {
 				dec_mm_counter_fast(mm, MM_FILEPAGES);
-@@ -2317,6 +2512,10 @@ gotten:
+@@ -2317,6 +2511,10 @@ gotten:
 			page_remove_rmap(old_page);
 		}
 
@@ -50549,7 +50948,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 		/* Free the old page.. */
 		new_page = old_page;
 		ret |= VM_FAULT_WRITE;
-@@ -2725,6 +2924,11 @@ static int do_swap_page(struct mm_struct
+@@ -2725,6 +2923,11 @@ static int do_swap_page(struct mm_struct
 	swap_free(entry);
 	if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
 		try_to_free_swap(page);
@@ -50561,7 +50960,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	unlock_page(page);
 
 	if (flags & FAULT_FLAG_WRITE) {
-@@ -2736,6 +2940,11 @@ static int do_swap_page(struct mm_struct
+@@ -2736,6 +2939,11 @@ static int do_swap_page(struct mm_struct
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, page_table);
@@ -50573,7 +50972,41 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 out:
+@@ -2751,33 +2959,6 @@ out_release:
+ }
+ 
+ /*
+- * This is like a special single-page "expand_downwards()",
+- * except we must first make sure that 'address-PAGE_SIZE'
+- * doesn't hit another vma.
+- *
+- * The "find_vma()" will do the right thing even if we wrap
+- */
+-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
+-{
+-	address &= PAGE_MASK;
+-	if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
+-		struct vm_area_struct *prev = vma->vm_prev;
+-
+-		/*
+-		 * Is there a mapping abutting this one below?
+-		 *
+-		 * That's only ok if it's the same stack mapping
+-		 * that has gotten split..
+-		 */
+-		if (prev && prev->vm_end == address)
+-			return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
+-
+-		expand_stack(vma, address - PAGE_SIZE);
+-	}
+-	return 0;
+-}
+-
+-/*
+ * We enter with non-exclusive mmap_sem (to exclude vma changes,
+ * but allow concurrent faults), and pte mapped but not yet locked.
+ * We return with mmap_sem still held, but pte unmapped and unlocked.
+@@ -2786,27 +2967,23 @@ static int do_anonymous_page(struct mm_s
 		unsigned long address, pte_t *page_table, pmd_t *pmd,
 		unsigned int flags)
 {
@@ -50582,7 +51015,31 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	spinlock_t *ptl;
 	pte_t entry;
 
+-	pte_unmap(page_table);
+-
+-	/* Check if we need to add a guard page to the stack */
+-	if (check_stack_guard_page(vma, address) < 0)
+-		return VM_FAULT_SIGBUS;
+-
+-	/* Use the zero-page for reads */
+ 	if (!(flags & FAULT_FLAG_WRITE)) {
+ 		entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
+ 						vma->vm_page_prot));
+-		page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
++		ptl = pte_lockptr(mm, pmd);
++		spin_lock(ptl);
+ 		if (!pte_none(*page_table))
+ 			goto unlock;
+ 		goto setpte;
+ 	}
+ 
+ 	/* Allocate our own private page. */
++	pte_unmap(page_table);
++
+ 	if (unlikely(anon_vma_prepare(vma)))
+ 		goto oom;
+ 	page = alloc_zeroed_user_highpage_movable(vma, address);
+@@ -2825,6 +3002,11 @@ static int do_anonymous_page(struct mm_s
 	if (!pte_none(*page_table))
 		goto release;
 
@@ -50594,7 +51051,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	inc_mm_counter_fast(mm, MM_ANONPAGES);
 	page_add_new_anon_rmap(page, vma, address);
 setpte:
-@@ -2832,6 +3046,12 @@ setpte:
+@@ -2832,6 +3014,12 @@ setpte:
 
 	/* No need to invalidate - it was non-present before */
 	update_mmu_cache(vma, address, page_table);
@@ -50607,7 +51064,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 unlock:
 	pte_unmap_unlock(page_table, ptl);
 	return 0;
-@@ -2974,6 +3194,12 @@ static int __do_fault(struct mm_struct *
+@@ -2974,6 +3162,12 @@ static int __do_fault(struct mm_struct *
 	 */
 	/* Only go through if we didn't race with anybody else... */
 	if (likely(pte_same(*page_table, orig_pte))) {
@@ -50620,7 +51077,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 		flush_icache_page(vma, page);
 		entry = mk_pte(page, vma->vm_page_prot);
 		if (flags & FAULT_FLAG_WRITE)
-@@ -2993,6 +3219,14 @@ static int __do_fault(struct mm_struct *
+@@ -2993,6 +3187,14 @@ static int __do_fault(struct mm_struct *
 
 		/* no need to invalidate: a not-present page won't be cached */
 		update_mmu_cache(vma, address, page_table);
@@ -50635,7 +51092,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	} else {
 		if (charged)
 			mem_cgroup_uncharge_page(page);
-@@ -3140,6 +3374,12 @@ static inline int handle_pte_fault(struc
+@@ -3140,6 +3342,12 @@ static inline int handle_pte_fault(struc
 		if (flags & FAULT_FLAG_WRITE)
 			flush_tlb_page(vma, address);
 	}
@@ -50648,7 +51105,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 unlock:
 	pte_unmap_unlock(pte, ptl);
 	return 0;
-@@ -3156,6 +3396,10 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3156,6 +3364,10 @@ int handle_mm_fault(struct mm_struct *mm
 	pmd_t *pmd;
 	pte_t *pte;
 
@@ -50659,7 +51116,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	__set_current_state(TASK_RUNNING);
 
 	count_vm_event(PGFAULT);
-@@ -3166,6 +3410,34 @@ int handle_mm_fault(struct mm_struct *mm
+@@ -3166,6 +3378,34 @@ int handle_mm_fault(struct mm_struct *mm
 	if (unlikely(is_vm_hugetlb_page(vma)))
 		return hugetlb_fault(mm, vma, address, flags);
 
@@ -50694,7 +51151,7 @@ diff -urNp linux-2.6.34.7/mm/memory.c linux-2.6.34.7/mm/memory.c
 	pgd = pgd_offset(mm, address);
 	pud = pud_alloc(mm, pgd, address);
 	if (!pud)
-@@ -3263,7 +3535,7 @@ static int __init gate_vma_init(void)
+@@ -3263,7 +3503,7 @@ static int __init gate_vma_init(void)
 	gate_vma.vm_start = FIXADDR_USER_START;
 	gate_vma.vm_end = FIXADDR_USER_END;
 	gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
@@ -50873,7 +51330,7 @@ diff -urNp linux-2.6.34.7/mm/migrate.c linux-2.6.34.7/mm/migrate.c
 		goto out;
 diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
 --- linux-2.6.34.7/mm/mlock.c	2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/mlock.c	2010-09-04 15:37:36.000000000 -0400
++++ linux-2.6.34.7/mm/mlock.c	2010-09-17 18:44:51.000000000 -0400
 @@ -13,6 +13,7 @@
  #include <linux/pagemap.h>
  #include <linux/mempolicy.h>
@@ -50882,7 +51339,40 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
  #include <linux/sched.h>
  #include <linux/module.h>
  #include <linux/rmap.h>
+@@ -135,19 +136,6 @@ void munlock_vma_page(struct page *page)
+ 	}
+ }
+ 
+-/* Is the vma a continuation of the stack vma above it? */
+-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr)
+-{
+-	return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
+-}
+-
+-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
+-{
+-	return (vma->vm_flags & VM_GROWSDOWN) &&
+-		(vma->vm_start == addr) &&
+-		!vma_stack_continue(vma->vm_prev, addr);
+-}
+-
+ /**
+  * __mlock_vma_pages_range() -  mlock a range of pages in the vma.
+ * @vma:   target vma
+@@ -180,12 +168,6 @@ static long __mlock_vma_pages_range(stru
+ 	if (vma->vm_flags & VM_WRITE)
+ 		gup_flags |= FOLL_WRITE;
+ 
+-	/* We don't try to access the guard page of a stack vma */
+-	if (stack_guard_page(vma, start)) {
+-		addr += PAGE_SIZE;
+-		nr_pages--;
+-	}
+-
+ 	while (nr_pages > 0) {
+ 		int i;
+ 
-@@ -451,6 +452,9 @@ static int do_mlock(unsigned long start,
+@@ -451,6 +433,9 @@ static int do_mlock(unsigned long start,
 		return -EINVAL;
 	if (end == start)
 		return 0;
@@ -50892,7 +51382,7 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
 	vma = find_vma_prev(current->mm, start, &prev);
 	if (!vma || vma->vm_start > start)
 		return -ENOMEM;
-@@ -461,6 +465,11 @@ static int do_mlock(unsigned long start,
+@@ -461,6 +446,11 @@ static int do_mlock(unsigned long start,
 	for (nstart = start ; ; ) {
 		unsigned int newflags;
 
@@ -50904,7 +51394,7 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
 		/* Here we know that  vma->vm_start <= nstart < vma->vm_end. */
 
 		newflags = vma->vm_flags | VM_LOCKED;
-@@ -510,6 +519,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
+@@ -510,6 +500,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st
 	lock_limit >>= PAGE_SHIFT;
 
 	/* check against resource limits */
@@ -50912,7 +51402,7 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
 	if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
 		error = do_mlock(start, len, 1);
 	up_write(&current->mm->mmap_sem);
-@@ -531,17 +541,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
+@@ -531,17 +522,23 @@ SYSCALL_DEFINE2(munlock, unsigned long,
  static int do_mlockall(int flags)
  {
 	struct vm_area_struct * vma, * prev = NULL;
@@ -50940,17 +51430,17 @@ diff -urNp linux-2.6.34.7/mm/mlock.c linux-2.6.34.7/mm/mlock.c
 		newflags = vma->vm_flags | VM_LOCKED;
 		if (!(flags & MCL_CURRENT))
 			newflags &= ~VM_LOCKED;
-@@ -573,6 +589,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
+@@ -573,6 +570,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
 	lock_limit >>= PAGE_SHIFT;
 
 	ret = -ENOMEM;
-+	gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
++	gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
 	if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
 	    capable(CAP_IPC_LOCK))
 		ret = do_mlockall(flags);
 diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 --- linux-2.6.34.7/mm/mmap.c	2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/mmap.c	2010-09-04 15:37:36.000000000 -0400
++++ linux-2.6.34.7/mm/mmap.c	2010-09-17 18:52:03.000000000 -0400
 @@ -44,6 +44,16 @@
  #define arch_rebalance_pgtables(addr, len)		(addr)
  #endif
@@ -50968,7 +51458,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
  static void unmap_region(struct mm_struct *mm,
 		struct vm_area_struct *vma, struct vm_area_struct *prev,
 		unsigned long start, unsigned long end);
-@@ -69,16 +79,25 @@ static void unmap_region(struct mm_struc
+@@ -69,22 +79,32 @@ static void unmap_region(struct mm_struc
  * x: (no) no	x: (no) yes	x: (no) yes	x: (yes) yes
  *
  */
@@ -50996,7 +51486,14 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 }
 EXPORT_SYMBOL(vm_get_page_prot);
 
+ int sysctl_overcommit_memory = OVERCOMMIT_GUESS;  /* heuristic overcommit */
+ int sysctl_overcommit_ratio = 50;	/* default is 50% */
+ int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
+ struct percpu_counter vm_committed_as;
+ 
+ /*
-@@ -230,6 +249,7 @@ static struct vm_area_struct *remove_vma
+@@ -230,6 +250,7 @@ static struct vm_area_struct *remove_vma
 	struct vm_area_struct *next = vma->vm_next;
 
 	might_sleep();
@@ -51004,7 +51501,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (vma->vm_ops && vma->vm_ops->close)
 		vma->vm_ops->close(vma);
 	if (vma->vm_file) {
-@@ -266,6 +286,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
+@@ -266,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
 	 * not page aligned -Ram Gupta
 	 */
 	rlim = rlimit(RLIMIT_DATA);
@@ -51012,7 +51509,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
 			(mm->end_data - mm->start_data) > rlim)
 		goto out;
-@@ -695,6 +716,12 @@ static int
+@@ -695,6 +717,12 @@ static int
  can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -51025,7 +51522,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (is_mergeable_vma(vma, file, vm_flags) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
 		if (vma->vm_pgoff == vm_pgoff)
-@@ -714,6 +741,12 @@ static int
+@@ -714,6 +742,12 @@ static int
  can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
 	struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
  {
@@ -51038,7 +51535,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (is_mergeable_vma(vma, file, vm_flags) &&
 	    is_mergeable_anon_vma(anon_vma, vma->anon_vma)) {
 		pgoff_t vm_pglen;
-@@ -756,13 +789,20 @@ can_vma_merge_after(struct vm_area_struc
+@@ -756,13 +790,20 @@ can_vma_merge_after(struct vm_area_struc
  struct vm_area_struct *vma_merge(struct mm_struct *mm,
 			struct vm_area_struct *prev, unsigned long addr,
 			unsigned long end, unsigned long vm_flags,
@@ -51060,7 +51557,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/*
 	 * We later require that vma->vm_flags == vm_flags,
 	 * so this tests vma->vm_flags & VM_SPECIAL, too.
-@@ -778,6 +818,15 @@ struct vm_area_struct *vma_merge(struct
+@@ -778,6 +819,15 @@ struct vm_area_struct *vma_merge(struct
 	if (next && next->vm_end == end)		/* cases 6, 7, 8 */
 		next = next->vm_next;
 
@@ -51076,7 +51573,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/*
 	 * Can it merge with the predecessor?
 	 */
-@@ -797,9 +846,24 @@ struct vm_area_struct *vma_merge(struct
+@@ -797,9 +847,24 @@ struct vm_area_struct *vma_merge(struct
 			/* cases 1, 6 */
 			err = vma_adjust(prev, prev->vm_start,
 				next->vm_end, prev->vm_pgoff, NULL);
@@ -51102,7 +51599,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		if (err)
 			return NULL;
 		return prev;
-@@ -812,12 +876,27 @@ struct vm_area_struct *vma_merge(struct
+@@ -812,12 +877,27 @@ struct vm_area_struct *vma_merge(struct
  			mpol_equal(policy, vma_policy(next)) &&
 			can_vma_merge_before(next, vm_flags,
 					     anon_vma, file, pgoff+pglen)) {
@@ -51132,7 +51629,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		if (err)
 			return NULL;
 		return area;
-@@ -932,14 +1011,11 @@ none:
+@@ -932,14 +1012,11 @@ none:
  void vm_stat_account(struct mm_struct *mm, unsigned long flags,
 						struct file *file, long pages)
  {
@@ -51148,7 +51645,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		mm->stack_vm += pages;
 	if (flags & (VM_RESERVED|VM_IO))
 		mm->reserved_vm += pages;
-@@ -966,7 +1042,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -966,7 +1043,7 @@ unsigned long do_mmap_pgoff(struct file
 	 * (the exception is when the underlying filesystem is noexec
 	 *  mounted, in which case we dont add PROT_EXEC.)
 	 */
@@ -51157,7 +51654,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
 			prot |= PROT_EXEC;
 
-@@ -992,7 +1068,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file
 	/* Obtain the address to map to. we verify (or select) it and ensure
 	 * that it represents a valid section of the address space.
 	 */
@@ -51166,7 +51663,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (addr & ~PAGE_MASK)
 		return addr;
 
-@@ -1003,6 +1079,28 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1003,6 +1080,28 @@ unsigned long do_mmap_pgoff(struct file
 	vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
 			mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 
@@ -51195,7 +51692,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (flags & MAP_LOCKED)
 		if (!can_do_mlock())
 			return -EPERM;
-@@ -1014,6 +1112,7 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1014,6 +1113,7 @@ unsigned long do_mmap_pgoff(struct file
 		locked += mm->locked_vm;
 		lock_limit = rlimit(RLIMIT_MEMLOCK);
 		lock_limit >>= PAGE_SHIFT;
@@ -51203,7 +51700,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		if (locked > lock_limit && !capable(CAP_IPC_LOCK))
 			return -EAGAIN;
 	}
-@@ -1084,6 +1183,9 @@ unsigned long do_mmap_pgoff(struct file
+@@ -1084,6 +1184,9 @@ unsigned long do_mmap_pgoff(struct file
 	if (error)
 		return error;
 
@@ -51213,7 +51710,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	return mmap_region(file, addr, len, flags, vm_flags, pgoff);
 }
 EXPORT_SYMBOL(do_mmap_pgoff);
-@@ -1160,10 +1262,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
+@@ -1160,10 +1263,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar
  */
  int vma_wants_writenotify(struct vm_area_struct *vma)
  {
@@ -51226,7 +51723,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		return 0;
 
 	/* The backer wishes to know when pages are first written to? */
-@@ -1212,14 +1314,24 @@ unsigned long mmap_region(struct file *f
+@@ -1212,14 +1315,24 @@ unsigned long mmap_region(struct file *f
 	unsigned long charged = 0;
 	struct inode *inode =  file ? file->f_path.dentry->d_inode : NULL;
 
@@ -51253,7 +51750,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	}
 
 	/* Check against address space limit. */
-@@ -1268,6 +1380,16 @@ munmap_back:
+@@ -1268,6 +1381,16 @@ munmap_back:
 		goto unacct_error;
 	}
 
@@ -51270,7 +51767,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	vma->vm_mm = mm;
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
-@@ -1291,6 +1413,19 @@ munmap_back:
+@@ -1291,6 +1414,19 @@ munmap_back:
 		error = file->f_op->mmap(file, vma);
 		if (error)
 			goto unmap_and_free_vma;
@@ -51290,7 +51787,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		if (vm_flags & VM_EXECUTABLE)
 			added_exe_file_vma(mm);
 
-@@ -1326,6 +1461,11 @@ munmap_back:
+@@ -1326,6 +1462,11 @@ munmap_back:
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 	file = vma->vm_file;
 
@@ -51302,7 +51799,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/* Once vma denies write, undo our temporary denial count */
 	if (correct_wcount)
 		atomic_inc(&inode->i_writecount);
-@@ -1334,6 +1474,7 @@ out:
+@@ -1334,6 +1475,7 @@ out:
 
 	mm->total_vm += len >> PAGE_SHIFT;
 	vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
@@ -51310,7 +51807,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (vm_flags & VM_LOCKED) {
 		if (!mlock_vma_pages_range(vma, addr, addr + len))
 			mm->locked_vm += (len >> PAGE_SHIFT);
-@@ -1351,6 +1492,12 @@ unmap_and_free_vma:
+@@ -1351,6 +1493,12 @@ unmap_and_free_vma:
 	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
 	charged = 0;
 free_vma:
@@ -51323,7 +51820,41 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	kmem_cache_free(vm_area_cachep, vma);
 unacct_error:
 	if (charged)
-@@ -1384,6 +1531,10 @@ arch_get_unmapped_area(struct file *filp
+@@ -1358,6 +1506,33 @@ unacct_error:
+ 	return error;
+ }
+ 
++bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len)
++{
++	if (!vma) {
++#ifdef CONFIG_STACK_GROWSUP
++		if (addr > sysctl_heap_stack_gap)
++			vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
++		else
++			vma = find_vma(current->mm, 0);
++		if (vma && (vma->vm_flags & VM_GROWSUP))
++			return false;
++#endif
++		return true;
++	}
++
++	if (addr + len > vma->vm_start)
++		return false;
++
++	if (vma->vm_flags & VM_GROWSDOWN)
++		return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
++#ifdef CONFIG_STACK_GROWSUP
++	else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
++		return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap;
++#endif
++
++	return true;
++}
++
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+ *
+@@ -1384,18 +1559,23 @@ arch_get_unmapped_area(struct file *filp
 	if (flags & MAP_FIXED)
 		return addr;
 
@@ -51333,9 +51864,15 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-		vma = find_vma(mm, addr);
-@@ -1392,10 +1543,10 @@ arch_get_unmapped_area(struct file *filp
-			return addr;
+-		vma = find_vma(mm, addr);
+-		if (TASK_SIZE - len >= addr &&
+-		    (!vma || addr + len <= vma->vm_start))
+-			return addr;
++		if (TASK_SIZE - len >= addr) {
++			vma = find_vma(mm, addr);
++			if (check_heap_stack_gap(vma, addr, len))
++				return addr;
++		}
 	}
 	if (len > mm->cached_hole_size) {
-	        start_addr = addr = mm->free_area_cache;
@@ -51348,7 +51885,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	}
 
 full_search:
-@@ -1406,9 +1557,8 @@ full_search:
+@@ -1406,34 +1586,40 @@ full_search:
 			 * Start a new search - just in case we missed
 			 * some holes.
 			 */
@@ -51360,7 +51897,29 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 				mm->cached_hole_size = 0;
 				goto full_search;
 			}
-@@ -1430,10 +1580,16 @@ full_search:
+ 			return -ENOMEM;
+ 		}
+-		if (!vma || addr + len <= vma->vm_start) {
+-			/*
+-			 * Remember the place where we stopped the search:
+-			 */
+-			mm->free_area_cache = addr + len;
+-			return addr;
+-		}
++		if (check_heap_stack_gap(vma, addr, len))
++			break;
+ 		if (addr + mm->cached_hole_size < vma->vm_start)
+ 			mm->cached_hole_size = vma->vm_start - addr;
+ 		addr = vma->vm_end;
+ 	}
++
++	/*
++	 * Remember the place where we stopped the search:
++	 */
++	mm->free_area_cache = addr + len;
++	return addr;
+ }
+ #endif
 
  void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
  {
@@ -51378,7 +51937,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		mm->free_area_cache = addr;
 		mm->cached_hole_size = ~0UL;
 	}
-@@ -1451,7 +1607,7 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1451,7 +1637,7 @@ arch_get_unmapped_area_topdown(struct fi
  {
 	struct vm_area_struct *vma;
 	struct mm_struct *mm = current->mm;
@@ -51387,7 +51946,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 	/* requested length too big for entire address space */
 	if (len > TASK_SIZE)
-@@ -1460,6 +1616,10 @@ arch_get_unmapped_area_topdown(struct fi
+@@ -1460,13 +1646,18 @@ arch_get_unmapped_area_topdown(struct fi
 	if (flags & MAP_FIXED)
 		return addr;
 
@@ -51398,7 +51957,37 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/* requesting a specific address */
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
-@@ -1517,13 +1677,21 @@ bottomup:
+-		vma = find_vma(mm, addr);
+-		if (TASK_SIZE - len >= addr &&
+-				(!vma || addr + len <= vma->vm_start))
+-			return addr;
++		if (TASK_SIZE - len >= addr) {
++			vma = find_vma(mm, addr);
++			if (check_heap_stack_gap(vma, addr, len))
++				return addr;
++		}
 	}
 
 	/* check if free_area_cache is useful for us */
+@@ -1481,7 +1672,7 @@ arch_get_unmapped_area_topdown(struct fi
 	/* make sure it can fit in the remaining address space */
 	if (addr > len) {
 		vma = find_vma(mm, addr-len);
-		if (!vma || addr <= vma->vm_start)
+		if (check_heap_stack_gap(vma, addr - len, len))
 			/* remember the address as a hint for next time */
 			return (mm->free_area_cache = addr-len);
 	}
+@@ -1498,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi
 	 * return with success:
 	 */
 	vma = find_vma(mm, addr);
-	if (!vma || addr+len <= vma->vm_start)
+	if (check_heap_stack_gap(vma, addr, len))
 		/* remember the address as a hint for next time */
 		return (mm->free_area_cache = addr);
 
+@@ -1517,13 +1708,21 @@ bottomup:
 	 * can happen with large stack limits and large mmap()
 	 * allocations.
 	 */
@@ -51422,7 +52011,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	mm->cached_hole_size = ~0UL;
 
 	return addr;
-@@ -1532,6 +1700,12 @@ bottomup:
+@@ -1532,6 +1731,12 @@ bottomup:
 
  void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
  {
@@ -51435,7 +52024,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/*
 	 * Is this a new hole at the highest possible address?
 	 */
-@@ -1539,8 +1713,10 @@ void arch_unmap_area_topdown(struct mm_s
+@@ -1539,8 +1744,10 @@ void arch_unmap_area_topdown(struct mm_s
 		mm->free_area_cache = addr;
 
 	/* dont allow allocations above current base */
@@ -51447,7 +52036,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 }
 
 unsigned long
-@@ -1648,6 +1824,34 @@ out:
+@@ -1648,6 +1855,34 @@ out:
 	return prev ? prev->vm_next : vma;
 }
 
@@ -51482,7 +52071,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 /*
  * Verify that the stack growth is acceptable and
  * update accounting. This is shared with both the
-@@ -1664,6 +1868,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1664,6 +1899,7 @@ static int acct_stack_growth(struct vm_a
 		return -ENOMEM;
 
 	/* Stack limit test */
@@ -51490,7 +52079,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
 		return -ENOMEM;
 
-@@ -1674,6 +1879,7 @@ static int acct_stack_growth(struct vm_a
+@@ -1674,6 +1910,7 @@ static int acct_stack_growth(struct vm_a
 		locked = mm->locked_vm + grow;
 		limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
 		limit >>= PAGE_SHIFT;
@@ -51498,7 +52087,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
 	}
-@@ -1709,35 +1915,40 @@ static
+@@ -1709,35 +1946,42 @@ static
  #endif
  int expand_upwards(struct vm_area_struct *vma, unsigned long address)
  {
@@ -51521,7 +52110,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (unlikely(anon_vma_prepare(vma)))
 		return -ENOMEM;
 +	locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
-+	if (locknext && unlikely(anon_vma_prepare(vma->vm_next)))
++	if (locknext && anon_vma_prepare(vma->vm_next))
 +		return -ENOMEM;
 	anon_vma_lock(vma);
 +	if (locknext)
@@ -51545,11 +52134,13 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 	/* Somebody else might have raced and expanded it already */
 -	if (address > vma->vm_end) {
-+	if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
++	if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
++		error = -ENOMEM;
++	else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
 		unsigned long size, grow;
 
 		size = address - vma->vm_start;
-@@ -1747,6 +1958,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1747,6 +1991,8 @@ int expand_upwards(struct vm_area_struct
 		if (!error)
 			vma->vm_end = address;
 	}
@@ -51558,25 +52149,25 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	anon_vma_unlock(vma);
 	return error;
 }
-@@ -1758,7 +1971,8 @@ int expand_upwards(struct vm_area_struct
+@@ -1758,7 +2004,8 @@ int expand_upwards(struct vm_area_struct
  static int expand_downwards(struct vm_area_struct *vma,
 				   unsigned long address)
  {
 -	int error;
 +	int error, lockprev = 0;
-+	struct vm_area_struct *prev = NULL;
++	struct vm_area_struct *prev;
 
 	/*
 	 * We must make sure the anon_vma is allocated
-@@ -1772,6 +1986,15 @@ static int expand_downwards(struct vm_ar
+@@ -1772,6 +2019,15 @@ static int expand_downwards(struct vm_ar
 	if (error)
 		return error;
 
++	prev = vma->vm_prev;
 +#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
-+	find_vma_prev(vma->vm_mm, address, &prev);
 +	lockprev = prev && (prev->vm_flags & VM_GROWSUP);
 +#endif
-+	if (lockprev && unlikely(anon_vma_prepare(prev)))
++	if (lockprev && anon_vma_prepare(prev))
 +		return -ENOMEM;
 +	if (lockprev)
 +		anon_vma_lock(prev);
@@ -51584,12 +52175,14 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	anon_vma_lock(vma);
 
 	/*
-@@ -1781,9 +2004,15 @@ static int expand_downwards(struct vm_ar
+@@ -1781,9 +2037,17 @@ static int expand_downwards(struct vm_ar
 	 */
 
 	/* Somebody else might have raced and expanded it already */
 -	if (address < vma->vm_start) {
-+	if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
++	if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
++		error = -ENOMEM;
++	else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
 		unsigned long size, grow;
 
 +#ifdef CONFIG_PAX_SEGMEXEC
@@ -51601,7 +52194,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		size = vma->vm_end - address;
 		grow = (vma->vm_start - address) >> PAGE_SHIFT;
 
-@@ -1791,9 +2020,20 @@ static int expand_downwards(struct vm_ar
+@@ -1791,9 +2055,20 @@ static int expand_downwards(struct vm_ar
 		if (!error) {
 			vma->vm_start = address;
 			vma->vm_pgoff -= grow;
@@ -51622,7 +52215,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	return error;
 }
 
-@@ -1867,6 +2107,13 @@ static void remove_vma_list(struct mm_st
+@@ -1867,6 +2142,13 @@ static void remove_vma_list(struct mm_st
 	do {
 		long nrpages = vma_pages(vma);
 
@@ -51636,7 +52229,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		mm->total_vm -= nrpages;
 		vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
 		vma = remove_vma(vma);
-@@ -1912,6 +2159,16 @@ detach_vmas_to_be_unmapped(struct mm_str
+@@ -1912,6 +2194,16 @@ detach_vmas_to_be_unmapped(struct mm_str
 	insertion_point = (prev ? &prev->vm_next : &mm->mmap);
 	vma->vm_prev = NULL;
 	do {
@@ -51653,7 +52246,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		rb_erase(&vma->vm_rb, &mm->mm_rb);
 		mm->map_count--;
 		tail_vma = vma;
-@@ -1940,14 +2197,33 @@ static int __split_vma(struct mm_struct
+@@ -1940,14 +2232,33 @@ static int __split_vma(struct mm_struct
 	struct vm_area_struct *new;
 	int err = -ENOMEM;
 
@@ -51687,7 +52280,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
 
-@@ -1960,6 +2236,22 @@ static int __split_vma(struct mm_struct
+@@ -1960,6 +2271,22 @@ static int __split_vma(struct mm_struct
 		new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
 	}
 
@@ -51710,7 +52303,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	pol = mpol_dup(vma_policy(vma));
 	if (IS_ERR(pol)) {
 		err = PTR_ERR(pol);
-@@ -1985,6 +2277,42 @@ static int __split_vma(struct mm_struct
+@@ -1985,6 +2312,42 @@ static int __split_vma(struct mm_struct
 	else
 		err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
 
@@ -51753,7 +52346,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/* Success. */
 	if (!err)
 		return 0;
-@@ -2000,6 +2328,15 @@ static int __split_vma(struct mm_struct
+@@ -2000,6 +2363,15 @@ static int __split_vma(struct mm_struct
  out_free_mpol:
 	mpol_put(pol);
  out_free_vma:
@@ -51769,7 +52362,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	kmem_cache_free(vm_area_cachep, new);
  out_err:
 	return err;
-@@ -2012,6 +2349,15 @@ static int __split_vma(struct mm_struct
+@@ -2012,6 +2384,15 @@ static int __split_vma(struct mm_struct
  int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
 	      unsigned long addr, int new_below)
  {
@@ -51785,7 +52378,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (mm->map_count >= sysctl_max_map_count)
 		return -ENOMEM;
 
-@@ -2023,11 +2369,30 @@ int split_vma(struct mm_struct *mm, stru
+@@ -2023,11 +2404,30 @@ int split_vma(struct mm_struct *mm, stru
  * work.  This now handles partial unmappings.
  * Jeremy Fitzhardinge <jeremy@goop.org>
  */
@@ -51816,7 +52409,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
 		return -EINVAL;
 
-@@ -2101,6 +2466,8 @@ int do_munmap(struct mm_struct *mm, unsi
+@@ -2101,6 +2501,8 @@ int do_munmap(struct mm_struct *mm, unsi
 	/* Fix up all other VM information */
 	remove_vma_list(mm, vma);
 
@@ -51825,7 +52418,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	return 0;
 }
 
-@@ -2113,22 +2480,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
+@@ -2113,22 +2515,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a
 
 	profile_munmap(addr);
 
@@ -51854,7 +52447,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 /*
  *  this is really a simplified "do_mmap".  it only handles
  *  anonymous maps.  eventually we may be able to do some
-@@ -2142,6 +2505,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -2142,6 +2540,7 @@ unsigned long do_brk(unsigned long addr,
 	struct rb_node ** rb_link, * rb_parent;
 	pgoff_t pgoff = addr >> PAGE_SHIFT;
 	int error;
@@ -51862,7 +52455,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 	len = PAGE_ALIGN(len);
 	if (!len)
-@@ -2153,16 +2517,30 @@ unsigned long do_brk(unsigned long addr,
+@@ -2153,16 +2552,30 @@ unsigned long do_brk(unsigned long addr,
 
 	flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
 
@@ -51894,7 +52487,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		locked += mm->locked_vm;
 		lock_limit = rlimit(RLIMIT_MEMLOCK);
 		lock_limit >>= PAGE_SHIFT;
-@@ -2179,22 +2557,22 @@ unsigned long do_brk(unsigned long addr,
+@@ -2179,22 +2592,22 @@ unsigned long do_brk(unsigned long addr,
 	/*
 	 * Clear old maps.  this also does some error checking for us
 	 */
@@ -51921,7 +52514,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		return -ENOMEM;
 
 	/* Can we just expand an old private anonymous mapping? */
-@@ -2208,7 +2586,7 @@ unsigned long do_brk(unsigned long addr,
+@@ -2208,7 +2621,7 @@ unsigned long do_brk(unsigned long addr,
 	 */
 	vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
 	if (!vma) {
@@ -51930,7 +52523,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 		return -ENOMEM;
 	}
 
-@@ -2221,11 +2599,12 @@ unsigned long do_brk(unsigned long addr,
+@@ -2221,11 +2634,12 @@ unsigned long do_brk(unsigned long addr,
 	vma->vm_page_prot = vm_get_page_prot(flags);
 	vma_link(mm, vma, prev, rb_link, rb_parent);
 out:
@@ -51945,7 +52538,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	return addr;
 }
 
-@@ -2272,8 +2651,10 @@ void exit_mmap(struct mm_struct *mm)
+@@ -2272,8 +2686,10 @@ void exit_mmap(struct mm_struct *mm)
 	 * Walk the list again, actually closing and freeing it,
 	 * with preemption enabled, without holding any MM locks.
 	 */
@@ -51957,7 +52550,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 	BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT);
 }
-@@ -2287,6 +2668,10 @@ int insert_vm_struct(struct mm_struct *
+@@ -2287,6 +2703,10 @@ int insert_vm_struct(struct mm_struct *
 	struct vm_area_struct * __vma, * prev;
 	struct rb_node ** rb_link, * rb_parent;
 
@@ -51968,7 +52561,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/*
 	 * The vm_pgoff of a purely anonymous vma should be irrelevant
 	 * until its first write fault, when page's anon_vma and index
-@@ -2309,7 +2694,22 @@ int insert_vm_struct(struct mm_struct *
+@@ -2309,7 +2729,22 @@ int insert_vm_struct(struct mm_struct *
 	if ((vma->vm_flags & VM_ACCOUNT) &&
 	     security_vm_enough_memory_mm(mm, vma_pages(vma)))
 		return -ENOMEM;
@@ -51991,7 +52584,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	return 0;
 }
 
-@@ -2327,6 +2727,8 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2327,6 +2762,8 @@ struct vm_area_struct *copy_vma(struct v
 	struct rb_node **rb_link, *rb_parent;
 	struct mempolicy *pol;
 
@@ -52000,7 +52593,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	/*
 	 * If anonymous vma has not yet been faulted, update new pgoff
 	 * to match new location, to increase its chance of merging.
-@@ -2376,6 +2778,39 @@ struct vm_area_struct *copy_vma(struct v
+@@ -2376,6 +2813,39 @@ struct vm_area_struct *copy_vma(struct v
 	kmem_cache_free(vm_area_cachep, new_vma);
 	return NULL;
 }
@@ -52040,7 +52633,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 /*
  * Return true if the calling process may expand its vm space by the passed
-@@ -2387,7 +2822,7 @@ int may_expand_vm(struct mm_struct *mm,
+@@ -2387,7 +2857,7 @@ int may_expand_vm(struct mm_struct *mm,
 	unsigned long lim;
 
 	lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
@@ -52049,7 +52642,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 	if (cur + npages > lim)
 		return 0;
 	return 1;
-@@ -2457,6 +2892,17 @@ int install_special_mapping(struct mm_st
+@@ -2457,6 +2927,17 @@ int install_special_mapping(struct mm_st
 	vma->vm_start = addr;
 	vma->vm_end = addr + len;
 
@@ -52069,7 +52662,7 @@ diff -urNp linux-2.6.34.7/mm/mmap.c linux-2.6.34.7/mm/mmap.c
 
 diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 --- linux-2.6.34.7/mm/mprotect.c	2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/mm/mprotect.c	2010-08-13 18:38:12.000000000 -0400
++++ linux-2.6.34.7/mm/mprotect.c	2010-09-17 18:52:03.000000000 -0400
 @@ -23,10 +23,16 @@
  #include <linux/mmu_notifier.h>
  #include <linux/migrate.h>
@@ -52136,7 +52729,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
  int
  mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
 	unsigned long start, unsigned long end, unsigned long newflags)
-@@ -143,6 +191,14 @@ mprotect_fixup(struct vm_area_struct *vm
+@@ -143,11 +191,29 @@ mprotect_fixup(struct vm_area_struct *vm
 	int error;
 	int dirty_accountable = 0;
 
@@ -52151,7 +52744,22 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 	if (newflags == oldflags) {
 		*pprev = vma;
 		return 0;
-@@ -164,6 +220,42 @@ mprotect_fixup(struct vm_area_struct *vm
+ 	}
+ 
++	if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
++		struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
++
++		if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
++			return -ENOMEM;
++
++		if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
++			return -ENOMEM;
++	}
++
+ 	/*
+ 	 * If we make a private mapping writable we increase our commit;
+ 	 * but (without finer accounting) cannot reduce our commit if we
+@@ -164,6 +230,42 @@ mprotect_fixup(struct vm_area_struct *vm
 		}
 	}
 
@@ -52194,7 +52802,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 	/*
 	 * First try to merge with previous and/or next vma.
 	 */
-@@ -194,9 +286,21 @@ success:
+@@ -194,9 +296,21 @@ success:
 	 * vm_flags and vm_page_prot are protected by the mmap_sem
 	 * held in write mode.
 	 */
@@ -52217,7 +52825,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 
 	if (vma_wants_writenotify(vma)) {
 		vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
-@@ -237,6 +341,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -237,6 +351,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 	end = start + len;
 	if (end <= start)
 		return -ENOMEM;
@@ -52235,7 +52843,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 	if (!arch_validate_prot(prot))
 		return -EINVAL;
 
-@@ -244,7 +359,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -244,7 +369,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 	/*
 	 * Does the application expect PROT_READ to imply PROT_EXEC:
 	 */
@@ -52244,7 +52852,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 		prot |= PROT_EXEC;
 
 	vm_flags = calc_vm_prot_bits(prot);
-@@ -276,6 +391,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -276,6 +401,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 	if (start > vma->vm_start)
 		prev = vma;
 
@@ -52261,7 +52869,7 @@ diff -urNp linux-2.6.34.7/mm/mprotect.c linux-2.6.34.7/mm/mprotect.c
 	for (nstart = start ; ; ) {
 		unsigned long newflags;
 
-@@ -300,6 +425,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
+@@ -300,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long,
 		if (error)
 			goto out;
 		perf_event_mmap(vma);
@@ -52376,8 +52984,16 @@ diff -urNp linux-2.6.34.7/mm/mremap.c linux-2.6.34.7/mm/mremap.c
 	if (ret & ~PAGE_MASK)
 diff -urNp linux-2.6.34.7/mm/nommu.c linux-2.6.34.7/mm/nommu.c
 --- linux-2.6.34.7/mm/nommu.c	2010-08-29 21:16:43.000000000 -0400
-+++ linux-2.6.34.7/mm/nommu.c	2010-08-29 21:17:12.000000000 -0400
-@@ -762,15 +762,6 @@ struct vm_area_struct *find_vma(struct m
++++ linux-2.6.34.7/mm/nommu.c	2010-09-17 18:52:03.000000000 -0400
+@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI
+ int sysctl_overcommit_ratio = 50; /* default is 50% */
+ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
+ int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
+-int heap_stack_gap = 0;
+ 
+ atomic_long_t mmap_pages_allocated;
+ 
+@@ -762,15 +761,6 @@ struct vm_area_struct *find_vma(struct m
 EXPORT_SYMBOL(find_vma);
 
 /*
@@ -52393,7 +53009,7 @@ diff -urNp linux-2.6.34.7/mm/nommu.c linux-2.6.34.7/mm/nommu.c
  * expand a stack to a given address
  * - not supported under NOMMU conditions
  */
-@@ -1487,6 +1478,7 @@ int split_vma(struct mm_struct *mm, stru
+@@ -1487,6 +1477,7 @@ int split_vma(struct mm_struct *mm, stru
 	/* most fields are the same, copy all, and then fixup */
 	*new = *vma;
 
@@ -55100,7 +55716,7 @@ diff -urNp linux-2.6.34.7/security/integrity/ima/ima_queue.c linux-2.6.34.7/secu
 	return 0;
 diff -urNp linux-2.6.34.7/security/Kconfig linux-2.6.34.7/security/Kconfig
 --- linux-2.6.34.7/security/Kconfig	2010-08-13 16:29:15.000000000 -0400
-+++ linux-2.6.34.7/security/Kconfig	2010-09-15 02:12:09.000000000 -0400
++++ linux-2.6.34.7/security/Kconfig	2010-09-17 17:39:50.000000000 -0400
 @@ -4,6 +4,505 @@
 
  menu "Security options"
@@ -55124,7 +55740,7 @@ diff -urNp linux-2.6.34.7/security/Kconfig linux-2.6.34.7/security/Kconfig
 +
 +config PAX
 +	bool "Enable various PaX features"
-+	depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
++	depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
 +	help
 +	  This allows you to enable various PaX features.  PaX adds
 +	  intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.34/4425_grsec-pax-without-grsec.patch b/2.6.34/4425_grsec-pax-without-grsec.patch
index 9aec296..2fc4199 100644
--- a/2.6.34/4425_grsec-pax-without-grsec.patch
+++ b/2.6.34/4425_grsec-pax-without-grsec.patch
@@ -54,7 +54,7 @@ The original version of this patch contained no credits/description.
 		current->comm, task_pid_nr(current), current_uid(), current_euid());
 	print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
 @@ -1846,10 +1850,12 @@
- #ifdef CONFIG_PAX_USERCOPY
+ void pax_report_leak_to_user(const void *ptr, unsigned long len)
  {
 +#ifdef CONFIG_GRKERNSEC
@@ -81,12 +81,12 @@ The original version of this patch contained no credits/description.
 	do_group_exit(SIGKILL);
 --- a/security/Kconfig
 +++ b/security/Kconfig
-@@ -23,7 +23,7 @@ menu "PaX"
- 
+@@ -23,7 +23,7 @@
+ config PAX
 	bool "Enable various PaX features"
-- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
-+ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS32 || MIPS64 || PARISC || PPC || SPARC || X86)
+- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86)
  help
 	  This allows you to enable various PaX features.  PaX adds
 	  intrusion prevention mechanisms to the kernel that reduce
diff --git a/2.6.34/4430_grsec-kconfig-default-gids.patch b/2.6.34/4430_grsec-kconfig-default-gids.patch
index b7a0413..7ba8aa2 100644
--- a/2.6.34/4430_grsec-kconfig-default-gids.patch
+++ b/2.6.34/4430_grsec-kconfig-default-gids.patch
@@ -29,25 +29,25 @@ from shooting themselves in the foot.
 
  config GRKERNSEC_EXECLOG
 	bool "Exec logging"
-@@ -780,7 +780,7 @@
+@@ -785,7 +785,7 @@
  config GRKERNSEC_TPE_GID
 	int "GID for untrusted users"
 	depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
-	default 1005
+	default 100
 	help
-	  If you have selected the "Invert GID option" above, setting this
-	  GID determines what group TPE restrictions will be *disabled* for.
+	  Setting this GID determines what group TPE restrictions will be
+	  *enabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -792,7 +792,7 @@
+@@ -794,7 +794,7 @@
  config GRKERNSEC_TPE_GID
 	int "GID for trusted users"
 	depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
-	default 1005
+	default 10
 	help
-	  If you have selected the "Invert GID option" above, setting this
-	  GID determines what group TPE restrictions will be *disabled* for.
+	  Setting this GID determines what group TPE restrictions will be
+	  *disabled* for.  If the sysctl option is enabled, a sysctl option
-@@ -866,7 +866,7 @@
+@@ -865,7 +865,7 @@
  config GRKERNSEC_SOCKET_ALL_GID
 	int "GID to deny all sockets for"
 	depends on GRKERNSEC_SOCKET_ALL
@@ -56,7 +56,7 @@ from shooting themselves in the foot.
 	help
 	  Here you can choose the GID to disable socket access for.
 	  Remember to add the users you want socket access disabled for to the GID
-@@ -887,7 +887,7 @@
+@@ -886,7 +886,7 @@
  config GRKERNSEC_SOCKET_CLIENT_GID
 	int "GID to deny client sockets for"
 	depends on GRKERNSEC_SOCKET_CLIENT
@@ -65,7 +65,7 @@ from shooting themselves in the foot.
 	help
 	  Here you can choose the GID to disable client socket access for.
 	  Remember to add the users you want client socket access disabled for to
-@@ -905,7 +905,7 @@
+@@ -904,7 +904,7 @@
  config GRKERNSEC_SOCKET_SERVER_GID
 	int "GID to deny server sockets for"
 	depends on GRKERNSEC_SOCKET_SERVER
diff --git a/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch b/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch
index dfedf22..64d6cf3 100644
--- a/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch
+++ b/2.6.34/4440_selinux-avc_audit-log-curr_ip.patch
@@ -21,7 +21,7 @@ Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
 --- a/grsecurity/Kconfig
 +++ b/grsecurity/Kconfig
-@@ -1372,6 +1372,27 @@
+@@ -1371,6 +1371,27 @@
  menu "Logging Options"
  depends on GRKERNSEC
 
diff --git a/2.6.35/0000_README b/2.6.35/0000_README
new file mode 100644
index 0000000..53076a6
--- /dev/null
+++ b/2.6.35/0000_README
@@ -0,0 +1,50 @@
+README
+-----------------------------------------------------------------------------
+
+Individual Patch Descriptions:
+-----------------------------------------------------------------------------
+Patch:	4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch
+From:	http://www.grsecurity.net
+Desc:	hardened-sources base patch from upstream grsecurity
+
+Patch:	4421_grsec-remove-localversion-grsec.patch
+From:	Kerin Millar <kerframil@gmail.com>
+Desc:	Removes grsecurity's localversion-grsec file
+
+Patch:	4422_grsec-mute-warnings.patch
+From:	Alexander Gabert <gaberta@fh-trier.de>
+	Gordon Malm <gengor@gentoo.org>
+Desc:	Removes verbose compile warning settings from grsecurity, restores
+	mainline Linux kernel behavior
+
+Patch:	4423_grsec-remove-protected-paths.patch
+From:	Anthony G. Basile, Ph. D. <basile@opensource.dyc.edu>
+Desc:	Removes chmod statements from grsecurity/Makefile
+
+Patch:	4425_grsec-pax-without-grsec.patch
+From:	Gordon Malm <gengor@gentoo.org>
+Desc:	Allows PaX features to be selected without enabling GRKERNSEC
+
+Patch:	4430_grsec-kconfig-default-gids.patch
+From:	Kerin Millar <kerframil@gmail.com>
+Desc:	Sets sane(r) default GIDs on various grsecurity group-dependent
+	features
+
+Patch:	4435_grsec-kconfig-gentoo.patch
+From:	Gordon Malm <gengor@gentoo.org>
+	Kerin Millar <kerframil@gmail.com>
+Desc:	Adds Hardened Gentoo [server/workstation] security levels, sets
+	Hardened Gentoo [workstation] as default
+
+Patch:	4440_selinux-avc_audit-log-curr_ip.patch
+From:	Gordon Malm <gengor@gentoo.org>
+Desc:	Configurable option to add src IP address to SELinux log messages
+
+Patch:	4445_disable-compat_vdso.patch
+From:	Gordon Malm <gengor@gentoo.org>
+	Kerin Millar <kerframil@gmail.com>
+Desc:	Disables VDSO_COMPAT operation completely
+
+Patch:	4450_check_ssp_fix.patch
+From:	Magnus Granberg <zorry@gentoo.org>
+Desc:	Fixes kernel check script for ssp
diff --git a/2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch b/2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch
new file mode 100644
index 0000000..ff2fb9b
--- /dev/null
+++ b/2.6.35/4420_grsecurity-2.2.0-2.6.35.4-201009172030.patch
@@ -0,0 +1,56800 @@
+diff -urNp linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h
+--- linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/include/asm/dma-mapping.h	2010-09-17 20:12:09.000000000 -0400
+@@ -3,9 +3,9 @@
+ 
+ #include <linux/dma-attrs.h>
+ 
+-extern struct dma_map_ops *dma_ops;
++extern const struct dma_map_ops *dma_ops;
+ 
+-static inline struct dma_map_ops *get_dma_ops(struct device *dev)
++static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
+ {
+ 	return dma_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/alpha/include/asm/elf.h linux-2.6.35.4/arch/alpha/include/asm/elf.h
+--- linux-2.6.35.4/arch/alpha/include/asm/elf.h	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/include/asm/elf.h	2010-09-17 20:12:09.000000000 -0400
+@@ -90,6 +90,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ 
+ #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x1000000)
+ 
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE	(current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LEN	(current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+   registered using atexit.  This provides a mean for the dynamic
+   linker to call DT_FINI functions for shared libraries that have
+diff -urNp linux-2.6.35.4/arch/alpha/include/asm/pgtable.h linux-2.6.35.4/arch/alpha/include/asm/pgtable.h
+--- linux-2.6.35.4/arch/alpha/include/asm/pgtable.h	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/include/asm/pgtable.h	2010-09-17 20:12:09.000000000 -0400
+@@ -101,6 +101,17 @@ struct vm_area_struct;
+ #define PAGE_SHARED	__pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC	__pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC	PAGE_SHARED
++# define PAGE_COPY_NOEXEC	PAGE_COPY
++# define PAGE_READONLY_NOEXEC	PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL	__pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+ 
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/module.c linux-2.6.35.4/arch/alpha/kernel/module.c
+--- linux-2.6.35.4/arch/alpha/kernel/module.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/module.c	2010-09-17 20:12:09.000000000 -0400
+@@ -182,7 +182,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs,
+ 
+ 	/* The small sections were sorted to the end of the segment.
+ 	   The following should definitely cover them.  */
+-	gp = (u64)me->module_core + me->core_size - 0x8000;
++	gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
+ 	got = sechdrs[me->arch.gotsecindex].sh_addr;
+ 
+ 	for (i = 0; i < n; i++) {
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/osf_sys.c linux-2.6.35.4/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.35.4/arch/alpha/kernel/osf_sys.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/osf_sys.c	2010-09-17 20:12:09.000000000 -0400
+@@ -1170,7 +1170,7 @@ arch_get_unmapped_area_1(unsigned long a
+ 		/* At this point:  (!vma || addr < vma->vm_end). */
+ 		if (limit - len < addr)
+ 			return -ENOMEM;
+-		if (!vma || addr + len <= vma->vm_start)
++		if (check_heap_stack_gap(vma, addr, len))
+ 			return addr;
+ 		addr = vma->vm_end;
+ 		vma = vma->vm_next;
+@@ -1206,6 +1206,10 @@ arch_get_unmapped_area(struct file *filp
+ 	   merely specific addresses, but regions of memory -- perhaps
+ 	   this feature should be incorporated into all ports?  */
+ 
++#ifdef CONFIG_PAX_RANDMMAP
++	if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
++#endif
++
+ 	if (addr) {
+ 		addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ 		if (addr != (unsigned long) -ENOMEM)
+@@ -1213,8 +1217,8 @@ arch_get_unmapped_area(struct file *filp
+ 	}
+ 
+ 	/* Next, try allocating at TASK_UNMAPPED_BASE.  */
+-	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+-					 len, limit);
++	addr = arch_get_unmapped_area_1 (PAGE_ALIGN(current->mm->mmap_base), len, limit);
++
+ 	if (addr != (unsigned long) -ENOMEM)
+ 		return addr;
+ 
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c
+--- linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/pci_iommu.c	2010-09-17 20:12:09.000000000 -0400
+@@ -950,7 +950,7 @@ static int alpha_pci_set_mask(struct dev
+ 	return 0;
+ }
+ 
+-struct dma_map_ops alpha_pci_ops = {
++const struct dma_map_ops alpha_pci_ops = {
+ 	.alloc_coherent		= alpha_pci_alloc_coherent,
+ 	.free_coherent		= alpha_pci_free_coherent,
+ 	.map_page		= alpha_pci_map_page,
+@@ -962,5 +962,5 @@ struct dma_map_ops alpha_pci_ops = {
+ 	.set_dma_mask		= alpha_pci_set_mask,
+ };
+ 
+-struct dma_map_ops *dma_ops = &alpha_pci_ops;
++const struct dma_map_ops *dma_ops = &alpha_pci_ops;
+ EXPORT_SYMBOL(dma_ops);
+diff -urNp linux-2.6.35.4/arch/alpha/kernel/pci-noop.c linux-2.6.35.4/arch/alpha/kernel/pci-noop.c
+--- linux-2.6.35.4/arch/alpha/kernel/pci-noop.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/kernel/pci-noop.c	2010-09-17 20:12:09.000000000 -0400
+@@ -173,7 +173,7 @@ static int alpha_noop_set_mask(struct de
+ 	return 0;
+ }
+ 
+-struct dma_map_ops alpha_noop_ops = {
++const struct dma_map_ops alpha_noop_ops = {
+ 	.alloc_coherent		= alpha_noop_alloc_coherent,
+ 	.free_coherent		= alpha_noop_free_coherent,
+ 	.map_page		= alpha_noop_map_page,
+@@ -183,7 +183,7 @@ struct dma_map_ops alpha_noop_ops = {
+ 	.set_dma_mask		= alpha_noop_set_mask,
+ };
+ 
+-struct dma_map_ops *dma_ops = &alpha_noop_ops;
++const struct dma_map_ops *dma_ops = &alpha_noop_ops;
+ EXPORT_SYMBOL(dma_ops);
+ 
+ void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+diff -urNp linux-2.6.35.4/arch/alpha/mm/fault.c linux-2.6.35.4/arch/alpha/mm/fault.c
+--- linux-2.6.35.4/arch/alpha/mm/fault.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/alpha/mm/fault.c	2010-09-17 20:12:09.000000000 -0400
+@@ -54,6 +54,124 @@ __load_new_mm_context(struct mm_struct *
+ 	__reload_thread(pcb);
+ }
+ 
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ *         2 when patched PLT trampoline was detected
++ *         3 when unpatched PLT trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++	int err;
++
++	do { /* PaX: patched PLT emulation #1 */
++		unsigned int ldah, ldq, jmp;
++
++		err = get_user(ldah, (unsigned int *)regs->pc);
++		err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++		err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++		if (err)
++			break;
++
++		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++		    (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++		    jmp == 0x6BFB0000U)
++		{
++			unsigned long r27, addr;
++			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++			unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++			addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++			err = get_user(r27, (unsigned long *)addr);
++			if (err)
++				break;
++
++			regs->r27 = r27;
++			regs->pc = r27;
++			return 2;
++		}
++	} while (0);
++
++	do { /* PaX: patched PLT emulation #2 */
++		unsigned int ldah, lda, br;
++
++		err = get_user(ldah, (unsigned int *)regs->pc);
++		err |= get_user(lda, (unsigned int *)(regs->pc+4));
++		err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++		if (err)
++			break;
++
++		if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++		    (lda & 0xFFFF0000U) == 0xA77B0000U &&
++		    (br & 0xFFE00000U) == 0xC3E00000U)
++		{
++			unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++			unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++			unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++			regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++			regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++			return 2;
++		}
++	} while (0);
++
++	do { /* PaX: unpatched PLT emulation */
++		unsigned int br;
++
++		err = get_user(br, (unsigned int *)regs->pc);
++
++		if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++			unsigned int br2, ldq, nop, jmp;
++			unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++			addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++			err = get_user(br2, (unsigned int *)addr);
++			err |= get_user(ldq, (unsigned int *)(addr+4));
++			err |= get_user(nop, (unsigned int *)(addr+8));
++			err |= get_user(jmp, (unsigned int *)(addr+12));
++			err |= get_user(resolver, (unsigned long *)(addr+16));
++
++			if (err)
++				break;
++
++			if (br2 == 0xC3600000U &&
++			    ldq == 0xA77B000CU &&
++			    nop == 0x47FF041FU &&
++			    jmp == 0x6B7B0000U)
++			{
++				regs->r28 = regs->pc+4;
++				regs->r27 = addr+16;
++				regs->pc = resolver;
++				return 3;
++			}
++		}
++	} while (0);
++#endif
++
++	return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++	unsigned long i;
++
++	printk(KERN_ERR "PAX: bytes at PC: ");
++	for (i = 0; i < 5; i++) {
++		unsigned int c;
++		if (get_user(c, (unsigned int *)pc+i))
++			printk(KERN_CONT "???????? ");
++		else
++			printk(KERN_CONT "%08x ", c);
++	}
++	printk("\n");
++}
++#endif
+ 
+ /*
+  * This routine handles page faults.  It determines the address,
+@@ -131,8 +249,29 @@ do_page_fault(unsigned long address, uns
+  good_area:
+ 	si_code = SEGV_ACCERR;
+ 	if (cause < 0) {
+-		if (!(vma->vm_flags & VM_EXEC))
++		if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
++				goto bad_area;
++
++			up_read(&mm->mmap_sem);
++			switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++			case 2:
++			case 3:
++				return;
++#endif
++
++			}
++			pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
++			do_group_exit(SIGKILL);
++#else
+ 			goto bad_area;
++#endif
++
++		}
+ 	} else if (!cause) {
+ 		/* Allow reads even for write-only mappings */
+ 		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -urNp linux-2.6.35.4/arch/arm/include/asm/elf.h linux-2.6.35.4/arch/arm/include/asm/elf.h
+--- linux-2.6.35.4/arch/arm/include/asm/elf.h	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/include/asm/elf.h	2010-09-17 20:12:09.000000000 -0400
+@@ -111,7 +111,14 @@ int dump_task_regs(struct task_struct *t
+    the loader.  We need to make sure that it is out of the way of the program
+    that it will "exec", and that there is sufficient room for the brk.  */
+ 
+-#define ELF_ET_DYN_BASE	(2 * TASK_SIZE / 3)
++#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE	0x00008000UL
++
++#define PAX_DELTA_MMAP_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#define PAX_DELTA_STACK_LEN	((current->personality == PER_LINUX_32BIT) ? 16 : 10)
++#endif
+ 
+ /* When the program starts, a1 contains a pointer to a function to be 
+   registered with atexit, as per the SVR4 ABI.  A value of 0 means we
+diff -urNp linux-2.6.35.4/arch/arm/include/asm/kmap_types.h linux-2.6.35.4/arch/arm/include/asm/kmap_types.h
+--- linux-2.6.35.4/arch/arm/include/asm/kmap_types.h	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/include/asm/kmap_types.h	2010-09-17 20:12:09.000000000 -0400
+@@ -21,6 +21,7 @@ enum km_type {
+ 	KM_L1_CACHE,
+ 	KM_L2_CACHE,
+ 	KM_KDB,
++	KM_CLEARPAGE,
+ 	KM_TYPE_NR
+ };
+ 
+diff -urNp linux-2.6.35.4/arch/arm/include/asm/uaccess.h linux-2.6.35.4/arch/arm/include/asm/uaccess.h
+--- linux-2.6.35.4/arch/arm/include/asm/uaccess.h	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/include/asm/uaccess.h	2010-09-17 20:12:09.000000000 -0400
+@@ -403,6 +403,9 @@ extern unsigned long __must_check __strn
+ 
+ static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
+ {
++	if ((long)n < 0)
++		return n;
++
+ 	if (access_ok(VERIFY_READ, from, n))
+ 		n = __copy_from_user(to, from, n);
+ 	else /* security hole - plug it */
+@@ -412,6 +415,9 @@ static inline unsigned long __must_check
+ 
+ static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
+ {
++	if ((long)n < 0)
++		return n;
++
+ 	if (access_ok(VERIFY_WRITE, to, n))
+ 		n = __copy_to_user(to, from, n);
+ 	return n;
+diff -urNp linux-2.6.35.4/arch/arm/kernel/kgdb.c linux-2.6.35.4/arch/arm/kernel/kgdb.c
+--- linux-2.6.35.4/arch/arm/kernel/kgdb.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/kernel/kgdb.c	2010-09-17 20:12:09.000000000 -0400
+@@ -208,7 +208,7 @@ void kgdb_arch_exit(void)
+ * and we handle the normal undef case within the do_undefinstr
+ * handler.
+ */
+-struct kgdb_arch arch_kgdb_ops = {
++const struct kgdb_arch arch_kgdb_ops = {
+ #ifndef __ARMEB__
+ 	.gdb_bpt_instr		= {0xfe, 0xde, 0xff, 0xe7}
+ #else /* ! __ARMEB__ */
+diff -urNp linux-2.6.35.4/arch/arm/mach-at91/pm.c linux-2.6.35.4/arch/arm/mach-at91/pm.c
+--- linux-2.6.35.4/arch/arm/mach-at91/pm.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-at91/pm.c	2010-09-17 20:12:09.000000000 -0400
+@@ -294,7 +294,7 @@ static void at91_pm_end(void)
+ }
+ 
+ 
+-static struct platform_suspend_ops at91_pm_ops ={
++static const struct platform_suspend_ops at91_pm_ops ={
+ 	.valid	= at91_pm_valid_state,
+ 	.begin	= at91_pm_begin,
+ 	.enter	= at91_pm_enter,
+diff -urNp linux-2.6.35.4/arch/arm/mach-davinci/pm.c linux-2.6.35.4/arch/arm/mach-davinci/pm.c
+--- linux-2.6.35.4/arch/arm/mach-davinci/pm.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-davinci/pm.c	2010-09-17 20:12:09.000000000 -0400
+@@ -110,7 +110,7 @@ static int davinci_pm_enter(suspend_stat
+ 	return ret;
+ }
+ 
+-static struct platform_suspend_ops davinci_pm_ops = {
++static const struct platform_suspend_ops davinci_pm_ops = {
+ 	.enter		= davinci_pm_enter,
+ 	.valid		= suspend_valid_only_mem,
+ };
+diff -urNp linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c
+--- linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-msm/last_radio_log.c	2010-09-17 20:12:09.000000000 -0400
+@@ -47,6 +47,7 @@ static ssize_t last_radio_log_read(struc
+ 	return count;
+ }
+ 
++/* cannot be const, see msm_init_last_radio_log */
+ static struct file_operations last_radio_log_fops = {
+ 	.read = last_radio_log_read
+ };
+diff -urNp linux-2.6.35.4/arch/arm/mach-omap1/pm.c linux-2.6.35.4/arch/arm/mach-omap1/pm.c
+--- linux-2.6.35.4/arch/arm/mach-omap1/pm.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-omap1/pm.c	2010-09-17 20:12:09.000000000 -0400
+@@ -647,7 +647,7 @@ static struct irqaction omap_wakeup_irq
+ 
+ 
+ 
+-static struct platform_suspend_ops omap_pm_ops ={
++static const struct platform_suspend_ops omap_pm_ops ={
+ 	.prepare	= omap_pm_prepare,
+ 	.enter		= omap_pm_enter,
+ 	.finish		= omap_pm_finish,
+diff -urNp linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c
+--- linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-omap2/pm24xx.c	2010-09-17 20:12:09.000000000 -0400
+@@ -325,7 +325,7 @@ static void omap2_pm_finish(void)
+ 	enable_hlt();
+ }
+ 
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ 	.prepare	= omap2_pm_prepare,
+ 	.enter		= omap2_pm_enter,
+ 	.finish		= omap2_pm_finish,
+diff -urNp linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c
+--- linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-omap2/pm34xx.c	2010-09-17 20:12:09.000000000 -0400
+@@ -669,7 +669,7 @@ static void omap3_pm_end(void)
+ 	return;
+ }
+ 
+-static struct platform_suspend_ops omap_pm_ops = {
++static const struct platform_suspend_ops omap_pm_ops = {
+ 	.begin		= omap3_pm_begin,
+ 	.end		= omap3_pm_end,
+ 	.prepare	= omap3_pm_prepare,
+diff -urNp linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c
+--- linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c	2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/arm/mach-pnx4008/pm.c	2010-09-17 20:12:09.000000000 -0400
+@@ -119,7 +119,7 @@ static int pnx4008_pm_valid(suspend_stat
+ 	       (state == PM_SUSPEND_MEM);
+ }
+ 
+-static struct platform_suspend_ops
pnx4008_pm_ops = { ++static const struct platform_suspend_ops pnx4008_pm_ops = { + .enter = pnx4008_pm_enter, + .valid = pnx4008_pm_valid, + }; +diff -urNp linux-2.6.35.4/arch/arm/mach-pxa/pm.c linux-2.6.35.4/arch/arm/mach-pxa/pm.c +--- linux-2.6.35.4/arch/arm/mach-pxa/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/arm/mach-pxa/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -96,7 +96,7 @@ void pxa_pm_finish(void) + pxa_cpu_pm_fns->finish(); + } + +-static struct platform_suspend_ops pxa_pm_ops = { ++static const struct platform_suspend_ops pxa_pm_ops = { + .valid = pxa_pm_valid, + .enter = pxa_pm_enter, + .prepare = pxa_pm_prepare, +diff -urNp linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c +--- linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/arm/mach-pxa/sharpsl_pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -891,7 +891,7 @@ static void sharpsl_apm_get_power_status + } + + #ifdef CONFIG_PM +-static struct platform_suspend_ops sharpsl_pm_ops = { ++static const struct platform_suspend_ops sharpsl_pm_ops = { + .prepare = pxa_pm_prepare, + .finish = pxa_pm_finish, + .enter = corgi_pxa_pm_enter, +diff -urNp linux-2.6.35.4/arch/arm/mach-sa1100/pm.c linux-2.6.35.4/arch/arm/mach-sa1100/pm.c +--- linux-2.6.35.4/arch/arm/mach-sa1100/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/arm/mach-sa1100/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -120,7 +120,7 @@ unsigned long sleep_phys_sp(void *sp) + return virt_to_phys(sp); + } + +-static struct platform_suspend_ops sa11x0_pm_ops = { ++static const struct platform_suspend_ops sa11x0_pm_ops = { + .enter = sa11x0_pm_enter, + .valid = suspend_valid_only_mem, + }; +diff -urNp linux-2.6.35.4/arch/arm/mm/fault.c linux-2.6.35.4/arch/arm/mm/fault.c +--- linux-2.6.35.4/arch/arm/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/arm/mm/fault.c 2010-09-17 20:12:09.000000000 -0400 +@@ -167,6 +167,13 @@ __do_user_fault(struct task_struct *tsk, + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (fsr & FSR_LNX_PF) { ++ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + tsk->thread.address = addr; + tsk->thread.error_code = fsr; + tsk->thread.trap_no = 14; +@@ -364,6 +371,33 @@ do_page_fault(unsigned long addr, unsign + } + #endif /* CONFIG_MMU */ + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (__force unsigned char __user *)pc+i)) ++ printk(KERN_CONT "?? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-4: "); ++ for (i = -1; i < 20; i++) { ++ unsigned long c; ++ if (get_user(c, (__force unsigned long __user *)sp+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08lx ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * First Level Translation Fault Handler + * +diff -urNp linux-2.6.35.4/arch/arm/mm/mmap.c linux-2.6.35.4/arch/arm/mm/mmap.c +--- linux-2.6.35.4/arch/arm/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/arm/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -63,6 +63,10 @@ arch_get_unmapped_area(struct file *filp + if (len > TASK_SIZE) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -70,15 +74,14 @@ arch_get_unmapped_area(struct file *filp + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + + full_search: +@@ -94,14 +97,14 @@ full_search: + * Start a new search - just in case we missed + * some holes. + */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- start_addr = addr = TASK_UNMAPPED_BASE; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +diff -urNp linux-2.6.35.4/arch/arm/plat-samsung/pm.c linux-2.6.35.4/arch/arm/plat-samsung/pm.c +--- linux-2.6.35.4/arch/arm/plat-samsung/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/arm/plat-samsung/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -355,7 +355,7 @@ static void s3c_pm_finish(void) + s3c_pm_check_cleanup(); + } + +-static struct platform_suspend_ops s3c_pm_ops = { ++static const struct platform_suspend_ops s3c_pm_ops = { + .enter = s3c_pm_enter, + .prepare = s3c_pm_prepare, + .finish = s3c_pm_finish, +diff -urNp linux-2.6.35.4/arch/avr32/include/asm/elf.h linux-2.6.35.4/arch/avr32/include/asm/elf.h +--- linux-2.6.35.4/arch/avr32/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/avr32/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpreg + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3) ++#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x00001000UL ++ ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
This could be done in user space, +diff -urNp linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h +--- linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/avr32/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -22,7 +22,8 @@ D(10) KM_IRQ0, + D(11) KM_IRQ1, + D(12) KM_SOFTIRQ0, + D(13) KM_SOFTIRQ1, +-D(14) KM_TYPE_NR ++D(14) KM_CLEARPAGE, ++D(15) KM_TYPE_NR + }; + + #undef D +diff -urNp linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c +--- linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/avr32/mach-at32ap/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -176,7 +176,7 @@ out: + return 0; + } + +-static struct platform_suspend_ops avr32_pm_ops = { ++static const struct platform_suspend_ops avr32_pm_ops = { + .valid = avr32_pm_valid_state, + .enter = avr32_pm_enter, + }; +diff -urNp linux-2.6.35.4/arch/avr32/mm/fault.c linux-2.6.35.4/arch/avr32/mm/fault.c +--- linux-2.6.35.4/arch/avr32/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/avr32/mm/fault.c 2010-09-17 20:12:09.000000000 -0400 +@@ -41,6 +41,23 @@ static inline int notify_page_fault(stru + + int exception_trace = 1; + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (unsigned char *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address and the + * problem, and then passes it off to one of the appropriate routines. 
+@@ -157,6 +174,16 @@ bad_area: + up_read(&mm->mmap_sem); + + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) { ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + if (exception_trace && printk_ratelimit()) + printk("%s%s[%d]: segfault at %08lx pc %08lx " + "sp %08lx ecr %lu\n", +diff -urNp linux-2.6.35.4/arch/blackfin/kernel/kgdb.c linux-2.6.35.4/arch/blackfin/kernel/kgdb.c +--- linux-2.6.35.4/arch/blackfin/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/blackfin/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -397,7 +397,7 @@ int kgdb_arch_handle_exception(int vecto + return -1; /* this means that we do not want to exit from the handler */ + } + +-struct kgdb_arch arch_kgdb_ops = { ++const struct kgdb_arch arch_kgdb_ops = { + .gdb_bpt_instr = {0xa1}, + #ifdef CONFIG_SMP + .flags = KGDB_HW_BREAKPOINT|KGDB_THR_PROC_SWAP, +diff -urNp linux-2.6.35.4/arch/blackfin/mach-common/pm.c linux-2.6.35.4/arch/blackfin/mach-common/pm.c +--- linux-2.6.35.4/arch/blackfin/mach-common/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/blackfin/mach-common/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -232,7 +232,7 @@ static int bfin_pm_enter(suspend_state_t + return 0; + } + +-struct platform_suspend_ops bfin_pm_ops = { ++const struct platform_suspend_ops bfin_pm_ops = { + .enter = bfin_pm_enter, + .valid = bfin_pm_valid, + }; +diff -urNp linux-2.6.35.4/arch/blackfin/mm/maccess.c linux-2.6.35.4/arch/blackfin/mm/maccess.c +--- linux-2.6.35.4/arch/blackfin/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/blackfin/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400 +@@ -16,7 +16,7 @@ static int validate_memory_access_addres + return bfin_mem_access_type(addr, size); + } + +-long probe_kernel_read(void *dst, void *src, size_t size) ++long probe_kernel_read(void *dst, const void *src, size_t size) + { + unsigned long lsrc = (unsigned long)src; + int mem_type; +@@ -55,7 +55,7 @@ long probe_kernel_read(void *dst, void * + return -EFAULT; + } + +-long probe_kernel_write(void *dst, void *src, size_t size) ++long probe_kernel_write(void *dst, const void *src, size_t size) + { + unsigned long ldst = (unsigned long)dst; + int mem_type; +diff -urNp linux-2.6.35.4/arch/frv/include/asm/kmap_types.h linux-2.6.35.4/arch/frv/include/asm/kmap_types.h +--- linux-2.6.35.4/arch/frv/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/frv/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -23,6 +23,7 @@ enum km_type { + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff -urNp linux-2.6.35.4/arch/frv/mm/elf-fdpic.c linux-2.6.35.4/arch/frv/mm/elf-fdpic.c +--- linux-2.6.35.4/arch/frv/mm/elf-fdpic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/frv/mm/elf-fdpic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -73,8 +73,7 @@ unsigned long arch_get_unmapped_area(str + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(current->mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + goto success; + } + +@@ -89,7 +88,7 @@ unsigned long arch_get_unmapped_area(str + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto 
success; + addr = vma->vm_end; + } +@@ -104,7 +103,7 @@ unsigned long arch_get_unmapped_area(str + for (; vma; vma = vma->vm_next) { + if (addr > limit) + break; +- if (addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + goto success; + addr = vma->vm_end; + } +diff -urNp linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c +--- linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/hp/common/hwsw_iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -17,7 +17,7 @@ + #include <linux/swiotlb.h> + #include <asm/machvec.h> + +-extern struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; ++extern const struct dma_map_ops sba_dma_ops, swiotlb_dma_ops; + + /* swiotlb declarations & definitions: */ + extern int swiotlb_late_init_with_default_size (size_t size); +@@ -33,7 +33,7 @@ static inline int use_swiotlb(struct dev + !sba_dma_ops.dma_supported(dev, *dev->dma_mask); + } + +-struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) ++const struct dma_map_ops *hwsw_dma_get_ops(struct device *dev) + { + if (use_swiotlb(dev)) + return &swiotlb_dma_ops; +diff -urNp linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c +--- linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/hp/common/sba_iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2097,7 +2097,7 @@ static struct acpi_driver acpi_sba_ioc_d + }, + }; + +-extern struct dma_map_ops swiotlb_dma_ops; ++extern const struct dma_map_ops swiotlb_dma_ops; + + static int __init + sba_init(void) +@@ -2211,7 +2211,7 @@ sba_page_override(char *str) + + __setup("sbapagesize=",sba_page_override); + +-struct dma_map_ops sba_dma_ops = { ++const struct dma_map_ops sba_dma_ops = { + .alloc_coherent = sba_alloc_coherent, + .free_coherent = sba_free_coherent, + .map_page = sba_map_page, +diff -urNp linux-2.6.35.4/arch/ia64/include/asm/compat.h linux-2.6.35.4/arch/ia64/include/asm/compat.h +--- linux-2.6.35.4/arch/ia64/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -199,7 +199,7 @@ ptr_to_compat(void __user *uptr) + } + + static __inline__ void __user * +-compat_alloc_user_space (long len) ++arch_compat_alloc_user_space (long len) + { + struct pt_regs *regs = task_pt_regs(current); + return (void __user *) (((regs->r12 & 0xffffffff) & -16) - len); +diff -urNp linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h +--- linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400 +@@ -12,7 +12,7 @@ + + #define ARCH_HAS_DMA_GET_REQUIRED_MASK + +-extern struct dma_map_ops *dma_ops; ++extern const struct dma_map_ops *dma_ops; + extern struct ia64_machine_vector ia64_mv; + extern void set_iommu_machvec(void); + +@@ -24,7 +24,7 @@ extern void machvec_dma_sync_sg(struct d + static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *daddr, gfp_t gfp) + { +- struct dma_map_ops *ops = platform_dma_get_ops(dev); ++ const struct dma_map_ops *ops = platform_dma_get_ops(dev); + void *caddr; + + caddr = ops->alloc_coherent(dev, size, daddr, gfp); +@@ -35,7 +35,7 @@ static inline void *dma_alloc_coherent(s + static inline void dma_free_coherent(struct device *dev, size_t size, + void 
*caddr, dma_addr_t daddr) + { +- struct dma_map_ops *ops = platform_dma_get_ops(dev); ++ const struct dma_map_ops *ops = platform_dma_get_ops(dev); + debug_dma_free_coherent(dev, size, caddr, daddr); + ops->free_coherent(dev, size, caddr, daddr); + } +@@ -49,13 +49,13 @@ static inline void dma_free_coherent(str + + static inline int dma_mapping_error(struct device *dev, dma_addr_t daddr) + { +- struct dma_map_ops *ops = platform_dma_get_ops(dev); ++ const struct dma_map_ops *ops = platform_dma_get_ops(dev); + return ops->mapping_error(dev, daddr); + } + + static inline int dma_supported(struct device *dev, u64 mask) + { +- struct dma_map_ops *ops = platform_dma_get_ops(dev); ++ const struct dma_map_ops *ops = platform_dma_get_ops(dev); + return ops->dma_supported(dev, mask); + } + +diff -urNp linux-2.6.35.4/arch/ia64/include/asm/elf.h linux-2.6.35.4/arch/ia64/include/asm/elf.h +--- linux-2.6.35.4/arch/ia64/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -42,6 +42,13 @@ + */ + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) ++#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13) ++#endif ++ + #define PT_IA_64_UNWIND 0x70000001 + + /* IA-64 relocations: */ +diff -urNp linux-2.6.35.4/arch/ia64/include/asm/machvec.h linux-2.6.35.4/arch/ia64/include/asm/machvec.h +--- linux-2.6.35.4/arch/ia64/include/asm/machvec.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/include/asm/machvec.h 2010-09-17 20:12:09.000000000 -0400 +@@ -45,7 +45,7 @@ typedef void ia64_mv_kernel_launch_event + /* DMA-mapping interface: */ + typedef void ia64_mv_dma_init (void); + typedef u64 ia64_mv_dma_get_required_mask (struct device *); +-typedef struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); ++typedef const struct dma_map_ops *ia64_mv_dma_get_ops(struct device *); + + /* + * WARNING: The legacy I/O space is _architected_. 
Platforms are +@@ -251,7 +251,7 @@ extern void machvec_init_from_cmdline(co + # endif /* CONFIG_IA64_GENERIC */ + + extern void swiotlb_dma_init(void); +-extern struct dma_map_ops *dma_get_ops(struct device *); ++extern const struct dma_map_ops *dma_get_ops(struct device *); + + /* + * Define default versions so we can extend machvec for new platforms without having +diff -urNp linux-2.6.35.4/arch/ia64/include/asm/pgtable.h linux-2.6.35.4/arch/ia64/include/asm/pgtable.h +--- linux-2.6.35.4/arch/ia64/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400 +@@ -12,7 +12,7 @@ + * David Mosberger-Tang <davidm@hpl.hp.com> + */ + +- ++#include <linux/const.h> + #include <asm/mman.h> + #include <asm/page.h> + #include <asm/processor.h> +@@ -143,6 +143,17 @@ + #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) + #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW) ++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++# define PAGE_COPY_NOEXEC PAGE_COPY ++#endif ++ + #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX) + #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX) + #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX) +diff -urNp linux-2.6.35.4/arch/ia64/include/asm/uaccess.h linux-2.6.35.4/arch/ia64/include/asm/uaccess.h +--- linux-2.6.35.4/arch/ia64/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400 +@@ -257,7 +257,7 @@ __copy_from_user (void *to, const void _ + const void *__cu_from = (from); \ + long __cu_len = (n); \ + \ +- if (__access_ok(__cu_to, __cu_len, get_fs())) \ ++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) \ + __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \ + __cu_len; \ + }) +@@ -269,7 +269,7 @@ __copy_from_user (void *to, const void _ + long __cu_len = (n); \ + \ + __chk_user_ptr(__cu_from); \ +- if (__access_ok(__cu_from, __cu_len, get_fs())) \ ++ if (__cu_len > 0 && __cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) \ + __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \ + __cu_len; \ + }) +diff -urNp linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c +--- linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/kernel/dma-mapping.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3,7 +3,7 @@ + /* Set this to 1 if there is a HW IOMMU in the system */ + int iommu_detected __read_mostly; + +-struct dma_map_ops *dma_ops; ++const struct dma_map_ops *dma_ops; + EXPORT_SYMBOL(dma_ops); + + #define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16) +@@ -16,7 +16,7 @@ static int __init dma_init(void) + } + fs_initcall(dma_init); + +-struct dma_map_ops *dma_get_ops(struct device *dev) ++const struct dma_map_ops *dma_get_ops(struct device *dev) + { + return dma_ops; + } +diff -urNp linux-2.6.35.4/arch/ia64/kernel/module.c 
linux-2.6.35.4/arch/ia64/kernel/module.c +--- linux-2.6.35.4/arch/ia64/kernel/module.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/kernel/module.c 2010-09-17 20:12:09.000000000 -0400 +@@ -315,8 +315,7 @@ module_alloc (unsigned long size) + void + module_free (struct module *mod, void *module_region) + { +- if (mod && mod->arch.init_unw_table && +- module_region == mod->module_init) { ++ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) { + unw_remove_unwind_table(mod->arch.init_unw_table); + mod->arch.init_unw_table = NULL; + } +@@ -502,15 +501,39 @@ module_frob_arch_sections (Elf_Ehdr *ehd + } + + static inline int ++in_init_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx; ++} ++ ++static inline int ++in_init_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw; ++} ++ ++static inline int + in_init (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_init < mod->init_size; ++ return in_init_rx(mod, addr) || in_init_rw(mod, addr); ++} ++ ++static inline int ++in_core_rx (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx; ++} ++ ++static inline int ++in_core_rw (const struct module *mod, uint64_t addr) ++{ ++ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw; + } + + static inline int + in_core (const struct module *mod, uint64_t addr) + { +- return addr - (uint64_t) mod->module_core < mod->core_size; ++ return in_core_rx(mod, addr) || in_core_rw(mod, addr); + } + + static inline int +@@ -693,7 +716,14 @@ do_reloc (struct module *mod, uint8_t r_ + break; + + case RV_BDREL: +- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core); ++ if (in_init_rx(mod, val)) ++ val -= (uint64_t) mod->module_init_rx; ++ else if (in_init_rw(mod, val)) ++ val -= (uint64_t) mod->module_init_rw; ++ else if (in_core_rx(mod, val)) ++ val -= (uint64_t) mod->module_core_rx; ++ else if (in_core_rw(mod, val)) ++ val -= (uint64_t) mod->module_core_rw; + break; + + case RV_LTV: +@@ -828,15 +858,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, + * addresses have been selected... + */ + uint64_t gp; +- if (mod->core_size > MAX_LTOFF) ++ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF) + /* + * This takes advantage of fact that SHF_ARCH_SMALL gets allocated + * at the end of the module. 
+ */ +- gp = mod->core_size - MAX_LTOFF / 2; ++ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2; + else +- gp = mod->core_size / 2; +- gp = (uint64_t) mod->module_core + ((gp + 7) & -8); ++ gp = (mod->core_size_rx + mod->core_size_rw) / 2; ++ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8); + mod->arch.gp = gp; + DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp); + } +diff -urNp linux-2.6.35.4/arch/ia64/kernel/pci-dma.c linux-2.6.35.4/arch/ia64/kernel/pci-dma.c +--- linux-2.6.35.4/arch/ia64/kernel/pci-dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/kernel/pci-dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,7 @@ struct device fallback_dev = { + .dma_mask = &fallback_dev.coherent_dma_mask, + }; + +-extern struct dma_map_ops intel_dma_ops; ++extern const struct dma_map_ops intel_dma_ops; + + static int __init pci_iommu_init(void) + { +diff -urNp linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c +--- linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/kernel/pci-swiotlb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -22,7 +22,7 @@ static void *ia64_swiotlb_alloc_coherent + return swiotlb_alloc_coherent(dev, size, dma_handle, gfp); + } + +-struct dma_map_ops swiotlb_dma_ops = { ++const struct dma_map_ops swiotlb_dma_ops = { + .alloc_coherent = ia64_swiotlb_alloc_coherent, + .free_coherent = swiotlb_free_coherent, + .map_page = swiotlb_map_page, +diff -urNp linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c +--- linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/kernel/sys_ia64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,6 +43,13 @@ arch_get_unmapped_area (struct file *fil + if (REGION_NUMBER(addr) == RGN_HPAGE) + addr = 0; + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ addr = mm->free_area_cache; ++ else ++#endif ++ + if (!addr) + addr = mm->free_area_cache; + +@@ -61,14 +68,14 @@ arch_get_unmapped_area (struct file *fil + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) { +- if (start_addr != TASK_UNMAPPED_BASE) { ++ if (start_addr != mm->mmap_base) { + /* Start a new search --- just in case we missed some holes. */ +- addr = TASK_UNMAPPED_BASE; ++ addr = mm->mmap_base; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* Remember the address where we stopped this search: */ + mm->free_area_cache = addr + len; + return addr; +diff -urNp linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S +--- linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/kernel/vmlinux.lds.S 2010-09-17 20:12:09.000000000 -0400 +@@ -196,7 +196,7 @@ SECTIONS + /* Per-cpu data: */ + . = ALIGN(PERCPU_PAGE_SIZE); + PERCPU_VADDR(PERCPU_ADDR, :percpu) +- __phys_per_cpu_start = __per_cpu_load; ++ __phys_per_cpu_start = per_cpu_load; + . 
= __phys_per_cpu_start + PERCPU_PAGE_SIZE; /* ensure percpu data fits + * into percpu page size + */ +diff -urNp linux-2.6.35.4/arch/ia64/mm/fault.c linux-2.6.35.4/arch/ia64/mm/fault.c +--- linux-2.6.35.4/arch/ia64/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/mm/fault.c 2010-09-17 20:12:09.000000000 -0400 +@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned + return pte_present(pte); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + void __kprobes + ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs) + { +@@ -145,9 +162,23 @@ ia64_do_page_fault (unsigned long addres + mask = ( (((isr >> IA64_ISR_X_BIT) & 1UL) << VM_EXEC_BIT) + | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)); + +- if ((vma->vm_flags & mask) != mask) ++ if ((vma->vm_flags & mask) != mask) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) { ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip) ++ goto bad_area; ++ ++ up_read(&mm->mmap_sem); ++ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; + ++ } ++ + /* + * If for any reason at all we couldn't handle the fault, make + * sure we exit gracefully rather than endlessly redo the +diff -urNp linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c +--- linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400 +@@ -171,7 +171,7 @@ unsigned long hugetlb_get_unmapped_area( + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (REGION_OFFSET(addr) + len > RGN_MAP_LIMIT) + return -ENOMEM; +- if (!vmm || (addr + len) <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = ALIGN(vmm->vm_end, HPAGE_SIZE); + } +diff -urNp linux-2.6.35.4/arch/ia64/mm/init.c linux-2.6.35.4/arch/ia64/mm/init.c +--- linux-2.6.35.4/arch/ia64/mm/init.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/mm/init.c 2010-09-17 20:12:09.000000000 -0400 +@@ -122,6 +122,19 @@ ia64_init_addr_space (void) + vma->vm_start = current->thread.rbs_bot & PAGE_MASK; + vma->vm_end = vma->vm_start + PAGE_SIZE; + vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) { ++ vma->vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->pax_flags & MF_PAX_MPROTECT) ++ vma->vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + down_write(¤t->mm->mmap_sem); + if (insert_vm_struct(current->mm, vma)) { +diff -urNp linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c +--- linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/ia64/sn/pci/pci_dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -465,7 +465,7 @@ int sn_pci_legacy_write(struct pci_bus * + return ret; + } + +-static struct dma_map_ops sn_dma_ops = { ++static const struct dma_map_ops sn_dma_ops = { + .alloc_coherent = sn_dma_alloc_coherent, + .free_coherent = sn_dma_free_coherent, + .map_page = sn_dma_map_page, +diff -urNp linux-2.6.35.4/arch/m32r/lib/usercopy.c linux-2.6.35.4/arch/m32r/lib/usercopy.c +--- linux-2.6.35.4/arch/m32r/lib/usercopy.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/m32r/lib/usercopy.c 2010-09-17 20:12:09.000000000 -0400 +@@ -14,6 +14,9 @@ + unsigned long + __generic_copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetch(from); + if (access_ok(VERIFY_WRITE, to, n)) + __copy_user(to,from,n); +@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, + unsigned long + __generic_copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + prefetchw(to); + if (access_ok(VERIFY_READ, from, n)) + __copy_user_zeroing(to,from,n); +diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/device.h linux-2.6.35.4/arch/microblaze/include/asm/device.h +--- linux-2.6.35.4/arch/microblaze/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/microblaze/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,7 +13,7 @@ struct device_node; + + struct dev_archdata { + /* DMA operations on that device */ +- struct dma_map_ops *dma_ops; ++ const struct dma_map_ops *dma_ops; + void *dma_data; + }; + +diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h +--- linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/microblaze/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400 +@@ -43,14 +43,14 @@ static inline unsigned long device_to_ma + return 0xfffffffful; + } + +-extern struct dma_map_ops *dma_ops; ++extern const struct dma_map_ops *dma_ops; + + /* + * Available generic sets of operations + */ +-extern struct dma_map_ops dma_direct_ops; ++extern const struct dma_map_ops dma_direct_ops; + +-static inline struct dma_map_ops 
*get_dma_ops(struct device *dev) ++static inline const struct dma_map_ops *get_dma_ops(struct device *dev) + { + /* We don't handle the NULL dev case for ISA for now. We could + * do it via an out of line call but it is not needed for now. The +@@ -63,14 +63,14 @@ static inline struct dma_map_ops *get_dm + return dev->archdata.dma_ops; + } + +-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) ++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) + { + dev->archdata.dma_ops = ops; + } + + static inline int dma_supported(struct device *dev, u64 mask) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + if (unlikely(!ops)) + return 0; +@@ -87,7 +87,7 @@ static inline int dma_supported(struct d + + static inline int dma_set_mask(struct device *dev, u64 dma_mask) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + if (unlikely(ops == NULL)) + return -EIO; +@@ -103,7 +103,7 @@ static inline int dma_set_mask(struct de + + static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); + +@@ -117,7 +117,7 @@ static inline int dma_mapping_error(stru + static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; + + BUG_ON(!ops); +@@ -131,7 +131,7 @@ static inline void *dma_alloc_coherent(s + static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!ops); + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); +diff -urNp linux-2.6.35.4/arch/microblaze/include/asm/pci.h linux-2.6.35.4/arch/microblaze/include/asm/pci.h +--- linux-2.6.35.4/arch/microblaze/include/asm/pci.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/microblaze/include/asm/pci.h 2010-09-17 20:12:09.000000000 -0400 +@@ -54,8 +54,8 @@ static inline void pcibios_penalize_isa_ + } + + #ifdef CONFIG_PCI +-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); +-extern struct dma_map_ops *get_pci_dma_ops(void); ++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); ++extern const struct dma_map_ops *get_pci_dma_ops(void); + #else /* CONFIG_PCI */ + #define set_pci_dma_ops(d) + #define get_pci_dma_ops() NULL +diff -urNp linux-2.6.35.4/arch/microblaze/kernel/dma.c linux-2.6.35.4/arch/microblaze/kernel/dma.c +--- linux-2.6.35.4/arch/microblaze/kernel/dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/microblaze/kernel/dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -133,7 +133,7 @@ static inline void dma_direct_unmap_page + __dma_sync_page(dma_address, 0 , size, direction); + } + +-struct dma_map_ops dma_direct_ops = { ++const struct dma_map_ops dma_direct_ops = { + .alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_sg = dma_direct_map_sg, +diff -urNp linux-2.6.35.4/arch/microblaze/pci/pci-common.c linux-2.6.35.4/arch/microblaze/pci/pci-common.c +--- linux-2.6.35.4/arch/microblaze/pci/pci-common.c 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/arch/microblaze/pci/pci-common.c 2010-09-17 20:12:09.000000000 -0400 +@@ -46,14 +46,14 @@ resource_size_t isa_mem_base; + /* Default PCI flags is 0 on ppc32, modified at boot on ppc64 */ + unsigned int pci_flags; + +-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; ++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; + +-void set_pci_dma_ops(struct dma_map_ops *dma_ops) ++void set_pci_dma_ops(const struct dma_map_ops *dma_ops) + { + pci_dma_ops = dma_ops; + } + +-struct dma_map_ops *get_pci_dma_ops(void) ++const struct dma_map_ops *get_pci_dma_ops(void) + { + return pci_dma_ops; + } +diff -urNp linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c +--- linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/alchemy/devboards/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -110,7 +110,7 @@ static void db1x_pm_end(void) + + } + +-static struct platform_suspend_ops db1x_pm_ops = { ++static const struct platform_suspend_ops db1x_pm_ops = { + .valid = suspend_valid_only_mem, + .begin = db1x_pm_begin, + .enter = db1x_pm_enter, +diff -urNp linux-2.6.35.4/arch/mips/include/asm/compat.h linux-2.6.35.4/arch/mips/include/asm/compat.h +--- linux-2.6.35.4/arch/mips/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -145,7 +145,7 @@ static inline compat_uptr_t ptr_to_compa + return (u32)(unsigned long)uptr; + } + +-static inline void __user *compat_alloc_user_space(long len) ++static inline void __user *arch_compat_alloc_user_space(long len) + { + struct pt_regs *regs = (struct pt_regs *) + ((unsigned long) current_thread_info() + THREAD_SIZE - 32) - 1; +diff -urNp linux-2.6.35.4/arch/mips/include/asm/elf.h linux-2.6.35.4/arch/mips/include/asm/elf.h +--- linux-2.6.35.4/arch/mips/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -368,6 +368,13 @@ extern const char *__elf_platform; + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) + #endif + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1 + struct linux_binprm; + extern int arch_setup_additional_pages(struct linux_binprm *bprm, +diff -urNp linux-2.6.35.4/arch/mips/include/asm/page.h linux-2.6.35.4/arch/mips/include/asm/page.h +--- linux-2.6.35.4/arch/mips/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400 +@@ -93,7 +93,7 @@ extern void copy_user_highpage(struct pa + #ifdef CONFIG_CPU_MIPS32 + typedef struct { unsigned long pte_low, pte_high; } pte_t; + #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32)) +- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; }) ++ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; }) + #else + typedef struct { unsigned long long pte; } pte_t; + #define pte_val(x) ((x).pte) +diff -urNp linux-2.6.35.4/arch/mips/include/asm/system.h linux-2.6.35.4/arch/mips/include/asm/system.h +--- linux-2.6.35.4/arch/mips/include/asm/system.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/include/asm/system.h 2010-09-17 20:12:09.000000000 -0400 +@@ -234,6 +234,6 @@ extern void per_cpu_trap_init(void); + */ + #define __ARCH_WANT_UNLOCKED_CTXSW + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ALMASK) + + #endif /* _ASM_SYSTEM_H */ +diff -urNp linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c +--- linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/kernel/binfmt_elfn32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + #include <linux/module.h> + #include <linux/elfcore.h> +diff -urNp linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c +--- linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/kernel/binfmt_elfo32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -52,6 +52,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N + #undef ELF_ET_DYN_BASE + #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT_ADDR) ? 
27-PAGE_SHIFT : 36-PAGE_SHIFT) ++#endif ++ + #include <asm/processor.h> + + /* +diff -urNp linux-2.6.35.4/arch/mips/kernel/kgdb.c linux-2.6.35.4/arch/mips/kernel/kgdb.c +--- linux-2.6.35.4/arch/mips/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -270,6 +270,7 @@ int kgdb_arch_handle_exception(int vecto + return -1; + } + ++/* cannot be const, see kgdb_arch_init */ + struct kgdb_arch arch_kgdb_ops; + + /* +diff -urNp linux-2.6.35.4/arch/mips/kernel/process.c linux-2.6.35.4/arch/mips/kernel/process.c +--- linux-2.6.35.4/arch/mips/kernel/process.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/kernel/process.c 2010-09-17 20:12:09.000000000 -0400 +@@ -474,15 +474,3 @@ unsigned long get_wchan(struct task_stru + out: + return pc; + } +- +-/* +- * Don't forget that the stack pointer must be aligned on a 8 bytes +- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI. +- */ +-unsigned long arch_align_stack(unsigned long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() & ~PAGE_MASK; +- +- return sp & ALMASK; +-} +diff -urNp linux-2.6.35.4/arch/mips/kernel/syscall.c linux-2.6.35.4/arch/mips/kernel/syscall.c +--- linux-2.6.35.4/arch/mips/kernel/syscall.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/kernel/syscall.c 2010-09-17 20:12:09.000000000 -0400 +@@ -106,17 +106,21 @@ unsigned long arch_get_unmapped_area(str + do_color_align = 0; + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else + addr = PAGE_ALIGN(addr); + vmm = find_vma(current->mm, addr); +- if (task_size - len >= addr && +- (!vmm || addr + len <= vmm->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vmm, addr, len)) + return addr; + } +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); + else +@@ -126,7 +130,7 @@ unsigned long arch_get_unmapped_area(str + /* At this point: (!vmm || addr < vmm->vm_end). 
*/ + if (task_size - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (do_color_align) +diff -urNp linux-2.6.35.4/arch/mips/loongson/common/pm.c linux-2.6.35.4/arch/mips/loongson/common/pm.c +--- linux-2.6.35.4/arch/mips/loongson/common/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/loongson/common/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -147,7 +147,7 @@ static int loongson_pm_valid_state(suspe + } + } + +-static struct platform_suspend_ops loongson_pm_ops = { ++static const struct platform_suspend_ops loongson_pm_ops = { + .valid = loongson_pm_valid_state, + .enter = loongson_pm_enter, + }; +diff -urNp linux-2.6.35.4/arch/mips/mm/fault.c linux-2.6.35.4/arch/mips/mm/fault.c +--- linux-2.6.35.4/arch/mips/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/mips/mm/fault.c 2010-09-17 20:12:09.000000000 -0400 +@@ -26,6 +26,23 @@ + #include <asm/ptrace.h> + #include <asm/highmem.h> /* For VMALLOC_END */ + ++#ifdef CONFIG_PAX_PAGEEXEC ++void pax_report_insns(void *pc) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * This routine handles page faults. It determines the address, + * and the problem, and then passes it off to one of the appropriate +diff -urNp linux-2.6.35.4/arch/parisc/include/asm/compat.h linux-2.6.35.4/arch/parisc/include/asm/compat.h +--- linux-2.6.35.4/arch/parisc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -147,7 +147,7 @@ static inline compat_uptr_t ptr_to_compa + return (u32)(unsigned long)uptr; + } + +-static __inline__ void __user *compat_alloc_user_space(long len) ++static __inline__ void __user *arch_compat_alloc_user_space(long len) + { + struct pt_regs *regs = ¤t->thread.regs; + return (void __user *)regs->gr[30]; +diff -urNp linux-2.6.35.4/arch/parisc/include/asm/elf.h linux-2.6.35.4/arch/parisc/include/asm/elf.h +--- linux-2.6.35.4/arch/parisc/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration.. + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. This could be done in user space, + but it's not easy, and we've already done it here. 
*/ +diff -urNp linux-2.6.35.4/arch/parisc/include/asm/pgtable.h linux-2.6.35.4/arch/parisc/include/asm/pgtable.h +--- linux-2.6.35.4/arch/parisc/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400 +@@ -207,6 +207,17 @@ + #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED) + #define PAGE_COPY PAGE_EXECREAD + #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED) ++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + #define PAGE_KERNEL __pgprot(_PAGE_KERNEL) + #define PAGE_KERNEL_RO __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) + #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE) +diff -urNp linux-2.6.35.4/arch/parisc/kernel/module.c linux-2.6.35.4/arch/parisc/kernel/module.c +--- linux-2.6.35.4/arch/parisc/kernel/module.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/kernel/module.c 2010-09-17 20:12:09.000000000 -0400 +@@ -96,16 +96,38 @@ + + /* three functions to determine where in the module core + * or init pieces the location is */ ++static inline int in_init_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rx && ++ loc < (me->module_init_rx + me->init_size_rx)); ++} ++ ++static inline int in_init_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_init_rw && ++ loc < (me->module_init_rw + me->init_size_rw)); ++} ++ + static inline int in_init(struct module *me, void *loc) + { +- return (loc >= me->module_init && +- loc <= (me->module_init + me->init_size)); ++ return in_init_rx(me, loc) || in_init_rw(me, loc); ++} ++ ++static inline int in_core_rx(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rx && ++ loc < (me->module_core_rx + me->core_size_rx)); ++} ++ ++static inline int in_core_rw(struct module *me, void *loc) ++{ ++ return (loc >= me->module_core_rw && ++ loc < (me->module_core_rw + me->core_size_rw)); + } + + static inline int in_core(struct module *me, void *loc) + { +- return (loc >= me->module_core && +- loc <= (me->module_core + me->core_size)); ++ return in_core_rx(me, loc) || in_core_rw(me, loc); + } + + static inline int in_local(struct module *me, void *loc) +@@ -365,13 +387,13 @@ int module_frob_arch_sections(CONST Elf_ + } + + /* align things a bit */ +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.got_offset = me->core_size; +- me->core_size += gots * sizeof(struct got_entry); +- +- me->core_size = ALIGN(me->core_size, 16); +- me->arch.fdesc_offset = me->core_size; +- me->core_size += fdescs * sizeof(Elf_Fdesc); ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += gots * sizeof(struct got_entry); ++ ++ me->core_size_rw = ALIGN(me->core_size_rw, 16); ++ me->arch.fdesc_offset = me->core_size_rw; ++ me->core_size_rw += fdescs * sizeof(Elf_Fdesc); + + me->arch.got_max = gots; + me->arch.fdesc_max = fdescs; +@@ -389,7 +411,7 @@ static Elf64_Word get_got(struct module + + BUG_ON(value == 0); + +- got 
= me->module_core + me->arch.got_offset; ++ got = me->module_core_rw + me->arch.got_offset; + for (i = 0; got[i].addr; i++) + if (got[i].addr == value) + goto out; +@@ -407,7 +429,7 @@ static Elf64_Word get_got(struct module + #ifdef CONFIG_64BIT + static Elf_Addr get_fdesc(struct module *me, unsigned long value) + { +- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset; ++ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset; + + if (!value) { + printk(KERN_ERR "%s: zero OPD requested!\n", me->name); +@@ -425,7 +447,7 @@ static Elf_Addr get_fdesc(struct module + + /* Create new one */ + fdesc->addr = value; +- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + return (Elf_Addr)fdesc; + } + #endif /* CONFIG_64BIT */ +@@ -849,7 +871,7 @@ register_unwind_table(struct module *me, + + table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr; + end = table + sechdrs[me->arch.unwind_section].sh_size; +- gp = (Elf_Addr)me->module_core + me->arch.got_offset; ++ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset; + + DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n", + me->arch.unwind_section, table, end, gp); +diff -urNp linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c +--- linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/kernel/sys_parisc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,7 @@ static unsigned long get_unshared_area(u + /* At this point: (!vma || addr < vma->vm_end). */ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = vma->vm_end; + } +@@ -79,7 +79,7 @@ static unsigned long get_shared_area(str + /* At this point: (!vma || addr < vma->vm_end). 
*/ + if (TASK_SIZE - len < addr) + return -ENOMEM; +- if (!vma || addr + len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + return addr; + addr = DCACHE_ALIGN(vma->vm_end - offset) + offset; + if (addr < vma->vm_end) /* handle wraparound */ +@@ -98,7 +98,7 @@ unsigned long arch_get_unmapped_area(str + if (flags & MAP_FIXED) + return addr; + if (!addr) +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + + if (filp) { + addr = get_shared_area(filp->f_mapping, addr, len, pgoff); +diff -urNp linux-2.6.35.4/arch/parisc/kernel/traps.c linux-2.6.35.4/arch/parisc/kernel/traps.c +--- linux-2.6.35.4/arch/parisc/kernel/traps.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/kernel/traps.c 2010-09-17 20:12:09.000000000 -0400 +@@ -733,9 +733,7 @@ void notrace handle_interruption(int cod + + down_read(&current->mm->mmap_sem); + vma = find_vma(current->mm,regs->iaoq[0]); +- if (vma && (regs->iaoq[0] >= vma->vm_start) +- && (vma->vm_flags & VM_EXEC)) { +- ++ if (vma && (regs->iaoq[0] >= vma->vm_start)) { + fault_address = regs->iaoq[0]; + fault_space = regs->iasq[0]; + +diff -urNp linux-2.6.35.4/arch/parisc/mm/fault.c linux-2.6.35.4/arch/parisc/mm/fault.c +--- linux-2.6.35.4/arch/parisc/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/parisc/mm/fault.c 2010-09-17 20:12:09.000000000 -0400 +@@ -15,6 +15,7 @@ + #include <linux/sched.h> + #include <linux/interrupt.h> + #include <linux/module.h> ++#include <linux/unistd.h> + + #include <asm/uaccess.h> + #include <asm/traps.h> +@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, ex + static unsigned long + parisc_acctyp(unsigned long code, unsigned int inst) + { +- if (code == 6 || code == 16) ++ if (code == 6 || code == 7 || code == 16) + return VM_EXEC; + + switch (inst & 0xf0000000) { +@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsign + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when rt_sigreturn trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: unpatched PLT emulation */ ++ unsigned int bl, depwi; ++ ++ err = get_user(bl, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4)); ++ ++ if (err) ++ break; ++ ++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) { ++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12; ++ ++ err = get_user(ldw, (unsigned int *)addr); ++ err |= get_user(bv, (unsigned int *)(addr+4)); ++ err |= get_user(ldw2, (unsigned int *)(addr+8)); ++ ++ if (err) ++ break; ++ ++ if (ldw == 0x0E801096U && ++ bv == 0xEAC0C000U && ++ ldw2 == 0x0E881095U) ++ { ++ unsigned int resolver, map; ++ ++ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12)); ++ if (err) ++ break; ++ ++ regs->gr[20] = instruction_pointer(regs)+8; ++ regs->gr[21] = map; ++ regs->gr[22] = resolver; ++ regs->iaoq[0] = resolver | 3UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ return 3; ++ } ++ } ++ } while (0); ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ ++#ifndef CONFIG_PAX_EMUSIGRT ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++#endif ++ ++ do { /* PaX: rt_sigreturn emulation */ ++ unsigned int ldi1, ldi2, bel, nop; ++ ++
err = get_user(ldi1, (unsigned int *)instruction_pointer(regs)); ++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4)); ++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8)); ++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12)); ++ ++ if (err) ++ break; ++ ++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) && ++ ldi2 == 0x3414015AU && ++ bel == 0xE4008200U && ++ nop == 0x08000240U) ++ { ++ regs->gr[25] = (ldi1 & 2) >> 1; ++ regs->gr[20] = __NR_rt_sigreturn; ++ regs->gr[31] = regs->iaoq[1] + 16; ++ regs->sr[0] = regs->iasq[1]; ++ regs->iaoq[0] = 0x100UL; ++ regs->iaoq[1] = regs->iaoq[0] + 4; ++ regs->iasq[0] = regs->sr[2]; ++ regs->iasq[1] = regs->sr[2]; ++ return 2; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + int fixup_exception(struct pt_regs *regs) + { + const struct exception_table_entry *fix; +@@ -192,8 +303,33 @@ good_area: + + acc_type = parisc_acctyp(code,regs->iir); + +- if ((vma->vm_flags & acc_type) != acc_type) ++ if ((vma->vm_flags & acc_type) != acc_type) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) && ++ (address & ~3UL) == instruction_pointer(regs)) ++ { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 3: ++ return; ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ case 2: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + goto bad_area; ++ } + + /* + * If for any reason at all we couldn't handle the fault, make +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/compat.h linux-2.6.35.4/arch/powerpc/include/asm/compat.h +--- linux-2.6.35.4/arch/powerpc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -134,7 +134,7 @@ static inline compat_uptr_t ptr_to_compa + return (u32)(unsigned long)uptr; + } + +-static inline void __user *compat_alloc_user_space(long len) ++static inline void __user *arch_compat_alloc_user_space(long len) + { + struct pt_regs *regs = current->thread.regs; + unsigned long usp = regs->gpr[1]; +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/device.h linux-2.6.35.4/arch/powerpc/include/asm/device.h +--- linux-2.6.35.4/arch/powerpc/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400 +@@ -11,7 +11,7 @@ struct device_node; + + struct dev_archdata { + /* DMA operations on that device */ +- struct dma_map_ops *dma_ops; ++ const struct dma_map_ops *dma_ops; + + /* + * When an iommu is in use, dma_data is used as a ptr to the base of the +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h +--- linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400 +@@ -66,12 +66,13 @@ static inline unsigned long device_to_ma + /* + * Available generic sets of operations + */ ++/* cannot be const */ + 
#ifdef CONFIG_PPC64 + extern struct dma_map_ops dma_iommu_ops; + #endif +-extern struct dma_map_ops dma_direct_ops; ++extern const struct dma_map_ops dma_direct_ops; + +-static inline struct dma_map_ops *get_dma_ops(struct device *dev) ++static inline const struct dma_map_ops *get_dma_ops(struct device *dev) + { + /* We don't handle the NULL dev case for ISA for now. We could + * do it via an out of line call but it is not needed for now. The +@@ -84,7 +85,7 @@ static inline struct dma_map_ops *get_dm + return dev->archdata.dma_ops; + } + +-static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) ++static inline void set_dma_ops(struct device *dev, const struct dma_map_ops *ops) + { + dev->archdata.dma_ops = ops; + } +@@ -118,7 +119,7 @@ static inline void set_dma_offset(struct + + static inline int dma_supported(struct device *dev, u64 mask) + { +- struct dma_map_ops *dma_ops = get_dma_ops(dev); ++ const struct dma_map_ops *dma_ops = get_dma_ops(dev); + + if (unlikely(dma_ops == NULL)) + return 0; +@@ -129,7 +130,7 @@ static inline int dma_supported(struct d + + static inline int dma_set_mask(struct device *dev, u64 dma_mask) + { +- struct dma_map_ops *dma_ops = get_dma_ops(dev); ++ const struct dma_map_ops *dma_ops = get_dma_ops(dev); + + if (unlikely(dma_ops == NULL)) + return -EIO; +@@ -144,7 +145,7 @@ static inline int dma_set_mask(struct de + static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) + { +- struct dma_map_ops *dma_ops = get_dma_ops(dev); ++ const struct dma_map_ops *dma_ops = get_dma_ops(dev); + void *cpu_addr; + + BUG_ON(!dma_ops); +@@ -159,7 +160,7 @@ static inline void *dma_alloc_coherent(s + static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) + { +- struct dma_map_ops *dma_ops = get_dma_ops(dev); ++ const struct dma_map_ops *dma_ops = get_dma_ops(dev); + + BUG_ON(!dma_ops); + +@@ -170,7 +171,7 @@ static inline void dma_free_coherent(str + + static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) + { +- struct dma_map_ops *dma_ops = get_dma_ops(dev); ++ const struct dma_map_ops *dma_ops = get_dma_ops(dev); + + if (dma_ops->mapping_error) + return dma_ops->mapping_error(dev, dma_addr); +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/elf.h linux-2.6.35.4/arch/powerpc/include/asm/elf.h +--- linux-2.6.35.4/arch/powerpc/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -178,8 +178,19 @@ typedef elf_fpreg_t elf_vsrreghalf_t32[E + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + +-extern unsigned long randomize_et_dyn(unsigned long base); +-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000)) ++#define ELF_ET_DYN_BASE (0x20000000) ++ ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (0x10000000UL) ++ ++#ifdef __powerpc64__ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 16 : 28) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 
16 : 28) ++#else ++#define PAX_DELTA_MMAP_LEN 15 ++#define PAX_DELTA_STACK_LEN 15 ++#endif ++#endif + + /* + * Our registers are always unsigned longs, whether we're a 32 bit +@@ -274,9 +285,6 @@ extern int arch_setup_additional_pages(s + (0x7ff >> (PAGE_SHIFT - 12)) : \ + (0x3ffff >> (PAGE_SHIFT - 12))) + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif /* __KERNEL__ */ + + /* +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/iommu.h linux-2.6.35.4/arch/powerpc/include/asm/iommu.h +--- linux-2.6.35.4/arch/powerpc/include/asm/iommu.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/iommu.h 2010-09-17 20:12:09.000000000 -0400 +@@ -116,6 +116,9 @@ extern void iommu_init_early_iSeries(voi + extern void iommu_init_early_dart(void); + extern void iommu_init_early_pasemi(void); + ++/* dma-iommu.c */ ++extern int dma_iommu_dma_supported(struct device *dev, u64 mask); ++ + #ifdef CONFIG_PCI + extern void pci_iommu_init(void); + extern void pci_direct_iommu_init(void); +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h +--- linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -27,6 +27,7 @@ enum km_type { + KM_PPC_SYNC_PAGE, + KM_PPC_SYNC_ICACHE, + KM_KDB, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/page_64.h linux-2.6.35.4/arch/powerpc/include/asm/page_64.h +--- linux-2.6.35.4/arch/powerpc/include/asm/page_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/page_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -172,15 +172,18 @@ do { \ + * stack by default, so in the absense of a PT_GNU_STACK program header + * we turn execute permission off. + */ +-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_STACK_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + ++#ifndef CONFIG_PAX_PAGEEXEC + #define VM_STACK_DEFAULT_FLAGS \ + (test_thread_flag(TIF_32BIT) ? \ + VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64) ++#endif + + #include <asm-generic/getorder.h> + +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/page.h linux-2.6.35.4/arch/powerpc/include/asm/page.h +--- linux-2.6.35.4/arch/powerpc/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400 +@@ -129,8 +129,9 @@ extern phys_addr_t kernstart_addr; + * and needs to be executable. This means the whole heap ends + * up being executable. + */ +-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \ +- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) ++#define VM_DATA_DEFAULT_FLAGS32 \ ++ (((current->personality & READ_IMPLIES_EXEC) ? 
VM_EXEC : 0) | \ ++ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) + + #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \ + VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC) +@@ -158,6 +159,9 @@ extern phys_addr_t kernstart_addr; + #define is_kernel_addr(x) ((x) >= PAGE_OFFSET) + #endif + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #ifndef __ASSEMBLY__ + + #undef STRICT_MM_TYPECHECKS +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/pci.h linux-2.6.35.4/arch/powerpc/include/asm/pci.h +--- linux-2.6.35.4/arch/powerpc/include/asm/pci.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/pci.h 2010-09-17 20:12:09.000000000 -0400 +@@ -65,8 +65,8 @@ static inline int pci_get_legacy_ide_irq + } + + #ifdef CONFIG_PCI +-extern void set_pci_dma_ops(struct dma_map_ops *dma_ops); +-extern struct dma_map_ops *get_pci_dma_ops(void); ++extern void set_pci_dma_ops(const struct dma_map_ops *dma_ops); ++extern const struct dma_map_ops *get_pci_dma_ops(void); + #else /* CONFIG_PCI */ + #define set_pci_dma_ops(d) + #define get_pci_dma_ops() NULL +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h +--- linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/pte-hash32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -21,6 +21,7 @@ + #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */ + #define _PAGE_USER 0x004 /* usermode access allowed */ + #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */ ++#define _PAGE_EXEC _PAGE_GUARDED + #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */ + #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */ + #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */ +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/reg.h linux-2.6.35.4/arch/powerpc/include/asm/reg.h +--- linux-2.6.35.4/arch/powerpc/include/asm/reg.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/reg.h 2010-09-17 20:12:09.000000000 -0400 +@@ -191,6 +191,7 @@ + #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */ + #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */ + #define DSISR_NOHPTE 0x40000000 /* no translation found */ ++#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */ + #define DSISR_PROTFAULT 0x08000000 /* protection fault */ + #define DSISR_ISSTORE 0x02000000 /* access was a store */ + #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */ +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h +--- linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/swiotlb.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,7 +13,7 @@ + + #include <linux/swiotlb.h> + +-extern struct dma_map_ops swiotlb_dma_ops; ++extern const struct dma_map_ops swiotlb_dma_ops; + + static inline void dma_mark_clean(void *addr, size_t size) {} + +diff -urNp linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h +--- linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,6 +13,8 @@ + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 + ++extern void check_object_size(const void *ptr, unsigned long n, bool to); 
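The check_object_size() hook declared just above is what the hardened copy_from_user()/copy_to_user() wrappers added further down call to compare a copy length against the size of the object behind the pointer; the same wrappers also reject lengths whose sign bit is set via the (long)n < 0 test. As a rough userspace analogy of that bounds policy — a minimal sketch only, where the struct, helper name, and error handling are invented for illustration and are not the kernel code:

#include <stdio.h>
#include <string.h>

struct request {
    char name[16];
    int  flags;
};

/* Illustrative stand-in for the object-size check: refuse any copy
 * that would run past the end of the destination object. */
static int checked_copy(void *dst, size_t dst_size,
                        const void *src, size_t n)
{
    if (n > dst_size) {
        fprintf(stderr, "usercopy rejected: %zu > %zu\n", n, dst_size);
        return -1;
    }
    memcpy(dst, src, n);
    return 0;
}

int main(void)
{
    struct request req;
    char payload[64];

    memset(payload, 'A', sizeof(payload));
    checked_copy(req.name, sizeof(req.name), payload, 8);  /* fits */
    checked_copy(req.name, sizeof(req.name), payload, 64); /* would spill into req.flags and beyond: rejected */
    return 0;
}

The point of hooking the copy itself is that the length is often attacker-influenced at runtime, so a compile-time check alone (as with __compiletime_object_size in the s390 hunk below) cannot catch every overflow.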
++ + /* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with +@@ -327,52 +329,6 @@ do { \ + extern unsigned long __copy_tofrom_user(void __user *to, + const void __user *from, unsigned long size); + +-#ifndef __powerpc64__ +- +-static inline unsigned long copy_from_user(void *to, +- const void __user *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_READ, from, n)) +- return __copy_tofrom_user((__force void __user *)to, from, n); +- if ((unsigned long)from < TASK_SIZE) { +- over = (unsigned long)from + n - TASK_SIZE; +- return __copy_tofrom_user((__force void __user *)to, from, +- n - over) + over; +- } +- return n; +-} +- +-static inline unsigned long copy_to_user(void __user *to, +- const void *from, unsigned long n) +-{ +- unsigned long over; +- +- if (access_ok(VERIFY_WRITE, to, n)) +- return __copy_tofrom_user(to, (__force void __user *)from, n); +- if ((unsigned long)to < TASK_SIZE) { +- over = (unsigned long)to + n - TASK_SIZE; +- return __copy_tofrom_user(to, (__force void __user *)from, +- n - over) + over; +- } +- return n; +-} +- +-#else /* __powerpc64__ */ +- +-#define __copy_in_user(to, from, size) \ +- __copy_tofrom_user((to), (from), (size)) +- +-extern unsigned long copy_from_user(void *to, const void __user *from, +- unsigned long n); +-extern unsigned long copy_to_user(void __user *to, const void *from, +- unsigned long n); +-extern unsigned long copy_in_user(void __user *to, const void __user *from, +- unsigned long n); +- +-#endif /* __powerpc64__ */ +- + static inline unsigned long __copy_from_user_inatomic(void *to, + const void __user *from, unsigned long n) + { +@@ -396,6 +352,10 @@ static inline unsigned long __copy_from_ + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ + return __copy_tofrom_user((__force void __user *)to, from, n); + } + +@@ -422,6 +382,10 @@ static inline unsigned long __copy_to_us + if (ret == 0) + return 0; + } ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_tofrom_user(to, (__force const void __user *)from, n); + } + +@@ -439,6 +403,92 @@ static inline unsigned long __copy_to_us + return __copy_to_user_inatomic(to, from, size); + } + ++#ifndef __powerpc64__ ++ ++static inline unsigned long __must_check copy_from_user(void *to, ++ const void __user *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_READ, from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ return __copy_tofrom_user((__force void __user *)to, from, n); ++ } ++ if ((unsigned long)from < TASK_SIZE) { ++ over = (unsigned long)from + n - TASK_SIZE; ++ if (!__builtin_constant_p(n - over)) ++ check_object_size(to, n - over, false); ++ return __copy_tofrom_user((__force void __user *)to, from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, ++ const void *from, unsigned long n) ++{ ++ unsigned long over; ++ ++ if ((long)n < 0) ++ return n; ++ ++ if (access_ok(VERIFY_WRITE, to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ return __copy_tofrom_user(to, (__force void __user *)from, n); ++ } ++ if ((unsigned long)to < TASK_SIZE) { ++ over = (unsigned long)to + n - TASK_SIZE; ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n - over, true); ++ return 
__copy_tofrom_user(to, (__force void __user *)from, ++ n - over) + over; ++ } ++ return n; ++} ++ ++#else /* __powerpc64__ */ ++ ++#define __copy_in_user(to, from, size) \ ++ __copy_tofrom_user((to), (from), (size)) ++ ++static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ ++ if (likely(access_ok(VERIFY_READ, from, n))) ++ n = __copy_from_user(to, from, n); ++ else ++ memset(to, 0, n); ++ return n; ++} ++ ++static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ if ((long)n < 0 || n > INT_MAX) ++ return n; ++ ++ if (likely(access_ok(VERIFY_WRITE, to, n))) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ n = __copy_to_user(to, from, n); ++ } ++ return n; ++} ++ ++extern unsigned long copy_in_user(void __user *to, const void __user *from, ++ unsigned long n); ++ ++#endif /* __powerpc64__ */ ++ + extern unsigned long __clear_user(void __user *addr, unsigned long size); + + static inline unsigned long clear_user(void __user *addr, unsigned long size) +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma.c linux-2.6.35.4/arch/powerpc/kernel/dma.c +--- linux-2.6.35.4/arch/powerpc/kernel/dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -135,7 +135,7 @@ static inline void dma_direct_sync_singl + } + #endif + +-struct dma_map_ops dma_direct_ops = { ++const struct dma_map_ops dma_direct_ops = { + .alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_sg = dma_direct_map_sg, +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c +--- linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/dma-iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -70,7 +70,7 @@ static void dma_iommu_unmap_sg(struct de + } + + /* We support DMA to/from any memory page via the iommu */ +-static int dma_iommu_dma_supported(struct device *dev, u64 mask) ++int dma_iommu_dma_supported(struct device *dev, u64 mask) + { + struct iommu_table *tbl = get_iommu_table_base(dev); + +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c +--- linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/dma-swiotlb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -31,7 +31,7 @@ unsigned int ppc_swiotlb_enable; + * map_page, and unmap_page on highmem, use normal dma_ops + * for everything else. 
+ */ +-struct dma_map_ops swiotlb_dma_ops = { ++const struct dma_map_ops swiotlb_dma_ops = { + .alloc_coherent = dma_direct_alloc_coherent, + .free_coherent = dma_direct_free_coherent, + .map_sg = swiotlb_map_sg_attrs, +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S +--- linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/exceptions-64e.S 2010-09-17 20:12:09.000000000 -0400 +@@ -455,6 +455,7 @@ storage_fault_common: + std r14,_DAR(r1) + std r15,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + mr r4,r14 + mr r5,r15 + ld r14,PACA_EXGEN+EX_R14(r13) +@@ -464,8 +465,7 @@ storage_fault_common: + cmpdi r3,0 + bne- 1f + b .ret_from_except_lite +-1: bl .save_nvgprs +- mr r5,r3 ++1: mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + ld r4,_DAR(r1) + bl .bad_page_fault +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S +--- linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/exceptions-64s.S 2010-09-17 20:12:09.000000000 -0400 +@@ -840,10 +840,10 @@ handle_page_fault: + 11: ld r4,_DAR(r1) + ld r5,_DSISR(r1) + addi r3,r1,STACK_FRAME_OVERHEAD ++ bl .save_nvgprs + bl .do_page_fault + cmpdi r3,0 + beq+ 13f +- bl .save_nvgprs + mr r5,r3 + addi r3,r1,STACK_FRAME_OVERHEAD + lwz r4,_DAR(r1) +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c +--- linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/ibmebus.c 2010-09-17 20:12:09.000000000 -0400 +@@ -128,7 +128,7 @@ static int ibmebus_dma_supported(struct + return 1; + } + +-static struct dma_map_ops ibmebus_dma_ops = { ++static const struct dma_map_ops ibmebus_dma_ops = { + .alloc_coherent = ibmebus_alloc_coherent, + .free_coherent = ibmebus_free_coherent, + .map_sg = ibmebus_map_sg, +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/kgdb.c linux-2.6.35.4/arch/powerpc/kernel/kgdb.c +--- linux-2.6.35.4/arch/powerpc/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -128,7 +128,7 @@ static int kgdb_handle_breakpoint(struct + if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0) + return 0; + +- if (*(u32 *) (regs->nip) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) ++ if (*(u32 *) (regs->nip) == *(const u32 *) (&arch_kgdb_ops.gdb_bpt_instr)) + regs->nip += 4; + + return 1; +@@ -360,7 +360,7 @@ int kgdb_arch_handle_exception(int vecto + /* + * Global data + */ +-struct kgdb_arch arch_kgdb_ops = { ++const struct kgdb_arch arch_kgdb_ops = { + .gdb_bpt_instr = {0x7d, 0x82, 0x10, 0x08}, + }; + +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/module_32.c linux-2.6.35.4/arch/powerpc/kernel/module_32.c +--- linux-2.6.35.4/arch/powerpc/kernel/module_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/module_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr + me->arch.core_plt_section = i; + } + if (!me->arch.core_plt_section || !me->arch.init_plt_section) { +- printk("Module doesn't contain .plt or .init.plt sections.\n"); ++ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name); + return -ENOEXEC; + } + +@@ -203,11 +203,16 @@ static uint32_t do_plt_call(void *locati + + DEBUGP("Doing 
plt for call to 0x%x at 0x%x\n", val, (unsigned int)location); + /* Init, or core PLT? */ +- if (location >= mod->module_core +- && location < mod->module_core + mod->core_size) ++ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) || ++ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw)) + entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr; +- else ++ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) || ++ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw)) + entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr; ++ else { ++ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name); ++ return ~0UL; ++ } + + /* Find this entry, or if that fails, the next avail. entry */ + while (entry->jump[0]) { +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/module.c linux-2.6.35.4/arch/powerpc/kernel/module.c +--- linux-2.6.35.4/arch/powerpc/kernel/module.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/module.c 2010-09-17 20:12:09.000000000 -0400 +@@ -31,11 +31,24 @@ + + LIST_HEAD(module_bug_list); + ++#ifdef CONFIG_PAX_KERNEXEC + void *module_alloc(unsigned long size) + { + if (size == 0) + return NULL; + ++ return vmalloc(size); ++} ++ ++void *module_alloc_exec(unsigned long size) ++#else ++void *module_alloc(unsigned long size) ++#endif ++ ++{ ++ if (size == 0) ++ return NULL; ++ + return vmalloc_exec(size); + } + +@@ -45,6 +58,13 @@ void module_free(struct module *mod, voi + vfree(module_region); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ module_free(mod, module_region); ++} ++#endif ++ + static const Elf_Shdr *find_section(const Elf_Ehdr *hdr, + const Elf_Shdr *sechdrs, + const char *name) +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/pci-common.c linux-2.6.35.4/arch/powerpc/kernel/pci-common.c +--- linux-2.6.35.4/arch/powerpc/kernel/pci-common.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/pci-common.c 2010-09-17 20:12:09.000000000 -0400 +@@ -51,14 +51,14 @@ resource_size_t isa_mem_base; + unsigned int ppc_pci_flags = 0; + + +-static struct dma_map_ops *pci_dma_ops = &dma_direct_ops; ++static const struct dma_map_ops *pci_dma_ops = &dma_direct_ops; + +-void set_pci_dma_ops(struct dma_map_ops *dma_ops) ++void set_pci_dma_ops(const struct dma_map_ops *dma_ops) + { + pci_dma_ops = dma_ops; + } + +-struct dma_map_ops *get_pci_dma_ops(void) ++const struct dma_map_ops *get_pci_dma_ops(void) + { + return pci_dma_ops; + } +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/process.c linux-2.6.35.4/arch/powerpc/kernel/process.c +--- linux-2.6.35.4/arch/powerpc/kernel/process.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/process.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1215,51 +1215,3 @@ unsigned long arch_align_stack(unsigned + sp -= get_random_int() & ~PAGE_MASK; + return sp & ~0xf; + } +- +-static inline unsigned long brk_rnd(void) +-{ +- unsigned long rnd = 0; +- +- /* 8MB for 32bit, 1GB for 64bit */ +- if (is_32bit_task()) +- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT))); +- else +- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT))); +- +- return rnd << PAGE_SHIFT; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long base = mm->brk; +- unsigned long ret; +- +-#ifdef CONFIG_PPC_STD_MMU_64 +- /* +- * If we are using 1TB 
segments and we are allowed to randomise +- * the heap, we can put it above 1TB so it is backed by a 1TB +- * segment. Otherwise the heap will be in the bottom 1TB +- * which always uses 256MB segments and this may result in a +- * performance penalty. +- */ +- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T)) +- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T); +-#endif +- +- ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < mm->brk) +- return mm->brk; +- +- return ret; +-} +- +-unsigned long randomize_et_dyn(unsigned long base) +-{ +- unsigned long ret = PAGE_ALIGN(base + brk_rnd()); +- +- if (ret < base) +- return base; +- +- return ret; +-} +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/signal_32.c linux-2.6.35.4/arch/powerpc/kernel/signal_32.c +--- linux-2.6.35.4/arch/powerpc/kernel/signal_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/signal_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -857,7 +857,7 @@ int handle_rt_signal32(unsigned long sig + /* Save user registers on the stack */ + frame = &rt_sf->uc.uc_mcontext; + addr = frame; +- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + if (save_user_regs(regs, frame, 0, 1)) + goto badframe; + regs->link = current->mm->context.vdso_base + vdso32_rt_sigtramp; +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/signal_64.c linux-2.6.35.4/arch/powerpc/kernel/signal_64.c +--- linux-2.6.35.4/arch/powerpc/kernel/signal_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/signal_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -429,7 +429,7 @@ int handle_rt_signal64(int signr, struct + current->thread.fpscr.val = 0; + + /* Set up to return from userspace. 
*/ +- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) { ++ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) { + regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp; + } else { + err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]); +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/vdso.c linux-2.6.35.4/arch/powerpc/kernel/vdso.c +--- linux-2.6.35.4/arch/powerpc/kernel/vdso.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/vdso.c 2010-09-17 20:12:09.000000000 -0400 +@@ -36,6 +36,7 @@ + #include <asm/firmware.h> + #include <asm/vdso.h> + #include <asm/vdso_datapage.h> ++#include <asm/mman.h> + + #include "setup.h" + +@@ -220,7 +221,7 @@ int arch_setup_additional_pages(struct l + vdso_base = VDSO32_MBASE; + #endif + +- current->mm->context.vdso_base = 0; ++ current->mm->context.vdso_base = ~0UL; + + /* vDSO has a problem and was disabled, just don't "enable" it for the + * process +@@ -240,7 +241,7 @@ int arch_setup_additional_pages(struct l + vdso_base = get_unmapped_area(NULL, vdso_base, + (vdso_pages << PAGE_SHIFT) + + ((VDSO_ALIGNMENT - 1) & PAGE_MASK), +- 0, 0); ++ 0, MAP_PRIVATE | MAP_EXECUTABLE); + if (IS_ERR_VALUE(vdso_base)) { + rc = vdso_base; + goto fail_mmapsem; +diff -urNp linux-2.6.35.4/arch/powerpc/kernel/vio.c linux-2.6.35.4/arch/powerpc/kernel/vio.c +--- linux-2.6.35.4/arch/powerpc/kernel/vio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/kernel/vio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -602,11 +602,12 @@ static void vio_dma_iommu_unmap_sg(struc + vio_cmo_dealloc(viodev, alloc_size); + } + +-struct dma_map_ops vio_dma_mapping_ops = { ++static const struct dma_map_ops vio_dma_mapping_ops = { + .alloc_coherent = vio_dma_iommu_alloc_coherent, + .free_coherent = vio_dma_iommu_free_coherent, + .map_sg = vio_dma_iommu_map_sg, + .unmap_sg = vio_dma_iommu_unmap_sg, ++ .dma_supported = dma_iommu_dma_supported, + .map_page = vio_dma_iommu_map_page, + .unmap_page = vio_dma_iommu_unmap_page, + +@@ -860,7 +861,6 @@ static void vio_cmo_bus_remove(struct vi + + static void vio_cmo_set_dma_ops(struct vio_dev *viodev) + { +- vio_dma_mapping_ops.dma_supported = dma_iommu_ops.dma_supported; + viodev->dev.archdata.dma_ops = &vio_dma_mapping_ops; + } + +diff -urNp linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c +--- linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/lib/usercopy_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -9,22 +9,6 @@ + #include <linux/module.h> + #include <asm/uaccess.h> + +-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_READ, from, n))) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; +-} +- +-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) +-{ +- if (likely(access_ok(VERIFY_WRITE, to, n))) +- n = __copy_to_user(to, from, n); +- return n; +-} +- + unsigned long copy_in_user(void __user *to, const void __user *from, + unsigned long n) + { +@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user * + return n; + } + +-EXPORT_SYMBOL(copy_from_user); +-EXPORT_SYMBOL(copy_to_user); + EXPORT_SYMBOL(copy_in_user); + +diff -urNp linux-2.6.35.4/arch/powerpc/mm/fault.c linux-2.6.35.4/arch/powerpc/mm/fault.c +--- linux-2.6.35.4/arch/powerpc/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/mm/fault.c 2010-09-17 
20:12:09.000000000 -0400 +@@ -30,6 +30,10 @@ + #include <linux/kprobes.h> + #include <linux/kdebug.h> + #include <linux/perf_event.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> ++#include <linux/unistd.h> + + #include <asm/firmware.h> + #include <asm/page.h> +@@ -41,6 +45,7 @@ + #include <asm/tlbflush.h> + #include <asm/siginfo.h> + #include <mm/mmu_decl.h> ++#include <asm/ptrace.h> + + #ifdef CONFIG_KPROBES + static inline int notify_page_fault(struct pt_regs *regs) +@@ -64,6 +69,33 @@ static inline int notify_page_fault(stru + } + #endif + ++#ifdef CONFIG_PAX_PAGEEXEC ++/* ++ * PaX: decide what to do with offenders (regs->nip = fault address) ++ * ++ * returns 1 when task should be killed ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ return 1; ++} ++ ++void pax_report_insns(void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 5; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int __user *)pc+i)) ++ printk(KERN_CONT "???????? "); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + /* + * Check whether the instruction at regs->nip is a store using + * an update addressing form which will update r1. +@@ -134,7 +166,7 @@ int __kprobes do_page_fault(struct pt_re + * indicate errors in DSISR but can validly be set in SRR1. + */ + if (trap == 0x400) +- error_code &= 0x48200000; ++ error_code &= 0x58200000; + else + is_write = error_code & DSISR_ISSTORE; + #else +@@ -257,7 +289,7 @@ good_area: + * "undefined". Of those that can be set, this is the only + * one which seems bad. + */ +- if (error_code & 0x10000000) ++ if (error_code & DSISR_GUARDED) + /* Guarded storage error. */ + goto bad_area; + #endif /* CONFIG_8xx */ +@@ -272,7 +304,7 @@ good_area: + * processors use the same I/D cache coherency mechanism + * as embedded. 
+ */ +- if (error_code & DSISR_PROTFAULT) ++ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED)) + goto bad_area; + #endif /* CONFIG_PPC_STD_MMU */ + +@@ -341,6 +373,23 @@ bad_area: + bad_area_nosemaphore: + /* User mode accesses cause a SIGSEGV */ + if (user_mode(regs)) { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (mm->pax_flags & MF_PAX_PAGEEXEC) { ++#ifdef CONFIG_PPC_STD_MMU ++ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) { ++#else ++ if (is_exec && regs->nip == address) { ++#endif ++ switch (pax_handle_fetch_fault(regs)) { ++ } ++ ++ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]); ++ do_group_exit(SIGKILL); ++ } ++ } ++#endif ++ + _exception(SIGSEGV, regs, code, address); + return 0; + } +diff -urNp linux-2.6.35.4/arch/powerpc/mm/mmap_64.c linux-2.6.35.4/arch/powerpc/mm/mmap_64.c +--- linux-2.6.35.4/arch/powerpc/mm/mmap_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/mm/mmap_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -99,10 +99,22 @@ void arch_pick_mmap_layout(struct mm_str + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff -urNp linux-2.6.35.4/arch/powerpc/mm/slice.c linux-2.6.35.4/arch/powerpc/mm/slice.c +--- linux-2.6.35.4/arch/powerpc/mm/slice.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/mm/slice.c 2010-09-17 20:12:09.000000000 -0400 +@@ -98,10 +98,9 @@ static int slice_area_is_free(struct mm_ + if ((mm->task_size - len) < addr) + return 0; + vma = find_vma(mm, addr); +- return (!vma || (addr + len) <= vma->vm_start); ++ return check_heap_stack_gap(vma, addr, len); + } + +-static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice) + { + return !slice_area_is_free(mm, slice << SLICE_LOW_SHIFT, + 1ul << SLICE_LOW_SHIFT); +@@ -256,7 +255,7 @@ full_search: + addr = _ALIGN_UP(addr + 1, 1ul << SLICE_HIGH_SHIFT); + continue; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -336,7 +335,7 @@ static unsigned long slice_find_area_top + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || (addr + len) <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ + if (use_cache) + mm->free_area_cache = addr; +@@ -426,6 +425,11 @@ unsigned long slice_get_unmapped_area(un + if (fixed && addr > (mm->task_size - len)) + return -EINVAL; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP)) ++ addr = 0; ++#endif ++ + /* If hint, make sure it matches our alignment restrictions */ + if (!fixed && addr) { + addr = _ALIGN_UP(addr, 1ul << pshift); +diff -urNp linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c +--- linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/platforms/52xx/lite5200_pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -235,7 +235,7 
+235,7 @@ static void lite5200_pm_end(void) + lite5200_pm_target_state = PM_SUSPEND_ON; + } + +-static struct platform_suspend_ops lite5200_pm_ops = { ++static const struct platform_suspend_ops lite5200_pm_ops = { + .valid = lite5200_pm_valid, + .begin = lite5200_pm_begin, + .prepare = lite5200_pm_prepare, +diff -urNp linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c +--- linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/platforms/52xx/mpc52xx_pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -189,7 +189,7 @@ void mpc52xx_pm_finish(void) + iounmap(mbar); + } + +-static struct platform_suspend_ops mpc52xx_pm_ops = { ++static const struct platform_suspend_ops mpc52xx_pm_ops = { + .valid = mpc52xx_pm_valid, + .prepare = mpc52xx_pm_prepare, + .enter = mpc52xx_pm_enter, +diff -urNp linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c +--- linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/platforms/83xx/suspend.c 2010-09-17 20:12:09.000000000 -0400 +@@ -311,7 +311,7 @@ static int mpc83xx_is_pci_agent(void) + return ret; + } + +-static struct platform_suspend_ops mpc83xx_suspend_ops = { ++static const struct platform_suspend_ops mpc83xx_suspend_ops = { + .valid = mpc83xx_suspend_valid, + .begin = mpc83xx_suspend_begin, + .enter = mpc83xx_suspend_enter, +diff -urNp linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c +--- linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/platforms/cell/iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -642,7 +642,7 @@ static int dma_fixed_dma_supported(struc + + static int dma_set_mask_and_switch(struct device *dev, u64 dma_mask); + +-struct dma_map_ops dma_iommu_fixed_ops = { ++const struct dma_map_ops dma_iommu_fixed_ops = { + .alloc_coherent = dma_fixed_alloc_coherent, + .free_coherent = dma_fixed_free_coherent, + .map_sg = dma_fixed_map_sg, +diff -urNp linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c +--- linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/platforms/ps3/system-bus.c 2010-09-17 20:12:09.000000000 -0400 +@@ -695,7 +695,7 @@ static int ps3_dma_supported(struct devi + return mask >= DMA_BIT_MASK(32); + } + +-static struct dma_map_ops ps3_sb_dma_ops = { ++static const struct dma_map_ops ps3_sb_dma_ops = { + .alloc_coherent = ps3_alloc_coherent, + .free_coherent = ps3_free_coherent, + .map_sg = ps3_sb_map_sg, +@@ -705,7 +705,7 @@ static struct dma_map_ops ps3_sb_dma_ops + .unmap_page = ps3_unmap_page, + }; + +-static struct dma_map_ops ps3_ioc0_dma_ops = { ++static const struct dma_map_ops ps3_ioc0_dma_ops = { + .alloc_coherent = ps3_alloc_coherent, + .free_coherent = ps3_free_coherent, + .map_sg = ps3_ioc0_map_sg, +diff -urNp linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c +--- linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/powerpc/sysdev/fsl_pmc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -53,7 +53,7 @@ static int pmc_suspend_valid(suspend_sta + return 1; + } + +-static struct platform_suspend_ops pmc_suspend_ops = { ++static const struct platform_suspend_ops
pmc_suspend_ops = { + .valid = pmc_suspend_valid, + .enter = pmc_suspend_enter, + }; +diff -urNp linux-2.6.35.4/arch/s390/include/asm/compat.h linux-2.6.35.4/arch/s390/include/asm/compat.h +--- linux-2.6.35.4/arch/s390/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -181,7 +181,7 @@ static inline int is_compat_task(void) + + #endif + +-static inline void __user *compat_alloc_user_space(long len) ++static inline void __user *arch_compat_alloc_user_space(long len) + { + unsigned long stack; + +diff -urNp linux-2.6.35.4/arch/s390/include/asm/elf.h linux-2.6.35.4/arch/s390/include/asm/elf.h +--- linux-2.6.35.4/arch/s390/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -163,6 +163,13 @@ extern unsigned int vdso_enabled; + that it will "exec", and that there is sufficient room for the brk. */ + #define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26 ) ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. */ + +diff -urNp linux-2.6.35.4/arch/s390/include/asm/uaccess.h linux-2.6.35.4/arch/s390/include/asm/uaccess.h +--- linux-2.6.35.4/arch/s390/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400 +@@ -234,6 +234,10 @@ static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (access_ok(VERIFY_WRITE, to, n)) + n = __copy_to_user(to, from, n); + return n; +@@ -259,6 +263,9 @@ copy_to_user(void __user *to, const void + static inline unsigned long __must_check + __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n) && (n <= 256)) + return uaccess.copy_from_user_small(n, from, to); + else +@@ -293,6 +300,10 @@ copy_from_user(void *to, const void __us + unsigned int sz = __compiletime_object_size(to); + + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (unlikely(sz != -1 && sz < n)) { + copy_from_user_overflow(); + return n; +diff -urNp linux-2.6.35.4/arch/s390/Kconfig linux-2.6.35.4/arch/s390/Kconfig +--- linux-2.6.35.4/arch/s390/Kconfig 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/Kconfig 2010-09-17 20:12:09.000000000 -0400 +@@ -230,13 +230,12 @@ config AUDIT_ARCH + + config S390_EXEC_PROTECT + bool "Data execute protection" ++ default y + help + This option allows to enable a buffer overflow protection for user +- space programs and it also selects the addressing mode option above. +- The kernel parameter noexec=on will enable this feature and also +- switch the addressing modes, default is disabled. Enabling this (via +- kernel parameter) on machines earlier than IBM System z9-109 EC/BC +- will reduce system performance. ++ space programs. ++ Enabling this on machines earlier than IBM System z9-109 EC/BC will ++ reduce system performance. 
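One small change recurs throughout the hunks above: ops tables such as platform_suspend_ops, dma_map_ops and kgdb_arch become const. A const-qualified table of function pointers can be placed in read-only memory, so a kernel write primitive can no longer be turned into control flow by overwriting a callback; the vio.c hunk earlier shows the price, in that a member previously patched at runtime (.dma_supported) now has to be filled in statically. A standalone sketch of the idea — plain C with invented names, not kernel code:

#include <stdio.h>

struct suspend_ops {
    int (*valid)(int state);
    int (*enter)(int state);
};

static int demo_valid(int state) { return state == 1; }
static int demo_enter(int state) { printf("suspending to state %d\n", state); return 0; }

/* 'static const' lets the toolchain place the table in .rodata,
 * where a stray or malicious write faults instead of succeeding. */
static const struct suspend_ops demo_pm_ops = {
    .valid = demo_valid,
    .enter = demo_enter,
};

int main(void)
{
    if (demo_pm_ops.valid(1))
        demo_pm_ops.enter(1);
    /* demo_pm_ops.enter = NULL;  -- would now be a compile-time error */
    return 0;
}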
+ + comment "Code generation options" + +diff -urNp linux-2.6.35.4/arch/s390/kernel/module.c linux-2.6.35.4/arch/s390/kernel/module.c +--- linux-2.6.35.4/arch/s390/kernel/module.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/kernel/module.c 2010-09-17 20:12:09.000000000 -0400 +@@ -168,11 +168,11 @@ module_frob_arch_sections(Elf_Ehdr *hdr, + + /* Increase core size by size of got & plt and set start + offsets for got and plt. */ +- me->core_size = ALIGN(me->core_size, 4); +- me->arch.got_offset = me->core_size; +- me->core_size += me->arch.got_size; +- me->arch.plt_offset = me->core_size; +- me->core_size += me->arch.plt_size; ++ me->core_size_rw = ALIGN(me->core_size_rw, 4); ++ me->arch.got_offset = me->core_size_rw; ++ me->core_size_rw += me->arch.got_size; ++ me->arch.plt_offset = me->core_size_rx; ++ me->core_size_rx += me->arch.plt_size; + return 0; + } + +@@ -258,7 +258,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base + if (info->got_initialized == 0) { + Elf_Addr *gotent; + +- gotent = me->module_core + me->arch.got_offset + ++ gotent = me->module_core_rw + me->arch.got_offset + + info->got_offset; + *gotent = val; + info->got_initialized = 1; +@@ -282,7 +282,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base + else if (r_type == R_390_GOTENT || + r_type == R_390_GOTPLTENT) + *(unsigned int *) loc = +- (val + (Elf_Addr) me->module_core - loc) >> 1; ++ (val + (Elf_Addr) me->module_core_rw - loc) >> 1; + else if (r_type == R_390_GOT64 || + r_type == R_390_GOTPLT64) + *(unsigned long *) loc = val; +@@ -296,7 +296,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base + case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */ + if (info->plt_initialized == 0) { + unsigned int *ip; +- ip = me->module_core + me->arch.plt_offset + ++ ip = me->module_core_rx + me->arch.plt_offset + + info->plt_offset; + #ifndef CONFIG_64BIT + ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */ +@@ -321,7 +321,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base + val - loc + 0xffffUL < 0x1ffffeUL) || + (r_type == R_390_PLT32DBL && + val - loc + 0xffffffffULL < 0x1fffffffeULL))) +- val = (Elf_Addr) me->module_core + ++ val = (Elf_Addr) me->module_core_rx + + me->arch.plt_offset + + info->plt_offset; + val += rela->r_addend - loc; +@@ -343,7 +343,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base + case R_390_GOTOFF32: /* 32 bit offset to GOT. */ + case R_390_GOTOFF64: /* 64 bit offset to GOT. */ + val = val + rela->r_addend - +- ((Elf_Addr) me->module_core + me->arch.got_offset); ++ ((Elf_Addr) me->module_core_rw + me->arch.got_offset); + if (r_type == R_390_GOTOFF16) + *(unsigned short *) loc = val; + else if (r_type == R_390_GOTOFF32) +@@ -353,7 +353,7 @@ apply_rela(Elf_Rela *rela, Elf_Addr base + break; + case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */ + case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. 
*/ +- val = (Elf_Addr) me->module_core + me->arch.got_offset + ++ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset + + rela->r_addend - loc; + if (r_type == R_390_GOTPC) + *(unsigned int *) loc = val; +diff -urNp linux-2.6.35.4/arch/s390/kernel/setup.c linux-2.6.35.4/arch/s390/kernel/setup.c +--- linux-2.6.35.4/arch/s390/kernel/setup.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/kernel/setup.c 2010-09-17 20:12:09.000000000 -0400 +@@ -281,7 +281,7 @@ static int __init early_parse_mem(char * + } + early_param("mem", early_parse_mem); + +-unsigned int user_mode = HOME_SPACE_MODE; ++unsigned int user_mode = SECONDARY_SPACE_MODE; + EXPORT_SYMBOL_GPL(user_mode); + + static int set_amode_and_uaccess(unsigned long user_amode, +@@ -310,17 +310,6 @@ static int set_amode_and_uaccess(unsigne + } + } + +-/* +- * Switch kernel/user addressing modes? +- */ +-static int __init early_parse_switch_amode(char *p) +-{ +- if (user_mode != SECONDARY_SPACE_MODE) +- user_mode = PRIMARY_SPACE_MODE; +- return 0; +-} +-early_param("switch_amode", early_parse_switch_amode); +- + static int __init early_parse_user_mode(char *p) + { + if (p && strcmp(p, "primary") == 0) +@@ -337,20 +326,6 @@ static int __init early_parse_user_mode( + } + early_param("user_mode", early_parse_user_mode); + +-#ifdef CONFIG_S390_EXEC_PROTECT +-/* +- * Enable execute protection? +- */ +-static int __init early_parse_noexec(char *p) +-{ +- if (!strncmp(p, "off", 3)) +- return 0; +- user_mode = SECONDARY_SPACE_MODE; +- return 0; +-} +-early_param("noexec", early_parse_noexec); +-#endif /* CONFIG_S390_EXEC_PROTECT */ +- + static void setup_addressing_mode(void) + { + if (user_mode == SECONDARY_SPACE_MODE) { +diff -urNp linux-2.6.35.4/arch/s390/mm/maccess.c linux-2.6.35.4/arch/s390/mm/maccess.c +--- linux-2.6.35.4/arch/s390/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400 +@@ -45,7 +45,7 @@ static long probe_kernel_write_odd(void + return rc ? 
rc : count; + } + +-long probe_kernel_write(void *dst, void *src, size_t size) ++long probe_kernel_write(void *dst, const void *src, size_t size) + { + long copied = 0; + +diff -urNp linux-2.6.35.4/arch/s390/mm/mmap.c linux-2.6.35.4/arch/s390/mm/mmap.c +--- linux-2.6.35.4/arch/s390/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/s390/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -78,10 +78,22 @@ void arch_pick_mmap_layout(struct mm_str + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +@@ -153,10 +165,22 @@ void arch_pick_mmap_layout(struct mm_str + */ + if (mmap_is_legacy()) { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { + mm->mmap_base = mmap_base(); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = s390_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff -urNp linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c +--- linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/boards/mach-hp6xx/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -143,7 +143,7 @@ static int hp6x0_pm_enter(suspend_state_ + return 0; + } + +-static struct platform_suspend_ops hp6x0_pm_ops = { ++static const struct platform_suspend_ops hp6x0_pm_ops = { + .enter = hp6x0_pm_enter, + .valid = suspend_valid_only_mem, + }; +diff -urNp linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h +--- linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1,10 +1,10 @@ + #ifndef __ASM_SH_DMA_MAPPING_H + #define __ASM_SH_DMA_MAPPING_H + +-extern struct dma_map_ops *dma_ops; ++extern const struct dma_map_ops *dma_ops; + extern void no_iommu_init(void); + +-static inline struct dma_map_ops *get_dma_ops(struct device *dev) ++static inline const struct dma_map_ops *get_dma_ops(struct device *dev) + { + return dma_ops; + } +@@ -14,7 +14,7 @@ static inline struct dma_map_ops *get_dm + + static inline int dma_supported(struct device *dev, u64 mask) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops->dma_supported) + return ops->dma_supported(dev, mask); +@@ -24,7 +24,7 @@ static inline int dma_supported(struct d + + static inline int dma_set_mask(struct device *dev, u64 mask) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + if (!dev->dma_mask || !dma_supported(dev, mask)) + return -EIO; +@@ -59,7 +59,7 @@ static inline int dma_get_cache_alignmen + + static inline int 
dma_mapping_error(struct device *dev, dma_addr_t dma_addr) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); +@@ -70,7 +70,7 @@ static inline int dma_mapping_error(stru + static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t gfp) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; + + if (dma_alloc_from_coherent(dev, size, dma_handle, &memory)) +@@ -87,7 +87,7 @@ static inline void *dma_alloc_coherent(s + static inline void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + if (dma_release_from_coherent(dev, get_order(size), vaddr)) + return; +diff -urNp linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c +--- linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/kernel/cpu/shmobile/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -141,7 +141,7 @@ static int sh_pm_enter(suspend_state_t s + return 0; + } + +-static struct platform_suspend_ops sh_pm_ops = { ++static const struct platform_suspend_ops sh_pm_ops = { + .enter = sh_pm_enter, + .valid = suspend_valid_only_mem, + }; +diff -urNp linux-2.6.35.4/arch/sh/kernel/dma-nommu.c linux-2.6.35.4/arch/sh/kernel/dma-nommu.c +--- linux-2.6.35.4/arch/sh/kernel/dma-nommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/kernel/dma-nommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -62,7 +62,7 @@ static void nommu_sync_sg(struct device + } + #endif + +-struct dma_map_ops nommu_dma_ops = { ++const struct dma_map_ops nommu_dma_ops = { + .alloc_coherent = dma_generic_alloc_coherent, + .free_coherent = dma_generic_free_coherent, + .map_page = nommu_map_page, +diff -urNp linux-2.6.35.4/arch/sh/kernel/kgdb.c linux-2.6.35.4/arch/sh/kernel/kgdb.c +--- linux-2.6.35.4/arch/sh/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -319,7 +319,7 @@ void kgdb_arch_exit(void) + unregister_die_notifier(&kgdb_notifier); + } + +-struct kgdb_arch arch_kgdb_ops = { ++const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: trapa #0x3c */ + #ifdef CONFIG_CPU_LITTLE_ENDIAN + .gdb_bpt_instr = { 0x3c, 0xc3 }, +diff -urNp linux-2.6.35.4/arch/sh/mm/consistent.c linux-2.6.35.4/arch/sh/mm/consistent.c +--- linux-2.6.35.4/arch/sh/mm/consistent.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/mm/consistent.c 2010-09-17 20:12:09.000000000 -0400 +@@ -22,7 +22,7 @@ + + #define PREALLOC_DMA_DEBUG_ENTRIES 4096 + +-struct dma_map_ops *dma_ops; ++const struct dma_map_ops *dma_ops; + EXPORT_SYMBOL(dma_ops); + + static int __init dma_init(void) +diff -urNp linux-2.6.35.4/arch/sh/mm/mmap.c linux-2.6.35.4/arch/sh/mm/mmap.c +--- linux-2.6.35.4/arch/sh/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sh/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -74,8 +74,7 @@ unsigned long arch_get_unmapped_area(str + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -106,7 +105,7 @@ full_search: + } + 
return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -157,8 +156,7 @@ arch_get_unmapped_area_topdown(struct fi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -179,7 +177,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -199,7 +197,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h +--- linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/atomic_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -14,18 +14,40 @@ + #define ATOMIC64_INIT(i) { (i) } + + #define atomic_read(v) (*(volatile int *)&(v)->counter) ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} + #define atomic64_read(v) (*(volatile long *)&(v)->counter) ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return v->counter; ++} + + #define atomic_set(v, i) (((v)->counter) = i) ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} + #define atomic64_set(v, i) (((v)->counter) = i) ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} + + extern void atomic_add(int, atomic_t *); ++extern void atomic_add_unchecked(int, atomic_unchecked_t *); + extern void atomic64_add(long, atomic64_t *); ++extern void atomic64_add_unchecked(long, atomic64_unchecked_t *); + extern void atomic_sub(int, atomic_t *); ++extern void atomic_sub_unchecked(int, atomic_unchecked_t *); + extern void atomic64_sub(long, atomic64_t *); ++extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *); + + extern int atomic_add_ret(int, atomic_t *); ++extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *); + extern long atomic64_add_ret(long, atomic64_t *); ++extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *); + extern int atomic_sub_ret(int, atomic_t *); + extern long atomic64_sub_ret(long, atomic64_t *); + +@@ -33,7 +55,15 @@ extern long atomic64_sub_ret(long, atomi + #define atomic64_dec_return(v) atomic64_sub_ret(1, v) + + #define atomic_inc_return(v) atomic_add_ret(1, v) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_ret_unchecked(1, v); ++} + #define atomic64_inc_return(v) atomic64_add_ret(1, v) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return atomic64_add_ret_unchecked(1, v); ++} + + #define atomic_sub_return(i, v) atomic_sub_ret(i, v) + #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v) +@@ -59,10 +89,26 @@ 
extern long atomic64_sub_ret(long, atomi + #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0) + + #define atomic_inc(v) atomic_add(1, v) ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_add_unchecked(1, v); ++} + #define atomic64_inc(v) atomic64_add(1, v) ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_add_unchecked(1, v); ++} + + #define atomic_dec(v) atomic_sub(1, v) ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ atomic_sub_unchecked(1, v); ++} + #define atomic64_dec(v) atomic64_sub(1, v) ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ atomic64_sub_unchecked(1, v); ++} + + #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0) + #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0) +@@ -72,17 +118,28 @@ extern long atomic64_sub_ret(long, atomi + + static inline int atomic_add_unless(atomic_t *v, int a, int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%icc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +@@ -93,17 +150,28 @@ static inline int atomic_add_unless(atom + + static inline long atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addcc %2, %0, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "tvs %%xcc, 6\n" ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a) ++ : "cc"); ++ ++ old = atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/compat.h linux-2.6.35.4/arch/sparc/include/asm/compat.h +--- linux-2.6.35.4/arch/sparc/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -167,7 +167,7 @@ static inline compat_uptr_t ptr_to_compa + return (u32)(unsigned long)uptr; + } + +-static inline void __user *compat_alloc_user_space(long len) ++static inline void __user *arch_compat_alloc_user_space(long len) + { + struct pt_regs *regs = current_thread_info()->kregs; + unsigned long usp = regs->u_regs[UREG_I6]; +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h +--- linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,10 +13,10 @@ extern int dma_supported(struct device * + #define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h) + #define dma_is_consistent(d, h) (1) + +-extern struct dma_map_ops *dma_ops, pci32_dma_ops; ++extern const struct dma_map_ops *dma_ops, pci32_dma_ops; + extern struct bus_type pci_bus_type; + +-static inline struct dma_map_ops *get_dma_ops(struct device *dev) ++static inline const struct dma_map_ops *get_dma_ops(struct 
device *dev) + { + #if defined(CONFIG_SPARC32) && defined(CONFIG_PCI) + if (dev->bus == &pci_bus_type) +@@ -30,7 +30,7 @@ static inline struct dma_map_ops *get_dm + static inline void *dma_alloc_coherent(struct device *dev, size_t size, + dma_addr_t *dma_handle, gfp_t flag) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + void *cpu_addr; + + cpu_addr = ops->alloc_coherent(dev, size, dma_handle, flag); +@@ -41,7 +41,7 @@ static inline void *dma_alloc_coherent(s + static inline void dma_free_coherent(struct device *dev, size_t size, + void *cpu_addr, dma_addr_t dma_handle) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + debug_dma_free_coherent(dev, size, cpu_addr, dma_handle); + ops->free_coherent(dev, size, cpu_addr, dma_handle); +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/elf_32.h linux-2.6.35.4/arch/sparc/include/asm/elf_32.h +--- linux-2.6.35.4/arch/sparc/include/asm/elf_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/elf_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -114,6 +114,13 @@ typedef struct { + + #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE) + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE 0x10000UL ++ ++#define PAX_DELTA_MMAP_LEN 16 ++#define PAX_DELTA_STACK_LEN 16 ++#endif ++ + /* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. This can NOT be done in userspace + on Sparc. */ +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/elf_64.h linux-2.6.35.4/arch/sparc/include/asm/elf_64.h +--- linux-2.6.35.4/arch/sparc/include/asm/elf_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/elf_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -162,6 +162,12 @@ typedef struct { + #define ELF_ET_DYN_BASE 0x0000010000000000UL + #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL + ++#ifdef CONFIG_PAX_ASLR ++#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL) ++ ++#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28) ++#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29) ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this cpu supports. 
*/ +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h +--- linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/pgtable_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -43,6 +43,13 @@ BTFIXUPDEF_SIMM13(user_ptrs_per_pgd) + BTFIXUPDEF_INT(page_none) + BTFIXUPDEF_INT(page_copy) + BTFIXUPDEF_INT(page_readonly) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++BTFIXUPDEF_INT(page_shared_noexec) ++BTFIXUPDEF_INT(page_copy_noexec) ++BTFIXUPDEF_INT(page_readonly_noexec) ++#endif ++ + BTFIXUPDEF_INT(page_kernel) + + #define PMD_SHIFT SUN4C_PMD_SHIFT +@@ -64,6 +71,16 @@ extern pgprot_t PAGE_SHARED; + #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy)) + #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly)) + ++#ifdef CONFIG_PAX_PAGEEXEC ++extern pgprot_t PAGE_SHARED_NOEXEC; ++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec)) ++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec)) ++#else ++# define PAGE_SHARED_NOEXEC PAGE_SHARED ++# define PAGE_COPY_NOEXEC PAGE_COPY ++# define PAGE_READONLY_NOEXEC PAGE_READONLY ++#endif ++ + extern unsigned long page_kernel; + + #ifdef MODULE +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h +--- linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/pgtsrmmu.h 2010-09-17 20:12:09.000000000 -0400 +@@ -115,6 +115,13 @@ + SRMMU_EXEC | SRMMU_REF) + #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \ + SRMMU_EXEC | SRMMU_REF) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF) ++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF) ++#endif ++ + #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \ + SRMMU_DIRTY | SRMMU_REF) + +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h +--- linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/spinlock_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -99,7 +99,12 @@ static void inline arch_read_lock(arch_r + __asm__ __volatile__ ( + "1: ldsw [%2], %0\n" + " brlz,pn %0, 2f\n" +-"4: add %0, 1, %1\n" ++"4: addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -112,7 +117,7 @@ static void inline arch_read_lock(arch_r + " .previous" + : "=&r" (tmp1), "=&r" (tmp2) + : "r" (lock) +- : "memory"); ++ : "memory", "cc"); + } + + static int inline arch_read_trylock(arch_rwlock_t *lock) +@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch + "1: ldsw [%2], %0\n" + " brlz,a,pn %0, 2f\n" + " mov 0, %0\n" +-" add %0, 1, %1\n" ++" addcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%icc, 1b\n" +@@ -142,7 +152,12 @@ static void inline arch_read_unlock(arch + + __asm__ __volatile__( + "1: lduw [%2], %0\n" +-" sub %0, 1, %1\n" ++" subcc %0, 1, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++" tvs %%icc, 6\n" ++#endif ++ + " cas [%2], %0, %1\n" + " cmp %0, %1\n" + " bne,pn %%xcc, 1b\n" +diff -urNp 
linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h +--- linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/uaccess_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -249,14 +249,25 @@ extern unsigned long __copy_user(void __ + + static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n) + { +- if (n && __access_ok((unsigned long) to, n)) ++ if ((long)n < 0) ++ return n; ++ ++ if (n && __access_ok((unsigned long) to, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); + return __copy_user(to, (__force void __user *) from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); ++ + return __copy_user(to, (__force void __user *) from, n); + } + +@@ -272,19 +283,27 @@ static inline unsigned long copy_from_us + { + int sz = __compiletime_object_size(to); + ++ if ((long)n < 0) ++ return n; ++ + if (unlikely(sz != -1 && sz < n)) { + copy_from_user_overflow(); + return n; + } + +- if (n && __access_ok((unsigned long) from, n)) ++ if (n && __access_ok((unsigned long) from, n)) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); + return __copy_user((__force void __user *) to, from, n); +- else ++ } else + return n; + } + + static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + return __copy_user((__force void __user *) to, from, n); + } + +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h +--- linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/uaccess_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -10,6 +10,7 @@ + #include <linux/compiler.h> + #include <linux/string.h> + #include <linux/thread_info.h> ++#include <linux/kernel.h> + #include <asm/asi.h> + #include <asm/system.h> + #include <asm/spitfire.h> +@@ -224,6 +225,12 @@ copy_from_user(void *to, const void __us + int sz = __compiletime_object_size(to); + unsigned long ret = size; + ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(to, size, false); ++ + if (likely(sz == -1 || sz >= size)) { + ret = ___copy_from_user(to, from, size); + if (unlikely(ret)) +@@ -243,8 +250,15 @@ extern unsigned long copy_to_user_fixup( + static inline unsigned long __must_check + copy_to_user(void __user *to, const void *from, unsigned long size) + { +- unsigned long ret = ___copy_to_user(to, from, size); ++ unsigned long ret; ++ ++ if ((long)size < 0 || size > INT_MAX) ++ return size; ++ ++ if (!__builtin_constant_p(size)) ++ check_object_size(from, size, true); + ++ ret = ___copy_to_user(to, from, size); + if (unlikely(ret)) + ret = copy_to_user_fixup(to, from, size); + return ret; +diff -urNp linux-2.6.35.4/arch/sparc/include/asm/uaccess.h linux-2.6.35.4/arch/sparc/include/asm/uaccess.h +--- linux-2.6.35.4/arch/sparc/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1,5 +1,13 @@ + #ifndef ___ASM_SPARC_UACCESS_H + #define ___ASM_SPARC_UACCESS_H ++ ++#ifdef __KERNEL__ ++#ifndef __ASSEMBLY__ 
++#include <linux/types.h> ++extern void check_object_size(const void *ptr, unsigned long n, bool to); ++#endif ++#endif ++ + #if defined(__sparc__) && defined(__arch64__) + #include <asm/uaccess_64.h> + #else +diff -urNp linux-2.6.35.4/arch/sparc/kernel/iommu.c linux-2.6.35.4/arch/sparc/kernel/iommu.c +--- linux-2.6.35.4/arch/sparc/kernel/iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -828,7 +828,7 @@ static void dma_4u_sync_sg_for_cpu(struc + spin_unlock_irqrestore(&iommu->lock, flags); + } + +-static struct dma_map_ops sun4u_dma_ops = { ++static const struct dma_map_ops sun4u_dma_ops = { + .alloc_coherent = dma_4u_alloc_coherent, + .free_coherent = dma_4u_free_coherent, + .map_page = dma_4u_map_page, +@@ -839,7 +839,7 @@ static struct dma_map_ops sun4u_dma_ops + .sync_sg_for_cpu = dma_4u_sync_sg_for_cpu, + }; + +-struct dma_map_ops *dma_ops = &sun4u_dma_ops; ++const struct dma_map_ops *dma_ops = &sun4u_dma_ops; + EXPORT_SYMBOL(dma_ops); + + extern int pci64_dma_supported(struct pci_dev *pdev, u64 device_mask); +diff -urNp linux-2.6.35.4/arch/sparc/kernel/ioport.c linux-2.6.35.4/arch/sparc/kernel/ioport.c +--- linux-2.6.35.4/arch/sparc/kernel/ioport.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/ioport.c 2010-09-17 20:12:09.000000000 -0400 +@@ -397,7 +397,7 @@ static void sbus_sync_sg_for_device(stru + BUG(); + } + +-struct dma_map_ops sbus_dma_ops = { ++const struct dma_map_ops sbus_dma_ops = { + .alloc_coherent = sbus_alloc_coherent, + .free_coherent = sbus_free_coherent, + .map_page = sbus_map_page, +@@ -408,7 +408,7 @@ struct dma_map_ops sbus_dma_ops = { + .sync_sg_for_device = sbus_sync_sg_for_device, + }; + +-struct dma_map_ops *dma_ops = &sbus_dma_ops; ++const struct dma_map_ops *dma_ops = &sbus_dma_ops; + EXPORT_SYMBOL(dma_ops); + + static int __init sparc_register_ioport(void) +@@ -645,7 +645,7 @@ static void pci32_sync_sg_for_device(str + } + } + +-struct dma_map_ops pci32_dma_ops = { ++const struct dma_map_ops pci32_dma_ops = { + .alloc_coherent = pci32_alloc_coherent, + .free_coherent = pci32_free_coherent, + .map_page = pci32_map_page, +diff -urNp linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c +--- linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/kgdb_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -164,7 +164,7 @@ void kgdb_arch_set_pc(struct pt_regs *re + regs->npc = regs->pc + 4; + } + +-struct kgdb_arch arch_kgdb_ops = { ++const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: ta 0x7d */ + .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x7d }, + }; +diff -urNp linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c +--- linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/kgdb_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -187,7 +187,7 @@ void kgdb_arch_set_pc(struct pt_regs *re + regs->tnpc = regs->tpc + 4; + } + +-struct kgdb_arch arch_kgdb_ops = { ++const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: ta 0x72 */ + .gdb_bpt_instr = { 0x91, 0xd0, 0x20, 0x72 }, + }; +diff -urNp linux-2.6.35.4/arch/sparc/kernel/Makefile linux-2.6.35.4/arch/sparc/kernel/Makefile +--- linux-2.6.35.4/arch/sparc/kernel/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/Makefile 2010-09-17 20:12:09.000000000 -0400 +@@ -3,7 +3,7 @@ + # + 
+ asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + extra-y := head_$(BITS).o + extra-y += init_task.o +diff -urNp linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c +--- linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/pci_sun4v.c 2010-09-17 20:12:09.000000000 -0400 +@@ -525,7 +525,7 @@ static void dma_4v_unmap_sg(struct devic + spin_unlock_irqrestore(&iommu->lock, flags); + } + +-static struct dma_map_ops sun4v_dma_ops = { ++static const struct dma_map_ops sun4v_dma_ops = { + .alloc_coherent = dma_4v_alloc_coherent, + .free_coherent = dma_4v_free_coherent, + .map_page = dma_4v_map_page, +diff -urNp linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c +--- linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/sys_sparc_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -57,7 +57,7 @@ unsigned long arch_get_unmapped_area(str + if (ARCH_SUN4C && len > 0x20000000) + return -ENOMEM; + if (!addr) +- addr = TASK_UNMAPPED_BASE; ++ addr = current->mm->mmap_base; + + if (flags & MAP_SHARED) + addr = COLOUR_ALIGN(addr); +@@ -72,7 +72,7 @@ unsigned long arch_get_unmapped_area(str + } + if (TASK_SIZE - PAGE_SIZE - len < addr) + return -ENOMEM; +- if (!vmm || addr + len <= vmm->vm_start) ++ if (check_heap_stack_gap(vmm, addr, len)) + return addr; + addr = vmm->vm_end; + if (flags & MAP_SHARED) +diff -urNp linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c +--- linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/sys_sparc_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -124,7 +124,7 @@ unsigned long arch_get_unmapped_area(str + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. 
+ */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -139,6 +139,10 @@ unsigned long arch_get_unmapped_area(str + if (filp || (flags & MAP_SHARED)) + do_color_align = 1; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + if (do_color_align) + addr = COLOUR_ALIGN(addr, pgoff); +@@ -146,15 +150,14 @@ unsigned long arch_get_unmapped_area(str + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + } + +@@ -174,14 +177,14 @@ full_search: + vma = find_vma(mm, VA_EXCLUDE_END); + } + if (unlikely(task_size < addr)) { +- if (start_addr != TASK_UNMAPPED_BASE) { +- start_addr = addr = TASK_UNMAPPED_BASE; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -215,7 +218,7 @@ arch_get_unmapped_area_topdown(struct fi + /* We do not accept a shared mapping if it would violate + * cache aliasing constraints. + */ +- if ((flags & MAP_SHARED) && ++ if ((filp || (flags & MAP_SHARED)) && + ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1))) + return -EINVAL; + return addr; +@@ -236,8 +239,7 @@ arch_get_unmapped_area_topdown(struct fi + addr = PAGE_ALIGN(addr); + + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -258,7 +260,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -278,7 +280,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -385,6 +387,12 @@ void arch_pick_mmap_layout(struct mm_str + gap == RLIM_INFINITY || + sysctl_legacy_va_layout) { + mm->mmap_base = TASK_UNMAPPED_BASE + random_factor; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { +@@ -397,6 +405,12 @@ void arch_pick_mmap_layout(struct mm_str + gap = (task_size / 6 * 5); + + mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + 
mm->unmap_area = arch_unmap_area_topdown; + } +diff -urNp linux-2.6.35.4/arch/sparc/kernel/traps_64.c linux-2.6.35.4/arch/sparc/kernel/traps_64.c +--- linux-2.6.35.4/arch/sparc/kernel/traps_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/kernel/traps_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -95,6 +95,12 @@ void bad_trap(struct pt_regs *regs, long + + lvl -= 0x100; + if (regs->tstate & TSTATE_PRIV) { ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + sprintf(buffer, "Kernel bad sw trap %lx", lvl); + die_if_kernel(buffer, regs); + } +@@ -113,11 +119,16 @@ void bad_trap(struct pt_regs *regs, long + void bad_trap_tl1(struct pt_regs *regs, long lvl) + { + char buffer[32]; +- ++ + if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs, + 0, lvl, SIGTRAP) == NOTIFY_STOP) + return; + ++#ifdef CONFIG_PAX_REFCOUNT ++ if (lvl == 6) ++ pax_report_refcount_overflow(regs); ++#endif ++ + dump_tl1_traplog((struct tl1_traplog *)(regs + 1)); + + sprintf (buffer, "Bad trap %lx at tl>0", lvl); +diff -urNp linux-2.6.35.4/arch/sparc/lib/atomic_64.S linux-2.6.35.4/arch/sparc/lib/atomic_64.S +--- linux-2.6.35.4/arch/sparc/lib/atomic_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/lib/atomic_64.S 2010-09-17 20:12:37.000000000 -0400 +@@ -18,7 +18,12 @@ + atomic_add: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f +@@ -28,12 +33,32 @@ atomic_add: /* %o0 = increment, %o1 = at + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add, .-atomic_add + ++ .globl atomic_add_unchecked ++ .type atomic_add_unchecked,#function ++atomic_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ add %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic_add_unchecked, .-atomic_add_unchecked ++ + .globl atomic_sub + .type atomic_sub,#function + atomic_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f +@@ -43,12 +68,32 @@ atomic_sub: /* %o0 = decrement, %o1 = at + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_sub, .-atomic_sub + ++ .globl atomic_sub_unchecked ++ .type atomic_sub_unchecked,#function ++atomic_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ sub %g1, %o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic_sub_unchecked, .-atomic_sub_unchecked ++ + .globl atomic_add_ret + .type atomic_add_ret,#function + atomic_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f +@@ -59,12 +104,33 @@ atomic_add_ret: /* %o0 = increment, %o1 + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic_add_ret, .-atomic_add_ret + ++ .globl atomic_add_ret_unchecked ++ .type atomic_add_ret_unchecked,#function ++atomic_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: lduw [%o1], %g1 ++ addcc %g1, 
%o0, %g7 ++ cas [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %icc, 2f ++ add %g7, %o0, %g7 ++ sra %g7, 0, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic_add_ret_unchecked, .-atomic_add_ret_unchecked ++ + .globl atomic_sub_ret + .type atomic_sub_ret,#function + atomic_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: lduw [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 2f +@@ -80,7 +146,12 @@ atomic_sub_ret: /* %o0 = decrement, %o1 + atomic64_add: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f +@@ -90,12 +161,32 @@ atomic64_add: /* %o0 = increment, %o1 = + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add, .-atomic64_add + ++ .globl atomic64_add_unchecked ++ .type atomic64_add_unchecked,#function ++atomic64_add_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic64_add_unchecked, .-atomic64_add_unchecked ++ + .globl atomic64_sub + .type atomic64_sub,#function + atomic64_sub: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f +@@ -105,12 +196,32 @@ atomic64_sub: /* %o0 = decrement, %o1 = + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_sub, .-atomic64_sub + ++ .globl atomic64_sub_unchecked ++ .type atomic64_sub_unchecked,#function ++atomic64_sub_unchecked: /* %o0 = decrement, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ subcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ nop ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic64_sub_unchecked, .-atomic64_sub_unchecked ++ + .globl atomic64_add_ret + .type atomic64_add_ret,#function + atomic64_add_ret: /* %o0 = increment, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- add %g1, %o0, %g7 ++ addcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f +@@ -121,12 +232,33 @@ atomic64_add_ret: /* %o0 = increment, %o + 2: BACKOFF_SPIN(%o2, %o3, 1b) + .size atomic64_add_ret, .-atomic64_add_ret + ++ .globl atomic64_add_ret_unchecked ++ .type atomic64_add_ret_unchecked,#function ++atomic64_add_ret_unchecked: /* %o0 = increment, %o1 = atomic_ptr */ ++ BACKOFF_SETUP(%o2) ++1: ldx [%o1], %g1 ++ addcc %g1, %o0, %g7 ++ casx [%o1], %g1, %g7 ++ cmp %g1, %g7 ++ bne,pn %xcc, 2f ++ add %g7, %o0, %g7 ++ mov %g7, %o0 ++ retl ++ nop ++2: BACKOFF_SPIN(%o2, %o3, 1b) ++ .size atomic64_add_ret_unchecked, .-atomic64_add_ret_unchecked ++ + .globl atomic64_sub_ret + .type atomic64_sub_ret,#function + atomic64_sub_ret: /* %o0 = decrement, %o1 = atomic_ptr */ + BACKOFF_SETUP(%o2) + 1: ldx [%o1], %g1 +- sub %g1, %o0, %g7 ++ subcc %g1, %o0, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %xcc, 6 ++#endif ++ + casx [%o1], %g1, %g7 + cmp %g1, %g7 + bne,pn %xcc, 2f +diff -urNp linux-2.6.35.4/arch/sparc/lib/ksyms.c linux-2.6.35.4/arch/sparc/lib/ksyms.c +--- linux-2.6.35.4/arch/sparc/lib/ksyms.c 2010-08-26 
19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/lib/ksyms.c 2010-09-17 20:12:09.000000000 -0400 +@@ -142,12 +142,17 @@ EXPORT_SYMBOL(__downgrade_write); + + /* Atomic counter implementation. */ + EXPORT_SYMBOL(atomic_add); ++EXPORT_SYMBOL(atomic_add_unchecked); + EXPORT_SYMBOL(atomic_add_ret); + EXPORT_SYMBOL(atomic_sub); ++EXPORT_SYMBOL(atomic_sub_unchecked); + EXPORT_SYMBOL(atomic_sub_ret); + EXPORT_SYMBOL(atomic64_add); ++EXPORT_SYMBOL(atomic64_add_unchecked); + EXPORT_SYMBOL(atomic64_add_ret); ++EXPORT_SYMBOL(atomic64_add_ret_unchecked); + EXPORT_SYMBOL(atomic64_sub); ++EXPORT_SYMBOL(atomic64_sub_unchecked); + EXPORT_SYMBOL(atomic64_sub_ret); + + /* Atomic bit operations. */ +diff -urNp linux-2.6.35.4/arch/sparc/lib/rwsem_64.S linux-2.6.35.4/arch/sparc/lib/rwsem_64.S +--- linux-2.6.35.4/arch/sparc/lib/rwsem_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/lib/rwsem_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -11,7 +11,12 @@ + .globl __down_read + __down_read: + 1: lduw [%o0], %g1 +- add %g1, 1, %g7 ++ addcc %g1, 1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o0], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 1b +@@ -33,7 +38,12 @@ __down_read: + .globl __down_read_trylock + __down_read_trylock: + 1: lduw [%o0], %g1 +- add %g1, 1, %g7 ++ addcc %g1, 1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cmp %g7, 0 + bl,pn %icc, 2f + mov 0, %o1 +@@ -51,7 +61,12 @@ __down_write: + or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 + 1: + lduw [%o0], %g3 +- add %g3, %g1, %g7 ++ addcc %g3, %g1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o0], %g3, %g7 + cmp %g3, %g7 + bne,pn %icc, 1b +@@ -77,7 +92,12 @@ __down_write_trylock: + cmp %g3, 0 + bne,pn %icc, 2f + mov 0, %o1 +- add %g3, %g1, %g7 ++ addcc %g3, %g1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o0], %g3, %g7 + cmp %g3, %g7 + bne,pn %icc, 1b +@@ -90,7 +110,12 @@ __down_write_trylock: + __up_read: + 1: + lduw [%o0], %g1 +- sub %g1, 1, %g7 ++ subcc %g1, 1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o0], %g1, %g7 + cmp %g1, %g7 + bne,pn %icc, 1b +@@ -118,7 +143,12 @@ __up_write: + or %g1, %lo(RWSEM_ACTIVE_WRITE_BIAS), %g1 + 1: + lduw [%o0], %g3 +- sub %g3, %g1, %g7 ++ subcc %g3, %g1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o0], %g3, %g7 + cmp %g3, %g7 + bne,pn %icc, 1b +@@ -143,7 +173,12 @@ __downgrade_write: + or %g1, %lo(RWSEM_WAITING_BIAS), %g1 + 1: + lduw [%o0], %g3 +- sub %g3, %g1, %g7 ++ subcc %g3, %g1, %g7 ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ tvs %icc, 6 ++#endif ++ + cas [%o0], %g3, %g7 + cmp %g3, %g7 + bne,pn %icc, 1b +diff -urNp linux-2.6.35.4/arch/sparc/Makefile linux-2.6.35.4/arch/sparc/Makefile +--- linux-2.6.35.4/arch/sparc/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/Makefile 2010-09-17 20:12:37.000000000 -0400 +@@ -75,7 +75,7 @@ drivers-$(CONFIG_OPROFILE) += arch/sparc + # Export what is needed by arch/sparc/boot/Makefile + export VMLINUX_INIT VMLINUX_MAIN + VMLINUX_INIT := $(head-y) $(init-y) +-VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ ++VMLINUX_MAIN := $(core-y) kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ + VMLINUX_MAIN += $(patsubst %/, %/lib.a, $(libs-y)) $(libs-y) + VMLINUX_MAIN += $(drivers-y) $(net-y) + +diff -urNp linux-2.6.35.4/arch/sparc/mm/fault_32.c linux-2.6.35.4/arch/sparc/mm/fault_32.c +--- linux-2.6.35.4/arch/sparc/mm/fault_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/arch/sparc/mm/fault_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -22,6 +22,9 @@ + #include <linux/interrupt.h> + #include <linux/module.h> + #include <linux/kdebug.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/system.h> + #include <asm/page.h> +@@ -209,6 +212,268 @@ static unsigned long compute_si_addr(str + return safe_compute_effective_address(regs, insn); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->pc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->pc); ++ err |= get_user(sethi2, (unsigned int *)(regs->pc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned int addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->pc); ++ ++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) { ++ unsigned int addr; ++ ++ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 2; ++ } ++ } ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(jmpl, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U && ++ nop == 0x01000000U) ++ { ++ unsigned int addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ 
regs->npc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->pc); ++ err |= get_user(ba, (unsigned int *)(regs->pc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->pc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned int addr, save, call; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2); ++ else ++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2); ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(¤t->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(¤t->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(¤t->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(¤t->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->pc = call_dl_resolve; ++ regs->npc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U); ++ regs->pc = addr; ++ regs->npc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->pc-4)); ++ err |= get_user(call, (unsigned int *)regs->pc); ++ err |= get_user(nop, (unsigned int *)(regs->pc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2); ++ ++ regs->u_regs[UREG_RETPC] = regs->pc; ++ regs->pc = dl_resolve; ++ regs->npc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs, + int text_fault) + { +@@ -282,6 +547,24 @@ good_area: + if(!(vma->vm_flags & VM_WRITE)) + goto bad_area; + } else { ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) { ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Allow reads even for write-only mappings */ + if(!(vma->vm_flags & (VM_READ | VM_EXEC))) + goto bad_area; +diff -urNp linux-2.6.35.4/arch/sparc/mm/fault_64.c linux-2.6.35.4/arch/sparc/mm/fault_64.c +--- linux-2.6.35.4/arch/sparc/mm/fault_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/mm/fault_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -21,6 +21,9 @@ + #include <linux/kprobes.h> + #include <linux/kdebug.h> + #include <linux/percpu.h> ++#include <linux/slab.h> ++#include <linux/pagemap.h> ++#include <linux/compiler.h> + + #include <asm/page.h> + #include <asm/pgtable.h> +@@ -272,6 +275,457 @@ static void noinline __kprobes bogus_32b + show_regs(regs); + } + ++#ifdef CONFIG_PAX_PAGEEXEC ++#ifdef CONFIG_PAX_DLRESOLVE ++static void pax_emuplt_close(struct vm_area_struct *vma) ++{ ++ vma->vm_mm->call_dl_resolve = 0UL; ++} ++ ++static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++{ ++ unsigned int *kaddr; ++ ++ vmf->page = alloc_page(GFP_HIGHUSER); ++ if (!vmf->page) ++ return VM_FAULT_OOM; ++ ++ kaddr = kmap(vmf->page); ++ memset(kaddr, 0, PAGE_SIZE); ++ kaddr[0] = 0x9DE3BFA8U; /* save */ ++ flush_dcache_page(vmf->page); ++ kunmap(vmf->page); ++ return VM_FAULT_MAJOR; ++} ++ ++static const struct vm_operations_struct pax_vm_ops = { ++ .close = pax_emuplt_close, ++ .fault = pax_emuplt_fault ++}; ++ ++static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr) ++{ ++ int ret; ++ ++ INIT_LIST_HEAD(&vma->anon_vma_chain); ++ vma->vm_mm = current->mm; ++ vma->vm_start = addr; ++ vma->vm_end = addr + PAGE_SIZE; ++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ vma->vm_ops = &pax_vm_ops; ++ ++ ret = insert_vm_struct(current->mm, vma); ++ if (ret) ++ return ret; ++ ++ ++current->mm->total_vm; ++ return 0; ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->tpc = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when patched PLT trampoline was detected ++ * 3 when unpatched PLT trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ ++#ifdef CONFIG_PAX_EMUPLT ++ int err; ++ ++ do { /* PaX: patched PLT emulation #1 */ ++ unsigned int sethi1, sethi2, jmpl; ++ ++ err = get_user(sethi1, (unsigned int *)regs->tpc); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10; ++ addr = regs->u_regs[UREG_G1]; ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 
0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ { /* PaX: patched PLT emulation #2 */ ++ unsigned int ba; ++ ++ err = get_user(ba, (unsigned int *)regs->tpc); ++ ++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) { ++ unsigned long addr; ++ ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } ++ ++ do { /* PaX: patched PLT emulation #3 */ ++ unsigned int sethi, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (jmpl & 0xFFFFE000U) == 0x81C06000U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #4 */ ++ unsigned int sethi, mov1, call, mov2; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(mov1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(call, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(mov2, (unsigned int *)(regs->tpc+12)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ mov1 == 0x8210000FU && ++ (call & 0xC0000000U) == 0x40000000U && ++ mov2 == 0x9E100001U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC]; ++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #5 */ ++ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8)); ++ err |= get_user(or1, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or2, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+28)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x82106000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x83287020U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: patched PLT emulation #6 */ ++ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(sethi2, 
(unsigned int *)(regs->tpc+8)); ++ err |= get_user(sllx, (unsigned int *)(regs->tpc+12)); ++ err |= get_user(or, (unsigned int *)(regs->tpc+16)); ++ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+24)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (sethi1 & 0xFFC00000U) == 0x03000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ sllx == 0x83287020U && ++ (or & 0xFFFFE000U) == 0x8A116000U && ++ jmpl == 0x81C04005U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU); ++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: unpatched PLT emulation step 1 */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ unsigned int save, call; ++ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl; ++ ++ if ((ba & 0xFFC00000U) == 0x30800000U) ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2); ++ else ++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ err = get_user(save, (unsigned int *)addr); ++ err |= get_user(call, (unsigned int *)(addr+4)); ++ err |= get_user(nop, (unsigned int *)(addr+8)); ++ if (err) ++ break; ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ struct vm_area_struct *vma; ++ unsigned long call_dl_resolve; ++ ++ down_read(¤t->mm->mmap_sem); ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_read(¤t->mm->mmap_sem); ++ if (likely(call_dl_resolve)) ++ goto emulate; ++ ++ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ ++ down_write(¤t->mm->mmap_sem); ++ if (current->mm->call_dl_resolve) { ++ call_dl_resolve = current->mm->call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ goto emulate; ++ } ++ ++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE); ++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) { ++ up_write(¤t->mm->mmap_sem); ++ if (vma) ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ if (pax_insert_vma(vma, call_dl_resolve)) { ++ up_write(¤t->mm->mmap_sem); ++ kmem_cache_free(vm_area_cachep, vma); ++ return 1; ++ } ++ ++ current->mm->call_dl_resolve = call_dl_resolve; ++ up_write(¤t->mm->mmap_sem); ++ ++emulate: ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->tpc = call_dl_resolve; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++#endif ++ ++ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */ ++ if ((save & 0xFFC00000U) == 0x05000000U && ++ (call & 0xFFFFE000U) == 0x85C0A000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G2] = addr + 4; ++ addr = (save & 0x003FFFFFU) << 10; ++ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 
0x00001000UL); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ ++ /* PaX: 64-bit PLT stub */ ++ err = get_user(sethi1, (unsigned int *)addr); ++ err |= get_user(sethi2, (unsigned int *)(addr+4)); ++ err |= get_user(or1, (unsigned int *)(addr+8)); ++ err |= get_user(or2, (unsigned int *)(addr+12)); ++ err |= get_user(sllx, (unsigned int *)(addr+16)); ++ err |= get_user(add, (unsigned int *)(addr+20)); ++ err |= get_user(jmpl, (unsigned int *)(addr+24)); ++ err |= get_user(nop, (unsigned int *)(addr+28)); ++ if (err) ++ break; ++ ++ if ((sethi1 & 0xFFC00000U) == 0x09000000U && ++ (sethi2 & 0xFFC00000U) == 0x0B000000U && ++ (or1 & 0xFFFFE000U) == 0x88112000U && ++ (or2 & 0xFFFFE000U) == 0x8A116000U && ++ sllx == 0x89293020U && ++ add == 0x8A010005U && ++ jmpl == 0x89C14000U && ++ nop == 0x01000000U) ++ { ++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU); ++ regs->u_regs[UREG_G4] <<= 32; ++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU); ++ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4]; ++ regs->u_regs[UREG_G4] = addr + 24; ++ addr = regs->u_regs[UREG_G5]; ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 3; ++ } ++ } ++ } while (0); ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ do { /* PaX: unpatched PLT emulation step 2 */ ++ unsigned int save, call, nop; ++ ++ err = get_user(save, (unsigned int *)(regs->tpc-4)); ++ err |= get_user(call, (unsigned int *)regs->tpc); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+4)); ++ if (err) ++ break; ++ ++ if (save == 0x9DE3BFA8U && ++ (call & 0xC0000000U) == 0x40000000U && ++ nop == 0x01000000U) ++ { ++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ dl_resolve &= 0xFFFFFFFFUL; ++ ++ regs->u_regs[UREG_RETPC] = regs->tpc; ++ regs->tpc = dl_resolve; ++ regs->tnpc = dl_resolve+4; ++ return 3; ++ } ++ } while (0); ++#endif ++ ++ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */ ++ unsigned int sethi, ba, nop; ++ ++ err = get_user(sethi, (unsigned int *)regs->tpc); ++ err |= get_user(ba, (unsigned int *)(regs->tpc+4)); ++ err |= get_user(nop, (unsigned int *)(regs->tpc+8)); ++ ++ if (err) ++ break; ++ ++ if ((sethi & 0xFFC00000U) == 0x03000000U && ++ (ba & 0xFFF00000U) == 0x30600000U && ++ nop == 0x01000000U) ++ { ++ unsigned long addr; ++ ++ addr = (sethi & 0x003FFFFFU) << 10; ++ regs->u_regs[UREG_G1] = addr; ++ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2); ++ ++ if (test_thread_flag(TIF_32BIT)) ++ addr &= 0xFFFFFFFFUL; ++ ++ regs->tpc = addr; ++ regs->tnpc = addr+4; ++ return 2; ++ } ++ } while (0); ++ ++#endif ++ ++ return 1; ++} ++ ++void pax_report_insns(void *pc, void *sp) ++{ ++ unsigned long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 8; i++) { ++ unsigned int c; ++ if (get_user(c, (unsigned int *)pc+i)) ++ printk(KERN_CONT "???????? 
"); ++ else ++ printk(KERN_CONT "%08x ", c); ++ } ++ printk("\n"); ++} ++#endif ++ + asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs) + { + struct mm_struct *mm = current->mm; +@@ -340,6 +794,29 @@ asmlinkage void __kprobes do_sparc64_fau + if (!vma) + goto bad_area; + ++#ifdef CONFIG_PAX_PAGEEXEC ++ /* PaX: detect ITLB misses on non-exec pages */ ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address && ++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB)) ++ { ++ if (address != regs->tpc) ++ goto good_area; ++ ++ up_read(&mm->mmap_sem); ++ switch (pax_handle_fetch_fault(regs)) { ++ ++#ifdef CONFIG_PAX_EMUPLT ++ case 2: ++ case 3: ++ return; ++#endif ++ ++ } ++ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS)); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ + /* Pure DTLB misses do not tell us whether the fault causing + * load/store/atomic was a write or not, it only says that there + * was no match. So in such a case we (carefully) read the +diff -urNp linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c +--- linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400 +@@ -68,7 +68,7 @@ full_search: + } + return -ENOMEM; + } +- if (likely(!vma || addr + len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* + * Remember the place where we stopped the search: + */ +@@ -107,7 +107,7 @@ hugetlb_get_unmapped_area_topdown(struct + /* make sure it can fit in the remaining address space */ + if (likely(addr > len)) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr - len, len)) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -125,7 +125,7 @@ hugetlb_get_unmapped_area_topdown(struct + * return with success: + */ + vma = find_vma(mm, addr); +- if (likely(!vma || addr+len <= vma->vm_start)) { ++ if (likely(check_heap_stack_gap(vma, addr, len))) { + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + } +@@ -182,8 +182,7 @@ hugetlb_get_unmapped_area(struct file *f + if (addr) { + addr = ALIGN(addr, HPAGE_SIZE); + vma = find_vma(mm, addr); +- if (task_size - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff -urNp linux-2.6.35.4/arch/sparc/mm/init_32.c linux-2.6.35.4/arch/sparc/mm/init_32.c +--- linux-2.6.35.4/arch/sparc/mm/init_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/mm/init_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -318,6 +318,9 @@ extern void device_scan(void); + pgprot_t PAGE_SHARED __read_mostly; + EXPORT_SYMBOL(PAGE_SHARED); + ++pgprot_t PAGE_SHARED_NOEXEC __read_mostly; ++EXPORT_SYMBOL(PAGE_SHARED_NOEXEC); ++ + void __init paging_init(void) + { + switch(sparc_cpu_model) { +@@ -346,17 +349,17 @@ void __init paging_init(void) + + /* Initialize the protection map with non-constant, MMU dependent values. 
*/ + protection_map[0] = PAGE_NONE; +- protection_map[1] = PAGE_READONLY; +- protection_map[2] = PAGE_COPY; +- protection_map[3] = PAGE_COPY; ++ protection_map[1] = PAGE_READONLY_NOEXEC; ++ protection_map[2] = PAGE_COPY_NOEXEC; ++ protection_map[3] = PAGE_COPY_NOEXEC; + protection_map[4] = PAGE_READONLY; + protection_map[5] = PAGE_READONLY; + protection_map[6] = PAGE_COPY; + protection_map[7] = PAGE_COPY; + protection_map[8] = PAGE_NONE; +- protection_map[9] = PAGE_READONLY; +- protection_map[10] = PAGE_SHARED; +- protection_map[11] = PAGE_SHARED; ++ protection_map[9] = PAGE_READONLY_NOEXEC; ++ protection_map[10] = PAGE_SHARED_NOEXEC; ++ protection_map[11] = PAGE_SHARED_NOEXEC; + protection_map[12] = PAGE_READONLY; + protection_map[13] = PAGE_READONLY; + protection_map[14] = PAGE_SHARED; +diff -urNp linux-2.6.35.4/arch/sparc/mm/Makefile linux-2.6.35.4/arch/sparc/mm/Makefile +--- linux-2.6.35.4/arch/sparc/mm/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/mm/Makefile 2010-09-17 20:12:09.000000000 -0400 +@@ -2,7 +2,7 @@ + # + + asflags-y := -ansi +-ccflags-y := -Werror ++#ccflags-y := -Werror + + obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o + obj-y += fault_$(BITS).o +diff -urNp linux-2.6.35.4/arch/sparc/mm/srmmu.c linux-2.6.35.4/arch/sparc/mm/srmmu.c +--- linux-2.6.35.4/arch/sparc/mm/srmmu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/sparc/mm/srmmu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2198,6 +2198,13 @@ void __init ld_mmu_srmmu(void) + PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); + BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); + BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ PAGE_SHARED_NOEXEC = pgprot_val(SRMMU_PAGE_SHARED_NOEXEC); ++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC)); ++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC)); ++#endif ++ + BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); + page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); + +diff -urNp linux-2.6.35.4/arch/um/include/asm/kmap_types.h linux-2.6.35.4/arch/um/include/asm/kmap_types.h +--- linux-2.6.35.4/arch/um/include/asm/kmap_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/um/include/asm/kmap_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -23,6 +23,7 @@ enum km_type { + KM_IRQ1, + KM_SOFTIRQ0, + KM_SOFTIRQ1, ++ KM_CLEARPAGE, + KM_TYPE_NR + }; + +diff -urNp linux-2.6.35.4/arch/um/include/asm/page.h linux-2.6.35.4/arch/um/include/asm/page.h +--- linux-2.6.35.4/arch/um/include/asm/page.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/um/include/asm/page.h 2010-09-17 20:12:09.000000000 -0400 +@@ -14,6 +14,9 @@ + #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) + #define PAGE_MASK (~(PAGE_SIZE-1)) + ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++ + #ifndef __ASSEMBLY__ + + struct page; +diff -urNp linux-2.6.35.4/arch/um/sys-i386/syscalls.c linux-2.6.35.4/arch/um/sys-i386/syscalls.c +--- linux-2.6.35.4/arch/um/sys-i386/syscalls.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/um/sys-i386/syscalls.c 2010-09-17 20:12:09.000000000 -0400 +@@ -11,6 +11,21 @@ + #include "asm/uaccess.h" + #include "asm/unistd.h" + ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) ++{ ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (len > pax_task_size || 
addr > pax_task_size - len) ++ return -EINVAL; ++ ++ return 0; ++} ++ + /* + * The prototype on i386 is: + * +diff -urNp linux-2.6.35.4/arch/x86/boot/bitops.h linux-2.6.35.4/arch/x86/boot/bitops.h +--- linux-2.6.35.4/arch/x86/boot/bitops.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/bitops.h 2010-09-17 20:12:09.000000000 -0400 +@@ -26,7 +26,7 @@ static inline int variable_test_bit(int + u8 v; + const u32 *p = (const u32 *)addr; + +- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); ++ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr)); + return v; + } + +@@ -37,7 +37,7 @@ static inline int variable_test_bit(int + + static inline void set_bit(int nr, void *addr) + { +- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); ++ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr)); + } + + #endif /* BOOT_BITOPS_H */ +diff -urNp linux-2.6.35.4/arch/x86/boot/boot.h linux-2.6.35.4/arch/x86/boot/boot.h +--- linux-2.6.35.4/arch/x86/boot/boot.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/boot.h 2010-09-17 20:12:09.000000000 -0400 +@@ -82,7 +82,7 @@ static inline void io_delay(void) + static inline u16 ds(void) + { + u16 seg; +- asm("movw %%ds,%0" : "=rm" (seg)); ++ asm volatile("movw %%ds,%0" : "=rm" (seg)); + return seg; + } + +@@ -178,7 +178,7 @@ static inline void wrgs32(u32 v, addr_t + static inline int memcmp(const void *s1, const void *s2, size_t len) + { + u8 diff; +- asm("repe; cmpsb; setnz %0" ++ asm volatile("repe; cmpsb; setnz %0" + : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len)); + return diff; + } +diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/head_32.S linux-2.6.35.4/arch/x86/boot/compressed/head_32.S +--- linux-2.6.35.4/arch/x86/boot/compressed/head_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/compressed/head_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -76,7 +76,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + #else +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + #endif + + /* Target address to relocate to for decompression */ +@@ -149,7 +149,7 @@ relocated: + * and where it was actually loaded. + */ + movl %ebp, %ebx +- subl $LOAD_PHYSICAL_ADDR, %ebx ++ subl $____LOAD_PHYSICAL_ADDR, %ebx + jz 2f /* Nothing to be done if loaded at compiled addr. */ + /* + * Process relocations. 
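An aside on the asm-to-asm-volatile conversions in the arch/x86/boot hunks above (an illustrative sketch, not part of the patch): GCC treats a non-volatile asm statement that has output operands as a pure function of its inputs, so it may merge two identical asm statements or delete one whose result looks unused. For instructions whose result depends on hidden machine state (cpuid, rdmsr, reading %cr0 or a segment register), that assumption is unsound, which is what the added volatile qualifiers prevent. Roughly:

static inline unsigned long read_cr0_plain(void)
{
	unsigned long cr0;
	/* non-volatile asm with an output: two calls may be CSE'd into one */
	asm("mov %%cr0,%0" : "=r" (cr0));
	return cr0;
}

static inline unsigned long read_cr0_checked(void)
{
	unsigned long cr0;
	/* volatile: executed once per source occurrence, never deleted */
	asm volatile("mov %%cr0,%0" : "=r" (cr0));
	return cr0;
}

With the plain variant, two reads bracketing a write to %cr0 may collapse into a single read; the volatile variant keeps both, which is the behavior the hardened boot code relies on.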
+@@ -157,8 +157,7 @@ relocated: + + 1: subl $4, %edi + movl (%edi), %ecx +- testl %ecx, %ecx +- jz 2f ++ jecxz 2f + addl %ebx, -__PAGE_OFFSET(%ebx, %ecx) + jmp 1b + 2: +diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/head_64.S linux-2.6.35.4/arch/x86/boot/compressed/head_64.S +--- linux-2.6.35.4/arch/x86/boot/compressed/head_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/compressed/head_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -91,7 +91,7 @@ ENTRY(startup_32) + notl %eax + andl %eax, %ebx + #else +- movl $LOAD_PHYSICAL_ADDR, %ebx ++ movl $____LOAD_PHYSICAL_ADDR, %ebx + #endif + + /* Target address to relocate to for decompression */ +@@ -233,7 +233,7 @@ ENTRY(startup_64) + notq %rax + andq %rax, %rbp + #else +- movq $LOAD_PHYSICAL_ADDR, %rbp ++ movq $____LOAD_PHYSICAL_ADDR, %rbp + #endif + + /* Target address to relocate to for decompression */ +diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/misc.c linux-2.6.35.4/arch/x86/boot/compressed/misc.c +--- linux-2.6.35.4/arch/x86/boot/compressed/misc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/compressed/misc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -285,7 +285,7 @@ static void parse_elf(void *output) + case PT_LOAD: + #ifdef CONFIG_RELOCATABLE + dest = output; +- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR); ++ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR); + #else + dest = (void *)(phdr->p_paddr); + #endif +@@ -332,7 +332,7 @@ asmlinkage void decompress_kernel(void * + error("Destination address too large"); + #endif + #ifndef CONFIG_RELOCATABLE +- if ((unsigned long)output != LOAD_PHYSICAL_ADDR) ++ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR) + error("Wrong destination address"); + #endif + +diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c +--- linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/compressed/mkpiggy.c 2010-09-17 20:12:09.000000000 -0400 +@@ -74,7 +74,7 @@ int main(int argc, char *argv[]) + + offs = (olen > ilen) ? 
olen - ilen : 0; + offs += olen >> 12; /* Add 8 bytes for each 32K block */ +- offs += 32*1024 + 18; /* Add 32K + 18 bytes slack */ ++ offs += 64*1024; /* Add 64K bytes slack */ + offs = (offs+4095) & ~4095; /* Round to a 4K boundary */ + + printf(".section \".rodata..compressed\",\"a\",@progbits\n"); +diff -urNp linux-2.6.35.4/arch/x86/boot/compressed/relocs.c linux-2.6.35.4/arch/x86/boot/compressed/relocs.c +--- linux-2.6.35.4/arch/x86/boot/compressed/relocs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/compressed/relocs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -13,8 +13,11 @@ + + static void die(char *fmt, ...); + ++#include "../../../../include/generated/autoconf.h" ++ + #define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) + static Elf32_Ehdr ehdr; ++static Elf32_Phdr *phdr; + static unsigned long reloc_count, reloc_idx; + static unsigned long *relocs; + +@@ -270,9 +273,39 @@ static void read_ehdr(FILE *fp) + } + } + ++static void read_phdrs(FILE *fp) ++{ ++ unsigned int i; ++ ++ phdr = calloc(ehdr.e_phnum, sizeof(Elf32_Phdr)); ++ if (!phdr) { ++ die("Unable to allocate %d program headers\n", ++ ehdr.e_phnum); ++ } ++ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) { ++ die("Seek to %d failed: %s\n", ++ ehdr.e_phoff, strerror(errno)); ++ } ++ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) { ++ die("Cannot read ELF program headers: %s\n", ++ strerror(errno)); ++ } ++ for(i = 0; i < ehdr.e_phnum; i++) { ++ phdr[i].p_type = elf32_to_cpu(phdr[i].p_type); ++ phdr[i].p_offset = elf32_to_cpu(phdr[i].p_offset); ++ phdr[i].p_vaddr = elf32_to_cpu(phdr[i].p_vaddr); ++ phdr[i].p_paddr = elf32_to_cpu(phdr[i].p_paddr); ++ phdr[i].p_filesz = elf32_to_cpu(phdr[i].p_filesz); ++ phdr[i].p_memsz = elf32_to_cpu(phdr[i].p_memsz); ++ phdr[i].p_flags = elf32_to_cpu(phdr[i].p_flags); ++ phdr[i].p_align = elf32_to_cpu(phdr[i].p_align); ++ } ++ ++} ++ + static void read_shdrs(FILE *fp) + { +- int i; ++ unsigned int i; + Elf32_Shdr shdr; + + secs = calloc(ehdr.e_shnum, sizeof(struct section)); +@@ -307,7 +340,7 @@ static void read_shdrs(FILE *fp) + + static void read_strtabs(FILE *fp) + { +- int i; ++ unsigned int i; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_STRTAB) { +@@ -332,7 +365,7 @@ static void read_strtabs(FILE *fp) + + static void read_symtabs(FILE *fp) + { +- int i,j; ++ unsigned int i,j; + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_SYMTAB) { +@@ -365,7 +398,9 @@ static void read_symtabs(FILE *fp) + + static void read_relocs(FILE *fp) + { +- int i,j; ++ unsigned int i,j; ++ uint32_t base; ++ + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + if (sec->shdr.sh_type != SHT_REL) { +@@ -385,9 +420,18 @@ static void read_relocs(FILE *fp) + die("Cannot read symbol table: %s\n", + strerror(errno)); + } ++ base = 0; ++ for (j = 0; j < ehdr.e_phnum; j++) { ++ if (phdr[j].p_type != PT_LOAD ) ++ continue; ++ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz) ++ continue; ++ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr; ++ break; ++ } + for (j = 0; j < sec->shdr.sh_size/sizeof(Elf32_Rel); j++) { + Elf32_Rel *rel = &sec->reltab[j]; +- rel->r_offset = elf32_to_cpu(rel->r_offset); ++ rel->r_offset = elf32_to_cpu(rel->r_offset) + base; + rel->r_info = elf32_to_cpu(rel->r_info); + } + } +@@ -396,14 +440,14 @@ static void 
read_relocs(FILE *fp) + + static void print_absolute_symbols(void) + { +- int i; ++ unsigned int i; + printf("Absolute symbols\n"); + printf(" Num: Value Size Type Bind Visibility Name\n"); + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + char *sym_strtab; + Elf32_Sym *sh_symtab; +- int j; ++ unsigned int j; + + if (sec->shdr.sh_type != SHT_SYMTAB) { + continue; +@@ -431,14 +475,14 @@ static void print_absolute_symbols(void) + + static void print_absolute_relocs(void) + { +- int i, printed = 0; ++ unsigned int i, printed = 0; + + for (i = 0; i < ehdr.e_shnum; i++) { + struct section *sec = &secs[i]; + struct section *sec_applies, *sec_symtab; + char *sym_strtab; + Elf32_Sym *sh_symtab; +- int j; ++ unsigned int j; + if (sec->shdr.sh_type != SHT_REL) { + continue; + } +@@ -499,13 +543,13 @@ static void print_absolute_relocs(void) + + static void walk_relocs(void (*visit)(Elf32_Rel *rel, Elf32_Sym *sym)) + { +- int i; ++ unsigned int i; + /* Walk through the relocations */ + for (i = 0; i < ehdr.e_shnum; i++) { + char *sym_strtab; + Elf32_Sym *sh_symtab; + struct section *sec_applies, *sec_symtab; +- int j; ++ unsigned int j; + struct section *sec = &secs[i]; + + if (sec->shdr.sh_type != SHT_REL) { +@@ -530,6 +574,22 @@ static void walk_relocs(void (*visit)(El + !is_rel_reloc(sym_name(sym_strtab, sym))) { + continue; + } ++ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */ ++ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load")) ++ continue; ++ ++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_X86_32) ++ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */ ++ if (!strcmp(sec_name(sym->st_shndx), ".module.text") && !strcmp(sym_name(sym_strtab, sym), "_etext")) ++ continue; ++ if (!strcmp(sec_name(sym->st_shndx), ".init.text")) ++ continue; ++ if (!strcmp(sec_name(sym->st_shndx), ".exit.text")) ++ continue; ++ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR")) ++ continue; ++#endif ++ + switch (r_type) { + case R_386_NONE: + case R_386_PC32: +@@ -571,7 +631,7 @@ static int cmp_relocs(const void *va, co + + static void emit_relocs(int as_text) + { +- int i; ++ unsigned int i; + /* Count how many relocations I have and allocate space for them. 
*/ + reloc_count = 0; + walk_relocs(count_reloc); +@@ -665,6 +725,7 @@ int main(int argc, char **argv) + fname, strerror(errno)); + } + read_ehdr(fp); ++ read_phdrs(fp); + read_shdrs(fp); + read_strtabs(fp); + read_symtabs(fp); +diff -urNp linux-2.6.35.4/arch/x86/boot/cpucheck.c linux-2.6.35.4/arch/x86/boot/cpucheck.c +--- linux-2.6.35.4/arch/x86/boot/cpucheck.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/cpucheck.c 2010-09-17 20:12:09.000000000 -0400 +@@ -74,7 +74,7 @@ static int has_fpu(void) + u16 fcw = -1, fsw = -1; + u32 cr0; + +- asm("movl %%cr0,%0" : "=r" (cr0)); ++ asm volatile("movl %%cr0,%0" : "=r" (cr0)); + if (cr0 & (X86_CR0_EM|X86_CR0_TS)) { + cr0 &= ~(X86_CR0_EM|X86_CR0_TS); + asm volatile("movl %0,%%cr0" : : "r" (cr0)); +@@ -90,7 +90,7 @@ static int has_eflag(u32 mask) + { + u32 f0, f1; + +- asm("pushfl ; " ++ asm volatile("pushfl ; " + "pushfl ; " + "popl %0 ; " + "movl %0,%1 ; " +@@ -115,7 +115,7 @@ static void get_flags(void) + set_bit(X86_FEATURE_FPU, cpu.flags); + + if (has_eflag(X86_EFLAGS_ID)) { +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (max_intel_level), + "=b" (cpu_vendor[0]), + "=d" (cpu_vendor[1]), +@@ -124,7 +124,7 @@ static void get_flags(void) + + if (max_intel_level >= 0x00000001 && + max_intel_level <= 0x0000ffff) { +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (tfms), + "=c" (cpu.flags[4]), + "=d" (cpu.flags[0]) +@@ -136,7 +136,7 @@ static void get_flags(void) + cpu.model += ((tfms >> 16) & 0xf) << 4; + } + +- asm("cpuid" ++ asm volatile("cpuid" + : "=a" (max_amd_level) + : "a" (0x80000000) + : "ebx", "ecx", "edx"); +@@ -144,7 +144,7 @@ static void get_flags(void) + if (max_amd_level >= 0x80000001 && + max_amd_level <= 0x8000ffff) { + u32 eax = 0x80000001; +- asm("cpuid" ++ asm volatile("cpuid" + : "+a" (eax), + "=c" (cpu.flags[6]), + "=d" (cpu.flags[1]) +@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *r + u32 ecx = MSR_K7_HWCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax &= ~(1 << 15); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + get_flags(); /* Make sure it really did something */ + err = check_flags(); +@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *r + u32 ecx = MSR_VIA_FCR; + u32 eax, edx; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); + eax |= (1<<1)|(1<<7); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + set_bit(X86_FEATURE_CX8, cpu.flags); + err = check_flags(); +@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *r + u32 eax, edx; + u32 level = 1; + +- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); +- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); +- asm("cpuid" ++ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx)); ++ asm volatile("cpuid" + : "+a" (level), "=d" (cpu.flags[0]) + : : "ecx", "ebx"); +- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); ++ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx)); + + err = check_flags(); + } +diff -urNp linux-2.6.35.4/arch/x86/boot/header.S linux-2.6.35.4/arch/x86/boot/header.S +--- linux-2.6.35.4/arch/x86/boot/header.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/header.S 2010-09-17 20:12:09.000000000 -0400 +@@ -224,7 +224,7 @@ 
setup_data: .quad 0 # 64-bit physical + # single linked list of + # struct setup_data + +-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr ++pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr + + #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset) + #define VO_INIT_SIZE (VO__end - VO__text) +diff -urNp linux-2.6.35.4/arch/x86/boot/memory.c linux-2.6.35.4/arch/x86/boot/memory.c +--- linux-2.6.35.4/arch/x86/boot/memory.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/memory.c 2010-09-17 20:12:09.000000000 -0400 +@@ -19,7 +19,7 @@ + + static int detect_memory_e820(void) + { +- int count = 0; ++ unsigned int count = 0; + struct biosregs ireg, oreg; + struct e820entry *desc = boot_params.e820_map; + static struct e820entry buf; /* static so it is zeroed */ +diff -urNp linux-2.6.35.4/arch/x86/boot/video.c linux-2.6.35.4/arch/x86/boot/video.c +--- linux-2.6.35.4/arch/x86/boot/video.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/video.c 2010-09-17 20:12:09.000000000 -0400 +@@ -96,7 +96,7 @@ static void store_mode_params(void) + static unsigned int get_entry(void) + { + char entry_buf[4]; +- int i, len = 0; ++ unsigned int i, len = 0; + int key; + unsigned int v; + +diff -urNp linux-2.6.35.4/arch/x86/boot/video-vesa.c linux-2.6.35.4/arch/x86/boot/video-vesa.c +--- linux-2.6.35.4/arch/x86/boot/video-vesa.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/boot/video-vesa.c 2010-09-17 20:12:09.000000000 -0400 +@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void) + + boot_params.screen_info.vesapm_seg = oreg.es; + boot_params.screen_info.vesapm_off = oreg.di; ++ boot_params.screen_info.vesapm_size = oreg.cx; + } + + /* +diff -urNp linux-2.6.35.4/arch/x86/ia32/ia32entry.S linux-2.6.35.4/arch/x86/ia32/ia32entry.S +--- linux-2.6.35.4/arch/x86/ia32/ia32entry.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/ia32/ia32entry.S 2010-09-17 20:12:37.000000000 -0400 +@@ -13,6 +13,7 @@ + #include <asm/thread_info.h> + #include <asm/segment.h> + #include <asm/irqflags.h> ++#include <asm/pgtable.h> + #include <linux/linkage.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ +@@ -50,7 +51,12 @@ + /* + * Reload arg registers from stack in case ptrace changed them. + * We don't reload %eax because syscall_trace_enter() returned +- * the value it wants us to use in the table lookup. ++ * the %rax value we should see. Instead, we just truncate that ++ * value to 32 bits again as we did on entry from user mode. ++ * If it's a new value set by user_regset during entry tracing, ++ * this matches the normal truncation of the user-mode value. ++ * If it's -1 to make us punt the syscall, then (u32)-1 is still ++ * an appropriately invalid value. 
+ */ + .macro LOAD_ARGS32 offset, _r9=0 + .if \_r9 +@@ -60,6 +66,7 @@ + movl \offset+48(%rsp),%edx + movl \offset+56(%rsp),%esi + movl \offset+64(%rsp),%edi ++ movl %eax,%eax /* zero extension */ + .endm + + .macro CFI_STARTPROC32 simple +@@ -114,6 +121,11 @@ ENTRY(ia32_sysenter_target) + SWAPGS_UNSAFE_STACK + movq PER_CPU_VAR(kernel_stack), %rsp + addq $(KERNEL_STACK_OFFSET),%rsp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ + /* + * No need to follow this irqs on/off section: the syscall + * disabled irqs, here we enable it straight after entry: +@@ -144,6 +156,12 @@ ENTRY(ia32_sysenter_target) + SAVE_ARGS 0,0,1 + /* no need to do an access_ok check here because rbp has been + 32bit zero extended */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%r10 ++ add %r10,%rbp ++#endif ++ + 1: movl (%rbp),%ebp + .section __ex_table,"a" + .quad 1b,ia32_badarg +@@ -153,7 +171,7 @@ ENTRY(ia32_sysenter_target) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) + CFI_REMEMBER_STATE + jnz sysenter_tracesys +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys + sysenter_do_call: + IA32_ARG_FIXUP +@@ -166,6 +184,11 @@ sysenter_dispatch: + testl $_TIF_ALLWORK_MASK,TI_flags(%r10) + jnz sysexit_audit + sysexit_from_sys_call: ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++ + andl $~TS_COMPAT,TI_status(%r10) + /* clear IF, that popfq doesn't enable interrupts early */ + andl $~0x200,EFLAGS-R11(%rsp) +@@ -195,7 +218,7 @@ sysexit_from_sys_call: + movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */ + call audit_syscall_entry + movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */ +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys + movl %ebx,%edi /* reload 1st syscall arg */ + movl RCX-ARGOFFSET(%rsp),%esi /* reload 2nd syscall arg */ +@@ -248,7 +271,7 @@ sysenter_tracesys: + call syscall_trace_enter + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */ + jmp sysenter_do_call + CFI_ENDPROC +@@ -284,6 +307,11 @@ ENTRY(ia32_cstar_target) + movl %esp,%r8d + CFI_REGISTER rsp,r8 + movq PER_CPU_VAR(kernel_stack),%rsp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ + /* + * No need to follow this irqs on/off section: the syscall + * disabled irqs and here we enable it straight after entry: +@@ -305,6 +333,12 @@ ENTRY(ia32_cstar_target) + /* no need to do an access_ok check here because r8 has been + 32bit zero extended */ + /* hardware stack frame is complete now */ ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%r10 ++ add %r10,%r8 ++#endif ++ + 1: movl (%r8),%r9d + .section __ex_table,"a" + .quad 1b,ia32_badarg +@@ -314,7 +348,7 @@ ENTRY(ia32_cstar_target) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) + CFI_REMEMBER_STATE + jnz cstar_tracesys +- cmpl $IA32_NR_syscalls-1,%eax ++ cmpq $IA32_NR_syscalls-1,%rax + ja ia32_badsys + cstar_do_call: + IA32_ARG_FIXUP 1 +@@ -327,6 +361,11 @@ cstar_dispatch: + testl $_TIF_ALLWORK_MASK,TI_flags(%r10) + jnz sysretl_audit + sysretl_from_sys_call: ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++ + andl $~TS_COMPAT,TI_status(%r10) + RESTORE_ARGS 1,-ARG_SKIP,1,1,1 + movl RIP-ARGOFFSET(%rsp),%ecx +@@ -367,7 +406,7 @@ cstar_tracesys: + LOAD_ARGS32 ARGOFFSET, 1 /* reload args from 
stack in case ptrace changed it */ + RESTORE_REST + xchgl %ebp,%r9d +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */ + jmp cstar_do_call + END(ia32_cstar_target) +@@ -409,6 +448,11 @@ ENTRY(ia32_syscall) + CFI_REL_OFFSET rip,RIP-RIP + PARAVIRT_ADJUST_EXCEPTION_FRAME + SWAPGS ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ + /* + * No need to follow this irqs on/off section: the syscall + * disabled irqs and here we enable it straight after entry: +@@ -425,7 +469,7 @@ ENTRY(ia32_syscall) + orl $TS_COMPAT,TI_status(%r10) + testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r10) + jnz ia32_tracesys +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja ia32_badsys + ia32_do_call: + IA32_ARG_FIXUP +@@ -444,7 +488,7 @@ ia32_tracesys: + call syscall_trace_enter + LOAD_ARGS32 ARGOFFSET /* reload args from stack in case ptrace changed it */ + RESTORE_REST +- cmpl $(IA32_NR_syscalls-1),%eax ++ cmpq $(IA32_NR_syscalls-1),%rax + ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */ + jmp ia32_do_call + END(ia32_syscall) +diff -urNp linux-2.6.35.4/arch/x86/ia32/ia32_signal.c linux-2.6.35.4/arch/x86/ia32/ia32_signal.c +--- linux-2.6.35.4/arch/x86/ia32/ia32_signal.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/ia32/ia32_signal.c 2010-09-17 20:12:09.000000000 -0400 +@@ -403,7 +403,7 @@ static void __user *get_sigframe(struct + sp -= frame_size; + /* Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + return (void __user *) sp; + } + +@@ -503,7 +503,7 @@ int ia32_setup_rt_frame(int sig, struct + 0xb8, + __NR_ia32_rt_sigreturn, + 0x80cd, +- 0, ++ 0 + }; + + frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate); +diff -urNp linux-2.6.35.4/arch/x86/include/asm/alternative.h linux-2.6.35.4/arch/x86/include/asm/alternative.h +--- linux-2.6.35.4/arch/x86/include/asm/alternative.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/alternative.h 2010-09-17 20:12:09.000000000 -0400 +@@ -91,7 +91,7 @@ static inline int alternatives_text_rese + " .byte 664f-663f\n" /* replacementlen */ \ + " .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \ + ".previous\n" \ +- ".section .altinstr_replacement, \"ax\"\n" \ ++ ".section .altinstr_replacement, \"a\"\n" \ + "663:\n\t" newinstr "\n664:\n" /* replacement */ \ + ".previous" + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/apm.h linux-2.6.35.4/arch/x86/include/asm/apm.h +--- linux-2.6.35.4/arch/x86/include/asm/apm.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/apm.h 2010-09-17 20:12:09.000000000 -0400 +@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%al\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_as + __asm__ __volatile__(APM_DO_ZERO_SEGS + "pushl %%edi\n\t" + "pushl %%ebp\n\t" +- "lcall *%%cs:apm_bios_entry\n\t" ++ "lcall *%%ss:apm_bios_entry\n\t" + "setc %%bl\n\t" + "popl %%ebp\n\t" + "popl %%edi\n\t" +diff -urNp linux-2.6.35.4/arch/x86/include/asm/asm.h linux-2.6.35.4/arch/x86/include/asm/asm.h +--- linux-2.6.35.4/arch/x86/include/asm/asm.h 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/arch/x86/include/asm/asm.h 2010-09-17 20:12:09.000000000 -0400 +@@ -37,6 +37,12 @@ + #define _ASM_SI __ASM_REG(si) + #define _ASM_DI __ASM_REG(di) + ++#ifdef CONFIG_X86_32 ++#define _ASM_INTO "into" ++#else ++#define _ASM_INTO "int $4" ++#endif ++ + /* Exception table entry */ + #ifdef __ASSEMBLY__ + # define _ASM_EXTABLE(from,to) \ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h +--- linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/atomic64_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -12,6 +12,14 @@ typedef struct { + u64 __aligned(8) counter; + } atomic64_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ u64 __aligned(8) counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif ++ + #define ATOMIC64_INIT(val) { (val) } + + #ifdef CONFIG_X86_CMPXCHG64 +diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h +--- linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/atomic64_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -22,6 +22,18 @@ static inline long atomic64_read(const a + } + + /** ++ * atomic64_read_unchecked - read atomic64 variable ++ * @v: pointer of type atomic64_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ * Doesn't imply a read memory barrier. ++ */ ++static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v) ++{ ++ return v->counter; ++} ++ ++/** + * atomic64_set - set atomic64 variable + * @v: pointer to type atomic64_t + * @i: required value +@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64 + } + + /** ++ * atomic64_set_unchecked - set atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. ++ */ ++static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic64_add - add integer to atomic64 variable + * @i: integer value to add + * @v: pointer to type atomic64_t +@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64 + */ + static inline void atomic64_add(long i, atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "addq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=m" (v->counter) ++ : "er" (i), "m" (v->counter)); ++} ++ ++/** ++ * atomic64_add_unchecked - add integer to atomic64 variable ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v. 
++ */ ++static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "addq %1,%0" + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); +@@ -56,7 +102,15 @@ static inline void atomic64_add(long i, + */ + static inline void atomic64_sub(long i, atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "subq %1,%0" ++ asm volatile(LOCK_PREFIX "subq %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %1,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ + : "=m" (v->counter) + : "er" (i), "m" (v->counter)); + } +@@ -74,7 +128,16 @@ static inline int atomic64_sub_and_test( + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1" ++ asm volatile(LOCK_PREFIX "subq %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addq %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +@@ -88,6 +151,31 @@ static inline int atomic64_sub_and_test( + */ + static inline void atomic64_inc(atomic64_t *v) + { ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "int $4\n0:\n" ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ LOCK_PREFIX "decq %0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_inc_unchecked - increment atomic64 variable ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically increments @v by 1. ++ */ ++static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v) ++{ + asm volatile(LOCK_PREFIX "incq %0" + : "=m" (v->counter) + : "m" (v->counter)); +@@ -101,7 +189,32 @@ static inline void atomic64_inc(atomic64 + */ + static inline void atomic64_dec(atomic64_t *v) + { +- asm volatile(LOCK_PREFIX "decq %0" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "int $4\n0:\n" ++ ".pushsection .fixup,\"ax\"\n" ++ "1: \n" ++ LOCK_PREFIX "incq %0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ : "=m" (v->counter) ++ : "m" (v->counter)); ++} ++ ++/** ++ * atomic64_dec_unchecked - decrement atomic64 variable ++ * @v: pointer to type atomic64_t ++ * ++ * Atomically decrements @v by 1. 
++ */ ++static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decq %0\n" + : "=m" (v->counter) + : "m" (v->counter)); + } +@@ -118,7 +231,20 @@ static inline int atomic64_dec_and_test( + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "decq %0; sete %1" ++ asm volatile(LOCK_PREFIX "decq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "int $4\n0:\n" ++ ".pushsection .fixup,\"ax\"\n" ++ "1: \n" ++ LOCK_PREFIX "incq %0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +@@ -136,7 +262,20 @@ static inline int atomic64_inc_and_test( + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "incq %0; sete %1" ++ asm volatile(LOCK_PREFIX "incq %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "int $4\n0:\n" ++ ".pushsection .fixup,\"ax\"\n" ++ "1: \n" ++ LOCK_PREFIX "decq %0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "sete %1\n" + : "=m" (v->counter), "=qm" (c) + : "m" (v->counter) : "memory"); + return c != 0; +@@ -155,7 +294,16 @@ static inline int atomic64_add_negative( + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1" ++ asm volatile(LOCK_PREFIX "addq %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subq %2,%0\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "=m" (v->counter), "=qm" (c) + : "er" (i), "m" (v->counter) : "memory"); + return c; +@@ -171,7 +319,31 @@ static inline int atomic64_add_negative( + static inline long atomic64_add_return(long i, atomic64_t *v) + { + long __i = i; +- asm volatile(LOCK_PREFIX "xaddq %0, %1;" ++ asm volatile(LOCK_PREFIX "xaddq %0, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "movq %0, %1\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+r" (i), "+m" (v->counter) ++ : : "memory"); ++ return i + __i; ++} ++ ++/** ++ * atomic64_add_return_unchecked - add and return ++ * @i: integer value to add ++ * @v: pointer to type atomic64_unchecked_t ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v) ++{ ++ long __i = i; ++ asm volatile(LOCK_PREFIX "xaddq %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); + return i + __i; +@@ -183,6 +355,10 @@ static inline long atomic64_sub_return(l + } + + #define atomic64_inc_return(v) (atomic64_add_return(1, (v))) ++static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v) ++{ ++ return atomic64_add_return_unchecked(1, v); ++} + #define atomic64_dec_return(v) (atomic64_sub_return(1, (v))) + + static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new) +@@ -206,17 +382,29 @@ static inline long atomic64_xchg(atomic6 + */ + static inline int atomic64_add_unless(atomic64_t *v, long a, long u) + { +- long c, old; ++ long c, old, new; + c = atomic64_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic64_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("add %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "int $4\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic64_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/atomic.h 
linux-2.6.35.4/arch/x86/include/asm/atomic.h +--- linux-2.6.35.4/arch/x86/include/asm/atomic.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/atomic.h 2010-09-17 20:12:09.000000000 -0400 +@@ -26,6 +26,17 @@ static inline int atomic_read(const atom + } + + /** ++ * atomic_read_unchecked - read atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically reads the value of @v. ++ */ ++static inline int atomic_read_unchecked(const atomic_unchecked_t *v) ++{ ++ return v->counter; ++} ++ ++/** + * atomic_set - set atomic variable + * @v: pointer of type atomic_t + * @i: required value +@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t * + } + + /** ++ * atomic_set_unchecked - set atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * @i: required value ++ * ++ * Atomically sets the value of @v to @i. ++ */ ++static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i) ++{ ++ v->counter = i; ++} ++ ++/** + * atomic_add - add integer to atomic variable + * @i: integer value to add + * @v: pointer of type atomic_t +@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t * + */ + static inline void atomic_add(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "addl %1,%0" ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %1,%0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_add_unchecked - add integer to atomic variable ++ * @i: integer value to add ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically adds @i to @v. ++ */ ++static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "addl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -60,7 +105,29 @@ static inline void atomic_add(int i, ato + */ + static inline void atomic_sub(int i, atomic_t *v) + { +- asm volatile(LOCK_PREFIX "subl %1,%0" ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %1,%0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter) ++ : "ir" (i)); ++} ++ ++/** ++ * atomic_sub_unchecked - subtract integer from atomic variable ++ * @i: integer value to subtract ++ * @v: pointer of type atomic_t ++ * ++ * Atomically subtracts @i from @v. ++ */ ++static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "subl %1,%0\n" + : "+m" (v->counter) + : "ir" (i)); + } +@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(in + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1" ++ asm volatile(LOCK_PREFIX "subl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "addl %2,%0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(in + */ + static inline void atomic_inc(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "incl %0" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_inc_unchecked - increment atomic variable ++ * @v: pointer of type atomic_unchecked_t ++ * ++ * Atomically increments @v by 1. 
++ */ ++static inline void atomic_inc_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "incl %0\n" + : "+m" (v->counter)); + } + +@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t * + */ + static inline void atomic_dec(atomic_t *v) + { +- asm volatile(LOCK_PREFIX "decl %0" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+m" (v->counter)); ++} ++ ++/** ++ * atomic_dec_unchecked - decrement atomic variable ++ * @v: pointer of type atomic_t ++ * ++ * Atomically decrements @v by 1. ++ */ ++static inline void atomic_dec_unchecked(atomic_unchecked_t *v) ++{ ++ asm volatile(LOCK_PREFIX "decl %0\n" + : "+m" (v->counter)); + } + +@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(at + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "decl %0; sete %1" ++ asm volatile(LOCK_PREFIX "decl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "incl %0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -138,7 +263,16 @@ static inline int atomic_inc_and_test(at + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "incl %0; sete %1" ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "decl %0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sete %1\n" + : "+m" (v->counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -157,7 +291,16 @@ static inline int atomic_add_negative(in + { + unsigned char c; + +- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1" ++ asm volatile(LOCK_PREFIX "addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ LOCK_PREFIX "subl %2,%0\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ "sets %1\n" + : "+m" (v->counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -180,6 +323,46 @@ static inline int atomic_add_return(int + #endif + /* Modern 486+ processor */ + __i = i; ++ asm volatile(LOCK_PREFIX "xaddl %0, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ "movl %0, %1\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "+r" (i), "+m" (v->counter) ++ : : "memory"); ++ return i + __i; ++ ++#ifdef CONFIG_M386 ++no_xadd: /* Legacy 386 processor */ ++ local_irq_save(flags); ++ __i = atomic_read(v); ++ atomic_set(v, i + __i); ++ local_irq_restore(flags); ++ return i + __i; ++#endif ++} ++ ++/** ++ * atomic_add_return_unchecked - add integer and return ++ * @v: pointer of type atomic_unchecked_t ++ * @i: integer value to add ++ * ++ * Atomically adds @i to @v and returns @i + @v ++ */ ++static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v) ++{ ++ int __i; ++#ifdef CONFIG_M386 ++ unsigned long flags; ++ if (unlikely(boot_cpu_data.x86 <= 3)) ++ goto no_xadd; ++#endif ++ /* Modern 486+ processor */ ++ __i = i; + asm volatile(LOCK_PREFIX "xaddl %0, %1" + : "+r" (i), "+m" (v->counter) + : : "memory"); +@@ -208,6 +391,10 @@ static inline int atomic_sub_return(int + } + + #define atomic_inc_return(v) (atomic_add_return(1, v)) ++static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v) ++{ ++ return atomic_add_return_unchecked(1, v); ++} + #define atomic_dec_return(v) (atomic_sub_return(1, v)) + + static inline int atomic_cmpxchg(atomic_t *v, int old, int new) +@@ -231,17 +418,29 @@ static inline int atomic_xchg(atomic_t * + */ + static inline int atomic_add_unless(atomic_t *v, int a, 
int u) + { +- int c, old; ++ int c, old, new; + c = atomic_read(v); + for (;;) { +- if (unlikely(c == (u))) ++ if (unlikely(c == u)) + break; +- old = atomic_cmpxchg((v), c, c + (a)); ++ ++ asm volatile("addl %2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ "jno 0f\n" ++ _ASM_INTO "\n0:\n" ++ _ASM_EXTABLE(0b, 0b) ++#endif ++ ++ : "=r" (new) ++ : "0" (c), "ir" (a)); ++ ++ old = atomic_cmpxchg(v, c, new); + if (likely(old == c)) + break; + c = old; + } +- return c != (u); ++ return c != u; + } + + #define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/boot.h linux-2.6.35.4/arch/x86/include/asm/boot.h +--- linux-2.6.35.4/arch/x86/include/asm/boot.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/boot.h 2010-09-17 20:12:09.000000000 -0400 +@@ -11,10 +11,15 @@ + #include <asm/pgtable_types.h> + + /* Physical address where kernel should be loaded. */ +-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ ++#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \ + + (CONFIG_PHYSICAL_ALIGN - 1)) \ + & ~(CONFIG_PHYSICAL_ALIGN - 1)) + ++#ifndef __ASSEMBLY__ ++extern unsigned char __LOAD_PHYSICAL_ADDR[]; ++#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR) ++#endif ++ + /* Minimum kernel alignment, as a power of two */ + #ifdef CONFIG_X86_64 + #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT +diff -urNp linux-2.6.35.4/arch/x86/include/asm/cacheflush.h linux-2.6.35.4/arch/x86/include/asm/cacheflush.h +--- linux-2.6.35.4/arch/x86/include/asm/cacheflush.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/cacheflush.h 2010-09-17 20:12:09.000000000 -0400 +@@ -66,7 +66,7 @@ static inline unsigned long get_page_mem + unsigned long pg_flags = pg->flags & _PGMT_MASK; + + if (pg_flags == _PGMT_DEFAULT) +- return -1; ++ return ~0UL; + else if (pg_flags == _PGMT_WC) + return _PAGE_CACHE_WC; + else if (pg_flags == _PGMT_UC_MINUS) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/cache.h linux-2.6.35.4/arch/x86/include/asm/cache.h +--- linux-2.6.35.4/arch/x86/include/asm/cache.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/cache.h 2010-09-17 20:12:09.000000000 -0400 +@@ -8,6 +8,7 @@ + #define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + + #define __read_mostly __attribute__((__section__(".data..read_mostly"))) ++#define __read_only __attribute__((__section__(".data..read_only"))) + + #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT + #define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/checksum_32.h linux-2.6.35.4/arch/x86/include/asm/checksum_32.h +--- linux-2.6.35.4/arch/x86/include/asm/checksum_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/checksum_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_gene + int len, __wsum sum, + int *src_err_ptr, int *dst_err_ptr); + ++asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ ++asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst, ++ int len, __wsum sum, ++ int *src_err_ptr, int *dst_err_ptr); ++ + /* + * Note: when you get a NULL pointer exception here this means someone + * passed in an incorrect kernel address to one of these functions. 
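An aside on the CONFIG_PAX_REFCOUNT pattern used throughout the atomic*() hunks above (a rough model, not the patch's actual code): each locked arithmetic instruction is followed by jno, which falls through while the CPU's overflow flag is clear; on signed overflow the operation is undone with the inverse instruction and #OF is raised (into on i386, int $4 on amd64, abstracted as _ASM_INTO), so the fault handler can report a reference-count overflow instead of letting the counter wrap. In C, the condition the OF flag encodes looks roughly like this non-atomic sketch:

static inline void refcount_add_model(int i, int *counter)
{
	unsigned int old = (unsigned int)*counter;
	unsigned int res = old + (unsigned int)i;

	/* signed overflow iff the result's sign differs from both operands' */
	if ((int)((res ^ old) & (res ^ (unsigned int)i)) < 0)
		*counter = (int)old;	/* the "jno 0f; subl %1,%0" undo path */
	else
		*counter = (int)res;
}

The real code gets the check for free: after addl/subl/incl/decl the overflow flag already encodes exactly this condition, so only the conditional jump and the compensating instruction need to be added, and the _unchecked variants exist for counters that are allowed to wrap.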
+@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_f + int *err_ptr) + { + might_sleep(); +- return csum_partial_copy_generic((__force void *)src, dst, ++ return csum_partial_copy_generic_from_user((__force void *)src, dst, + len, sum, err_ptr, NULL); + } + +@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_us + { + might_sleep(); + if (access_ok(VERIFY_WRITE, dst, len)) +- return csum_partial_copy_generic(src, (__force void *)dst, ++ return csum_partial_copy_generic_to_user(src, (__force void *)dst, + len, sum, NULL, err_ptr); + + if (len) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/compat.h linux-2.6.35.4/arch/x86/include/asm/compat.h +--- linux-2.6.35.4/arch/x86/include/asm/compat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/compat.h 2010-09-17 20:12:37.000000000 -0400 +@@ -205,7 +205,7 @@ static inline compat_uptr_t ptr_to_compa + return (u32)(unsigned long)uptr; + } + +-static inline void __user *compat_alloc_user_space(long len) ++static inline void __user *arch_compat_alloc_user_space(long len) + { + struct pt_regs *regs = task_pt_regs(current); + return (void __user *)regs->sp - len; +diff -urNp linux-2.6.35.4/arch/x86/include/asm/cpufeature.h linux-2.6.35.4/arch/x86/include/asm/cpufeature.h +--- linux-2.6.35.4/arch/x86/include/asm/cpufeature.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/cpufeature.h 2010-09-17 20:12:09.000000000 -0400 +@@ -323,7 +323,7 @@ static __always_inline __pure bool __sta + " .byte 4f - 3f\n" /* replacement len */ + " .byte 0xff + (4f-3f) - (2b-1b)\n" /* padding */ + ".previous\n" +- ".section .altinstr_replacement,\"ax\"\n" ++ ".section .altinstr_replacement,\"a\"\n" + "3: movb $1,%0\n" + "4:\n" + ".previous\n" +diff -urNp linux-2.6.35.4/arch/x86/include/asm/desc.h linux-2.6.35.4/arch/x86/include/asm/desc.h +--- linux-2.6.35.4/arch/x86/include/asm/desc.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/desc.h 2010-09-17 20:12:09.000000000 -0400 +@@ -4,6 +4,7 @@ + #include <asm/desc_defs.h> + #include <asm/ldt.h> + #include <asm/mmu.h> ++#include <asm/pgtable.h> + #include <linux/smp.h> + + static inline void fill_ldt(struct desc_struct *desc, +@@ -15,6 +16,7 @@ static inline void fill_ldt(struct desc_ + desc->base1 = (info->base_addr & 0x00ff0000) >> 16; + desc->type = (info->read_exec_only ^ 1) << 1; + desc->type |= info->contents << 2; ++ desc->type |= info->seg_not_present ^ 1; + desc->s = 1; + desc->dpl = 0x3; + desc->p = info->seg_not_present ^ 1; +@@ -31,16 +33,12 @@ static inline void fill_ldt(struct desc_ + } + + extern struct desc_ptr idt_descr; +-extern gate_desc idt_table[]; +- +-struct gdt_page { +- struct desc_struct gdt[GDT_ENTRIES]; +-} __attribute__((aligned(PAGE_SIZE))); +-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page); ++extern gate_desc idt_table[256]; + ++extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)]; + static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu) + { +- return per_cpu(gdt_page, cpu).gdt; ++ return cpu_gdt_table[cpu]; + } + + #ifdef CONFIG_X86_64 +@@ -115,19 +113,24 @@ static inline void paravirt_free_ldt(str + static inline void native_write_idt_entry(gate_desc *idt, int entry, + const gate_desc *gate) + { ++ pax_open_kernel(); + memcpy(&idt[entry], gate, sizeof(*gate)); ++ pax_close_kernel(); + } + + static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, + const void *desc) + { ++ pax_open_kernel(); + memcpy(&ldt[entry], 
desc, 8); ++ pax_close_kernel(); + } + + static inline void native_write_gdt_entry(struct desc_struct *gdt, int entry, + const void *desc, int type) + { + unsigned int size; ++ + switch (type) { + case DESC_TSS: + size = sizeof(tss_desc); +@@ -139,7 +142,10 @@ static inline void native_write_gdt_entr + size = sizeof(struct desc_struct); + break; + } ++ ++ pax_open_kernel(); + memcpy(&gdt[entry], desc, size); ++ pax_close_kernel(); + } + + static inline void pack_descriptor(struct desc_struct *desc, unsigned long base, +@@ -211,7 +217,9 @@ static inline void native_set_ldt(const + + static inline void native_load_tr_desc(void) + { ++ pax_open_kernel(); + asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8)); ++ pax_close_kernel(); + } + + static inline void native_load_gdt(const struct desc_ptr *dtr) +@@ -246,8 +254,10 @@ static inline void native_load_tls(struc + unsigned int i; + struct desc_struct *gdt = get_cpu_gdt_table(cpu); + ++ pax_open_kernel(); + for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) + gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i]; ++ pax_close_kernel(); + } + + #define _LDT_empty(info) \ +@@ -309,7 +319,7 @@ static inline void set_desc_limit(struct + desc->limit = (limit >> 16) & 0xf; + } + +-static inline void _set_gate(int gate, unsigned type, void *addr, ++static inline void _set_gate(int gate, unsigned type, const void *addr, + unsigned dpl, unsigned ist, unsigned seg) + { + gate_desc s; +@@ -327,7 +337,7 @@ static inline void _set_gate(int gate, u + * Pentium F0 0F bugfix can have resulted in the mapped + * IDT being write-protected. + */ +-static inline void set_intr_gate(unsigned int n, void *addr) ++static inline void set_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS); +@@ -356,19 +366,19 @@ static inline void alloc_intr_gate(unsig + /* + * This routine sets up an interrupt gate at directory privilege level 3. 
+ */ +-static inline void set_system_intr_gate(unsigned int n, void *addr) ++static inline void set_system_intr_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_system_trap_gate(unsigned int n, void *addr) ++static inline void set_system_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS); + } + +-static inline void set_trap_gate(unsigned int n, void *addr) ++static inline void set_trap_gate(unsigned int n, const void *addr) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS); +@@ -377,19 +387,31 @@ static inline void set_trap_gate(unsigne + static inline void set_task_gate(unsigned int n, unsigned int gdt_entry) + { + BUG_ON((unsigned)n > 0xFF); +- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3)); ++ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3)); + } + +-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS); + } + +-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist) ++static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist) + { + BUG_ON((unsigned)n > 0xFF); + _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS); + } + ++#ifdef CONFIG_X86_32 ++static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu) ++{ ++ struct desc_struct d; ++ ++ if (likely(limit)) ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ pack_descriptor(&d, base, limit, 0xFB, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S); ++} ++#endif ++ + #endif /* _ASM_X86_DESC_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/device.h linux-2.6.35.4/arch/x86/include/asm/device.h +--- linux-2.6.35.4/arch/x86/include/asm/device.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/device.h 2010-09-17 20:12:09.000000000 -0400 +@@ -6,7 +6,7 @@ struct dev_archdata { + void *acpi_handle; + #endif + #ifdef CONFIG_X86_64 +-struct dma_map_ops *dma_ops; ++ const struct dma_map_ops *dma_ops; + #endif + #if defined(CONFIG_DMAR) || defined(CONFIG_AMD_IOMMU) + void *iommu; /* hook for IOMMU specific extension */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h +--- linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400 +@@ -26,9 +26,9 @@ extern int iommu_merge; + extern struct device x86_dma_fallback_dev; + extern int panic_on_overflow; + +-extern struct dma_map_ops *dma_ops; ++extern const struct dma_map_ops *dma_ops; + +-static inline struct dma_map_ops *get_dma_ops(struct device *dev) ++static inline const struct dma_map_ops *get_dma_ops(struct device *dev) + { + #ifdef CONFIG_X86_32 + return dma_ops; +@@ -45,7 +45,7 @@ static inline struct dma_map_ops *get_dm + /* Make sure we keep the same behaviour */ + static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + if (ops->mapping_error) + return ops->mapping_error(dev, dma_addr); + +@@ -123,7 +123,7 @@ static inline void * 
+ dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle, + gfp_t gfp) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + void *memory; + + gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32); +@@ -150,7 +150,7 @@ dma_alloc_coherent(struct device *dev, s + static inline void dma_free_coherent(struct device *dev, size_t size, + void *vaddr, dma_addr_t bus) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + WARN_ON(irqs_disabled()); /* for portability */ + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/e820.h linux-2.6.35.4/arch/x86/include/asm/e820.h +--- linux-2.6.35.4/arch/x86/include/asm/e820.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/e820.h 2010-09-17 20:12:09.000000000 -0400 +@@ -69,7 +69,7 @@ struct e820map { + #define ISA_START_ADDRESS 0xa0000 + #define ISA_END_ADDRESS 0x100000 + +-#define BIOS_BEGIN 0x000a0000 ++#define BIOS_BEGIN 0x000c0000 + #define BIOS_END 0x00100000 + + #ifdef __KERNEL__ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/elf.h linux-2.6.35.4/arch/x86/include/asm/elf.h +--- linux-2.6.35.4/arch/x86/include/asm/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -237,7 +237,25 @@ extern int force_personality32; + the loader. We need to make sure that it is out of the way of the program + that it will "exec", and that there is sufficient room for the brk. */ + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2) ++#else + #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2) ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++#ifdef CONFIG_X86_32 ++#define PAX_ELF_ET_DYN_BASE 0x10000000UL ++ ++#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16) ++#else ++#define PAX_ELF_ET_DYN_BASE 0x400000UL ++ ++#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_IA32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3) ++#endif ++#endif + + /* This yields a mask that user programs can use to figure out what + instruction set this CPU supports. 
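The PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN values in the elf.h hunk above are entropy widths: they give the number of randomized, page-granular bits PaX ASLR applies to the mmap and stack bases (15 or 16 bits on i386 depending on SEGMEXEC, up to TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 on amd64). A minimal user-space sketch of how such a bit count becomes a randomized, page-aligned base; demo_randomize_base, DEMO_BASE and the rand() stand-in are illustrative, not names from the patch:

#include <stdio.h>
#include <stdlib.h>

#define DEMO_PAGE_SHIFT 12
#define DEMO_BASE       0x10000000UL   /* cf. PAX_ELF_ET_DYN_BASE on i386 */

/* Pick a page-aligned base randomized over 'delta_bits' bits of entropy. */
static unsigned long demo_randomize_base(unsigned long base, int delta_bits)
{
    unsigned long mask  = (1UL << delta_bits) - 1;
    unsigned long delta = (unsigned long)rand() & mask; /* stand-in RNG */
    return base + (delta << DEMO_PAGE_SHIFT);           /* shift keeps page alignment */
}

int main(void)
{
    printf("%#lx\n", demo_randomize_base(DEMO_BASE, 16)); /* 16 bits ~ i386 mmap delta */
    return 0;
}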
This could be done in user space, +@@ -291,8 +309,7 @@ do { \ + #define ARCH_DLINFO \ + do { \ + if (vdso_enabled) \ +- NEW_AUX_ENT(AT_SYSINFO_EHDR, \ +- (unsigned long)current->mm->context.vdso); \ ++ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso);\ + } while (0) + + #define AT_SYSINFO 32 +@@ -303,7 +320,7 @@ do { \ + + #endif /* !CONFIG_X86_32 */ + +-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso) ++#define VDSO_CURRENT_BASE (current->mm->context.vdso) + + #define VDSO_ENTRY \ + ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall)) +@@ -317,7 +334,4 @@ extern int arch_setup_additional_pages(s + extern int syscall32_setup_pages(struct linux_binprm *, int exstack); + #define compat_arch_setup_additional_pages syscall32_setup_pages + +-extern unsigned long arch_randomize_brk(struct mm_struct *mm); +-#define arch_randomize_brk arch_randomize_brk +- + #endif /* _ASM_X86_ELF_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/futex.h linux-2.6.35.4/arch/x86/include/asm/futex.h +--- linux-2.6.35.4/arch/x86/include/asm/futex.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/futex.h 2010-09-17 20:12:09.000000000 -0400 +@@ -11,17 +11,54 @@ + #include <asm/processor.h> + #include <asm/system.h> + ++#ifdef CONFIG_X86_32 + #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ ++ asm volatile( \ ++ "movw\t%w6, %%ds\n" \ ++ "1:\t" insn "\n" \ ++ "2:\tpushl\t%%ss\n" \ ++ "\tpopl\t%%ds\n" \ ++ "\t.section .fixup,\"ax\"\n" \ ++ "3:\tmov\t%3, %1\n" \ ++ "\tjmp\t2b\n" \ ++ "\t.previous\n" \ ++ _ASM_EXTABLE(1b, 3b) \ ++ : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ ++ : "i" (-EFAULT), "0" (oparg), "1" (0), "r" (__USER_DS)) ++ ++#define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ ++ asm volatile("movw\t%w7, %%es\n" \ ++ "1:\tmovl\t%%es:%2, %0\n" \ ++ "\tmovl\t%0, %3\n" \ ++ "\t" insn "\n" \ ++ "2:\t" LOCK_PREFIX "cmpxchgl %3, %%es:%2\n"\ ++ "\tjnz\t1b\n" \ ++ "3:\tpushl\t%%ss\n" \ ++ "\tpopl\t%%es\n" \ ++ "\t.section .fixup,\"ax\"\n" \ ++ "4:\tmov\t%5, %1\n" \ ++ "\tjmp\t3b\n" \ ++ "\t.previous\n" \ ++ _ASM_EXTABLE(1b, 4b) \ ++ _ASM_EXTABLE(2b, 4b) \ ++ : "=&a" (oldval), "=&r" (ret), \ ++ "+m" (*uaddr), "=&r" (tem) \ ++ : "r" (oparg), "i" (-EFAULT), "1" (0), "r" (__USER_DS)) ++#else ++#define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 *, uaddr); \ + asm volatile("1:\t" insn "\n" \ + "2:\t.section .fixup,\"ax\"\n" \ + "3:\tmov\t%3, %1\n" \ + "\tjmp\t2b\n" \ + "\t.previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \ ++ : "=r" (oldval), "=r" (ret), \ ++ "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4))\ + : "i" (-EFAULT), "0" (oparg), "1" (0)) + + #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \ ++ typecheck(u32 *, uaddr); \ + asm volatile("1:\tmovl %2, %0\n" \ + "\tmovl\t%0, %3\n" \ + "\t" insn "\n" \ +@@ -34,10 +71,12 @@ + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=&a" (oldval), "=&r" (ret), \ +- "+m" (*uaddr), "=&r" (tem) \ ++ "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)),\ ++ "=&r" (tem) \ + : "r" (oparg), "i" (-EFAULT), "1" (0)) ++#endif + +-static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) ++static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) + { + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; +@@ -61,11 +100,20 @@ static inline int futex_atomic_op_inuser + + switch (op) { + case FUTEX_OP_SET: ++#ifdef CONFIG_X86_32 ++ __futex_atomic_op1("xchgl %0, %%ds:%2", ret, oldval, uaddr, 
oparg); ++#else + __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg); ++#endif + break; + case FUTEX_OP_ADD: ++#ifdef CONFIG_X86_32 ++ __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %%ds:%2", ret, oldval, ++ uaddr, oparg); ++#else + __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval, + uaddr, oparg); ++#endif + break; + case FUTEX_OP_OR: + __futex_atomic_op2("orl %4, %3", ret, oldval, uaddr, oparg); +@@ -109,7 +157,7 @@ static inline int futex_atomic_op_inuser + return ret; + } + +-static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, ++static inline int futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, + int newval) + { + +@@ -119,17 +167,31 @@ static inline int futex_atomic_cmpxchg_i + return -ENOSYS; + #endif + +- if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) ++ if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + return -EFAULT; + +- asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" +- "2:\t.section .fixup, \"ax\"\n" ++ asm volatile( ++#ifdef CONFIG_X86_32 ++ "\tmovw %w5, %%ds\n" ++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %%ds:%1\n" ++ "2:\tpushl %%ss\n" ++ "\tpopl %%ds\n" ++#else ++ "1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" ++ "2:\n" ++#endif ++ "\t.section .fixup, \"ax\"\n" + "3:\tmov %2, %0\n" + "\tjmp 2b\n" + "\t.previous\n" + _ASM_EXTABLE(1b, 3b) ++#ifdef CONFIG_X86_32 + : "=a" (oldval), "+m" (*uaddr) ++ : "i" (-EFAULT), "r" (newval), "0" (oldval), "r" (__USER_DS) ++#else ++ : "=a" (oldval), "+m" (*(uaddr + PAX_USER_SHADOW_BASE / 4)) + : "i" (-EFAULT), "r" (newval), "0" (oldval) ++#endif + : "memory" + ); + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/i387.h linux-2.6.35.4/arch/x86/include/asm/i387.h +--- linux-2.6.35.4/arch/x86/include/asm/i387.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/i387.h 2010-09-17 20:12:09.000000000 -0400 +@@ -77,6 +77,11 @@ static inline int fxrstor_checking(struc + { + int err; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE) ++ fx = (struct i387_fxsave_struct *)((void *)fx + PAX_USER_SHADOW_BASE); ++#endif ++ + asm volatile("1: rex64/fxrstor (%[fx])\n\t" + "2:\n" + ".section .fixup,\"ax\"\n" +@@ -127,6 +132,11 @@ static inline int fxsave_user(struct i38 + { + int err; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)fx < PAX_USER_SHADOW_BASE) ++ fx = (struct i387_fxsave_struct __user *)((void __user *)fx + PAX_USER_SHADOW_BASE); ++#endif ++ + asm volatile("1: rex64/fxsave (%[fx])\n\t" + "2:\n" + ".section .fixup,\"ax\"\n" +@@ -220,13 +230,8 @@ static inline int fxrstor_checking(struc + } + + /* We need a safe address that is cheap to find and that is already +- in L1 during context switch. The best choices are unfortunately +- different for UP and SMP */ +-#ifdef CONFIG_SMP +-#define safe_address (__per_cpu_offset[0]) +-#else +-#define safe_address (kstat_cpu(0).cpustat.user) +-#endif ++ in L1 during context switch. 
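The amd64 side of the futex and i387 hunks above rebases user pointers by PAX_USER_SHADOW_BASE (divided by 4 in the futex constraints only because uaddr is an int pointer). Under CONFIG_PAX_MEMORY_UDEREF userland is reachable from kernel mode only through this high shadow mapping, so a raw dereference of a low user address faults. A sketch of the rebasing idea, assuming the base value this patch later defines as (1UL << TASK_SIZE_MAX_SHIFT); the demo_* names are hypothetical:

#include <stdint.h>

/* Illustrative stand-in for PAX_USER_SHADOW_BASE on amd64 with UDEREF. */
#define DEMO_SHADOW_BASE (1UL << 47)

/* Rebase a user pointer into the shadow mapping before the kernel
 * touches it, as fxsave_user()/fxrstor_checking() do above; pointers
 * already above the base are left alone. */
static inline void *demo_rebase_user_ptr(void *uptr)
{
    uintptr_t p = (uintptr_t)uptr;
    if (p < DEMO_SHADOW_BASE)
        p += DEMO_SHADOW_BASE;
    return (void *)p;
}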
*/ ++#define safe_address (init_tss[smp_processor_id()].x86_tss.sp0) + + /* + * These must be called with preempt disabled +diff -urNp linux-2.6.35.4/arch/x86/include/asm/io.h linux-2.6.35.4/arch/x86/include/asm/io.h +--- linux-2.6.35.4/arch/x86/include/asm/io.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/io.h 2010-09-17 20:12:09.000000000 -0400 +@@ -213,6 +213,17 @@ extern void iounmap(volatile void __iome + + #include <linux/vmalloc.h> + ++#define ARCH_HAS_VALID_PHYS_ADDR_RANGE ++static inline int valid_phys_addr_range(unsigned long addr, size_t count) ++{ ++ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ ++static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count) ++{ ++ return (pfn + (count >> PAGE_SHIFT)) < (1 << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0; ++} ++ + /* + * Convert a virtual cached pointer to an uncached pointer + */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/iommu.h linux-2.6.35.4/arch/x86/include/asm/iommu.h +--- linux-2.6.35.4/arch/x86/include/asm/iommu.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/iommu.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1,7 +1,7 @@ + #ifndef _ASM_X86_IOMMU_H + #define _ASM_X86_IOMMU_H + +-extern struct dma_map_ops nommu_dma_ops; ++extern const struct dma_map_ops nommu_dma_ops; + extern int force_iommu, no_iommu; + extern int iommu_detected; + extern int iommu_pass_through; +diff -urNp linux-2.6.35.4/arch/x86/include/asm/irqflags.h linux-2.6.35.4/arch/x86/include/asm/irqflags.h +--- linux-2.6.35.4/arch/x86/include/asm/irqflags.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/irqflags.h 2010-09-17 20:12:09.000000000 -0400 +@@ -142,6 +142,11 @@ static inline unsigned long __raw_local_ + sti; \ + sysexit + ++#define GET_CR0_INTO_RDI mov %cr0, %rdi ++#define SET_RDI_INTO_CR0 mov %rdi, %cr0 ++#define GET_CR3_INTO_RDI mov %cr3, %rdi ++#define SET_RDI_INTO_CR3 mov %rdi, %cr3 ++ + #else + #define INTERRUPT_RETURN iret + #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit +diff -urNp linux-2.6.35.4/arch/x86/include/asm/kvm_host.h linux-2.6.35.4/arch/x86/include/asm/kvm_host.h +--- linux-2.6.35.4/arch/x86/include/asm/kvm_host.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/kvm_host.h 2010-09-17 20:12:09.000000000 -0400 +@@ -536,7 +536,7 @@ struct kvm_x86_ops { + const struct trace_print_flags *exit_reasons_str; + }; + +-extern struct kvm_x86_ops *kvm_x86_ops; ++extern const struct kvm_x86_ops *kvm_x86_ops; + + int kvm_mmu_module_init(void); + void kvm_mmu_module_exit(void); +diff -urNp linux-2.6.35.4/arch/x86/include/asm/local.h linux-2.6.35.4/arch/x86/include/asm/local.h +--- linux-2.6.35.4/arch/x86/include/asm/local.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/local.h 2010-09-17 20:12:09.000000000 -0400 +@@ -18,26 +18,90 @@ typedef struct { + + static inline void local_inc(local_t *l) + { +- asm volatile(_ASM_INC "%0" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_DEC "%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+m" (l->a.counter)); + } + + static inline void local_dec(local_t *l) + { +- asm volatile(_ASM_DEC "%0" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 
++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_INC "%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+m" (l->a.counter)); + } + + static inline void local_add(long i, local_t *l) + { +- asm volatile(_ASM_ADD "%1,%0" ++ asm volatile(_ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_SUB "%1,%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+m" (l->a.counter) + : "ir" (i)); + } + + static inline void local_sub(long i, local_t *l) + { +- asm volatile(_ASM_SUB "%1,%0" ++ asm volatile(_ASM_SUB "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_ADD "%1,%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+m" (l->a.counter) + : "ir" (i)); + } +@@ -55,7 +119,24 @@ static inline int local_sub_and_test(lon + { + unsigned char c; + +- asm volatile(_ASM_SUB "%2,%0; sete %1" ++ asm volatile(_ASM_SUB "%2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_ADD "%2,%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -73,7 +154,24 @@ static inline int local_dec_and_test(loc + { + unsigned char c; + +- asm volatile(_ASM_DEC "%0; sete %1" ++ asm volatile(_ASM_DEC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_INC "%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -91,7 +189,24 @@ static inline int local_inc_and_test(loc + { + unsigned char c; + +- asm volatile(_ASM_INC "%0; sete %1" ++ asm volatile(_ASM_INC "%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_DEC "%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "sete %1\n" + : "+m" (l->a.counter), "=qm" (c) + : : "memory"); + return c != 0; +@@ -110,7 +225,24 @@ static inline int local_add_negative(lon + { + unsigned char c; + +- asm volatile(_ASM_ADD "%2,%0; sets %1" ++ asm volatile(_ASM_ADD "%2,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_SUB "%2,%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "sets %1\n" + : "+m" (l->a.counter), "=qm" (c) + : "ir" (i) : "memory"); + return c; +@@ -133,7 +265,23 @@ static inline long local_add_return(long + #endif + /* Modern 486+ processor */ + __i = i; +- asm volatile(_ASM_XADD "%0, %1;" ++ asm volatile(_ASM_XADD "%0, %1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ _ASM_MOV "%0,%1\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+r" (i), "+m" (l->a.counter) + : 
: "memory"); + return i + __i; +diff -urNp linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h +--- linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/mc146818rtc.h 2010-09-17 20:12:09.000000000 -0400 +@@ -81,8 +81,8 @@ static inline unsigned char current_lock + #else + #define lock_cmos_prefix(reg) do {} while (0) + #define lock_cmos_suffix(reg) do {} while (0) +-#define lock_cmos(reg) +-#define unlock_cmos() ++#define lock_cmos(reg) do {} while (0) ++#define unlock_cmos() do {} while (0) + #define do_i_have_lock_cmos() 0 + #define current_lock_cmos_reg() 0 + #endif +diff -urNp linux-2.6.35.4/arch/x86/include/asm/microcode.h linux-2.6.35.4/arch/x86/include/asm/microcode.h +--- linux-2.6.35.4/arch/x86/include/asm/microcode.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/microcode.h 2010-09-17 20:12:09.000000000 -0400 +@@ -12,13 +12,13 @@ struct device; + enum ucode_state { UCODE_ERROR, UCODE_OK, UCODE_NFOUND }; + + struct microcode_ops { +- enum ucode_state (*request_microcode_user) (int cpu, ++ enum ucode_state (* const request_microcode_user) (int cpu, + const void __user *buf, size_t size); + +- enum ucode_state (*request_microcode_fw) (int cpu, ++ enum ucode_state (* const request_microcode_fw) (int cpu, + struct device *device); + +- void (*microcode_fini_cpu) (int cpu); ++ void (* const microcode_fini_cpu) (int cpu); + + /* + * The generic 'microcode_core' part guarantees that +@@ -38,18 +38,18 @@ struct ucode_cpu_info { + extern struct ucode_cpu_info ucode_cpu_info[]; + + #ifdef CONFIG_MICROCODE_INTEL +-extern struct microcode_ops * __init init_intel_microcode(void); ++extern const struct microcode_ops * __init init_intel_microcode(void); + #else +-static inline struct microcode_ops * __init init_intel_microcode(void) ++static inline const struct microcode_ops * __init init_intel_microcode(void) + { + return NULL; + } + #endif /* CONFIG_MICROCODE_INTEL */ + + #ifdef CONFIG_MICROCODE_AMD +-extern struct microcode_ops * __init init_amd_microcode(void); ++extern const struct microcode_ops * __init init_amd_microcode(void); + #else +-static inline struct microcode_ops * __init init_amd_microcode(void) ++static inline const struct microcode_ops * __init init_amd_microcode(void) + { + return NULL; + } +diff -urNp linux-2.6.35.4/arch/x86/include/asm/mman.h linux-2.6.35.4/arch/x86/include/asm/mman.h +--- linux-2.6.35.4/arch/x86/include/asm/mman.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/mman.h 2010-09-17 20:12:09.000000000 -0400 +@@ -5,4 +5,14 @@ + + #include <asm-generic/mman.h> + ++#ifdef __KERNEL__ ++#ifndef __ASSEMBLY__ ++#ifdef CONFIG_X86_32 ++#define arch_mmap_check i386_mmap_check ++int i386_mmap_check(unsigned long addr, unsigned long len, ++ unsigned long flags); ++#endif ++#endif ++#endif ++ + #endif /* _ASM_X86_MMAN_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/mmu_context.h linux-2.6.35.4/arch/x86/include/asm/mmu_context.h +--- linux-2.6.35.4/arch/x86/include/asm/mmu_context.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/mmu_context.h 2010-09-17 20:12:09.000000000 -0400 +@@ -24,6 +24,21 @@ void destroy_context(struct mm_struct *m + + static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk) + { ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ unsigned int i; ++ pgd_t *pgd; ++ ++ pax_open_kernel(); ++ pgd = 
get_cpu_pgd(smp_processor_id()); ++ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i) ++ if (paravirt_enabled()) ++ set_pgd(pgd+i, native_make_pgd(0)); ++ else ++ pgd[i] = native_make_pgd(0); ++ pax_close_kernel(); ++#endif ++ + #ifdef CONFIG_SMP + if (percpu_read(cpu_tlbstate.state) == TLBSTATE_OK) + percpu_write(cpu_tlbstate.state, TLBSTATE_LAZY); +@@ -34,27 +49,70 @@ static inline void switch_mm(struct mm_s + struct task_struct *tsk) + { + unsigned cpu = smp_processor_id(); ++#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) ++ int tlbstate = TLBSTATE_OK; ++#endif + + if (likely(prev != next)) { + /* stop flush ipis for the previous mm */ + cpumask_clear_cpu(cpu, mm_cpumask(prev)); + #ifdef CONFIG_SMP ++#ifdef CONFIG_X86_32 ++ tlbstate = percpu_read(cpu_tlbstate.state); ++#endif + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + percpu_write(cpu_tlbstate.active_mm, next); + #endif + cpumask_set_cpu(cpu, mm_cpumask(next)); + + /* Re-load page tables */ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); ++ pax_close_kernel(); ++ load_cr3(get_cpu_pgd(cpu)); ++#else + load_cr3(next->pgd); ++#endif + + /* + * load the LDT, if the LDT is different: + */ + if (unlikely(prev->context.ldt != next->context.ldt)) + load_LDT_nolock(&next->context); +- } ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ if (!(__supported_pte_mask & _PAGE_NX)) { ++ smp_mb__before_clear_bit(); ++ cpu_clear(cpu, prev->context.cpu_user_cs_mask); ++ smp_mb__after_clear_bit(); ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++ } ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base || ++ prev->context.user_cs_limit != next->context.user_cs_limit)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); + #ifdef CONFIG_SMP ++ else if (unlikely(tlbstate != TLBSTATE_OK)) ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++#endif ++ ++ } + else { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ pax_open_kernel(); ++ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd, USER_PGD_PTRS); ++ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd, USER_PGD_PTRS); ++ pax_close_kernel(); ++ load_cr3(get_cpu_pgd(cpu)); ++#endif ++ ++#ifdef CONFIG_SMP + percpu_write(cpu_tlbstate.state, TLBSTATE_OK); + BUG_ON(percpu_read(cpu_tlbstate.active_mm) != next); + +@@ -63,11 +121,28 @@ static inline void switch_mm(struct mm_s + * tlb flush IPI delivery. We must reload CR3 + * to make sure to use no freed page tables. 
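With CONFIG_PAX_PER_CPU_PGD, the switch_mm() hunk above stops loading next->pgd into CR3; instead each CPU keeps a private top-level page table, the incoming task's user entries are cloned into it (and, for UDEREF, shadowed into the upper half), and CR3 gets the per-CPU copy. A reduced sketch of the cloning step only, with illustrative types; the real __clone_user_pgds() runs inside the pax_open_kernel()/pax_close_kernel() window:

typedef unsigned long demo_pgd_t;

#define DEMO_USER_PGD_PTRS 256   /* cf. USER_PGD_PTRS later in this patch */

/* Copy the user half of a task's PGD into this CPU's private PGD,
 * roughly what __clone_user_pgds() does on every context switch. */
static void demo_clone_user_pgds(demo_pgd_t *dst, const demo_pgd_t *src)
{
    int i;
    for (i = 0; i < DEMO_USER_PGD_PTRS; i++)
        dst[i] = src[i];
}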
+ */ ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(next->pgd); ++#endif ++ + load_LDT_nolock(&next->context); ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX)) ++ cpu_set(cpu, next->context.cpu_user_cs_mask); ++#endif ++ ++#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)) ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX))) ++#endif ++ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu); ++#endif ++ + } +- } + #endif ++ } + } + + #define activate_mm(prev, next) \ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/mmu.h linux-2.6.35.4/arch/x86/include/asm/mmu.h +--- linux-2.6.35.4/arch/x86/include/asm/mmu.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/mmu.h 2010-09-17 20:12:09.000000000 -0400 +@@ -9,10 +9,23 @@ + * we put the segment information here. + */ + typedef struct { +- void *ldt; ++ struct desc_struct *ldt; + int size; + struct mutex lock; +- void *vdso; ++ unsigned long vdso; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ unsigned long user_cs_base; ++ unsigned long user_cs_limit; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpumask_t cpu_user_cs_mask; ++#endif ++ ++#endif ++#endif ++ + } mm_context_t; + + #ifdef CONFIG_SMP +diff -urNp linux-2.6.35.4/arch/x86/include/asm/module.h linux-2.6.35.4/arch/x86/include/asm/module.h +--- linux-2.6.35.4/arch/x86/include/asm/module.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/module.h 2010-09-17 20:12:37.000000000 -0400 +@@ -59,13 +59,31 @@ + #error unknown processor family + #endif + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define MODULE_PAX_UDEREF "UDEREF " ++#else ++#define MODULE_PAX_UDEREF "" ++#endif ++ + #ifdef CONFIG_X86_32 + # ifdef CONFIG_4KSTACKS + # define MODULE_STACKSIZE "4KSTACKS " + # else + # define MODULE_STACKSIZE "" + # endif +-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE ++# ifdef CONFIG_PAX_KERNEXEC ++# define MODULE_PAX_KERNEXEC "KERNEXEC " ++# else ++# define MODULE_PAX_KERNEXEC "" ++# endif ++# ifdef CONFIG_GRKERNSEC ++# define MODULE_GRSEC "GRSECURITY " ++# else ++# define MODULE_GRSEC "" ++# endif ++# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_STACKSIZE MODULE_GRSEC MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF ++#else ++# define MODULE_ARCH_VERMAGIC MODULE_PAX_UDEREF + #endif + + #endif /* _ASM_X86_MODULE_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/page_32_types.h linux-2.6.35.4/arch/x86/include/asm/page_32_types.h +--- linux-2.6.35.4/arch/x86/include/asm/page_32_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/page_32_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -15,6 +15,10 @@ + */ + #define __PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) + ++#ifdef CONFIG_PAX_PAGEEXEC ++#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1 ++#endif ++ + #ifdef CONFIG_4KSTACKS + #define THREAD_ORDER 0 + #else +diff -urNp linux-2.6.35.4/arch/x86/include/asm/paravirt.h linux-2.6.35.4/arch/x86/include/asm/paravirt.h +--- linux-2.6.35.4/arch/x86/include/asm/paravirt.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/paravirt.h 2010-09-17 20:12:09.000000000 -0400 +@@ -720,6 +720,21 @@ static inline void __set_fixmap(unsigned + pv_mmu_ops.set_fixmap(idx, phys, flags); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long pax_open_kernel(void) 
++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel); ++} ++ ++static inline unsigned long pax_close_kernel(void) ++{ ++ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel); ++} ++#else ++static inline unsigned long pax_open_kernel(void) { return 0; } ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS) + + static inline int arch_spin_is_locked(struct arch_spinlock *lock) +@@ -936,7 +951,7 @@ extern void default_banner(void); + + #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4) + #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4) +-#define PARA_INDIRECT(addr) *%cs:addr ++#define PARA_INDIRECT(addr) *%ss:addr + #endif + + #define INTERRUPT_RETURN \ +@@ -1013,6 +1028,21 @@ extern void default_banner(void); + PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \ + CLBR_NONE, \ + jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit)) ++ ++#define GET_CR0_INTO_RDI \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR0 \ ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++ ++#define GET_CR3_INTO_RDI \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \ ++ mov %rax,%rdi ++ ++#define SET_RDI_INTO_CR3 \ ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3) ++ + #endif /* CONFIG_X86_32 */ + + #endif /* __ASSEMBLY__ */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h +--- linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/paravirt_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -312,6 +312,12 @@ struct pv_mmu_ops { + an mfn. We can tell which is which from the index. 
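Because a paravirtualized kernel must let the hypervisor interpose on CR0 writes, pax_open_kernel()/pax_close_kernel() are routed through pv_mmu_ops here (the hook slots are added to struct pv_mmu_ops in the next hunk), with inline stubs when CONFIG_PAX_KERNEXEC is off. A toy model of that function-pointer dispatch; all demo_* names are made up for illustration:

/* Toy model of the pv_mmu_ops dispatch; names are illustrative. */
struct demo_mmu_ops {
    unsigned long (*pax_open_kernel)(void);
    unsigned long (*pax_close_kernel)(void);
};

static unsigned long demo_native_open(void)  { /* native: clear CR0.WP */ return 0; }
static unsigned long demo_native_close(void) { /* native: set CR0.WP */   return 0; }

static const struct demo_mmu_ops demo_ops = {
    .pax_open_kernel  = demo_native_open,
    .pax_close_kernel = demo_native_close,
};

static inline unsigned long demo_pax_open_kernel(void)
{
    return demo_ops.pax_open_kernel();   /* PVOP_CALL0 in the real patch */
}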
*/ + void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx, + phys_addr_t phys, pgprot_t flags); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ unsigned long (*pax_open_kernel)(void); ++ unsigned long (*pax_close_kernel)(void); ++#endif ++ + }; + + struct arch_spinlock; +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pci_x86.h linux-2.6.35.4/arch/x86/include/asm/pci_x86.h +--- linux-2.6.35.4/arch/x86/include/asm/pci_x86.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pci_x86.h 2010-09-17 20:12:09.000000000 -0400 +@@ -91,16 +91,16 @@ extern int (*pcibios_enable_irq)(struct + extern void (*pcibios_disable_irq)(struct pci_dev *dev); + + struct pci_raw_ops { +- int (*read)(unsigned int domain, unsigned int bus, unsigned int devfn, ++ int (* const read)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val); +- int (*write)(unsigned int domain, unsigned int bus, unsigned int devfn, ++ int (* const write)(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 val); + }; + +-extern struct pci_raw_ops *raw_pci_ops; +-extern struct pci_raw_ops *raw_pci_ext_ops; ++extern const struct pci_raw_ops *raw_pci_ops; ++extern const struct pci_raw_ops *raw_pci_ext_ops; + +-extern struct pci_raw_ops pci_direct_conf1; ++extern const struct pci_raw_ops pci_direct_conf1; + extern bool port_cf9_safe; + + /* arch_initcall level */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgalloc.h linux-2.6.35.4/arch/x86/include/asm/pgalloc.h +--- linux-2.6.35.4/arch/x86/include/asm/pgalloc.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgalloc.h 2010-09-17 20:12:09.000000000 -0400 +@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(s + pmd_t *pmd, pte_t *pte) + { + paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); ++ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE)); ++} ++ ++static inline void pmd_populate_user(struct mm_struct *mm, ++ pmd_t *pmd, pte_t *pte) ++{ ++ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT); + set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE)); + } + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable-2level.h 2010-09-17 20:12:09.000000000 -0400 +@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -25,8 +25,6 @@ + struct mm_struct; + struct vm_area_struct; + +-extern pgd_t swapper_pg_dir[1024]; +- + static inline void pgtable_cache_init(void) { } + static inline void check_pgt_cache(void) { } + void paging_init(void); +@@ -47,6 +45,11 @@ extern void set_pmd_pfn(unsigned long, u + # include <asm/pgtable-2level.h> + #endif + ++extern pgd_t swapper_pg_dir[PTRS_PER_PGD]; ++#ifdef CONFIG_X86_PAE ++extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD]; ++#endif ++ + #if defined(CONFIG_HIGHPTE) + #define __KM_PTE \ + (in_nmi() ? 
KM_NMI_PTE : \ +@@ -71,7 +74,9 @@ extern void set_pmd_pfn(unsigned long, u + /* Clear a kernel PTE and flush it from the TLB */ + #define kpte_clear_flush(ptep, vaddr) \ + do { \ ++ pax_open_kernel(); \ + pte_clear(&init_mm, (vaddr), (ptep)); \ ++ pax_close_kernel(); \ + __flush_tlb_one((vaddr)); \ + } while (0) + +@@ -83,6 +88,9 @@ do { \ + + #endif /* !__ASSEMBLY__ */ + ++#define HAVE_ARCH_UNMAPPED_AREA ++#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN ++ + /* + * kern_addr_valid() is (1) for FLATMEM and (0) for + * SPARSEMEM and DISCONTIGMEM +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_32_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -8,7 +8,7 @@ + */ + #ifdef CONFIG_X86_PAE + # include <asm/pgtable-3level_types.h> +-# define PMD_SIZE (1UL << PMD_SHIFT) ++# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) + # define PMD_MASK (~(PMD_SIZE - 1)) + #else + # include <asm/pgtable-2level_types.h> +@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set + # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE) + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++#ifndef __ASSEMBLY__ ++extern unsigned char MODULES_EXEC_VADDR[]; ++extern unsigned char MODULES_EXEC_END[]; ++#endif ++#include <asm/boot.h> ++#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET) ++#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET) ++#else ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) ++#endif ++ + #define MODULES_VADDR VMALLOC_START + #define MODULES_END VMALLOC_END + #define MODULES_LEN (MODULES_VADDR - MODULES_END) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable-3level.h 2010-09-17 20:12:09.000000000 -0400 +@@ -38,12 +38,16 @@ static inline void native_set_pte_atomic + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd)); ++ pax_close_kernel(); + } + + static inline void native_set_pud(pud_t *pudp, pud_t pud) + { ++ pax_open_kernel(); + set_64bit((unsigned long long *)(pudp), native_pud_val(pud)); ++ pax_close_kernel(); + } + + /* +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -16,10 +16,13 @@ + + extern pud_t level3_kernel_pgt[512]; + extern pud_t level3_ident_pgt[512]; ++extern pud_t level3_vmalloc_pgt[512]; ++extern pud_t level3_vmemmap_pgt[512]; ++extern pud_t level2_vmemmap_pgt[512]; + extern pmd_t level2_kernel_pgt[512]; + extern pmd_t level2_fixmap_pgt[512]; +-extern pmd_t level2_ident_pgt[512]; +-extern pgd_t init_level4_pgt[]; ++extern pmd_t level2_ident_pgt[512*2]; ++extern pgd_t init_level4_pgt[512]; + + #define swapper_pg_dir init_level4_pgt + +@@ -74,7 +77,9 @@ static inline pte_t native_ptep_get_and_ + + static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd) + { ++ pax_open_kernel(); + *pmdp = pmd; ++ pax_close_kernel(); + } + + static inline void native_pmd_clear(pmd_t *pmd) +@@ -94,7 +99,9 @@ static inline 
void native_pud_clear(pud_ + + static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd) + { ++ pax_open_kernel(); + *pgdp = pgd; ++ pax_close_kernel(); + } + + static inline void native_pgd_clear(pgd_t *pgd) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_64_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -59,5 +59,10 @@ typedef struct { pteval_t pte; } pte_t; + #define MODULES_VADDR _AC(0xffffffffa0000000, UL) + #define MODULES_END _AC(0xffffffffff000000, UL) + #define MODULES_LEN (MODULES_END - MODULES_VADDR) ++#define MODULES_EXEC_VADDR MODULES_VADDR ++#define MODULES_EXEC_END MODULES_END ++ ++#define ktla_ktva(addr) (addr) ++#define ktva_ktla(addr) (addr) + + #endif /* _ASM_X86_PGTABLE_64_DEFS_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable.h linux-2.6.35.4/arch/x86/include/asm/pgtable.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable.h 2010-09-17 20:12:09.000000000 -0400 +@@ -76,12 +76,51 @@ extern struct list_head pgd_list; + + #define arch_end_context_switch(prev) do {} while(0) + ++#define pax_open_kernel() native_pax_open_kernel() ++#define pax_close_kernel() native_pax_close_kernel() + #endif /* CONFIG_PARAVIRT */ + ++#define __HAVE_ARCH_PAX_OPEN_KERNEL ++#define __HAVE_ARCH_PAX_CLOSE_KERNEL ++ ++#ifdef CONFIG_PAX_KERNEXEC ++static inline unsigned long native_pax_open_kernel(void) ++{ ++ unsigned long cr0; ++ ++ preempt_disable(); ++ barrier(); ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(unlikely(cr0 & X86_CR0_WP)); ++ write_cr0(cr0); ++ return cr0 ^ X86_CR0_WP; ++} ++ ++static inline unsigned long native_pax_close_kernel(void) ++{ ++ unsigned long cr0; ++ ++ cr0 = read_cr0() ^ X86_CR0_WP; ++ BUG_ON(unlikely(!(cr0 & X86_CR0_WP))); ++ write_cr0(cr0); ++ barrier(); ++ preempt_enable_no_resched(); ++ return cr0 ^ X86_CR0_WP; ++} ++#else ++static inline unsigned long native_pax_open_kernel(void) { return 0; } ++static inline unsigned long native_pax_close_kernel(void) { return 0; } ++#endif ++ + /* + * The following only work if pte_present() is true. + * Undefined behaviour if not.. 
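native_pax_open_kernel()/native_pax_close_kernel() above implement the KERNEXEC write window by toggling CR0.WP, the bit that makes ring-0 writes respect page-level write protection; preemption stays disabled for the whole window and the BUG_ON() catches unbalanced nesting. A compilable sketch of the underlying CR0 accessors (mov to/from %cr0 is privileged, so this only executes in ring 0); X86_CR0_WP is bit 16:

static inline unsigned long demo_read_cr0(void)
{
    unsigned long cr0;
    asm volatile("mov %%cr0, %0" : "=r" (cr0));
    return cr0;
}

static inline void demo_write_cr0(unsigned long cr0)
{
    asm volatile("mov %0, %%cr0" : : "r" (cr0));
}

#define DEMO_X86_CR0_WP (1UL << 16)

/* Open a write window on read-only kernel data; the caller must
 * disable preemption first and restore the bit when done. */
static inline unsigned long demo_open_kernel(void)
{
    unsigned long cr0 = demo_read_cr0() & ~DEMO_X86_CR0_WP;
    demo_write_cr0(cr0);
    return cr0;
}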
+ */ ++static inline int pte_user(pte_t pte) ++{ ++ return pte_val(pte) & _PAGE_USER; ++} ++ + static inline int pte_dirty(pte_t pte) + { + return pte_flags(pte) & _PAGE_DIRTY; +@@ -169,9 +208,29 @@ static inline pte_t pte_wrprotect(pte_t + return pte_clear_flags(pte, _PAGE_RW); + } + ++static inline pte_t pte_mkread(pte_t pte) ++{ ++ return __pte(pte_val(pte) | _PAGE_USER); ++} ++ + static inline pte_t pte_mkexec(pte_t pte) + { +- return pte_clear_flags(pte, _PAGE_NX); ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_clear_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_set_flags(pte, _PAGE_USER); ++} ++ ++static inline pte_t pte_exprotect(pte_t pte) ++{ ++#ifdef CONFIG_X86_PAE ++ if (__supported_pte_mask & _PAGE_NX) ++ return pte_set_flags(pte, _PAGE_NX); ++ else ++#endif ++ return pte_clear_flags(pte, _PAGE_USER); + } + + static inline pte_t pte_mkdirty(pte_t pte) +@@ -304,6 +363,15 @@ pte_t *populate_extra_pte(unsigned long + #endif + + #ifndef __ASSEMBLY__ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD]; ++static inline pgd_t *get_cpu_pgd(unsigned int cpu) ++{ ++ return cpu_pgd[cpu]; ++} ++#endif ++ + #include <linux/mm_types.h> + + static inline int pte_none(pte_t pte) +@@ -474,7 +542,7 @@ static inline pud_t *pud_offset(pgd_t *p + + static inline int pgd_bad(pgd_t pgd) + { +- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE; ++ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE; + } + + static inline int pgd_none(pgd_t pgd) +@@ -497,7 +565,12 @@ static inline int pgd_none(pgd_t pgd) + * pgd_offset() returns a (pgd_t *) + * pgd_index() is used get the offset into the pgd page's array of pgd_t's; + */ +-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address))) ++#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address)) ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address)) ++#endif ++ + /* + * a shortcut which implies the use of the kernel's pgd, instead + * of a process's +@@ -508,6 +581,20 @@ static inline int pgd_none(pgd_t pgd) + #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET) + #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY) + ++#ifdef CONFIG_X86_32 ++#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY ++#else ++#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT ++#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT)) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++#define PAX_USER_SHADOW_BASE (_AC(1,UL) << TASK_SIZE_MAX_SHIFT) ++#else ++#define PAX_USER_SHADOW_BASE (_AC(0,UL)) ++#endif ++ ++#endif ++ + #ifndef __ASSEMBLY__ + + extern int direct_gbpages; +@@ -613,11 +700,23 @@ static inline void ptep_set_wrprotect(st + * dst and src can be on the same page, but the range must not overlap, + * and must not cross a page boundary. 
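pte_mkexec() and the new pte_exprotect() above choose between two mechanisms: with PAE and hardware NX support (__supported_pte_mask & _PAGE_NX) executability is simply the inverse of the NX bit, while on non-NX hardware the patch repurposes _PAGE_USER together with the segment-limit tracking enabled earlier via CONFIG_ARCH_TRACK_EXEC_LIMIT. A minimal sketch of that selection, assuming illustrative flag values:

typedef unsigned long long demo_pteval_t;

#define DEMO_PAGE_USER (1ULL << 2)
#define DEMO_PAGE_NX   (1ULL << 63)

/* nx_supported mirrors (__supported_pte_mask & _PAGE_NX) != 0. */
static inline demo_pteval_t demo_pte_exprotect(demo_pteval_t pte, int nx_supported)
{
    if (nx_supported)
        return pte | DEMO_PAGE_NX;  /* hardware no-execute bit */
    return pte & ~DEMO_PAGE_USER;   /* else drop USER; segment limit does the rest */
}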
+ */ +-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count) ++static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count) + { +- memcpy(dst, src, count * sizeof(pgd_t)); ++ pax_open_kernel(); ++ while (count--) ++ *dst++ = *src++; ++ pax_close_kernel(); + } + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count); ++#endif ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count); ++#else ++static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) {} ++#endif + + #include <asm-generic/pgtable.h> + #endif /* __ASSEMBLY__ */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h +--- linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/pgtable_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -16,12 +16,11 @@ + #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */ + #define _PAGE_BIT_PAT 7 /* on 4KB pages */ + #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */ +-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */ ++#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */ + #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */ + #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */ + #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */ +-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1 +-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1 ++#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL + #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */ + + /* If _PAGE_BIT_PRESENT is clear, we use these: */ +@@ -39,7 +38,6 @@ + #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY) + #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE) + #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL) +-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1) + #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP) + #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT) + #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE) +@@ -55,8 +53,10 @@ + + #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX) +-#else ++#elif defined(CONFIG_KMEMCHECK) + #define _PAGE_NX (_AT(pteval_t, 0)) ++#else ++#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN) + #endif + + #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE) +@@ -93,6 +93,9 @@ + #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \ + _PAGE_ACCESSED) + ++#define PAGE_READONLY_NOEXEC PAGE_READONLY ++#define PAGE_SHARED_NOEXEC PAGE_SHARED ++ + #define __PAGE_KERNEL_EXEC \ + (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL) + #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX) +@@ -103,8 +106,8 @@ + #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC) + #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT) + #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD) +-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER) +-#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_VSYSCALL | _PAGE_PCD | _PAGE_PWT) ++#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER) ++#define __PAGE_KERNEL_VSYSCALL_NOCACHE (__PAGE_KERNEL_RO | _PAGE_PCD | _PAGE_PWT | _PAGE_USER) + #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE) + #define 
__PAGE_KERNEL_LARGE_NOCACHE (__PAGE_KERNEL | _PAGE_CACHE_UC | _PAGE_PSE) + #define __PAGE_KERNEL_LARGE_EXEC (__PAGE_KERNEL_EXEC | _PAGE_PSE) +@@ -163,8 +166,8 @@ + * bits are combined, this will alow user to access the high address mapped + * VDSO in the presence of CONFIG_COMPAT_VDSO + */ +-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */ +-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */ ++#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ ++#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */ + #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */ + #endif + +@@ -202,7 +205,17 @@ static inline pgdval_t pgd_flags(pgd_t p + { + return native_pgd_val(pgd) & PTE_FLAGS_MASK; + } ++#endif + ++#if PAGETABLE_LEVELS == 3 ++#include <asm-generic/pgtable-nopud.h> ++#endif ++ ++#if PAGETABLE_LEVELS == 2 ++#include <asm-generic/pgtable-nopmd.h> ++#endif ++ ++#ifndef __ASSEMBLY__ + #if PAGETABLE_LEVELS > 3 + typedef struct { pudval_t pud; } pud_t; + +@@ -216,8 +229,6 @@ static inline pudval_t native_pud_val(pu + return pud.pud; + } + #else +-#include <asm-generic/pgtable-nopud.h> +- + static inline pudval_t native_pud_val(pud_t pud) + { + return native_pgd_val(pud.pgd); +@@ -237,8 +248,6 @@ static inline pmdval_t native_pmd_val(pm + return pmd.pmd; + } + #else +-#include <asm-generic/pgtable-nopmd.h> +- + static inline pmdval_t native_pmd_val(pmd_t pmd) + { + return native_pgd_val(pmd.pud.pgd); +@@ -278,7 +287,6 @@ typedef struct page *pgtable_t; + + extern pteval_t __supported_pte_mask; + extern void set_nx(void); +-extern int nx_enabled; + + #define pgprot_writecombine pgprot_writecombine + extern pgprot_t pgprot_writecombine(pgprot_t prot); +diff -urNp linux-2.6.35.4/arch/x86/include/asm/processor.h linux-2.6.35.4/arch/x86/include/asm/processor.h +--- linux-2.6.35.4/arch/x86/include/asm/processor.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/processor.h 2010-09-17 20:12:09.000000000 -0400 +@@ -269,7 +269,7 @@ struct tss_struct { + + } ____cacheline_aligned; + +-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss); ++extern struct tss_struct init_tss[NR_CPUS]; + + /* + * Save the original ist values for checking stack pointers during debugging +@@ -884,8 +884,15 @@ static inline void spin_lock_prefetch(co + */ + #define TASK_SIZE PAGE_OFFSET + #define TASK_SIZE_MAX TASK_SIZE ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2) ++#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE) ++#else + #define STACK_TOP TASK_SIZE +-#define STACK_TOP_MAX STACK_TOP ++#endif ++ ++#define STACK_TOP_MAX TASK_SIZE + + #define INIT_THREAD { \ + .sp0 = sizeof(init_stack) + (long)&init_stack, \ +@@ -902,7 +909,7 @@ static inline void spin_lock_prefetch(co + */ + #define INIT_TSS { \ + .x86_tss = { \ +- .sp0 = sizeof(init_stack) + (long)&init_stack, \ ++ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \ + .ss0 = __KERNEL_DS, \ + .ss1 = __KERNEL_CS, \ + .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \ +@@ -913,11 +920,7 @@ static inline void spin_lock_prefetch(co + extern unsigned long thread_saved_pc(struct task_struct *tsk); + + #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long)) +-#define KSTK_TOP(info) \ +-({ \ +- unsigned long *__ptr = (unsigned long *)(info); \ +- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \ +-}) ++#define KSTK_TOP(info) ((info)->task.thread.sp0) + + /* + * The below -8 is to reserve 8 bytes on top of the ring0 stack. 
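The processor.h hunk above halves the address space under SEGMEXEC: SEGMEXEC_TASK_SIZE is TASK_SIZE/2 because the lower half holds data mappings and the upper half a code-segment mirror used to emulate non-executable pages, and STACK_TOP follows the task's pax_flags. A one-line sketch of the mirroring arithmetic, assuming the usual 3 GB i386 split:

#define DEMO_TASK_SIZE          0xC0000000UL          /* 3 GB i386 user space */
#define DEMO_SEGMEXEC_TASK_SIZE (DEMO_TASK_SIZE / 2)  /* 1.5 GB data half */

/* Executable-mirror address of a data-half address under SEGMEXEC. */
static inline unsigned long demo_segmexec_mirror(unsigned long addr)
{
    return addr + DEMO_SEGMEXEC_TASK_SIZE;
}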
+@@ -932,7 +935,7 @@ extern unsigned long thread_saved_pc(str + #define task_pt_regs(task) \ + ({ \ + struct pt_regs *__regs__; \ +- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \ ++ __regs__ = (struct pt_regs *)((task)->thread.sp0); \ + __regs__ - 1; \ + }) + +@@ -942,13 +945,13 @@ extern unsigned long thread_saved_pc(str + /* + * User space process size. 47bits minus one guard page. + */ +-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE) ++#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE) + + /* This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ + #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \ +- 0xc0000000 : 0xFFFFe000) ++ 0xc0000000 : 0xFFFFf000) + + #define TASK_SIZE (test_thread_flag(TIF_IA32) ? \ + IA32_PAGE_OFFSET : TASK_SIZE_MAX) +@@ -985,6 +988,10 @@ extern void start_thread(struct pt_regs + */ + #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3)) + ++#ifdef CONFIG_PAX_SEGMEXEC ++#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3)) ++#endif ++ + #define KSTK_EIP(task) (task_pt_regs(task)->ip) + + /* Get/set a process' ability to use the timestamp counter instruction */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/ptrace.h linux-2.6.35.4/arch/x86/include/asm/ptrace.h +--- linux-2.6.35.4/arch/x86/include/asm/ptrace.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/ptrace.h 2010-09-17 20:12:09.000000000 -0400 +@@ -152,28 +152,29 @@ static inline unsigned long regs_return_ + } + + /* +- * user_mode_vm(regs) determines whether a register set came from user mode. ++ * user_mode(regs) determines whether a register set came from user mode. + * This is true if V8086 mode was enabled OR if the register set was from + * protected mode with RPL-3 CS value. This tricky test checks that with + * one comparison. Many places in the kernel can bypass this full check +- * if they have already ruled out V8086 mode, so user_mode(regs) can be used. ++ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can ++ * be used. 
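The ptrace.h hunk below swaps the names so that the thorough check (which also catches V8086 mode on i386) becomes the default user_mode(), and on 64-bit it tests the selector's RPL field rather than comparing against a literal 3, which matters once additional kernel code segments such as __KERNEXEC_KERNEL_CS exist. The test itself is tiny:

/* RPL lives in the low two bits of a selector (SEGMENT_RPL_MASK == 3). */
static inline int demo_user_mode(unsigned short cs)
{
    return (cs & 3) != 0;   /* non-zero ring: the trap came from user space */
}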
+ */ +-static inline int user_mode(struct pt_regs *regs) ++static inline int user_mode_novm(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL; + #else +- return !!(regs->cs & 3); ++ return !!(regs->cs & SEGMENT_RPL_MASK); + #endif + } + +-static inline int user_mode_vm(struct pt_regs *regs) ++static inline int user_mode(struct pt_regs *regs) + { + #ifdef CONFIG_X86_32 + return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= + USER_RPL; + #else +- return user_mode(regs); ++ return user_mode_novm(regs); + #endif + } + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/reboot.h linux-2.6.35.4/arch/x86/include/asm/reboot.h +--- linux-2.6.35.4/arch/x86/include/asm/reboot.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/reboot.h 2010-09-17 20:12:09.000000000 -0400 +@@ -18,7 +18,7 @@ extern struct machine_ops machine_ops; + + void native_machine_crash_shutdown(struct pt_regs *regs); + void native_machine_shutdown(void); +-void machine_real_restart(const unsigned char *code, int length); ++void machine_real_restart(const unsigned char *code, unsigned int length); + + typedef void (*nmi_shootdown_cb)(int, struct die_args*); + void nmi_shootdown_cpus(nmi_shootdown_cb callback); +diff -urNp linux-2.6.35.4/arch/x86/include/asm/rwsem.h linux-2.6.35.4/arch/x86/include/asm/rwsem.h +--- linux-2.6.35.4/arch/x86/include/asm/rwsem.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/rwsem.h 2010-09-17 20:12:09.000000000 -0400 +@@ -118,10 +118,26 @@ static inline void __down_read(struct rw + { + asm volatile("# beginning down_read\n\t" + LOCK_PREFIX _ASM_INC "(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ LOCK_PREFIX _ASM_DEC "(%1)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + /* adds 0x00000001, returns the old value */ +- " jns 1f\n" ++ " jns 2f\n" + " call call_rwsem_down_read_failed\n" +- "1:\n\t" ++ "2:\n\t" + "# ending down_read\n\t" + : "+m" (sem->count) + : "a" (sem) +@@ -136,13 +152,29 @@ static inline int __down_read_trylock(st + rwsem_count_t result, tmp; + asm volatile("# beginning __down_read_trylock\n\t" + " mov %0,%1\n\t" +- "1:\n\t" ++ "2:\n\t" + " mov %1,%2\n\t" + " add %3,%2\n\t" +- " jle 2f\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ "sub %3,%2\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ " jle 3f\n\t" + LOCK_PREFIX " cmpxchg %2,%0\n\t" +- " jnz 1b\n\t" +- "2:\n\t" ++ " jnz 2b\n\t" ++ "3:\n\t" + "# ending __down_read_trylock\n\t" + : "+m" (sem->count), "=&a" (result), "=&r" (tmp) + : "i" (RWSEM_ACTIVE_READ_BIAS) +@@ -160,12 +192,28 @@ static inline void __down_write_nested(s + tmp = RWSEM_ACTIVE_WRITE_BIAS; + asm volatile("# beginning down_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ "mov %1,(%2)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + /* subtract 0x0000ffff, returns the old value */ + " test %1,%1\n\t" + /* was the count 0 before? 
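The asm blocks grafted into local.h earlier and into rwsem.h just above all follow the PAX_REFCOUNT pattern: perform the arithmetic, trap on signed overflow ("into" on i386, "jno ... int $4" on amd64, since "into" does not exist in 64-bit mode), and undo the operation in a .fixup stanza reached through the exception table before resuming. A reduced, compilable sketch of the amd64 flavor with the fixup plumbing omitted; demo_local_inc is a hypothetical name:

static inline void demo_local_inc(long *counter)
{
    asm volatile("incq %0\n\t"
                 "jno 0f\n\t"   /* no signed overflow: skip the trap */
                 "int $4\n"     /* raise #OF; PaX's handler reports and recovers */
                 "0:\n"
                 : "+m" (*counter));
}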
*/ +- " jz 1f\n" ++ " jz 2f\n" + " call call_rwsem_down_write_failed\n" +- "1:\n" ++ "2:\n" + "# ending down_write" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (tmp) +@@ -198,10 +246,26 @@ static inline void __up_read(struct rw_s + rwsem_count_t tmp = -RWSEM_ACTIVE_READ_BIAS; + asm volatile("# beginning __up_read\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ "mov %1,(%2)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + /* subtracts 1, returns the old value */ +- " jns 1f\n\t" ++ " jns 2f\n\t" + " call call_rwsem_wake\n" +- "1:\n" ++ "2:\n" + "# ending __up_read\n" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (tmp) +@@ -216,11 +280,27 @@ static inline void __up_write(struct rw_ + rwsem_count_t tmp; + asm volatile("# beginning __up_write\n\t" + LOCK_PREFIX " xadd %1,(%2)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ "mov %1,(%2)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + /* tries to transition + 0xffff0001 -> 0x00000000 */ +- " jz 1f\n" ++ " jz 2f\n" + " call call_rwsem_wake\n" +- "1:\n\t" ++ "2:\n\t" + "# ending __up_write\n" + : "+m" (sem->count), "=d" (tmp) + : "a" (sem), "1" (-RWSEM_ACTIVE_WRITE_BIAS) +@@ -234,13 +314,29 @@ static inline void __downgrade_write(str + { + asm volatile("# beginning __downgrade_write\n\t" + LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ LOCK_PREFIX _ASM_SUB "%2,(%1)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + /* + * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386) + * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64) + */ +- " jns 1f\n\t" ++ " jns 2f\n\t" + " call call_rwsem_downgrade_wake\n" +- "1:\n\t" ++ "2:\n\t" + "# ending __downgrade_write\n" + : "+m" (sem->count) + : "a" (sem), "er" (-RWSEM_WAITING_BIAS) +@@ -253,7 +349,23 @@ static inline void __downgrade_write(str + static inline void rwsem_atomic_add(rwsem_count_t delta, + struct rw_semaphore *sem) + { +- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0" ++ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ LOCK_PREFIX _ASM_SUB "%1,%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+m" (sem->count) + : "er" (delta)); + } +@@ -266,7 +378,23 @@ static inline rwsem_count_t rwsem_atomic + { + rwsem_count_t tmp = delta; + +- asm volatile(LOCK_PREFIX "xadd %0,%1" ++ asm volatile(LOCK_PREFIX "xadd %0,%1\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ "mov %0,%1\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+r" (tmp), "+m" (sem->count) + : : "memory"); + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/segment.h linux-2.6.35.4/arch/x86/include/asm/segment.h +--- linux-2.6.35.4/arch/x86/include/asm/segment.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/segment.h 2010-09-17 20:12:09.000000000 -0400 +@@ -62,8 +62,8 @@ + * 
26 - ESPFIX small SS + * 27 - per-cpu [ offset to per-cpu data area ] + * 28 - stack_canary-20 [ for stack protector ] +- * 29 - unused +- * 30 - unused ++ * 29 - PCI BIOS CS ++ * 30 - PCI BIOS DS + * 31 - TSS for double fault handler + */ + #define GDT_ENTRY_TLS_MIN 6 +@@ -77,6 +77,8 @@ + + #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE + 0) + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4) ++ + #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE + 1) + + #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE + 4) +@@ -88,7 +90,7 @@ + #define GDT_ENTRY_ESPFIX_SS (GDT_ENTRY_KERNEL_BASE + 14) + #define __ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8) + +-#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) ++#define GDT_ENTRY_PERCPU (GDT_ENTRY_KERNEL_BASE + 15) + #ifdef CONFIG_SMP + #define __KERNEL_PERCPU (GDT_ENTRY_PERCPU * 8) + #else +@@ -102,6 +104,12 @@ + #define __KERNEL_STACK_CANARY 0 + #endif + ++#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE + 17) ++#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8) ++ ++#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE + 18) ++#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8) ++ + #define GDT_ENTRY_DOUBLEFAULT_TSS 31 + + /* +@@ -139,7 +147,7 @@ + */ + + /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */ +-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8) ++#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16) + + + #else +@@ -163,6 +171,8 @@ + #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS * 8 + 3) + #define __USER32_DS __USER_DS + ++#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7 ++ + #define GDT_ENTRY_TSS 8 /* needs two entries */ + #define GDT_ENTRY_LDT 10 /* needs two entries */ + #define GDT_ENTRY_TLS_MIN 12 +@@ -183,6 +193,7 @@ + #endif + + #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS * 8) ++#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS * 8) + #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS * 8) + #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS* 8 + 3) + #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS* 8 + 3) +diff -urNp linux-2.6.35.4/arch/x86/include/asm/spinlock.h linux-2.6.35.4/arch/x86/include/asm/spinlock.h +--- linux-2.6.35.4/arch/x86/include/asm/spinlock.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/spinlock.h 2010-09-17 20:12:09.000000000 -0400 +@@ -249,18 +249,50 @@ static inline int arch_write_can_lock(ar + static inline void arch_read_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t" +- "jns 1f\n" +- "call __read_lock_failed\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" + "1:\n" ++ LOCK_PREFIX " addl $1,(%0)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "jns 2f\n" ++ "call __read_lock_failed\n\t" ++ "2:\n" + ::LOCK_PTR_REG (rw) : "memory"); + } + + static inline void arch_write_lock(arch_rwlock_t *rw) + { + asm volatile(LOCK_PREFIX " subl %1,(%0)\n\t" +- "jz 1f\n" +- "call __write_lock_failed\n\t" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" + "1:\n" ++ LOCK_PREFIX " addl %1,(%0)\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ "jz 2f\n" ++ "call __write_lock_failed\n\t" ++ "2:\n" + ::LOCK_PTR_REG (rw), "i" (RW_LOCK_BIAS) : "memory"); + } + +@@ -286,12 +318,45 @@ static inline int arch_write_trylock(arc + + static inline void arch_read_unlock(arch_rwlock_t 
*rw) + { +- asm volatile(LOCK_PREFIX "incl %0" :"+m" (rw->lock) : : "memory"); ++ asm volatile(LOCK_PREFIX "incl %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ LOCK_PREFIX "decl %0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ ++ :"+m" (rw->lock) : : "memory"); + } + + static inline void arch_write_unlock(arch_rwlock_t *rw) + { +- asm volatile(LOCK_PREFIX "addl %1, %0" ++ asm volatile(LOCK_PREFIX "addl %1, %0\n" ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#ifdef CONFIG_X86_32 ++ "into\n0:\n" ++#else ++ "jno 0f\n" ++ "int $4\n0:\n" ++#endif ++ ".pushsection .fixup,\"ax\"\n" ++ "1:\n" ++ LOCK_PREFIX "subl %1,%0\n" ++ "jmp 0b\n" ++ ".popsection\n" ++ _ASM_EXTABLE(0b, 1b) ++#endif ++ + : "+m" (rw->lock) : "i" (RW_LOCK_BIAS) : "memory"); + } + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/system.h linux-2.6.35.4/arch/x86/include/asm/system.h +--- linux-2.6.35.4/arch/x86/include/asm/system.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/system.h 2010-09-17 20:12:09.000000000 -0400 +@@ -202,7 +202,7 @@ static inline unsigned long get_limit(un + { + unsigned long __limit; + asm("lsll %1,%0" : "=r" (__limit) : "r" (segment)); +- return __limit + 1; ++ return __limit; + } + + static inline void native_clts(void) +@@ -342,7 +342,7 @@ void enable_hlt(void); + + void cpu_idle_wait(void); + +-extern unsigned long arch_align_stack(unsigned long sp); ++#define arch_align_stack(x) ((x) & ~0xfUL) + extern void free_init_pages(char *what, unsigned long begin, unsigned long end); + + void default_idle(void); +diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h +--- linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/uaccess_32.h 2010-09-17 20:12:09.000000000 -0400 +@@ -44,6 +44,9 @@ unsigned long __must_check __copy_from_u + static __always_inline unsigned long __must_check + __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -62,6 +65,8 @@ __copy_to_user_inatomic(void __user *to, + return ret; + } + } ++ if (!__builtin_constant_p(n)) ++ check_object_size(from, n, true); + return __copy_to_user_ll(to, from, n); + } + +@@ -89,6 +94,9 @@ __copy_to_user(void __user *to, const vo + static __always_inline unsigned long + __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n) + { ++ if ((long)n < 0) ++ return n; ++ + /* Avoid zeroing the tail if the copy fails.. 
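The PAX_REFCOUNT instrumentation in the rwlock hunks above follows one template: after the locked arithmetic, "into" (i386) or "jno ... int $4" (amd64) raises an overflow trap if the signed result wrapped, and a .fixup stub registered through _ASM_EXTABLE undoes the update before execution resumes. Reassembled from the hunks above into a single function (amd64 branch only, nothing beyond what the patch itself adds), the read-lock variant reads as:

	static inline void arch_read_lock(arch_rwlock_t *rw)
	{
		asm volatile(LOCK_PREFIX " subl $1,(%0)\n\t"
			     "jno 0f\n"				/* signed overflow? */
			     "int $4\n0:\n"			/* yes: raise #OF for PaX */
			     ".pushsection .fixup,\"ax\"\n"
			     "1:\n"
			     LOCK_PREFIX " addl $1,(%0)\n"	/* undo the decrement */
			     "jmp 0b\n"
			     ".popsection\n"
			     _ASM_EXTABLE(0b, 1b)
			     "jns 2f\n"				/* reader accounting as before */
			     "call __read_lock_failed\n\t"
			     "2:\n"
			     ::LOCK_PTR_REG (rw) : "memory");
	}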
+ * If 'n' is constant and 1, 2, or 4, we do still zero on a failure, + * but as the zeroing behaviour is only significant when n is not +@@ -138,6 +146,10 @@ static __always_inline unsigned long + __copy_from_user(void *to, const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -153,6 +165,8 @@ __copy_from_user(void *to, const void __ + return ret; + } + } ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); + return __copy_from_user_ll(to, from, n); + } + +@@ -160,6 +174,10 @@ static __always_inline unsigned long __c + const void __user *from, unsigned long n) + { + might_fault(); ++ ++ if ((long)n < 0) ++ return n; ++ + if (__builtin_constant_p(n)) { + unsigned long ret; + +@@ -182,15 +200,19 @@ static __always_inline unsigned long + __copy_from_user_inatomic_nocache(void *to, const void __user *from, + unsigned long n) + { +- return __copy_from_user_ll_nocache_nozero(to, from, n); +-} ++ if ((long)n < 0) ++ return n; + +-unsigned long __must_check copy_to_user(void __user *to, +- const void *from, unsigned long n); +-unsigned long __must_check _copy_from_user(void *to, +- const void __user *from, +- unsigned long n); ++ return __copy_from_user_ll_nocache_nozero(to, from, n); ++} + ++extern void copy_to_user_overflow(void) ++#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS ++ __compiletime_error("copy_to_user() buffer size is not provably correct") ++#else ++ __compiletime_warning("copy_to_user() buffer size is not provably correct") ++#endif ++; + + extern void copy_from_user_overflow(void) + #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS +@@ -200,17 +222,61 @@ extern void copy_from_user_overflow(void + #endif + ; + +-static inline unsigned long __must_check copy_from_user(void *to, +- const void __user *from, +- unsigned long n) ++/** ++ * copy_to_user: - Copy a block of data into user space. ++ * @to: Destination address, in user space. ++ * @from: Source address, in kernel space. ++ * @n: Number of bytes to copy. ++ * ++ * Context: User context only. This function may sleep. ++ * ++ * Copy data from kernel space to user space. ++ * ++ * Returns number of bytes that could not be copied. ++ * On success, this will be zero. ++ */ ++static inline unsigned long __must_check ++copy_to_user(void __user *to, const void *from, unsigned long n) ++{ ++ int sz = __compiletime_object_size(from); ++ ++ if (unlikely(sz != -1 && sz < n)) ++ copy_to_user_overflow(); ++ else if (access_ok(VERIFY_WRITE, to, n)) ++ n = __copy_to_user(to, from, n); ++ return n; ++} ++ ++/** ++ * copy_from_user: - Copy a block of data from user space. ++ * @to: Destination address, in kernel space. ++ * @from: Source address, in user space. ++ * @n: Number of bytes to copy. ++ * ++ * Context: User context only. This function may sleep. ++ * ++ * Copy data from user space to kernel space. ++ * ++ * Returns number of bytes that could not be copied. ++ * On success, this will be zero. ++ * ++ * If some data could not be copied, this function will pad the copied ++ * data to the requested size using zero bytes. 
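Note the "(long)n < 0" prologue added to every helper in these uaccess_32.h hunks: when a caller's length computation underflows, the sign bit of the size is set, and returning early keeps the value from being treated as a copy of nearly 4GB. A sketch of the caller-side bug class this catches (names hypothetical):

	size_t body = pkt_len - hdr_len;	/* underflows when hdr_len > pkt_len */
	if (copy_from_user(buf, ubuf, body))	/* unguarded: huge bogus copy attempt */
		return -EFAULT;

With the guard, the enormous value is handed straight back as "bytes not copied" and the caller's error path runs instead.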
++ */ ++static inline unsigned long __must_check ++copy_from_user(void *to, const void __user *from, unsigned long n) + { + int sz = __compiletime_object_size(to); + +- if (likely(sz == -1 || sz >= n)) +- n = _copy_from_user(to, from, n); +- else ++ if (unlikely(sz != -1 && sz < n)) + copy_from_user_overflow(); +- ++ else if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if ((long)n > 0) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ memset(to, 0, n); ++ } + return n; + } + +diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h +--- linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/uaccess_64.h 2010-09-17 20:12:37.000000000 -0400 +@@ -11,6 +11,11 @@ + #include <asm/alternative.h> + #include <asm/cpufeature.h> + #include <asm/page.h> ++#include <asm/pgtable.h> ++ ++#define set_fs(x) (current_thread_info()->addr_limit = (x)) ++ ++extern void check_object_size(const void *ptr, unsigned long n, bool to); + + /* + * Copy To/From Userspace +@@ -37,26 +42,26 @@ copy_user_generic(void *to, const void * + return ret; + } + +-__must_check unsigned long +-_copy_to_user(void __user *to, const void *from, unsigned len); +-__must_check unsigned long +-_copy_from_user(void *to, const void __user *from, unsigned len); ++static __always_inline __must_check unsigned long ++__copy_to_user(void __user *to, const void *from, unsigned len); ++static __always_inline __must_check unsigned long ++__copy_from_user(void *to, const void __user *from, unsigned len); + __must_check unsigned long + copy_in_user(void __user *to, const void __user *from, unsigned len); + + static inline unsigned long __must_check copy_from_user(void *to, + const void __user *from, +- unsigned long n) ++ unsigned n) + { +- int sz = __compiletime_object_size(to); +- + might_fault(); +- if (likely(sz == -1 || sz >= n)) +- n = _copy_from_user(to, from, n); +-#ifdef CONFIG_DEBUG_VM +- else +- WARN(1, "Buffer overflow detected!\n"); +-#endif ++ ++ if (access_ok(VERIFY_READ, from, n)) ++ n = __copy_from_user(to, from, n); ++ else if ((int)n > 0) { ++ if (!__builtin_constant_p(n)) ++ check_object_size(to, n, false); ++ memset(to, 0, n); ++ } + return n; + } + +@@ -65,17 +70,35 @@ int copy_to_user(void __user *dst, const + { + might_fault(); + +- return _copy_to_user(dst, src, size); ++ if (access_ok(VERIFY_WRITE, dst, size)) ++ size = __copy_to_user(dst, src, size); ++ return size; + } + + static __always_inline __must_check +-int __copy_from_user(void *dst, const void __user *src, unsigned size) ++unsigned long __copy_from_user(void *dst, const void __user *src, unsigned size) + { +- int ret = 0; ++ int sz = __compiletime_object_size(dst); ++ unsigned ret = 0; + + might_fault(); +- if (!__builtin_constant_p(size)) ++ ++ if ((int)size < 0) ++ return size; ++ ++ if (unlikely(sz != -1 && sz < size)) { ++#ifdef CONFIG_DEBUG_VM ++ WARN(1, "Buffer overflow detected!\n"); ++#endif ++ return size; ++ } ++ ++ if (!__builtin_constant_p(size)) { ++ check_object_size(dst, size, false); ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; + return copy_user_generic(dst, (__force void *)src, size); ++ } + switch (size) { + case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src, + ret, "b", "b", "=q", 1); +@@ -108,18 +131,36 @@ int __copy_from_user(void *dst, const vo + ret, "q", "", "=r", 8); + return ret; + default: ++ if ((unsigned long)src < 
PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; + return copy_user_generic(dst, (__force void *)src, size); + } + } + + static __always_inline __must_check +-int __copy_to_user(void __user *dst, const void *src, unsigned size) ++unsigned long __copy_to_user(void __user *dst, const void *src, unsigned size) + { +- int ret = 0; ++ int sz = __compiletime_object_size(src); ++ unsigned ret = 0; + + might_fault(); +- if (!__builtin_constant_p(size)) ++ ++ if ((int)size < 0) ++ return size; ++ ++ if (unlikely(sz != -1 && sz < size)) { ++#ifdef CONFIG_DEBUG_VM ++ WARN(1, "Buffer overflow detected!\n"); ++#endif ++ return size; ++ } ++ ++ if (!__builtin_constant_p(size)) { ++ check_object_size(src, size, true); ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; + return copy_user_generic((__force void *)dst, src, size); ++ } + switch (size) { + case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst, + ret, "b", "b", "iq", 1); +@@ -152,19 +193,30 @@ int __copy_to_user(void __user *dst, con + ret, "q", "", "er", 8); + return ret; + default: ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; + return copy_user_generic((__force void *)dst, src, size); + } + } + + static __always_inline __must_check +-int __copy_in_user(void __user *dst, const void __user *src, unsigned size) ++unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned size) + { +- int ret = 0; ++ unsigned ret = 0; + + might_fault(); +- if (!__builtin_constant_p(size)) ++ ++ if ((int)size < 0) ++ return size; ++ ++ if (!__builtin_constant_p(size)) { ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; + return copy_user_generic((__force void *)dst, + (__force void *)src, size); ++ } + switch (size) { + case 1: { + u8 tmp; +@@ -204,6 +256,10 @@ int __copy_in_user(void __user *dst, con + return ret; + } + default: ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; + return copy_user_generic((__force void *)dst, + (__force void *)src, size); + } +@@ -222,33 +278,45 @@ __must_check unsigned long __clear_user( + static __must_check __always_inline int + __copy_from_user_inatomic(void *dst, const void __user *src, unsigned size) + { ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; + return copy_user_generic(dst, (__force const void *)src, size); + } + +-static __must_check __always_inline int ++static __must_check __always_inline unsigned long + __copy_to_user_inatomic(void __user *dst, const void *src, unsigned size) + { ++ if ((int)size < 0) ++ return size; ++ ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; + return copy_user_generic((__force void *)dst, src, size); + } + +-extern long __copy_user_nocache(void *dst, const void __user *src, ++extern unsigned long __copy_user_nocache(void *dst, const void __user *src, + unsigned size, int zerorest); + +-static inline int +-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size) ++static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned size) + { + might_sleep(); ++ ++ if ((int)size < 0) ++ return size; ++ + return __copy_user_nocache(dst, src, size, 1); + } + +-static inline int +-__copy_from_user_inatomic_nocache(void *dst, const void __user *src, ++static inline 
unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src, + unsigned size) + { ++ if ((int)size < 0) ++ return size; ++ + return __copy_user_nocache(dst, src, size, 0); + } + +-unsigned long ++extern unsigned long + copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest); + + #endif /* _ASM_X86_UACCESS_64_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/uaccess.h linux-2.6.35.4/arch/x86/include/asm/uaccess.h +--- linux-2.6.35.4/arch/x86/include/asm/uaccess.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/uaccess.h 2010-09-17 20:12:09.000000000 -0400 +@@ -8,12 +8,15 @@ + #include <linux/thread_info.h> + #include <linux/prefetch.h> + #include <linux/string.h> ++#include <linux/sched.h> + #include <asm/asm.h> + #include <asm/page.h> + + #define VERIFY_READ 0 + #define VERIFY_WRITE 1 + ++extern void check_object_size(const void *ptr, unsigned long n, bool to); ++ + /* + * The fs value determines whether argument validity checking should be + * performed or not. If get_fs() == USER_DS, checking is performed, with +@@ -29,7 +32,12 @@ + + #define get_ds() (KERNEL_DS) + #define get_fs() (current_thread_info()->addr_limit) ++#ifdef CONFIG_X86_32 ++void __set_fs(mm_segment_t x, int cpu); ++void set_fs(mm_segment_t x); ++#else + #define set_fs(x) (current_thread_info()->addr_limit = (x)) ++#endif + + #define segment_eq(a, b) ((a).seg == (b).seg) + +@@ -77,7 +85,33 @@ + * checks that the pointer is in the user space range - after calling + * this function, memory access functions may still return -EFAULT. + */ +-#define access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) ++#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size) == 0)) ++#define access_ok(type, addr, size) \ ++({ \ ++ long __size = size; \ ++ unsigned long __addr = (unsigned long)addr; \ ++ unsigned long __addr_ao = __addr & PAGE_MASK; \ ++ unsigned long __end_ao = __addr + __size - 1; \ ++ bool __ret_ao = __range_not_ok(__addr, __size) == 0; \ ++ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \ ++ while(__addr_ao <= __end_ao) { \ ++ char __c_ao; \ ++ __addr_ao += PAGE_SIZE; \ ++ if (__size > PAGE_SIZE) \ ++ cond_resched(); \ ++ if (__get_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ if (type != VERIFY_WRITE) { \ ++ __addr = __addr_ao; \ ++ continue; \ ++ } \ ++ if (__put_user(__c_ao, (char __user *)__addr)) \ ++ break; \ ++ __addr = __addr_ao; \ ++ } \ ++ } \ ++ __ret_ao; \ ++}) + + /* + * The exception table consists of pairs of addresses: the first is the +@@ -183,13 +217,21 @@ extern int __get_user_bad(void); + asm volatile("call __put_user_" #size : "=a" (__ret_pu) \ + : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx") + +- ++#ifdef CONFIG_X86_32 ++#define _ASM_LOAD_USER_DS(ds) "movw %w" #ds ",%%ds\n" ++#define _ASM_LOAD_KERNEL_DS "pushl %%ss; popl %%ds\n" ++#else ++#define _ASM_LOAD_USER_DS(ds) ++#define _ASM_LOAD_KERNEL_DS ++#endif + + #ifdef CONFIG_X86_32 + #define __put_user_asm_u64(x, addr, err, errret) \ +- asm volatile("1: movl %%eax,0(%2)\n" \ +- "2: movl %%edx,4(%2)\n" \ ++ asm volatile(_ASM_LOAD_USER_DS(5) \ ++ "1: movl %%eax,%%ds:0(%2)\n" \ ++ "2: movl %%edx,%%ds:4(%2)\n" \ + "3:\n" \ ++ _ASM_LOAD_KERNEL_DS \ + ".section .fixup,\"ax\"\n" \ + "4: movl %3,%0\n" \ + " jmp 3b\n" \ +@@ -197,15 +239,18 @@ extern int __get_user_bad(void); + _ASM_EXTABLE(1b, 4b) \ + _ASM_EXTABLE(2b, 4b) \ + : "=r" (err) \ +- : "A" (x), "r" (addr), "i" (errret), "0" (err)) ++ : "A" (x), "r" (addr), "i" 
(errret), "0" (err), \ ++ "r"(__USER_DS)) + + #define __put_user_asm_ex_u64(x, addr) \ +- asm volatile("1: movl %%eax,0(%1)\n" \ +- "2: movl %%edx,4(%1)\n" \ ++ asm volatile(_ASM_LOAD_USER_DS(2) \ ++ "1: movl %%eax,%%ds:0(%1)\n" \ ++ "2: movl %%edx,%%ds:4(%1)\n" \ + "3:\n" \ ++ _ASM_LOAD_KERNEL_DS \ + _ASM_EXTABLE(1b, 2b - 1b) \ + _ASM_EXTABLE(2b, 3b - 2b) \ +- : : "A" (x), "r" (addr)) ++ : : "A" (x), "r" (addr), "r"(__USER_DS)) + + #define __put_user_x8(x, ptr, __ret_pu) \ + asm volatile("call __put_user_8" : "=a" (__ret_pu) \ +@@ -374,16 +419,18 @@ do { \ + } while (0) + + #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \ +- asm volatile("1: mov"itype" %2,%"rtype"1\n" \ ++ asm volatile(_ASM_LOAD_USER_DS(5) \ ++ "1: mov"itype" %%ds:%2,%"rtype"1\n" \ + "2:\n" \ ++ _ASM_LOAD_KERNEL_DS \ + ".section .fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ + " xor"itype" %"rtype"1,%"rtype"1\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ +- : "=r" (err), ltype(x) \ +- : "m" (__m(addr)), "i" (errret), "0" (err)) ++ : "=r" (err), ltype (x) \ ++ : "m" (__m(addr)), "i" (errret), "0" (err), "r"(__USER_DS)) + + #define __get_user_size_ex(x, ptr, size) \ + do { \ +@@ -407,10 +454,12 @@ do { \ + } while (0) + + #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %1,%"rtype"0\n" \ ++ asm volatile(_ASM_LOAD_USER_DS(2) \ ++ "1: mov"itype" %%ds:%1,%"rtype"0\n" \ + "2:\n" \ ++ _ASM_LOAD_KERNEL_DS \ + _ASM_EXTABLE(1b, 2b - 1b) \ +- : ltype(x) : "m" (__m(addr))) ++ : ltype(x) : "m" (__m(addr)), "r"(__USER_DS)) + + #define __put_user_nocheck(x, ptr, size) \ + ({ \ +@@ -424,13 +473,24 @@ do { \ + int __gu_err; \ + unsigned long __gu_val; \ + __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \ +- (x) = (__force __typeof__(*(ptr)))__gu_val; \ ++ (x) = (__typeof__(*(ptr)))__gu_val; \ + __gu_err; \ + }) + + /* FIXME: this hack is definitely wrong -AK */ + struct __large_struct { unsigned long buf[100]; }; +-#define __m(x) (*(struct __large_struct __user *)(x)) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define ____m(x) \ ++({ \ ++ unsigned long ____x = (unsigned long)(x); \ ++ if (____x < PAX_USER_SHADOW_BASE) \ ++ ____x += PAX_USER_SHADOW_BASE; \ ++ (void __user *)____x; \ ++}) ++#else ++#define ____m(x) (x) ++#endif ++#define __m(x) (*(struct __large_struct __user *)____m(x)) + + /* + * Tell gcc we read from memory instead of writing: this is because +@@ -438,21 +498,26 @@ struct __large_struct { unsigned long bu + * aliasing issues. 
+ */ + #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \ +- asm volatile("1: mov"itype" %"rtype"1,%2\n" \ ++ asm volatile(_ASM_LOAD_USER_DS(5) \ ++ "1: mov"itype" %"rtype"1,%%ds:%2\n" \ + "2:\n" \ ++ _ASM_LOAD_KERNEL_DS \ + ".section .fixup,\"ax\"\n" \ + "3: mov %3,%0\n" \ + " jmp 2b\n" \ + ".previous\n" \ + _ASM_EXTABLE(1b, 3b) \ + : "=r"(err) \ +- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err)) ++ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err),\ ++ "r"(__USER_DS)) + + #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \ +- asm volatile("1: mov"itype" %"rtype"0,%1\n" \ ++ asm volatile(_ASM_LOAD_USER_DS(2) \ ++ "1: mov"itype" %"rtype"0,%%ds:%1\n" \ + "2:\n" \ ++ _ASM_LOAD_KERNEL_DS \ + _ASM_EXTABLE(1b, 2b - 1b) \ +- : : ltype(x), "m" (__m(addr))) ++ : : ltype(x), "m" (__m(addr)), "r"(__USER_DS)) + + /* + * uaccess_try and catch +@@ -530,7 +595,7 @@ struct __large_struct { unsigned long bu + #define get_user_ex(x, ptr) do { \ + unsigned long __gue_val; \ + __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \ +- (x) = (__force __typeof__(*(ptr)))__gue_val; \ ++ (x) = (__typeof__(*(ptr)))__gue_val; \ + } while (0) + + #ifdef CONFIG_X86_WP_WORKS_OK +@@ -567,6 +632,7 @@ extern struct movsl_mask { + + #define ARCH_HAS_NOCACHE_UACCESS 1 + ++#define ARCH_HAS_SORT_EXTABLE + #ifdef CONFIG_X86_32 + # include "uaccess_32.h" + #else +diff -urNp linux-2.6.35.4/arch/x86/include/asm/vgtod.h linux-2.6.35.4/arch/x86/include/asm/vgtod.h +--- linux-2.6.35.4/arch/x86/include/asm/vgtod.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/vgtod.h 2010-09-17 20:12:09.000000000 -0400 +@@ -14,6 +14,7 @@ struct vsyscall_gtod_data { + int sysctl_enabled; + struct timezone sys_tz; + struct { /* extract of a clocksource struct */ ++ char name[8]; + cycle_t (*vread)(void); + cycle_t cycle_last; + cycle_t mask; +diff -urNp linux-2.6.35.4/arch/x86/include/asm/vmi.h linux-2.6.35.4/arch/x86/include/asm/vmi.h +--- linux-2.6.35.4/arch/x86/include/asm/vmi.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/vmi.h 2010-09-17 20:12:09.000000000 -0400 +@@ -191,6 +191,7 @@ struct vrom_header { + u8 reserved[96]; /* Reserved for headers */ + char vmi_init[8]; /* VMI_Init jump point */ + char get_reloc[8]; /* VMI_GetRelocationInfo jump point */ ++ char rom_data[8048]; /* rest of the option ROM */ + } __attribute__((packed)); + + struct pnp_header { +diff -urNp linux-2.6.35.4/arch/x86/include/asm/vsyscall.h linux-2.6.35.4/arch/x86/include/asm/vsyscall.h +--- linux-2.6.35.4/arch/x86/include/asm/vsyscall.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/vsyscall.h 2010-09-17 20:12:09.000000000 -0400 +@@ -15,9 +15,10 @@ enum vsyscall_num { + + #ifdef __KERNEL__ + #include <linux/seqlock.h> ++#include <linux/getcpu.h> ++#include <linux/time.h> + + #define __section_vgetcpu_mode __attribute__ ((unused, __section__ (".vgetcpu_mode"), aligned(16))) +-#define __section_jiffies __attribute__ ((unused, __section__ (".jiffies"), aligned(16))) + + /* Definitions for CONFIG_GENERIC_TIME definitions */ + #define __section_vsyscall_gtod_data __attribute__ \ +@@ -31,7 +32,6 @@ enum vsyscall_num { + #define VGETCPU_LSL 2 + + extern int __vgetcpu_mode; +-extern volatile unsigned long __jiffies; + + /* kernel space (writeable) */ + extern int vgetcpu_mode; +@@ -39,6 +39,9 @@ extern struct timezone sys_tz; + + extern void map_vsyscall(void); + ++extern int vgettimeofday(struct timeval * tv, struct timezone * tz); ++extern time_t 
vtime(time_t *t); ++extern long vgetcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache); + #endif /* __KERNEL__ */ + + #endif /* _ASM_X86_VSYSCALL_H */ +diff -urNp linux-2.6.35.4/arch/x86/include/asm/xsave.h linux-2.6.35.4/arch/x86/include/asm/xsave.h +--- linux-2.6.35.4/arch/x86/include/asm/xsave.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/include/asm/xsave.h 2010-09-17 20:12:09.000000000 -0400 +@@ -59,6 +59,12 @@ static inline int fpu_xrstor_checking(st + static inline int xsave_user(struct xsave_struct __user *buf) + { + int err; ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)buf < PAX_USER_SHADOW_BASE) ++ buf = (struct xsave_struct __user *)((void __user*)buf + PAX_USER_SHADOW_BASE); ++#endif ++ + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x27\n" + "2:\n" + ".section .fixup,\"ax\"\n" +@@ -85,6 +91,11 @@ static inline int xrestore_user(struct x + u32 lmask = mask; + u32 hmask = mask >> 32; + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if ((unsigned long)xstate < PAX_USER_SHADOW_BASE) ++ xstate = (struct xsave_struct *)((void *)xstate + PAX_USER_SHADOW_BASE); ++#endif ++ + __asm__ __volatile__("1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n" + "2:\n" + ".section .fixup,\"ax\"\n" +diff -urNp linux-2.6.35.4/arch/x86/Kconfig linux-2.6.35.4/arch/x86/Kconfig +--- linux-2.6.35.4/arch/x86/Kconfig 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/Kconfig 2010-09-17 20:12:37.000000000 -0400 +@@ -1038,7 +1038,7 @@ choice + + config NOHIGHMEM + bool "off" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Linux can use up to 64 Gigabytes of physical memory on x86 systems. + However, the address space of 32-bit x86 processors is only 4 +@@ -1075,7 +1075,7 @@ config NOHIGHMEM + + config HIGHMEM4G + bool "4GB" +- depends on !X86_NUMAQ ++ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE) + ---help--- + Select this if you have a 32-bit processor and between 1 and 4 + gigabytes of physical RAM. +@@ -1129,7 +1129,7 @@ config PAGE_OFFSET + hex + default 0xB0000000 if VMSPLIT_3G_OPT + default 0x80000000 if VMSPLIT_2G +- default 0x78000000 if VMSPLIT_2G_OPT ++ default 0x70000000 if VMSPLIT_2G_OPT + default 0x40000000 if VMSPLIT_1G + default 0xC0000000 + depends on X86_32 +@@ -1461,7 +1461,7 @@ config ARCH_USES_PG_UNCACHED + + config EFI + bool "EFI runtime service support" +- depends on ACPI ++ depends on ACPI && !PAX_KERNEXEC + ---help--- + This enables the kernel to use EFI runtime services that are + available (such as the EFI variable services). +@@ -1548,6 +1548,7 @@ config KEXEC_JUMP + config PHYSICAL_START + hex "Physical address where the kernel is loaded" if (EMBEDDED || CRASH_DUMP) + default "0x1000000" ++ range 0x400000 0x40000000 + ---help--- + This gives the physical address where the kernel is loaded. + +@@ -1611,6 +1612,7 @@ config X86_NEED_RELOCS + config PHYSICAL_ALIGN + hex "Alignment value to which kernel should be aligned" if X86_32 + default "0x1000000" ++ range 0x400000 0x1000000 if PAX_KERNEXEC + range 0x2000 0x1000000 + ---help--- + This value puts the alignment restrictions on physical address +@@ -1642,9 +1644,10 @@ config HOTPLUG_CPU + Say N if you want to disable CPU hotplug. 
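The xsave_user()/xrestore_user() hunks repeat the amd64 UDEREF idiom used throughout the uaccess_64.h changes above: user pointers below PAX_USER_SHADOW_BASE are rebased into the shadow user mapping before the access, so a raw user pointer dereferenced directly from kernel mode faults instead of silently working. The patch open-codes the test at every site; as a helper it would be simply:

	static inline const void __user *pax_shadow(const void __user *p)
	{
		if ((unsigned long)p < PAX_USER_SHADOW_BASE)
			p = (const void __user *)((unsigned long)p +
						  PAX_USER_SHADOW_BASE);
		return p;
	}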
+ + config COMPAT_VDSO +- def_bool y ++ def_bool n + prompt "Compat VDSO support" + depends on X86_32 || IA32_EMULATION ++ depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF + ---help--- + Map the 32-bit VDSO to the predictable old-style address too. + +diff -urNp linux-2.6.35.4/arch/x86/Kconfig.cpu linux-2.6.35.4/arch/x86/Kconfig.cpu +--- linux-2.6.35.4/arch/x86/Kconfig.cpu 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/Kconfig.cpu 2010-09-17 20:12:09.000000000 -0400 +@@ -336,7 +336,7 @@ config X86_PPRO_FENCE + + config X86_F00F_BUG + def_bool y +- depends on M586MMX || M586TSC || M586 || M486 || M386 ++ depends on (M586MMX || M586TSC || M586 || M486 || M386) && !PAX_KERNEXEC + + config X86_INVD_BUG + def_bool y +@@ -360,7 +360,7 @@ config X86_POPAD_OK + + config X86_ALIGNMENT_16 + def_bool y +- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 ++ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1 + + config X86_INTEL_USERCOPY + def_bool y +@@ -406,7 +406,7 @@ config X86_CMPXCHG64 + # generates cmov. + config X86_CMOV + def_bool y +- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) ++ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX) + + config X86_MINIMUM_CPU_FAMILY + int +diff -urNp linux-2.6.35.4/arch/x86/Kconfig.debug linux-2.6.35.4/arch/x86/Kconfig.debug +--- linux-2.6.35.4/arch/x86/Kconfig.debug 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/Kconfig.debug 2010-09-17 20:12:09.000000000 -0400 +@@ -97,7 +97,7 @@ config X86_PTDUMP + config DEBUG_RODATA + bool "Write protect kernel read-only data structures" + default y +- depends on DEBUG_KERNEL ++ depends on DEBUG_KERNEL && BROKEN + ---help--- + Mark the kernel read-only data as write-protected in the pagetables, + in order to catch accidental (and incorrect) writes to such const +diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/boot.c linux-2.6.35.4/arch/x86/kernel/acpi/boot.c +--- linux-2.6.35.4/arch/x86/kernel/acpi/boot.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/acpi/boot.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1472,7 +1472,7 @@ static struct dmi_system_id __initdata a + DMI_MATCH(DMI_PRODUCT_NAME, "HP Compaq 6715b"), + }, + }, +- {} ++ { NULL, NULL, {{0, {0}}}, NULL} + }; + + /* +diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S +--- linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/acpi/realmode/wakeup.S 2010-09-17 20:12:09.000000000 -0400 +@@ -104,7 +104,7 @@ _start: + movl %eax, %ecx + orl %edx, %ecx + jz 1f +- movl $0xc0000080, %ecx ++ mov $MSR_EFER, %ecx + wrmsr + 1: + +diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c +--- linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/acpi/sleep.c 2010-09-17 20:12:09.000000000 -0400 +@@ -11,11 +11,12 @@ + #include <linux/cpumask.h> + #include <asm/segment.h> + #include <asm/desc.h> 
++#include <asm/e820.h> + + #include "realmode/wakeup.h" + #include "sleep.h" + +-unsigned long acpi_wakeup_address; ++unsigned long acpi_wakeup_address = 0x2000; + unsigned long acpi_realmode_flags; + + /* address in low memory of the wakeup routine. */ +@@ -96,8 +97,12 @@ int acpi_save_state_mem(void) + header->trampoline_segment = setup_trampoline() >> 4; + #ifdef CONFIG_SMP + stack_start.sp = temp_stack + sizeof(temp_stack); ++ ++ pax_open_kernel(); + early_gdt_descr.address = + (unsigned long)get_cpu_gdt_table(smp_processor_id()); ++ pax_close_kernel(); ++ + initial_gs = per_cpu_offset(smp_processor_id()); + #endif + initial_code = (unsigned long)wakeup_long64; +diff -urNp linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S +--- linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/acpi/wakeup_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -30,13 +30,11 @@ wakeup_pmode_return: + # and restore the stack ... but you need gdt for this to work + movl saved_context_esp, %esp + +- movl %cs:saved_magic, %eax +- cmpl $0x12345678, %eax ++ cmpl $0x12345678, saved_magic + jne bogus_magic + + # jump to place where we left off +- movl saved_eip, %eax +- jmp *%eax ++ jmp *(saved_eip) + + bogus_magic: + jmp bogus_magic +diff -urNp linux-2.6.35.4/arch/x86/kernel/alternative.c linux-2.6.35.4/arch/x86/kernel/alternative.c +--- linux-2.6.35.4/arch/x86/kernel/alternative.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/alternative.c 2010-09-17 20:12:09.000000000 -0400 +@@ -247,7 +247,7 @@ static void alternatives_smp_lock(const + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn DS segment override prefix into lock prefix */ +- if (*ptr == 0x3e) ++ if (*ktla_ktva(ptr) == 0x3e) + text_poke(ptr, ((unsigned char []){0xf0}), 1); + }; + mutex_unlock(&text_mutex); +@@ -268,7 +268,7 @@ static void alternatives_smp_unlock(cons + if (!*poff || ptr < text || ptr >= text_end) + continue; + /* turn lock prefix into DS segment override prefix */ +- if (*ptr == 0xf0) ++ if (*ktla_ktva(ptr) == 0xf0) + text_poke(ptr, ((unsigned char []){0x3E}), 1); + }; + mutex_unlock(&text_mutex); +@@ -436,7 +436,7 @@ void __init_or_module apply_paravirt(str + + BUG_ON(p->len > MAX_PATCH_LEN); + /* prep the buffer with the original instructions */ +- memcpy(insnbuf, p->instr, p->len); ++ memcpy(insnbuf, ktla_ktva(p->instr), p->len); + used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf, + (unsigned long)p->instr, p->len); + +@@ -504,7 +504,7 @@ void __init alternative_instructions(voi + if (smp_alt_once) + free_init_pages("SMP alternatives", + (unsigned long)__smp_locks, +- (unsigned long)__smp_locks_end); ++ PAGE_ALIGN((unsigned long)__smp_locks_end)); + + restart_nmi(); + } +@@ -521,13 +521,17 @@ void __init alternative_instructions(voi + * instructions. And on the local CPU you need to be protected again NMI or MCE + * handlers seeing an inconsistent instruction while you patch. + */ +-static void *__init_or_module text_poke_early(void *addr, const void *opcode, ++static void *__kprobes text_poke_early(void *addr, const void *opcode, + size_t len) + { + unsigned long flags; + local_irq_save(flags); +- memcpy(addr, opcode, len); ++ ++ pax_open_kernel(); ++ memcpy(ktla_ktva(addr), opcode, len); + sync_core(); ++ pax_close_kernel(); ++ + local_irq_restore(flags); + /* Could also do a CLFLUSH here to speed up CPU recovery; but + that causes hangs on some VIA CPUs. 
*/ +@@ -549,36 +553,22 @@ static void *__init_or_module text_poke_ + */ + void *__kprobes text_poke(void *addr, const void *opcode, size_t len) + { +- unsigned long flags; +- char *vaddr; ++ unsigned char *vaddr = ktla_ktva(addr); + struct page *pages[2]; +- int i; ++ size_t i; + + if (!core_kernel_text((unsigned long)addr)) { +- pages[0] = vmalloc_to_page(addr); +- pages[1] = vmalloc_to_page(addr + PAGE_SIZE); ++ pages[0] = vmalloc_to_page(vaddr); ++ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE); + } else { +- pages[0] = virt_to_page(addr); ++ pages[0] = virt_to_page(vaddr); + WARN_ON(!PageReserved(pages[0])); +- pages[1] = virt_to_page(addr + PAGE_SIZE); ++ pages[1] = virt_to_page(vaddr + PAGE_SIZE); + } + BUG_ON(!pages[0]); +- local_irq_save(flags); +- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0])); +- if (pages[1]) +- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1])); +- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0); +- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len); +- clear_fixmap(FIX_TEXT_POKE0); +- if (pages[1]) +- clear_fixmap(FIX_TEXT_POKE1); +- local_flush_tlb(); +- sync_core(); +- /* Could also do a CLFLUSH here to speed up CPU recovery; but +- that causes hangs on some VIA CPUs. */ ++ text_poke_early(addr, opcode, len); + for (i = 0; i < len; i++) +- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]); +- local_irq_restore(flags); ++ BUG_ON(((char *)vaddr)[i] != ((char *)opcode)[i]); + return addr; + } + +diff -urNp linux-2.6.35.4/arch/x86/kernel/amd_iommu.c linux-2.6.35.4/arch/x86/kernel/amd_iommu.c +--- linux-2.6.35.4/arch/x86/kernel/amd_iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/amd_iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2284,7 +2284,7 @@ static void prealloc_protection_domains( + } + } + +-static struct dma_map_ops amd_iommu_dma_ops = { ++static const struct dma_map_ops amd_iommu_dma_ops = { + .alloc_coherent = alloc_coherent, + .free_coherent = free_coherent, + .map_page = map_page, +diff -urNp linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c +--- linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/apic/io_apic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -691,7 +691,7 @@ struct IO_APIC_route_entry **alloc_ioapi + ioapic_entries = kzalloc(sizeof(*ioapic_entries) * nr_ioapics, + GFP_ATOMIC); + if (!ioapic_entries) +- return 0; ++ return NULL; + + for (apic = 0; apic < nr_ioapics; apic++) { + ioapic_entries[apic] = +@@ -708,7 +708,7 @@ nomem: + kfree(ioapic_entries[apic]); + kfree(ioapic_entries); + +- return 0; ++ return NULL; + } + + /* +@@ -1118,7 +1118,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, + } + EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector); + +-void lock_vector_lock(void) ++void lock_vector_lock(void) __acquires(vector_lock) + { + /* Used to the online set of cpus does not change + * during assign_irq_vector. 
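The alternative.c changes above assume the i386 KERNEXEC text layout, where the kernel image is reachable through two virtual ranges and ktla_ktva() translates an executable-text address to its readable/writable alias; text_poke() can then patch through that alias inside a pax_open_kernel() window instead of the FIX_TEXT_POKE fixmaps. Illustratively (the exact definition lives in the headers the patch also modifies):

	#ifdef CONFIG_PAX_KERNEXEC
	#define ktla_ktva(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
	#else
	#define ktla_ktva(addr)	(addr)
	#endif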
+@@ -1126,7 +1126,7 @@ void lock_vector_lock(void) + raw_spin_lock(&vector_lock); + } + +-void unlock_vector_lock(void) ++void unlock_vector_lock(void) __releases(vector_lock) + { + raw_spin_unlock(&vector_lock); + } +diff -urNp linux-2.6.35.4/arch/x86/kernel/apm_32.c linux-2.6.35.4/arch/x86/kernel/apm_32.c +--- linux-2.6.35.4/arch/x86/kernel/apm_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/apm_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -410,7 +410,7 @@ static DEFINE_MUTEX(apm_mutex); + * This is for buggy BIOS's that refer to (real mode) segment 0x40 + * even though they are called in protected mode. + */ +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + static const char driver_version[] = "1.16ac"; /* no spaces */ +@@ -588,7 +588,10 @@ static long __apm_bios_call(void *_call) + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -597,7 +600,11 @@ static long __apm_bios_call(void *_call) + &call->esi); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + return call->eax & 0xff; +@@ -664,7 +671,10 @@ static long __apm_bios_call_simple(void + BUG_ON(cpu != 0); + gdt = get_cpu_gdt_table(cpu); + save_desc_40 = gdt[0x40 / 8]; ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + apm_irq_save(flags); + APM_DO_SAVE_SEGS; +@@ -672,7 +682,11 @@ static long __apm_bios_call_simple(void + &call->eax); + APM_DO_RESTORE_SEGS; + apm_irq_restore(flags); ++ ++ pax_open_kernel(); + gdt[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + return error; + } +@@ -975,7 +989,7 @@ recalc: + + static void apm_power_off(void) + { +- unsigned char po_bios_call[] = { ++ const unsigned char po_bios_call[] = { + 0xb8, 0x00, 0x10, /* movw $0x1000,ax */ + 0x8e, 0xd0, /* movw ax,ss */ + 0xbc, 0x00, 0xf0, /* movw $0xf000,sp */ +@@ -1931,7 +1945,10 @@ static const struct file_operations apm_ + static struct miscdevice apm_device = { + APM_MINOR_DEV, + "apm_bios", +- &apm_bios_fops ++ &apm_bios_fops, ++ {NULL, NULL}, ++ NULL, ++ NULL + }; + + +@@ -2252,7 +2269,7 @@ static struct dmi_system_id __initdata a + { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, + }, + +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL} + }; + + /* +@@ -2355,12 +2372,15 @@ static int __init apm_init(void) + * code to that CPU. 
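The pax_open_kernel()/pax_close_kernel() bracketing added around each GDT write in apm_32.c below (and in acpi/sleep.c earlier) is needed because KERNEXEC keeps these descriptor tables read-only. A minimal sketch of the non-paravirt helpers, assuming they reduce to toggling CR0.WP; the real ones also carry paravirt and preemption handling:

	static inline void pax_open_kernel(void)
	{
		write_cr0(read_cr0() & ~X86_CR0_WP);	/* permit writes to RO pages */
		barrier();
	}

	static inline void pax_close_kernel(void)
	{
		barrier();
		write_cr0(read_cr0() | X86_CR0_WP);	/* re-arm write protection */
	}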
+ */ + gdt = get_cpu_gdt_table(0); ++ ++ pax_open_kernel(); + set_desc_base(&gdt[APM_CS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4)); + set_desc_base(&gdt[APM_CS_16 >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4)); + set_desc_base(&gdt[APM_DS >> 3], + (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4)); ++ pax_close_kernel(); + + proc_create("apm", 0, NULL, &apm_file_ops); + +diff -urNp linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c +--- linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/asm-offsets_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -115,6 +115,11 @@ void foo(void) + OFFSET(PV_CPU_iret, pv_cpu_ops, iret); + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); + OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); ++#endif ++ + #endif + + #ifdef CONFIG_XEN +diff -urNp linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c +--- linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/asm-offsets_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -63,6 +63,18 @@ int main(void) + OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit); + OFFSET(PV_CPU_swapgs, pv_cpu_ops, swapgs); + OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0); ++ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0); ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3); ++ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3); ++ OFFSET(PV_MMU_set_pgd, pv_mmu_ops, set_pgd); ++#endif ++ + #endif + + +@@ -115,6 +127,7 @@ int main(void) + ENTRY(cr8); + BLANK(); + #undef ENTRY ++ DEFINE(TSS_size, sizeof(struct tss_struct)); + DEFINE(TSS_ist, offsetof(struct tss_struct, x86_tss.ist)); + BLANK(); + DEFINE(crypto_tfm_ctx_offset, offsetof(struct crypto_tfm, __crt_ctx)); +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/common.c linux-2.6.35.4/arch/x86/kernel/cpu/common.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/common.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/common.c 2010-09-17 20:12:09.000000000 -0400 +@@ -83,60 +83,6 @@ static const struct cpu_dev __cpuinitcon + + static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu; + +-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = { +-#ifdef CONFIG_X86_64 +- /* +- * We need valid kernel segments for data and code in long mode too +- * IRET will check the segment types kkeil 2000/10/28 +- * Also sysret mandates a special GDT layout +- * +- * TLS descriptors are currently at a different place compared to i386. +- * Hopefully nobody expects them at a fixed place (Wine?) 
+- */ +- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff), +-#else +- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff), +- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff), +- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff), +- /* +- * Segments used for calling PnP BIOS have byte granularity. +- * They code segments and data segments have fixed 64k limits, +- * the transfer segment sizes are set at run time. +- */ +- /* 32-bit code */ +- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* 16-bit data */ +- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0), +- /* +- * The APM segments have byte granularity and their bases +- * are set at run time. All have 64k limits. +- */ +- /* 32-bit code */ +- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff), +- /* 16-bit code */ +- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff), +- /* data */ +- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff), +- +- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff), +- GDT_STACK_CANARY_INIT +-#endif +-} }; +-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page); +- + static int __init x86_xsave_setup(char *s) + { + setup_clear_cpu_cap(X86_FEATURE_XSAVE); +@@ -344,7 +290,7 @@ void switch_to_new_gdt(int cpu) + { + struct desc_ptr gdt_descr; + +- gdt_descr.address = (long)get_cpu_gdt_table(cpu); ++ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + /* Reload the per-cpu base */ +@@ -802,6 +748,10 @@ static void __cpuinit identify_cpu(struc + /* Filter out anything that depends on CPUID levels we don't have */ + filter_cpuid_features(c, true); + ++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || (defined(CONFIG_PAX_MEMORY_UDEREF) && defined(CONFIG_X86_32)) ++ setup_clear_cpu_cap(X86_FEATURE_SEP); ++#endif ++ + /* If the model name is still unset, do table lookup. 
*/ + if (!c->x86_model_id[0]) { + const char *p; +@@ -1117,7 +1067,7 @@ void __cpuinit cpu_init(void) + int i; + + cpu = stack_smp_processor_id(); +- t = &per_cpu(init_tss, cpu); ++ t = init_tss + cpu; + oist = &per_cpu(orig_ist, cpu); + + #ifdef CONFIG_NUMA +@@ -1143,7 +1093,7 @@ void __cpuinit cpu_init(void) + switch_to_new_gdt(cpu); + loadsegment(fs, 0); + +- load_idt((const struct desc_ptr *)&idt_descr); ++ load_idt(&idt_descr); + + memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8); + syscall_init(); +@@ -1205,7 +1155,7 @@ void __cpuinit cpu_init(void) + { + int cpu = smp_processor_id(); + struct task_struct *curr = current; +- struct tss_struct *t = &per_cpu(init_tss, cpu); ++ struct tss_struct *t = init_tss + cpu; + struct thread_struct *thread = &curr->thread; + + if (cpumask_test_and_set_cpu(cpu, cpu_initialized_mask)) { +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/acpi-cpufreq.c 2010-09-17 20:12:09.000000000 -0400 +@@ -484,7 +484,7 @@ static const struct dmi_system_id sw_any + DMI_MATCH(DMI_PRODUCT_NAME, "X6DLP"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + static int acpi_cpufreq_blacklist(struct cpuinfo_x86 *c) +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/cpufreq/speedstep-centrino.c 2010-09-17 20:12:09.000000000 -0400 +@@ -226,7 +226,7 @@ static struct cpu_model models[] = + { &cpu_ids[CPU_MP4HT_D0], NULL, 0, NULL }, + { &cpu_ids[CPU_MP4HT_E0], NULL, 0, NULL }, + +- { NULL, } ++ { NULL, NULL, 0, NULL} + }; + #undef _BANIAS + #undef BANIAS +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/intel.c linux-2.6.35.4/arch/x86/kernel/cpu/intel.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/intel.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/intel.c 2010-09-17 20:12:09.000000000 -0400 +@@ -160,7 +160,7 @@ static void __cpuinit trap_init_f00f_bug + * Update the IDT descriptor and reload the IDT so that + * it uses the read-only mapped virtual address. 
+ */ +- idt_descr.address = fix_to_virt(FIX_F00F_IDT); ++ idt_descr.address = (struct desc_struct *)fix_to_virt(FIX_F00F_IDT); + load_idt(&idt_descr); + } + #endif +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/Makefile linux-2.6.35.4/arch/x86/kernel/cpu/Makefile +--- linux-2.6.35.4/arch/x86/kernel/cpu/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/Makefile 2010-09-17 20:12:09.000000000 -0400 +@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg + CFLAGS_REMOVE_perf_event.o = -pg + endif + +-# Make sure load_percpu_segment has no stackprotector +-nostackp := $(call cc-option, -fno-stack-protector) +-CFLAGS_common.o := $(nostackp) +- + obj-y := intel_cacheinfo.o addon_cpuid_features.o + obj-y += proc.o capflags.o powerflags.o common.o + obj-y += vmware.o hypervisor.o sched.o mshyperv.o +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/mcheck/mce.c 2010-09-17 20:12:09.000000000 -0400 +@@ -219,7 +219,7 @@ static void print_mce(struct mce *m) + !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "", + m->cs, m->ip); + +- if (m->cs == __KERNEL_CS) ++ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS) + print_symbol("{%s}", m->ip); + pr_cont("\n"); + } +@@ -1471,14 +1471,14 @@ void __cpuinit mcheck_cpu_init(struct cp + */ + + static DEFINE_SPINLOCK(mce_state_lock); +-static int open_count; /* #times opened */ ++static atomic_t open_count; /* #times opened */ + static int open_exclu; /* already open exclusive? */ + + static int mce_open(struct inode *inode, struct file *file) + { + spin_lock(&mce_state_lock); + +- if (open_exclu || (open_count && (file->f_flags & O_EXCL))) { ++ if (open_exclu || (atomic_read(&open_count) && (file->f_flags & O_EXCL))) { + spin_unlock(&mce_state_lock); + + return -EBUSY; +@@ -1486,7 +1486,7 @@ static int mce_open(struct inode *inode, + + if (file->f_flags & O_EXCL) + open_exclu = 1; +- open_count++; ++ atomic_inc(&open_count); + + spin_unlock(&mce_state_lock); + +@@ -1497,7 +1497,7 @@ static int mce_release(struct inode *ino + { + spin_lock(&mce_state_lock); + +- open_count--; ++ atomic_dec(&open_count); + open_exclu = 0; + + spin_unlock(&mce_state_lock); +@@ -1683,6 +1683,7 @@ static struct miscdevice mce_log_device + MISC_MCELOG_MINOR, + "mcelog", + &mce_chrdev_ops, ++ {NULL, NULL}, NULL, NULL + }; + + /* +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/generic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -28,7 +28,7 @@ static struct fixed_range_block fixed_ra + { MSR_MTRRfix64K_00000, 1 }, /* one 64k MTRR */ + { MSR_MTRRfix16K_80000, 2 }, /* two 16k MTRRs */ + { MSR_MTRRfix4K_C0000, 8 }, /* eight 4k MTRRs */ +- {} ++ { 0, 0 } + }; + + static unsigned long smp_changes_mask; +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/main.c 2010-09-17 20:12:09.000000000 -0400 +@@ -61,7 +61,7 @@ static DEFINE_MUTEX(mtrr_mutex); + u64 size_or_mask, size_and_mask; + static bool mtrr_aps_delayed_init; + +-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM]; ++static const 
struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only; + + const struct mtrr_ops *mtrr_if; + +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h +--- linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/mtrr/mtrr.h 2010-09-17 20:12:09.000000000 -0400 +@@ -12,19 +12,19 @@ + extern unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES]; + + struct mtrr_ops { +- u32 vendor; +- u32 use_intel_if; +- void (*set)(unsigned int reg, unsigned long base, ++ const u32 vendor; ++ const u32 use_intel_if; ++ void (* const set)(unsigned int reg, unsigned long base, + unsigned long size, mtrr_type type); +- void (*set_all)(void); ++ void (* const set_all)(void); + +- void (*get)(unsigned int reg, unsigned long *base, ++ void (* const get)(unsigned int reg, unsigned long *base, + unsigned long *size, mtrr_type *type); +- int (*get_free_region)(unsigned long base, unsigned long size, ++ int (* const get_free_region)(unsigned long base, unsigned long size, + int replace_reg); +- int (*validate_add_page)(unsigned long base, unsigned long size, ++ int (* const validate_add_page)(unsigned long base, unsigned long size, + unsigned int type); +- int (*have_wrcomb)(void); ++ int (* const have_wrcomb)(void); + }; + + extern int generic_get_free_region(unsigned long base, unsigned long size, +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/perfctr-watchdog.c 2010-09-17 20:12:09.000000000 -0400 +@@ -30,11 +30,11 @@ struct nmi_watchdog_ctlblk { + + /* Interface defining a CPU specific perfctr watchdog */ + struct wd_ops { +- int (*reserve)(void); +- void (*unreserve)(void); +- int (*setup)(unsigned nmi_hz); +- void (*rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); +- void (*stop)(void); ++ int (* const reserve)(void); ++ void (* const unreserve)(void); ++ int (* const setup)(unsigned nmi_hz); ++ void (* const rearm)(struct nmi_watchdog_ctlblk *wd, unsigned nmi_hz); ++ void (* const stop)(void); + unsigned perfctr; + unsigned evntsel; + u64 checkbit; +@@ -634,6 +634,7 @@ static const struct wd_ops p4_wd_ops = { + #define ARCH_PERFMON_NMI_EVENT_SEL ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL + #define ARCH_PERFMON_NMI_EVENT_UMASK ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK + ++/* cannot be const, see probe_nmi_watchdog */ + static struct wd_ops intel_arch_wd_ops; + + static int setup_intel_arch_watchdog(unsigned nmi_hz) +@@ -686,6 +687,7 @@ static int setup_intel_arch_watchdog(uns + return 1; + } + ++/* cannot be const */ + static struct wd_ops intel_arch_wd_ops __read_mostly = { + .reserve = single_msr_reserve, + .unreserve = single_msr_unreserve, +diff -urNp linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c +--- linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/cpu/perf_event.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1685,7 +1685,7 @@ perf_callchain_user(struct pt_regs *regs + break; + + callchain_store(entry, frame.return_address); +- fp = frame.next_frame; ++ fp = (__force const void __user *)frame.next_frame; + } + } + +diff -urNp linux-2.6.35.4/arch/x86/kernel/crash.c linux-2.6.35.4/arch/x86/kernel/crash.c +--- linux-2.6.35.4/arch/x86/kernel/crash.c 2010-08-26 19:47:12.000000000 
-0400 ++++ linux-2.6.35.4/arch/x86/kernel/crash.c 2010-09-17 20:12:09.000000000 -0400 +@@ -40,7 +40,7 @@ static void kdump_nmi_callback(int cpu, + regs = args->regs; + + #ifdef CONFIG_X86_32 +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + crash_fixup_ss_esp(&fixed_regs, regs); + regs = &fixed_regs; + } +diff -urNp linux-2.6.35.4/arch/x86/kernel/doublefault_32.c linux-2.6.35.4/arch/x86/kernel/doublefault_32.c +--- linux-2.6.35.4/arch/x86/kernel/doublefault_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/doublefault_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -11,7 +11,7 @@ + + #define DOUBLEFAULT_STACKSIZE (1024) + static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE]; +-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE) ++#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2) + + #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM) + +@@ -21,7 +21,7 @@ static void doublefault_fn(void) + unsigned long gdt, tss; + + store_gdt(&gdt_desc); +- gdt = gdt_desc.address; ++ gdt = (unsigned long)gdt_desc.address; + + printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size); + +@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cach + /* 0x2 bit is always set */ + .flags = X86_EFLAGS_SF | 0x2, + .sp = STACK_START, +- .es = __USER_DS, ++ .es = __KERNEL_DS, + .cs = __KERNEL_CS, + .ss = __KERNEL_DS, +- .ds = __USER_DS, ++ .ds = __KERNEL_DS, + .fs = __KERNEL_PERCPU, + + .__cr3 = __pa_nodebug(swapper_pg_dir), +diff -urNp linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c +--- linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/dumpstack_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -107,11 +107,12 @@ void show_registers(struct pt_regs *regs + * When in-kernel, we also print out the stack and code at the + * time of the fault.. 
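The mtrr.h and perfctr-watchdog.c hunks above show the patch set's recurring constification pattern: once every function pointer in an ops table is declared const (and the table itself is const or __read_only), the hooks end up in read-only memory and cannot be retargeted by a stray or attacker-controlled write; intel_arch_wd_ops is explicitly annotated as the one table that must stay writable. The shape of the pattern, with hypothetical callbacks:

	struct example_ops {
		int  (* const setup)(unsigned nmi_hz);
		void (* const stop)(void);
	};

	static int  demo_setup(unsigned nmi_hz) { return 0; }
	static void demo_stop(void) { }

	static const struct example_ops demo_ops = {
		.setup = demo_setup,
		.stop  = demo_stop,
	};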
+ */ +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + unsigned int code_prologue = code_bytes * 43 / 64; + unsigned int code_len = code_bytes; + unsigned char c; + u8 *ip; ++ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(smp_processor_id())[(0xffff & regs->cs) >> 3]); + + printk(KERN_EMERG "Stack:\n"); + show_stack_log_lvl(NULL, regs, ®s->sp, +@@ -119,10 +120,10 @@ void show_registers(struct pt_regs *regs + + printk(KERN_EMERG "Code: "); + +- ip = (u8 *)regs->ip - code_prologue; ++ ip = (u8 *)regs->ip - code_prologue + cs_base; + if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) { + /* try starting at IP */ +- ip = (u8 *)regs->ip; ++ ip = (u8 *)regs->ip + cs_base; + code_len = code_len - code_prologue + 1; + } + for (i = 0; i < code_len; i++, ip++) { +@@ -131,7 +132,7 @@ void show_registers(struct pt_regs *regs + printk(" Bad EIP value."); + break; + } +- if (ip == (u8 *)regs->ip) ++ if (ip == (u8 *)regs->ip + cs_base) + printk("<%02x> ", c); + else + printk("%02x ", c); +@@ -144,6 +145,7 @@ int is_valid_bugaddr(unsigned long ip) + { + unsigned short ud2; + ++ ip = ktla_ktva(ip); + if (ip < PAGE_OFFSET) + return 0; + if (probe_kernel_address((unsigned short *)ip, ud2)) +diff -urNp linux-2.6.35.4/arch/x86/kernel/dumpstack.c linux-2.6.35.4/arch/x86/kernel/dumpstack.c +--- linux-2.6.35.4/arch/x86/kernel/dumpstack.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/dumpstack.c 2010-09-17 20:12:09.000000000 -0400 +@@ -207,7 +207,7 @@ void dump_stack(void) + #endif + + printk("Pid: %d, comm: %.20s %s %s %.*s\n", +- current->pid, current->comm, print_tainted(), ++ task_pid_nr(current), current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version); +@@ -263,7 +263,7 @@ void __kprobes oops_end(unsigned long fl + panic("Fatal exception in interrupt"); + if (panic_on_oops) + panic("Fatal exception"); +- do_exit(signr); ++ do_group_exit(signr); + } + + int __kprobes __die(const char *str, struct pt_regs *regs, long err) +@@ -290,7 +290,7 @@ int __kprobes __die(const char *str, str + + show_registers(regs); + #ifdef CONFIG_X86_32 +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; + } else { +@@ -318,7 +318,7 @@ void die(const char *str, struct pt_regs + unsigned long flags = oops_begin(); + int sig = SIGSEGV; + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + report_bug(regs->ip, regs); + + if (__die(str, regs, err)) +diff -urNp linux-2.6.35.4/arch/x86/kernel/efi_32.c linux-2.6.35.4/arch/x86/kernel/efi_32.c +--- linux-2.6.35.4/arch/x86/kernel/efi_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/efi_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -38,70 +38,38 @@ + */ + + static unsigned long efi_rt_eflags; +-static pgd_t efi_bak_pg_dir_pointer[2]; ++static pgd_t __initdata efi_bak_pg_dir_pointer[KERNEL_PGD_PTRS]; + +-void efi_call_phys_prelog(void) ++void __init efi_call_phys_prelog(void) + { +- unsigned long cr4; +- unsigned long temp; + struct desc_ptr gdt_descr; + + local_irq_save(efi_rt_eflags); + +- /* +- * If I don't have PAE, I should just duplicate two entries in page +- * directory. If I have PAE, I just need to duplicate one entry in +- * page directory. 
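The rewritten efi_call_phys_prelog()/epilog() below replaces the PAE and non-PAE special cases with a blanket save and restore of the kernel PGD range. clone_pgd_range() is the stock helper from arch/x86/include/asm/pgtable.h, essentially a typed memcpy():

	static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
	{
		memcpy(dst, src, count * sizeof(pgd_t));
	}

This also lets efi_bak_pg_dir_pointer[] grow to KERNEL_PGD_PTRS entries and become __initdata, since the physical-mode EFI calls it backs only happen at boot.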
+- */ +- cr4 = read_cr4_safe(); + +- if (cr4 & X86_CR4_PAE) { +- efi_bak_pg_dir_pointer[0].pgd = +- swapper_pg_dir[pgd_index(0)].pgd; +- swapper_pg_dir[0].pgd = +- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; +- } else { +- efi_bak_pg_dir_pointer[0].pgd = +- swapper_pg_dir[pgd_index(0)].pgd; +- efi_bak_pg_dir_pointer[1].pgd = +- swapper_pg_dir[pgd_index(0x400000)].pgd; +- swapper_pg_dir[pgd_index(0)].pgd = +- swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd; +- temp = PAGE_OFFSET + 0x400000; +- swapper_pg_dir[pgd_index(0x400000)].pgd = +- swapper_pg_dir[pgd_index(temp)].pgd; +- } ++ clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS); ++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); + + /* + * After the lock is released, the original page table is restored. + */ + __flush_tlb_all(); + +- gdt_descr.address = __pa(get_cpu_gdt_table(0)); ++ gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0)); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + } + +-void efi_call_phys_epilog(void) ++void __init efi_call_phys_epilog(void) + { +- unsigned long cr4; + struct desc_ptr gdt_descr; + +- gdt_descr.address = (unsigned long)get_cpu_gdt_table(0); ++ gdt_descr.address = get_cpu_gdt_table(0); + gdt_descr.size = GDT_SIZE - 1; + load_gdt(&gdt_descr); + +- cr4 = read_cr4_safe(); +- +- if (cr4 & X86_CR4_PAE) { +- swapper_pg_dir[pgd_index(0)].pgd = +- efi_bak_pg_dir_pointer[0].pgd; +- } else { +- swapper_pg_dir[pgd_index(0)].pgd = +- efi_bak_pg_dir_pointer[0].pgd; +- swapper_pg_dir[pgd_index(0x400000)].pgd = +- efi_bak_pg_dir_pointer[1].pgd; +- } ++ clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS); + + /* + * After the lock is released, the original page table is restored. +diff -urNp linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S +--- linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/efi_stub_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -6,6 +6,7 @@ + */ + + #include <linux/linkage.h> ++#include <linux/init.h> + #include <asm/page_types.h> + + /* +@@ -20,7 +21,7 @@ + * service functions will comply with gcc calling convention, too. + */ + +-.text ++__INIT + ENTRY(efi_call_phys) + /* + * 0. The function can only be called in Linux kernel. So CS has been +@@ -36,9 +37,7 @@ ENTRY(efi_call_phys) + * The mapping of lower virtual memory has been created in prelog and + * epilog. + */ +- movl $1f, %edx +- subl $__PAGE_OFFSET, %edx +- jmp *%edx ++ jmp 1f-__PAGE_OFFSET + 1: + + /* +@@ -47,14 +46,8 @@ ENTRY(efi_call_phys) + * parameter 2, ..., param n. To make things easy, we save the return + * address of efi_call_phys in a global variable. + */ +- popl %edx +- movl %edx, saved_return_addr +- /* get the function pointer into ECX*/ +- popl %ecx +- movl %ecx, efi_rt_function_ptr +- movl $2f, %edx +- subl $__PAGE_OFFSET, %edx +- pushl %edx ++ popl (saved_return_addr) ++ popl (efi_rt_function_ptr) + + /* + * 3. Clear PG bit in %CR0. +@@ -73,9 +66,8 @@ ENTRY(efi_call_phys) + /* + * 5. Call the physical function. + */ +- jmp *%ecx ++ call *(efi_rt_function_ptr-__PAGE_OFFSET) + +-2: + /* + * 6. After EFI runtime service returns, control will return to + * following instruction. We'd better readjust stack pointer first. +@@ -88,35 +80,28 @@ ENTRY(efi_call_phys) + movl %cr0, %edx + orl $0x80000000, %edx + movl %edx, %cr0 +- jmp 1f +-1: ++ + /* + * 8. 
Now restore the virtual mode from flat mode by + * adding EIP with PAGE_OFFSET. + */ +- movl $1f, %edx +- jmp *%edx ++ jmp 1f+__PAGE_OFFSET + 1: + + /* + * 9. Balance the stack. And because EAX contain the return value, + * we'd better not clobber it. + */ +- leal efi_rt_function_ptr, %edx +- movl (%edx), %ecx +- pushl %ecx ++ pushl (efi_rt_function_ptr) + + /* +- * 10. Push the saved return address onto the stack and return. ++ * 10. Return to the saved return address. + */ +- leal saved_return_addr, %edx +- movl (%edx), %ecx +- pushl %ecx +- ret ++ jmpl *(saved_return_addr) + ENDPROC(efi_call_phys) + .previous + +-.data ++__INITDATA + saved_return_addr: + .long 0 + efi_rt_function_ptr: +diff -urNp linux-2.6.35.4/arch/x86/kernel/entry_32.S linux-2.6.35.4/arch/x86/kernel/entry_32.S +--- linux-2.6.35.4/arch/x86/kernel/entry_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/entry_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -192,7 +192,67 @@ + + #endif /* CONFIG_X86_32_LAZY_GS */ + +-.macro SAVE_ALL ++.macro PAX_EXIT_KERNEL ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_PARAVIRT ++ push %eax; push %ecx; ++#endif ++ mov %cs, %esi ++ cmp $__KERNEXEC_KERNEL_CS, %esi ++ jnz 2f ++#ifdef CONFIG_PARAVIRT ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ btr $16, %esi ++ ljmp $__KERNEL_CS, $1f ++1: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0); ++#else ++ mov %esi, %cr0 ++#endif ++2: ++#ifdef CONFIG_PARAVIRT ++ pop %ecx; pop %eax ++#endif ++#endif ++.endm ++ ++.macro PAX_ENTER_KERNEL ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_PARAVIRT ++ push %eax; push %ecx; ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0) ++ mov %eax, %esi ++#else ++ mov %cr0, %esi ++#endif ++ bts $16, %esi ++ jnc 1f ++ mov %cs, %esi ++ cmp $__KERNEL_CS, %esi ++ jz 3f ++ ljmp $__KERNEL_CS, $3f ++1: ljmp $__KERNEXEC_KERNEL_CS, $2f ++2: ++#ifdef CONFIG_PARAVIRT ++ mov %esi, %eax ++ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0) ++#else ++ mov %esi, %cr0 ++#endif ++3: ++#ifdef CONFIG_PARAVIRT ++ pop %ecx; pop %eax ++#endif ++#endif ++.endm ++ ++.macro __SAVE_ALL _DS + cld + PUSH_GS + pushl %fs +@@ -225,7 +285,7 @@ + pushl %ebx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebx, 0 +- movl $(__USER_DS), %edx ++ movl $\_DS, %edx + movl %edx, %ds + movl %edx, %es + movl $(__KERNEL_PERCPU), %edx +@@ -233,6 +293,15 @@ + SET_KERNEL_GS %edx + .endm + ++.macro SAVE_ALL ++#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF) ++ __SAVE_ALL __KERNEL_DS ++ PAX_ENTER_KERNEL ++#else ++ __SAVE_ALL __USER_DS ++#endif ++.endm ++ + .macro RESTORE_INT_REGS + popl %ebx + CFI_ADJUST_CFA_OFFSET -4 +@@ -357,7 +426,15 @@ check_userspace: + movb PT_CS(%esp), %al + andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax + cmpl $USER_RPL, %eax ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jae resume_userspace ++ ++ PAX_EXIT_KERNEL ++ jmp resume_kernel ++#else + jb resume_kernel # not returning to v8086 or userspace ++#endif + + ENTRY(resume_userspace) + LOCKDEP_SYS_EXIT +@@ -423,10 +500,9 @@ sysenter_past_esp: + /*CFI_REL_OFFSET cs, 0*/ + /* + * Push current_thread_info()->sysenter_return to the stack. +- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words +- * pushed above; +8 corresponds to copy_thread's esp0 setting. 
+ */ +- pushl (TI_sysenter_return-THREAD_SIZE+8+4*4)(%esp) ++ GET_THREAD_INFO(%ebp) ++ pushl TI_sysenter_return(%ebp) + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET eip, 0 + +@@ -439,9 +515,19 @@ sysenter_past_esp: + * Load the potential sixth argument from user stack. + * Careful about security. + */ ++ movl PT_OLDESP(%esp),%ebp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov PT_OLDSS(%esp),%ds ++1: movl %ds:(%ebp),%ebp ++ push %ss ++ pop %ds ++#else + cmpl $__PAGE_OFFSET-3,%ebp + jae syscall_fault + 1: movl (%ebp),%ebp ++#endif ++ + movl %ebp,PT_EBP(%esp) + .section __ex_table,"a" + .align 4 +@@ -464,12 +550,23 @@ sysenter_do_call: + testl $_TIF_ALLWORK_MASK, %ecx + jne sysexit_audit + sysenter_exit: ++ ++#ifdef CONFIG_PAX_RANDKSTACK ++ pushl %eax ++ CFI_ADJUST_CFA_OFFSET 4 ++ call pax_randomize_kstack ++ popl %eax ++ CFI_ADJUST_CFA_OFFSET -4 ++#endif ++ + /* if something modifies registers it must also disable sysexit */ + movl PT_EIP(%esp), %edx + movl PT_OLDESP(%esp), %ecx + xorl %ebp,%ebp + TRACE_IRQS_ON + 1: mov PT_FS(%esp), %fs ++2: mov PT_DS(%esp), %ds ++3: mov PT_ES(%esp), %es + PTGS_TO_GS + ENABLE_INTERRUPTS_SYSEXIT + +@@ -513,11 +610,17 @@ sysexit_audit: + + CFI_ENDPROC + .pushsection .fixup,"ax" +-2: movl $0,PT_FS(%esp) ++4: movl $0,PT_FS(%esp) ++ jmp 1b ++5: movl $0,PT_DS(%esp) ++ jmp 1b ++6: movl $0,PT_ES(%esp) + jmp 1b + .section __ex_table,"a" + .align 4 +- .long 1b,2b ++ .long 1b,4b ++ .long 2b,5b ++ .long 3b,6b + .popsection + PTGS_TO_GS_EX + ENDPROC(ia32_sysenter_target) +@@ -551,6 +654,10 @@ syscall_exit: + testl $_TIF_ALLWORK_MASK, %ecx # current->work + jne syscall_exit_work + ++#ifdef CONFIG_PAX_RANDKSTACK ++ call pax_randomize_kstack ++#endif ++ + restore_all: + TRACE_IRQS_IRET + restore_all_notrace: +@@ -615,7 +722,13 @@ ldt_ss: + mov PT_OLDESP(%esp), %eax /* load userspace esp */ + mov %dx, %ax /* eax: new kernel esp */ + sub %eax, %edx /* offset (low word is 0) */ +- PER_CPU(gdt_page, %ebx) ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif + shr $16, %edx + mov %dl, GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx) /* bits 16..23 */ + mov %dh, GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx) /* bits 24..31 */ +@@ -655,25 +768,19 @@ work_resched: + + work_notifysig: # deal with pending signals and + # notify-resume requests ++ movl %esp, %eax + #ifdef CONFIG_VM86 + testl $X86_EFLAGS_VM, PT_EFLAGS(%esp) +- movl %esp, %eax +- jne work_notifysig_v86 # returning to kernel-space or ++ jz 1f # returning to kernel-space or + # vm86-space +- xorl %edx, %edx +- call do_notify_resume +- jmp resume_userspace_sig + +- ALIGN +-work_notifysig_v86: + pushl %ecx # save ti_flags for do_notify_resume + CFI_ADJUST_CFA_OFFSET 4 + call save_v86_state # %eax contains pt_regs pointer + popl %ecx + CFI_ADJUST_CFA_OFFSET -4 + movl %eax, %esp +-#else +- movl %esp, %eax ++1: + #endif + xorl %edx, %edx + call do_notify_resume +@@ -708,6 +815,10 @@ END(syscall_exit_work) + + RING0_INT_FRAME # can't unwind into user space anyway + syscall_fault: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ push %ss ++ pop %ds ++#endif + GET_THREAD_INFO(%ebp) + movl $-EFAULT,PT_EAX(%esp) + jmp resume_userspace +@@ -791,7 +902,13 @@ ptregs_clone: + * normal stack and adjusts ESP with the matching offset. 
+ */ + /* fixup the stack */ +- PER_CPU(gdt_page, %ebx) ++#ifdef CONFIG_SMP ++ movl PER_CPU_VAR(cpu_number), %ebx ++ shll $PAGE_SHIFT_asm, %ebx ++ addl $cpu_gdt_table, %ebx ++#else ++ movl $cpu_gdt_table, %ebx ++#endif + mov GDT_ENTRY_ESPFIX_SS * 8 + 4(%ebx), %al /* bits 16..23 */ + mov GDT_ENTRY_ESPFIX_SS * 8 + 7(%ebx), %ah /* bits 24..31 */ + shl $16, %eax +@@ -1273,7 +1390,6 @@ return_to_handler: + jmp *%ecx + #endif + +-.section .rodata,"a" + #include "syscall_table_32.S" + + syscall_table_size=(.-sys_call_table) +@@ -1330,9 +1446,12 @@ error_code: + movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart + REG_TO_PTGS %ecx + SET_KERNEL_GS %ecx +- movl $(__USER_DS), %ecx ++ movl $(__KERNEL_DS), %ecx + movl %ecx, %ds + movl %ecx, %es ++ ++ PAX_ENTER_KERNEL ++ + TRACE_IRQS_OFF + movl %esp,%eax # pt_regs pointer + call *%edi +@@ -1426,6 +1545,9 @@ nmi_stack_correct: + xorl %edx,%edx # zero error code + movl %esp,%eax # pt_regs pointer + call do_nmi ++ ++ PAX_EXIT_KERNEL ++ + jmp restore_all_notrace + CFI_ENDPROC + +@@ -1466,6 +1588,9 @@ nmi_espfix_stack: + FIXUP_ESPFIX_STACK # %eax == %esp + xorl %edx,%edx # zero error code + call do_nmi ++ ++ PAX_EXIT_KERNEL ++ + RESTORE_REGS + lss 12+4(%esp), %esp # back to espfix stack + CFI_ADJUST_CFA_OFFSET -24 +diff -urNp linux-2.6.35.4/arch/x86/kernel/entry_64.S linux-2.6.35.4/arch/x86/kernel/entry_64.S +--- linux-2.6.35.4/arch/x86/kernel/entry_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/entry_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -53,6 +53,7 @@ + #include <asm/paravirt.h> + #include <asm/ftrace.h> + #include <asm/percpu.h> ++#include <asm/pgtable.h> + + /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */ + #include <linux/elf-em.h> +@@ -174,6 +175,189 @@ ENTRY(native_usergs_sysret64) + ENDPROC(native_usergs_sysret64) + #endif /* CONFIG_PARAVIRT */ + ++ .macro ljmpq sel, off ++#if defined(CONFIG_MCORE2) || defined (CONFIG_MATOM) ++ .byte 0x48; ljmp *1234f(%rip) ++ .pushsection .rodata ++ .align 16 ++ 1234: .quad \off; .word \sel ++ .popsection ++#else ++ push $\sel ++ push $\off ++ lretq ++#endif ++ .endm ++ ++ENTRY(pax_enter_kernel) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ push %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ jnc 1f ++ mov %cs,%edi ++ cmp $__KERNEL_CS,%edi ++ jz 3f ++ ljmpq __KERNEL_CS,3f ++1: ljmpq __KERNEXEC_KERNEL_CS,2f ++2: SET_RDI_INTO_CR0 ++3: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ pop %rdi ++#endif ++ ++ retq ++ENDPROC(pax_enter_kernel) ++ ++ENTRY(pax_exit_kernel) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ push %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ mov %cs,%rdi ++ cmp $__KERNEXEC_KERNEL_CS,%edi ++ jnz 2f ++ GET_CR0_INTO_RDI ++ btr $16,%rdi ++ ljmpq __KERNEL_CS,1f ++1: SET_RDI_INTO_CR0 ++2: ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI); ++#endif ++ ++ pop %rdi ++#endif ++ ++ retq ++ENDPROC(pax_exit_kernel) ++ ++ENTRY(pax_enter_kernel_user) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ push %rdi ++ push %rbx ++ ++#ifdef CONFIG_PARAVIRT ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++ GET_CR3_INTO_RDI ++ mov %rdi,%rbx ++ add $__START_KERNEL_map,%rbx ++ sub phys_base(%rip),%rbx ++ ++#ifdef CONFIG_PARAVIRT ++ push %rdi ++ cmpl $0, pv_info+PARAVIRT_enabled ++ jz 1f ++ i = 0 ++ .rept USER_PGD_PTRS ++ mov i*8(%rbx),%rsi ++ mov $0,%sil ++ lea i*8(%rbx),%rdi ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd) ++ i = i + 1 ++ .endr ++ jmp 2f ++1: ++#endif ++ ++ i = 0 ++ .rept 
USER_PGD_PTRS ++ movb $0,i*8(%rbx) ++ i = i + 1 ++ .endr ++ ++#ifdef CONFIG_PARAVIRT ++2: pop %rdi ++#endif ++ SET_RDI_INTO_CR3 ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ bts $16,%rdi ++ SET_RDI_INTO_CR0 ++#endif ++ ++#ifdef CONFIG_PARAVIRT ++ PV_RESTORE_REGS(CLBR_RDI) ++#endif ++ ++ pop %rbx ++ pop %rdi ++#endif ++ ++ retq ++ENDPROC(pax_enter_kernel_user) ++ ++ENTRY(pax_exit_kernel_user) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ push %rdi ++ ++#ifdef CONFIG_PARAVIRT ++ push %rbx ++ PV_SAVE_REGS(CLBR_RDI) ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ GET_CR0_INTO_RDI ++ btr $16,%rdi ++ SET_RDI_INTO_CR0 ++#endif ++ ++ GET_CR3_INTO_RDI ++ add $__START_KERNEL_map,%rdi ++ sub phys_base(%rip),%rdi ++ ++#ifdef CONFIG_PARAVIRT ++ cmpl $0, pv_info+PARAVIRT_enabled ++ jz 1f ++ mov %rdi,%rbx ++ i = 0 ++ .rept USER_PGD_PTRS ++ mov i*8(%rbx),%rsi ++ mov $0x67,%sil ++ lea i*8(%rbx),%rdi ++ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd) ++ i = i + 1 ++ .endr ++ jmp 2f ++1: ++#endif ++ ++ i = 0 ++ .rept USER_PGD_PTRS ++ movb $0x67,i*8(%rdi) ++ i = i + 1 ++ .endr ++ ++#ifdef CONFIG_PARAVIRT ++2: PV_RESTORE_REGS(CLBR_RDI) ++ pop %rbx ++#endif ++ ++ pop %rdi ++#endif ++ ++ retq ++ENDPROC(pax_exit_kernel_user) + + .macro TRACE_IRQS_IRETQ offset=ARGOFFSET + #ifdef CONFIG_TRACE_IRQFLAGS +@@ -317,7 +501,7 @@ ENTRY(save_args) + leaq -ARGOFFSET+16(%rsp),%rdi /* arg1 for handler */ + movq_cfi rbp, 8 /* push %rbp */ + leaq 8(%rsp), %rbp /* mov %rsp, %ebp */ +- testl $3, CS(%rdi) ++ testb $3, CS(%rdi) + je 1f + SWAPGS + /* +@@ -409,7 +593,7 @@ ENTRY(ret_from_fork) + + RESTORE_REST + +- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread? ++ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread? + je int_ret_from_sys_call + + testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET +@@ -468,6 +652,11 @@ ENTRY(system_call_after_swapgs) + + movq %rsp,PER_CPU_VAR(old_rsp) + movq PER_CPU_VAR(kernel_stack),%rsp ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_enter_kernel_user ++#endif ++ + /* + * No need to follow this irqs off/on section - it's straight + * and short: +@@ -502,6 +691,11 @@ sysret_check: + andl %edi,%edx + jnz sysret_careful + CFI_REMEMBER_STATE ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++ + /* + * sysretq will re-enable interrupts: + */ +@@ -613,7 +807,7 @@ tracesys: + GLOBAL(int_ret_from_sys_call) + DISABLE_INTERRUPTS(CLBR_NONE) + TRACE_IRQS_OFF +- testl $3,CS-ARGOFFSET(%rsp) ++ testb $3,CS-ARGOFFSET(%rsp) + je retint_restore_args + movl $_TIF_ALLWORK_MASK,%edi + /* edi: mask to check */ +@@ -800,6 +994,16 @@ END(interrupt) + CFI_ADJUST_CFA_OFFSET 10*8 + call save_args + PARTIAL_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rdi) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + call \func + .endm + +@@ -826,7 +1030,7 @@ ret_from_intr: + CFI_ADJUST_CFA_OFFSET -8 + exit_intr: + GET_THREAD_INFO(%rcx) +- testl $3,CS-ARGOFFSET(%rsp) ++ testb $3,CS-ARGOFFSET(%rsp) + je retint_kernel + + /* Interrupt came from user space */ +@@ -848,12 +1052,18 @@ retint_swapgs: /* return to user-space + * The iretq could re-enable interrupts: + */ + DISABLE_INTERRUPTS(CLBR_ANY) ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#endif ++ + TRACE_IRQS_IRETQ + SWAPGS + jmp restore_args + + retint_restore_args: /* return to kernel space */ + DISABLE_INTERRUPTS(CLBR_ANY) ++ call pax_exit_kernel + /* + * The iretq could re-enable interrupts: + */ +@@ -1040,6 +1250,16 @@ ENTRY(\sym) + 
CFI_ADJUST_CFA_OFFSET 15*8 + call error_entry + DEFAULT_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + call \do_sym +@@ -1057,6 +1277,16 @@ ENTRY(\sym) + subq $15*8, %rsp + call save_paranoid + TRACE_IRQS_OFF ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ + call \do_sym +@@ -1074,9 +1304,24 @@ ENTRY(\sym) + subq $15*8, %rsp + call save_paranoid + TRACE_IRQS_OFF ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + xorl %esi,%esi /* no error code */ +- PER_CPU(init_tss, %r12) ++#ifdef CONFIG_SMP ++ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d ++ lea init_tss(%r12), %r12 ++#else ++ lea init_tss(%rip), %r12 ++#endif + subq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) + call \do_sym + addq $EXCEPTION_STKSZ, TSS_ist + (\ist - 1) * 8(%r12) +@@ -1093,6 +1338,16 @@ ENTRY(\sym) + CFI_ADJUST_CFA_OFFSET 15*8 + call error_entry + DEFAULT_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ +@@ -1112,6 +1367,16 @@ ENTRY(\sym) + call save_paranoid + DEFAULT_FRAME 0 + TRACE_IRQS_OFF ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + movq %rsp,%rdi /* pt_regs pointer */ + movq ORIG_RAX(%rsp),%rsi /* get error code */ + movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */ +@@ -1370,14 +1635,27 @@ ENTRY(paranoid_exit) + TRACE_IRQS_OFF + testl %ebx,%ebx /* swapgs needed? */ + jnz paranoid_restore +- testl $3,CS(%rsp) ++ testb $3,CS(%rsp) + jnz paranoid_userspace ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel ++ TRACE_IRQS_IRETQ 0 ++ SWAPGS_UNSAFE_STACK ++ RESTORE_ALL 8 ++ jmp irq_return ++#endif + paranoid_swapgs: ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ call pax_exit_kernel_user ++#else ++ call pax_exit_kernel ++#endif + TRACE_IRQS_IRETQ 0 + SWAPGS_UNSAFE_STACK + RESTORE_ALL 8 + jmp irq_return + paranoid_restore: ++ call pax_exit_kernel + TRACE_IRQS_IRETQ 0 + RESTORE_ALL 8 + jmp irq_return +@@ -1435,7 +1713,7 @@ ENTRY(error_entry) + movq_cfi r14, R14+8 + movq_cfi r15, R15+8 + xorl %ebx,%ebx +- testl $3,CS+8(%rsp) ++ testb $3,CS+8(%rsp) + je error_kernelspace + error_swapgs: + SWAPGS +@@ -1499,6 +1777,16 @@ ENTRY(nmi) + CFI_ADJUST_CFA_OFFSET 15*8 + call save_paranoid + DEFAULT_FRAME 0 ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ testb $3, CS(%rsp) ++ jnz 1f ++ call pax_enter_kernel ++ jmp 2f ++1: call pax_enter_kernel_user ++2: ++#else ++ call pax_enter_kernel ++#endif + /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */ + movq %rsp,%rdi + movq $-1,%rsi +@@ -1509,11 +1797,12 @@ ENTRY(nmi) + DISABLE_INTERRUPTS(CLBR_NONE) + testl %ebx,%ebx /* swapgs needed? 
*/ + jnz nmi_restore +- testl $3,CS(%rsp) ++ testb $3,CS(%rsp) + jnz nmi_userspace + nmi_swapgs: + SWAPGS_UNSAFE_STACK + nmi_restore: ++ call pax_exit_kernel + RESTORE_ALL 8 + jmp irq_return + nmi_userspace: +diff -urNp linux-2.6.35.4/arch/x86/kernel/ftrace.c linux-2.6.35.4/arch/x86/kernel/ftrace.c +--- linux-2.6.35.4/arch/x86/kernel/ftrace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/ftrace.c 2010-09-17 20:12:09.000000000 -0400 +@@ -174,7 +174,9 @@ void ftrace_nmi_enter(void) + + if (atomic_inc_return(&nmi_running) & MOD_CODE_WRITE_FLAG) { + smp_rmb(); ++ pax_open_kernel(); + ftrace_mod_code(); ++ pax_close_kernel(); + atomic_inc(&nmi_update_count); + } + /* Must have previous changes seen before executions */ +@@ -260,7 +262,7 @@ do_ftrace_mod_code(unsigned long ip, voi + + + +-static unsigned char ftrace_nop[MCOUNT_INSN_SIZE]; ++static unsigned char ftrace_nop[MCOUNT_INSN_SIZE] __read_only; + + static unsigned char *ftrace_nop_replace(void) + { +@@ -273,6 +275,8 @@ ftrace_modify_code(unsigned long ip, uns + { + unsigned char replaced[MCOUNT_INSN_SIZE]; + ++ ip = ktla_ktva(ip); ++ + /* + * Note: Due to modules and __init, code can + * disappear and change, we need to protect against faulting +@@ -329,7 +333,7 @@ int ftrace_update_ftrace_func(ftrace_fun + unsigned char old[MCOUNT_INSN_SIZE], *new; + int ret; + +- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE); ++ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE); + new = ftrace_call_replace(ip, (unsigned long)func); + ret = ftrace_modify_code(ip, old, new); + +@@ -382,15 +386,15 @@ int __init ftrace_dyn_arch_init(void *da + switch (faulted) { + case 0: + pr_info("converting mcount calls to 0f 1f 44 00 00\n"); +- memcpy(ftrace_nop, ftrace_test_p6nop, MCOUNT_INSN_SIZE); ++ memcpy(ftrace_nop, ktla_ktva(ftrace_test_p6nop), MCOUNT_INSN_SIZE); + break; + case 1: + pr_info("converting mcount calls to 66 66 66 66 90\n"); +- memcpy(ftrace_nop, ftrace_test_nop5, MCOUNT_INSN_SIZE); ++ memcpy(ftrace_nop, ktla_ktva(ftrace_test_nop5), MCOUNT_INSN_SIZE); + break; + case 2: + pr_info("converting mcount calls to jmp . 
+ 5\n"); +- memcpy(ftrace_nop, ftrace_test_jmp, MCOUNT_INSN_SIZE); ++ memcpy(ftrace_nop, ktla_ktva(ftrace_test_jmp), MCOUNT_INSN_SIZE); + break; + } + +@@ -411,6 +415,8 @@ static int ftrace_mod_jmp(unsigned long + { + unsigned char code[MCOUNT_INSN_SIZE]; + ++ ip = ktla_ktva(ip); ++ + if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE)) + return -EFAULT; + +diff -urNp linux-2.6.35.4/arch/x86/kernel/head32.c linux-2.6.35.4/arch/x86/kernel/head32.c +--- linux-2.6.35.4/arch/x86/kernel/head32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/head32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -17,6 +17,7 @@ + #include <asm/apic.h> + #include <asm/io_apic.h> + #include <asm/bios_ebda.h> ++#include <asm/boot.h> + + static void __init i386_default_early_setup(void) + { +@@ -40,7 +41,7 @@ void __init i386_start_kernel(void) + "EX TRAMPOLINE"); + #endif + +- reserve_early(__pa_symbol(&_text), __pa_symbol(&__bss_stop), "TEXT DATA BSS"); ++ reserve_early(LOAD_PHYSICAL_ADDR, __pa_symbol(&__bss_stop), "TEXT DATA BSS"); + + #ifdef CONFIG_BLK_DEV_INITRD + /* Reserve INITRD */ +diff -urNp linux-2.6.35.4/arch/x86/kernel/head_32.S linux-2.6.35.4/arch/x86/kernel/head_32.S +--- linux-2.6.35.4/arch/x86/kernel/head_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/head_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -25,6 +25,12 @@ + /* Physical address */ + #define pa(X) ((X) - __PAGE_OFFSET) + ++#ifdef CONFIG_PAX_KERNEXEC ++#define ta(X) (X) ++#else ++#define ta(X) ((X) - __PAGE_OFFSET) ++#endif ++ + /* + * References to members of the new_cpu_data structure. + */ +@@ -54,11 +60,7 @@ + * and small than max_low_pfn, otherwise will waste some page table entries + */ + +-#if PTRS_PER_PMD > 1 +-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD) +-#else +-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD) +-#endif ++#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE) + + /* Enough space to fit pagetables for the low memory linear map */ + MAPPING_BEYOND_END = \ +@@ -75,6 +77,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_P + RESERVE_BRK(pagetables, INIT_MAP_SIZE) + + /* ++ * Real beginning of normal "text" segment ++ */ ++ENTRY(stext) ++ENTRY(_stext) ++ ++/* + * 32-bit kernel entrypoint; only used by the boot CPU. On entry, + * %esi points to the real-mode code as a 32-bit pointer. + * CS and DS must be 4 GB flat segments, but we don't depend on +@@ -82,6 +90,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE) + * can. 
+ */ + __HEAD ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ jmp startup_32 ++/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */ ++.fill PAGE_SIZE-5,1,0xcc ++#endif ++ + ENTRY(startup_32) + /* test KEEP_SEGMENTS flag to see if the bootloader is asking + us to not reload segments */ +@@ -99,6 +114,55 @@ ENTRY(startup_32) + movl %eax,%gs + 2: + ++#ifdef CONFIG_SMP ++ movl $pa(cpu_gdt_table),%edi ++ movl $__per_cpu_load,%eax ++ movw %ax,__KERNEL_PERCPU + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_PERCPU + 4(%edi) ++ movb %ah,__KERNEL_PERCPU + 7(%edi) ++ movl $__per_cpu_end - 1,%eax ++ subl $__per_cpu_start,%eax ++ movw %ax,__KERNEL_PERCPU + 0(%edi) ++#endif ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ movl $NR_CPUS,%ecx ++ movl $pa(cpu_gdt_table),%edi ++1: ++ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi) ++ addl $PAGE_SIZE_asm,%edi ++ loop 1b ++#endif ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ movl $pa(boot_gdt),%edi ++ movl $__LOAD_PHYSICAL_ADDR,%eax ++ movw %ax,__BOOT_CS + 2(%edi) ++ rorl $16,%eax ++ movb %al,__BOOT_CS + 4(%edi) ++ movb %ah,__BOOT_CS + 7(%edi) ++ rorl $16,%eax ++ ++ ljmp $(__BOOT_CS),$1f ++1: ++ ++ movl $NR_CPUS,%ecx ++ movl $pa(cpu_gdt_table),%edi ++ addl $__PAGE_OFFSET,%eax ++1: ++ movw %ax,__KERNEL_CS + 2(%edi) ++ movw %ax,__KERNEXEC_KERNEL_CS + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_CS + 4(%edi) ++ movb %al,__KERNEXEC_KERNEL_CS + 4(%edi) ++ movb %ah,__KERNEL_CS + 7(%edi) ++ movb %ah,__KERNEXEC_KERNEL_CS + 7(%edi) ++ rorl $16,%eax ++ addl $PAGE_SIZE_asm,%edi ++ loop 1b ++#endif ++ + /* + * Clear BSS first so that there are no surprises... + */ +@@ -142,9 +206,7 @@ ENTRY(startup_32) + cmpl $num_subarch_entries, %eax + jae bad_subarch + +- movl pa(subarch_entries)(,%eax,4), %eax +- subl $__PAGE_OFFSET, %eax +- jmp *%eax ++ jmp *pa(subarch_entries)(,%eax,4) + + bad_subarch: + WEAK(lguest_entry) +@@ -156,10 +218,10 @@ WEAK(xen_entry) + __INITDATA + + subarch_entries: +- .long default_entry /* normal x86/PC */ +- .long lguest_entry /* lguest hypervisor */ +- .long xen_entry /* Xen hypervisor */ +- .long default_entry /* Moorestown MID */ ++ .long ta(default_entry) /* normal x86/PC */ ++ .long ta(lguest_entry) /* lguest hypervisor */ ++ .long ta(xen_entry) /* Xen hypervisor */ ++ .long ta(default_entry) /* Moorestown MID */ + num_subarch_entries = (. 
- subarch_entries) / 4 + .previous + #endif /* CONFIG_PARAVIRT */ +@@ -220,8 +282,11 @@ default_entry: + movl %eax, pa(max_pfn_mapped) + + /* Do early initialization of the fixmap area */ +- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax +- movl %eax,pa(swapper_pg_pmd+0x1000*KPMDS-8) ++#ifdef CONFIG_COMPAT_VDSO ++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_pmd+0x1000*KPMDS-8) ++#else ++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_pmd+0x1000*KPMDS-8) ++#endif + #else /* Not PAE */ + + page_pde_offset = (__PAGE_OFFSET >> 20); +@@ -251,8 +316,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20); + movl %eax, pa(max_pfn_mapped) + + /* Do early initialization of the fixmap area */ +- movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,%eax +- movl %eax,pa(swapper_pg_dir+0xffc) ++#ifdef CONFIG_COMPAT_VDSO ++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(swapper_pg_dir+0xffc) ++#else ++ movl $pa(swapper_pg_fixmap)+PDE_IDENT_ATTR,pa(swapper_pg_dir+0xffc) ++#endif + #endif + jmp 3f + /* +@@ -299,6 +367,7 @@ ENTRY(startup_32_smp) + orl %edx,%eax + movl %eax,%cr4 + ++#ifdef CONFIG_X86_PAE + testb $X86_CR4_PAE, %al # check if PAE is enabled + jz 6f + +@@ -323,6 +392,9 @@ ENTRY(startup_32_smp) + /* Make changes effective */ + wrmsr + ++ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4) ++#endif ++ + 6: + + /* +@@ -348,9 +420,7 @@ ENTRY(startup_32_smp) + + #ifdef CONFIG_SMP + cmpb $0, ready +- jz 1f /* Initial CPU cleans BSS */ +- jmp checkCPUtype +-1: ++ jnz checkCPUtype /* Initial CPU cleans BSS */ + #endif /* CONFIG_SMP */ + + /* +@@ -428,7 +498,7 @@ is386: movl $2,%ecx # set MP + 1: movl $(__KERNEL_DS),%eax # reload all the segment registers + movl %eax,%ss # after changing gdt. + +- movl $(__USER_DS),%eax # DS/ES contains default USER segment ++# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment + movl %eax,%ds + movl %eax,%es + +@@ -442,8 +512,11 @@ is386: movl $2,%ecx # set MP + */ + cmpb $0,ready + jne 1f +- movl $gdt_page,%eax ++ movl $cpu_gdt_table,%eax + movl $stack_canary,%ecx ++#ifdef CONFIG_SMP ++ addl $__per_cpu_load,%ecx ++#endif + movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax) + shrl $16, %ecx + movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax) +@@ -461,10 +534,6 @@ is386: movl $2,%ecx # set MP + #ifdef CONFIG_SMP + movb ready, %cl + movb $1, ready +- cmpb $0,%cl # the first CPU calls start_kernel +- je 1f +- movl (stack_start), %esp +-1: + #endif /* CONFIG_SMP */ + jmp *(initial_code) + +@@ -550,22 +619,22 @@ early_page_fault: + jmp early_fault + + early_fault: +- cld + #ifdef CONFIG_PRINTK ++ cmpl $1,%ss:early_recursion_flag ++ je hlt_loop ++ incl %ss:early_recursion_flag ++ cld + pusha + movl $(__KERNEL_DS),%eax + movl %eax,%ds + movl %eax,%es +- cmpl $2,early_recursion_flag +- je hlt_loop +- incl early_recursion_flag + movl %cr2,%eax + pushl %eax + pushl %edx /* trapno */ + pushl $fault_msg + call printk ++; call dump_stack + #endif +- call dump_stack + hlt_loop: + hlt + jmp hlt_loop +@@ -573,8 +642,11 @@ hlt_loop: + /* This is the default interrupt "handler" :-) */ + ALIGN + ignore_int: +- cld + #ifdef CONFIG_PRINTK ++ cmpl $2,%ss:early_recursion_flag ++ je hlt_loop ++ incl %ss:early_recursion_flag ++ cld + pushl %eax + pushl %ecx + pushl %edx +@@ -583,9 +655,6 @@ ignore_int: + movl $(__KERNEL_DS),%eax + movl %eax,%ds + movl %eax,%es +- cmpl $2,early_recursion_flag +- je hlt_loop +- incl early_recursion_flag + pushl 16(%esp) + pushl 24(%esp) + pushl 32(%esp) +@@ -612,27 +681,38 @@ ENTRY(initial_code) + /* + * BSS section + */ 
+-__PAGE_ALIGNED_BSS +- .align PAGE_SIZE_asm + #ifdef CONFIG_X86_PAE ++.section .swapper_pg_pmd,"a",@progbits + swapper_pg_pmd: + .fill 1024*KPMDS,4,0 + #else ++.section .swapper_pg_dir,"a",@progbits + ENTRY(swapper_pg_dir) + .fill 1024,4,0 + #endif ++ + swapper_pg_fixmap: + .fill 1024,4,0 ++ ++.section .empty_zero_page,"a",@progbits + ENTRY(empty_zero_page) + .fill 4096,1,0 + + /* ++ * The IDT has to be page-aligned to simplify the Pentium ++ * F0 0F bug workaround.. We have a special link segment ++ * for this. ++ */ ++.section .idt,"a",@progbits ++ENTRY(idt_table) ++ .fill 256,8,0 ++ ++/* + * This starts the data section. + */ + #ifdef CONFIG_X86_PAE +-__PAGE_ALIGNED_DATA +- /* Page-aligned for the benefit of paravirt? */ +- .align PAGE_SIZE_asm ++.section .swapper_pg_dir,"a",@progbits ++ + ENTRY(swapper_pg_dir) + .long pa(swapper_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */ + # if KPMDS == 3 +@@ -651,15 +731,24 @@ ENTRY(swapper_pg_dir) + # error "Kernel PMDs should be 1, 2 or 3" + # endif + .align PAGE_SIZE_asm /* needs to be page-sized too */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ENTRY(cpu_pgd) ++ .rept NR_CPUS ++ .fill 4,8,0 ++ .endr ++#endif ++ + #endif + + .data + ENTRY(stack_start) +- .long init_thread_union+THREAD_SIZE ++ .long init_thread_union+THREAD_SIZE-8 + .long __BOOT_DS + + ready: .byte 0 + ++.section .rodata,"a",@progbits + early_recursion_flag: + .long 0 + +@@ -695,7 +784,7 @@ fault_msg: + .word 0 # 32 bit align gdt_desc.address + boot_gdt_descr: + .word __BOOT_DS+7 +- .long boot_gdt - __PAGE_OFFSET ++ .long pa(boot_gdt) + + .word 0 # 32-bit align idt_desc.address + idt_descr: +@@ -706,7 +795,7 @@ idt_descr: + .word 0 # 32 bit align gdt_desc.address + ENTRY(early_gdt_descr) + .word GDT_ENTRIES*8-1 +- .long gdt_page /* Overwritten for secondary CPUs */ ++ .long cpu_gdt_table /* Overwritten for secondary CPUs */ + + /* + * The boot_gdt must mirror the equivalent in setup.S and is +@@ -715,5 +804,65 @@ ENTRY(early_gdt_descr) + .align L1_CACHE_BYTES + ENTRY(boot_gdt) + .fill GDT_ENTRY_BOOT_CS,8,0 +- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */ +- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */ ++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */ ++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */ ++ ++ .align PAGE_SIZE_asm ++ENTRY(cpu_gdt_table) ++ .rept NR_CPUS ++ .quad 0x0000000000000000 /* NULL descriptor */ ++ .quad 0x0000000000000000 /* 0x0b reserved */ ++ .quad 0x0000000000000000 /* 0x13 reserved */ ++ .quad 0x0000000000000000 /* 0x1b reserved */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */ ++#else ++ .quad 0x0000000000000000 /* 0x20 unused */ ++#endif ++ ++ .quad 0x0000000000000000 /* 0x28 unused */ ++ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */ ++ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */ ++ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */ ++ .quad 0x0000000000000000 /* 0x4b reserved */ ++ .quad 0x0000000000000000 /* 0x53 reserved */ ++ .quad 0x0000000000000000 /* 0x5b reserved */ ++ ++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */ ++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */ ++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */ ++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */ ++ ++ .quad 0x0000000000000000 /* 0x80 TSS descriptor */ ++ .quad 0x0000000000000000 /* 0x88 LDT descriptor */ ++ ++ /* ++ * Segments used for calling PnP BIOS have byte granularity. 
++ * The code segments and data segments have fixed 64k limits, ++ * the transfer segment sizes are set at run time. ++ */ ++ .quad 0x00409b000000ffff /* 0x90 32-bit code */ ++ .quad 0x00009b000000ffff /* 0x98 16-bit code */ ++ .quad 0x000093000000ffff /* 0xa0 16-bit data */ ++ .quad 0x0000930000000000 /* 0xa8 16-bit data */ ++ .quad 0x0000930000000000 /* 0xb0 16-bit data */ ++ ++ /* ++ * The APM segments have byte granularity and their bases ++ * are set at run time. All have 64k limits. ++ */ ++ .quad 0x00409b000000ffff /* 0xb8 APM CS code */ ++ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */ ++ .quad 0x004093000000ffff /* 0xc8 APM DS data */ ++ ++ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */ ++ .quad 0x0040930000000000 /* 0xd8 - PERCPU */ ++ .quad 0x0040910000000018 /* 0xe0 - STACK_CANARY */ ++ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */ ++ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */ ++ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */ ++ ++ /* Be sure this is zeroed to avoid false validations in Xen */ ++ .fill PAGE_SIZE_asm - GDT_SIZE,1,0 ++ .endr +diff -urNp linux-2.6.35.4/arch/x86/kernel/head_64.S linux-2.6.35.4/arch/x86/kernel/head_64.S +--- linux-2.6.35.4/arch/x86/kernel/head_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/head_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -19,6 +19,7 @@ + #include <asm/cache.h> + #include <asm/processor-flags.h> + #include <asm/percpu.h> ++#include <asm/cpufeature.h> + + #ifdef CONFIG_PARAVIRT + #include <asm/asm-offsets.h> +@@ -38,6 +39,10 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET + L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET) + L4_START_KERNEL = pgd_index(__START_KERNEL_map) + L3_START_KERNEL = pud_index(__START_KERNEL_map) ++L4_VMALLOC_START = pgd_index(VMALLOC_START) ++L3_VMALLOC_START = pud_index(VMALLOC_START) ++L4_VMEMMAP_START = pgd_index(VMEMMAP_START) ++L3_VMEMMAP_START = pud_index(VMEMMAP_START) + + .text + __HEAD +@@ -85,35 +90,22 @@ startup_64: + */ + addq %rbp, init_level4_pgt + 0(%rip) + addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip) ++ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip) + addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip) + + addq %rbp, level3_ident_pgt + 0(%rip) ++#ifndef CONFIG_XEN ++ addq %rbp, level3_ident_pgt + 8(%rip) ++#endif + +- addq %rbp, level3_kernel_pgt + (510*8)(%rip) +- addq %rbp, level3_kernel_pgt + (511*8)(%rip) ++ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip) + +- addq %rbp, level2_fixmap_pgt + (506*8)(%rip) ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip) ++ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8+8)(%rip) + +- /* Add an Identity mapping if I am above 1G */ +- leaq _text(%rip), %rdi +- andq $PMD_PAGE_MASK, %rdi +- +- movq %rdi, %rax +- shrq $PUD_SHIFT, %rax +- andq $(PTRS_PER_PUD - 1), %rax +- jz ident_complete +- +- leaq (level2_spare_pgt - __START_KERNEL_map + _KERNPG_TABLE)(%rbp), %rdx +- leaq level3_ident_pgt(%rip), %rbx +- movq %rdx, 0(%rbx, %rax, 8) +- +- movq %rdi, %rax +- shrq $PMD_SHIFT, %rax +- andq $(PTRS_PER_PMD - 1), %rax +- leaq __PAGE_KERNEL_IDENT_LARGE_EXEC(%rdi), %rdx +- leaq level2_spare_pgt(%rip), %rbx +- movq %rdx, 0(%rbx, %rax, 8) +-ident_complete: ++ addq %rbp, level2_fixmap_pgt + (506*8)(%rip) ++ addq %rbp, level2_fixmap_pgt + (507*8)(%rip) + + /* + * Fixup the kernel text+data virtual addresses. 
Note that +@@ -161,8 +153,8 @@ ENTRY(secondary_startup_64) + * after the boot processor executes this code. + */ + +- /* Enable PAE mode and PGE */ +- movl $(X86_CR4_PAE | X86_CR4_PGE), %eax ++ /* Enable PAE mode and PSE/PGE */ ++ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %eax + movq %rax, %cr4 + + /* Setup early boot stage 4 level pagetables. */ +@@ -184,9 +176,14 @@ ENTRY(secondary_startup_64) + movl $MSR_EFER, %ecx + rdmsr + btsl $_EFER_SCE, %eax /* Enable System Call */ +- btl $20,%edi /* No Execute supported? */ ++ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */ + jnc 1f + btsl $_EFER_NX, %eax ++ leaq init_level4_pgt(%rip), %rdi ++ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi) ++ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi) ++ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi) ++ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip) + 1: wrmsr /* Make changes effective */ + + /* Setup cr0 */ +@@ -271,7 +268,7 @@ ENTRY(secondary_startup_64) + bad_address: + jmp bad_address + +- .section ".init.text","ax" ++ __INIT + #ifdef CONFIG_EARLY_PRINTK + .globl early_idt_handlers + early_idt_handlers: +@@ -316,18 +313,23 @@ ENTRY(early_idt_handler) + #endif /* EARLY_PRINTK */ + 1: hlt + jmp 1b ++ .previous + + #ifdef CONFIG_EARLY_PRINTK ++ __INITDATA + early_recursion_flag: + .long 0 ++ .previous + ++ .section .rodata,"a",@progbits + early_idt_msg: + .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n" + early_idt_ripmsg: + .asciz "RIP %s\n" +-#endif /* CONFIG_EARLY_PRINTK */ + .previous ++#endif /* CONFIG_EARLY_PRINTK */ + ++ .section .rodata,"a",@progbits + #define NEXT_PAGE(name) \ + .balign PAGE_SIZE; \ + ENTRY(name) +@@ -351,13 +353,36 @@ NEXT_PAGE(init_level4_pgt) + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_PAGE_OFFSET*8, 0 + .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMALLOC_START*8, 0 ++ .quad level3_vmalloc_pgt - __START_KERNEL_map + _KERNPG_TABLE ++ .org init_level4_pgt + L4_VMEMMAP_START*8, 0 ++ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + .org init_level4_pgt + L4_START_KERNEL*8, 0 + /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */ + .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++NEXT_PAGE(cpu_pgd) ++ .rept NR_CPUS ++ .fill 512,8,0 ++ .endr ++#endif ++ + NEXT_PAGE(level3_ident_pgt) + .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE ++#ifdef CONFIG_XEN + .fill 511,8,0 ++#else ++ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE ++ .fill 510,8,0 ++#endif ++ ++NEXT_PAGE(level3_vmalloc_pgt) ++ .fill 512,8,0 ++ ++NEXT_PAGE(level3_vmemmap_pgt) ++ .fill L3_VMEMMAP_START,8,0 ++ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE + + NEXT_PAGE(level3_kernel_pgt) + .fill L3_START_KERNEL,8,0 +@@ -365,20 +390,23 @@ NEXT_PAGE(level3_kernel_pgt) + .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE + .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE + ++NEXT_PAGE(level2_vmemmap_pgt) ++ .fill 512,8,0 ++ + NEXT_PAGE(level2_fixmap_pgt) +- .fill 506,8,0 +- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE +- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */ +- .fill 5,8,0 ++ .fill 507,8,0 ++ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE ++ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */ ++ .fill 4,8,0 + +-NEXT_PAGE(level1_fixmap_pgt) ++NEXT_PAGE(level1_vsyscall_pgt) + .fill 512,8,0 + +-NEXT_PAGE(level2_ident_pgt) +- /* 
Since I easily can, map the first 1G. ++ /* Since I easily can, map the first 2G. + * Don't set NX because code runs from these pages. + */ +- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD) ++NEXT_PAGE(level2_ident_pgt) ++ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD) + + NEXT_PAGE(level2_kernel_pgt) + /* +@@ -391,33 +419,55 @@ NEXT_PAGE(level2_kernel_pgt) + * If you want to increase this then increase MODULES_VADDR + * too.) + */ +- PMDS(0, __PAGE_KERNEL_LARGE_EXEC, +- KERNEL_IMAGE_SIZE/PMD_SIZE) +- +-NEXT_PAGE(level2_spare_pgt) +- .fill 512, 8, 0 ++ PMDS(0, __PAGE_KERNEL_LARGE_EXEC, KERNEL_IMAGE_SIZE/PMD_SIZE) + + #undef PMDS + #undef NEXT_PAGE + +- .data ++ .align PAGE_SIZE ++ENTRY(cpu_gdt_table) ++ .rept NR_CPUS ++ .quad 0x0000000000000000 /* NULL descriptor */ ++ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */ ++ .quad 0x00af9b000000ffff /* __KERNEL_CS */ ++ .quad 0x00cf93000000ffff /* __KERNEL_DS */ ++ .quad 0x00cffb000000ffff /* __USER32_CS */ ++ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */ ++ .quad 0x00affb000000ffff /* __USER_CS */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */ ++#else ++ .quad 0x0 /* unused */ ++#endif ++ ++ .quad 0,0 /* TSS */ ++ .quad 0,0 /* LDT */ ++ .quad 0,0,0 /* three TLS descriptors */ ++ .quad 0x0000f40000000000 /* node/CPU stored in limit */ ++ /* asm/segment.h:GDT_ENTRIES must match this */ ++ ++ /* zero the remaining page */ ++ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0 ++ .endr ++ + .align 16 + .globl early_gdt_descr + early_gdt_descr: + .word GDT_ENTRIES*8-1 + early_gdt_descr_base: +- .quad INIT_PER_CPU_VAR(gdt_page) ++ .quad cpu_gdt_table + + ENTRY(phys_base) + /* This must match the first entry in level2_kernel_pgt */ + .quad 0x0000000000000000 + + #include "../../x86/xen/xen-head.S" +- +- .section .bss, "aw", @nobits ++ ++ .section .rodata,"a",@progbits + .align L1_CACHE_BYTES + ENTRY(idt_table) +- .skip IDT_ENTRIES * 16 ++ .fill 512,8,0 + + __PAGE_ALIGNED_BSS + .align PAGE_SIZE +diff -urNp linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c +--- linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/i386_ksyms_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void); + EXPORT_SYMBOL(cmpxchg8b_emu); + #endif + ++EXPORT_SYMBOL_GPL(cpu_gdt_table); ++ + /* Networking helper routines. */ + EXPORT_SYMBOL(csum_partial_copy_generic); ++EXPORT_SYMBOL(csum_partial_copy_generic_to_user); ++EXPORT_SYMBOL(csum_partial_copy_generic_from_user); + + EXPORT_SYMBOL(__get_user_1); + EXPORT_SYMBOL(__get_user_2); +@@ -36,3 +40,7 @@ EXPORT_SYMBOL(strstr); + + EXPORT_SYMBOL(csum_partial); + EXPORT_SYMBOL(empty_zero_page); ++ ++#ifdef CONFIG_PAX_KERNEXEC ++EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR); ++#endif +diff -urNp linux-2.6.35.4/arch/x86/kernel/init_task.c linux-2.6.35.4/arch/x86/kernel/init_task.c +--- linux-2.6.35.4/arch/x86/kernel/init_task.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/init_task.c 2010-09-17 20:12:09.000000000 -0400 +@@ -38,5 +38,5 @@ EXPORT_SYMBOL(init_task); + * section. Since TSS's are completely CPU-local, we want them + * on exact cacheline boundaries, to eliminate cacheline ping-pong. + */ +-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS; +- ++struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... 
NR_CPUS-1] = INIT_TSS }; ++EXPORT_SYMBOL(init_tss); +diff -urNp linux-2.6.35.4/arch/x86/kernel/ioport.c linux-2.6.35.4/arch/x86/kernel/ioport.c +--- linux-2.6.35.4/arch/x86/kernel/ioport.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/ioport.c 2010-09-17 20:12:37.000000000 -0400 +@@ -6,6 +6,7 @@ + #include <linux/sched.h> + #include <linux/kernel.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/errno.h> + #include <linux/types.h> + #include <linux/ioport.h> +@@ -41,6 +42,12 @@ asmlinkage long sys_ioperm(unsigned long + + if ((from + num <= from) || (from + num > IO_BITMAP_BITS)) + return -EINVAL; ++#ifdef CONFIG_GRKERNSEC_IO ++ if (turn_on && grsec_disable_privio) { ++ gr_handle_ioperm(); ++ return -EPERM; ++ } ++#endif + if (turn_on && !capable(CAP_SYS_RAWIO)) + return -EPERM; + +@@ -67,7 +74,7 @@ asmlinkage long sys_ioperm(unsigned long + * because the ->io_bitmap_max value must match the bitmap + * contents: + */ +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + + set_bitmap(t->io_bitmap_ptr, from, num, !turn_on); + +@@ -112,6 +119,12 @@ long sys_iopl(unsigned int level, struct + return -EINVAL; + /* Trying to gain more privileges? */ + if (level > old) { ++#ifdef CONFIG_GRKERNSEC_IO ++ if (grsec_disable_privio) { ++ gr_handle_iopl(); ++ return -EPERM; ++ } ++#endif + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + } +diff -urNp linux-2.6.35.4/arch/x86/kernel/irq_32.c linux-2.6.35.4/arch/x86/kernel/irq_32.c +--- linux-2.6.35.4/arch/x86/kernel/irq_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/irq_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -94,7 +94,7 @@ execute_on_irq_stack(int overflow, struc + return 0; + + /* build the stack frame on the IRQ stack */ +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); + irqctx->tinfo.task = curctx->tinfo.task; + irqctx->tinfo.previous_esp = current_stack_pointer; + +@@ -175,7 +175,7 @@ asmlinkage void do_softirq(void) + irqctx->tinfo.previous_esp = current_stack_pointer; + + /* build the stack frame on the softirq stack */ +- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx)); ++ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8); + + call_on_stack(__do_softirq, isp); + /* +diff -urNp linux-2.6.35.4/arch/x86/kernel/kgdb.c linux-2.6.35.4/arch/x86/kernel/kgdb.c +--- linux-2.6.35.4/arch/x86/kernel/kgdb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/kgdb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -77,7 +77,7 @@ void pt_regs_to_gdb_regs(unsigned long * + gdb_regs[GDB_CS] = regs->cs; + gdb_regs[GDB_FS] = 0xFFFF; + gdb_regs[GDB_GS] = 0xFFFF; +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + gdb_regs[GDB_SS] = regs->ss; + gdb_regs[GDB_SP] = regs->sp; + } else { +@@ -720,7 +720,7 @@ void kgdb_arch_set_pc(struct pt_regs *re + regs->ip = ip; + } + +-struct kgdb_arch arch_kgdb_ops = { ++const struct kgdb_arch arch_kgdb_ops = { + /* Breakpoint instruction: */ + .gdb_bpt_instr = { 0xcc }, + .flags = KGDB_HW_BREAKPOINT, +diff -urNp linux-2.6.35.4/arch/x86/kernel/kprobes.c linux-2.6.35.4/arch/x86/kernel/kprobes.c +--- linux-2.6.35.4/arch/x86/kernel/kprobes.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/kprobes.c 2010-09-17 20:12:09.000000000 -0400 +@@ -114,9 +114,12 @@ static void __kprobes __synthesize_relat + s32 raddr; + } __attribute__((packed)) *insn; + +- insn = (struct __arch_relative_insn *)from; ++ insn = (struct 
__arch_relative_insn *)(ktla_ktva(from)); ++ ++ pax_open_kernel(); + insn->raddr = (s32)((long)(to) - ((long)(from) + 5)); + insn->op = op; ++ pax_close_kernel(); + } + + /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/ +@@ -315,7 +318,9 @@ static int __kprobes __copy_instruction( + } + } + insn_get_length(&insn); ++ pax_open_kernel(); + memcpy(dest, insn.kaddr, insn.length); ++ pax_close_kernel(); + + #ifdef CONFIG_X86_64 + if (insn_rip_relative(&insn)) { +@@ -339,7 +344,9 @@ static int __kprobes __copy_instruction( + (u8 *) dest; + BUG_ON((s64) (s32) newdisp != newdisp); /* Sanity check. */ + disp = (u8 *) dest + insn_offset_displacement(&insn); ++ pax_open_kernel(); + *(s32 *) disp = (s32) newdisp; ++ pax_close_kernel(); + } + #endif + return insn.length; +@@ -353,12 +360,12 @@ static void __kprobes arch_copy_kprobe(s + */ + __copy_instruction(p->ainsn.insn, p->addr, 0); + +- if (can_boost(p->addr)) ++ if (can_boost(ktla_ktva(p->addr))) + p->ainsn.boostable = 0; + else + p->ainsn.boostable = -1; + +- p->opcode = *p->addr; ++ p->opcode = *(ktla_ktva(p->addr)); + } + + int __kprobes arch_prepare_kprobe(struct kprobe *p) +@@ -475,7 +482,7 @@ static void __kprobes setup_singlestep(s + * nor set current_kprobe, because it doesn't use single + * stepping. + */ +- regs->ip = (unsigned long)p->ainsn.insn; ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); + preempt_enable_no_resched(); + return; + } +@@ -494,7 +501,7 @@ static void __kprobes setup_singlestep(s + if (p->opcode == BREAKPOINT_INSTRUCTION) + regs->ip = (unsigned long)p->addr; + else +- regs->ip = (unsigned long)p->ainsn.insn; ++ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn); + } + + /* +@@ -573,7 +580,7 @@ static int __kprobes kprobe_handler(stru + setup_singlestep(p, regs, kcb, 0); + return 1; + } +- } else if (*addr != BREAKPOINT_INSTRUCTION) { ++ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) { + /* + * The breakpoint instruction was removed right + * after we hit it. 
Another cpu has removed +@@ -799,7 +806,7 @@ static void __kprobes resume_execution(s + struct pt_regs *regs, struct kprobe_ctlblk *kcb) + { + unsigned long *tos = stack_addr(regs); +- unsigned long copy_ip = (unsigned long)p->ainsn.insn; ++ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn); + unsigned long orig_ip = (unsigned long)p->addr; + kprobe_opcode_t *insn = p->ainsn.insn; + +@@ -982,7 +989,7 @@ int __kprobes kprobe_exceptions_notify(s + struct die_args *args = data; + int ret = NOTIFY_DONE; + +- if (args->regs && user_mode_vm(args->regs)) ++ if (args->regs && user_mode(args->regs)) + return ret; + + switch (val) { +diff -urNp linux-2.6.35.4/arch/x86/kernel/ldt.c linux-2.6.35.4/arch/x86/kernel/ldt.c +--- linux-2.6.35.4/arch/x86/kernel/ldt.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/ldt.c 2010-09-17 20:12:09.000000000 -0400 +@@ -67,13 +67,13 @@ static int alloc_ldt(mm_context_t *pc, i + if (reload) { + #ifdef CONFIG_SMP + preempt_disable(); +- load_LDT(pc); ++ load_LDT_nolock(pc); + if (!cpumask_equal(mm_cpumask(current->mm), + cpumask_of(smp_processor_id()))) + smp_call_function(flush_ldt, current->mm, 1); + preempt_enable(); + #else +- load_LDT(pc); ++ load_LDT_nolock(pc); + #endif + } + if (oldsize) { +@@ -95,7 +95,7 @@ static inline int copy_ldt(mm_context_t + return err; + + for (i = 0; i < old->size; i++) +- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE); ++ write_ldt_entry(new->ldt, i, old->ldt + i); + return 0; + } + +@@ -116,6 +116,24 @@ int init_new_context(struct task_struct + retval = copy_ldt(&mm->context, &old_mm->context); + mutex_unlock(&old_mm->context.lock); + } ++ ++ if (tsk == current) { ++ mm->context.vdso = ~0UL; ++ ++#ifdef CONFIG_X86_32 ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ mm->context.user_cs_base = 0UL; ++ mm->context.user_cs_limit = ~0UL; ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP) ++ cpus_clear(mm->context.cpu_user_cs_mask); ++#endif ++ ++#endif ++#endif ++ ++ } ++ + return retval; + } + +@@ -230,6 +248,13 @@ static int write_ldt(void __user *ptr, u + } + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) { ++ error = -EINVAL; ++ goto out_unlock; ++ } ++#endif ++ + fill_ldt(&ldt, &ldt_info); + if (oldmode) + ldt.avl = 0; +diff -urNp linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c +--- linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/machine_kexec_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -27,7 +27,7 @@ + #include <asm/cacheflush.h> + #include <asm/debugreg.h> + +-static void set_idt(void *newidt, __u16 limit) ++static void set_idt(struct desc_struct *newidt, __u16 limit) + { + struct desc_ptr curidt; + +@@ -39,7 +39,7 @@ static void set_idt(void *newidt, __u16 + } + + +-static void set_gdt(void *newgdt, __u16 limit) ++static void set_gdt(struct desc_struct *newgdt, __u16 limit) + { + struct desc_ptr curgdt; + +@@ -217,7 +217,7 @@ void machine_kexec(struct kimage *image) + } + + control_page = page_address(image->control_code_page); +- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE); ++ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE); + + relocate_kernel_ptr = control_page; + page_list[PA_CONTROL_PAGE] = __pa(control_page); +diff -urNp linux-2.6.35.4/arch/x86/kernel/microcode_amd.c 
linux-2.6.35.4/arch/x86/kernel/microcode_amd.c
+--- linux-2.6.35.4/arch/x86/kernel/microcode_amd.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/microcode_amd.c 2010-09-17 20:12:09.000000000 -0400
+@@ -331,7 +331,7 @@ static void microcode_fini_cpu_amd(int c
+ uci->mc = NULL;
+ }
+
+-static struct microcode_ops microcode_amd_ops = {
++static const struct microcode_ops microcode_amd_ops = {
+ .request_microcode_user = request_microcode_user,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info_amd,
+@@ -339,7 +339,7 @@ static struct microcode_ops microcode_am
+ .microcode_fini_cpu = microcode_fini_cpu_amd,
+ };
+
+-struct microcode_ops * __init init_amd_microcode(void)
++const struct microcode_ops * __init init_amd_microcode(void)
+ {
+ return &microcode_amd_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/x86/kernel/microcode_core.c linux-2.6.35.4/arch/x86/kernel/microcode_core.c
+--- linux-2.6.35.4/arch/x86/kernel/microcode_core.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/microcode_core.c 2010-09-17 20:12:09.000000000 -0400
+@@ -92,7 +92,7 @@ MODULE_LICENSE("GPL");
+
+ #define MICROCODE_VERSION "2.00"
+
+-static struct microcode_ops *microcode_ops;
++static const struct microcode_ops *microcode_ops;
+
+ /*
+ * Synchronization.
+diff -urNp linux-2.6.35.4/arch/x86/kernel/microcode_intel.c linux-2.6.35.4/arch/x86/kernel/microcode_intel.c
+--- linux-2.6.35.4/arch/x86/kernel/microcode_intel.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/microcode_intel.c 2010-09-17 20:12:09.000000000 -0400
+@@ -446,13 +446,13 @@ static enum ucode_state request_microcod
+
+ static int get_ucode_user(void *to, const void *from, size_t n)
+ {
+- return copy_from_user(to, from, n);
++ return copy_from_user(to, (__force const void __user *)from, n);
+ }
+
+ static enum ucode_state
+ request_microcode_user(int cpu, const void __user *buf, size_t size)
+ {
+- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
++ return generic_load_microcode(cpu, (__force void *)buf, size, &get_ucode_user);
+ }
+
+ static void microcode_fini_cpu(int cpu)
+@@ -463,7 +463,7 @@ static void microcode_fini_cpu(int cpu)
+ uci->mc = NULL;
+ }
+
+-static struct microcode_ops microcode_intel_ops = {
++static const struct microcode_ops microcode_intel_ops = {
+ .request_microcode_user = request_microcode_user,
+ .request_microcode_fw = request_microcode_fw,
+ .collect_cpu_info = collect_cpu_info,
+@@ -471,7 +471,7 @@ static struct microcode_ops microcode_in
+ .microcode_fini_cpu = microcode_fini_cpu,
+ };
+
+-struct microcode_ops * __init init_intel_microcode(void)
++const struct microcode_ops * __init init_intel_microcode(void)
+ {
+ return &microcode_intel_ops;
+ }
+diff -urNp linux-2.6.35.4/arch/x86/kernel/module.c linux-2.6.35.4/arch/x86/kernel/module.c
+--- linux-2.6.35.4/arch/x86/kernel/module.c 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/arch/x86/kernel/module.c 2010-09-17 20:12:09.000000000 -0400
+@@ -35,7 +35,7 @@
+ #define DEBUGP(fmt...)
+ #endif + +-void *module_alloc(unsigned long size) ++static void *__module_alloc(unsigned long size, pgprot_t prot) + { + struct vm_struct *area; + +@@ -49,8 +49,18 @@ void *module_alloc(unsigned long size) + if (!area) + return NULL; + +- return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM, +- PAGE_KERNEL_EXEC); ++ return __vmalloc_area(area, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot); ++} ++ ++void *module_alloc(unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return __module_alloc(size, PAGE_KERNEL); ++#else ++ return __module_alloc(size, PAGE_KERNEL_EXEC); ++#endif ++ + } + + /* Free memory returned from module_alloc */ +@@ -59,6 +69,40 @@ void module_free(struct module *mod, voi + vfree(module_region); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++void *module_alloc_exec(unsigned long size) ++{ ++ struct vm_struct *area; ++ ++ if (size == 0) ++ return NULL; ++ ++ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END); ++ return area ? area->addr : NULL; ++} ++EXPORT_SYMBOL(module_alloc_exec); ++ ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ vunmap(module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++#else ++void module_free_exec(struct module *mod, void *module_region) ++{ ++ module_free(mod, module_region); ++} ++EXPORT_SYMBOL(module_free_exec); ++ ++void *module_alloc_exec(unsigned long size) ++{ ++ return __module_alloc(size, PAGE_KERNEL_RX); ++} ++EXPORT_SYMBOL(module_alloc_exec); ++#endif ++#endif ++ + /* We don't need anything special. */ + int module_frob_arch_sections(Elf_Ehdr *hdr, + Elf_Shdr *sechdrs, +@@ -78,14 +122,16 @@ int apply_relocate(Elf32_Shdr *sechdrs, + unsigned int i; + Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr; + Elf32_Sym *sym; +- uint32_t *location; ++ uint32_t *plocation, location; + + DEBUGP("Applying relocate section %u to %u\n", relsec, + sechdrs[relsec].sh_info); + for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) { + /* This is where to make the change */ +- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr +- + rel[i].r_offset; ++ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset; ++ location = (uint32_t)plocation; ++ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR) ++ plocation = ktla_ktva((void *)plocation); + /* This is the symbol it is referring to. Note that all + undefined symbols have been resolved. 
*/ + sym = (Elf32_Sym *)sechdrs[symindex].sh_addr +@@ -94,11 +140,15 @@ int apply_relocate(Elf32_Shdr *sechdrs, + switch (ELF32_R_TYPE(rel[i].r_info)) { + case R_386_32: + /* We add the value into the location given */ +- *location += sym->st_value; ++ pax_open_kernel(); ++ *plocation += sym->st_value; ++ pax_close_kernel(); + break; + case R_386_PC32: + /* Add the value, subtract its postition */ +- *location += sym->st_value - (uint32_t)location; ++ pax_open_kernel(); ++ *plocation += sym->st_value - location; ++ pax_close_kernel(); + break; + default: + printk(KERN_ERR "module %s: Unknown relocation: %u\n", +@@ -154,21 +204,30 @@ int apply_relocate_add(Elf64_Shdr *sechd + case R_X86_64_NONE: + break; + case R_X86_64_64: ++ pax_open_kernel(); + *(u64 *)loc = val; ++ pax_close_kernel(); + break; + case R_X86_64_32: ++ pax_open_kernel(); + *(u32 *)loc = val; ++ pax_close_kernel(); + if (val != *(u32 *)loc) + goto overflow; + break; + case R_X86_64_32S: ++ pax_open_kernel(); + *(s32 *)loc = val; ++ pax_close_kernel(); + if ((s64)val != *(s32 *)loc) + goto overflow; + break; + case R_X86_64_PC32: + val -= (u64)loc; ++ pax_open_kernel(); + *(u32 *)loc = val; ++ pax_close_kernel(); ++ + #if 0 + if ((s64)val != *(s32 *)loc) + goto overflow; +diff -urNp linux-2.6.35.4/arch/x86/kernel/paravirt.c linux-2.6.35.4/arch/x86/kernel/paravirt.c +--- linux-2.6.35.4/arch/x86/kernel/paravirt.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/paravirt.c 2010-09-17 20:12:09.000000000 -0400 +@@ -122,7 +122,7 @@ unsigned paravirt_patch_jmp(void *insnbu + * corresponding structure. */ + static void *get_call_destination(u8 type) + { +- struct paravirt_patch_template tmpl = { ++ const struct paravirt_patch_template tmpl = { + .pv_init_ops = pv_init_ops, + .pv_time_ops = pv_time_ops, + .pv_cpu_ops = pv_cpu_ops, +@@ -145,14 +145,14 @@ unsigned paravirt_patch_default(u8 type, + if (opfunc == NULL) + /* If there's no function, patch it with a ud2a (BUG) */ + ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a)); +- else if (opfunc == _paravirt_nop) ++ else if (opfunc == (void *)_paravirt_nop) + /* If the operation is a nop, then nop the callsite */ + ret = paravirt_patch_nop(); + + /* identity functions just return their single argument */ +- else if (opfunc == _paravirt_ident_32) ++ else if (opfunc == (void *)_paravirt_ident_32) + ret = paravirt_patch_ident_32(insnbuf, len); +- else if (opfunc == _paravirt_ident_64) ++ else if (opfunc == (void *)_paravirt_ident_64) + ret = paravirt_patch_ident_64(insnbuf, len); + + else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) || +@@ -178,7 +178,7 @@ unsigned paravirt_patch_insns(void *insn + if (insn_len > len || start == NULL) + insn_len = len; + else +- memcpy(insnbuf, start, insn_len); ++ memcpy(insnbuf, ktla_ktva(start), insn_len); + + return insn_len; + } +@@ -294,22 +294,22 @@ void arch_flush_lazy_mmu_mode(void) + preempt_enable(); + } + +-struct pv_info pv_info = { ++struct pv_info pv_info __read_only = { + .name = "bare hardware", + .paravirt_enabled = 0, + .kernel_rpl = 0, + .shared_kernel_pmd = 1, /* Only used when CONFIG_X86_PAE is set */ + }; + +-struct pv_init_ops pv_init_ops = { ++struct pv_init_ops pv_init_ops __read_only = { + .patch = native_patch, + }; + +-struct pv_time_ops pv_time_ops = { ++struct pv_time_ops pv_time_ops __read_only = { + .sched_clock = native_sched_clock, + }; + +-struct pv_irq_ops pv_irq_ops = { ++struct pv_irq_ops pv_irq_ops __read_only = { + .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl), + .restore_fl = 
__PV_IS_CALLEE_SAVE(native_restore_fl), + .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable), +@@ -321,7 +321,7 @@ struct pv_irq_ops pv_irq_ops = { + #endif + }; + +-struct pv_cpu_ops pv_cpu_ops = { ++struct pv_cpu_ops pv_cpu_ops __read_only = { + .cpuid = native_cpuid, + .get_debugreg = native_get_debugreg, + .set_debugreg = native_set_debugreg, +@@ -382,7 +382,7 @@ struct pv_cpu_ops pv_cpu_ops = { + .end_context_switch = paravirt_nop, + }; + +-struct pv_apic_ops pv_apic_ops = { ++struct pv_apic_ops pv_apic_ops __read_only = { + #ifdef CONFIG_X86_LOCAL_APIC + .startup_ipi_hook = paravirt_nop, + #endif +@@ -396,7 +396,7 @@ struct pv_apic_ops pv_apic_ops = { + #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64) + #endif + +-struct pv_mmu_ops pv_mmu_ops = { ++struct pv_mmu_ops pv_mmu_ops __read_only = { + + .read_cr2 = native_read_cr2, + .write_cr2 = native_write_cr2, +@@ -463,6 +463,12 @@ struct pv_mmu_ops pv_mmu_ops = { + }, + + .set_fixmap = native_set_fixmap, ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ .pax_open_kernel = native_pax_open_kernel, ++ .pax_close_kernel = native_pax_close_kernel, ++#endif ++ + }; + + EXPORT_SYMBOL_GPL(pv_time_ops); +diff -urNp linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c +--- linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/paravirt-spinlocks.c 2010-09-17 20:12:09.000000000 -0400 +@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t + arch_spin_lock(lock); + } + +-struct pv_lock_ops pv_lock_ops = { ++struct pv_lock_ops pv_lock_ops __read_only = { + #ifdef CONFIG_SMP + .spin_is_locked = __ticket_spin_is_locked, + .spin_is_contended = __ticket_spin_is_contended, +diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c +--- linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/pci-calgary_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -475,7 +475,7 @@ static void calgary_free_coherent(struct + free_pages((unsigned long)vaddr, get_order(size)); + } + +-static struct dma_map_ops calgary_dma_ops = { ++static const struct dma_map_ops calgary_dma_ops = { + .alloc_coherent = calgary_alloc_coherent, + .free_coherent = calgary_free_coherent, + .map_sg = calgary_map_sg, +diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-dma.c linux-2.6.35.4/arch/x86/kernel/pci-dma.c +--- linux-2.6.35.4/arch/x86/kernel/pci-dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/pci-dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -16,7 +16,7 @@ + + static int forbid_dac __read_mostly; + +-struct dma_map_ops *dma_ops = &nommu_dma_ops; ++const struct dma_map_ops *dma_ops = &nommu_dma_ops; + EXPORT_SYMBOL(dma_ops); + + static int iommu_sac_force __read_mostly; +@@ -248,7 +248,7 @@ early_param("iommu", iommu_setup); + + int dma_supported(struct device *dev, u64 mask) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + #ifdef CONFIG_PCI + if (mask > 0xffffffff && forbid_dac > 0) { +diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c +--- linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/pci-gart_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -699,7 +699,7 @@ static __init int init_k8_gatt(struct ag + return -1; + } + +-static struct dma_map_ops gart_dma_ops = { 
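Note the contrast with the const tables elsewhere in this patch: the pv_*_ops structures above are tagged __read_only rather than const, because paravirt patching still writes them during early boot. A minimal model of the assumed annotation, which parks the object in a dedicated section that the real patch can write-protect once init is over (here it is just a named section):

#include <stdio.h>

/* assumed definition: a section the real patch maps read-only after
 * init; in this standalone model it stays an ordinary data section */
#define __read_only __attribute__((__section__(".data..read_only")))

struct pv_ops {
	int (*nop)(void);
};

static int nop_impl(void)
{
	return 0;
}

struct pv_ops pv_example __read_only = {
	.nop = nop_impl,	/* written during boot, sealed afterwards */
};

int main(void)
{
	printf("%d\n", pv_example.nop());
	return 0;
}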
++static const struct dma_map_ops gart_dma_ops = { + .map_sg = gart_map_sg, + .unmap_sg = gart_unmap_sg, + .map_page = gart_map_page, +diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-nommu.c linux-2.6.35.4/arch/x86/kernel/pci-nommu.c +--- linux-2.6.35.4/arch/x86/kernel/pci-nommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/pci-nommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -95,7 +95,7 @@ static void nommu_sync_sg_for_device(str + flush_write_buffers(); + } + +-struct dma_map_ops nommu_dma_ops = { ++const struct dma_map_ops nommu_dma_ops = { + .alloc_coherent = dma_generic_alloc_coherent, + .free_coherent = nommu_free_coherent, + .map_sg = nommu_map_sg, +diff -urNp linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c +--- linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/pci-swiotlb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -25,7 +25,7 @@ static void *x86_swiotlb_alloc_coherent( + return swiotlb_alloc_coherent(hwdev, size, dma_handle, flags); + } + +-static struct dma_map_ops swiotlb_dma_ops = { ++static const struct dma_map_ops swiotlb_dma_ops = { + .mapping_error = swiotlb_dma_mapping_error, + .alloc_coherent = x86_swiotlb_alloc_coherent, + .free_coherent = swiotlb_free_coherent, +diff -urNp linux-2.6.35.4/arch/x86/kernel/process_32.c linux-2.6.35.4/arch/x86/kernel/process_32.c +--- linux-2.6.35.4/arch/x86/kernel/process_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/process_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -65,6 +65,7 @@ asmlinkage void ret_from_fork(void) __as + unsigned long thread_saved_pc(struct task_struct *tsk) + { + return ((unsigned long *)tsk->thread.sp)[3]; ++//XXX return tsk->thread.eip; + } + + #ifndef CONFIG_SMP +@@ -126,7 +127,7 @@ void __show_regs(struct pt_regs *regs, i + unsigned long sp; + unsigned short ss, gs; + +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + sp = regs->sp; + ss = regs->ss & 0xffff; + gs = get_user_gs(regs); +@@ -196,7 +197,7 @@ int copy_thread(unsigned long clone_flag + struct task_struct *tsk; + int err; + +- childregs = task_pt_regs(p); ++ childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8; + *childregs = *regs; + childregs->ax = 0; + childregs->sp = sp; +@@ -230,6 +231,7 @@ int copy_thread(unsigned long clone_flag + * Set a new TLS for the child thread? + */ + if (clone_flags & CLONE_SETTLS) ++//XXX needs set_fs()? + err = do_set_thread_area(p, -1, + (struct user_desc __user *)childregs->si, 0); + +@@ -293,7 +295,7 @@ __switch_to(struct task_struct *prev_p, + struct thread_struct *prev = &prev_p->thread, + *next = &next_p->thread; + int cpu = smp_processor_id(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + bool preload_fpu; + + /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */ +@@ -328,6 +330,11 @@ __switch_to(struct task_struct *prev_p, + */ + lazy_save_gs(prev->gs); + ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ if (!segment_eq(task_thread_info(prev_p)->addr_limit, task_thread_info(next_p)->addr_limit)) ++ __set_fs(task_thread_info(next_p)->addr_limit, cpu); ++#endif ++ + /* + * Load the per-thread Thread-Local Storage descriptor. 
+ */ +@@ -404,3 +411,27 @@ unsigned long get_wchan(struct task_stru + return 0; + } + ++#ifdef CONFIG_PAX_RANDKSTACK ++asmlinkage void pax_randomize_kstack(void) ++{ ++ struct thread_struct *thread = &current->thread; ++ unsigned long time; ++ ++ if (!randomize_va_space) ++ return; ++ ++ rdtscl(time); ++ ++ /* P4 seems to return a 0 LSB, ignore it */ ++#ifdef CONFIG_MPENTIUM4 ++ time &= 0x1EUL; ++ time <<= 2; ++#else ++ time &= 0xFUL; ++ time <<= 3; ++#endif ++ ++ thread->sp0 ^= time; ++ load_sp0(init_tss + smp_processor_id(), thread); ++} ++#endif +diff -urNp linux-2.6.35.4/arch/x86/kernel/process_64.c linux-2.6.35.4/arch/x86/kernel/process_64.c +--- linux-2.6.35.4/arch/x86/kernel/process_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/process_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -87,7 +87,7 @@ static void __exit_idle(void) + void exit_idle(void) + { + /* idle loop has pid 0 */ +- if (current->pid) ++ if (task_pid_nr(current)) + return; + __exit_idle(); + } +@@ -375,7 +375,7 @@ __switch_to(struct task_struct *prev_p, + struct thread_struct *prev = &prev_p->thread; + struct thread_struct *next = &next_p->thread; + int cpu = smp_processor_id(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + unsigned fsindex, gsindex; + bool preload_fpu; + +@@ -528,12 +528,11 @@ unsigned long get_wchan(struct task_stru + if (!p || p == current || p->state == TASK_RUNNING) + return 0; + stack = (unsigned long)task_stack_page(p); +- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE) ++ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-8-sizeof(u64)) + return 0; + fp = *(u64 *)(p->thread.sp); + do { +- if (fp < (unsigned long)stack || +- fp >= (unsigned long)stack+THREAD_SIZE) ++ if (fp < stack || fp > stack+THREAD_SIZE-8-sizeof(u64)) + return 0; + ip = *(u64 *)(fp+8); + if (!in_sched_functions(ip)) +diff -urNp linux-2.6.35.4/arch/x86/kernel/process.c linux-2.6.35.4/arch/x86/kernel/process.c +--- linux-2.6.35.4/arch/x86/kernel/process.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/process.c 2010-09-17 20:12:09.000000000 -0400 +@@ -73,7 +73,7 @@ void exit_thread(void) + unsigned long *bp = t->io_bitmap_ptr; + + if (bp) { +- struct tss_struct *tss = &per_cpu(init_tss, get_cpu()); ++ struct tss_struct *tss = init_tss + get_cpu(); + + t->io_bitmap_ptr = NULL; + clear_thread_flag(TIF_IO_BITMAP); +@@ -107,7 +107,7 @@ void show_regs_common(void) + + printk(KERN_CONT "\n"); + printk(KERN_DEFAULT "Pid: %d, comm: %.20s %s %s %.*s %s/%s\n", +- current->pid, current->comm, print_tainted(), ++ task_pid_nr(current), current->comm, print_tainted(), + init_utsname()->release, + (int)strcspn(init_utsname()->version, " "), + init_utsname()->version, board, product); +@@ -117,6 +117,9 @@ void flush_thread(void) + { + struct task_struct *tsk = current; + ++#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) ++ loadsegment(gs, 0); ++#endif + flush_ptrace_hw_breakpoint(tsk); + memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array)); + /* +@@ -279,8 +282,8 @@ int kernel_thread(int (*fn)(void *), voi + regs.di = (unsigned long) arg; + + #ifdef CONFIG_X86_32 +- regs.ds = __USER_DS; +- regs.es = __USER_DS; ++ regs.ds = __KERNEL_DS; ++ regs.es = __KERNEL_DS; + regs.fs = __KERNEL_PERCPU; + regs.gs = __KERNEL_STACK_CANARY; + #else +@@ -689,17 +692,3 @@ static int __init idle_setup(char *str) + return 0; + } + early_param("idle", idle_setup); +- +-unsigned long arch_align_stack(unsigned
long sp) +-{ +- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space) +- sp -= get_random_int() % 8192; +- return sp & ~0xf; +-} +- +-unsigned long arch_randomize_brk(struct mm_struct *mm) +-{ +- unsigned long range_end = mm->brk + 0x02000000; +- return randomize_range(mm->brk, range_end, 0) ? : mm->brk; +-} +- +diff -urNp linux-2.6.35.4/arch/x86/kernel/ptrace.c linux-2.6.35.4/arch/x86/kernel/ptrace.c +--- linux-2.6.35.4/arch/x86/kernel/ptrace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/ptrace.c 2010-09-17 20:12:09.000000000 -0400 +@@ -804,7 +804,7 @@ static const struct user_regset_view use + long arch_ptrace(struct task_struct *child, long request, long addr, long data) + { + int ret; +- unsigned long __user *datap = (unsigned long __user *)data; ++ unsigned long __user *datap = (__force unsigned long __user *)data; + + switch (request) { + /* read the word at location addr in the USER area. */ +@@ -891,14 +891,14 @@ long arch_ptrace(struct task_struct *chi + if (addr < 0) + return -EIO; + ret = do_get_thread_area(child, addr, +- (struct user_desc __user *) data); ++ (__force struct user_desc __user *) data); + break; + + case PTRACE_SET_THREAD_AREA: + if (addr < 0) + return -EIO; + ret = do_set_thread_area(child, addr, +- (struct user_desc __user *) data, 0); ++ (__force struct user_desc __user *) data, 0); + break; + #endif + +@@ -1315,7 +1315,7 @@ static void fill_sigtrap_info(struct tas + memset(info, 0, sizeof(*info)); + info->si_signo = SIGTRAP; + info->si_code = si_code; +- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL; ++ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL; + } + + void user_single_step_siginfo(struct task_struct *tsk, +diff -urNp linux-2.6.35.4/arch/x86/kernel/reboot.c linux-2.6.35.4/arch/x86/kernel/reboot.c +--- linux-2.6.35.4/arch/x86/kernel/reboot.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/reboot.c 2010-09-17 20:12:09.000000000 -0400 +@@ -33,7 +33,7 @@ void (*pm_power_off)(void); + EXPORT_SYMBOL(pm_power_off); + + static const struct desc_ptr no_idt = {}; +-static int reboot_mode; ++static unsigned short reboot_mode; + enum reboot_type reboot_type = BOOT_KBD; + int reboot_force; + +@@ -284,7 +284,7 @@ static struct dmi_system_id __initdata r + DMI_MATCH(DMI_BOARD_NAME, "P4S800"), + }, + }, +- { } ++ { NULL, NULL, {{0, {0}}}, NULL} + }; + + static int __init reboot_init(void) +@@ -300,12 +300,12 @@ core_initcall(reboot_init); + controller to pulse the CPU reset line, which is more thorough, but + doesn't work with at least one type of 486 motherboard. It is easy + to stop this code working; hence the copious comments. */ +-static const unsigned long long +-real_mode_gdt_entries [3] = ++static struct desc_struct ++real_mode_gdt_entries [3] __read_only = + { +- 0x0000000000000000ULL, /* Null descriptor */ +- 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */ +- 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */ ++ GDT_ENTRY_INIT(0, 0, 0), /* Null descriptor */ ++ GDT_ENTRY_INIT(0x9b, 0, 0xffff), /* 16-bit real-mode 64k code at 0x00000000 */ ++ GDT_ENTRY_INIT(0x93, 0x100, 0xffff) /* 16-bit real-mode 64k data at 0x00000100 */ + }; + + static const struct desc_ptr +@@ -354,7 +354,7 @@ static const unsigned char jump_to_bios + * specified by the code and length parameters. + * We assume that length will aways be less that 100! 
+ */ +-void machine_real_restart(const unsigned char *code, int length) ++void machine_real_restart(const unsigned char *code, unsigned int length) + { + local_irq_disable(); + +@@ -374,8 +374,8 @@ void machine_real_restart(const unsigned + /* Remap the kernel at virtual address zero, as well as offset zero + from the kernel segment. This assumes the kernel segment starts at + virtual address PAGE_OFFSET. */ +- memcpy(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, +- sizeof(swapper_pg_dir [0]) * KERNEL_PGD_PTRS); ++ clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY)); + + /* + * Use `swapper_pg_dir' as our page directory. +@@ -387,16 +387,15 @@ void machine_real_restart(const unsigned + boot)". This seems like a fairly standard thing that gets set by + REBOOT.COM programs, and the previous reset routine did this + too. */ +- *((unsigned short *)0x472) = reboot_mode; ++ *(unsigned short *)(__va(0x472)) = reboot_mode; + + /* For the switch to real mode, copy some code to low memory. It has + to be in the first 64k because it is running in 16-bit mode, and it + has to have the same physical and virtual address, because it turns + off paging. Copy it near the end of the first page, out of the way + of BIOS variables. */ +- memcpy((void *)(0x1000 - sizeof(real_mode_switch) - 100), +- real_mode_switch, sizeof (real_mode_switch)); +- memcpy((void *)(0x1000 - 100), code, length); ++ memcpy(__va(0x1000 - sizeof (real_mode_switch) - 100), real_mode_switch, sizeof (real_mode_switch)); ++ memcpy(__va(0x1000 - 100), code, length); + + /* Set up the IDT for real mode. */ + load_idt(&real_mode_idt); +diff -urNp linux-2.6.35.4/arch/x86/kernel/setup.c linux-2.6.35.4/arch/x86/kernel/setup.c +--- linux-2.6.35.4/arch/x86/kernel/setup.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/setup.c 2010-09-17 20:12:09.000000000 -0400 +@@ -704,7 +704,7 @@ static void __init trim_bios_range(void) + * area (640->1Mb) as ram even though it is not. + * take them out. 
+ */ +- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1); ++ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1); + sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map); + } + +@@ -791,14 +791,14 @@ void __init setup_arch(char **cmdline_p) + + if (!boot_params.hdr.root_flags) + root_mountflags &= ~MS_RDONLY; +- init_mm.start_code = (unsigned long) _text; +- init_mm.end_code = (unsigned long) _etext; ++ init_mm.start_code = ktla_ktva((unsigned long) _text); ++ init_mm.end_code = ktla_ktva((unsigned long) _etext); + init_mm.end_data = (unsigned long) _edata; + init_mm.brk = _brk_end; + +- code_resource.start = virt_to_phys(_text); +- code_resource.end = virt_to_phys(_etext)-1; +- data_resource.start = virt_to_phys(_etext); ++ code_resource.start = virt_to_phys(ktla_ktva(_text)); ++ code_resource.end = virt_to_phys(ktla_ktva(_etext))-1; ++ data_resource.start = virt_to_phys(_sdata); + data_resource.end = virt_to_phys(_edata)-1; + bss_resource.start = virt_to_phys(&__bss_start); + bss_resource.end = virt_to_phys(&__bss_stop)-1; +diff -urNp linux-2.6.35.4/arch/x86/kernel/setup_percpu.c linux-2.6.35.4/arch/x86/kernel/setup_percpu.c +--- linux-2.6.35.4/arch/x86/kernel/setup_percpu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/setup_percpu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -21,19 +21,17 @@ + #include <asm/cpu.h> + #include <asm/stackprotector.h> + ++#ifdef CONFIG_SMP + DEFINE_PER_CPU(int, cpu_number); + EXPORT_PER_CPU_SYMBOL(cpu_number); ++#endif + +-#ifdef CONFIG_X86_64 + #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load) +-#else +-#define BOOT_PERCPU_OFFSET 0 +-#endif + + DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET; + EXPORT_PER_CPU_SYMBOL(this_cpu_off); + +-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = { ++unsigned long __per_cpu_offset[NR_CPUS] __read_only = { + [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET, + }; + EXPORT_SYMBOL(__per_cpu_offset); +@@ -161,10 +159,10 @@ static inline void setup_percpu_segment( + { + #ifdef CONFIG_X86_32 + struct desc_struct gdt; ++ unsigned long base = per_cpu_offset(cpu); + +- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF, +- 0x2 | DESCTYPE_S, 0x8); +- gdt.s = 1; ++ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT, ++ 0x83 | DESCTYPE_S, 0xC); + write_gdt_entry(get_cpu_gdt_table(cpu), + GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S); + #endif +@@ -213,6 +211,11 @@ void __init setup_per_cpu_areas(void) + /* alrighty, percpu areas up and running */ + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) { ++#ifdef CONFIG_CC_STACKPROTECTOR ++#ifdef CONFIG_x86_32 ++ unsigned long canary = per_cpu(stack_canary, cpu); ++#endif ++#endif + per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu]; + per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu); + per_cpu(cpu_number, cpu) = cpu; +@@ -249,6 +252,12 @@ void __init setup_per_cpu_areas(void) + set_cpu_numa_node(cpu, early_cpu_to_node(cpu)); + #endif + #endif ++#ifdef CONFIG_CC_STACKPROTECTOR ++#ifdef CONFIG_x86_32 ++ if (cpu == boot_cpu_id) ++ per_cpu(stack_canary, cpu) = canary; ++#endif ++#endif + /* + * Up to this point, the boot CPU has been using .init.data + * area. Reload any changed state for the boot CPU. 
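Two details in the setup_percpu.c hunks above are worth noting. First, the new percpu GDT descriptor covers base..VMALLOC_END with 4K granularity instead of a flat limit; a standalone sketch of the limit arithmetic, with illustrative i386 addresses:

#include <stdio.h>

#define PAGE_SHIFT  12
#define VMALLOC_END 0xf7fe0000UL	/* illustrative value */

int main(void)
{
	unsigned long base = 0xc2000000UL;	/* per_cpu_offset(cpu) */
	/* descriptor limit counts 4K pages from base up to VMALLOC_END */
	unsigned long limit = (VMALLOC_END - base - 1) >> PAGE_SHIFT;
	printf("limit = %#lx pages\n", limit);
	return 0;
}

Second, the canary save/restore added under CONFIG_CC_STACKPROTECTOR is guarded by #ifdef CONFIG_x86_32 with a lowercase x; that does not match the Kconfig symbol CONFIG_X86_32, so the block appears to compile out as written.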
+diff -urNp linux-2.6.35.4/arch/x86/kernel/signal.c linux-2.6.35.4/arch/x86/kernel/signal.c +--- linux-2.6.35.4/arch/x86/kernel/signal.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/signal.c 2010-09-17 20:12:09.000000000 -0400 +@@ -198,7 +198,7 @@ static unsigned long align_sigframe(unsi + * Align the stack pointer according to the i386 ABI, + * i.e. so that on function entry ((sp + 4) & 15) == 0. + */ +- sp = ((sp + 4) & -16ul) - 4; ++ sp = ((sp - 12) & -16ul) - 4; + #else /* !CONFIG_X86_32 */ + sp = round_down(sp, 16) - 8; + #endif +@@ -249,11 +249,11 @@ get_sigframe(struct k_sigaction *ka, str + * Return an always-bogus address instead so we will die with SIGSEGV. + */ + if (onsigstack && !likely(on_sig_stack(sp))) +- return (void __user *)-1L; ++ return (__force void __user *)-1L; + + /* save i387 state */ + if (used_math() && save_i387_xstate(*fpstate) < 0) +- return (void __user *)-1L; ++ return (__force void __user *)-1L; + + return (void __user *)sp; + } +@@ -308,9 +308,9 @@ __setup_frame(int sig, struct k_sigactio + } + + if (current->mm->context.vdso) +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); ++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn); + else +- restorer = &frame->retcode; ++ restorer = (void __user *)&frame->retcode; + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; + +@@ -324,7 +324,7 @@ __setup_frame(int sig, struct k_sigactio + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ +- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode); ++ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode); + + if (err) + return -EFAULT; +@@ -378,7 +378,7 @@ static int __setup_rt_frame(int sig, str + err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set)); + + /* Set up to return from userspace. */ +- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); ++ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn); + if (ka->sa.sa_flags & SA_RESTORER) + restorer = ka->sa.sa_restorer; + put_user_ex(restorer, &frame->pretcode); +@@ -390,7 +390,7 @@ static int __setup_rt_frame(int sig, str + * reasons and because gdb uses it as a signature to notice + * signal handler stack frames. + */ +- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode); ++ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode); + } put_user_catch(err); + + if (err) +@@ -780,7 +780,7 @@ static void do_signal(struct pt_regs *re + * X86_32: vm86 regs switched out by assembly code before reaching + * here, so testing against kernel CS suffices. 
+ */ +- if (!user_mode(regs)) ++ if (!user_mode_novm(regs)) + return; + + if (current_thread_info()->status & TS_RESTORE_SIGMASK) +diff -urNp linux-2.6.35.4/arch/x86/kernel/smpboot.c linux-2.6.35.4/arch/x86/kernel/smpboot.c +--- linux-2.6.35.4/arch/x86/kernel/smpboot.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/smpboot.c 2010-09-17 20:12:09.000000000 -0400 +@@ -780,7 +780,11 @@ do_rest: + (unsigned long)task_stack_page(c_idle.idle) - + KERNEL_STACK_OFFSET + THREAD_SIZE; + #endif ++ ++ pax_open_kernel(); + early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu); ++ pax_close_kernel(); ++ + initial_code = (unsigned long)start_secondary; + stack_start.sp = (void *) c_idle.idle->thread.sp; + +@@ -920,6 +924,12 @@ int __cpuinit native_cpu_up(unsigned int + + per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + #ifdef CONFIG_X86_32 + /* init low mem mapping */ + clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY, +diff -urNp linux-2.6.35.4/arch/x86/kernel/step.c linux-2.6.35.4/arch/x86/kernel/step.c +--- linux-2.6.35.4/arch/x86/kernel/step.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/step.c 2010-09-17 20:12:09.000000000 -0400 +@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struc + struct desc_struct *desc; + unsigned long base; + +- seg &= ~7UL; ++ seg >>= 3; + + mutex_lock(&child->mm->context.lock); +- if (unlikely((seg >> 3) >= child->mm->context.size)) ++ if (unlikely(seg >= child->mm->context.size)) + addr = -1L; /* bogus selector, access would fault */ + else { + desc = child->mm->context.ldt + seg; +@@ -53,6 +53,9 @@ static int is_setting_trap_flag(struct t + unsigned char opcode[15]; + unsigned long addr = convert_ip_to_linear(child, regs); + ++ if (addr == -EINVAL) ++ return 0; ++ + copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0); + for (i = 0; i < copied; i++) { + switch (opcode[i]) { +@@ -74,7 +77,7 @@ static int is_setting_trap_flag(struct t + + #ifdef CONFIG_X86_64 + case 0x40 ... 
0x4f: +- if (regs->cs != __USER_CS) ++ if ((regs->cs & 0xffff) != __USER_CS) + /* 32-bit mode: register increment */ + return 0; + /* 64-bit mode: REX prefix */ +diff -urNp linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S +--- linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/syscall_table_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -1,3 +1,4 @@ ++.section .rodata,"a",@progbits + ENTRY(sys_call_table) + .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */ + .long sys_exit +diff -urNp linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c +--- linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/sys_i386_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -24,6 +24,224 @@ + + #include <asm/syscalls.h> + ++int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags) ++{ ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (len > pax_task_size || addr > pax_task_size - len) ++ return -EINVAL; ++ ++ return 0; ++} ++ ++unsigned long ++arch_get_unmapped_area(struct file *filp, unsigned long addr, ++ unsigned long len, unsigned long pgoff, unsigned long flags) ++{ ++ struct mm_struct *mm = current->mm; ++ struct vm_area_struct *vma; ++ unsigned long start_addr, pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (len > pax_task_size) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } ++ } ++ if (len > mm->cached_hole_size) { ++ start_addr = addr = mm->free_area_cache; ++ } else { ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; ++ } ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE) && start_addr >= mm->mmap_base) { ++ start_addr = 0x00110000UL; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ start_addr += mm->delta_mmap & 0x03FFF000UL; ++#endif ++ ++ if (mm->start_brk <= start_addr && start_addr < mm->mmap_base) ++ start_addr = addr = mm->mmap_base; ++ else ++ addr = start_addr; ++ } ++#endif ++ ++full_search: ++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { ++ /* At this point: (!vma || addr < vma->vm_end). */ ++ if (pax_task_size - len < addr) { ++ /* ++ * Start a new search - just in case we missed ++ * some holes. 
++ */ ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; ++ goto full_search; ++ } ++ return -ENOMEM; ++ } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; ++ if (addr + mm->cached_hole_size < vma->vm_start) ++ mm->cached_hole_size = vma->vm_start - addr; ++ addr = vma->vm_end; ++ if (mm->start_brk <= addr && addr < mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; ++ goto full_search; ++ } ++ } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; ++} ++ ++unsigned long ++arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0, ++ const unsigned long len, const unsigned long pgoff, ++ const unsigned long flags) ++{ ++ struct vm_area_struct *vma; ++ struct mm_struct *mm = current->mm; ++ unsigned long base = mm->mmap_base, addr = addr0, pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ /* requested length too big for entire address space */ ++ if (len > pax_task_size) ++ return -ENOMEM; ++ ++ if (flags & MAP_FIXED) ++ return addr; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) ++ goto bottomup; ++#endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ ++ /* requesting a specific address */ ++ if (addr) { ++ addr = PAGE_ALIGN(addr); ++ if (pax_task_size - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } ++ } ++ ++ /* check if free_area_cache is useful for us */ ++ if (len <= mm->cached_hole_size) { ++ mm->cached_hole_size = 0; ++ mm->free_area_cache = mm->mmap_base; ++ } ++ ++ /* either no address requested or can't fit in requested address hole */ ++ addr = mm->free_area_cache; ++ ++ /* make sure it can fit in the remaining address space */ ++ if (addr > len) { ++ vma = find_vma(mm, addr-len); ++ if (check_heap_stack_gap(vma, addr - len, len)) ++ /* remember the address as a hint for next time */ ++ return (mm->free_area_cache = addr-len); ++ } ++ ++ if (mm->mmap_base < len) ++ goto bottomup; ++ ++ addr = mm->mmap_base-len; ++ ++ do { ++ /* ++ * Lookup failure means no vma is above this address, ++ * else if new region fits below vma->vm_start, ++ * return with success: ++ */ ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ /* remember the address as a hint for next time */ ++ return (mm->free_area_cache = addr); ++ ++ /* remember the largest hole we saw so far */ ++ if (addr + mm->cached_hole_size < vma->vm_start) ++ mm->cached_hole_size = vma->vm_start - addr; ++ ++ /* try just below the current vma->vm_start */ ++ addr = vma->vm_start-len; ++ } while (len < vma->vm_start); ++ ++bottomup: ++ /* ++ * A failed mmap() very likely causes application failure, ++ * so fall back to the bottom-up function here. This scenario ++ * can happen with large stack limits and large mmap() ++ * allocations. 
++ */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; ++ mm->cached_hole_size = ~0UL; ++ addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); ++ /* ++ * Restore the topdown base: ++ */ ++ mm->mmap_base = base; ++ mm->free_area_cache = base; ++ mm->cached_hole_size = ~0UL; ++ ++ return addr; ++} ++ + /* + * Do a system call from kernel instead of calling sys_execve so we + * end up with proper pt_regs. +diff -urNp linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c +--- linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/sys_x86_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -32,8 +32,8 @@ out: + return error; + } + +-static void find_start_end(unsigned long flags, unsigned long *begin, +- unsigned long *end) ++static void find_start_end(struct mm_struct *mm, unsigned long flags, ++ unsigned long *begin, unsigned long *end) + { + if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) { + unsigned long new_begin; +@@ -52,7 +52,7 @@ static void find_start_end(unsigned long + *begin = new_begin; + } + } else { +- *begin = TASK_UNMAPPED_BASE; ++ *begin = mm->mmap_base; + *end = TASK_SIZE; + } + } +@@ -69,16 +69,19 @@ arch_get_unmapped_area(struct file *filp + if (flags & MAP_FIXED) + return addr; + +- find_start_end(flags, &begin, &end); ++ find_start_end(mm, flags, &begin, &end); + + if (len > end) + return -ENOMEM; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (end - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (end - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (((flags & MAP_32BIT) || test_thread_flag(TIF_IA32)) +@@ -106,7 +109,7 @@ full_search: + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* + * Remember the place where we stopped the search: + */ +@@ -128,7 +131,7 @@ arch_get_unmapped_area_topdown(struct fi + { + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; +- unsigned long addr = addr0; ++ unsigned long base = mm->mmap_base, addr = addr0; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) +@@ -141,12 +144,15 @@ arch_get_unmapped_area_topdown(struct fi + if (!test_thread_flag(TIF_IA32) && (flags & MAP_32BIT)) + goto bottomup; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + +@@ -162,7 +168,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr-len; + } +@@ -179,7 +185,7 @@ arch_get_unmapped_area_topdown(struct fi + * return 
with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return mm->free_area_cache = addr; + +@@ -198,13 +204,21 @@ bottomup: + * can happen with large stack limits and large mmap() + * allocations. + */ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; +- mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ +- mm->free_area_cache = mm->mmap_base; ++ mm->mmap_base = base; ++ mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +diff -urNp linux-2.6.35.4/arch/x86/kernel/time.c linux-2.6.35.4/arch/x86/kernel/time.c +--- linux-2.6.35.4/arch/x86/kernel/time.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/time.c 2010-09-17 20:12:09.000000000 -0400 +@@ -26,17 +26,13 @@ + int timer_ack; + #endif + +-#ifdef CONFIG_X86_64 +-volatile unsigned long __jiffies __section_jiffies = INITIAL_JIFFIES; +-#endif +- + unsigned long profile_pc(struct pt_regs *regs) + { + unsigned long pc = instruction_pointer(regs); + +- if (!user_mode_vm(regs) && in_lock_functions(pc)) { ++ if (!user_mode(regs) && in_lock_functions(pc)) { + #ifdef CONFIG_FRAME_POINTER +- return *(unsigned long *)(regs->bp + sizeof(long)); ++ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long))); + #else + unsigned long *sp = + (unsigned long *)kernel_stack_pointer(regs); +@@ -45,11 +41,17 @@ unsigned long profile_pc(struct pt_regs + * or above a saved flags. Eflags has bits 22-31 zero, + * kernel addresses don't. + */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ return ktla_ktva(sp[0]); ++#else + if (sp[0] >> 22) + return sp[0]; + if (sp[1] >> 22) + return sp[1]; + #endif ++ ++#endif + } + return pc; + } +diff -urNp linux-2.6.35.4/arch/x86/kernel/tls.c linux-2.6.35.4/arch/x86/kernel/tls.c +--- linux-2.6.35.4/arch/x86/kernel/tls.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/tls.c 2010-09-17 20:12:09.000000000 -0400 +@@ -85,6 +85,11 @@ int do_set_thread_area(struct task_struc + if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE)) ++ return -EINVAL; ++#endif ++ + set_tls_desc(p, idx, &info, 1); + + return 0; +diff -urNp linux-2.6.35.4/arch/x86/kernel/trampoline_32.S linux-2.6.35.4/arch/x86/kernel/trampoline_32.S +--- linux-2.6.35.4/arch/x86/kernel/trampoline_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/trampoline_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -32,6 +32,12 @@ + #include <asm/segment.h> + #include <asm/page_types.h> + ++#ifdef CONFIG_PAX_KERNEXEC ++#define ta(X) (X) ++#else ++#define ta(X) ((X) - __PAGE_OFFSET) ++#endif ++ + /* We can free up trampoline after bootup if cpu hotplug is not supported. */ + __CPUINITRODATA + .code16 +@@ -60,7 +66,7 @@ r_base = . 
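The arch_get_unmapped_area/_topdown rework above (sys_i386_32.c and sys_x86_64.c) funnels every open-coded "!vma || addr + len <= vma->vm_start" test through check_heap_stack_gap(). The helper itself is added elsewhere in the patch; a standalone sketch of its assumed semantics, where the candidate range must clear the next VMA plus a guard gap when that VMA is a growsdown stack:

#include <stdbool.h>
#include <stdio.h>

#define VM_GROWSDOWN   0x0100UL
#define HEAP_STACK_GAP (64UL * 1024)	/* illustrative gap size */

struct vm_area_struct {
	unsigned long vm_start, vm_end, vm_flags;
};

/* assumed semantics: [addr, addr+len) must end before the next VMA,
 * and before its guard gap as well if that VMA is a stack; a NULL
 * vma means the range runs into free address space */
static bool check_heap_stack_gap(const struct vm_area_struct *vma,
				 unsigned long addr, unsigned long len)
{
	if (!vma)
		return true;
	if (vma->vm_flags & VM_GROWSDOWN)
		return addr + len + HEAP_STACK_GAP <= vma->vm_start;
	return addr + len <= vma->vm_start;
}

int main(void)
{
	struct vm_area_struct stack = {
		.vm_start = 0xbf000000UL,
		.vm_end   = 0xc0000000UL,
		.vm_flags = VM_GROWSDOWN,
	};

	/* abuts the stack's guard gap: rejected (prints 0) */
	printf("%d\n", check_heap_stack_gap(&stack, 0xbeff0000UL, 0x10000UL));
	/* well below the gap: accepted (prints 1) */
	printf("%d\n", check_heap_stack_gap(&stack, 0xb0000000UL, 0x10000UL));
	return 0;
}

This keeps new mappings from landing flush against a stack VMA, which is the property the substituted gap checks in the allocators above enforce.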
+ inc %ax # protected mode (PE) bit + lmsw %ax # into protected mode + # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S +- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET) ++ ljmpl $__BOOT_CS, $ta(startup_32_smp) + + # These need to be in the same 64K segment as the above; + # hence we don't use the boot_gdt_descr defined in head.S +diff -urNp linux-2.6.35.4/arch/x86/kernel/traps.c linux-2.6.35.4/arch/x86/kernel/traps.c +--- linux-2.6.35.4/arch/x86/kernel/traps.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/traps.c 2010-09-17 20:12:09.000000000 -0400 +@@ -70,12 +70,6 @@ asmlinkage int system_call(void); + + /* Do we ignore FPU interrupts ? */ + char ignore_fpu_irq; +- +-/* +- * The IDT has to be page-aligned to simplify the Pentium +- * F0 0F bug workaround. +- */ +-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, }; + #endif + + DECLARE_BITMAP(used_vectors, NR_VECTORS); +@@ -110,13 +104,13 @@ static inline void preempt_conditional_c + } + + static void __kprobes +-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs, ++do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs, + long error_code, siginfo_t *info) + { + struct task_struct *tsk = current; + + #ifdef CONFIG_X86_32 +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + /* + * traps 0, 1, 3, 4, and 5 should be forwarded to vm86. + * On nmi (interrupt 2), do_trap should not be called. +@@ -127,7 +121,7 @@ do_trap(int trapnr, int signr, char *str + } + #endif + +- if (!user_mode(regs)) ++ if (!user_mode_novm(regs)) + goto kernel_trap; + + #ifdef CONFIG_X86_32 +@@ -150,7 +144,7 @@ trap_signal: + printk_ratelimit()) { + printk(KERN_INFO + "%s[%d] trap %s ip:%lx sp:%lx error:%lx", +- tsk->comm, tsk->pid, str, ++ tsk->comm, task_pid_nr(tsk), str, + regs->ip, regs->sp, error_code); + print_vma_addr(" in ", regs->ip); + printk("\n"); +@@ -167,8 +161,20 @@ kernel_trap: + if (!fixup_exception(regs)) { + tsk->thread.error_code = error_code; + tsk->thread.trap_no = trapnr; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)) ++ str = "PAX: suspicious stack segment fault"; ++#endif ++ + die(str, regs, error_code); + } ++ ++#ifdef CONFIG_PAX_REFCOUNT ++ if (trapnr == 4) ++ pax_report_refcount_overflow(regs); ++#endif ++ + return; + + #ifdef CONFIG_X86_32 +@@ -257,14 +263,30 @@ do_general_protection(struct pt_regs *re + conditional_sti(regs); + + #ifdef CONFIG_X86_32 +- if (regs->flags & X86_VM_MASK) ++ if (v8086_mode(regs)) + goto gp_in_vm86; + #endif + + tsk = current; +- if (!user_mode(regs)) ++ if (!user_mode_novm(regs)) + goto gp_in_kernel; + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) { ++ struct mm_struct *mm = tsk->mm; ++ unsigned long limit; ++ ++ down_write(&mm->mmap_sem); ++ limit = mm->context.user_cs_limit; ++ if (limit < TASK_SIZE) { ++ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC); ++ up_write(&mm->mmap_sem); ++ return; ++ } ++ up_write(&mm->mmap_sem); ++ } ++#endif ++ + tsk->thread.error_code = error_code; + tsk->thread.trap_no = 13; + +@@ -297,6 +319,13 @@ gp_in_kernel: + if (notify_die(DIE_GPF, "general protection fault", regs, + error_code, 13, SIGSEGV) == NOTIFY_STOP) + return; ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((regs->cs & 0xFFFF) == __KERNEL_CS || 
(regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS) ++ die("PAX: suspicious general protection fault", regs, error_code); ++ else ++#endif ++ + die("general protection fault", regs, error_code); + } + +@@ -565,7 +594,7 @@ dotraplinkage void __kprobes do_debug(st + /* It's safe to allow irq's after DR6 has been saved */ + preempt_conditional_sti(regs); + +- if (regs->flags & X86_VM_MASK) { ++ if (v8086_mode(regs)) { + handle_vm86_trap((struct kernel_vm86_regs *) regs, + error_code, 1); + return; +@@ -578,7 +607,7 @@ dotraplinkage void __kprobes do_debug(st + * We already checked v86 mode above, so we can check for kernel mode + * by just checking the CPL of CS. + */ +- if ((dr6 & DR_STEP) && !user_mode(regs)) { ++ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) { + tsk->thread.debugreg6 &= ~DR_STEP; + set_tsk_thread_flag(tsk, TIF_SINGLESTEP); + regs->flags &= ~X86_EFLAGS_TF; +@@ -607,7 +636,7 @@ void math_error(struct pt_regs *regs, in + return; + conditional_sti(regs); + +- if (!user_mode_vm(regs)) ++ if (!user_mode(regs)) + { + if (!fixup_exception(regs)) { + task->thread.error_code = error_code; +diff -urNp linux-2.6.35.4/arch/x86/kernel/tsc.c linux-2.6.35.4/arch/x86/kernel/tsc.c +--- linux-2.6.35.4/arch/x86/kernel/tsc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/tsc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -795,7 +795,7 @@ static struct dmi_system_id __initdata b + DMI_MATCH(DMI_BOARD_NAME, "2635FA0"), + }, + }, +- {} ++ { NULL, NULL, {{0, {0}}}, NULL} + }; + + static void __init check_system_tsc_reliable(void) +diff -urNp linux-2.6.35.4/arch/x86/kernel/vm86_32.c linux-2.6.35.4/arch/x86/kernel/vm86_32.c +--- linux-2.6.35.4/arch/x86/kernel/vm86_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/vm86_32.c 2010-09-17 20:12:37.000000000 -0400 +@@ -41,6 +41,7 @@ + #include <linux/ptrace.h> + #include <linux/audit.h> + #include <linux/stddef.h> ++#include <linux/grsecurity.h> + + #include <asm/uaccess.h> + #include <asm/io.h> +@@ -148,7 +149,7 @@ struct pt_regs *save_v86_state(struct ke + do_exit(SIGSEGV); + } + +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + current->thread.sp0 = current->thread.saved_sp0; + current->thread.sysenter_cs = __KERNEL_CS; + load_sp0(tss, &current->thread); +@@ -207,6 +208,13 @@ int sys_vm86old(struct vm86_struct __use + struct task_struct *tsk; + int tmp, ret = -EPERM; + ++#ifdef CONFIG_GRKERNSEC_VM86 ++ if (!capable(CAP_SYS_RAWIO)) { ++ gr_handle_vm86(); ++ goto out; ++ } ++#endif ++ + tsk = current; + if (tsk->thread.saved_sp0) + goto out; +@@ -237,6 +245,14 @@ int sys_vm86(unsigned long cmd, unsigned + int tmp, ret; + struct vm86plus_struct __user *v86; + ++#ifdef CONFIG_GRKERNSEC_VM86 ++ if (!capable(CAP_SYS_RAWIO)) { ++ gr_handle_vm86(); ++ ret = -EPERM; ++ goto out; ++ } ++#endif ++ + tsk = current; + switch (cmd) { + case VM86_REQUEST_IRQ: +@@ -323,7 +339,7 @@ static void do_sys_vm86(struct kernel_vm + tsk->thread.saved_fs = info->regs32->fs; + tsk->thread.saved_gs = get_user_gs(info->regs32); + +- tss = &per_cpu(init_tss, get_cpu()); ++ tss = init_tss + get_cpu(); + tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0; + if (cpu_has_sep) + tsk->thread.sysenter_cs = 0; +@@ -528,7 +544,7 @@ static void do_int(struct kernel_vm86_re + goto cannot_handle; + if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored)) + goto cannot_handle; +- intr_ptr = (unsigned long __user *) (i << 2); ++ intr_ptr = (__force unsigned long __user *) (i << 2); + if (get_user(segoffs, intr_ptr)) + goto
cannot_handle; + if ((segoffs >> 16) == BIOSSEG) +diff -urNp linux-2.6.35.4/arch/x86/kernel/vmi_32.c linux-2.6.35.4/arch/x86/kernel/vmi_32.c +--- linux-2.6.35.4/arch/x86/kernel/vmi_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/vmi_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -46,12 +46,17 @@ typedef u32 __attribute__((regparm(1))) + typedef u64 __attribute__((regparm(2))) (VROMLONGFUNC)(int); + + #define call_vrom_func(rom,func) \ +- (((VROMFUNC *)(rom->func))()) ++ (((VROMFUNC *)(ktva_ktla(rom.func)))()) + + #define call_vrom_long_func(rom,func,arg) \ +- (((VROMLONGFUNC *)(rom->func)) (arg)) ++({\ ++ u64 __reloc = ((VROMLONGFUNC *)(ktva_ktla(rom.func))) (arg);\ ++ struct vmi_relocation_info *const __rel = (struct vmi_relocation_info *)&__reloc;\ ++ __rel->eip = (unsigned char *)ktva_ktla((unsigned long)__rel->eip);\ ++ __reloc;\ ++}) + +-static struct vrom_header *vmi_rom; ++static struct vrom_header vmi_rom __attribute((__section__(".vmi.rom"), __aligned__(PAGE_SIZE))); + static int disable_pge; + static int disable_pse; + static int disable_sep; +@@ -78,10 +83,10 @@ static struct { + void (*set_initial_ap_state)(int, int); + void (*halt)(void); + void (*set_lazy_mode)(int mode); +-} vmi_ops; ++} vmi_ops __read_only; + + /* Cached VMI operations */ +-struct vmi_timer_ops vmi_timer_ops; ++struct vmi_timer_ops vmi_timer_ops __read_only; + + /* + * VMI patching routines. +@@ -96,7 +101,7 @@ struct vmi_timer_ops vmi_timer_ops; + static inline void patch_offset(void *insnbuf, + unsigned long ip, unsigned long dest) + { +- *(unsigned long *)(insnbuf+1) = dest-ip-5; ++ *(unsigned long *)(insnbuf+1) = dest-ip-5; + } + + static unsigned patch_internal(int call, unsigned len, void *insnbuf, +@@ -104,6 +109,7 @@ static unsigned patch_internal(int call, + { + u64 reloc; + struct vmi_relocation_info *const rel = (struct vmi_relocation_info *)&reloc; ++ + reloc = call_vrom_long_func(vmi_rom, get_reloc, call); + switch(rel->type) { + case VMI_RELOCATION_CALL_REL: +@@ -382,13 +388,13 @@ static void vmi_set_pud(pud_t *pudp, pud + + static void vmi_pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep) + { +- const pte_t pte = { .pte = 0 }; ++ const pte_t pte = __pte(0ULL); + vmi_ops.set_pte(pte, ptep, vmi_flags_addr(mm, addr, VMI_PAGE_PT, 0)); + } + + static void vmi_pmd_clear(pmd_t *pmd) + { +- const pte_t pte = { .pte = 0 }; ++ const pte_t pte = __pte(0ULL); + vmi_ops.set_pte(pte, (pte_t *)pmd, VMI_PAGE_PD); + } + #endif +@@ -416,8 +422,8 @@ vmi_startup_ipi_hook(int phys_apicid, un + ap.ss = __KERNEL_DS; + ap.esp = (unsigned long) start_esp; + +- ap.ds = __USER_DS; +- ap.es = __USER_DS; ++ ap.ds = __KERNEL_DS; ++ ap.es = __KERNEL_DS; + ap.fs = __KERNEL_PERCPU; + ap.gs = __KERNEL_STACK_CANARY; + +@@ -464,6 +470,18 @@ static void vmi_leave_lazy_mmu(void) + paravirt_leave_lazy_mmu(); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++static unsigned long vmi_pax_open_kernel(void) ++{ ++ return 0; ++} ++ ++static unsigned long vmi_pax_close_kernel(void) ++{ ++ return 0; ++} ++#endif ++ + static inline int __init check_vmi_rom(struct vrom_header *rom) + { + struct pci_header *pci; +@@ -476,6 +494,10 @@ static inline int __init check_vmi_rom(s + return 0; + if (rom->vrom_signature != VMI_SIGNATURE) + return 0; ++ if (rom->rom_length * 512 > sizeof(*rom)) { ++ printk(KERN_WARNING "PAX: VMI: ROM size too big: %x\n", rom->rom_length * 512); ++ return 0; ++ } + if (rom->api_version_maj != VMI_API_REV_MAJOR || + rom->api_version_min+1 < VMI_API_REV_MINOR+1) { + printk(KERN_WARNING "VMI: 
Found mismatched rom version %d.%d\n", +@@ -540,7 +562,7 @@ static inline int __init probe_vmi_rom(v + struct vrom_header *romstart; + romstart = (struct vrom_header *)isa_bus_to_virt(base); + if (check_vmi_rom(romstart)) { +- vmi_rom = romstart; ++ vmi_rom = *romstart; + return 1; + } + } +@@ -816,6 +838,11 @@ static inline int __init activate_vmi(vo + + para_fill(pv_irq_ops.safe_halt, Halt); + ++#ifdef CONFIG_PAX_KERNEXEC ++ pv_mmu_ops.pax_open_kernel = vmi_pax_open_kernel; ++ pv_mmu_ops.pax_close_kernel = vmi_pax_close_kernel; ++#endif ++ + /* + * Alternative instruction rewriting doesn't happen soon enough + * to convert VMI_IRET to a call instead of a jump; so we have +@@ -833,16 +860,16 @@ static inline int __init activate_vmi(vo + + void __init vmi_init(void) + { +- if (!vmi_rom) ++ if (!vmi_rom.rom_signature) + probe_vmi_rom(); + else +- check_vmi_rom(vmi_rom); ++ check_vmi_rom(&vmi_rom); + + /* In case probing for or validating the ROM failed, basil */ +- if (!vmi_rom) ++ if (!vmi_rom.rom_signature) + return; + +- reserve_top_address(-vmi_rom->virtual_top); ++ reserve_top_address(-vmi_rom.virtual_top); + + #ifdef CONFIG_X86_IO_APIC + /* This is virtual hardware; timer routing is wired correctly */ +@@ -854,7 +881,7 @@ void __init vmi_activate(void) + { + unsigned long flags; + +- if (!vmi_rom) ++ if (!vmi_rom.rom_signature) + return; + + local_irq_save(flags); +diff -urNp linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S +--- linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/vmlinux.lds.S 2010-09-17 20:12:09.000000000 -0400 +@@ -26,6 +26,13 @@ + #include <asm/page_types.h> + #include <asm/cache.h> + #include <asm/boot.h> ++#include <asm/segment.h> ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR) ++#else ++#define __KERNEL_TEXT_OFFSET 0 ++#endif + + #undef i386 /* in case the preprocessor is a 32bit one */ + +@@ -34,13 +41,13 @@ OUTPUT_FORMAT(CONFIG_OUTPUT_FORMAT, CONF + #ifdef CONFIG_X86_32 + OUTPUT_ARCH(i386) + ENTRY(phys_startup_32) +-jiffies = jiffies_64; + #else + OUTPUT_ARCH(i386:x86-64) + ENTRY(phys_startup_64) +-jiffies_64 = jiffies; + #endif + ++jiffies = jiffies_64; ++ + #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) + /* + * On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA +@@ -69,31 +76,46 @@ jiffies_64 = jiffies; + + PHDRS { + text PT_LOAD FLAGS(5); /* R_E */ +- data PT_LOAD FLAGS(7); /* RWE */ ++#ifdef CONFIG_X86_32 ++ module PT_LOAD FLAGS(5); /* R_E */ ++#endif ++#ifdef CONFIG_XEN ++ rodata PT_LOAD FLAGS(5); /* R_E */ ++#else ++ rodata PT_LOAD FLAGS(4); /* R__ */ ++#endif ++ data PT_LOAD FLAGS(6); /* RW_ */ + #ifdef CONFIG_X86_64 + user PT_LOAD FLAGS(5); /* R_E */ ++#endif ++ init.begin PT_LOAD FLAGS(6); /* RW_ */ + #ifdef CONFIG_SMP + percpu PT_LOAD FLAGS(6); /* RW_ */ + #endif ++ text.init PT_LOAD FLAGS(5); /* R_E */ ++ text.exit PT_LOAD FLAGS(5); /* R_E */ + init PT_LOAD FLAGS(7); /* RWE */ +-#endif + note PT_NOTE FLAGS(0); /* ___ */ + } + + SECTIONS + { + #ifdef CONFIG_X86_32 +- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR; +- phys_startup_32 = startup_32 - LOAD_OFFSET; ++ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR; + #else +- . = __START_KERNEL; +- phys_startup_64 = startup_64 - LOAD_OFFSET; ++ . = __START_KERNEL; + #endif + + /* Text and read-only data */ +- .text : AT(ADDR(.text) - LOAD_OFFSET) { +- _text = .; ++ .text (. 
- __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { + /* bootstrapping code */ ++#ifdef CONFIG_X86_32 ++ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++#else ++ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++#endif ++ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET; ++ _text = .; + HEAD_TEXT + #ifdef CONFIG_X86_32 + . = ALIGN(PAGE_SIZE); +@@ -108,13 +130,50 @@ SECTIONS + IRQENTRY_TEXT + *(.fixup) + *(.gnu.warning) +- /* End of text section */ +- _etext = .; + } :text = 0x9090 + +- NOTES :text :note ++ . += __KERNEL_TEXT_OFFSET; ++ ++#ifdef CONFIG_X86_32 ++ . = ALIGN(PAGE_SIZE); ++ .vmi.rom : AT(ADDR(.vmi.rom) - LOAD_OFFSET) { ++ *(.vmi.rom) ++ } :module ++ ++ . = ALIGN(PAGE_SIZE); ++ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) { ++ ++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_MODULES) ++ MODULES_EXEC_VADDR = .; ++ BYTE(0) ++ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024); ++ . = ALIGN(HPAGE_SIZE); ++ MODULES_EXEC_END = . - 1; ++#endif ++ ++ } :module ++#endif ++ ++ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) { ++ /* End of text section */ ++ _etext = . - __KERNEL_TEXT_OFFSET; ++ } ++ ++#ifdef CONFIG_X86_32 ++ . = ALIGN(PAGE_SIZE); ++ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) { ++ *(.idt) ++ . = ALIGN(PAGE_SIZE); ++ *(.empty_zero_page) ++ *(.swapper_pg_pmd) ++ *(.swapper_pg_dir) ++ } :rodata ++#endif ++ ++ . = ALIGN(PAGE_SIZE); ++ NOTES :rodata :note + +- EXCEPTION_TABLE(16) :text = 0x9090 ++ EXCEPTION_TABLE(16) :rodata + + X64_ALIGN_DEBUG_RODATA_BEGIN + RO_DATA(PAGE_SIZE) +@@ -122,16 +181,20 @@ SECTIONS + + /* Data */ + .data : AT(ADDR(.data) - LOAD_OFFSET) { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(HPAGE_SIZE); ++#else ++ . = ALIGN(PAGE_SIZE); ++#endif ++ + /* Start of data section */ + _sdata = .; + + /* init_task */ + INIT_TASK_DATA(THREAD_SIZE) + +-#ifdef CONFIG_X86_32 +- /* 32 bit has nosave before _edata */ + NOSAVE_DATA +-#endif + + PAGE_ALIGNED_DATA(PAGE_SIZE) + +@@ -194,12 +257,6 @@ SECTIONS + } + vgetcpu_mode = VVIRT(.vgetcpu_mode); + +- . = ALIGN(L1_CACHE_BYTES); +- .jiffies : AT(VLOAD(.jiffies)) { +- *(.jiffies) +- } +- jiffies = VVIRT(.jiffies); +- + .vsyscall_3 ADDR(.vsyscall_0) + 3072: AT(VLOAD(.vsyscall_3)) { + *(.vsyscall_3) + } +@@ -215,12 +272,19 @@ SECTIONS + #endif /* CONFIG_X86_64 */ + + /* Init code and data - will be freed after init */ +- . = ALIGN(PAGE_SIZE); + .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) { ++ BYTE(0) ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ . = ALIGN(HPAGE_SIZE); ++#else ++ . = ALIGN(PAGE_SIZE); ++#endif ++ + __init_begin = .; /* paired with __init_end */ +- } ++ } :init.begin + +-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP) ++#ifdef CONFIG_SMP + /* + * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the + * output PHDR, so the next output section - .init.text - should +@@ -229,12 +293,27 @@ SECTIONS + PERCPU_VADDR(0, :percpu) + #endif + +- INIT_TEXT_SECTION(PAGE_SIZE) +-#ifdef CONFIG_X86_64 +- :init +-#endif ++ . = ALIGN(PAGE_SIZE); ++ init_begin = .; ++ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) { ++ VMLINUX_SYMBOL(_sinittext) = .; ++ INIT_TEXT ++ VMLINUX_SYMBOL(_einittext) = .; ++ . = ALIGN(PAGE_SIZE); ++ } :text.init ++ ++ /* ++ * .exit.text is discard at runtime, not link time, to deal with ++ * references from .altinstructions and .eh_frame ++ */ ++ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) { ++ EXIT_TEXT ++ . 
= ALIGN(16); ++ } :text.exit ++ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text); + +- INIT_DATA_SECTION(16) ++ . = ALIGN(PAGE_SIZE); ++ INIT_DATA_SECTION(16) :init + + .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) { + __x86_cpu_dev_start = .; +@@ -260,19 +339,11 @@ SECTIONS + *(.altinstr_replacement) + } + +- /* +- * .exit.text is discard at runtime, not link time, to deal with +- * references from .altinstructions and .eh_frame +- */ +- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) { +- EXIT_TEXT +- } +- + .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) { + EXIT_DATA + } + +-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP) ++#ifndef CONFIG_SMP + PERCPU(PAGE_SIZE) + #endif + +@@ -291,16 +362,10 @@ SECTIONS + .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) { + __smp_locks = .; + *(.smp_locks) +- . = ALIGN(PAGE_SIZE); + __smp_locks_end = .; ++ . = ALIGN(PAGE_SIZE); + } + +-#ifdef CONFIG_X86_64 +- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) { +- NOSAVE_DATA +- } +-#endif +- + /* BSS */ + . = ALIGN(PAGE_SIZE); + .bss : AT(ADDR(.bss) - LOAD_OFFSET) { +@@ -316,6 +381,7 @@ SECTIONS + __brk_base = .; + . += 64 * 1024; /* 64k alignment slop space */ + *(.brk_reservation) /* areas brk users have reserved */ ++ . = ALIGN(HPAGE_SIZE); + __brk_limit = .; + } + +@@ -342,13 +408,12 @@ SECTIONS + * for the boot processor. + */ + #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load +-INIT_PER_CPU(gdt_page); + INIT_PER_CPU(irq_stack_union); + + /* + * Build-time check on the image size: + */ +-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE), ++. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE), + "kernel image bigger than KERNEL_IMAGE_SIZE"); + + #ifdef CONFIG_SMP +diff -urNp linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c +--- linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/vsyscall_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -80,6 +80,7 @@ void update_vsyscall(struct timespec *wa + + write_seqlock_irqsave(&vsyscall_gtod_data.lock, flags); + /* copy vsyscall data */ ++ strlcpy(vsyscall_gtod_data.clock.name, clock->name, sizeof vsyscall_gtod_data.clock.name); + vsyscall_gtod_data.clock.vread = clock->vread; + vsyscall_gtod_data.clock.cycle_last = clock->cycle_last; + vsyscall_gtod_data.clock.mask = clock->mask; +@@ -203,7 +204,7 @@ vgetcpu(unsigned *cpu, unsigned *node, s + We do this here because otherwise user space would do it on + its own in a likely inferior way (no access to jiffies). + If you don't like it pass NULL. 
*/ +- if (tcache && tcache->blob[0] == (j = __jiffies)) { ++ if (tcache && tcache->blob[0] == (j = jiffies)) { + p = tcache->blob[1]; + } else if (__vgetcpu_mode == VGETCPU_RDTSCP) { + /* Load per CPU data from RDTSCP */ +diff -urNp linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c +--- linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/x8664_ksyms_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -29,8 +29,6 @@ EXPORT_SYMBOL(__put_user_8); + EXPORT_SYMBOL(copy_user_generic_string); + EXPORT_SYMBOL(copy_user_generic_unrolled); + EXPORT_SYMBOL(__copy_user_nocache); +-EXPORT_SYMBOL(_copy_from_user); +-EXPORT_SYMBOL(_copy_to_user); + + EXPORT_SYMBOL(copy_page); + EXPORT_SYMBOL(clear_page); +diff -urNp linux-2.6.35.4/arch/x86/kernel/xsave.c linux-2.6.35.4/arch/x86/kernel/xsave.c +--- linux-2.6.35.4/arch/x86/kernel/xsave.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kernel/xsave.c 2010-09-17 20:12:09.000000000 -0400 +@@ -54,7 +54,7 @@ int check_for_xstate(struct i387_fxsave_ + fx_sw_user->xstate_size > fx_sw_user->extended_size) + return -1; + +- err = __get_user(magic2, (__u32 *) (((void *)fpstate) + ++ err = __get_user(magic2, (__u32 __user *) (((void __user *)fpstate) + + fx_sw_user->extended_size - + FP_XSTATE_MAGIC2_SIZE)); + /* +@@ -196,7 +196,7 @@ fx_only: + * the other extended state. + */ + xrstor_state(init_xstate_buf, pcntxt_mask & ~XSTATE_FPSSE); +- return fxrstor_checking((__force struct i387_fxsave_struct *)buf); ++ return fxrstor_checking((struct i387_fxsave_struct __user *)buf); + } + + /* +@@ -228,7 +228,7 @@ int restore_i387_xstate(void __user *buf + if (use_xsave()) + err = restore_user_xstate(buf); + else +- err = fxrstor_checking((__force struct i387_fxsave_struct *) ++ err = fxrstor_checking((struct i387_fxsave_struct __user *) + buf); + if (unlikely(err)) { + /* +diff -urNp linux-2.6.35.4/arch/x86/kvm/emulate.c linux-2.6.35.4/arch/x86/kvm/emulate.c +--- linux-2.6.35.4/arch/x86/kvm/emulate.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kvm/emulate.c 2010-09-17 20:12:09.000000000 -0400 +@@ -88,11 +88,11 @@ + #define Src2CL (1<<29) + #define Src2ImmByte (2<<29) + #define Src2One (3<<29) +-#define Src2Imm16 (4<<29) +-#define Src2Mem16 (5<<29) /* Used for Ep encoding. First argument has to be ++#define Src2Imm16 (4U<<29) ++#define Src2Mem16 (5U<<29) /* Used for Ep encoding. First argument has to be + in memory and second argument is located + immediately after the first one in memory. */ +-#define Src2Mask (7<<29) ++#define Src2Mask (7U<<29) + + enum { + Group1_80, Group1_81, Group1_82, Group1_83, +@@ -446,6 +446,7 @@ static u32 group2_table[] = { + + #define ____emulate_2op(_op, _src, _dst, _eflags, _x, _y, _suffix) \ + do { \ ++ unsigned long _tmp; \ + __asm__ __volatile__ ( \ + _PRE_EFLAGS("0", "4", "2") \ + _op _suffix " %"_x"3,%1; " \ +@@ -459,8 +460,6 @@ static u32 group2_table[] = { + /* Raw emulation: instruction has two explicit operands. 
*/ + #define __emulate_2op_nobyte(_op,_src,_dst,_eflags,_wx,_wy,_lx,_ly,_qx,_qy) \ + do { \ +- unsigned long _tmp; \ +- \ + switch ((_dst).bytes) { \ + case 2: \ + ____emulate_2op(_op,_src,_dst,_eflags,_wx,_wy,"w"); \ +@@ -476,7 +475,6 @@ static u32 group2_table[] = { + + #define __emulate_2op(_op,_src,_dst,_eflags,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \ + do { \ +- unsigned long _tmp; \ + switch ((_dst).bytes) { \ + case 1: \ + ____emulate_2op(_op,_src,_dst,_eflags,_bx,_by,"b"); \ +diff -urNp linux-2.6.35.4/arch/x86/kvm/lapic.c linux-2.6.35.4/arch/x86/kvm/lapic.c +--- linux-2.6.35.4/arch/x86/kvm/lapic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kvm/lapic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -52,7 +52,7 @@ + #define APIC_BUS_CYCLE_NS 1 + + /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */ +-#define apic_debug(fmt, arg...) ++#define apic_debug(fmt, arg...) do {} while (0) + + #define APIC_LVT_NUM 6 + /* 14 is the version for Xeon and Pentium 8.4.8*/ +diff -urNp linux-2.6.35.4/arch/x86/kvm/svm.c linux-2.6.35.4/arch/x86/kvm/svm.c +--- linux-2.6.35.4/arch/x86/kvm/svm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kvm/svm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2796,7 +2796,11 @@ static void reload_tss(struct kvm_vcpu * + int cpu = raw_smp_processor_id(); + + struct svm_cpu_data *sd = per_cpu(svm_data, cpu); ++ ++ pax_open_kernel(); + sd->tss_desc->type = 9; /* available 32/64-bit TSS */ ++ pax_close_kernel(); ++ + load_TR_desc(); + } + +@@ -3337,7 +3341,7 @@ static void svm_fpu_deactivate(struct kv + update_cr0_intercept(svm); + } + +-static struct kvm_x86_ops svm_x86_ops = { ++static const struct kvm_x86_ops svm_x86_ops = { + .cpu_has_kvm_support = has_svm, + .disabled_by_bios = is_disabled, + .hardware_setup = svm_hardware_setup, +diff -urNp linux-2.6.35.4/arch/x86/kvm/vmx.c linux-2.6.35.4/arch/x86/kvm/vmx.c +--- linux-2.6.35.4/arch/x86/kvm/vmx.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kvm/vmx.c 2010-09-17 20:12:09.000000000 -0400 +@@ -653,7 +653,11 @@ static void reload_tss(void) + + native_store_gdt(&gdt); + descs = (void *)gdt.address; ++ ++ pax_open_kernel(); + descs[GDT_ENTRY_TSS].type = 9; /* available TSS */ ++ pax_close_kernel(); ++ + load_TR_desc(); + } + +@@ -1550,8 +1554,11 @@ static __init int hardware_setup(void) + if (!cpu_has_vmx_flexpriority()) + flexpriority_enabled = 0; + +- if (!cpu_has_vmx_tpr_shadow()) +- kvm_x86_ops->update_cr8_intercept = NULL; ++ if (!cpu_has_vmx_tpr_shadow()) { ++ pax_open_kernel(); ++ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL; ++ pax_close_kernel(); ++ } + + if (enable_ept && !cpu_has_vmx_ept_2m_page()) + kvm_disable_largepages(); +@@ -2533,7 +2540,7 @@ static int vmx_vcpu_setup(struct vcpu_vm + vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */ + + asm("mov $.Lkvm_vmx_return, %0" : "=r"(kvm_vmx_return)); +- vmcs_writel(HOST_RIP, kvm_vmx_return); /* 22.2.5 */ ++ vmcs_writel(HOST_RIP, ktla_ktva(kvm_vmx_return)); /* 22.2.5 */ + vmcs_write32(VM_EXIT_MSR_STORE_COUNT, 0); + vmcs_write32(VM_EXIT_MSR_LOAD_COUNT, 0); + vmcs_write64(VM_EXIT_MSR_LOAD_ADDR, __pa(vmx->msr_autoload.host)); +@@ -3909,6 +3916,12 @@ static void vmx_vcpu_run(struct kvm_vcpu + "jmp .Lkvm_vmx_return \n\t" + ".Llaunched: " __ex(ASM_VMX_VMRESUME) "\n\t" + ".Lkvm_vmx_return: " ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ "ljmp %[cs],$.Lkvm_vmx_return2\n\t" ++ ".Lkvm_vmx_return2: " ++#endif ++ + /* Save guest registers, load host registers, keep flags */ + "xchg %0, 
(%%"R"sp) \n\t" + "mov %%"R"ax, %c[rax](%0) \n\t" +@@ -3955,8 +3968,13 @@ static void vmx_vcpu_run(struct kvm_vcpu + [r15]"i"(offsetof(struct vcpu_vmx, vcpu.arch.regs[VCPU_REGS_R15])), + #endif + [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)) ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ ,[cs]"i"(__KERNEL_CS) ++#endif ++ + : "cc", "memory" +- , R"bx", R"di", R"si" ++ , R"ax", R"bx", R"di", R"si" + #ifdef CONFIG_X86_64 + , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" + #endif +@@ -3970,7 +3988,7 @@ static void vmx_vcpu_run(struct kvm_vcpu + if (vmx->rmode.irq.pending) + fixup_rmode_irq(vmx); + +- asm("mov %0, %%ds; mov %0, %%es" : : "r"(__USER_DS)); ++ asm("mov %0, %%ds; mov %0, %%es" : : "r"(__KERNEL_DS)); + vmx->launched = 1; + + vmx_complete_interrupts(vmx); +@@ -4191,7 +4209,7 @@ static void vmx_set_supported_cpuid(u32 + { + } + +-static struct kvm_x86_ops vmx_x86_ops = { ++static const struct kvm_x86_ops vmx_x86_ops = { + .cpu_has_kvm_support = cpu_has_kvm_support, + .disabled_by_bios = vmx_disabled_by_bios, + .hardware_setup = hardware_setup, +diff -urNp linux-2.6.35.4/arch/x86/kvm/x86.c linux-2.6.35.4/arch/x86/kvm/x86.c +--- linux-2.6.35.4/arch/x86/kvm/x86.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/kvm/x86.c 2010-09-17 20:12:09.000000000 -0400 +@@ -86,7 +86,7 @@ static void update_cr8_intercept(struct + static int kvm_dev_ioctl_get_supported_cpuid(struct kvm_cpuid2 *cpuid, + struct kvm_cpuid_entry2 __user *entries); + +-struct kvm_x86_ops *kvm_x86_ops; ++const struct kvm_x86_ops *kvm_x86_ops; + EXPORT_SYMBOL_GPL(kvm_x86_ops); + + int ignore_msrs = 0; +@@ -112,38 +112,38 @@ static struct kvm_shared_msrs_global __r + static DEFINE_PER_CPU(struct kvm_shared_msrs, shared_msrs); + + struct kvm_stats_debugfs_item debugfs_entries[] = { +- { "pf_fixed", VCPU_STAT(pf_fixed) }, +- { "pf_guest", VCPU_STAT(pf_guest) }, +- { "tlb_flush", VCPU_STAT(tlb_flush) }, +- { "invlpg", VCPU_STAT(invlpg) }, +- { "exits", VCPU_STAT(exits) }, +- { "io_exits", VCPU_STAT(io_exits) }, +- { "mmio_exits", VCPU_STAT(mmio_exits) }, +- { "signal_exits", VCPU_STAT(signal_exits) }, +- { "irq_window", VCPU_STAT(irq_window_exits) }, +- { "nmi_window", VCPU_STAT(nmi_window_exits) }, +- { "halt_exits", VCPU_STAT(halt_exits) }, +- { "halt_wakeup", VCPU_STAT(halt_wakeup) }, +- { "hypercalls", VCPU_STAT(hypercalls) }, +- { "request_irq", VCPU_STAT(request_irq_exits) }, +- { "irq_exits", VCPU_STAT(irq_exits) }, +- { "host_state_reload", VCPU_STAT(host_state_reload) }, +- { "efer_reload", VCPU_STAT(efer_reload) }, +- { "fpu_reload", VCPU_STAT(fpu_reload) }, +- { "insn_emulation", VCPU_STAT(insn_emulation) }, +- { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) }, +- { "irq_injections", VCPU_STAT(irq_injections) }, +- { "nmi_injections", VCPU_STAT(nmi_injections) }, +- { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped) }, +- { "mmu_pte_write", VM_STAT(mmu_pte_write) }, +- { "mmu_pte_updated", VM_STAT(mmu_pte_updated) }, +- { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped) }, +- { "mmu_flooded", VM_STAT(mmu_flooded) }, +- { "mmu_recycled", VM_STAT(mmu_recycled) }, +- { "mmu_cache_miss", VM_STAT(mmu_cache_miss) }, +- { "mmu_unsync", VM_STAT(mmu_unsync) }, +- { "remote_tlb_flush", VM_STAT(remote_tlb_flush) }, +- { "largepages", VM_STAT(lpages) }, ++ { "pf_fixed", VCPU_STAT(pf_fixed), NULL }, ++ { "pf_guest", VCPU_STAT(pf_guest), NULL }, ++ { "tlb_flush", VCPU_STAT(tlb_flush), NULL }, ++ { "invlpg", VCPU_STAT(invlpg), NULL }, ++ { "exits", VCPU_STAT(exits), NULL }, ++ { 
"io_exits", VCPU_STAT(io_exits), NULL }, ++ { "mmio_exits", VCPU_STAT(mmio_exits), NULL }, ++ { "signal_exits", VCPU_STAT(signal_exits), NULL }, ++ { "irq_window", VCPU_STAT(irq_window_exits), NULL }, ++ { "nmi_window", VCPU_STAT(nmi_window_exits), NULL }, ++ { "halt_exits", VCPU_STAT(halt_exits), NULL }, ++ { "halt_wakeup", VCPU_STAT(halt_wakeup), NULL }, ++ { "hypercalls", VCPU_STAT(hypercalls), NULL }, ++ { "request_irq", VCPU_STAT(request_irq_exits), NULL }, ++ { "irq_exits", VCPU_STAT(irq_exits), NULL }, ++ { "host_state_reload", VCPU_STAT(host_state_reload), NULL }, ++ { "efer_reload", VCPU_STAT(efer_reload), NULL }, ++ { "fpu_reload", VCPU_STAT(fpu_reload), NULL }, ++ { "insn_emulation", VCPU_STAT(insn_emulation), NULL }, ++ { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail), NULL }, ++ { "irq_injections", VCPU_STAT(irq_injections), NULL }, ++ { "nmi_injections", VCPU_STAT(nmi_injections), NULL }, ++ { "mmu_shadow_zapped", VM_STAT(mmu_shadow_zapped), NULL }, ++ { "mmu_pte_write", VM_STAT(mmu_pte_write), NULL }, ++ { "mmu_pte_updated", VM_STAT(mmu_pte_updated), NULL }, ++ { "mmu_pde_zapped", VM_STAT(mmu_pde_zapped), NULL }, ++ { "mmu_flooded", VM_STAT(mmu_flooded), NULL }, ++ { "mmu_recycled", VM_STAT(mmu_recycled), NULL }, ++ { "mmu_cache_miss", VM_STAT(mmu_cache_miss), NULL }, ++ { "mmu_unsync", VM_STAT(mmu_unsync), NULL }, ++ { "remote_tlb_flush", VM_STAT(remote_tlb_flush), NULL }, ++ { "largepages", VM_STAT(lpages), NULL }, + { NULL } + }; + +@@ -1672,6 +1672,8 @@ long kvm_arch_dev_ioctl(struct file *fil + if (n < msr_list.nmsrs) + goto out; + r = -EFAULT; ++ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save)) ++ goto out; + if (copy_to_user(user_msr_list->indices, &msrs_to_save, + num_msrs_to_save * sizeof(u32))) + goto out; +@@ -2103,7 +2105,7 @@ static int kvm_vcpu_ioctl_set_lapic(stru + static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, + struct kvm_interrupt *irq) + { +- if (irq->irq < 0 || irq->irq >= 256) ++ if (irq->irq >= 256) + return -EINVAL; + if (irqchip_in_kernel(vcpu->kvm)) + return -ENXIO; +@@ -4070,10 +4072,10 @@ void kvm_after_handle_nmi(struct kvm_vcp + } + EXPORT_SYMBOL_GPL(kvm_after_handle_nmi); + +-int kvm_arch_init(void *opaque) ++int kvm_arch_init(const void *opaque) + { + int r; +- struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque; ++ const struct kvm_x86_ops *ops = (const struct kvm_x86_ops *)opaque; + + if (kvm_x86_ops) { + printk(KERN_ERR "kvm: already loaded the other module\n"); +diff -urNp linux-2.6.35.4/arch/x86/lib/checksum_32.S linux-2.6.35.4/arch/x86/lib/checksum_32.S +--- linux-2.6.35.4/arch/x86/lib/checksum_32.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/checksum_32.S 2010-09-17 20:12:09.000000000 -0400 +@@ -28,7 +28,8 @@ + #include <linux/linkage.h> + #include <asm/dwarf2.h> + #include <asm/errno.h> +- ++#include <asm/segment.h> ++ + /* + * computes a partial checksum, e.g. 
for TCP/UDP fragments + */ +@@ -304,9 +305,22 @@ unsigned int csum_partial_copy_generic ( + + #define ARGBASE 16 + #define FP 12 +- +-ENTRY(csum_partial_copy_generic) ++ ++ENTRY(csum_partial_copy_generic_to_user) + CFI_STARTPROC ++ pushl $(__USER_DS) ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %es ++ CFI_ADJUST_CFA_OFFSET -4 ++ jmp csum_partial_copy_generic ++ ++ENTRY(csum_partial_copy_generic_from_user) ++ pushl $(__USER_DS) ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %ds ++ CFI_ADJUST_CFA_OFFSET -4 ++ ++ENTRY(csum_partial_copy_generic) + subl $4,%esp + CFI_ADJUST_CFA_OFFSET 4 + pushl %edi +@@ -331,7 +345,7 @@ ENTRY(csum_partial_copy_generic) + jmp 4f + SRC(1: movw (%esi), %bx ) + addl $2, %esi +-DST( movw %bx, (%edi) ) ++DST( movw %bx, %es:(%edi) ) + addl $2, %edi + addw %bx, %ax + adcl $0, %eax +@@ -343,30 +357,30 @@ DST( movw %bx, (%edi) ) + SRC(1: movl (%esi), %ebx ) + SRC( movl 4(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, (%edi) ) ++DST( movl %ebx, %es:(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 4(%edi) ) ++DST( movl %edx, %es:4(%edi) ) + + SRC( movl 8(%esi), %ebx ) + SRC( movl 12(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 8(%edi) ) ++DST( movl %ebx, %es:8(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 12(%edi) ) ++DST( movl %edx, %es:12(%edi) ) + + SRC( movl 16(%esi), %ebx ) + SRC( movl 20(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 16(%edi) ) ++DST( movl %ebx, %es:16(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 20(%edi) ) ++DST( movl %edx, %es:20(%edi) ) + + SRC( movl 24(%esi), %ebx ) + SRC( movl 28(%esi), %edx ) + adcl %ebx, %eax +-DST( movl %ebx, 24(%edi) ) ++DST( movl %ebx, %es:24(%edi) ) + adcl %edx, %eax +-DST( movl %edx, 28(%edi) ) ++DST( movl %edx, %es:28(%edi) ) + + lea 32(%esi), %esi + lea 32(%edi), %edi +@@ -380,7 +394,7 @@ DST( movl %edx, 28(%edi) ) + shrl $2, %edx # This clears CF + SRC(3: movl (%esi), %ebx ) + adcl %ebx, %eax +-DST( movl %ebx, (%edi) ) ++DST( movl %ebx, %es:(%edi) ) + lea 4(%esi), %esi + lea 4(%edi), %edi + dec %edx +@@ -392,12 +406,12 @@ DST( movl %ebx, (%edi) ) + jb 5f + SRC( movw (%esi), %cx ) + leal 2(%esi), %esi +-DST( movw %cx, (%edi) ) ++DST( movw %cx, %es:(%edi) ) + leal 2(%edi), %edi + je 6f + shll $16,%ecx + SRC(5: movb (%esi), %cl ) +-DST( movb %cl, (%edi) ) ++DST( movb %cl, %es:(%edi) ) + 6: addl %ecx, %eax + adcl $0, %eax + 7: +@@ -408,7 +422,7 @@ DST( movb %cl, (%edi) ) + + 6001: + movl ARGBASE+20(%esp), %ebx # src_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + + # zero the complete destination - computing the rest + # is too much work +@@ -421,11 +435,19 @@ DST( movb %cl, (%edi) ) + + 6002: + movl ARGBASE+24(%esp), %ebx # dst_err_ptr +- movl $-EFAULT,(%ebx) ++ movl $-EFAULT,%ss:(%ebx) + jmp 5000b + + .previous + ++ pushl %ss ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %ds ++ CFI_ADJUST_CFA_OFFSET -4 ++ pushl %ss ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %es ++ CFI_ADJUST_CFA_OFFSET -4 + popl %ebx + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE ebx +@@ -439,26 +461,41 @@ DST( movb %cl, (%edi) ) + CFI_ADJUST_CFA_OFFSET -4 + ret + CFI_ENDPROC +-ENDPROC(csum_partial_copy_generic) ++ENDPROC(csum_partial_copy_generic_to_user) + + #else + + /* Version for PentiumII/PPro */ + + #define ROUND1(x) \ ++ nop; nop; nop; \ + SRC(movl x(%esi), %ebx ) ; \ + addl %ebx, %eax ; \ +- DST(movl %ebx, x(%edi) ) ; ++ DST(movl %ebx, %es:x(%edi)) ; + + #define ROUND(x) \ ++ nop; nop; nop; \ + SRC(movl x(%esi), %ebx ) ; \ + adcl %ebx, %eax ; \ +- DST(movl %ebx, x(%edi) ) ; ++ DST(movl %ebx, %es:x(%edi)) ; + + #define ARGBASE 12 +- +-ENTRY(csum_partial_copy_generic) ++ 
++ENTRY(csum_partial_copy_generic_to_user) + CFI_STARTPROC ++ pushl $(__USER_DS) ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %es ++ CFI_ADJUST_CFA_OFFSET -4 ++ jmp csum_partial_copy_generic ++ ++ENTRY(csum_partial_copy_generic_from_user) ++ pushl $(__USER_DS) ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %ds ++ CFI_ADJUST_CFA_OFFSET -4 ++ ++ENTRY(csum_partial_copy_generic) + pushl %ebx + CFI_ADJUST_CFA_OFFSET 4 + CFI_REL_OFFSET ebx, 0 +@@ -482,7 +519,7 @@ ENTRY(csum_partial_copy_generic) + subl %ebx, %edi + lea -1(%esi),%edx + andl $-32,%edx +- lea 3f(%ebx,%ebx), %ebx ++ lea 3f(%ebx,%ebx,2), %ebx + testl %esi, %esi + jmp *%ebx + 1: addl $64,%esi +@@ -503,19 +540,19 @@ ENTRY(csum_partial_copy_generic) + jb 5f + SRC( movw (%esi), %dx ) + leal 2(%esi), %esi +-DST( movw %dx, (%edi) ) ++DST( movw %dx, %es:(%edi) ) + leal 2(%edi), %edi + je 6f + shll $16,%edx + 5: + SRC( movb (%esi), %dl ) +-DST( movb %dl, (%edi) ) ++DST( movb %dl, %es:(%edi) ) + 6: addl %edx, %eax + adcl $0, %eax + 7: + .section .fixup, "ax" + 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + # zero the complete destination (computing the rest is too much work) + movl ARGBASE+8(%esp),%edi # dst + movl ARGBASE+12(%esp),%ecx # len +@@ -523,10 +560,18 @@ DST( movb %dl, (%edi) ) + rep; stosb + jmp 7b + 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr +- movl $-EFAULT, (%ebx) ++ movl $-EFAULT, %ss:(%ebx) + jmp 7b + .previous + ++ pushl %ss ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %ds ++ CFI_ADJUST_CFA_OFFSET -4 ++ pushl %ss ++ CFI_ADJUST_CFA_OFFSET 4 ++ popl %es ++ CFI_ADJUST_CFA_OFFSET -4 + popl %esi + CFI_ADJUST_CFA_OFFSET -4 + CFI_RESTORE esi +@@ -538,7 +583,7 @@ DST( movb %dl, (%edi) ) + CFI_RESTORE ebx + ret + CFI_ENDPROC +-ENDPROC(csum_partial_copy_generic) ++ENDPROC(csum_partial_copy_generic_to_user) + + #undef ROUND + #undef ROUND1 +diff -urNp linux-2.6.35.4/arch/x86/lib/clear_page_64.S linux-2.6.35.4/arch/x86/lib/clear_page_64.S +--- linux-2.6.35.4/arch/x86/lib/clear_page_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/clear_page_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,7 @@ ENDPROC(clear_page) + + #include <asm/cpufeature.h> + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 1: .byte 0xeb /* jmp <disp8> */ + .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */ + 2: +diff -urNp linux-2.6.35.4/arch/x86/lib/copy_page_64.S linux-2.6.35.4/arch/x86/lib/copy_page_64.S +--- linux-2.6.35.4/arch/x86/lib/copy_page_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/copy_page_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -104,7 +104,7 @@ ENDPROC(copy_page) + + #include <asm/cpufeature.h> + +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 1: .byte 0xeb /* jmp <disp8> */ + .byte (copy_page_c - copy_page) - (2f - 1b) /* offset */ + 2: +diff -urNp linux-2.6.35.4/arch/x86/lib/copy_user_64.S linux-2.6.35.4/arch/x86/lib/copy_user_64.S +--- linux-2.6.35.4/arch/x86/lib/copy_user_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/copy_user_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -15,13 +15,14 @@ + #include <asm/asm-offsets.h> + #include <asm/thread_info.h> + #include <asm/cpufeature.h> ++#include <asm/pgtable.h> + + .macro ALTERNATIVE_JUMP feature,orig,alt + 0: + .byte 0xe9 /* 32bit jump */ + .long \orig-1f /* by default jump to orig */ + 1: +- .section .altinstr_replacement,"ax" ++ .section .altinstr_replacement,"a" + 2: .byte 0xe9 /* near jump with 32bit immediate 
*/ + .long \alt-1b /* offset */ /* or alternatively to alt */ + .previous +@@ -64,37 +65,13 @@ + #endif + .endm + +-/* Standard copy_to_user with segment limit checking */ +-ENTRY(_copy_to_user) +- CFI_STARTPROC +- GET_THREAD_INFO(%rax) +- movq %rdi,%rcx +- addq %rdx,%rcx +- jc bad_to_user +- cmpq TI_addr_limit(%rax),%rcx +- jae bad_to_user +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string +- CFI_ENDPROC +-ENDPROC(_copy_to_user) +- +-/* Standard copy_from_user with segment limit checking */ +-ENTRY(_copy_from_user) +- CFI_STARTPROC +- GET_THREAD_INFO(%rax) +- movq %rsi,%rcx +- addq %rdx,%rcx +- jc bad_from_user +- cmpq TI_addr_limit(%rax),%rcx +- jae bad_from_user +- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,copy_user_generic_unrolled,copy_user_generic_string +- CFI_ENDPROC +-ENDPROC(_copy_from_user) +- + .section .fixup,"ax" + /* must zero dest */ + ENTRY(bad_from_user) + bad_from_user: + CFI_STARTPROC ++ testl %edx,%edx ++ js bad_to_user + movl %edx,%ecx + xorl %eax,%eax + rep +diff -urNp linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S +--- linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/copy_user_nocache_64.S 2010-09-17 20:12:09.000000000 -0400 +@@ -14,6 +14,7 @@ + #include <asm/current.h> + #include <asm/asm-offsets.h> + #include <asm/thread_info.h> ++#include <asm/pgtable.h> + + .macro ALIGN_DESTINATION + #ifdef FIX_ALIGNMENT +@@ -50,6 +51,15 @@ + */ + ENTRY(__copy_user_nocache) + CFI_STARTPROC ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%rcx ++ cmp %rcx,%rsi ++ jae 1f ++ add %rcx,%rsi ++1: ++#endif ++ + cmpl $8,%edx + jb 20f /* less then 8 bytes, go to byte copy loop */ + ALIGN_DESTINATION +diff -urNp linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c +--- linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/csum-wrappers_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -52,6 +52,8 @@ csum_partial_copy_from_user(const void _ + len -= 2; + } + } ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; + isum = csum_partial_copy_generic((__force const void *)src, + dst, len, isum, errp, NULL); + if (unlikely(*errp)) +@@ -105,6 +107,8 @@ csum_partial_copy_to_user(const void *sr + } + + *errp = 0; ++ if ((unsigned long)dst < PAX_USER_SHADOW_BASE) ++ dst += PAX_USER_SHADOW_BASE; + return csum_partial_copy_generic(src, (void __force *)dst, + len, isum, NULL, errp); + } +diff -urNp linux-2.6.35.4/arch/x86/lib/getuser.S linux-2.6.35.4/arch/x86/lib/getuser.S +--- linux-2.6.35.4/arch/x86/lib/getuser.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/getuser.S 2010-09-17 20:12:09.000000000 -0400 +@@ -33,14 +33,38 @@ + #include <asm/asm-offsets.h> + #include <asm/thread_info.h> + #include <asm/asm.h> ++#include <asm/segment.h> ++#include <asm/pgtable.h> + + .text + ENTRY(__get_user_1) + CFI_STARTPROC ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ + 1: movzb (%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_32) && 
defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ pop %ds ++#endif ++ + xor %eax,%eax + ret + CFI_ENDPROC +@@ -49,11 +73,33 @@ ENDPROC(__get_user_1) + ENTRY(__get_user_2) + CFI_STARTPROC + add $1,%_ASM_AX ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else + jc bad_get_user + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ + 2: movzwl -1(%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ pop %ds ++#endif ++ + xor %eax,%eax + ret + CFI_ENDPROC +@@ -62,11 +108,33 @@ ENDPROC(__get_user_2) + ENTRY(__get_user_4) + CFI_STARTPROC + add $3,%_ASM_AX ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else + jc bad_get_user + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ ++#endif ++ + 3: mov -3(%_ASM_AX),%edx ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ pop %ds ++#endif ++ + xor %eax,%eax + ret + CFI_ENDPROC +@@ -80,6 +148,15 @@ ENTRY(__get_user_8) + GET_THREAD_INFO(%_ASM_DX) + cmp TI_addr_limit(%_ASM_DX),%_ASM_AX + jae bad_get_user ++ ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ mov $PAX_USER_SHADOW_BASE,%_ASM_DX ++ cmp %_ASM_DX,%_ASM_AX ++ jae 1234f ++ add %_ASM_DX,%_ASM_AX ++1234: ++#endif ++ + 4: movq -7(%_ASM_AX),%_ASM_DX + xor %eax,%eax + ret +@@ -89,6 +166,12 @@ ENDPROC(__get_user_8) + + bad_get_user: + CFI_STARTPROC ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ pop %ds ++#endif ++ + xor %edx,%edx + mov $(-EFAULT),%_ASM_AX + ret +diff -urNp linux-2.6.35.4/arch/x86/lib/insn.c linux-2.6.35.4/arch/x86/lib/insn.c +--- linux-2.6.35.4/arch/x86/lib/insn.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/insn.c 2010-09-17 20:12:09.000000000 -0400 +@@ -21,6 +21,7 @@ + #include <linux/string.h> + #include <asm/inat.h> + #include <asm/insn.h> ++#include <asm/pgtable_types.h> + + #define get_next(t, insn) \ + ({t r; r = *(t*)insn->next_byte; insn->next_byte += sizeof(t); r; }) +@@ -40,8 +41,8 @@ + void insn_init(struct insn *insn, const void *kaddr, int x86_64) + { + memset(insn, 0, sizeof(*insn)); +- insn->kaddr = kaddr; +- insn->next_byte = kaddr; ++ insn->kaddr = ktla_ktva(kaddr); ++ insn->next_byte = ktla_ktva(kaddr); + insn->x86_64 = x86_64 ? 
1 : 0; + insn->opnd_bytes = 4; + if (x86_64) +diff -urNp linux-2.6.35.4/arch/x86/lib/mmx_32.c linux-2.6.35.4/arch/x86/lib/mmx_32.c +--- linux-2.6.35.4/arch/x86/lib/mmx_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/mmx_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void * + { + void *p; + int i; ++ unsigned long cr0; + + if (unlikely(in_interrupt())) + return __memcpy(to, from, len); +@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void * + kernel_fpu_begin(); + + __asm__ __volatile__ ( +- "1: prefetch (%0)\n" /* This set is 28 bytes */ +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" /* This set is 28 bytes */ ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from)); ++ : "=&r" (cr0) : "r" (from) : "ax"); + + for ( ; i > 5; i--) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movq 8(%0), %%mm1\n" +- " movq 16(%0), %%mm2\n" +- " movq 24(%0), %%mm3\n" +- " movq %%mm0, (%1)\n" +- " movq %%mm1, 8(%1)\n" +- " movq %%mm2, 16(%1)\n" +- " movq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm0\n" +- " movq 40(%0), %%mm1\n" +- " movq 48(%0), %%mm2\n" +- " movq 56(%0), %%mm3\n" +- " movq %%mm0, 32(%1)\n" +- " movq %%mm1, 40(%1)\n" +- " movq %%mm2, 48(%1)\n" +- " movq %%mm3, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movq 8(%1), %%mm1\n" ++ " movq 16(%1), %%mm2\n" ++ " movq 24(%1), %%mm3\n" ++ " movq %%mm0, (%2)\n" ++ " movq %%mm1, 8(%2)\n" ++ " movq %%mm2, 16(%2)\n" ++ " movq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm0\n" ++ " movq 40(%1), %%mm1\n" ++ " movq 48(%1), %%mm2\n" ++ " movq 56(%1), %%mm3\n" ++ " movq %%mm0, 32(%2)\n" ++ " movq %%mm1, 40(%2)\n" ++ " movq %%mm2, 48(%2)\n" ++ " movq %%mm3, 56(%2)\n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from), "r" (to) : "memory"); ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +@@ -158,6 +187,7 @@ static void fast_clear_page(void *page) + static void fast_copy_page(void *to, void *from) + { + int i; ++ unsigned long cr0; + + kernel_fpu_begin(); + +@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, voi + * but that is for later. 
-AV + */ + __asm__ __volatile__( +- "1: prefetch (%0)\n" +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from)); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); + + for (i = 0; i < (4096-320)/64; i++) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movntq %%mm0, (%1)\n" +- " movq 8(%0), %%mm1\n" +- " movntq %%mm1, 8(%1)\n" +- " movq 16(%0), %%mm2\n" +- " movntq %%mm2, 16(%1)\n" +- " movq 24(%0), %%mm3\n" +- " movntq %%mm3, 24(%1)\n" +- " movq 32(%0), %%mm4\n" +- " movntq %%mm4, 32(%1)\n" +- " movq 40(%0), %%mm5\n" +- " movntq %%mm5, 40(%1)\n" +- " movq 48(%0), %%mm6\n" +- " movntq %%mm6, 48(%1)\n" +- " movq 56(%0), %%mm7\n" +- " movntq %%mm7, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movntq %%mm0, (%2)\n" ++ " movq 8(%1), %%mm1\n" ++ " movntq %%mm1, 8(%2)\n" ++ " movq 16(%1), %%mm2\n" ++ " movntq %%mm2, 16(%2)\n" ++ " movq 24(%1), %%mm3\n" ++ " movntq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm4\n" ++ " movntq %%mm4, 32(%2)\n" ++ " movq 40(%1), %%mm5\n" ++ " movntq %%mm5, 40(%2)\n" ++ " movq 48(%1), %%mm6\n" ++ " movntq %%mm6, 48(%2)\n" ++ " movq 56(%1), %%mm7\n" ++ " movntq %%mm7, 56(%2)\n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory"); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +@@ -280,47 +338,76 @@ static void fast_clear_page(void *page) + static void fast_copy_page(void *to, void *from) + { + int i; ++ unsigned long cr0; + + kernel_fpu_begin(); + + __asm__ __volatile__ ( +- "1: prefetch (%0)\n" +- " prefetch 64(%0)\n" +- " prefetch 128(%0)\n" +- " prefetch 192(%0)\n" +- " prefetch 256(%0)\n" ++ "1: prefetch (%1)\n" ++ " prefetch 64(%1)\n" ++ " prefetch 128(%1)\n" ++ " prefetch 192(%1)\n" ++ " prefetch 256(%1)\n" + "2: \n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ "3: \n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" +- _ASM_EXTABLE(1b, 3b) : : "r" (from)); ++ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax"); + + for (i = 0; i < 4096/64; i++) { + __asm__ __volatile__ ( +- "1: prefetch 320(%0)\n" +- "2: movq (%0), %%mm0\n" +- " movq 8(%0), %%mm1\n" +- " movq 16(%0), %%mm2\n" +- " movq 24(%0), %%mm3\n" +- " movq %%mm0, (%1)\n" +- " movq %%mm1, 8(%1)\n" +- " movq %%mm2, 16(%1)\n" +- " movq 
%%mm3, 24(%1)\n" +- " movq 32(%0), %%mm0\n" +- " movq 40(%0), %%mm1\n" +- " movq 48(%0), %%mm2\n" +- " movq 56(%0), %%mm3\n" +- " movq %%mm0, 32(%1)\n" +- " movq %%mm1, 40(%1)\n" +- " movq %%mm2, 48(%1)\n" +- " movq %%mm3, 56(%1)\n" ++ "1: prefetch 320(%1)\n" ++ "2: movq (%1), %%mm0\n" ++ " movq 8(%1), %%mm1\n" ++ " movq 16(%1), %%mm2\n" ++ " movq 24(%1), %%mm3\n" ++ " movq %%mm0, (%2)\n" ++ " movq %%mm1, 8(%2)\n" ++ " movq %%mm2, 16(%2)\n" ++ " movq %%mm3, 24(%2)\n" ++ " movq 32(%1), %%mm0\n" ++ " movq 40(%1), %%mm1\n" ++ " movq 48(%1), %%mm2\n" ++ " movq 56(%1), %%mm3\n" ++ " movq %%mm0, 32(%2)\n" ++ " movq %%mm1, 40(%2)\n" ++ " movq %%mm2, 48(%2)\n" ++ " movq %%mm3, 56(%2)\n" + ".section .fixup, \"ax\"\n" +- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ "3:\n" ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %%cr0, %0\n" ++ " movl %0, %%eax\n" ++ " andl $0xFFFEFFFF, %%eax\n" ++ " movl %%eax, %%cr0\n" ++#endif ++ ++ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ " movl %0, %%cr0\n" ++#endif ++ + " jmp 2b\n" + ".previous\n" + _ASM_EXTABLE(1b, 3b) +- : : "r" (from), "r" (to) : "memory"); ++ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax"); + + from += 64; + to += 64; +diff -urNp linux-2.6.35.4/arch/x86/lib/putuser.S linux-2.6.35.4/arch/x86/lib/putuser.S +--- linux-2.6.35.4/arch/x86/lib/putuser.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/putuser.S 2010-09-17 20:12:09.000000000 -0400 +@@ -15,7 +15,8 @@ + #include <asm/thread_info.h> + #include <asm/errno.h> + #include <asm/asm.h> +- ++#include <asm/segment.h> ++#include <asm/pgtable.h> + + /* + * __put_user_X +@@ -29,59 +30,162 @@ + * as they get called from within inline assembly. + */ + +-#define ENTER CFI_STARTPROC ; \ +- GET_THREAD_INFO(%_ASM_BX) ++#define ENTER CFI_STARTPROC + #define EXIT ret ; \ + CFI_ENDPROC + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#define _DEST %_ASM_CX,%_ASM_BX ++#else ++#define _DEST %_ASM_CX ++#endif ++ + .text + ENTRY(__put_user_1) + ENTER ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else ++ GET_THREAD_INFO(%_ASM_BX) + cmp TI_addr_limit(%_ASM_BX),%_ASM_CX + jae bad_put_user +-1: movb %al,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++1: movb %al,(_DEST) ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ popl %ds ++#endif ++ + xor %eax,%eax + EXIT + ENDPROC(__put_user_1) + + ENTRY(__put_user_2) + ENTER ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $1,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user +-2: movw %ax,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++2: movw %ax,(_DEST) ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ popl %ds ++#endif ++ + xor %eax,%eax + EXIT + ENDPROC(__put_user_2) + + ENTRY(__put_user_4) + ENTER ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $3,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae 
bad_put_user +-3: movl %eax,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++3: movl %eax,(_DEST) ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ popl %ds ++#endif ++ + xor %eax,%eax + EXIT + ENDPROC(__put_user_4) + + ENTRY(__put_user_8) + ENTER ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl $(__USER_DS) ++ popl %ds ++#else ++ GET_THREAD_INFO(%_ASM_BX) + mov TI_addr_limit(%_ASM_BX),%_ASM_BX + sub $7,%_ASM_BX + cmp %_ASM_BX,%_ASM_CX + jae bad_put_user +-4: mov %_ASM_AX,(%_ASM_CX) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ mov $PAX_USER_SHADOW_BASE,%_ASM_BX ++ cmp %_ASM_BX,%_ASM_CX ++ jb 1234f ++ xor %ebx,%ebx ++1234: ++#endif ++ ++#endif ++ ++4: mov %_ASM_AX,(_DEST) + #ifdef CONFIG_X86_32 +-5: movl %edx,4(%_ASM_CX) ++5: movl %edx,4(_DEST) + #endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ popl %ds ++#endif ++ + xor %eax,%eax + EXIT + ENDPROC(__put_user_8) + + bad_put_user: + CFI_STARTPROC ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ pushl %ss ++ popl %ds ++#endif ++ + movl $-EFAULT,%eax + EXIT + END(bad_put_user) +diff -urNp linux-2.6.35.4/arch/x86/lib/usercopy_32.c linux-2.6.35.4/arch/x86/lib/usercopy_32.c +--- linux-2.6.35.4/arch/x86/lib/usercopy_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/usercopy_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -36,31 +36,38 @@ static inline int __movsl_is_ok(unsigned + * Copy a null terminated string from userspace. + */ + +-#define __do_strncpy_from_user(dst, src, count, res) \ +-do { \ +- int __d0, __d1, __d2; \ +- might_fault(); \ +- __asm__ __volatile__( \ +- " testl %1,%1\n" \ +- " jz 2f\n" \ +- "0: lodsb\n" \ +- " stosb\n" \ +- " testb %%al,%%al\n" \ +- " jz 1f\n" \ +- " decl %1\n" \ +- " jnz 0b\n" \ +- "1: subl %1,%0\n" \ +- "2:\n" \ +- ".section .fixup,\"ax\"\n" \ +- "3: movl %5,%0\n" \ +- " jmp 2b\n" \ +- ".previous\n" \ +- _ASM_EXTABLE(0b,3b) \ +- : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), \ +- "=&D" (__d2) \ +- : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst) \ +- : "memory"); \ +-} while (0) ++static long __do_strncpy_from_user(char *dst, const char __user *src, long count) ++{ ++ int __d0, __d1, __d2; ++ long res = -EFAULT; ++ ++ might_fault(); ++ __asm__ __volatile__( ++ " movw %w10,%%ds\n" ++ " testl %1,%1\n" ++ " jz 2f\n" ++ "0: lodsb\n" ++ " stosb\n" ++ " testb %%al,%%al\n" ++ " jz 1f\n" ++ " decl %1\n" ++ " jnz 0b\n" ++ "1: subl %1,%0\n" ++ "2:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" ++ ".section .fixup,\"ax\"\n" ++ "3: movl %5,%0\n" ++ " jmp 2b\n" ++ ".previous\n" ++ _ASM_EXTABLE(0b,3b) ++ : "=&d"(res), "=&c"(count), "=&a" (__d0), "=&S" (__d1), ++ "=&D" (__d2) ++ : "i"(-EFAULT), "0"(count), "1"(count), "3"(src), "4"(dst), ++ "r"(__USER_DS) ++ : "memory"); ++ return res; ++} + + /** + * __strncpy_from_user: - Copy a NUL terminated string from userspace, with less checking. 
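The UDEREF change recurring through the uaccess hunks above takes two forms. On amd64 (getuser.S, csum-wrappers_64.c) a userland pointer below PAX_USER_SHADOW_BASE is shifted up into the shadow mapping before the kernel dereferences it; on i386 (putuser.S, and the usercopy_32.c routines in this file) %ds/%es are instead reloaded with __USER_DS around the access and restored from %ss on every exit path, including the .fixup handlers. A minimal C sketch of the amd64 remap follows; DEMO_USER_SHADOW_BASE is a hypothetical stand-in for the real constant defined in the patched asm/pgtable.h:

/* Sketch only: mirrors the "cmp $PAX_USER_SHADOW_BASE / jae / add"
 * sequence in getuser.S and the equivalent C form in csum-wrappers_64.c
 * above. The base value below is made up for illustration. */
#include <stdint.h>
#include <stdio.h>

#define DEMO_USER_SHADOW_BASE 0x0000640000000000UL

static uintptr_t demo_uderef_remap(uintptr_t uaddr)
{
	/* Addresses at or above the base are left alone (the "jae 1234f"
	 * case); anything below it is shifted into the shadow area. */
	if (uaddr < DEMO_USER_SHADOW_BASE)
		uaddr += DEMO_USER_SHADOW_BASE;
	return uaddr;
}

int main(void)
{
	uintptr_t p = 0x400000; /* a typical userland text address */
	printf("%#lx -> %#lx\n", (unsigned long)p,
	       (unsigned long)demo_uderef_remap(p));
	return 0;
}

Either way the intent is the same: only the sanctioned uaccess helpers reach userland (via the shadow mapping or the __USER_DS segment), so a stray kernel dereference of a user-supplied pointer faults instead of silently reading or writing user memory.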
+@@ -85,9 +92,7 @@ do { \ + long + __strncpy_from_user(char *dst, const char __user *src, long count) + { +- long res; +- __do_strncpy_from_user(dst, src, count, res); +- return res; ++ return __do_strncpy_from_user(dst, src, count); + } + EXPORT_SYMBOL(__strncpy_from_user); + +@@ -114,7 +119,7 @@ strncpy_from_user(char *dst, const char + { + long res = -EFAULT; + if (access_ok(VERIFY_READ, src, 1)) +- __do_strncpy_from_user(dst, src, count, res); ++ res = __do_strncpy_from_user(dst, src, count); + return res; + } + EXPORT_SYMBOL(strncpy_from_user); +@@ -123,24 +128,30 @@ EXPORT_SYMBOL(strncpy_from_user); + * Zero Userspace + */ + +-#define __do_clear_user(addr,size) \ +-do { \ +- int __d0; \ +- might_fault(); \ +- __asm__ __volatile__( \ +- "0: rep; stosl\n" \ +- " movl %2,%0\n" \ +- "1: rep; stosb\n" \ +- "2:\n" \ +- ".section .fixup,\"ax\"\n" \ +- "3: lea 0(%2,%0,4),%0\n" \ +- " jmp 2b\n" \ +- ".previous\n" \ +- _ASM_EXTABLE(0b,3b) \ +- _ASM_EXTABLE(1b,2b) \ +- : "=&c"(size), "=&D" (__d0) \ +- : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0)); \ +-} while (0) ++static unsigned long __do_clear_user(void __user *addr, unsigned long size) ++{ ++ int __d0; ++ ++ might_fault(); ++ __asm__ __volatile__( ++ " movw %w6,%%es\n" ++ "0: rep; stosl\n" ++ " movl %2,%0\n" ++ "1: rep; stosb\n" ++ "2:\n" ++ " pushl %%ss\n" ++ " popl %%es\n" ++ ".section .fixup,\"ax\"\n" ++ "3: lea 0(%2,%0,4),%0\n" ++ " jmp 2b\n" ++ ".previous\n" ++ _ASM_EXTABLE(0b,3b) ++ _ASM_EXTABLE(1b,2b) ++ : "=&c"(size), "=&D" (__d0) ++ : "r"(size & 3), "0"(size / 4), "1"(addr), "a"(0), ++ "r"(__USER_DS)); ++ return size; ++} + + /** + * clear_user: - Zero a block of memory in user space. +@@ -157,7 +168,7 @@ clear_user(void __user *to, unsigned lon + { + might_fault(); + if (access_ok(VERIFY_WRITE, to, n)) +- __do_clear_user(to, n); ++ n = __do_clear_user(to, n); + return n; + } + EXPORT_SYMBOL(clear_user); +@@ -176,8 +187,7 @@ EXPORT_SYMBOL(clear_user); + unsigned long + __clear_user(void __user *to, unsigned long n) + { +- __do_clear_user(to, n); +- return n; ++ return __do_clear_user(to, n); + } + EXPORT_SYMBOL(__clear_user); + +@@ -200,14 +210,17 @@ long strnlen_user(const char __user *s, + might_fault(); + + __asm__ __volatile__( ++ " movw %w8,%%es\n" + " testl %0, %0\n" + " jz 3f\n" +- " andl %0,%%ecx\n" ++ " movl %0,%%ecx\n" + "0: repne; scasb\n" + " setne %%al\n" + " subl %%ecx,%0\n" + " addl %0,%%eax\n" + "1:\n" ++ " pushl %%ss\n" ++ " popl %%es\n" + ".section .fixup,\"ax\"\n" + "2: xorl %%eax,%%eax\n" + " jmp 1b\n" +@@ -219,7 +232,7 @@ long strnlen_user(const char __user *s, + " .long 0b,2b\n" + ".previous" + :"=&r" (n), "=&D" (s), "=&a" (res), "=&c" (tmp) +- :"0" (n), "1" (s), "2" (0), "3" (mask) ++ :"0" (n), "1" (s), "2" (0), "3" (mask), "r" (__USER_DS) + :"cc"); + return res & mask; + } +@@ -227,10 +240,121 @@ EXPORT_SYMBOL(strnlen_user); + + #ifdef CONFIG_X86_INTEL_USERCOPY + static unsigned long +-__copy_user_intel(void __user *to, const void *from, unsigned long size) ++__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size) ++{ ++ int d0, d1; ++ __asm__ __volatile__( ++ " movw %w6, %%es\n" ++ " .align 2,0x90\n" ++ "1: movl 32(%4), %%eax\n" ++ " cmpl $67, %0\n" ++ " jbe 3f\n" ++ "2: movl 64(%4), %%eax\n" ++ " .align 2,0x90\n" ++ "3: movl 0(%4), %%eax\n" ++ "4: movl 4(%4), %%edx\n" ++ "5: movl %%eax, %%es:0(%3)\n" ++ "6: movl %%edx, %%es:4(%3)\n" ++ "7: movl 8(%4), %%eax\n" ++ "8: movl 12(%4),%%edx\n" ++ "9: movl %%eax, %%es:8(%3)\n" ++ "10: movl %%edx, %%es:12(%3)\n" ++ "11: movl 
16(%4), %%eax\n" ++ "12: movl 20(%4), %%edx\n" ++ "13: movl %%eax, %%es:16(%3)\n" ++ "14: movl %%edx, %%es:20(%3)\n" ++ "15: movl 24(%4), %%eax\n" ++ "16: movl 28(%4), %%edx\n" ++ "17: movl %%eax, %%es:24(%3)\n" ++ "18: movl %%edx, %%es:28(%3)\n" ++ "19: movl 32(%4), %%eax\n" ++ "20: movl 36(%4), %%edx\n" ++ "21: movl %%eax, %%es:32(%3)\n" ++ "22: movl %%edx, %%es:36(%3)\n" ++ "23: movl 40(%4), %%eax\n" ++ "24: movl 44(%4), %%edx\n" ++ "25: movl %%eax, %%es:40(%3)\n" ++ "26: movl %%edx, %%es:44(%3)\n" ++ "27: movl 48(%4), %%eax\n" ++ "28: movl 52(%4), %%edx\n" ++ "29: movl %%eax, %%es:48(%3)\n" ++ "30: movl %%edx, %%es:52(%3)\n" ++ "31: movl 56(%4), %%eax\n" ++ "32: movl 60(%4), %%edx\n" ++ "33: movl %%eax, %%es:56(%3)\n" ++ "34: movl %%edx, %%es:60(%3)\n" ++ " addl $-64, %0\n" ++ " addl $64, %4\n" ++ " addl $64, %3\n" ++ " cmpl $63, %0\n" ++ " ja 1b\n" ++ "35: movl %0, %%eax\n" ++ " shrl $2, %0\n" ++ " andl $3, %%eax\n" ++ " cld\n" ++ "99: rep; movsl\n" ++ "36: movl %%eax, %0\n" ++ "37: rep; movsb\n" ++ "100:\n" ++ " pushl %%ss\n" ++ " popl %%es\n" ++ ".section .fixup,\"ax\"\n" ++ "101: lea 0(%%eax,%0,4),%0\n" ++ " jmp 100b\n" ++ ".previous\n" ++ ".section __ex_table,\"a\"\n" ++ " .align 4\n" ++ " .long 1b,100b\n" ++ " .long 2b,100b\n" ++ " .long 3b,100b\n" ++ " .long 4b,100b\n" ++ " .long 5b,100b\n" ++ " .long 6b,100b\n" ++ " .long 7b,100b\n" ++ " .long 8b,100b\n" ++ " .long 9b,100b\n" ++ " .long 10b,100b\n" ++ " .long 11b,100b\n" ++ " .long 12b,100b\n" ++ " .long 13b,100b\n" ++ " .long 14b,100b\n" ++ " .long 15b,100b\n" ++ " .long 16b,100b\n" ++ " .long 17b,100b\n" ++ " .long 18b,100b\n" ++ " .long 19b,100b\n" ++ " .long 20b,100b\n" ++ " .long 21b,100b\n" ++ " .long 22b,100b\n" ++ " .long 23b,100b\n" ++ " .long 24b,100b\n" ++ " .long 25b,100b\n" ++ " .long 26b,100b\n" ++ " .long 27b,100b\n" ++ " .long 28b,100b\n" ++ " .long 29b,100b\n" ++ " .long 30b,100b\n" ++ " .long 31b,100b\n" ++ " .long 32b,100b\n" ++ " .long 33b,100b\n" ++ " .long 34b,100b\n" ++ " .long 35b,100b\n" ++ " .long 36b,100b\n" ++ " .long 37b,100b\n" ++ " .long 99b,101b\n" ++ ".previous" ++ : "=&c"(size), "=&D" (d0), "=&S" (d1) ++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) ++ : "eax", "edx", "memory"); ++ return size; ++} ++ ++static unsigned long ++__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size) + { + int d0, d1; + __asm__ __volatile__( ++ " movw %w6, %%ds\n" + " .align 2,0x90\n" + "1: movl 32(%4), %%eax\n" + " cmpl $67, %0\n" +@@ -239,36 +363,36 @@ __copy_user_intel(void __user *to, const + " .align 2,0x90\n" + "3: movl 0(%4), %%eax\n" + "4: movl 4(%4), %%edx\n" +- "5: movl %%eax, 0(%3)\n" +- "6: movl %%edx, 4(%3)\n" ++ "5: movl %%eax, %%es:0(%3)\n" ++ "6: movl %%edx, %%es:4(%3)\n" + "7: movl 8(%4), %%eax\n" + "8: movl 12(%4),%%edx\n" +- "9: movl %%eax, 8(%3)\n" +- "10: movl %%edx, 12(%3)\n" ++ "9: movl %%eax, %%es:8(%3)\n" ++ "10: movl %%edx, %%es:12(%3)\n" + "11: movl 16(%4), %%eax\n" + "12: movl 20(%4), %%edx\n" +- "13: movl %%eax, 16(%3)\n" +- "14: movl %%edx, 20(%3)\n" ++ "13: movl %%eax, %%es:16(%3)\n" ++ "14: movl %%edx, %%es:20(%3)\n" + "15: movl 24(%4), %%eax\n" + "16: movl 28(%4), %%edx\n" +- "17: movl %%eax, 24(%3)\n" +- "18: movl %%edx, 28(%3)\n" ++ "17: movl %%eax, %%es:24(%3)\n" ++ "18: movl %%edx, %%es:28(%3)\n" + "19: movl 32(%4), %%eax\n" + "20: movl 36(%4), %%edx\n" +- "21: movl %%eax, 32(%3)\n" +- "22: movl %%edx, 36(%3)\n" ++ "21: movl %%eax, %%es:32(%3)\n" ++ "22: movl %%edx, %%es:36(%3)\n" + "23: movl 40(%4), %%eax\n" + "24: movl 44(%4), %%edx\n" +- "25: 
movl %%eax, 40(%3)\n" +- "26: movl %%edx, 44(%3)\n" ++ "25: movl %%eax, %%es:40(%3)\n" ++ "26: movl %%edx, %%es:44(%3)\n" + "27: movl 48(%4), %%eax\n" + "28: movl 52(%4), %%edx\n" +- "29: movl %%eax, 48(%3)\n" +- "30: movl %%edx, 52(%3)\n" ++ "29: movl %%eax, %%es:48(%3)\n" ++ "30: movl %%edx, %%es:52(%3)\n" + "31: movl 56(%4), %%eax\n" + "32: movl 60(%4), %%edx\n" +- "33: movl %%eax, 56(%3)\n" +- "34: movl %%edx, 60(%3)\n" ++ "33: movl %%eax, %%es:56(%3)\n" ++ "34: movl %%edx, %%es:60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" +@@ -282,6 +406,8 @@ __copy_user_intel(void __user *to, const + "36: movl %%eax, %0\n" + "37: rep; movsb\n" + "100:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "101: lea 0(%%eax,%0,4),%0\n" + " jmp 100b\n" +@@ -328,7 +454,7 @@ __copy_user_intel(void __user *to, const + " .long 99b,101b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) +- : "1"(to), "2"(from), "0"(size) ++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) + : "eax", "edx", "memory"); + return size; + } +@@ -338,6 +464,7 @@ __copy_user_zeroing_intel(void *to, cons + { + int d0, d1; + __asm__ __volatile__( ++ " movw %w6, %%ds\n" + " .align 2,0x90\n" + "0: movl 32(%4), %%eax\n" + " cmpl $67, %0\n" +@@ -346,36 +473,36 @@ __copy_user_zeroing_intel(void *to, cons + " .align 2,0x90\n" + "2: movl 0(%4), %%eax\n" + "21: movl 4(%4), %%edx\n" +- " movl %%eax, 0(%3)\n" +- " movl %%edx, 4(%3)\n" ++ " movl %%eax, %%es:0(%3)\n" ++ " movl %%edx, %%es:4(%3)\n" + "3: movl 8(%4), %%eax\n" + "31: movl 12(%4),%%edx\n" +- " movl %%eax, 8(%3)\n" +- " movl %%edx, 12(%3)\n" ++ " movl %%eax, %%es:8(%3)\n" ++ " movl %%edx, %%es:12(%3)\n" + "4: movl 16(%4), %%eax\n" + "41: movl 20(%4), %%edx\n" +- " movl %%eax, 16(%3)\n" +- " movl %%edx, 20(%3)\n" ++ " movl %%eax, %%es:16(%3)\n" ++ " movl %%edx, %%es:20(%3)\n" + "10: movl 24(%4), %%eax\n" + "51: movl 28(%4), %%edx\n" +- " movl %%eax, 24(%3)\n" +- " movl %%edx, 28(%3)\n" ++ " movl %%eax, %%es:24(%3)\n" ++ " movl %%edx, %%es:28(%3)\n" + "11: movl 32(%4), %%eax\n" + "61: movl 36(%4), %%edx\n" +- " movl %%eax, 32(%3)\n" +- " movl %%edx, 36(%3)\n" ++ " movl %%eax, %%es:32(%3)\n" ++ " movl %%edx, %%es:36(%3)\n" + "12: movl 40(%4), %%eax\n" + "71: movl 44(%4), %%edx\n" +- " movl %%eax, 40(%3)\n" +- " movl %%edx, 44(%3)\n" ++ " movl %%eax, %%es:40(%3)\n" ++ " movl %%edx, %%es:44(%3)\n" + "13: movl 48(%4), %%eax\n" + "81: movl 52(%4), %%edx\n" +- " movl %%eax, 48(%3)\n" +- " movl %%edx, 52(%3)\n" ++ " movl %%eax, %%es:48(%3)\n" ++ " movl %%edx, %%es:52(%3)\n" + "14: movl 56(%4), %%eax\n" + "91: movl 60(%4), %%edx\n" +- " movl %%eax, 56(%3)\n" +- " movl %%edx, 60(%3)\n" ++ " movl %%eax, %%es:56(%3)\n" ++ " movl %%edx, %%es:60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" +@@ -389,6 +516,8 @@ __copy_user_zeroing_intel(void *to, cons + " movl %%eax,%0\n" + "7: rep; movsb\n" + "8:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "9: lea 0(%%eax,%0,4),%0\n" + "16: pushl %0\n" +@@ -423,7 +552,7 @@ __copy_user_zeroing_intel(void *to, cons + " .long 7b,16b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) +- : "1"(to), "2"(from), "0"(size) ++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) + : "eax", "edx", "memory"); + return size; + } +@@ -439,6 +568,7 @@ static unsigned long __copy_user_zeroing + int d0, d1; + + __asm__ __volatile__( ++ " movw %w6, %%ds\n" + " .align 2,0x90\n" + "0: movl 32(%4), %%eax\n" + " cmpl $67, %0\n" +@@ -447,36 +577,36 @@ static unsigned long 
__copy_user_zeroing + " .align 2,0x90\n" + "2: movl 0(%4), %%eax\n" + "21: movl 4(%4), %%edx\n" +- " movnti %%eax, 0(%3)\n" +- " movnti %%edx, 4(%3)\n" ++ " movnti %%eax, %%es:0(%3)\n" ++ " movnti %%edx, %%es:4(%3)\n" + "3: movl 8(%4), %%eax\n" + "31: movl 12(%4),%%edx\n" +- " movnti %%eax, 8(%3)\n" +- " movnti %%edx, 12(%3)\n" ++ " movnti %%eax, %%es:8(%3)\n" ++ " movnti %%edx, %%es:12(%3)\n" + "4: movl 16(%4), %%eax\n" + "41: movl 20(%4), %%edx\n" +- " movnti %%eax, 16(%3)\n" +- " movnti %%edx, 20(%3)\n" ++ " movnti %%eax, %%es:16(%3)\n" ++ " movnti %%edx, %%es:20(%3)\n" + "10: movl 24(%4), %%eax\n" + "51: movl 28(%4), %%edx\n" +- " movnti %%eax, 24(%3)\n" +- " movnti %%edx, 28(%3)\n" ++ " movnti %%eax, %%es:24(%3)\n" ++ " movnti %%edx, %%es:28(%3)\n" + "11: movl 32(%4), %%eax\n" + "61: movl 36(%4), %%edx\n" +- " movnti %%eax, 32(%3)\n" +- " movnti %%edx, 36(%3)\n" ++ " movnti %%eax, %%es:32(%3)\n" ++ " movnti %%edx, %%es:36(%3)\n" + "12: movl 40(%4), %%eax\n" + "71: movl 44(%4), %%edx\n" +- " movnti %%eax, 40(%3)\n" +- " movnti %%edx, 44(%3)\n" ++ " movnti %%eax, %%es:40(%3)\n" ++ " movnti %%edx, %%es:44(%3)\n" + "13: movl 48(%4), %%eax\n" + "81: movl 52(%4), %%edx\n" +- " movnti %%eax, 48(%3)\n" +- " movnti %%edx, 52(%3)\n" ++ " movnti %%eax, %%es:48(%3)\n" ++ " movnti %%edx, %%es:52(%3)\n" + "14: movl 56(%4), %%eax\n" + "91: movl 60(%4), %%edx\n" +- " movnti %%eax, 56(%3)\n" +- " movnti %%edx, 60(%3)\n" ++ " movnti %%eax, %%es:56(%3)\n" ++ " movnti %%edx, %%es:60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" +@@ -491,6 +621,8 @@ static unsigned long __copy_user_zeroing + " movl %%eax,%0\n" + "7: rep; movsb\n" + "8:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "9: lea 0(%%eax,%0,4),%0\n" + "16: pushl %0\n" +@@ -525,7 +657,7 @@ static unsigned long __copy_user_zeroing + " .long 7b,16b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) +- : "1"(to), "2"(from), "0"(size) ++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) + : "eax", "edx", "memory"); + return size; + } +@@ -536,6 +668,7 @@ static unsigned long __copy_user_intel_n + int d0, d1; + + __asm__ __volatile__( ++ " movw %w6, %%ds\n" + " .align 2,0x90\n" + "0: movl 32(%4), %%eax\n" + " cmpl $67, %0\n" +@@ -544,36 +677,36 @@ static unsigned long __copy_user_intel_n + " .align 2,0x90\n" + "2: movl 0(%4), %%eax\n" + "21: movl 4(%4), %%edx\n" +- " movnti %%eax, 0(%3)\n" +- " movnti %%edx, 4(%3)\n" ++ " movnti %%eax, %%es:0(%3)\n" ++ " movnti %%edx, %%es:4(%3)\n" + "3: movl 8(%4), %%eax\n" + "31: movl 12(%4),%%edx\n" +- " movnti %%eax, 8(%3)\n" +- " movnti %%edx, 12(%3)\n" ++ " movnti %%eax, %%es:8(%3)\n" ++ " movnti %%edx, %%es:12(%3)\n" + "4: movl 16(%4), %%eax\n" + "41: movl 20(%4), %%edx\n" +- " movnti %%eax, 16(%3)\n" +- " movnti %%edx, 20(%3)\n" ++ " movnti %%eax, %%es:16(%3)\n" ++ " movnti %%edx, %%es:20(%3)\n" + "10: movl 24(%4), %%eax\n" + "51: movl 28(%4), %%edx\n" +- " movnti %%eax, 24(%3)\n" +- " movnti %%edx, 28(%3)\n" ++ " movnti %%eax, %%es:24(%3)\n" ++ " movnti %%edx, %%es:28(%3)\n" + "11: movl 32(%4), %%eax\n" + "61: movl 36(%4), %%edx\n" +- " movnti %%eax, 32(%3)\n" +- " movnti %%edx, 36(%3)\n" ++ " movnti %%eax, %%es:32(%3)\n" ++ " movnti %%edx, %%es:36(%3)\n" + "12: movl 40(%4), %%eax\n" + "71: movl 44(%4), %%edx\n" +- " movnti %%eax, 40(%3)\n" +- " movnti %%edx, 44(%3)\n" ++ " movnti %%eax, %%es:40(%3)\n" ++ " movnti %%edx, %%es:44(%3)\n" + "13: movl 48(%4), %%eax\n" + "81: movl 52(%4), %%edx\n" +- " movnti %%eax, 48(%3)\n" +- " movnti %%edx, 52(%3)\n" ++ " 
movnti %%eax, %%es:48(%3)\n" ++ " movnti %%edx, %%es:52(%3)\n" + "14: movl 56(%4), %%eax\n" + "91: movl 60(%4), %%edx\n" +- " movnti %%eax, 56(%3)\n" +- " movnti %%edx, 60(%3)\n" ++ " movnti %%eax, %%es:56(%3)\n" ++ " movnti %%edx, %%es:60(%3)\n" + " addl $-64, %0\n" + " addl $64, %4\n" + " addl $64, %3\n" +@@ -588,6 +721,8 @@ static unsigned long __copy_user_intel_n + " movl %%eax,%0\n" + "7: rep; movsb\n" + "8:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" + ".section .fixup,\"ax\"\n" + "9: lea 0(%%eax,%0,4),%0\n" + "16: jmp 8b\n" +@@ -616,7 +751,7 @@ static unsigned long __copy_user_intel_n + " .long 7b,16b\n" + ".previous" + : "=&c"(size), "=&D" (d0), "=&S" (d1) +- : "1"(to), "2"(from), "0"(size) ++ : "1"(to), "2"(from), "0"(size), "r"(__USER_DS) + : "eax", "edx", "memory"); + return size; + } +@@ -629,90 +764,146 @@ static unsigned long __copy_user_intel_n + */ + unsigned long __copy_user_zeroing_intel(void *to, const void __user *from, + unsigned long size); +-unsigned long __copy_user_intel(void __user *to, const void *from, ++unsigned long __generic_copy_to_user_intel(void __user *to, const void *from, ++ unsigned long size); ++unsigned long __generic_copy_from_user_intel(void *to, const void __user *from, + unsigned long size); + unsigned long __copy_user_zeroing_intel_nocache(void *to, + const void __user *from, unsigned long size); + #endif /* CONFIG_X86_INTEL_USERCOPY */ + + /* Generic arbitrary sized copy. */ +-#define __copy_user(to, from, size) \ +-do { \ +- int __d0, __d1, __d2; \ +- __asm__ __volatile__( \ +- " cmp $7,%0\n" \ +- " jbe 1f\n" \ +- " movl %1,%0\n" \ +- " negl %0\n" \ +- " andl $7,%0\n" \ +- " subl %0,%3\n" \ +- "4: rep; movsb\n" \ +- " movl %3,%0\n" \ +- " shrl $2,%0\n" \ +- " andl $3,%3\n" \ +- " .align 2,0x90\n" \ +- "0: rep; movsl\n" \ +- " movl %3,%0\n" \ +- "1: rep; movsb\n" \ +- "2:\n" \ +- ".section .fixup,\"ax\"\n" \ +- "5: addl %3,%0\n" \ +- " jmp 2b\n" \ +- "3: lea 0(%3,%0,4),%0\n" \ +- " jmp 2b\n" \ +- ".previous\n" \ +- ".section __ex_table,\"a\"\n" \ +- " .align 4\n" \ +- " .long 4b,5b\n" \ +- " .long 0b,3b\n" \ +- " .long 1b,2b\n" \ +- ".previous" \ +- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ +- : "3"(size), "0"(size), "1"(to), "2"(from) \ +- : "memory"); \ +-} while (0) +- +-#define __copy_user_zeroing(to, from, size) \ +-do { \ +- int __d0, __d1, __d2; \ +- __asm__ __volatile__( \ +- " cmp $7,%0\n" \ +- " jbe 1f\n" \ +- " movl %1,%0\n" \ +- " negl %0\n" \ +- " andl $7,%0\n" \ +- " subl %0,%3\n" \ +- "4: rep; movsb\n" \ +- " movl %3,%0\n" \ +- " shrl $2,%0\n" \ +- " andl $3,%3\n" \ +- " .align 2,0x90\n" \ +- "0: rep; movsl\n" \ +- " movl %3,%0\n" \ +- "1: rep; movsb\n" \ +- "2:\n" \ +- ".section .fixup,\"ax\"\n" \ +- "5: addl %3,%0\n" \ +- " jmp 6f\n" \ +- "3: lea 0(%3,%0,4),%0\n" \ +- "6: pushl %0\n" \ +- " pushl %%eax\n" \ +- " xorl %%eax,%%eax\n" \ +- " rep; stosb\n" \ +- " popl %%eax\n" \ +- " popl %0\n" \ +- " jmp 2b\n" \ +- ".previous\n" \ +- ".section __ex_table,\"a\"\n" \ +- " .align 4\n" \ +- " .long 4b,5b\n" \ +- " .long 0b,3b\n" \ +- " .long 1b,6b\n" \ +- ".previous" \ +- : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) \ +- : "3"(size), "0"(size), "1"(to), "2"(from) \ +- : "memory"); \ +-} while (0) ++static unsigned long ++__generic_copy_to_user(void __user *to, const void *from, unsigned long size) ++{ ++ int __d0, __d1, __d2; ++ ++ __asm__ __volatile__( ++ " movw %w8,%%es\n" ++ " cmp $7,%0\n" ++ " jbe 1f\n" ++ " movl %1,%0\n" ++ " negl %0\n" ++ " andl $7,%0\n" ++ " subl %0,%3\n" ++ "4: rep; movsb\n" ++ " movl 
%3,%0\n" ++ " shrl $2,%0\n" ++ " andl $3,%3\n" ++ " .align 2,0x90\n" ++ "0: rep; movsl\n" ++ " movl %3,%0\n" ++ "1: rep; movsb\n" ++ "2:\n" ++ " pushl %%ss\n" ++ " popl %%es\n" ++ ".section .fixup,\"ax\"\n" ++ "5: addl %3,%0\n" ++ " jmp 2b\n" ++ "3: lea 0(%3,%0,4),%0\n" ++ " jmp 2b\n" ++ ".previous\n" ++ ".section __ex_table,\"a\"\n" ++ " .align 4\n" ++ " .long 4b,5b\n" ++ " .long 0b,3b\n" ++ " .long 1b,2b\n" ++ ".previous" ++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) ++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) ++ : "memory"); ++ return size; ++} ++ ++static unsigned long ++__generic_copy_from_user(void *to, const void __user *from, unsigned long size) ++{ ++ int __d0, __d1, __d2; ++ ++ __asm__ __volatile__( ++ " movw %w8,%%ds\n" ++ " cmp $7,%0\n" ++ " jbe 1f\n" ++ " movl %1,%0\n" ++ " negl %0\n" ++ " andl $7,%0\n" ++ " subl %0,%3\n" ++ "4: rep; movsb\n" ++ " movl %3,%0\n" ++ " shrl $2,%0\n" ++ " andl $3,%3\n" ++ " .align 2,0x90\n" ++ "0: rep; movsl\n" ++ " movl %3,%0\n" ++ "1: rep; movsb\n" ++ "2:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" ++ ".section .fixup,\"ax\"\n" ++ "5: addl %3,%0\n" ++ " jmp 2b\n" ++ "3: lea 0(%3,%0,4),%0\n" ++ " jmp 2b\n" ++ ".previous\n" ++ ".section __ex_table,\"a\"\n" ++ " .align 4\n" ++ " .long 4b,5b\n" ++ " .long 0b,3b\n" ++ " .long 1b,2b\n" ++ ".previous" ++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) ++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) ++ : "memory"); ++ return size; ++} ++ ++static unsigned long ++__copy_user_zeroing(void *to, const void __user *from, unsigned long size) ++{ ++ int __d0, __d1, __d2; ++ ++ __asm__ __volatile__( ++ " movw %w8,%%ds\n" ++ " cmp $7,%0\n" ++ " jbe 1f\n" ++ " movl %1,%0\n" ++ " negl %0\n" ++ " andl $7,%0\n" ++ " subl %0,%3\n" ++ "4: rep; movsb\n" ++ " movl %3,%0\n" ++ " shrl $2,%0\n" ++ " andl $3,%3\n" ++ " .align 2,0x90\n" ++ "0: rep; movsl\n" ++ " movl %3,%0\n" ++ "1: rep; movsb\n" ++ "2:\n" ++ " pushl %%ss\n" ++ " popl %%ds\n" ++ ".section .fixup,\"ax\"\n" ++ "5: addl %3,%0\n" ++ " jmp 6f\n" ++ "3: lea 0(%3,%0,4),%0\n" ++ "6: pushl %0\n" ++ " pushl %%eax\n" ++ " xorl %%eax,%%eax\n" ++ " rep; stosb\n" ++ " popl %%eax\n" ++ " popl %0\n" ++ " jmp 2b\n" ++ ".previous\n" ++ ".section __ex_table,\"a\"\n" ++ " .align 4\n" ++ " .long 4b,5b\n" ++ " .long 0b,3b\n" ++ " .long 1b,6b\n" ++ ".previous" ++ : "=&c"(size), "=&D" (__d0), "=&S" (__d1), "=r"(__d2) ++ : "3"(size), "0"(size), "1"(to), "2"(from), "r"(__USER_DS) ++ : "memory"); ++ return size; ++} + + unsigned long __copy_to_user_ll(void __user *to, const void *from, + unsigned long n) +@@ -775,9 +966,9 @@ survive: + } + #endif + if (movsl_is_ok(to, from, n)) +- __copy_user(to, from, n); ++ n = __generic_copy_to_user(to, from, n); + else +- n = __copy_user_intel(to, from, n); ++ n = __generic_copy_to_user_intel(to, from, n); + return n; + } + EXPORT_SYMBOL(__copy_to_user_ll); +@@ -786,7 +977,7 @@ unsigned long __copy_from_user_ll(void * + unsigned long n) + { + if (movsl_is_ok(to, from, n)) +- __copy_user_zeroing(to, from, n); ++ n = __copy_user_zeroing(to, from, n); + else + n = __copy_user_zeroing_intel(to, from, n); + return n; +@@ -797,10 +988,9 @@ unsigned long __copy_from_user_ll_nozero + unsigned long n) + { + if (movsl_is_ok(to, from, n)) +- __copy_user(to, from, n); ++ n = __generic_copy_from_user(to, from, n); + else +- n = __copy_user_intel((void __user *)to, +- (const void *)from, n); ++ n = __generic_copy_from_user_intel(to, from, n); + return n; + } + EXPORT_SYMBOL(__copy_from_user_ll_nozero); +@@ -812,9 
+1002,9 @@ unsigned long __copy_from_user_ll_nocach + if (n > 64 && cpu_has_xmm2) + n = __copy_user_zeroing_intel_nocache(to, from, n); + else +- __copy_user_zeroing(to, from, n); ++ n = __copy_user_zeroing(to, from, n); + #else +- __copy_user_zeroing(to, from, n); ++ n = __copy_user_zeroing(to, from, n); + #endif + return n; + } +@@ -827,65 +1017,53 @@ unsigned long __copy_from_user_ll_nocach + if (n > 64 && cpu_has_xmm2) + n = __copy_user_intel_nocache(to, from, n); + else +- __copy_user(to, from, n); ++ n = __generic_copy_from_user(to, from, n); + #else +- __copy_user(to, from, n); ++ n = __generic_copy_from_user(to, from, n); + #endif + return n; + } + EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero); + +-/** +- * copy_to_user: - Copy a block of data into user space. +- * @to: Destination address, in user space. +- * @from: Source address, in kernel space. +- * @n: Number of bytes to copy. +- * +- * Context: User context only. This function may sleep. +- * +- * Copy data from kernel space to user space. +- * +- * Returns number of bytes that could not be copied. +- * On success, this will be zero. +- */ +-unsigned long +-copy_to_user(void __user *to, const void *from, unsigned long n) ++void copy_from_user_overflow(void) + { +- if (access_ok(VERIFY_WRITE, to, n)) +- n = __copy_to_user(to, from, n); +- return n; ++ WARN(1, "Buffer overflow detected!\n"); + } +-EXPORT_SYMBOL(copy_to_user); ++EXPORT_SYMBOL(copy_from_user_overflow); + +-/** +- * copy_from_user: - Copy a block of data from user space. +- * @to: Destination address, in kernel space. +- * @from: Source address, in user space. +- * @n: Number of bytes to copy. +- * +- * Context: User context only. This function may sleep. +- * +- * Copy data from user space to kernel space. +- * +- * Returns number of bytes that could not be copied. +- * On success, this will be zero. +- * +- * If some data could not be copied, this function will pad the copied +- * data to the requested size using zero bytes. 
+- */ +-unsigned long +-_copy_from_user(void *to, const void __user *from, unsigned long n) ++void copy_to_user_overflow(void) + { +- if (access_ok(VERIFY_READ, from, n)) +- n = __copy_from_user(to, from, n); +- else +- memset(to, 0, n); +- return n; ++ WARN(1, "Buffer overflow detected!\n"); + } +-EXPORT_SYMBOL(_copy_from_user); ++EXPORT_SYMBOL(copy_to_user_overflow); + +-void copy_from_user_overflow(void) ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++void __set_fs(mm_segment_t x, int cpu) + { +- WARN(1, "Buffer overflow detected!\n"); ++ unsigned long limit = x.seg; ++ struct desc_struct d; ++ ++ current_thread_info()->addr_limit = x; ++ if (unlikely(paravirt_enabled())) ++ return; ++ ++ if (likely(limit)) ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ pack_descriptor(&d, 0UL, limit, 0xF3, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_DS, &d, DESCTYPE_S); + } +-EXPORT_SYMBOL(copy_from_user_overflow); ++ ++void set_fs(mm_segment_t x) ++{ ++ __set_fs(x, get_cpu()); ++ put_cpu(); ++} ++EXPORT_SYMBOL(copy_from_user); ++#else ++void set_fs(mm_segment_t x) ++{ ++ current_thread_info()->addr_limit = x; ++} ++#endif ++ ++EXPORT_SYMBOL(set_fs); +diff -urNp linux-2.6.35.4/arch/x86/lib/usercopy_64.c linux-2.6.35.4/arch/x86/lib/usercopy_64.c +--- linux-2.6.35.4/arch/x86/lib/usercopy_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/lib/usercopy_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -42,6 +42,8 @@ long + __strncpy_from_user(char *dst, const char __user *src, long count) + { + long res; ++ if ((unsigned long)src < PAX_USER_SHADOW_BASE) ++ src += PAX_USER_SHADOW_BASE; + __do_strncpy_from_user(dst, src, count, res); + return res; + } +@@ -65,6 +67,8 @@ unsigned long __clear_user(void __user * + { + long __d0; + might_fault(); ++ if ((unsigned long)addr < PAX_USER_SHADOW_BASE) ++ addr += PAX_USER_SHADOW_BASE; + /* no memory constraint because it doesn't change any memory gcc knows + about */ + asm volatile( +@@ -151,10 +155,14 @@ EXPORT_SYMBOL(strlen_user); + + unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len) + { +- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { ++ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) { ++ if ((unsigned long)to < PAX_USER_SHADOW_BASE) ++ to += PAX_USER_SHADOW_BASE; ++ if ((unsigned long)from < PAX_USER_SHADOW_BASE) ++ from += PAX_USER_SHADOW_BASE; + return copy_user_generic((__force void *)to, (__force void *)from, len); +- } +- return len; ++ } ++ return len; + } + EXPORT_SYMBOL(copy_in_user); + +diff -urNp linux-2.6.35.4/arch/x86/Makefile linux-2.6.35.4/arch/x86/Makefile +--- linux-2.6.35.4/arch/x86/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/Makefile 2010-09-17 20:12:09.000000000 -0400 +@@ -191,3 +191,12 @@ define archhelp + echo ' FDARGS="..." arguments for the booted kernel' + echo ' FDINITRD=file initrd for the booted kernel' + endef ++ ++define OLD_LD ++ ++*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils. 
++*** Please upgrade your binutils to 2.18 or newer ++endef ++ ++archprepare: ++ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD))) +diff -urNp linux-2.6.35.4/arch/x86/mm/extable.c linux-2.6.35.4/arch/x86/mm/extable.c +--- linux-2.6.35.4/arch/x86/mm/extable.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/extable.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1,14 +1,71 @@ + #include <linux/module.h> + #include <linux/spinlock.h> ++#include <linux/sort.h> + #include <asm/uaccess.h> ++#include <asm/pgtable.h> + ++/* ++ * The exception table needs to be sorted so that the binary ++ * search that we use to find entries in it works properly. ++ * This is used both for the kernel exception table and for ++ * the exception tables of modules that get loaded. ++ */ ++static int cmp_ex(const void *a, const void *b) ++{ ++ const struct exception_table_entry *x = a, *y = b; ++ ++ /* avoid overflow */ ++ if (x->insn > y->insn) ++ return 1; ++ if (x->insn < y->insn) ++ return -1; ++ return 0; ++} ++ ++static void swap_ex(void *a, void *b, int size) ++{ ++ struct exception_table_entry t, *x = a, *y = b; ++ ++ t = *x; ++ ++ pax_open_kernel(); ++ *x = *y; ++ *y = t; ++ pax_close_kernel(); ++} ++ ++void sort_extable(struct exception_table_entry *start, ++ struct exception_table_entry *finish) ++{ ++ sort(start, finish - start, sizeof(struct exception_table_entry), ++ cmp_ex, swap_ex); ++} ++ ++#ifdef CONFIG_MODULES ++/* ++ * If the exception table is sorted, any referring to the module init ++ * will be at the beginning or the end. ++ */ ++void trim_init_extable(struct module *m) ++{ ++ /*trim the beginning*/ ++ while (m->num_exentries && within_module_init(m->extable[0].insn, m)) { ++ m->extable++; ++ m->num_exentries--; ++ } ++ /*trim the end*/ ++ while (m->num_exentries && ++ within_module_init(m->extable[m->num_exentries-1].insn, m)) ++ m->num_exentries--; ++} ++#endif /* CONFIG_MODULES */ + + int fixup_exception(struct pt_regs *regs) + { + const struct exception_table_entry *fixup; + + #ifdef CONFIG_PNPBIOS +- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) { ++ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) { + extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp; + extern u32 pnp_bios_is_utter_crap; + pnp_bios_is_utter_crap = 1; +diff -urNp linux-2.6.35.4/arch/x86/mm/fault.c linux-2.6.35.4/arch/x86/mm/fault.c +--- linux-2.6.35.4/arch/x86/mm/fault.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/fault.c 2010-09-17 20:12:37.000000000 -0400 +@@ -11,10 +11,19 @@ + #include <linux/kprobes.h> /* __kprobes, ... */ + #include <linux/mmiotrace.h> /* kmmio_handler, ... */ + #include <linux/perf_event.h> /* perf_sw_event */ ++#include <linux/unistd.h> ++#include <linux/compiler.h> + + #include <asm/traps.h> /* dotraplinkage, ... */ + #include <asm/pgalloc.h> /* pgd_*(), ... */ + #include <asm/kmemcheck.h> /* kmemcheck_*(), ... 
*/ ++#include <asm/vsyscall.h> ++#include <asm/tlbflush.h> ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++#include <asm/stacktrace.h> ++#include "../kernel/dumpstack.h" ++#endif + + /* + * Page fault error code bits: +@@ -52,7 +61,7 @@ static inline int __kprobes notify_page_ + int ret = 0; + + /* kprobe_running() needs smp_processor_id() */ +- if (kprobes_built_in() && !user_mode_vm(regs)) { ++ if (kprobes_built_in() && !user_mode(regs)) { + preempt_disable(); + if (kprobe_running() && kprobe_fault_handler(regs, 14)) + ret = 1; +@@ -173,6 +182,30 @@ force_sig_info_fault(int si_signo, int s + force_sig_info(si_signo, &info, tsk); + } + ++#ifdef CONFIG_PAX_EMUTRAMP ++static int pax_handle_fetch_fault(struct pt_regs *regs); ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address) ++{ ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ ++ pgd = pgd_offset(mm, address); ++ if (!pgd_present(*pgd)) ++ return NULL; ++ pud = pud_offset(pgd, address); ++ if (!pud_present(*pud)) ++ return NULL; ++ pmd = pmd_offset(pud, address); ++ if (!pmd_present(*pmd)) ++ return NULL; ++ return pmd; ++} ++#endif ++ + DEFINE_SPINLOCK(pgd_lock); + LIST_HEAD(pgd_list); + +@@ -225,11 +258,24 @@ void vmalloc_sync_all(void) + address += PMD_SIZE) { + + unsigned long flags; ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + + spin_lock_irqsave(&pgd_lock, flags); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < NR_CPUS; ++cpu) { ++ pgd_t *pgd = get_cpu_pgd(cpu); ++#else + list_for_each_entry(page, &pgd_list, lru) { +- if (!vmalloc_sync_one(page_address(page), address)) ++ pgd_t *pgd = page_address(page); ++#endif ++ ++ if (!vmalloc_sync_one(pgd, address)) + break; + } + spin_unlock_irqrestore(&pgd_lock, flags); +@@ -259,6 +305,11 @@ static noinline __kprobes int vmalloc_fa + * an interrupt in the middle of a task switch.. + */ + pgd_paddr = read_cr3(); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK)); ++#endif ++ + pmd_k = vmalloc_sync_one(__va(pgd_paddr), address); + if (!pmd_k) + return -1; +@@ -333,15 +384,27 @@ void vmalloc_sync_all(void) + + const pgd_t *pgd_ref = pgd_offset_k(address); + unsigned long flags; ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + + if (pgd_none(*pgd_ref)) + continue; + + spin_lock_irqsave(&pgd_lock, flags); ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < NR_CPUS; ++cpu) { ++ pgd_t *pgd = pgd_offset_cpu(cpu, address); ++#else + list_for_each_entry(page, &pgd_list, lru) { + pgd_t *pgd; + pgd = (pgd_t *)page_address(page) + pgd_index(address); ++#endif ++ + if (pgd_none(*pgd)) + set_pgd(pgd, *pgd_ref); + else +@@ -374,7 +437,14 @@ static noinline __kprobes int vmalloc_fa + * happen within a race in page table update. 
In the later + * case just flush: + */ ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK)); ++ pgd = pgd_offset_cpu(smp_processor_id(), address); ++#else + pgd = pgd_offset(current->active_mm, address); ++#endif ++ + pgd_ref = pgd_offset_k(address); + if (pgd_none(*pgd_ref)) + return -1; +@@ -536,7 +606,7 @@ static int is_errata93(struct pt_regs *r + static int is_errata100(struct pt_regs *regs, unsigned long address) + { + #ifdef CONFIG_X86_64 +- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32)) ++ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32)) + return 1; + #endif + return 0; +@@ -563,7 +633,7 @@ static int is_f00f_bug(struct pt_regs *r + } + + static const char nx_warning[] = KERN_CRIT +-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n"; ++"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n"; + + static void + show_fault_oops(struct pt_regs *regs, unsigned long error_code, +@@ -572,15 +642,26 @@ show_fault_oops(struct pt_regs *regs, un + if (!oops_may_print()) + return; + +- if (error_code & PF_INSTR) { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) { + unsigned int level; + + pte_t *pte = lookup_address(address, &level); + + if (pte && pte_present(*pte) && !pte_exec(*pte)) +- printk(nx_warning, current_uid()); ++ printk(nx_warning, current_uid(), current->comm, task_pid_nr(current)); + } + ++#ifdef CONFIG_PAX_KERNEXEC ++ if (init_mm.start_code <= address && address < init_mm.end_code) { ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", ++ &current->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ else ++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", ++ current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ } ++#endif ++ + printk(KERN_ALERT "BUG: unable to handle kernel "); + if (address < PAGE_SIZE) + printk(KERN_CONT "NULL pointer dereference"); +@@ -705,6 +786,68 @@ __bad_area_nosemaphore(struct pt_regs *r + unsigned long address, int si_code) + { + struct task_struct *tsk = current; ++ struct mm_struct *mm = tsk->mm; ++ ++#ifdef CONFIG_X86_64 ++ if (mm && (error_code & PF_INSTR)) { ++ if (regs->ip == (unsigned long)vgettimeofday) { ++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_gettimeofday); ++ return; ++ } else if (regs->ip == (unsigned long)vtime) { ++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, fallback_time); ++ return; ++ } else if (regs->ip == (unsigned long)vgetcpu) { ++ regs->ip = (unsigned long)VDSO64_SYMBOL(mm->context.vdso, getcpu); ++ return; ++ } ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm && (error_code & PF_USER)) { ++ unsigned long ip = regs->ip; ++ ++ if (v8086_mode(regs)) ++ ip = ((regs->cs & 0xffff) << 4) + (regs->ip & 0xffff); ++ ++ /* ++ * It's possible to have interrupts off here: ++ */ ++ local_irq_enable(); ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && ++ (((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) || (!(error_code & (PF_PROT | PF_WRITE)) && regs->ip == address))) { ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ switch (pax_handle_fetch_fault(regs)) { ++ case 2: ++ return; ++ } ++#endif ++ ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); ++ 
do_group_exit(SIGKILL); ++ } ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && !(error_code & (PF_PROT | PF_WRITE)) && (regs->ip + SEGMEXEC_TASK_SIZE == address)) { ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ switch (pax_handle_fetch_fault(regs)) { ++ case 2: ++ return; ++ } ++#endif ++ ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++#endif ++ ++ } ++#endif + + /* User mode accesses just cause a SIGSEGV */ + if (error_code & PF_USER) { +@@ -851,6 +994,106 @@ static int spurious_fault_check(unsigned + return 1; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code) ++{ ++ pte_t *pte; ++ pmd_t *pmd; ++ spinlock_t *ptl; ++ unsigned char pte_mask; ++ ++ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) || ++ !(mm->pax_flags & MF_PAX_PAGEEXEC)) ++ return 0; ++ ++ /* PaX: it's our fault, let's handle it if we can */ ++ ++ /* PaX: take a look at read faults before acquiring any locks */ ++ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) { ++ /* instruction fetch attempt from a protected page in user mode */ ++ up_read(&mm->mmap_sem); ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ switch (pax_handle_fetch_fault(regs)) { ++ case 2: ++ return 1; ++ } ++#endif ++ ++ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp); ++ do_group_exit(SIGKILL); ++ } ++ ++ pmd = pax_get_pmd(mm, address); ++ if (unlikely(!pmd)) ++ return 0; ++ ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl); ++ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) { ++ pte_unmap_unlock(pte, ptl); ++ return 0; ++ } ++ ++ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) { ++ /* write attempt to a protected page in user mode */ ++ pte_unmap_unlock(pte, ptl); ++ return 0; ++ } ++ ++#ifdef CONFIG_SMP ++ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask))) ++#else ++ if (likely(address > get_limit(regs->cs))) ++#endif ++ { ++ set_pte(pte, pte_mkread(*pte)); ++ __flush_tlb_one(address); ++ pte_unmap_unlock(pte, ptl); ++ up_read(&mm->mmap_sem); ++ return 1; ++ } ++ ++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1)); ++ ++ /* ++ * PaX: fill DTLB with user rights and retry ++ */ ++ __asm__ __volatile__ ( ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ "movw %w4,%%es\n" ++#endif ++ "orb %2,(%1)\n" ++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC) ++/* ++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's ++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any* ++ * page fault when examined during a TLB load attempt. this is true not only ++ * for PTEs holding a non-present entry but also present entries that will ++ * raise a page fault (such as those set up by PaX, or the copy-on-write ++ * mechanism). in effect it means that we do *not* need to flush the TLBs ++ * for our target pages since their PTEs are simply not in the TLBs at all. ++ ++ * the best thing in omitting it is that we gain around 15-20% speed in the ++ * fast path of the page fault handler and can get rid of tracing since we ++ * can no longer flush unintended entries. 
++ */ ++ "invlpg (%0)\n" ++#endif ++ "testb $0,%%es:(%0)\n" ++ "xorb %3,(%1)\n" ++#ifdef CONFIG_PAX_MEMORY_UDEREF ++ "pushl %%ss\n" ++ "popl %%es\n" ++#endif ++ : ++ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER), "r" (__USER_DS) ++ : "memory", "cc"); ++ pte_unmap_unlock(pte, ptl); ++ up_read(&mm->mmap_sem); ++ return 1; ++} ++#endif ++ + /* + * Handle a spurious fault caused by a stale TLB entry. + * +@@ -917,6 +1160,9 @@ int show_unhandled_signals = 1; + static inline int + access_error(unsigned long error_code, int write, struct vm_area_struct *vma) + { ++ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC)) ++ return 1; ++ + if (write) { + /* write, present and write, not present: */ + if (unlikely(!(vma->vm_flags & VM_WRITE))) +@@ -950,17 +1196,31 @@ do_page_fault(struct pt_regs *regs, unsi + { + struct vm_area_struct *vma; + struct task_struct *tsk; +- unsigned long address; + struct mm_struct *mm; + int write; + int fault; + ++ /* Get the faulting address: */ ++ unsigned long address = read_cr2(); ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ if (!user_mode(regs) && address < 2 * PAX_USER_SHADOW_BASE) { ++ if (!search_exception_tables(regs->ip)) { ++ bad_area_nosemaphore(regs, error_code, address); ++ return; ++ } ++ if (address < PAX_USER_SHADOW_BASE) { ++ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n"); ++ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip); ++ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR); ++ } else ++ address -= PAX_USER_SHADOW_BASE; ++ } ++#endif ++ + tsk = current; + mm = tsk->mm; + +- /* Get the faulting address: */ +- address = read_cr2(); +- + /* + * Detect and handle instructions that would cause a page fault for + * both a tracked kernel page and a userspace page. +@@ -1020,7 +1280,7 @@ do_page_fault(struct pt_regs *regs, unsi + * User-mode registers count as a user access even for any + * potential system fault or CPU buglet: + */ +- if (user_mode_vm(regs)) { ++ if (user_mode(regs)) { + local_irq_enable(); + error_code |= PF_USER; + } else { +@@ -1074,6 +1334,11 @@ do_page_fault(struct pt_regs *regs, unsi + might_sleep(); + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) ++ if (pax_handle_pageexec_fault(regs, mm, address, error_code)) ++ return; ++#endif ++ + vma = find_vma(mm, address); + if (unlikely(!vma)) { + bad_area(regs, error_code, address); +@@ -1085,18 +1350,24 @@ do_page_fault(struct pt_regs *regs, unsi + bad_area(regs, error_code, address); + return; + } +- if (error_code & PF_USER) { +- /* +- * Accessing the stack below %sp is always a bug. +- * The large cushion allows instructions like enter +- * and pusha to work. ("enter $65535, $31" pushes +- * 32 pointers and then decrements %sp by 65535.) +- */ +- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) { +- bad_area(regs, error_code, address); +- return; +- } ++ /* ++ * Accessing the stack below %sp is always a bug. ++ * The large cushion allows instructions like enter ++ * and pusha to work. ("enter $65535, $31" pushes ++ * 32 pointers and then decrements %sp by 65535.) 
++ */ ++ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) { ++ bad_area(regs, error_code, address); ++ return; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) { ++ bad_area(regs, error_code, address); ++ return; ++ } ++#endif ++ + if (unlikely(expand_stack(vma, address))) { + bad_area(regs, error_code, address); + return; +@@ -1140,3 +1411,199 @@ good_area: + + up_read(&mm->mmap_sem); + } ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++static int pax_handle_fetch_fault_32(struct pt_regs *regs) ++{ ++ int err; ++ ++ do { /* PaX: gcc trampoline emulation #1 */ ++ unsigned char mov1, mov2; ++ unsigned short jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 11) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov1, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) { ++ regs->cx = addr1; ++ regs->ax = addr2; ++ regs->ip = addr2; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #2 */ ++ unsigned char mov, jmp; ++ unsigned int addr1, addr2; ++ ++#ifdef CONFIG_X86_64 ++ if ((regs->ip + 9) >> 32) ++ break; ++#endif ++ ++ err = get_user(mov, (unsigned char __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1)); ++ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5)); ++ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6)); ++ ++ if (err) ++ break; ++ ++ if (mov == 0xB9 && jmp == 0xE9) { ++ regs->cx = addr1; ++ regs->ip = (unsigned int)(regs->ip + addr2 + 10); ++ return 2; ++ } ++ } while (0); ++ ++ return 1; /* PaX in action */ ++} ++ ++#ifdef CONFIG_X86_64 ++static int pax_handle_fetch_fault_64(struct pt_regs *regs) ++{ ++ int err; ++ ++ do { /* PaX: gcc trampoline emulation #1 */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char jmp2; ++ unsigned int addr1; ++ unsigned long addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ regs->r10 = addr2; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ do { /* PaX: gcc trampoline emulation #2 */ ++ unsigned short mov1, mov2, jmp1; ++ unsigned char jmp2; ++ unsigned long addr1, addr2; ++ ++ err = get_user(mov1, (unsigned short __user *)regs->ip); ++ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2)); ++ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10)); ++ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12)); ++ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20)); ++ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22)); ++ ++ if (err) ++ break; ++ ++ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) { ++ regs->r11 = addr1; ++ 
regs->r10 = addr2; ++ regs->ip = addr1; ++ return 2; ++ } ++ } while (0); ++ ++ return 1; /* PaX in action */ ++} ++#endif ++ ++/* ++ * PaX: decide what to do with offenders (regs->ip = fault address) ++ * ++ * returns 1 when task should be killed ++ * 2 when gcc trampoline was detected ++ */ ++static int pax_handle_fetch_fault(struct pt_regs *regs) ++{ ++ if (v8086_mode(regs)) ++ return 1; ++ ++ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP)) ++ return 1; ++ ++#ifdef CONFIG_X86_32 ++ return pax_handle_fetch_fault_32(regs); ++#else ++ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) ++ return pax_handle_fetch_fault_32(regs); ++ else ++ return pax_handle_fetch_fault_64(regs); ++#endif ++} ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++void pax_report_insns(void *pc, void *sp) ++{ ++ long i; ++ ++ printk(KERN_ERR "PAX: bytes at PC: "); ++ for (i = 0; i < 20; i++) { ++ unsigned char c; ++ if (get_user(c, (__force unsigned char __user *)pc+i)) ++ printk(KERN_CONT "?? "); ++ else ++ printk(KERN_CONT "%02x ", c); ++ } ++ printk("\n"); ++ ++ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long)); ++ for (i = -1; i < 80 / (long)sizeof(long); i++) { ++ unsigned long c; ++ if (get_user(c, (__force unsigned long __user *)sp+i)) ++#ifdef CONFIG_X86_32 ++ printk(KERN_CONT "???????? "); ++#else ++ printk(KERN_CONT "???????????????? "); ++#endif ++ else ++ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c); ++ } ++ printk("\n"); ++} ++#endif ++ ++/** ++ * probe_kernel_write(): safely attempt to write to a location ++ * @dst: address to write to ++ * @src: pointer to the data that shall be written ++ * @size: size of the data chunk ++ * ++ * Safely write to address @dst from the buffer at @src. If a kernel fault ++ * happens, handle that and return -EFAULT. ++ */ ++long notrace probe_kernel_write(void *dst, const void *src, size_t size) ++{ ++ long ret; ++ mm_segment_t old_fs = get_fs(); ++ ++ set_fs(KERNEL_DS); ++ pagefault_disable(); ++ pax_open_kernel(); ++ ret = __copy_to_user_inatomic((__force void __user *)dst, src, size); ++ pax_close_kernel(); ++ pagefault_enable(); ++ set_fs(old_fs); ++ ++ return ret ? -EFAULT : 0; ++} +diff -urNp linux-2.6.35.4/arch/x86/mm/gup.c linux-2.6.35.4/arch/x86/mm/gup.c +--- linux-2.6.35.4/arch/x86/mm/gup.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/gup.c 2010-09-17 20:12:09.000000000 -0400 +@@ -237,7 +237,7 @@ int __get_user_pages_fast(unsigned long + addr = start; + len = (unsigned long) nr_pages << PAGE_SHIFT; + end = start + len; +- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ, ++ if (unlikely(!__access_ok(write ? 
VERIFY_WRITE : VERIFY_READ, + (void __user *)start, len))) + return 0; + +diff -urNp linux-2.6.35.4/arch/x86/mm/highmem_32.c linux-2.6.35.4/arch/x86/mm/highmem_32.c +--- linux-2.6.35.4/arch/x86/mm/highmem_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/highmem_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,10 @@ void *kmap_atomic_prot(struct page *page + idx = type + KM_TYPE_NR*smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); + BUG_ON(!pte_none(*(kmap_pte-idx))); ++ ++ pax_open_kernel(); + set_pte(kmap_pte-idx, mk_pte(page, prot)); ++ pax_close_kernel(); + + return (void *)vaddr; + } +diff -urNp linux-2.6.35.4/arch/x86/mm/hugetlbpage.c linux-2.6.35.4/arch/x86/mm/hugetlbpage.c +--- linux-2.6.35.4/arch/x86/mm/hugetlbpage.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/hugetlbpage.c 2010-09-17 20:12:09.000000000 -0400 +@@ -266,13 +266,18 @@ static unsigned long hugetlb_get_unmappe + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; +- unsigned long start_addr; ++ unsigned long start_addr, pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif + + if (len > mm->cached_hole_size) { +- start_addr = mm->free_area_cache; ++ start_addr = mm->free_area_cache; + } else { +- start_addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + + full_search: +@@ -280,26 +285,27 @@ full_search: + + for (vma = find_vma(mm, addr); ; vma = vma->vm_next) { + /* At this point: (!vma || addr < vma->vm_end). */ +- if (TASK_SIZE - len < addr) { ++ if (pax_task_size - len < addr) { + /* + * Start a new search - just in case we missed + * some holes. + */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- start_addr = TASK_UNMAPPED_BASE; ++ if (start_addr != mm->mmap_base) { ++ start_addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = ALIGN(vma->vm_end, huge_page_size(h)); + } ++ ++ mm->free_area_cache = addr + len; ++ return addr; + } + + static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file, +@@ -308,10 +314,9 @@ static unsigned long hugetlb_get_unmappe + { + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; +- struct vm_area_struct *vma, *prev_vma; +- unsigned long base = mm->mmap_base, addr = addr0; ++ struct vm_area_struct *vma; ++ unsigned long base = mm->mmap_base, addr; + unsigned long largest_hole = mm->cached_hole_size; +- int first_time = 1; + + /* don't allow allocations above current base */ + if (mm->free_area_cache > base) +@@ -321,7 +326,7 @@ static unsigned long hugetlb_get_unmappe + largest_hole = 0; + mm->free_area_cache = base; + } +-try_again: ++ + /* make sure it can fit in the remaining address space */ + if (mm->free_area_cache < len) + goto fail; +@@ -329,33 +334,27 @@ try_again: + /* either no address requested or cant fit in requested address hole */ + addr = (mm->free_area_cache - len) & huge_page_mask(h); + do { ++ vma = find_vma(mm, addr); + /* + * Lookup failure means no vma is above this address, + * i.e. 
return with success: +- */ +- if (!(vma = find_vma_prev(mm, addr, &prev_vma))) +- return addr; +- +- /* + * new region fits between prev_vma->vm_end and + * vma->vm_start, use it: + */ +- if (addr + len <= vma->vm_start && +- (!prev_vma || (addr >= prev_vma->vm_end))) { ++ if (check_heap_stack_gap(vma, addr, len)) { + /* remember the address as a hint for next time */ +- mm->cached_hole_size = largest_hole; +- return (mm->free_area_cache = addr); +- } else { +- /* pull free_area_cache down to the first hole */ +- if (mm->free_area_cache == vma->vm_end) { +- mm->free_area_cache = vma->vm_start; +- mm->cached_hole_size = largest_hole; +- } ++ mm->cached_hole_size = largest_hole; ++ return (mm->free_area_cache = addr); ++ } ++ /* pull free_area_cache down to the first hole */ ++ if (mm->free_area_cache == vma->vm_end) { ++ mm->free_area_cache = vma->vm_start; ++ mm->cached_hole_size = largest_hole; + } + + /* remember the largest hole we saw so far */ + if (addr + largest_hole < vma->vm_start) +- largest_hole = vma->vm_start - addr; ++ largest_hole = vma->vm_start - addr; + + /* try just below the current vma->vm_start */ + addr = (vma->vm_start - len) & huge_page_mask(h); +@@ -363,22 +362,26 @@ try_again: + + fail: + /* +- * if hint left us with no space for the requested +- * mapping then try again: +- */ +- if (first_time) { +- mm->free_area_cache = base; +- largest_hole = 0; +- first_time = 0; +- goto try_again; +- } +- /* + * A failed mmap() very likely causes application failure, + * so fall back to the bottom-up function here. This scenario + * can happen with large stack limits and large mmap() + * allocations. + */ +- mm->free_area_cache = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ mm->mmap_base = SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; + addr = hugetlb_get_unmapped_area_bottomup(file, addr0, + len, pgoff, flags); +@@ -386,6 +389,7 @@ fail: + /* + * Restore the topdown base: + */ ++ mm->mmap_base = base; + mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + +@@ -399,10 +403,17 @@ hugetlb_get_unmapped_area(struct file *f + struct hstate *h = hstate_file(file); + struct mm_struct *mm = current->mm; + struct vm_area_struct *vma; ++ unsigned long pax_task_size = TASK_SIZE; + + if (len & ~huge_page_mask(h)) + return -EINVAL; +- if (len > TASK_SIZE) ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (len > pax_task_size) + return -ENOMEM; + + if (flags & MAP_FIXED) { +@@ -414,8 +425,7 @@ hugetlb_get_unmapped_area(struct file *f + if (addr) { + addr = ALIGN(addr, huge_page_size(h)); + vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) ++ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len)) + return addr; + } + if (mm->get_unmapped_area == arch_get_unmapped_area) +diff -urNp linux-2.6.35.4/arch/x86/mm/init_32.c linux-2.6.35.4/arch/x86/mm/init_32.c +--- linux-2.6.35.4/arch/x86/mm/init_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/init_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -72,36 +72,6 @@ static __init void *alloc_low_page(void) + } + + /* +- * Creates a middle page table and puts a pointer to it in the +- * given 
global directory entry. This only returns the gd entry +- * in non-PAE compilation mode, since the middle layer is folded. +- */ +-static pmd_t * __init one_md_table_init(pgd_t *pgd) +-{ +- pud_t *pud; +- pmd_t *pmd_table; +- +-#ifdef CONFIG_X86_PAE +- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) { +- if (after_bootmem) +- pmd_table = (pmd_t *)alloc_bootmem_pages(PAGE_SIZE); +- else +- pmd_table = (pmd_t *)alloc_low_page(); +- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT); +- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); +- pud = pud_offset(pgd, 0); +- BUG_ON(pmd_table != pmd_offset(pud, 0)); +- +- return pmd_table; +- } +-#endif +- pud = pud_offset(pgd, 0); +- pmd_table = pmd_offset(pud, 0); +- +- return pmd_table; +-} +- +-/* + * Create a page table and place a pointer to it in a middle page + * directory entry: + */ +@@ -121,13 +91,28 @@ static pte_t * __init one_page_table_ini + page_table = (pte_t *)alloc_low_page(); + + paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT); ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE)); ++#else + set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); ++#endif + BUG_ON(page_table != pte_offset_kernel(pmd, 0)); + } + + return pte_offset_kernel(pmd, 0); + } + ++static pmd_t * __init one_md_table_init(pgd_t *pgd) ++{ ++ pud_t *pud; ++ pmd_t *pmd_table; ++ ++ pud = pud_offset(pgd, 0); ++ pmd_table = pmd_offset(pud, 0); ++ ++ return pmd_table; ++} ++ + pmd_t * __init populate_extra_pmd(unsigned long vaddr) + { + int pgd_idx = pgd_index(vaddr); +@@ -201,6 +186,7 @@ page_table_range_init(unsigned long star + int pgd_idx, pmd_idx; + unsigned long vaddr; + pgd_t *pgd; ++ pud_t *pud; + pmd_t *pmd; + pte_t *pte = NULL; + +@@ -210,8 +196,13 @@ page_table_range_init(unsigned long star + pgd = pgd_base + pgd_idx; + + for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) { +- pmd = one_md_table_init(pgd); +- pmd = pmd + pmd_index(vaddr); ++ pud = pud_offset(pgd, vaddr); ++ pmd = pmd_offset(pud, vaddr); ++ ++#ifdef CONFIG_X86_PAE ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); ++#endif ++ + for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end); + pmd++, pmd_idx++) { + pte = page_table_kmap_check(one_page_table_init(pmd), +@@ -223,11 +214,20 @@ page_table_range_init(unsigned long star + } + } + +-static inline int is_kernel_text(unsigned long addr) ++static inline int is_kernel_text(unsigned long start, unsigned long end) + { +- if (addr >= PAGE_OFFSET && addr <= (unsigned long)__init_end) +- return 1; +- return 0; ++ if ((start > ktla_ktva((unsigned long)_etext) || ++ end <= ktla_ktva((unsigned long)_stext)) && ++ (start > ktla_ktva((unsigned long)_einittext) || ++ end <= ktla_ktva((unsigned long)_sinittext)) && ++ ++#ifdef CONFIG_ACPI_SLEEP ++ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) && ++#endif ++ ++ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000))) ++ return 0; ++ return 1; + } + + /* +@@ -244,9 +244,10 @@ kernel_physical_mapping_init(unsigned lo + unsigned long last_map_addr = end; + unsigned long start_pfn, end_pfn; + pgd_t *pgd_base = swapper_pg_dir; +- int pgd_idx, pmd_idx, pte_ofs; ++ unsigned int pgd_idx, pmd_idx, pte_ofs; + unsigned long pfn; + pgd_t *pgd; ++ pud_t *pud; + pmd_t *pmd; + pte_t *pte; + unsigned pages_2m, pages_4k; +@@ -279,8 +280,13 @@ repeat: + pfn = start_pfn; + pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); + pgd = 
pgd_base + pgd_idx; +- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { +- pmd = one_md_table_init(pgd); ++ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) { ++ pud = pud_offset(pgd, 0); ++ pmd = pmd_offset(pud, 0); ++ ++#ifdef CONFIG_X86_PAE ++ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT); ++#endif + + if (pfn >= end_pfn) + continue; +@@ -292,14 +298,13 @@ repeat: + #endif + for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn; + pmd++, pmd_idx++) { +- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET; ++ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET; + + /* + * Map with big pages if possible, otherwise + * create normal page tables: + */ + if (use_pse) { +- unsigned int addr2; + pgprot_t prot = PAGE_KERNEL_LARGE; + /* + * first pass will use the same initial +@@ -309,11 +314,7 @@ repeat: + __pgprot(PTE_IDENT_ATTR | + _PAGE_PSE); + +- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE + +- PAGE_OFFSET + PAGE_SIZE-1; +- +- if (is_kernel_text(addr) || +- is_kernel_text(addr2)) ++ if (is_kernel_text(address, address + PMD_SIZE)) + prot = PAGE_KERNEL_LARGE_EXEC; + + pages_2m++; +@@ -330,7 +331,7 @@ repeat: + pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET); + pte += pte_ofs; + for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn; +- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) { ++ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) { + pgprot_t prot = PAGE_KERNEL; + /* + * first pass will use the same initial +@@ -338,7 +339,7 @@ repeat: + */ + pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR); + +- if (is_kernel_text(addr)) ++ if (is_kernel_text(address, address + PAGE_SIZE)) + prot = PAGE_KERNEL_EXEC; + + pages_4k++; +@@ -491,7 +492,7 @@ void __init native_pagetable_setup_start + + pud = pud_offset(pgd, va); + pmd = pmd_offset(pud, va); +- if (!pmd_present(*pmd)) ++ if (!pmd_present(*pmd) || pmd_huge(*pmd)) + break; + + pte = pte_offset_kernel(pmd, va); +@@ -543,9 +544,7 @@ void __init early_ioremap_page_table_ran + + static void __init pagetable_init(void) + { +- pgd_t *pgd_base = swapper_pg_dir; +- +- permanent_kmaps_init(pgd_base); ++ permanent_kmaps_init(swapper_pg_dir); + } + + #ifdef CONFIG_ACPI_SLEEP +@@ -553,12 +552,12 @@ static void __init pagetable_init(void) + * ACPI suspend needs this for resume, because things like the intel-agp + * driver might have split up a kernel 4MB mapping. 
+ */ +-char swsusp_pg_dir[PAGE_SIZE] ++pgd_t swsusp_pg_dir[PTRS_PER_PGD] + __attribute__ ((aligned(PAGE_SIZE))); + + static inline void save_pg_dir(void) + { +- memcpy(swsusp_pg_dir, swapper_pg_dir, PAGE_SIZE); ++ clone_pgd_range(swsusp_pg_dir, swapper_pg_dir, PTRS_PER_PGD); + } + #else /* !CONFIG_ACPI_SLEEP */ + static inline void save_pg_dir(void) +@@ -590,7 +589,7 @@ void zap_low_mappings(bool early) + flush_tlb_all(); + } + +-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + + /* user-defined highmem size */ +@@ -781,7 +780,7 @@ void __init setup_bootmem_allocator(void + * Initialize the boot-time allocator (with low memory only): + */ + bootmap_size = bootmem_bootmap_pages(max_low_pfn)<<PAGE_SHIFT; +- bootmap = find_e820_area(0, max_pfn_mapped<<PAGE_SHIFT, bootmap_size, ++ bootmap = find_e820_area(0x100000, max_pfn_mapped<<PAGE_SHIFT, bootmap_size, + PAGE_SIZE); + if (bootmap == -1L) + panic("Cannot find bootmem map of size %ld\n", bootmap_size); +@@ -871,6 +870,12 @@ void __init mem_init(void) + + pci_iommu_alloc(); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + #ifdef CONFIG_FLATMEM + BUG_ON(!mem_map); + #endif +@@ -888,7 +893,7 @@ void __init mem_init(void) + set_highmem_pages_init(); + + codesize = (unsigned long) &_etext - (unsigned long) &_text; +- datasize = (unsigned long) &_edata - (unsigned long) &_etext; ++ datasize = (unsigned long) &_edata - (unsigned long) &_sdata; + initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin; + + printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, " +@@ -929,10 +934,10 @@ void __init mem_init(void) + ((unsigned long)&__init_end - + (unsigned long)&__init_begin) >> 10, + +- (unsigned long)&_etext, (unsigned long)&_edata, +- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10, ++ (unsigned long)&_sdata, (unsigned long)&_edata, ++ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10, + +- (unsigned long)&_text, (unsigned long)&_etext, ++ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext), + ((unsigned long)&_etext - (unsigned long)&_text) >> 10); + + /* +@@ -1013,6 +1018,7 @@ void set_kernel_text_rw(void) + if (!kernel_set_to_readonly) + return; + ++ start = ktla_ktva(start); + pr_debug("Set kernel text: %lx - %lx for read write\n", + start, start+size); + +@@ -1027,6 +1033,7 @@ void set_kernel_text_ro(void) + if (!kernel_set_to_readonly) + return; + ++ start = ktla_ktva(start); + pr_debug("Set kernel text: %lx - %lx for read only\n", + start, start+size); + +@@ -1038,6 +1045,7 @@ void mark_rodata_ro(void) + unsigned long start = PFN_ALIGN(_text); + unsigned long size = PFN_ALIGN(_etext) - start; + ++ start = ktla_ktva(start); + set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT); + printk(KERN_INFO "Write protecting the kernel text: %luk\n", + size >> 10); +diff -urNp linux-2.6.35.4/arch/x86/mm/init_64.c linux-2.6.35.4/arch/x86/mm/init_64.c +--- linux-2.6.35.4/arch/x86/mm/init_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/init_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,7 +50,6 @@ + #include <asm/numa.h> + #include <asm/cacheflush.h> + #include <asm/init.h> +-#include <linux/bootmem.h> + + static unsigned long dma_reserve __initdata; + +@@ -74,7 +73,7 @@ 
early_param("gbpages", parse_direct_gbpa + * around without checking the pgd every time. + */ + +-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP; ++pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP); + EXPORT_SYMBOL_GPL(__supported_pte_mask); + + int force_personality32; +@@ -165,7 +164,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, + pmd = fill_pmd(pud, vaddr); + pte = fill_pte(pmd, vaddr); + ++ pax_open_kernel(); + set_pte(pte, new_pte); ++ pax_close_kernel(); + + /* + * It's enough to flush this one mapping. +@@ -224,14 +225,12 @@ static void __init __init_extra_mapping( + pgd = pgd_offset_k((unsigned long)__va(phys)); + if (pgd_none(*pgd)) { + pud = (pud_t *) spp_getpage(); +- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE | +- _PAGE_USER)); ++ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE)); + } + pud = pud_offset(pgd, (unsigned long)__va(phys)); + if (pud_none(*pud)) { + pmd = (pmd_t *) spp_getpage(); +- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE | +- _PAGE_USER)); ++ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE)); + } + pmd = pmd_offset(pud, phys); + BUG_ON(!pmd_none(*pmd)); +@@ -680,6 +679,12 @@ void __init mem_init(void) + + pci_iommu_alloc(); + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY, ++ swapper_pg_dir + KERNEL_PGD_BOUNDARY, ++ KERNEL_PGD_PTRS); ++#endif ++ + /* clear_bss() already clear the empty_zero_page */ + + reservedpages = 0; +@@ -886,8 +891,8 @@ int kern_addr_valid(unsigned long addr) + static struct vm_area_struct gate_vma = { + .vm_start = VSYSCALL_START, + .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE), +- .vm_page_prot = PAGE_READONLY_EXEC, +- .vm_flags = VM_READ | VM_EXEC ++ .vm_page_prot = PAGE_READONLY, ++ .vm_flags = VM_READ + }; + + struct vm_area_struct *get_gate_vma(struct task_struct *tsk) +@@ -921,7 +926,7 @@ int in_gate_area_no_task(unsigned long a + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; + if (vma == &gate_vma) + return "[vsyscall]"; +diff -urNp linux-2.6.35.4/arch/x86/mm/init.c linux-2.6.35.4/arch/x86/mm/init.c +--- linux-2.6.35.4/arch/x86/mm/init.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/init.c 2010-09-17 20:12:09.000000000 -0400 +@@ -70,11 +70,7 @@ static void __init find_early_table_spac + * cause a hotspot and fill up ZONE_DMA. The page tables + * need roughly 0.5KB per GB. + */ +-#ifdef CONFIG_X86_32 +- start = 0x7000; +-#else +- start = 0x8000; +-#endif ++ start = 0x100000; + e820_table_start = find_e820_area(start, max_pfn_mapped<<PAGE_SHIFT, + tables, PAGE_SIZE); + if (e820_table_start == -1UL) +@@ -321,7 +317,13 @@ unsigned long __init_refok init_memory_m + */ + int devmem_is_allowed(unsigned long pagenr) + { +- if (pagenr <= 256) ++ if (!pagenr) ++ return 1; ++#ifdef CONFIG_VM86 ++ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT)) ++ return 1; ++#endif ++ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT)) + return 1; + if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) + return 0; +@@ -380,6 +382,88 @@ void free_init_pages(char *what, unsigne + + void free_initmem(void) + { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_X86_32 ++ /* PaX: limit KERNEL_CS to actual size */ ++ unsigned long addr, limit; ++ struct desc_struct d; ++ int cpu; ++ ++ limit = paravirt_enabled() ? 
ktva_ktla(0xffffffff) : (unsigned long)&_etext; ++ limit = (limit - 1UL) >> PAGE_SHIFT; ++ ++ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE); ++ for (cpu = 0; cpu < NR_CPUS; cpu++) { ++ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC); ++ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S); ++ } ++ ++ /* PaX: make KERNEL_CS read-only */ ++ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text)); ++ if (!paravirt_enabled()) ++ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT); ++/* ++ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ } ++*/ ++#ifdef CONFIG_X86_PAE ++ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT); ++/* ++ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++*/ ++#endif ++ ++#ifdef CONFIG_MODULES ++ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT); ++#endif ++ ++#else ++ pgd_t *pgd; ++ pud_t *pud; ++ pmd_t *pmd; ++ unsigned long addr, end; ++ ++ /* PaX: make kernel code/rodata read-only, rest non-executable */ ++ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ continue; ++ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata) ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ else ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++ ++ addr = (unsigned long)__va(__pa(__START_KERNEL_map)); ++ end = addr + KERNEL_IMAGE_SIZE; ++ for (; addr < end; addr += PMD_SIZE) { ++ pgd = pgd_offset_k(addr); ++ pud = pud_offset(pgd, addr); ++ pmd = pmd_offset(pud, addr); ++ if (!pmd_present(*pmd)) ++ continue; ++ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata))) ++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW)); ++ else ++ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask))); ++ } ++#endif ++ ++ flush_tlb_all(); ++#endif ++ + free_init_pages("unused kernel memory", + (unsigned long)(&__init_begin), + (unsigned long)(&__init_end)); +diff -urNp linux-2.6.35.4/arch/x86/mm/iomap_32.c linux-2.6.35.4/arch/x86/mm/iomap_32.c +--- linux-2.6.35.4/arch/x86/mm/iomap_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/iomap_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -65,7 +65,11 @@ void *kmap_atomic_prot_pfn(unsigned long + debug_kmap_atomic(type); + idx = type + KM_TYPE_NR * smp_processor_id(); + vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx); ++ ++ pax_open_kernel(); + set_pte(kmap_pte - idx, pfn_pte(pfn, prot)); ++ pax_close_kernel(); ++ + arch_flush_lazy_mmu_mode(); + + return (void *)vaddr; +diff -urNp linux-2.6.35.4/arch/x86/mm/ioremap.c linux-2.6.35.4/arch/x86/mm/ioremap.c +--- linux-2.6.35.4/arch/x86/mm/ioremap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/ioremap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -100,13 +100,10 @@ static void __iomem 
*__ioremap_caller(re + /* + * Don't allow anybody to remap normal RAM that we're using.. + */ +- for (pfn = phys_addr >> PAGE_SHIFT; +- (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); +- pfn++) { +- ++ for (pfn = phys_addr >> PAGE_SHIFT; ((resource_size_t)pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK); pfn++) { + int is_ram = page_is_ram(pfn); + +- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn))) ++ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn)))) + return NULL; + WARN_ON_ONCE(is_ram); + } +@@ -346,7 +343,7 @@ static int __init early_ioremap_debug_se + early_param("early_ioremap_debug", early_ioremap_debug_setup); + + static __initdata int after_paging_init; +-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss; ++static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE); + + static inline pmd_t * __init early_ioremap_pmd(unsigned long addr) + { +@@ -378,8 +375,7 @@ void __init early_ioremap_init(void) + slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i); + + pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)); +- memset(bm_pte, 0, sizeof(bm_pte)); +- pmd_populate_kernel(&init_mm, pmd, bm_pte); ++ pmd_populate_user(&init_mm, pmd, bm_pte); + + /* + * The boot-ioremap range spans multiple pmds, for which +diff -urNp linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c +--- linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/kmemcheck/kmemcheck.c 2010-09-17 20:12:09.000000000 -0400 +@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *reg + * memory (e.g. tracked pages)? For now, we need this to avoid + * invoking kmemcheck for PnP BIOS calls. + */ +- if (regs->flags & X86_VM_MASK) ++ if (v8086_mode(regs)) + return false; +- if (regs->cs != __KERNEL_CS) ++ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS) + return false; + + pte = kmemcheck_pte_lookup(address); +diff -urNp linux-2.6.35.4/arch/x86/mm/mmap.c linux-2.6.35.4/arch/x86/mm/mmap.c +--- linux-2.6.35.4/arch/x86/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/mmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -49,7 +49,7 @@ static unsigned int stack_maxrandom_size + * Leave an at least ~128 MB hole with possible stack randomization. 
+ */ + #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size()) +-#define MAX_GAP (TASK_SIZE/6*5) ++#define MAX_GAP (pax_task_size/6*5) + + /* + * True on X86_32 or when emulating IA32 on X86_64 +@@ -94,27 +94,40 @@ static unsigned long mmap_rnd(void) + return rnd << PAGE_SHIFT; + } + +-static unsigned long mmap_base(void) ++static unsigned long mmap_base(struct mm_struct *mm) + { + unsigned long gap = rlimit(RLIMIT_STACK); ++ unsigned long pax_task_size = TASK_SIZE; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif + + if (gap < MIN_GAP) + gap = MIN_GAP; + else if (gap > MAX_GAP) + gap = MAX_GAP; + +- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd()); ++ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd()); + } + + /* + * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64 + * does, but not when emulating X86_32 + */ +-static unsigned long mmap_legacy_base(void) ++static unsigned long mmap_legacy_base(struct mm_struct *mm) + { +- if (mmap_is_ia32()) ++ if (mmap_is_ia32()) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ return SEGMEXEC_TASK_UNMAPPED_BASE; ++ else ++#endif ++ + return TASK_UNMAPPED_BASE; +- else ++ } else + return TASK_UNMAPPED_BASE + mmap_rnd(); + } + +@@ -125,11 +138,23 @@ static unsigned long mmap_legacy_base(vo + void arch_pick_mmap_layout(struct mm_struct *mm) + { + if (mmap_is_legacy()) { +- mm->mmap_base = mmap_legacy_base(); ++ mm->mmap_base = mmap_legacy_base(mm); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } else { +- mm->mmap_base = mmap_base(); ++ mm->mmap_base = mmap_base(mm); ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base -= mm->delta_mmap + mm->delta_stack; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area_topdown; + mm->unmap_area = arch_unmap_area_topdown; + } +diff -urNp linux-2.6.35.4/arch/x86/mm/numa_32.c linux-2.6.35.4/arch/x86/mm/numa_32.c +--- linux-2.6.35.4/arch/x86/mm/numa_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/numa_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -98,7 +98,6 @@ unsigned long node_memmap_size_bytes(int + } + #endif + +-extern unsigned long find_max_low_pfn(void); + extern unsigned long highend_pfn, highstart_pfn; + + #define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE) +diff -urNp linux-2.6.35.4/arch/x86/mm/pageattr.c linux-2.6.35.4/arch/x86/mm/pageattr.c +--- linux-2.6.35.4/arch/x86/mm/pageattr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/pageattr.c 2010-09-17 20:12:09.000000000 -0400 +@@ -261,16 +261,17 @@ static inline pgprot_t static_protection + * PCI BIOS based config access (CONFIG_PCI_GOBIOS) support. + */ + if (within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT)) +- pgprot_val(forbidden) |= _PAGE_NX; ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + + /* + * The kernel text needs to be executable for obvious reasons + * Does not cover __inittext since that is gone later on. 
On + * 64bit we do not enforce !NX on the low mapping + */ +- if (within(address, (unsigned long)_text, (unsigned long)_etext)) +- pgprot_val(forbidden) |= _PAGE_NX; ++ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext))) ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; + ++#ifdef CONFIG_DEBUG_RODATA + /* + * The .rodata section needs to be read-only. Using the pfn + * catches all aliases. +@@ -278,6 +279,7 @@ static inline pgprot_t static_protection + if (within(pfn, __pa((unsigned long)__start_rodata) >> PAGE_SHIFT, + __pa((unsigned long)__end_rodata) >> PAGE_SHIFT)) + pgprot_val(forbidden) |= _PAGE_RW; ++#endif + + #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) + /* +@@ -316,6 +318,13 @@ static inline pgprot_t static_protection + } + #endif + ++#ifdef CONFIG_PAX_KERNEXEC ++ if (within(pfn, __pa((unsigned long)&_text), __pa((unsigned long)&_sdata))) { ++ pgprot_val(forbidden) |= _PAGE_RW; ++ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask; ++ } ++#endif ++ + prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden)); + + return prot; +@@ -368,23 +377,37 @@ EXPORT_SYMBOL_GPL(lookup_address); + static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte) + { + /* change init_mm */ ++ pax_open_kernel(); + set_pte_atomic(kpte, pte); ++ + #ifdef CONFIG_X86_32 + if (!SHARED_KERNEL_PMD) { ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ unsigned long cpu; ++#else + struct page *page; ++#endif + ++#ifdef CONFIG_PAX_PER_CPU_PGD ++ for (cpu = 0; cpu < NR_CPUS; ++cpu) { ++ pgd_t *pgd = get_cpu_pgd(cpu); ++#else + list_for_each_entry(page, &pgd_list, lru) { +- pgd_t *pgd; ++ pgd_t *pgd = (pgd_t *)page_address(page); ++#endif ++ + pud_t *pud; + pmd_t *pmd; + +- pgd = (pgd_t *)page_address(page) + pgd_index(address); ++ pgd += pgd_index(address); + pud = pud_offset(pgd, address); + pmd = pmd_offset(pud, address); + set_pte_atomic((pte_t *)pmd, pte); + } + } + #endif ++ pax_close_kernel(); + } + + static int +diff -urNp linux-2.6.35.4/arch/x86/mm/pageattr-test.c linux-2.6.35.4/arch/x86/mm/pageattr-test.c +--- linux-2.6.35.4/arch/x86/mm/pageattr-test.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/pageattr-test.c 2010-09-17 20:12:09.000000000 -0400 +@@ -36,7 +36,7 @@ enum { + + static int pte_testbit(pte_t pte) + { +- return pte_flags(pte) & _PAGE_UNUSED1; ++ return pte_flags(pte) & _PAGE_CPA_TEST; + } + + struct split_state { +diff -urNp linux-2.6.35.4/arch/x86/mm/pat.c linux-2.6.35.4/arch/x86/mm/pat.c +--- linux-2.6.35.4/arch/x86/mm/pat.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/pat.c 2010-09-17 20:12:09.000000000 -0400 +@@ -361,7 +361,7 @@ int free_memtype(u64 start, u64 end) + + if (!entry) { + printk(KERN_INFO "%s:%d freeing invalid memtype %Lx-%Lx\n", +- current->comm, current->pid, start, end); ++ current->comm, task_pid_nr(current), start, end); + return -EINVAL; + } + +@@ -492,8 +492,8 @@ static inline int range_is_allowed(unsig + while (cursor < to) { + if (!devmem_is_allowed(pfn)) { + printk(KERN_INFO +- "Program %s tried to access /dev/mem between %Lx->%Lx.\n", +- current->comm, from, to); ++ "Program %s tried to access /dev/mem between %Lx->%Lx (%Lx).\n", ++ current->comm, from, to, cursor); + return 0; + } + cursor += PAGE_SIZE; +@@ -557,7 +557,7 @@ int kernel_map_sync_memtype(u64 base, un + printk(KERN_INFO + "%s:%d ioremap_change_attr failed %s " + "for %Lx-%Lx\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(flags), + base, 
(unsigned long long)(base + size)); + return -EINVAL; +@@ -593,7 +593,7 @@ static int reserve_pfn_range(u64 paddr, + if (want_flags != flags) { + printk(KERN_WARNING + "%s:%d map pfn RAM range req %s for %Lx-%Lx, got %s\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size), +@@ -615,7 +615,7 @@ static int reserve_pfn_range(u64 paddr, + free_memtype(paddr, paddr + size); + printk(KERN_ERR "%s:%d map pfn expected mapping type %s" + " for %Lx-%Lx, got %s\n", +- current->comm, current->pid, ++ current->comm, task_pid_nr(current), + cattr_name(want_flags), + (unsigned long long)paddr, + (unsigned long long)(paddr + size), +diff -urNp linux-2.6.35.4/arch/x86/mm/pgtable_32.c linux-2.6.35.4/arch/x86/mm/pgtable_32.c +--- linux-2.6.35.4/arch/x86/mm/pgtable_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/pgtable_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -48,10 +48,13 @@ void set_pte_vaddr(unsigned long vaddr, + return; + } + pte = pte_offset_kernel(pmd, vaddr); ++ ++ pax_open_kernel(); + if (pte_val(pteval)) + set_pte_at(&init_mm, vaddr, pte, pteval); + else + pte_clear(&init_mm, vaddr, pte); ++ pax_close_kernel(); + + /* + * It's enough to flush this one mapping. +diff -urNp linux-2.6.35.4/arch/x86/mm/pgtable.c linux-2.6.35.4/arch/x86/mm/pgtable.c +--- linux-2.6.35.4/arch/x86/mm/pgtable.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/pgtable.c 2010-09-17 20:12:09.000000000 -0400 +@@ -84,8 +84,59 @@ static inline void pgd_list_del(pgd_t *p + list_del(&page->lru); + } + +-#define UNSHARED_PTRS_PER_PGD \ +- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT; ++ ++void __shadow_user_pgds(pgd_t *dst, const pgd_t *src, int count) ++{ ++ while (count--) ++ *dst++ = __pgd((pgd_val(*src++) | _PAGE_NX) & ~_PAGE_USER); ++ ++} ++#endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++void __clone_user_pgds(pgd_t *dst, const pgd_t *src, int count) ++{ ++ while (count--) ++ ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++ *dst++ = __pgd(pgd_val(*src++) & clone_pgd_mask); ++#else ++ *dst++ = *src++; ++#endif ++ ++} ++#endif ++ ++#ifdef CONFIG_PAX_PER_CPU_PGD ++static inline void pgd_ctor(pgd_t *pgd) {} ++static inline void pgd_dtor(pgd_t *pgd) {} ++#ifdef CONFIG_X86_64 ++#define pxd_t pud_t ++#define pyd_t pgd_t ++#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn) ++#define pxd_free(mm, pud) pud_free((mm), (pud)) ++#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud)) ++#define pyd_offset(mm ,address) pgd_offset((mm), (address)) ++#define PYD_SIZE PGDIR_SIZE ++#else ++#define pxd_t pmd_t ++#define pyd_t pud_t ++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) ++#define pxd_free(mm, pud) pmd_free((mm), (pud)) ++#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud)) ++#define pyd_offset(mm ,address) pud_offset((mm), (address)) ++#define PYD_SIZE PUD_SIZE ++#endif ++#else ++#define pxd_t pmd_t ++#define pyd_t pud_t ++#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn) ++#define pxd_free(mm, pmd) pmd_free((mm), (pmd)) ++#define pyd_populate(mm, pud, pmd) pud_populate((mm), (pud), (pmd)) ++#define pyd_offset(mm ,address) pud_offset((mm), (address)) ++#define PYD_SIZE PUD_SIZE + + static void pgd_ctor(pgd_t *pgd) + { +@@ -120,6 +171,7 @@ static void pgd_dtor(pgd_t *pgd) + 
pgd_list_del(pgd); + spin_unlock_irqrestore(&pgd_lock, flags); + } ++#endif + + /* + * List of all pgd's needed for non-PAE so it can invalidate entries +@@ -132,7 +184,7 @@ static void pgd_dtor(pgd_t *pgd) + * -- wli + */ + +-#ifdef CONFIG_X86_PAE ++#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE) + /* + * In PAE mode, we need to do a cr3 reload (=tlb flush) when + * updating the top-level pagetable entries to guarantee the +@@ -144,7 +196,7 @@ static void pgd_dtor(pgd_t *pgd) + * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate + * and initialize the kernel pmds here. + */ +-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD ++#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD) + + void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd) + { +@@ -163,36 +215,38 @@ void pud_populate(struct mm_struct *mm, + if (mm == current->active_mm) + write_cr3(read_cr3()); + } ++#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD) ++#define PREALLOCATED_PXDS USER_PGD_PTRS + #else /* !CONFIG_X86_PAE */ + + /* No need to prepopulate any pagetable entries in non-PAE modes. */ +-#define PREALLOCATED_PMDS 0 ++#define PREALLOCATED_PXDS 0 + + #endif /* CONFIG_X86_PAE */ + +-static void free_pmds(pmd_t *pmds[]) ++static void free_pxds(pxd_t *pxds[]) + { + int i; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) +- if (pmds[i]) +- free_page((unsigned long)pmds[i]); ++ for(i = 0; i < PREALLOCATED_PXDS; i++) ++ if (pxds[i]) ++ free_page((unsigned long)pxds[i]); + } + +-static int preallocate_pmds(pmd_t *pmds[]) ++static int preallocate_pxds(pxd_t *pxds[]) + { + int i; + bool failed = false; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) { +- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP); +- if (pmd == NULL) ++ for(i = 0; i < PREALLOCATED_PXDS; i++) { ++ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP); ++ if (pxd == NULL) + failed = true; +- pmds[i] = pmd; ++ pxds[i] = pxd; + } + + if (failed) { +- free_pmds(pmds); ++ free_pxds(pxds); + return -ENOMEM; + } + +@@ -205,51 +259,56 @@ static int preallocate_pmds(pmd_t *pmds[ + * preallocate which never got a corresponding vma will need to be + * freed manually. 
+ */ +-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp) ++static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp) + { + int i; + +- for(i = 0; i < PREALLOCATED_PMDS; i++) { ++ for(i = 0; i < PREALLOCATED_PXDS; i++) { + pgd_t pgd = pgdp[i]; + + if (pgd_val(pgd) != 0) { +- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd); ++ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd); + +- pgdp[i] = native_make_pgd(0); ++ set_pgd(pgdp + i, native_make_pgd(0)); + +- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT); +- pmd_free(mm, pmd); ++ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT); ++ pxd_free(mm, pxd); + } + } + } + +-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[]) ++static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[]) + { +- pud_t *pud; ++ pyd_t *pyd; + unsigned long addr; + int i; + +- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */ ++ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */ + return; + +- pud = pud_offset(pgd, 0); ++#ifdef CONFIG_X86_64 ++ pyd = pyd_offset(mm, 0L); ++#else ++ pyd = pyd_offset(pgd, 0L); ++#endif + +- for (addr = i = 0; i < PREALLOCATED_PMDS; +- i++, pud++, addr += PUD_SIZE) { +- pmd_t *pmd = pmds[i]; ++ for (addr = i = 0; i < PREALLOCATED_PXDS; ++ i++, pyd++, addr += PYD_SIZE) { ++ pxd_t *pxd = pxds[i]; + + if (i >= KERNEL_PGD_BOUNDARY) +- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]), +- sizeof(pmd_t) * PTRS_PER_PMD); ++ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]), ++ sizeof(pxd_t) * PTRS_PER_PMD); + +- pud_populate(mm, pud, pmd); ++ pyd_populate(mm, pyd, pxd); + } + } + + pgd_t *pgd_alloc(struct mm_struct *mm) + { + pgd_t *pgd; +- pmd_t *pmds[PREALLOCATED_PMDS]; ++ pxd_t *pxds[PREALLOCATED_PXDS]; ++ + unsigned long flags; + + pgd = (pgd_t *)__get_free_page(PGALLOC_GFP); +@@ -259,11 +318,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + + mm->pgd = pgd; + +- if (preallocate_pmds(pmds) != 0) ++ if (preallocate_pxds(pxds) != 0) + goto out_free_pgd; + + if (paravirt_pgd_alloc(mm) != 0) +- goto out_free_pmds; ++ goto out_free_pxds; + + /* + * Make sure that pre-populating the pmds is atomic with +@@ -273,14 +332,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm) + spin_lock_irqsave(&pgd_lock, flags); + + pgd_ctor(pgd); +- pgd_prepopulate_pmd(mm, pgd, pmds); ++ pgd_prepopulate_pxd(mm, pgd, pxds); + + spin_unlock_irqrestore(&pgd_lock, flags); + + return pgd; + +-out_free_pmds: +- free_pmds(pmds); ++out_free_pxds: ++ free_pxds(pxds); + out_free_pgd: + free_page((unsigned long)pgd); + out: +@@ -289,7 +348,7 @@ out: + + void pgd_free(struct mm_struct *mm, pgd_t *pgd) + { +- pgd_mop_up_pmds(mm, pgd); ++ pgd_mop_up_pxds(mm, pgd); + pgd_dtor(pgd); + paravirt_pgd_free(mm, pgd); + free_page((unsigned long)pgd); +diff -urNp linux-2.6.35.4/arch/x86/mm/setup_nx.c linux-2.6.35.4/arch/x86/mm/setup_nx.c +--- linux-2.6.35.4/arch/x86/mm/setup_nx.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/setup_nx.c 2010-09-17 20:12:09.000000000 -0400 +@@ -5,8 +5,10 @@ + #include <asm/pgtable.h> + #include <asm/proto.h> + ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + static int disable_nx __cpuinitdata; + ++#ifndef CONFIG_PAX_PAGEEXEC + /* + * noexec = on|off + * +@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str + return 0; + } + early_param("noexec", noexec_setup); ++#endif ++ ++#endif + + void __cpuinit x86_configure_nx(void) + { ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) + if (cpu_has_nx && !disable_nx) + __supported_pte_mask |= _PAGE_NX; + 
else ++#endif + __supported_pte_mask &= ~_PAGE_NX; + } + +diff -urNp linux-2.6.35.4/arch/x86/mm/tlb.c linux-2.6.35.4/arch/x86/mm/tlb.c +--- linux-2.6.35.4/arch/x86/mm/tlb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/mm/tlb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -13,7 +13,7 @@ + #include <asm/uv/uv.h> + + DEFINE_PER_CPU_SHARED_ALIGNED(struct tlb_state, cpu_tlbstate) +- = { &init_mm, 0, }; ++ = { &init_mm, 0 }; + + /* + * Smarter SMP flushing macros. +@@ -62,7 +62,11 @@ void leave_mm(int cpu) + BUG(); + cpumask_clear_cpu(cpu, + mm_cpumask(percpu_read(cpu_tlbstate.active_mm))); ++ ++#ifndef CONFIG_PAX_PER_CPU_PGD + load_cr3(swapper_pg_dir); ++#endif ++ + } + EXPORT_SYMBOL_GPL(leave_mm); + +diff -urNp linux-2.6.35.4/arch/x86/oprofile/backtrace.c linux-2.6.35.4/arch/x86/oprofile/backtrace.c +--- linux-2.6.35.4/arch/x86/oprofile/backtrace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/oprofile/backtrace.c 2010-09-17 20:12:09.000000000 -0400 +@@ -58,7 +58,7 @@ static struct frame_head *dump_user_back + struct frame_head bufhead[2]; + + /* Also check accessibility of one struct frame_head beyond */ +- if (!access_ok(VERIFY_READ, head, sizeof(bufhead))) ++ if (!__access_ok(VERIFY_READ, head, sizeof(bufhead))) + return NULL; + if (__copy_from_user_inatomic(bufhead, head, sizeof(bufhead))) + return NULL; +@@ -78,7 +78,7 @@ x86_backtrace(struct pt_regs * const reg + { + struct frame_head *head = (struct frame_head *)frame_pointer(regs); + +- if (!user_mode_vm(regs)) { ++ if (!user_mode(regs)) { + unsigned long stack = kernel_stack_pointer(regs); + if (depth) + dump_trace(NULL, regs, (unsigned long *)stack, 0, +diff -urNp linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c +--- linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/oprofile/op_model_p4.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,7 +50,7 @@ static inline void setup_num_counters(vo + #endif + } + +-static int inline addr_increment(void) ++static inline int addr_increment(void) + { + #ifdef CONFIG_SMP + return smp_num_siblings == 2 ? 
2 : 1; +diff -urNp linux-2.6.35.4/arch/x86/pci/common.c linux-2.6.35.4/arch/x86/pci/common.c +--- linux-2.6.35.4/arch/x86/pci/common.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/common.c 2010-09-17 20:12:09.000000000 -0400 +@@ -32,8 +32,8 @@ int noioapicreroute = 1; + int pcibios_last_bus = -1; + unsigned long pirq_table_addr; + struct pci_bus *pci_root_bus; +-struct pci_raw_ops *raw_pci_ops; +-struct pci_raw_ops *raw_pci_ext_ops; ++const struct pci_raw_ops *raw_pci_ops; ++const struct pci_raw_ops *raw_pci_ext_ops; + + int raw_pci_read(unsigned int domain, unsigned int bus, unsigned int devfn, + int reg, int len, u32 *val) +@@ -365,7 +365,7 @@ static const struct dmi_system_id __devi + DMI_MATCH(DMI_PRODUCT_NAME, "ProLiant DL585 G2"), + }, + }, +- {} ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL} + }; + + void __init dmi_check_pciprobe(void) +diff -urNp linux-2.6.35.4/arch/x86/pci/direct.c linux-2.6.35.4/arch/x86/pci/direct.c +--- linux-2.6.35.4/arch/x86/pci/direct.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/direct.c 2010-09-17 20:12:09.000000000 -0400 +@@ -79,7 +79,7 @@ static int pci_conf1_write(unsigned int + + #undef PCI_CONF1_ADDRESS + +-struct pci_raw_ops pci_direct_conf1 = { ++const struct pci_raw_ops pci_direct_conf1 = { + .read = pci_conf1_read, + .write = pci_conf1_write, + }; +@@ -173,7 +173,7 @@ static int pci_conf2_write(unsigned int + + #undef PCI_CONF2_ADDRESS + +-struct pci_raw_ops pci_direct_conf2 = { ++const struct pci_raw_ops pci_direct_conf2 = { + .read = pci_conf2_read, + .write = pci_conf2_write, + }; +@@ -189,7 +189,7 @@ struct pci_raw_ops pci_direct_conf2 = { + * This should be close to trivial, but it isn't, because there are buggy + * chipsets (yes, you guessed it, by Intel and Compaq) that have no class ID. 
+ */ +-static int __init pci_sanity_check(struct pci_raw_ops *o) ++static int __init pci_sanity_check(const struct pci_raw_ops *o) + { + u32 x = 0; + int year, devfn; +diff -urNp linux-2.6.35.4/arch/x86/pci/fixup.c linux-2.6.35.4/arch/x86/pci/fixup.c +--- linux-2.6.35.4/arch/x86/pci/fixup.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/fixup.c 2010-09-17 20:12:09.000000000 -0400 +@@ -364,7 +364,7 @@ static const struct dmi_system_id __devi + DMI_MATCH(DMI_PRODUCT_NAME, "MS-6702E"), + }, + }, +- {} ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + /* +@@ -435,7 +435,7 @@ static const struct dmi_system_id __devi + DMI_MATCH(DMI_PRODUCT_VERSION, "PSA40U"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + static void __devinit pci_pre_fixup_toshiba_ohci1394(struct pci_dev *dev) +diff -urNp linux-2.6.35.4/arch/x86/pci/irq.c linux-2.6.35.4/arch/x86/pci/irq.c +--- linux-2.6.35.4/arch/x86/pci/irq.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/irq.c 2010-09-17 20:12:09.000000000 -0400 +@@ -542,7 +542,7 @@ static __init int intel_router_probe(str + static struct pci_device_id __initdata pirq_440gx[] = { + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_0) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82443GX_2) }, +- { }, ++ { PCI_DEVICE(0, 0) } + }; + + /* 440GX has a proprietary PIRQ router -- don't use it */ +@@ -1113,7 +1113,7 @@ static struct dmi_system_id __initdata p + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 360"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + void __init pcibios_irq_init(void) +diff -urNp linux-2.6.35.4/arch/x86/pci/mmconfig_32.c linux-2.6.35.4/arch/x86/pci/mmconfig_32.c +--- linux-2.6.35.4/arch/x86/pci/mmconfig_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/mmconfig_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -117,7 +117,7 @@ static int pci_mmcfg_write(unsigned int + return 0; + } + +-static struct pci_raw_ops pci_mmcfg = { ++static const struct pci_raw_ops pci_mmcfg = { + .read = pci_mmcfg_read, + .write = pci_mmcfg_write, + }; +diff -urNp linux-2.6.35.4/arch/x86/pci/mmconfig_64.c linux-2.6.35.4/arch/x86/pci/mmconfig_64.c +--- linux-2.6.35.4/arch/x86/pci/mmconfig_64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/mmconfig_64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -81,7 +81,7 @@ static int pci_mmcfg_write(unsigned int + return 0; + } + +-static struct pci_raw_ops pci_mmcfg = { ++static const struct pci_raw_ops pci_mmcfg = { + .read = pci_mmcfg_read, + .write = pci_mmcfg_write, + }; +diff -urNp linux-2.6.35.4/arch/x86/pci/numaq_32.c linux-2.6.35.4/arch/x86/pci/numaq_32.c +--- linux-2.6.35.4/arch/x86/pci/numaq_32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/numaq_32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -108,7 +108,7 @@ static int pci_conf1_mq_write(unsigned i + + #undef PCI_CONF1_MQ_ADDRESS + +-static struct pci_raw_ops pci_direct_conf1_mq = { ++static const struct pci_raw_ops pci_direct_conf1_mq = { + .read = pci_conf1_mq_read, + .write = pci_conf1_mq_write + }; +diff -urNp linux-2.6.35.4/arch/x86/pci/olpc.c linux-2.6.35.4/arch/x86/pci/olpc.c +--- linux-2.6.35.4/arch/x86/pci/olpc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/olpc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -297,7 +297,7 @@ static int pci_olpc_write(unsigned int s + return 0; + } + +-static struct pci_raw_ops pci_olpc_conf = { ++static const struct pci_raw_ops 
pci_olpc_conf = { + .read = pci_olpc_read, + .write = pci_olpc_write, + }; +diff -urNp linux-2.6.35.4/arch/x86/pci/pcbios.c linux-2.6.35.4/arch/x86/pci/pcbios.c +--- linux-2.6.35.4/arch/x86/pci/pcbios.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/pci/pcbios.c 2010-09-17 20:12:09.000000000 -0400 +@@ -57,50 +57,93 @@ union bios32 { + static struct { + unsigned long address; + unsigned short segment; +-} bios32_indirect = { 0, __KERNEL_CS }; ++} bios32_indirect __read_only = { 0, __PCIBIOS_CS }; + + /* + * Returns the entry point for the given service, NULL on error + */ + +-static unsigned long bios32_service(unsigned long service) ++static unsigned long __devinit bios32_service(unsigned long service) + { + unsigned char return_code; /* %al */ + unsigned long address; /* %ebx */ + unsigned long length; /* %ecx */ + unsigned long entry; /* %edx */ + unsigned long flags; ++ struct desc_struct d, *gdt; + + local_irq_save(flags); +- __asm__("lcall *(%%edi); cld" ++ ++ gdt = get_cpu_gdt_table(smp_processor_id()); ++ ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); ++ ++ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld" + : "=a" (return_code), + "=b" (address), + "=c" (length), + "=d" (entry) + : "0" (service), + "1" (0), +- "D" (&bios32_indirect)); ++ "D" (&bios32_indirect), ++ "r"(__PCIBIOS_DS) ++ : "memory"); ++ ++ pax_open_kernel(); ++ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0; ++ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0; ++ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0; ++ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0; ++ pax_close_kernel(); ++ + local_irq_restore(flags); + + switch (return_code) { +- case 0: +- return address + entry; +- case 0x80: /* Not present */ +- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); +- return 0; +- default: /* Shouldn't happen */ +- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", +- service, return_code); ++ case 0: { ++ int cpu; ++ unsigned char flags; ++ ++ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry); ++ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) { ++ printk(KERN_WARNING "bios32_service: not valid\n"); + return 0; ++ } ++ address = address + PAGE_OFFSET; ++ length += 16UL; /* some BIOSs underreport this... 
*/ ++ flags = 4; ++ if (length >= 64*1024*1024) { ++ length >>= PAGE_SHIFT; ++ flags |= 8; ++ } ++ ++ for (cpu = 0; cpu < NR_CPUS; cpu++) { ++ gdt = get_cpu_gdt_table(cpu); ++ pack_descriptor(&d, address, length, 0x9b, flags); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S); ++ pack_descriptor(&d, address, length, 0x93, flags); ++ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S); ++ } ++ return entry; ++ } ++ case 0x80: /* Not present */ ++ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service); ++ return 0; ++ default: /* Shouldn't happen */ ++ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n", ++ service, return_code); ++ return 0; + } + } + + static struct { + unsigned long address; + unsigned short segment; +-} pci_indirect = { 0, __KERNEL_CS }; ++} pci_indirect __read_only = { 0, __PCIBIOS_CS }; + +-static int pci_bios_present; ++static int pci_bios_present __read_only; + + static int __devinit check_pcibios(void) + { +@@ -109,11 +152,13 @@ static int __devinit check_pcibios(void) + unsigned long flags, pcibios_entry; + + if ((pcibios_entry = bios32_service(PCI_SERVICE))) { +- pci_indirect.address = pcibios_entry + PAGE_OFFSET; ++ pci_indirect.address = pcibios_entry; + + local_irq_save(flags); +- __asm__( +- "lcall *(%%edi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%edi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -122,7 +167,8 @@ static int __devinit check_pcibios(void) + "=b" (ebx), + "=c" (ecx) + : "1" (PCIBIOS_PCI_BIOS_PRESENT), +- "D" (&pci_indirect) ++ "D" (&pci_indirect), ++ "r" (__PCIBIOS_DS) + : "memory"); + local_irq_restore(flags); + +@@ -166,7 +212,10 @@ static int pci_bios_read(unsigned int se + + switch (len) { + case 1: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -175,7 +224,8 @@ static int pci_bios_read(unsigned int se + : "1" (PCIBIOS_READ_CONFIG_BYTE), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + /* + * Zero-extend the result beyond 8 bits, do not trust the + * BIOS having done it: +@@ -183,7 +233,10 @@ static int pci_bios_read(unsigned int se + *value &= 0xff; + break; + case 2: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -192,7 +245,8 @@ static int pci_bios_read(unsigned int se + : "1" (PCIBIOS_READ_CONFIG_WORD), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + /* + * Zero-extend the result beyond 16 bits, do not trust the + * BIOS having done it: +@@ -200,7 +254,10 @@ static int pci_bios_read(unsigned int se + *value &= 0xffff; + break; + case 4: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -209,7 +266,8 @@ static int pci_bios_read(unsigned int se + : "1" (PCIBIOS_READ_CONFIG_DWORD), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + } + +@@ -232,7 +290,10 @@ static int pci_bios_write(unsigned int s + + switch (len) { + case 1: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push 
%%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -241,10 +302,14 @@ static int pci_bios_write(unsigned int s + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + case 2: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -253,10 +318,14 @@ static int pci_bios_write(unsigned int s + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + case 4: +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w6, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n\t" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -265,7 +334,8 @@ static int pci_bios_write(unsigned int s + "c" (value), + "b" (bx), + "D" ((long)reg), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + break; + } + +@@ -279,7 +349,7 @@ static int pci_bios_write(unsigned int s + * Function table for BIOS32 access + */ + +-static struct pci_raw_ops pci_bios_access = { ++static const struct pci_raw_ops pci_bios_access = { + .read = pci_bios_read, + .write = pci_bios_write + }; +@@ -288,7 +358,7 @@ static struct pci_raw_ops pci_bios_acces + * Try to find PCI BIOS. + */ + +-static struct pci_raw_ops * __devinit pci_find_bios(void) ++static const struct pci_raw_ops * __devinit pci_find_bios(void) + { + union bios32 *check; + unsigned char sum; +@@ -369,10 +439,13 @@ struct irq_routing_table * pcibios_get_i + + DBG("PCI: Fetching IRQ routing table... "); + __asm__("push %%es\n\t" ++ "movw %w8, %%ds\n\t" + "push %%ds\n\t" + "pop %%es\n\t" +- "lcall *(%%esi); cld\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" + "pop %%es\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -383,7 +456,8 @@ struct irq_routing_table * pcibios_get_i + "1" (0), + "D" ((long) &opt), + "S" (&pci_indirect), +- "m" (opt) ++ "m" (opt), ++ "r" (__PCIBIOS_DS) + : "memory"); + DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map); + if (ret & 0xff00) +@@ -407,7 +481,10 @@ int pcibios_set_irq_routing(struct pci_d + { + int ret; + +- __asm__("lcall *(%%esi); cld\n\t" ++ __asm__("movw %w5, %%ds\n\t" ++ "lcall *%%ss:(%%esi); cld\n\t" ++ "push %%ss\n\t" ++ "pop %%ds\n" + "jc 1f\n\t" + "xor %%ah, %%ah\n" + "1:" +@@ -415,7 +492,8 @@ int pcibios_set_irq_routing(struct pci_d + : "0" (PCIBIOS_SET_PCI_HW_INT), + "b" ((dev->bus->number << 8) | dev->devfn), + "c" ((irq << 8) | (pin + 10)), +- "S" (&pci_indirect)); ++ "S" (&pci_indirect), ++ "r" (__PCIBIOS_DS)); + return !(ret & 0xff00); + } + EXPORT_SYMBOL(pcibios_set_irq_routing); +diff -urNp linux-2.6.35.4/arch/x86/power/cpu.c linux-2.6.35.4/arch/x86/power/cpu.c +--- linux-2.6.35.4/arch/x86/power/cpu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/power/cpu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -129,7 +129,7 @@ static void do_fpu_end(void) + static void fix_processor_context(void) + { + int cpu = smp_processor_id(); +- struct tss_struct *t = &per_cpu(init_tss, cpu); ++ struct tss_struct *t = init_tss + cpu; + + set_tss_desc(cpu, t); /* + * This just modifies memory; should not be +@@ -139,7 +139,9 @@ static void fix_processor_context(void) + */ + + #ifdef CONFIG_X86_64 ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[GDT_ENTRY_TSS].type = 9; ++ pax_close_kernel(); + + syscall_init(); /* This sets MSR_*STAR and related */ + #endif +diff 
-urNp linux-2.6.35.4/arch/x86/vdso/Makefile linux-2.6.35.4/arch/x86/vdso/Makefile +--- linux-2.6.35.4/arch/x86/vdso/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/vdso/Makefile 2010-09-17 20:12:09.000000000 -0400 +@@ -122,7 +122,7 @@ quiet_cmd_vdso = VDSO $@ + $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \ + -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) + +-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) ++VDSO_LDFLAGS = -fPIC -shared --no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) + GCOV_PROFILE := n + + # +diff -urNp linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c +--- linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/vdso/vclock_gettime.c 2010-09-17 20:12:09.000000000 -0400 +@@ -22,24 +22,48 @@ + #include <asm/hpet.h> + #include <asm/unistd.h> + #include <asm/io.h> ++#include <asm/fixmap.h> + #include "vextern.h" + + #define gtod vdso_vsyscall_gtod_data + ++notrace noinline long __vdso_fallback_time(long *t) ++{ ++ long secs; ++ asm volatile("syscall" ++ : "=a" (secs) ++ : "0" (__NR_time),"D" (t) : "r11", "cx", "memory"); ++ return secs; ++} ++ + notrace static long vdso_fallback_gettime(long clock, struct timespec *ts) + { + long ret; + asm("syscall" : "=a" (ret) : +- "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "memory"); ++ "0" (__NR_clock_gettime),"D" (clock), "S" (ts) : "r11", "cx", "memory"); + return ret; + } + ++notrace static inline cycle_t __vdso_vread_hpet(void) ++{ ++ return readl((const void __iomem *)fix_to_virt(VSYSCALL_HPET) + 0xf0); ++} ++ ++notrace static inline cycle_t __vdso_vread_tsc(void) ++{ ++ cycle_t ret = (cycle_t)vget_cycles(); ++ ++ return ret >= gtod->clock.cycle_last ? 
ret : gtod->clock.cycle_last; ++} ++ + notrace static inline long vgetns(void) + { + long v; +- cycles_t (*vread)(void); +- vread = gtod->clock.vread; +- v = (vread() - gtod->clock.cycle_last) & gtod->clock.mask; ++ if (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3]) ++ v = __vdso_vread_tsc(); ++ else ++ v = __vdso_vread_hpet(); ++ v = (v - gtod->clock.cycle_last) & gtod->clock.mask; + return (v * gtod->clock.mult) >> gtod->clock.shift; + } + +@@ -113,7 +137,9 @@ notrace static noinline int do_monotonic + + notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts) + { +- if (likely(gtod->sysctl_enabled)) ++ if (likely(gtod->sysctl_enabled && ++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) || ++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])))) + switch (clock) { + case CLOCK_REALTIME: + if (likely(gtod->clock.vread)) +@@ -133,10 +159,20 @@ notrace int __vdso_clock_gettime(clockid + int clock_gettime(clockid_t, struct timespec *) + __attribute__((weak, alias("__vdso_clock_gettime"))); + +-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) ++notrace noinline int __vdso_fallback_gettimeofday(struct timeval *tv, struct timezone *tz) + { + long ret; +- if (likely(gtod->sysctl_enabled && gtod->clock.vread)) { ++ asm("syscall" : "=a" (ret) : ++ "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "r11", "cx", "memory"); ++ return ret; ++} ++ ++notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz) ++{ ++ if (likely(gtod->sysctl_enabled && ++ ((gtod->clock.name[0] == 'h' && gtod->clock.name[1] == 'p' && gtod->clock.name[2] == 'e' && gtod->clock.name[3] == 't' && !gtod->clock.name[4]) || ++ (gtod->clock.name[0] == 't' && gtod->clock.name[1] == 's' && gtod->clock.name[2] == 'c' && !gtod->clock.name[3])))) ++ { + if (likely(tv != NULL)) { + BUILD_BUG_ON(offsetof(struct timeval, tv_usec) != + offsetof(struct timespec, tv_nsec) || +@@ -151,9 +187,7 @@ notrace int __vdso_gettimeofday(struct t + } + return 0; + } +- asm("syscall" : "=a" (ret) : +- "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory"); +- return ret; ++ return __vdso_fallback_gettimeofday(tv, tz); + } + int gettimeofday(struct timeval *, struct timezone *) + __attribute__((weak, alias("__vdso_gettimeofday"))); +diff -urNp linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c +--- linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/vdso/vdso32-setup.c 2010-09-17 20:12:09.000000000 -0400 +@@ -25,6 +25,7 @@ + #include <asm/tlbflush.h> + #include <asm/vdso.h> + #include <asm/proto.h> ++#include <asm/mman.h> + + enum { + VDSO_DISABLED = 0, +@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int m + void enable_sep_cpu(void) + { + int cpu = get_cpu(); +- struct tss_struct *tss = &per_cpu(init_tss, cpu); ++ struct tss_struct *tss = init_tss + cpu; + + if (!boot_cpu_has(X86_FEATURE_SEP)) { + put_cpu(); +@@ -249,7 +250,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + /* + * Make sure the vDSO gets into every core dump. 
+ * Dumping its contents makes post-mortem fully interpretable later +@@ -331,14 +332,14 @@ int arch_setup_additional_pages(struct l + if (compat) + addr = VDSO_HIGH_BASE; + else { +- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0); ++ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE); + if (IS_ERR_VALUE(addr)) { + ret = addr; + goto up_fail; + } + } + +- current->mm->context.vdso = (void *)addr; ++ current->mm->context.vdso = addr; + + if (compat_uses_vma || !compat) { + /* +@@ -361,11 +362,11 @@ int arch_setup_additional_pages(struct l + } + + current_thread_info()->sysenter_return = +- VDSO32_SYMBOL(addr, SYSENTER_RETURN); ++ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN); + + up_fail: + if (ret) +- current->mm->context.vdso = NULL; ++ current->mm->context.vdso = 0; + + up_write(&mm->mmap_sem); + +@@ -412,8 +413,14 @@ __initcall(ia32_binfmt_init); + + const char *arch_vma_name(struct vm_area_struct *vma) + { +- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso) ++ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso) + return "[vdso]"; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso) ++ return "[vdso]"; ++#endif ++ + return NULL; + } + +@@ -422,7 +429,7 @@ struct vm_area_struct *get_gate_vma(stru + struct mm_struct *mm = tsk->mm; + + /* Check to see if this task was created in compat vdso mode */ +- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE) ++ if (mm && mm->context.vdso == VDSO_HIGH_BASE) + return &gate_vma; + return NULL; + } +diff -urNp linux-2.6.35.4/arch/x86/vdso/vdso.lds.S linux-2.6.35.4/arch/x86/vdso/vdso.lds.S +--- linux-2.6.35.4/arch/x86/vdso/vdso.lds.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/vdso/vdso.lds.S 2010-09-17 20:12:09.000000000 -0400 +@@ -35,3 +35,9 @@ VDSO64_PRELINK = VDSO_PRELINK; + #define VEXTERN(x) VDSO64_ ## x = vdso_ ## x; + #include "vextern.h" + #undef VEXTERN ++ ++#define VEXTERN(x) VDSO64_ ## x = __vdso_ ## x; ++VEXTERN(fallback_gettimeofday) ++VEXTERN(fallback_time) ++VEXTERN(getcpu) ++#undef VEXTERN +diff -urNp linux-2.6.35.4/arch/x86/vdso/vextern.h linux-2.6.35.4/arch/x86/vdso/vextern.h +--- linux-2.6.35.4/arch/x86/vdso/vextern.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/vdso/vextern.h 2010-09-17 20:12:09.000000000 -0400 +@@ -11,6 +11,5 @@ + put into vextern.h and be referenced as a pointer with vdso prefix. + The main kernel later fills in the values. 
*/ + +-VEXTERN(jiffies) + VEXTERN(vgetcpu_mode) + VEXTERN(vsyscall_gtod_data) +diff -urNp linux-2.6.35.4/arch/x86/vdso/vma.c linux-2.6.35.4/arch/x86/vdso/vma.c +--- linux-2.6.35.4/arch/x86/vdso/vma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/vdso/vma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -58,7 +58,7 @@ static int __init init_vdso_vars(void) + if (!vbase) + goto oom; + +- if (memcmp(vbase, "\177ELF", 4)) { ++ if (memcmp(vbase, ELFMAG, SELFMAG)) { + printk("VDSO: I'm broken; not ELF\n"); + vdso_enabled = 0; + } +@@ -67,6 +67,7 @@ static int __init init_vdso_vars(void) + *(typeof(__ ## x) **) var_ref(VDSO64_SYMBOL(vbase, x), #x) = &__ ## x; + #include "vextern.h" + #undef VEXTERN ++ vunmap(vbase); + return 0; + + oom: +@@ -117,7 +118,7 @@ int arch_setup_additional_pages(struct l + goto up_fail; + } + +- current->mm->context.vdso = (void *)addr; ++ current->mm->context.vdso = addr; + + ret = install_special_mapping(mm, addr, vdso_size, + VM_READ|VM_EXEC| +@@ -125,7 +126,7 @@ int arch_setup_additional_pages(struct l + VM_ALWAYSDUMP, + vdso_pages); + if (ret) { +- current->mm->context.vdso = NULL; ++ current->mm->context.vdso = 0; + goto up_fail; + } + +@@ -133,10 +134,3 @@ up_fail: + up_write(&mm->mmap_sem); + return ret; + } +- +-static __init int vdso_setup(char *s) +-{ +- vdso_enabled = simple_strtoul(s, NULL, 0); +- return 0; +-} +-__setup("vdso=", vdso_setup); +diff -urNp linux-2.6.35.4/arch/x86/xen/enlighten.c linux-2.6.35.4/arch/x86/xen/enlighten.c +--- linux-2.6.35.4/arch/x86/xen/enlighten.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/xen/enlighten.c 2010-09-17 20:12:09.000000000 -0400 +@@ -74,8 +74,6 @@ EXPORT_SYMBOL_GPL(xen_start_info); + + struct shared_info xen_dummy_shared_info; + +-void *xen_initial_gdt; +- + /* + * Point at some empty memory to start with. We map the real shared_info + * page as soon as fixmap is up and running. +@@ -551,7 +549,7 @@ static void xen_write_idt_entry(gate_des + + preempt_disable(); + +- start = __get_cpu_var(idt_desc).address; ++ start = (unsigned long)__get_cpu_var(idt_desc).address; + end = start + __get_cpu_var(idt_desc).size + 1; + + xen_mc_flush(); +@@ -1103,7 +1101,17 @@ asmlinkage void __init xen_start_kernel( + __userpte_alloc_gfp &= ~__GFP_HIGHMEM; + + /* Work out if we support NX */ +- x86_configure_nx(); ++#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE) ++ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 && ++ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) { ++ unsigned l, h; ++ ++ __supported_pte_mask |= _PAGE_NX; ++ rdmsr(MSR_EFER, l, h); ++ l |= EFER_NX; ++ wrmsr(MSR_EFER, l, h); ++ } ++#endif + + xen_setup_features(); + +@@ -1134,13 +1142,6 @@ asmlinkage void __init xen_start_kernel( + + machine_ops = xen_machine_ops; + +- /* +- * The only reliable way to retain the initial address of the +- * percpu gdt_page is to remember it here, so we can go and +- * mark it RW later, when the initial percpu area is freed. 
+- */ +- xen_initial_gdt = &per_cpu(gdt_page, 0); +- + xen_smp_init(); + + pgd = (pgd_t *)xen_start_info->pt_base; +diff -urNp linux-2.6.35.4/arch/x86/xen/mmu.c linux-2.6.35.4/arch/x86/xen/mmu.c +--- linux-2.6.35.4/arch/x86/xen/mmu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/xen/mmu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1694,6 +1694,8 @@ __init pgd_t *xen_setup_kernel_pagetable + convert_pfn_mfn(init_level4_pgt); + convert_pfn_mfn(level3_ident_pgt); + convert_pfn_mfn(level3_kernel_pgt); ++ convert_pfn_mfn(level3_vmalloc_pgt); ++ convert_pfn_mfn(level3_vmemmap_pgt); + + l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); + l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); +@@ -1712,7 +1714,10 @@ __init pgd_t *xen_setup_kernel_pagetable + set_page_prot(init_level4_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmalloc_pgt, PAGE_KERNEL_RO); ++ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); ++ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + +diff -urNp linux-2.6.35.4/arch/x86/xen/smp.c linux-2.6.35.4/arch/x86/xen/smp.c +--- linux-2.6.35.4/arch/x86/xen/smp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/xen/smp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -169,11 +169,6 @@ static void __init xen_smp_prepare_boot_ + { + BUG_ON(smp_processor_id() != 0); + native_smp_prepare_boot_cpu(); +- +- /* We've switched to the "real" per-cpu gdt, so make sure the +- old memory can be recycled */ +- make_lowmem_page_readwrite(xen_initial_gdt); +- + xen_setup_vcpu_info_placement(); + } + +@@ -233,8 +228,8 @@ cpu_initialize_context(unsigned int cpu, + gdt = get_cpu_gdt_table(cpu); + + ctxt->flags = VGCF_IN_KERNEL; +- ctxt->user_regs.ds = __USER_DS; +- ctxt->user_regs.es = __USER_DS; ++ ctxt->user_regs.ds = __KERNEL_DS; ++ ctxt->user_regs.es = __KERNEL_DS; + ctxt->user_regs.ss = __KERNEL_DS; + #ifdef CONFIG_X86_32 + ctxt->user_regs.fs = __KERNEL_PERCPU; +diff -urNp linux-2.6.35.4/arch/x86/xen/xen-head.S linux-2.6.35.4/arch/x86/xen/xen-head.S +--- linux-2.6.35.4/arch/x86/xen/xen-head.S 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/xen/xen-head.S 2010-09-17 20:12:09.000000000 -0400 +@@ -19,6 +19,17 @@ ENTRY(startup_xen) + #ifdef CONFIG_X86_32 + mov %esi,xen_start_info + mov $init_thread_union+THREAD_SIZE,%esp ++#ifdef CONFIG_SMP ++ movl $cpu_gdt_table,%edi ++ movl $__per_cpu_load,%eax ++ movw %ax,__KERNEL_PERCPU + 2(%edi) ++ rorl $16,%eax ++ movb %al,__KERNEL_PERCPU + 4(%edi) ++ movb %ah,__KERNEL_PERCPU + 7(%edi) ++ movl $__per_cpu_end - 1,%eax ++ subl $__per_cpu_start,%eax ++ movw %ax,__KERNEL_PERCPU + 0(%edi) ++#endif + #else + mov %rsi,xen_start_info + mov $init_thread_union+THREAD_SIZE,%rsp +diff -urNp linux-2.6.35.4/arch/x86/xen/xen-ops.h linux-2.6.35.4/arch/x86/xen/xen-ops.h +--- linux-2.6.35.4/arch/x86/xen/xen-ops.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/arch/x86/xen/xen-ops.h 2010-09-17 20:12:09.000000000 -0400 +@@ -10,8 +10,6 @@ + extern const char xen_hypervisor_callback[]; + extern const char xen_failsafe_callback[]; + +-extern void *xen_initial_gdt; +- + struct trap_info; + void xen_copy_trap_info(struct trap_info *traps); + +diff -urNp linux-2.6.35.4/block/blk-iopoll.c linux-2.6.35.4/block/blk-iopoll.c +--- linux-2.6.35.4/block/blk-iopoll.c 2010-08-26 
19:47:12.000000000 -0400 ++++ linux-2.6.35.4/block/blk-iopoll.c 2010-09-17 20:12:09.000000000 -0400 +@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopo + } + EXPORT_SYMBOL(blk_iopoll_complete); + +-static void blk_iopoll_softirq(struct softirq_action *h) ++static void blk_iopoll_softirq(void) + { + struct list_head *list = &__get_cpu_var(blk_cpu_iopoll); + int rearm = 0, budget = blk_iopoll_budget; +diff -urNp linux-2.6.35.4/block/blk-map.c linux-2.6.35.4/block/blk-map.c +--- linux-2.6.35.4/block/blk-map.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/block/blk-map.c 2010-09-17 20:12:09.000000000 -0400 +@@ -54,7 +54,7 @@ static int __blk_rq_map_user(struct requ + * direct dma. else, set up kernel bounce buffers + */ + uaddr = (unsigned long) ubuf; +- if (blk_rq_aligned(q, ubuf, len) && !map_data) ++ if (blk_rq_aligned(q, (__force void *)ubuf, len) && !map_data) + bio = bio_map_user(q, NULL, uaddr, len, reading, gfp_mask); + else + bio = bio_copy_user(q, map_data, uaddr, len, reading, gfp_mask); +@@ -297,7 +297,7 @@ int blk_rq_map_kern(struct request_queue + if (!len || !kbuf) + return -EINVAL; + +- do_copy = !blk_rq_aligned(q, kbuf, len) || object_is_on_stack(kbuf); ++ do_copy = !blk_rq_aligned(q, kbuf, len) || object_starts_on_stack(kbuf); + if (do_copy) + bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading); + else +diff -urNp linux-2.6.35.4/block/blk-softirq.c linux-2.6.35.4/block/blk-softirq.c +--- linux-2.6.35.4/block/blk-softirq.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/block/blk-softirq.c 2010-09-17 20:12:09.000000000 -0400 +@@ -17,7 +17,7 @@ static DEFINE_PER_CPU(struct list_head, + * Softirq action handler - move entries to local list and loop over them + * while passing them to the queue registered handler. 
+ */ +-static void blk_done_softirq(struct softirq_action *h) ++static void blk_done_softirq(void) + { + struct list_head *cpu_list, local_list; + +diff -urNp linux-2.6.35.4/crypto/lrw.c linux-2.6.35.4/crypto/lrw.c +--- linux-2.6.35.4/crypto/lrw.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/crypto/lrw.c 2010-09-17 20:12:09.000000000 -0400 +@@ -60,7 +60,7 @@ static int setkey(struct crypto_tfm *par + struct priv *ctx = crypto_tfm_ctx(parent); + struct crypto_cipher *child = ctx->child; + int err, i; +- be128 tmp = { 0 }; ++ be128 tmp = { 0, 0 }; + int bsize = crypto_cipher_blocksize(child); + + crypto_cipher_clear_flags(child, CRYPTO_TFM_REQ_MASK); +diff -urNp linux-2.6.35.4/Documentation/dontdiff linux-2.6.35.4/Documentation/dontdiff +--- linux-2.6.35.4/Documentation/dontdiff 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/Documentation/dontdiff 2010-09-17 20:12:09.000000000 -0400 +@@ -3,6 +3,7 @@ + *.bin + *.cpio + *.csp ++*.dbg + *.dsp + *.dvi + *.elf +@@ -38,8 +39,10 @@ + *.tab.h + *.tex + *.ver ++*.vim + *.xml + *_MODULES ++*_reg_safe.h + *_vga16.c + *~ + *.9 +@@ -49,11 +52,16 @@ + 53c700_d.h + CVS + ChangeSet ++GPATH ++GRTAGS ++GSYMS ++GTAGS + Image + Kerntypes + Module.markers + Module.symvers + PENDING ++PERF* + SCCS + System.map* + TAGS +@@ -76,7 +84,10 @@ btfixupprep + build + bvmlinux + bzImage* ++capflags.c + classlist.h* ++clut_vga16.c ++common-cmds.h + comp*.log + compile.h* + conf +@@ -100,19 +111,22 @@ fore200e_mkfirm + fore200e_pca_fw.c* + gconf + gen-devlist ++gen-kdb_cmds.c + gen_crc32table + gen_init_cpio + generated + genheaders + genksyms + *_gray256.c ++hash + ihex2fw + ikconfig.h* ++inat-tables.c + initramfs_data.cpio ++initramfs_data.cpio.bz2 + initramfs_data.cpio.gz + initramfs_list + kallsyms +-kconfig + keywords.c + ksym.c* + ksym.h* +@@ -136,10 +150,13 @@ mkboot + mkbugboot + mkcpustr + mkdep ++mkpiggy + mkprep ++mkregtable + mktables + mktree + modpost ++modules.builtin + modules.order + modversions.h* + ncscope.* +@@ -151,7 +168,9 @@ parse.h + patches* + pca200e.bin + pca200e_ecd.bin2 ++perf-archive + piggy.gz ++piggy.S + piggyback + pnmtologo + ppc_defs.h* +@@ -160,12 +179,14 @@ qconf + raid6altivec*.c + raid6int*.c + raid6tables.c ++regdb.c + relocs + series + setup + setup.bin + setup.elf + sImage ++slabinfo + sm_tbl* + split-include + syscalltab.h +@@ -189,14 +210,20 @@ version.h* + vmlinux + vmlinux-* + vmlinux.aout ++vmlinux.bin.all ++vmlinux.bin.bz2 + vmlinux.lds ++vmlinux.relocs ++voffset.h + vsyscall.lds + vsyscall_32.lds + wanxlfw.inc + uImage + unifdef ++utsrelease.h + wakeup.bin + wakeup.elf + wakeup.lds + zImage* + zconf.hash.c ++zoffset.h +diff -urNp linux-2.6.35.4/Documentation/filesystems/sysfs.txt linux-2.6.35.4/Documentation/filesystems/sysfs.txt +--- linux-2.6.35.4/Documentation/filesystems/sysfs.txt 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/Documentation/filesystems/sysfs.txt 2010-09-17 20:12:09.000000000 -0400 +@@ -123,8 +123,8 @@ set of sysfs operations for forwarding r + show and store methods of the attribute owners. 
+ + struct sysfs_ops { +- ssize_t (*show)(struct kobject *, struct attribute *, char *); +- ssize_t (*store)(struct kobject *, struct attribute *, const char *); ++ ssize_t (* const show)(struct kobject *, struct attribute *, char *); ++ ssize_t (* const store)(struct kobject *, struct attribute *, const char *); + }; + + [ Subsystems should have already defined a struct kobj_type as a +diff -urNp linux-2.6.35.4/Documentation/kernel-parameters.txt linux-2.6.35.4/Documentation/kernel-parameters.txt +--- linux-2.6.35.4/Documentation/kernel-parameters.txt 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/Documentation/kernel-parameters.txt 2010-09-17 20:12:09.000000000 -0400 +@@ -1910,6 +1910,12 @@ and is between 256 and 4096 characters. + the specified number of seconds. This is to be used if + your oopses keep scrolling off the screen. + ++ pax_nouderef [X86-32] disables UDEREF. Most likely needed under certain ++ virtualization environments that don't cope well with the ++ expand down segment used by UDEREF on X86-32. ++ ++ pax_softmode= [X86-32] 0/1 to disable/enable PaX softmode on boot already. ++ + pcbit= [HW,ISDN] + + pcd. [PARIDE] +diff -urNp linux-2.6.35.4/drivers/acpi/battery.c linux-2.6.35.4/drivers/acpi/battery.c +--- linux-2.6.35.4/drivers/acpi/battery.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/battery.c 2010-09-17 20:12:09.000000000 -0400 +@@ -810,7 +810,7 @@ DECLARE_FILE_FUNCTIONS(alarm); + } + + static struct battery_file { +- struct file_operations ops; ++ const struct file_operations ops; + mode_t mode; + const char *name; + } acpi_battery_file[] = { +diff -urNp linux-2.6.35.4/drivers/acpi/blacklist.c linux-2.6.35.4/drivers/acpi/blacklist.c +--- linux-2.6.35.4/drivers/acpi/blacklist.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/blacklist.c 2010-09-17 20:12:09.000000000 -0400 +@@ -73,7 +73,7 @@ static struct acpi_blacklist_item acpi_b + {"IBM ", "TP600E ", 0x00000105, ACPI_SIG_DSDT, less_than_or_equal, + "Incorrect _ADR", 1}, + +- {""} ++ {"", "", 0, NULL, all_versions, NULL, 0} + }; + + #if CONFIG_ACPI_BLACKLIST_YEAR +diff -urNp linux-2.6.35.4/drivers/acpi/dock.c linux-2.6.35.4/drivers/acpi/dock.c +--- linux-2.6.35.4/drivers/acpi/dock.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/dock.c 2010-09-17 20:12:09.000000000 -0400 +@@ -77,7 +77,7 @@ struct dock_dependent_device { + struct list_head list; + struct list_head hotplug_list; + acpi_handle handle; +- struct acpi_dock_ops *ops; ++ const struct acpi_dock_ops *ops; + void *context; + }; + +@@ -589,7 +589,7 @@ EXPORT_SYMBOL_GPL(unregister_dock_notifi + * the dock driver after _DCK is executed. 
+ */ + int +-register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops, ++register_hotplug_dock_device(acpi_handle handle, const struct acpi_dock_ops *ops, + void *context) + { + struct dock_dependent_device *dd; +diff -urNp linux-2.6.35.4/drivers/acpi/osl.c linux-2.6.35.4/drivers/acpi/osl.c +--- linux-2.6.35.4/drivers/acpi/osl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/osl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -523,6 +523,8 @@ acpi_os_read_memory(acpi_physical_addres + void __iomem *virt_addr; + + virt_addr = ioremap(phys_addr, width); ++ if (!virt_addr) ++ return AE_NO_MEMORY; + if (!value) + value = &dummy; + +@@ -551,6 +553,8 @@ acpi_os_write_memory(acpi_physical_addre + void __iomem *virt_addr; + + virt_addr = ioremap(phys_addr, width); ++ if (!virt_addr) ++ return AE_NO_MEMORY; + + switch (width) { + case 8: +diff -urNp linux-2.6.35.4/drivers/acpi/power_meter.c linux-2.6.35.4/drivers/acpi/power_meter.c +--- linux-2.6.35.4/drivers/acpi/power_meter.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/power_meter.c 2010-09-17 20:12:09.000000000 -0400 +@@ -316,8 +316,6 @@ static ssize_t set_trip(struct device *d + return res; + + temp /= 1000; +- if (temp < 0) +- return -EINVAL; + + mutex_lock(&resource->lock); + resource->trip[attr->index - 7] = temp; +diff -urNp linux-2.6.35.4/drivers/acpi/proc.c linux-2.6.35.4/drivers/acpi/proc.c +--- linux-2.6.35.4/drivers/acpi/proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/proc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -391,20 +391,15 @@ acpi_system_write_wakeup_device(struct f + size_t count, loff_t * ppos) + { + struct list_head *node, *next; +- char strbuf[5]; +- char str[5] = ""; +- unsigned int len = count; ++ char strbuf[5] = {0}; + struct acpi_device *found_dev = NULL; + +- if (len > 4) +- len = 4; +- if (len < 0) +- return -EFAULT; ++ if (count > 4) ++ count = 4; + +- if (copy_from_user(strbuf, buffer, len)) ++ if (copy_from_user(strbuf, buffer, count)) + return -EFAULT; +- strbuf[len] = '\0'; +- sscanf(strbuf, "%s", str); ++ strbuf[count] = '\0'; + + mutex_lock(&acpi_device_lock); + list_for_each_safe(node, next, &acpi_wakeup_device_list) { +@@ -413,7 +408,7 @@ acpi_system_write_wakeup_device(struct f + if (!dev->wakeup.flags.valid) + continue; + +- if (!strncmp(dev->pnp.bus_id, str, 4)) { ++ if (!strncmp(dev->pnp.bus_id, strbuf, 4)) { + dev->wakeup.state.enabled = + dev->wakeup.state.enabled ? 
0 : 1; + found_dev = dev; +diff -urNp linux-2.6.35.4/drivers/acpi/processor_driver.c linux-2.6.35.4/drivers/acpi/processor_driver.c +--- linux-2.6.35.4/drivers/acpi/processor_driver.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/processor_driver.c 2010-09-17 20:12:09.000000000 -0400 +@@ -586,7 +586,7 @@ static int __cpuinit acpi_processor_add( + return 0; + #endif + +- BUG_ON((pr->id >= nr_cpu_ids) || (pr->id < 0)); ++ BUG_ON(pr->id >= nr_cpu_ids); + + /* + * Buggy BIOS check +diff -urNp linux-2.6.35.4/drivers/acpi/processor_idle.c linux-2.6.35.4/drivers/acpi/processor_idle.c +--- linux-2.6.35.4/drivers/acpi/processor_idle.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/processor_idle.c 2010-09-17 20:12:09.000000000 -0400 +@@ -124,7 +124,7 @@ static struct dmi_system_id __cpuinitdat + DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."), + DMI_MATCH(DMI_PRODUCT_NAME,"L8400B series Notebook PC")}, + (void *)1}, +- {}, ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL}, + }; + + +diff -urNp linux-2.6.35.4/drivers/acpi/sleep.c linux-2.6.35.4/drivers/acpi/sleep.c +--- linux-2.6.35.4/drivers/acpi/sleep.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/sleep.c 2010-09-17 20:12:09.000000000 -0400 +@@ -318,7 +318,7 @@ static int acpi_suspend_state_valid(susp + } + } + +-static struct platform_suspend_ops acpi_suspend_ops = { ++static const struct platform_suspend_ops acpi_suspend_ops = { + .valid = acpi_suspend_state_valid, + .begin = acpi_suspend_begin, + .prepare_late = acpi_pm_prepare, +@@ -346,7 +346,7 @@ static int acpi_suspend_begin_old(suspen + * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has + * been requested. + */ +-static struct platform_suspend_ops acpi_suspend_ops_old = { ++static const struct platform_suspend_ops acpi_suspend_ops_old = { + .valid = acpi_suspend_state_valid, + .begin = acpi_suspend_begin_old, + .prepare_late = acpi_pm_freeze, +@@ -478,7 +478,7 @@ static void acpi_pm_thaw(void) + acpi_enable_all_runtime_gpes(); + } + +-static struct platform_hibernation_ops acpi_hibernation_ops = { ++static const struct platform_hibernation_ops acpi_hibernation_ops = { + .begin = acpi_hibernation_begin, + .end = acpi_pm_end, + .pre_snapshot = acpi_hibernation_pre_snapshot, +@@ -528,7 +528,7 @@ static int acpi_hibernation_pre_snapshot + * The following callbacks are used if the pre-ACPI 2.0 suspend ordering has + * been requested. 
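[Note on the acpi/proc.c hunk above: it is a hardening fix rather than a constification. The count is an unsigned size, so the old `if (len < 0)` test could never fire; the rewrite drops it, clamps the user-supplied count to the buffer before copy_from_user(), and NUL-terminates in place. The same unsigned-can't-be-negative reasoning removes the always-false `pr->id < 0` from the BUG_ON in processor_driver.c above. A user-space sketch of the bounded-copy shape, with memcpy standing in for copy_from_user():]

#include <assert.h>
#include <stddef.h>
#include <string.h>

/* Copy at most 4 bytes of input into a 5-byte buffer and NUL-terminate,
 * mirroring the patched acpi_system_write_wakeup_device(). */
static void bounded_copy(char dst[5], const char *src, size_t count)
{
    if (count > 4)             /* clamp; count is unsigned, no < 0 test needed */
        count = 4;
    memcpy(dst, src, count);   /* stand-in for copy_from_user() */
    dst[count] = '\0';
}

int main(void)
{
    char buf[5] = {0};
    bounded_copy(buf, "PCI0-and-more", 13);
    assert(strcmp(buf, "PCI0") == 0);
    return 0;
}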
+ */ +-static struct platform_hibernation_ops acpi_hibernation_ops_old = { ++static const struct platform_hibernation_ops acpi_hibernation_ops_old = { + .begin = acpi_hibernation_begin_old, + .end = acpi_pm_end, + .pre_snapshot = acpi_hibernation_pre_snapshot_old, +diff -urNp linux-2.6.35.4/drivers/acpi/video.c linux-2.6.35.4/drivers/acpi/video.c +--- linux-2.6.35.4/drivers/acpi/video.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/acpi/video.c 2010-09-17 20:12:09.000000000 -0400 +@@ -363,7 +363,7 @@ static int acpi_video_set_brightness(str + vd->brightness->levels[request_level]); + } + +-static struct backlight_ops acpi_backlight_ops = { ++static const struct backlight_ops acpi_backlight_ops = { + .get_brightness = acpi_video_get_brightness, + .update_status = acpi_video_set_brightness, + }; +diff -urNp linux-2.6.35.4/drivers/ata/ahci.c linux-2.6.35.4/drivers/ata/ahci.c +--- linux-2.6.35.4/drivers/ata/ahci.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/ahci.c 2010-09-17 20:12:09.000000000 -0400 +@@ -89,17 +89,17 @@ static int ahci_pci_device_suspend(struc + static int ahci_pci_device_resume(struct pci_dev *pdev); + #endif + +-static struct ata_port_operations ahci_vt8251_ops = { ++static const struct ata_port_operations ahci_vt8251_ops = { + .inherits = &ahci_ops, + .hardreset = ahci_vt8251_hardreset, + }; + +-static struct ata_port_operations ahci_p5wdh_ops = { ++static const struct ata_port_operations ahci_p5wdh_ops = { + .inherits = &ahci_ops, + .hardreset = ahci_p5wdh_hardreset, + }; + +-static struct ata_port_operations ahci_sb600_ops = { ++static const struct ata_port_operations ahci_sb600_ops = { + .inherits = &ahci_ops, + .softreset = ahci_sb600_softreset, + .pmp_softreset = ahci_sb600_softreset, +@@ -370,7 +370,7 @@ static const struct pci_device_id ahci_p + { PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_STORAGE_SATA_AHCI, 0xffffff, board_ahci }, + +- { } /* terminate list */ ++ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ + }; + + +diff -urNp linux-2.6.35.4/drivers/ata/ahci.h linux-2.6.35.4/drivers/ata/ahci.h +--- linux-2.6.35.4/drivers/ata/ahci.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/ahci.h 2010-09-17 20:12:09.000000000 -0400 +@@ -298,7 +298,7 @@ struct ahci_host_priv { + extern int ahci_ignore_sss; + + extern struct scsi_host_template ahci_sht; +-extern struct ata_port_operations ahci_ops; ++extern const struct ata_port_operations ahci_ops; + + void ahci_save_initial_config(struct device *dev, + struct ahci_host_priv *hpriv, +diff -urNp linux-2.6.35.4/drivers/ata/ata_generic.c linux-2.6.35.4/drivers/ata/ata_generic.c +--- linux-2.6.35.4/drivers/ata/ata_generic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/ata_generic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -104,7 +104,7 @@ static struct scsi_host_template generic + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations generic_port_ops = { ++static const struct ata_port_operations generic_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_unknown, + .set_mode = generic_set_mode, +diff -urNp linux-2.6.35.4/drivers/ata/ata_piix.c linux-2.6.35.4/drivers/ata/ata_piix.c +--- linux-2.6.35.4/drivers/ata/ata_piix.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/ata_piix.c 2010-09-17 20:12:09.000000000 -0400 +@@ -302,7 +302,7 @@ static const struct pci_device_id piix_p + { 0x8086, 0x1c08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, + /* SATA Controller IDE (CPT) 
*/ + { 0x8086, 0x1c09, PCI_ANY_ID, PCI_ANY_ID, 0, 0, ich8_2port_sata }, +- { } /* terminate list */ ++ { 0, 0, 0, 0, 0, 0, 0 } /* terminate list */ + }; + + static struct pci_driver piix_pci_driver = { +@@ -320,12 +320,12 @@ static struct scsi_host_template piix_sh + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations piix_sata_ops = { ++static const struct ata_port_operations piix_sata_ops = { + .inherits = &ata_bmdma32_port_ops, + .sff_irq_check = piix_irq_check, + }; + +-static struct ata_port_operations piix_pata_ops = { ++static const struct ata_port_operations piix_pata_ops = { + .inherits = &piix_sata_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = piix_set_piomode, +@@ -333,18 +333,18 @@ static struct ata_port_operations piix_p + .prereset = piix_pata_prereset, + }; + +-static struct ata_port_operations piix_vmw_ops = { ++static const struct ata_port_operations piix_vmw_ops = { + .inherits = &piix_pata_ops, + .bmdma_status = piix_vmw_bmdma_status, + }; + +-static struct ata_port_operations ich_pata_ops = { ++static const struct ata_port_operations ich_pata_ops = { + .inherits = &piix_pata_ops, + .cable_detect = ich_pata_cable_detect, + .set_dmamode = ich_set_dmamode, + }; + +-static struct ata_port_operations piix_sidpr_sata_ops = { ++static const struct ata_port_operations piix_sidpr_sata_ops = { + .inherits = &piix_sata_ops, + .hardreset = sata_std_hardreset, + .scr_read = piix_sidpr_scr_read, +@@ -620,7 +620,7 @@ static const struct ich_laptop ich_lapto + { 0x2653, 0x1043, 0x82D8 }, /* ICH6M on Asus Eee 701 */ + { 0x27df, 0x104d, 0x900e }, /* ICH7 on Sony TZ-90 */ + /* end marker */ +- { 0, } ++ { 0, 0, 0 } + }; + + /** +@@ -1112,7 +1112,7 @@ static int piix_broken_suspend(void) + }, + }, + +- { } /* terminate list */ ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } /* terminate list */ + }; + static const char *oemstrs[] = { + "Tecra M3,", +diff -urNp linux-2.6.35.4/drivers/ata/libahci.c linux-2.6.35.4/drivers/ata/libahci.c +--- linux-2.6.35.4/drivers/ata/libahci.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/libahci.c 2010-09-17 20:12:09.000000000 -0400 +@@ -149,7 +149,7 @@ struct scsi_host_template ahci_sht = { + }; + EXPORT_SYMBOL_GPL(ahci_sht); + +-struct ata_port_operations ahci_ops = { ++const struct ata_port_operations ahci_ops = { + .inherits = &sata_pmp_port_ops, + + .qc_defer = ahci_pmp_qc_defer, +diff -urNp linux-2.6.35.4/drivers/ata/libata-acpi.c linux-2.6.35.4/drivers/ata/libata-acpi.c +--- linux-2.6.35.4/drivers/ata/libata-acpi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/libata-acpi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -224,12 +224,12 @@ static void ata_acpi_dev_uevent(acpi_han + ata_acpi_uevent(dev->link->ap, dev, event); + } + +-static struct acpi_dock_ops ata_acpi_dev_dock_ops = { ++static const struct acpi_dock_ops ata_acpi_dev_dock_ops = { + .handler = ata_acpi_dev_notify_dock, + .uevent = ata_acpi_dev_uevent, + }; + +-static struct acpi_dock_ops ata_acpi_ap_dock_ops = { ++static const struct acpi_dock_ops ata_acpi_ap_dock_ops = { + .handler = ata_acpi_ap_notify_dock, + .uevent = ata_acpi_ap_uevent, + }; +diff -urNp linux-2.6.35.4/drivers/ata/libata-core.c linux-2.6.35.4/drivers/ata/libata-core.c +--- linux-2.6.35.4/drivers/ata/libata-core.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/libata-core.c 2010-09-17 20:12:09.000000000 -0400 +@@ -901,7 +901,7 @@ static const struct ata_xfer_ent { + { ATA_SHIFT_PIO, ATA_NR_PIO_MODES, XFER_PIO_0 }, + { 
ATA_SHIFT_MWDMA, ATA_NR_MWDMA_MODES, XFER_MW_DMA_0 }, + { ATA_SHIFT_UDMA, ATA_NR_UDMA_MODES, XFER_UDMA_0 }, +- { -1, }, ++ { -1, 0, 0 } + }; + + /** +@@ -3073,7 +3073,7 @@ static const struct ata_timing ata_timin + { XFER_UDMA_5, 0, 0, 0, 0, 0, 0, 0, 0, 20 }, + { XFER_UDMA_6, 0, 0, 0, 0, 0, 0, 0, 0, 15 }, + +- { 0xFF } ++ { 0xFF, 0, 0, 0, 0, 0, 0, 0, 0 } + }; + + #define ENOUGH(v, unit) (((v)-1)/(unit)+1) +@@ -4323,7 +4323,7 @@ static const struct ata_blacklist_entry + { "PIONEER DVD-RW DVRTD08", "1.00", ATA_HORKAGE_NOSETXFER }, + + /* End Marker */ +- { } ++ { NULL, NULL, 0 } + }; + + static int strn_pattern_cmp(const char *patt, const char *name, int wildchar) +@@ -5869,7 +5869,7 @@ static void ata_host_stop(struct device + * LOCKING: + * None. + */ +-static void ata_finalize_port_ops(struct ata_port_operations *ops) ++static void ata_finalize_port_ops(const struct ata_port_operations *ops) + { + static DEFINE_SPINLOCK(lock); + const struct ata_port_operations *cur; +@@ -5881,6 +5881,7 @@ static void ata_finalize_port_ops(struct + return; + + spin_lock(&lock); ++ pax_open_kernel(); + + for (cur = ops->inherits; cur; cur = cur->inherits) { + void **inherit = (void **)cur; +@@ -5894,8 +5895,9 @@ static void ata_finalize_port_ops(struct + if (IS_ERR(*pp)) + *pp = NULL; + +- ops->inherits = NULL; ++ ((struct ata_port_operations *)ops)->inherits = NULL; + ++ pax_close_kernel(); + spin_unlock(&lock); + } + +@@ -5992,7 +5994,7 @@ int ata_host_start(struct ata_host *host + */ + /* KILLME - the only user left is ipr */ + void ata_host_init(struct ata_host *host, struct device *dev, +- unsigned long flags, struct ata_port_operations *ops) ++ unsigned long flags, const struct ata_port_operations *ops) + { + spin_lock_init(&host->lock); + host->dev = dev; +@@ -6642,7 +6644,7 @@ static void ata_dummy_error_handler(stru + /* truly dummy */ + } + +-struct ata_port_operations ata_dummy_port_ops = { ++const struct ata_port_operations ata_dummy_port_ops = { + .qc_prep = ata_noop_qc_prep, + .qc_issue = ata_dummy_qc_issue, + .error_handler = ata_dummy_error_handler, +diff -urNp linux-2.6.35.4/drivers/ata/libata-eh.c linux-2.6.35.4/drivers/ata/libata-eh.c +--- linux-2.6.35.4/drivers/ata/libata-eh.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/libata-eh.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3680,7 +3680,7 @@ void ata_do_eh(struct ata_port *ap, ata_ + */ + void ata_std_error_handler(struct ata_port *ap) + { +- struct ata_port_operations *ops = ap->ops; ++ const struct ata_port_operations *ops = ap->ops; + ata_reset_fn_t hardreset = ops->hardreset; + + /* ignore built-in hardreset if SCR access is not available */ +diff -urNp linux-2.6.35.4/drivers/ata/libata-pmp.c linux-2.6.35.4/drivers/ata/libata-pmp.c +--- linux-2.6.35.4/drivers/ata/libata-pmp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/libata-pmp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -868,7 +868,7 @@ static int sata_pmp_handle_link_fail(str + */ + static int sata_pmp_eh_recover(struct ata_port *ap) + { +- struct ata_port_operations *ops = ap->ops; ++ const struct ata_port_operations *ops = ap->ops; + int pmp_tries, link_tries[SATA_PMP_MAX_PORTS]; + struct ata_link *pmp_link = &ap->link; + struct ata_device *pmp_dev = pmp_link->device; +diff -urNp linux-2.6.35.4/drivers/ata/pata_acpi.c linux-2.6.35.4/drivers/ata/pata_acpi.c +--- linux-2.6.35.4/drivers/ata/pata_acpi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_acpi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -216,7 +216,7 @@ 
static struct scsi_host_template pacpi_s + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations pacpi_ops = { ++static const struct ata_port_operations pacpi_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_issue = pacpi_qc_issue, + .cable_detect = pacpi_cable_detect, +diff -urNp linux-2.6.35.4/drivers/ata/pata_ali.c linux-2.6.35.4/drivers/ata/pata_ali.c +--- linux-2.6.35.4/drivers/ata/pata_ali.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_ali.c 2010-09-17 20:12:09.000000000 -0400 +@@ -363,7 +363,7 @@ static struct scsi_host_template ali_sht + * Port operations for PIO only ALi + */ + +-static struct ata_port_operations ali_early_port_ops = { ++static const struct ata_port_operations ali_early_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = ali_set_piomode, +@@ -380,7 +380,7 @@ static const struct ata_port_operations + * Port operations for DMA capable ALi without cable + * detect + */ +-static struct ata_port_operations ali_20_port_ops = { ++static const struct ata_port_operations ali_20_port_ops = { + .inherits = &ali_dma_base_ops, + .cable_detect = ata_cable_40wire, + .mode_filter = ali_20_filter, +@@ -391,7 +391,7 @@ static struct ata_port_operations ali_20 + /* + * Port operations for DMA capable ALi with cable detect + */ +-static struct ata_port_operations ali_c2_port_ops = { ++static const struct ata_port_operations ali_c2_port_ops = { + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .cable_detect = ali_c2_cable_detect, +@@ -402,7 +402,7 @@ static struct ata_port_operations ali_c2 + /* + * Port operations for DMA capable ALi with cable detect + */ +-static struct ata_port_operations ali_c4_port_ops = { ++static const struct ata_port_operations ali_c4_port_ops = { + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .cable_detect = ali_c2_cable_detect, +@@ -412,7 +412,7 @@ static struct ata_port_operations ali_c4 + /* + * Port operations for DMA capable ALi with cable detect and LBA48 + */ +-static struct ata_port_operations ali_c5_port_ops = { ++static const struct ata_port_operations ali_c5_port_ops = { + .inherits = &ali_dma_base_ops, + .check_atapi_dma = ali_check_atapi_dma, + .dev_config = ali_warn_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/pata_amd.c linux-2.6.35.4/drivers/ata/pata_amd.c +--- linux-2.6.35.4/drivers/ata/pata_amd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_amd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -397,28 +397,28 @@ static const struct ata_port_operations + .prereset = amd_pre_reset, + }; + +-static struct ata_port_operations amd33_port_ops = { ++static const struct ata_port_operations amd33_port_ops = { + .inherits = &amd_base_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = amd33_set_piomode, + .set_dmamode = amd33_set_dmamode, + }; + +-static struct ata_port_operations amd66_port_ops = { ++static const struct ata_port_operations amd66_port_ops = { + .inherits = &amd_base_port_ops, + .cable_detect = ata_cable_unknown, + .set_piomode = amd66_set_piomode, + .set_dmamode = amd66_set_dmamode, + }; + +-static struct ata_port_operations amd100_port_ops = { ++static const struct ata_port_operations amd100_port_ops = { + .inherits = &amd_base_port_ops, + .cable_detect = ata_cable_unknown, + .set_piomode = amd100_set_piomode, + .set_dmamode = amd100_set_dmamode, + }; + +-static struct ata_port_operations amd133_port_ops = { ++static const struct ata_port_operations 
amd133_port_ops = { + .inherits = &amd_base_port_ops, + .cable_detect = amd_cable_detect, + .set_piomode = amd133_set_piomode, +@@ -433,13 +433,13 @@ static const struct ata_port_operations + .host_stop = nv_host_stop, + }; + +-static struct ata_port_operations nv100_port_ops = { ++static const struct ata_port_operations nv100_port_ops = { + .inherits = &nv_base_port_ops, + .set_piomode = nv100_set_piomode, + .set_dmamode = nv100_set_dmamode, + }; + +-static struct ata_port_operations nv133_port_ops = { ++static const struct ata_port_operations nv133_port_ops = { + .inherits = &nv_base_port_ops, + .set_piomode = nv133_set_piomode, + .set_dmamode = nv133_set_dmamode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_artop.c linux-2.6.35.4/drivers/ata/pata_artop.c +--- linux-2.6.35.4/drivers/ata/pata_artop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_artop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -311,7 +311,7 @@ static struct scsi_host_template artop_s + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations artop6210_ops = { ++static const struct ata_port_operations artop6210_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = artop6210_set_piomode, +@@ -320,7 +320,7 @@ static struct ata_port_operations artop6 + .qc_defer = artop6210_qc_defer, + }; + +-static struct ata_port_operations artop6260_ops = { ++static const struct ata_port_operations artop6260_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = artop6260_cable_detect, + .set_piomode = artop6260_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_at32.c linux-2.6.35.4/drivers/ata/pata_at32.c +--- linux-2.6.35.4/drivers/ata/pata_at32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_at32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -173,7 +173,7 @@ static struct scsi_host_template at32_sh + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations at32_port_ops = { ++static const struct ata_port_operations at32_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = pata_at32_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_at91.c linux-2.6.35.4/drivers/ata/pata_at91.c +--- linux-2.6.35.4/drivers/ata/pata_at91.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_at91.c 2010-09-17 20:12:09.000000000 -0400 +@@ -196,7 +196,7 @@ static struct scsi_host_template pata_at + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations pata_at91_port_ops = { ++static const struct ata_port_operations pata_at91_port_ops = { + .inherits = &ata_sff_port_ops, + + .sff_data_xfer = pata_at91_data_xfer_noirq, +diff -urNp linux-2.6.35.4/drivers/ata/pata_atiixp.c linux-2.6.35.4/drivers/ata/pata_atiixp.c +--- linux-2.6.35.4/drivers/ata/pata_atiixp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_atiixp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -214,7 +214,7 @@ static struct scsi_host_template atiixp_ + .sg_tablesize = LIBATA_DUMB_MAX_PRD, + }; + +-static struct ata_port_operations atiixp_port_ops = { ++static const struct ata_port_operations atiixp_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .qc_prep = ata_bmdma_dumb_qc_prep, +diff -urNp linux-2.6.35.4/drivers/ata/pata_atp867x.c linux-2.6.35.4/drivers/ata/pata_atp867x.c +--- linux-2.6.35.4/drivers/ata/pata_atp867x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_atp867x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -275,7 +275,7 @@ static struct 
scsi_host_template atp867x + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations atp867x_ops = { ++static const struct ata_port_operations atp867x_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = atp867x_cable_detect, + .set_piomode = atp867x_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_bf54x.c linux-2.6.35.4/drivers/ata/pata_bf54x.c +--- linux-2.6.35.4/drivers/ata/pata_bf54x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_bf54x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1420,7 +1420,7 @@ static struct scsi_host_template bfin_sh + .dma_boundary = ATA_DMA_BOUNDARY, + }; + +-static struct ata_port_operations bfin_pata_ops = { ++static const struct ata_port_operations bfin_pata_ops = { + .inherits = &ata_bmdma_port_ops, + + .set_piomode = bfin_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cmd640.c linux-2.6.35.4/drivers/ata/pata_cmd640.c +--- linux-2.6.35.4/drivers/ata/pata_cmd640.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cmd640.c 2010-09-17 20:12:09.000000000 -0400 +@@ -165,7 +165,7 @@ static struct scsi_host_template cmd640_ + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations cmd640_port_ops = { ++static const struct ata_port_operations cmd640_port_ops = { + .inherits = &ata_sff_port_ops, + /* In theory xfer_noirq is not needed once we kill the prefetcher */ + .sff_data_xfer = ata_sff_data_xfer_noirq, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cmd64x.c linux-2.6.35.4/drivers/ata/pata_cmd64x.c +--- linux-2.6.35.4/drivers/ata/pata_cmd64x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cmd64x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -274,18 +274,18 @@ static const struct ata_port_operations + .set_dmamode = cmd64x_set_dmamode, + }; + +-static struct ata_port_operations cmd64x_port_ops = { ++static const struct ata_port_operations cmd64x_port_ops = { + .inherits = &cmd64x_base_ops, + .cable_detect = ata_cable_40wire, + }; + +-static struct ata_port_operations cmd646r1_port_ops = { ++static const struct ata_port_operations cmd646r1_port_ops = { + .inherits = &cmd64x_base_ops, + .bmdma_stop = cmd646r1_bmdma_stop, + .cable_detect = ata_cable_40wire, + }; + +-static struct ata_port_operations cmd648_port_ops = { ++static const struct ata_port_operations cmd648_port_ops = { + .inherits = &cmd64x_base_ops, + .bmdma_stop = cmd648_bmdma_stop, + .cable_detect = cmd648_cable_detect, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5520.c linux-2.6.35.4/drivers/ata/pata_cs5520.c +--- linux-2.6.35.4/drivers/ata/pata_cs5520.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cs5520.c 2010-09-17 20:12:09.000000000 -0400 +@@ -108,7 +108,7 @@ static struct scsi_host_template cs5520_ + .sg_tablesize = LIBATA_DUMB_MAX_PRD, + }; + +-static struct ata_port_operations cs5520_port_ops = { ++static const struct ata_port_operations cs5520_port_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_prep = ata_bmdma_dumb_qc_prep, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5530.c linux-2.6.35.4/drivers/ata/pata_cs5530.c +--- linux-2.6.35.4/drivers/ata/pata_cs5530.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cs5530.c 2010-09-17 20:12:09.000000000 -0400 +@@ -164,7 +164,7 @@ static struct scsi_host_template cs5530_ + .sg_tablesize = LIBATA_DUMB_MAX_PRD, + }; + +-static struct ata_port_operations cs5530_port_ops = { ++static const struct ata_port_operations cs5530_port_ops = { + .inherits = 
&ata_bmdma_port_ops, + + .qc_prep = ata_bmdma_dumb_qc_prep, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5535.c linux-2.6.35.4/drivers/ata/pata_cs5535.c +--- linux-2.6.35.4/drivers/ata/pata_cs5535.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cs5535.c 2010-09-17 20:12:09.000000000 -0400 +@@ -160,7 +160,7 @@ static struct scsi_host_template cs5535_ + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations cs5535_port_ops = { ++static const struct ata_port_operations cs5535_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = cs5535_cable_detect, + .set_piomode = cs5535_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cs5536.c linux-2.6.35.4/drivers/ata/pata_cs5536.c +--- linux-2.6.35.4/drivers/ata/pata_cs5536.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cs5536.c 2010-09-17 20:12:09.000000000 -0400 +@@ -223,7 +223,7 @@ static struct scsi_host_template cs5536_ + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations cs5536_port_ops = { ++static const struct ata_port_operations cs5536_port_ops = { + .inherits = &ata_bmdma32_port_ops, + .cable_detect = cs5536_cable_detect, + .set_piomode = cs5536_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_cypress.c linux-2.6.35.4/drivers/ata/pata_cypress.c +--- linux-2.6.35.4/drivers/ata/pata_cypress.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_cypress.c 2010-09-17 20:12:09.000000000 -0400 +@@ -115,7 +115,7 @@ static struct scsi_host_template cy82c69 + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations cy82c693_port_ops = { ++static const struct ata_port_operations cy82c693_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = cy82c693_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_efar.c linux-2.6.35.4/drivers/ata/pata_efar.c +--- linux-2.6.35.4/drivers/ata/pata_efar.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_efar.c 2010-09-17 20:12:09.000000000 -0400 +@@ -238,7 +238,7 @@ static struct scsi_host_template efar_sh + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations efar_ops = { ++static const struct ata_port_operations efar_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = efar_cable_detect, + .set_piomode = efar_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt366.c linux-2.6.35.4/drivers/ata/pata_hpt366.c +--- linux-2.6.35.4/drivers/ata/pata_hpt366.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_hpt366.c 2010-09-17 20:12:09.000000000 -0400 +@@ -269,7 +269,7 @@ static struct scsi_host_template hpt36x_ + * Configuration for HPT366/68 + */ + +-static struct ata_port_operations hpt366_port_ops = { ++static const struct ata_port_operations hpt366_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = hpt36x_cable_detect, + .mode_filter = hpt366_filter, +diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt37x.c linux-2.6.35.4/drivers/ata/pata_hpt37x.c +--- linux-2.6.35.4/drivers/ata/pata_hpt37x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_hpt37x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -564,7 +564,7 @@ static struct scsi_host_template hpt37x_ + * Configuration for HPT370 + */ + +-static struct ata_port_operations hpt370_port_ops = { ++static const struct ata_port_operations hpt370_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .bmdma_stop = hpt370_bmdma_stop, +@@ -580,7 +580,7 @@ static struct 
ata_port_operations hpt370 + * Configuration for HPT370A. Close to 370 but less filters + */ + +-static struct ata_port_operations hpt370a_port_ops = { ++static const struct ata_port_operations hpt370a_port_ops = { + .inherits = &hpt370_port_ops, + .mode_filter = hpt370a_filter, + }; +@@ -590,7 +590,7 @@ static struct ata_port_operations hpt370 + * and DMA mode setting functionality. + */ + +-static struct ata_port_operations hpt372_port_ops = { ++static const struct ata_port_operations hpt372_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .bmdma_stop = hpt37x_bmdma_stop, +@@ -606,7 +606,7 @@ static struct ata_port_operations hpt372 + * but we have a different cable detection procedure for function 1. + */ + +-static struct ata_port_operations hpt374_fn1_port_ops = { ++static const struct ata_port_operations hpt374_fn1_port_ops = { + .inherits = &hpt372_port_ops, + .cable_detect = hpt374_fn1_cable_detect, + .prereset = hpt37x_pre_reset, +diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c +--- linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_hpt3x2n.c 2010-09-17 20:12:09.000000000 -0400 +@@ -331,7 +331,7 @@ static struct scsi_host_template hpt3x2n + * Configuration for HPT3x2n. + */ + +-static struct ata_port_operations hpt3x2n_port_ops = { ++static const struct ata_port_operations hpt3x2n_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .bmdma_stop = hpt3x2n_bmdma_stop, +diff -urNp linux-2.6.35.4/drivers/ata/pata_hpt3x3.c linux-2.6.35.4/drivers/ata/pata_hpt3x3.c +--- linux-2.6.35.4/drivers/ata/pata_hpt3x3.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_hpt3x3.c 2010-09-17 20:12:09.000000000 -0400 +@@ -141,7 +141,7 @@ static struct scsi_host_template hpt3x3_ + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations hpt3x3_port_ops = { ++static const struct ata_port_operations hpt3x3_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = hpt3x3_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_icside.c linux-2.6.35.4/drivers/ata/pata_icside.c +--- linux-2.6.35.4/drivers/ata/pata_icside.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_icside.c 2010-09-17 20:12:09.000000000 -0400 +@@ -320,7 +320,7 @@ static void pata_icside_postreset(struct + } + } + +-static struct ata_port_operations pata_icside_port_ops = { ++static const struct ata_port_operations pata_icside_port_ops = { + .inherits = &ata_bmdma_port_ops, + /* no need to build any PRD tables for DMA */ + .qc_prep = ata_noop_qc_prep, +diff -urNp linux-2.6.35.4/drivers/ata/pata_isapnp.c linux-2.6.35.4/drivers/ata/pata_isapnp.c +--- linux-2.6.35.4/drivers/ata/pata_isapnp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_isapnp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -23,12 +23,12 @@ static struct scsi_host_template isapnp_ + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations isapnp_port_ops = { ++static const struct ata_port_operations isapnp_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + }; + +-static struct ata_port_operations isapnp_noalt_port_ops = { ++static const struct ata_port_operations isapnp_noalt_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + /* No altstatus so we don't want to use the lost interrupt poll */ +diff -urNp linux-2.6.35.4/drivers/ata/pata_it8213.c 
linux-2.6.35.4/drivers/ata/pata_it8213.c +--- linux-2.6.35.4/drivers/ata/pata_it8213.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_it8213.c 2010-09-17 20:12:09.000000000 -0400 +@@ -233,7 +233,7 @@ static struct scsi_host_template it8213_ + }; + + +-static struct ata_port_operations it8213_ops = { ++static const struct ata_port_operations it8213_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = it8213_cable_detect, + .set_piomode = it8213_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_it821x.c linux-2.6.35.4/drivers/ata/pata_it821x.c +--- linux-2.6.35.4/drivers/ata/pata_it821x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_it821x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -801,7 +801,7 @@ static struct scsi_host_template it821x_ + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations it821x_smart_port_ops = { ++static const struct ata_port_operations it821x_smart_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma= it821x_check_atapi_dma, +@@ -815,7 +815,7 @@ static struct ata_port_operations it821x + .port_start = it821x_port_start, + }; + +-static struct ata_port_operations it821x_passthru_port_ops = { ++static const struct ata_port_operations it821x_passthru_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma= it821x_check_atapi_dma, +@@ -831,7 +831,7 @@ static struct ata_port_operations it821x + .port_start = it821x_port_start, + }; + +-static struct ata_port_operations it821x_rdc_port_ops = { ++static const struct ata_port_operations it821x_rdc_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma= it821x_check_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c +--- linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_ixp4xx_cf.c 2010-09-17 20:12:09.000000000 -0400 +@@ -89,7 +89,7 @@ static struct scsi_host_template ixp4xx_ + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations ixp4xx_port_ops = { ++static const struct ata_port_operations ixp4xx_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ixp4xx_mmio_data_xfer, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pata_jmicron.c linux-2.6.35.4/drivers/ata/pata_jmicron.c +--- linux-2.6.35.4/drivers/ata/pata_jmicron.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_jmicron.c 2010-09-17 20:12:09.000000000 -0400 +@@ -111,7 +111,7 @@ static struct scsi_host_template jmicron + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations jmicron_ops = { ++static const struct ata_port_operations jmicron_ops = { + .inherits = &ata_bmdma_port_ops, + .prereset = jmicron_pre_reset, + }; +diff -urNp linux-2.6.35.4/drivers/ata/pata_legacy.c linux-2.6.35.4/drivers/ata/pata_legacy.c +--- linux-2.6.35.4/drivers/ata/pata_legacy.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_legacy.c 2010-09-17 20:12:09.000000000 -0400 +@@ -113,7 +113,7 @@ struct legacy_probe { + + struct legacy_controller { + const char *name; +- struct ata_port_operations *ops; ++ const struct ata_port_operations *ops; + unsigned int pio_mask; + unsigned int flags; + unsigned int pflags; +@@ -230,12 +230,12 @@ static const struct ata_port_operations + * pio_mask as well. 
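[Note: from here through the rest of drivers/ata/ the pattern is uniform. Every `struct ata_port_operations` (like the backlight, suspend, and dock ops earlier) becomes `const`, so these function-pointer tables land in read-only memory and are no longer a convenient target for a kernel write primitive. Unset slots are filled from the parent table via the `.inherits` chain, which is why one writer must remain: ata_finalize_port_ops() brackets its one-time fix-ups with pax_open_kernel()/pax_close_kernel() plus a cast, as seen in the libata-core.c hunk above, and the single table that genuinely needs runtime assignment (octeon_cf, further down) is left non-const and annotated "cannot be const". A user-space analogue of the idea, with mprotect(2) standing in for the PaX open/close pair (illustrative only, not the kernel mechanism):]

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*greet)(void); };

static void hi(void)  { puts("hi"); }
static void bye(void) { puts("bye"); }

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    struct ops *tbl;
    if (posix_memalign((void **)&tbl, pg, pg))  /* page-aligned "ops table" */
        return 1;
    tbl->greet = hi;

    mprotect(tbl, pg, PROT_READ);               /* table is now effectively const */
    /* tbl->greet = bye; would fault here */

    mprotect(tbl, pg, PROT_READ | PROT_WRITE);  /* ~ pax_open_kernel() */
    tbl->greet = bye;                           /* the one-time fix-up */
    mprotect(tbl, pg, PROT_READ);               /* ~ pax_close_kernel() */

    tbl->greet();                               /* calls bye() */

    mprotect(tbl, pg, PROT_READ | PROT_WRITE);  /* let free() touch it again */
    free(tbl);
    return 0;
}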
+ */ + +-static struct ata_port_operations simple_port_ops = { ++static const struct ata_port_operations simple_port_ops = { + .inherits = &legacy_base_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + }; + +-static struct ata_port_operations legacy_port_ops = { ++static const struct ata_port_operations legacy_port_ops = { + .inherits = &legacy_base_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .set_mode = legacy_set_mode, +@@ -331,7 +331,7 @@ static unsigned int pdc_data_xfer_vlb(st + return buflen; + } + +-static struct ata_port_operations pdc20230_port_ops = { ++static const struct ata_port_operations pdc20230_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = pdc20230_set_piomode, + .sff_data_xfer = pdc_data_xfer_vlb, +@@ -364,7 +364,7 @@ static void ht6560a_set_piomode(struct a + ioread8(ap->ioaddr.status_addr); + } + +-static struct ata_port_operations ht6560a_port_ops = { ++static const struct ata_port_operations ht6560a_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = ht6560a_set_piomode, + }; +@@ -407,7 +407,7 @@ static void ht6560b_set_piomode(struct a + ioread8(ap->ioaddr.status_addr); + } + +-static struct ata_port_operations ht6560b_port_ops = { ++static const struct ata_port_operations ht6560b_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = ht6560b_set_piomode, + }; +@@ -506,7 +506,7 @@ static void opti82c611a_set_piomode(stru + } + + +-static struct ata_port_operations opti82c611a_port_ops = { ++static const struct ata_port_operations opti82c611a_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = opti82c611a_set_piomode, + }; +@@ -616,7 +616,7 @@ static unsigned int opti82c46x_qc_issue( + return ata_sff_qc_issue(qc); + } + +-static struct ata_port_operations opti82c46x_port_ops = { ++static const struct ata_port_operations opti82c46x_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = opti82c46x_set_piomode, + .qc_issue = opti82c46x_qc_issue, +@@ -778,20 +778,20 @@ static int qdi_port(struct platform_devi + return 0; + } + +-static struct ata_port_operations qdi6500_port_ops = { ++static const struct ata_port_operations qdi6500_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = qdi6500_set_piomode, + .qc_issue = qdi_qc_issue, + .sff_data_xfer = vlb32_data_xfer, + }; + +-static struct ata_port_operations qdi6580_port_ops = { ++static const struct ata_port_operations qdi6580_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = qdi6580_set_piomode, + .sff_data_xfer = vlb32_data_xfer, + }; + +-static struct ata_port_operations qdi6580dp_port_ops = { ++static const struct ata_port_operations qdi6580dp_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = qdi6580dp_set_piomode, + .qc_issue = qdi_qc_issue, +@@ -863,7 +863,7 @@ static int winbond_port(struct platform_ + return 0; + } + +-static struct ata_port_operations winbond_port_ops = { ++static const struct ata_port_operations winbond_port_ops = { + .inherits = &legacy_base_port_ops, + .set_piomode = winbond_set_piomode, + .sff_data_xfer = vlb32_data_xfer, +@@ -986,7 +986,7 @@ static __init int legacy_init_one(struct + int pio_modes = controller->pio_mask; + unsigned long io = probe->port; + u32 mask = (1 << probe->slot); +- struct ata_port_operations *ops = controller->ops; ++ const struct ata_port_operations *ops = controller->ops; + struct legacy_data *ld = &legacy_data[probe->slot]; + struct ata_host *host = NULL; + struct ata_port *ap; +diff -urNp 
linux-2.6.35.4/drivers/ata/pata_macio.c linux-2.6.35.4/drivers/ata/pata_macio.c +--- linux-2.6.35.4/drivers/ata/pata_macio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_macio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -918,9 +918,8 @@ static struct scsi_host_template pata_ma + .slave_configure = pata_macio_slave_config, + }; + +-static struct ata_port_operations pata_macio_ops = { ++static const struct ata_port_operations pata_macio_ops = { + .inherits = &ata_bmdma_port_ops, +- + .freeze = pata_macio_freeze, + .set_piomode = pata_macio_set_timings, + .set_dmamode = pata_macio_set_timings, +diff -urNp linux-2.6.35.4/drivers/ata/pata_marvell.c linux-2.6.35.4/drivers/ata/pata_marvell.c +--- linux-2.6.35.4/drivers/ata/pata_marvell.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_marvell.c 2010-09-17 20:12:09.000000000 -0400 +@@ -100,7 +100,7 @@ static struct scsi_host_template marvell + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations marvell_ops = { ++static const struct ata_port_operations marvell_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = marvell_cable_detect, + .prereset = marvell_pre_reset, +diff -urNp linux-2.6.35.4/drivers/ata/pata_mpc52xx.c linux-2.6.35.4/drivers/ata/pata_mpc52xx.c +--- linux-2.6.35.4/drivers/ata/pata_mpc52xx.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_mpc52xx.c 2010-09-17 20:12:09.000000000 -0400 +@@ -609,7 +609,7 @@ static struct scsi_host_template mpc52xx + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations mpc52xx_ata_port_ops = { ++static const struct ata_port_operations mpc52xx_ata_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_dev_select = mpc52xx_ata_dev_select, + .set_piomode = mpc52xx_ata_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_mpiix.c linux-2.6.35.4/drivers/ata/pata_mpiix.c +--- linux-2.6.35.4/drivers/ata/pata_mpiix.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_mpiix.c 2010-09-17 20:12:09.000000000 -0400 +@@ -140,7 +140,7 @@ static struct scsi_host_template mpiix_s + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations mpiix_port_ops = { ++static const struct ata_port_operations mpiix_port_ops = { + .inherits = &ata_sff_port_ops, + .qc_issue = mpiix_qc_issue, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pata_netcell.c linux-2.6.35.4/drivers/ata/pata_netcell.c +--- linux-2.6.35.4/drivers/ata/pata_netcell.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_netcell.c 2010-09-17 20:12:09.000000000 -0400 +@@ -34,7 +34,7 @@ static struct scsi_host_template netcell + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations netcell_ops = { ++static const struct ata_port_operations netcell_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_80wire, + .read_id = netcell_read_id, +diff -urNp linux-2.6.35.4/drivers/ata/pata_ninja32.c linux-2.6.35.4/drivers/ata/pata_ninja32.c +--- linux-2.6.35.4/drivers/ata/pata_ninja32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_ninja32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -81,7 +81,7 @@ static struct scsi_host_template ninja32 + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations ninja32_port_ops = { ++static const struct ata_port_operations ninja32_port_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_dev_select = ninja32_dev_select, + .cable_detect = ata_cable_40wire, +diff -urNp 
linux-2.6.35.4/drivers/ata/pata_ns87410.c linux-2.6.35.4/drivers/ata/pata_ns87410.c +--- linux-2.6.35.4/drivers/ata/pata_ns87410.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_ns87410.c 2010-09-17 20:12:09.000000000 -0400 +@@ -132,7 +132,7 @@ static struct scsi_host_template ns87410 + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations ns87410_port_ops = { ++static const struct ata_port_operations ns87410_port_ops = { + .inherits = &ata_sff_port_ops, + .qc_issue = ns87410_qc_issue, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pata_ns87415.c linux-2.6.35.4/drivers/ata/pata_ns87415.c +--- linux-2.6.35.4/drivers/ata/pata_ns87415.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_ns87415.c 2010-09-17 20:12:09.000000000 -0400 +@@ -299,7 +299,7 @@ static u8 ns87560_bmdma_status(struct at + } + #endif /* 87560 SuperIO Support */ + +-static struct ata_port_operations ns87415_pata_ops = { ++static const struct ata_port_operations ns87415_pata_ops = { + .inherits = &ata_bmdma_port_ops, + + .check_atapi_dma = ns87415_check_atapi_dma, +@@ -313,7 +313,7 @@ static struct ata_port_operations ns8741 + }; + + #if defined(CONFIG_SUPERIO) +-static struct ata_port_operations ns87560_pata_ops = { ++static const struct ata_port_operations ns87560_pata_ops = { + .inherits = &ns87415_pata_ops, + .sff_tf_read = ns87560_tf_read, + .sff_check_status = ns87560_check_status, +diff -urNp linux-2.6.35.4/drivers/ata/pata_octeon_cf.c linux-2.6.35.4/drivers/ata/pata_octeon_cf.c +--- linux-2.6.35.4/drivers/ata/pata_octeon_cf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_octeon_cf.c 2010-09-17 20:12:09.000000000 -0400 +@@ -782,6 +782,7 @@ static unsigned int octeon_cf_qc_issue(s + return 0; + } + ++/* cannot be const */ + static struct ata_port_operations octeon_cf_ops = { + .inherits = &ata_sff_port_ops, + .check_atapi_dma = octeon_cf_check_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/pata_oldpiix.c linux-2.6.35.4/drivers/ata/pata_oldpiix.c +--- linux-2.6.35.4/drivers/ata/pata_oldpiix.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_oldpiix.c 2010-09-17 20:12:09.000000000 -0400 +@@ -208,7 +208,7 @@ static struct scsi_host_template oldpiix + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations oldpiix_pata_ops = { ++static const struct ata_port_operations oldpiix_pata_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_issue = oldpiix_qc_issue, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pata_opti.c linux-2.6.35.4/drivers/ata/pata_opti.c +--- linux-2.6.35.4/drivers/ata/pata_opti.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_opti.c 2010-09-17 20:12:09.000000000 -0400 +@@ -152,7 +152,7 @@ static struct scsi_host_template opti_sh + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations opti_port_ops = { ++static const struct ata_port_operations opti_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = opti_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_optidma.c linux-2.6.35.4/drivers/ata/pata_optidma.c +--- linux-2.6.35.4/drivers/ata/pata_optidma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_optidma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -337,7 +337,7 @@ static struct scsi_host_template optidma + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations optidma_port_ops = { ++static const struct 
ata_port_operations optidma_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_40wire, + .set_piomode = optidma_set_pio_mode, +@@ -346,7 +346,7 @@ static struct ata_port_operations optidm + .prereset = optidma_pre_reset, + }; + +-static struct ata_port_operations optiplus_port_ops = { ++static const struct ata_port_operations optiplus_port_ops = { + .inherits = &optidma_port_ops, + .set_piomode = optiplus_set_pio_mode, + .set_dmamode = optiplus_set_dma_mode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_palmld.c linux-2.6.35.4/drivers/ata/pata_palmld.c +--- linux-2.6.35.4/drivers/ata/pata_palmld.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_palmld.c 2010-09-17 20:12:09.000000000 -0400 +@@ -37,7 +37,7 @@ static struct scsi_host_template palmld_ + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations palmld_port_ops = { ++static const struct ata_port_operations palmld_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pata_pcmcia.c linux-2.6.35.4/drivers/ata/pata_pcmcia.c +--- linux-2.6.35.4/drivers/ata/pata_pcmcia.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_pcmcia.c 2010-09-17 20:12:09.000000000 -0400 +@@ -153,14 +153,14 @@ static struct scsi_host_template pcmcia_ + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations pcmcia_port_ops = { ++static const struct ata_port_operations pcmcia_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_40wire, + .set_mode = pcmcia_set_mode, + }; + +-static struct ata_port_operations pcmcia_8bit_port_ops = { ++static const struct ata_port_operations pcmcia_8bit_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_data_xfer_8bit, + .cable_detect = ata_cable_40wire, +@@ -243,7 +243,7 @@ static int pcmcia_init_one(struct pcmcia + unsigned long io_base, ctl_base; + void __iomem *io_addr, *ctl_addr; + int n_ports = 1; +- struct ata_port_operations *ops = &pcmcia_port_ops; ++ const struct ata_port_operations *ops = &pcmcia_port_ops; + + /* Set up attributes in order to probe card and get resources */ + pdev->io.Attributes1 = IO_DATA_PATH_WIDTH_AUTO; +diff -urNp linux-2.6.35.4/drivers/ata/pata_pdc2027x.c linux-2.6.35.4/drivers/ata/pata_pdc2027x.c +--- linux-2.6.35.4/drivers/ata/pata_pdc2027x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_pdc2027x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -132,14 +132,14 @@ static struct scsi_host_template pdc2027 + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations pdc2027x_pata100_ops = { ++static const struct ata_port_operations pdc2027x_pata100_ops = { + .inherits = &ata_bmdma_port_ops, + .check_atapi_dma = pdc2027x_check_atapi_dma, + .cable_detect = pdc2027x_cable_detect, + .prereset = pdc2027x_prereset, + }; + +-static struct ata_port_operations pdc2027x_pata133_ops = { ++static const struct ata_port_operations pdc2027x_pata133_ops = { + .inherits = &pdc2027x_pata100_ops, + .mode_filter = pdc2027x_mode_filter, + .set_piomode = pdc2027x_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c +--- linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_pdc202xx_old.c 2010-09-17 20:12:09.000000000 -0400 +@@ -274,7 +274,7 @@ static struct scsi_host_template 
pdc202x + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations pdc2024x_port_ops = { ++static const struct ata_port_operations pdc2024x_port_ops = { + .inherits = &ata_bmdma_port_ops, + + .cable_detect = ata_cable_40wire, +@@ -284,7 +284,7 @@ static struct ata_port_operations pdc202 + .sff_exec_command = pdc202xx_exec_command, + }; + +-static struct ata_port_operations pdc2026x_port_ops = { ++static const struct ata_port_operations pdc2026x_port_ops = { + .inherits = &pdc2024x_port_ops, + + .check_atapi_dma = pdc2026x_check_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/pata_piccolo.c linux-2.6.35.4/drivers/ata/pata_piccolo.c +--- linux-2.6.35.4/drivers/ata/pata_piccolo.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_piccolo.c 2010-09-17 20:12:09.000000000 -0400 +@@ -67,7 +67,7 @@ static struct scsi_host_template tosh_sh + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations tosh_port_ops = { ++static const struct ata_port_operations tosh_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_unknown, + .set_piomode = tosh_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_platform.c linux-2.6.35.4/drivers/ata/pata_platform.c +--- linux-2.6.35.4/drivers/ata/pata_platform.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_platform.c 2010-09-17 20:12:09.000000000 -0400 +@@ -48,7 +48,7 @@ static struct scsi_host_template pata_pl + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations pata_platform_port_ops = { ++static const struct ata_port_operations pata_platform_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + .cable_detect = ata_cable_unknown, +diff -urNp linux-2.6.35.4/drivers/ata/pata_qdi.c linux-2.6.35.4/drivers/ata/pata_qdi.c +--- linux-2.6.35.4/drivers/ata/pata_qdi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_qdi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -157,7 +157,7 @@ static struct scsi_host_template qdi_sht + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations qdi6500_port_ops = { ++static const struct ata_port_operations qdi6500_port_ops = { + .inherits = &ata_sff_port_ops, + .qc_issue = qdi_qc_issue, + .sff_data_xfer = qdi_data_xfer, +@@ -165,7 +165,7 @@ static struct ata_port_operations qdi650 + .set_piomode = qdi6500_set_piomode, + }; + +-static struct ata_port_operations qdi6580_port_ops = { ++static const struct ata_port_operations qdi6580_port_ops = { + .inherits = &qdi6500_port_ops, + .set_piomode = qdi6580_set_piomode, + }; +diff -urNp linux-2.6.35.4/drivers/ata/pata_radisys.c linux-2.6.35.4/drivers/ata/pata_radisys.c +--- linux-2.6.35.4/drivers/ata/pata_radisys.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_radisys.c 2010-09-17 20:12:09.000000000 -0400 +@@ -187,7 +187,7 @@ static struct scsi_host_template radisys + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations radisys_pata_ops = { ++static const struct ata_port_operations radisys_pata_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_issue = radisys_qc_issue, + .cable_detect = ata_cable_unknown, +diff -urNp linux-2.6.35.4/drivers/ata/pata_rb532_cf.c linux-2.6.35.4/drivers/ata/pata_rb532_cf.c +--- linux-2.6.35.4/drivers/ata/pata_rb532_cf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_rb532_cf.c 2010-09-17 20:12:09.000000000 -0400 +@@ -69,7 +69,7 @@ static irqreturn_t rb532_pata_irq_handle + return IRQ_HANDLED; + } + +-static struct 
ata_port_operations rb532_pata_port_ops = { ++static const struct ata_port_operations rb532_pata_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = ata_sff_data_xfer32, + }; +diff -urNp linux-2.6.35.4/drivers/ata/pata_rdc.c linux-2.6.35.4/drivers/ata/pata_rdc.c +--- linux-2.6.35.4/drivers/ata/pata_rdc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_rdc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -273,7 +273,7 @@ static void rdc_set_dmamode(struct ata_p + pci_write_config_byte(dev, 0x48, udma_enable); + } + +-static struct ata_port_operations rdc_pata_ops = { ++static const struct ata_port_operations rdc_pata_ops = { + .inherits = &ata_bmdma32_port_ops, + .cable_detect = rdc_pata_cable_detect, + .set_piomode = rdc_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_rz1000.c linux-2.6.35.4/drivers/ata/pata_rz1000.c +--- linux-2.6.35.4/drivers/ata/pata_rz1000.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_rz1000.c 2010-09-17 20:12:09.000000000 -0400 +@@ -54,7 +54,7 @@ static struct scsi_host_template rz1000_ + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations rz1000_port_ops = { ++static const struct ata_port_operations rz1000_port_ops = { + .inherits = &ata_sff_port_ops, + .cable_detect = ata_cable_40wire, + .set_mode = rz1000_set_mode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_sc1200.c linux-2.6.35.4/drivers/ata/pata_sc1200.c +--- linux-2.6.35.4/drivers/ata/pata_sc1200.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_sc1200.c 2010-09-17 20:12:09.000000000 -0400 +@@ -207,7 +207,7 @@ static struct scsi_host_template sc1200_ + .sg_tablesize = LIBATA_DUMB_MAX_PRD, + }; + +-static struct ata_port_operations sc1200_port_ops = { ++static const struct ata_port_operations sc1200_port_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_prep = ata_bmdma_dumb_qc_prep, + .qc_issue = sc1200_qc_issue, +diff -urNp linux-2.6.35.4/drivers/ata/pata_scc.c linux-2.6.35.4/drivers/ata/pata_scc.c +--- linux-2.6.35.4/drivers/ata/pata_scc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_scc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -927,7 +927,7 @@ static struct scsi_host_template scc_sht + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations scc_pata_ops = { ++static const struct ata_port_operations scc_pata_ops = { + .inherits = &ata_bmdma_port_ops, + + .set_piomode = scc_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_sch.c linux-2.6.35.4/drivers/ata/pata_sch.c +--- linux-2.6.35.4/drivers/ata/pata_sch.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_sch.c 2010-09-17 20:12:09.000000000 -0400 +@@ -75,7 +75,7 @@ static struct scsi_host_template sch_sht + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations sch_pata_ops = { ++static const struct ata_port_operations sch_pata_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = ata_cable_unknown, + .set_piomode = sch_set_piomode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_serverworks.c linux-2.6.35.4/drivers/ata/pata_serverworks.c +--- linux-2.6.35.4/drivers/ata/pata_serverworks.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_serverworks.c 2010-09-17 20:12:09.000000000 -0400 +@@ -300,7 +300,7 @@ static struct scsi_host_template serverw + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations serverworks_osb4_port_ops = { ++static const struct ata_port_operations serverworks_osb4_port_ops = { + .inherits = 
&ata_bmdma_port_ops, + .cable_detect = serverworks_cable_detect, + .mode_filter = serverworks_osb4_filter, +@@ -308,7 +308,7 @@ static struct ata_port_operations server + .set_dmamode = serverworks_set_dmamode, + }; + +-static struct ata_port_operations serverworks_csb_port_ops = { ++static const struct ata_port_operations serverworks_csb_port_ops = { + .inherits = &serverworks_osb4_port_ops, + .mode_filter = serverworks_csb_filter, + }; +diff -urNp linux-2.6.35.4/drivers/ata/pata_sil680.c linux-2.6.35.4/drivers/ata/pata_sil680.c +--- linux-2.6.35.4/drivers/ata/pata_sil680.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_sil680.c 2010-09-17 20:12:09.000000000 -0400 +@@ -214,8 +214,7 @@ static struct scsi_host_template sil680_ + ATA_BMDMA_SHT(DRV_NAME), + }; + +- +-static struct ata_port_operations sil680_port_ops = { ++static const struct ata_port_operations sil680_port_ops = { + .inherits = &ata_bmdma32_port_ops, + .sff_exec_command = sil680_sff_exec_command, + .cable_detect = sil680_cable_detect, +diff -urNp linux-2.6.35.4/drivers/ata/pata_sis.c linux-2.6.35.4/drivers/ata/pata_sis.c +--- linux-2.6.35.4/drivers/ata/pata_sis.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_sis.c 2010-09-17 20:12:09.000000000 -0400 +@@ -503,47 +503,47 @@ static struct scsi_host_template sis_sht + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations sis_133_for_sata_ops = { ++static const struct ata_port_operations sis_133_for_sata_ops = { + .inherits = &ata_bmdma_port_ops, + .set_piomode = sis_133_set_piomode, + .set_dmamode = sis_133_set_dmamode, + .cable_detect = sis_133_cable_detect, + }; + +-static struct ata_port_operations sis_base_ops = { ++static const struct ata_port_operations sis_base_ops = { + .inherits = &ata_bmdma_port_ops, + .prereset = sis_pre_reset, + }; + +-static struct ata_port_operations sis_133_ops = { ++static const struct ata_port_operations sis_133_ops = { + .inherits = &sis_base_ops, + .set_piomode = sis_133_set_piomode, + .set_dmamode = sis_133_set_dmamode, + .cable_detect = sis_133_cable_detect, + }; + +-static struct ata_port_operations sis_133_early_ops = { ++static const struct ata_port_operations sis_133_early_ops = { + .inherits = &sis_base_ops, + .set_piomode = sis_100_set_piomode, + .set_dmamode = sis_133_early_set_dmamode, + .cable_detect = sis_66_cable_detect, + }; + +-static struct ata_port_operations sis_100_ops = { ++static const struct ata_port_operations sis_100_ops = { + .inherits = &sis_base_ops, + .set_piomode = sis_100_set_piomode, + .set_dmamode = sis_100_set_dmamode, + .cable_detect = sis_66_cable_detect, + }; + +-static struct ata_port_operations sis_66_ops = { ++static const struct ata_port_operations sis_66_ops = { + .inherits = &sis_base_ops, + .set_piomode = sis_old_set_piomode, + .set_dmamode = sis_66_set_dmamode, + .cable_detect = sis_66_cable_detect, + }; + +-static struct ata_port_operations sis_old_ops = { ++static const struct ata_port_operations sis_old_ops = { + .inherits = &sis_base_ops, + .set_piomode = sis_old_set_piomode, + .set_dmamode = sis_old_set_dmamode, +diff -urNp linux-2.6.35.4/drivers/ata/pata_sl82c105.c linux-2.6.35.4/drivers/ata/pata_sl82c105.c +--- linux-2.6.35.4/drivers/ata/pata_sl82c105.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_sl82c105.c 2010-09-17 20:12:09.000000000 -0400 +@@ -231,7 +231,7 @@ static struct scsi_host_template sl82c10 + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations sl82c105_port_ops = { 
++static const struct ata_port_operations sl82c105_port_ops = { + .inherits = &ata_bmdma_port_ops, + .qc_defer = sl82c105_qc_defer, + .bmdma_start = sl82c105_bmdma_start, +diff -urNp linux-2.6.35.4/drivers/ata/pata_triflex.c linux-2.6.35.4/drivers/ata/pata_triflex.c +--- linux-2.6.35.4/drivers/ata/pata_triflex.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_triflex.c 2010-09-17 20:12:09.000000000 -0400 +@@ -178,7 +178,7 @@ static struct scsi_host_template triflex + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations triflex_port_ops = { ++static const struct ata_port_operations triflex_port_ops = { + .inherits = &ata_bmdma_port_ops, + .bmdma_start = triflex_bmdma_start, + .bmdma_stop = triflex_bmdma_stop, +diff -urNp linux-2.6.35.4/drivers/ata/pata_via.c linux-2.6.35.4/drivers/ata/pata_via.c +--- linux-2.6.35.4/drivers/ata/pata_via.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_via.c 2010-09-17 20:12:09.000000000 -0400 +@@ -439,7 +439,7 @@ static struct scsi_host_template via_sht + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations via_port_ops = { ++static const struct ata_port_operations via_port_ops = { + .inherits = &ata_bmdma_port_ops, + .cable_detect = via_cable_detect, + .set_piomode = via_set_piomode, +@@ -450,7 +450,7 @@ static struct ata_port_operations via_po + .mode_filter = via_mode_filter, + }; + +-static struct ata_port_operations via_port_ops_noirq = { ++static const struct ata_port_operations via_port_ops_noirq = { + .inherits = &via_port_ops, + .sff_data_xfer = ata_sff_data_xfer_noirq, + }; +diff -urNp linux-2.6.35.4/drivers/ata/pata_winbond.c linux-2.6.35.4/drivers/ata/pata_winbond.c +--- linux-2.6.35.4/drivers/ata/pata_winbond.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pata_winbond.c 2010-09-17 20:12:09.000000000 -0400 +@@ -125,7 +125,7 @@ static struct scsi_host_template winbond + ATA_PIO_SHT(DRV_NAME), + }; + +-static struct ata_port_operations winbond_port_ops = { ++static const struct ata_port_operations winbond_port_ops = { + .inherits = &ata_sff_port_ops, + .sff_data_xfer = winbond_data_xfer, + .cable_detect = ata_cable_40wire, +diff -urNp linux-2.6.35.4/drivers/ata/pdc_adma.c linux-2.6.35.4/drivers/ata/pdc_adma.c +--- linux-2.6.35.4/drivers/ata/pdc_adma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/pdc_adma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -146,7 +146,7 @@ static struct scsi_host_template adma_at + .dma_boundary = ADMA_DMA_BOUNDARY, + }; + +-static struct ata_port_operations adma_ata_ops = { ++static const struct ata_port_operations adma_ata_ops = { + .inherits = &ata_sff_port_ops, + + .lost_interrupt = ATA_OP_NULL, +diff -urNp linux-2.6.35.4/drivers/ata/sata_fsl.c linux-2.6.35.4/drivers/ata/sata_fsl.c +--- linux-2.6.35.4/drivers/ata/sata_fsl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_fsl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1261,7 +1261,7 @@ static struct scsi_host_template sata_fs + .dma_boundary = ATA_DMA_BOUNDARY, + }; + +-static struct ata_port_operations sata_fsl_ops = { ++static const struct ata_port_operations sata_fsl_ops = { + .inherits = &sata_pmp_port_ops, + + .qc_defer = ata_std_qc_defer, +diff -urNp linux-2.6.35.4/drivers/ata/sata_inic162x.c linux-2.6.35.4/drivers/ata/sata_inic162x.c +--- linux-2.6.35.4/drivers/ata/sata_inic162x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_inic162x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -705,7 +705,7 
@@ static int inic_port_start(struct ata_po + return 0; + } + +-static struct ata_port_operations inic_port_ops = { ++static const struct ata_port_operations inic_port_ops = { + .inherits = &sata_port_ops, + + .check_atapi_dma = inic_check_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/sata_mv.c linux-2.6.35.4/drivers/ata/sata_mv.c +--- linux-2.6.35.4/drivers/ata/sata_mv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_mv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -663,7 +663,7 @@ static struct scsi_host_template mv6_sht + .dma_boundary = MV_DMA_BOUNDARY, + }; + +-static struct ata_port_operations mv5_ops = { ++static const struct ata_port_operations mv5_ops = { + .inherits = &ata_sff_port_ops, + + .lost_interrupt = ATA_OP_NULL, +@@ -683,7 +683,7 @@ static struct ata_port_operations mv5_op + .port_stop = mv_port_stop, + }; + +-static struct ata_port_operations mv6_ops = { ++static const struct ata_port_operations mv6_ops = { + .inherits = &ata_bmdma_port_ops, + + .lost_interrupt = ATA_OP_NULL, +@@ -717,7 +717,7 @@ static struct ata_port_operations mv6_op + .port_stop = mv_port_stop, + }; + +-static struct ata_port_operations mv_iie_ops = { ++static const struct ata_port_operations mv_iie_ops = { + .inherits = &mv6_ops, + .dev_config = ATA_OP_NULL, + .qc_prep = mv_qc_prep_iie, +diff -urNp linux-2.6.35.4/drivers/ata/sata_nv.c linux-2.6.35.4/drivers/ata/sata_nv.c +--- linux-2.6.35.4/drivers/ata/sata_nv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_nv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -465,7 +465,7 @@ static struct scsi_host_template nv_swnc + * cases. Define nv_hardreset() which only kicks in for post-boot + * probing and use it for all variants. + */ +-static struct ata_port_operations nv_generic_ops = { ++static const struct ata_port_operations nv_generic_ops = { + .inherits = &ata_bmdma_port_ops, + .lost_interrupt = ATA_OP_NULL, + .scr_read = nv_scr_read, +@@ -473,20 +473,20 @@ static struct ata_port_operations nv_gen + .hardreset = nv_hardreset, + }; + +-static struct ata_port_operations nv_nf2_ops = { ++static const struct ata_port_operations nv_nf2_ops = { + .inherits = &nv_generic_ops, + .freeze = nv_nf2_freeze, + .thaw = nv_nf2_thaw, + }; + +-static struct ata_port_operations nv_ck804_ops = { ++static const struct ata_port_operations nv_ck804_ops = { + .inherits = &nv_generic_ops, + .freeze = nv_ck804_freeze, + .thaw = nv_ck804_thaw, + .host_stop = nv_ck804_host_stop, + }; + +-static struct ata_port_operations nv_adma_ops = { ++static const struct ata_port_operations nv_adma_ops = { + .inherits = &nv_ck804_ops, + + .check_atapi_dma = nv_adma_check_atapi_dma, +@@ -510,7 +510,7 @@ static struct ata_port_operations nv_adm + .host_stop = nv_adma_host_stop, + }; + +-static struct ata_port_operations nv_swncq_ops = { ++static const struct ata_port_operations nv_swncq_ops = { + .inherits = &nv_generic_ops, + + .qc_defer = ata_std_qc_defer, +diff -urNp linux-2.6.35.4/drivers/ata/sata_promise.c linux-2.6.35.4/drivers/ata/sata_promise.c +--- linux-2.6.35.4/drivers/ata/sata_promise.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_promise.c 2010-09-17 20:12:09.000000000 -0400 +@@ -196,7 +196,7 @@ static const struct ata_port_operations + .error_handler = pdc_error_handler, + }; + +-static struct ata_port_operations pdc_sata_ops = { ++static const struct ata_port_operations pdc_sata_ops = { + .inherits = &pdc_common_ops, + .cable_detect = pdc_sata_cable_detect, + .freeze = pdc_sata_freeze, +@@ -209,14 
+209,14 @@ static struct ata_port_operations pdc_sa + + /* First-generation chips need a more restrictive ->check_atapi_dma op, + and ->freeze/thaw that ignore the hotplug controls. */ +-static struct ata_port_operations pdc_old_sata_ops = { ++static const struct ata_port_operations pdc_old_sata_ops = { + .inherits = &pdc_sata_ops, + .freeze = pdc_freeze, + .thaw = pdc_thaw, + .check_atapi_dma = pdc_old_sata_check_atapi_dma, + }; + +-static struct ata_port_operations pdc_pata_ops = { ++static const struct ata_port_operations pdc_pata_ops = { + .inherits = &pdc_common_ops, + .cable_detect = pdc_pata_cable_detect, + .freeze = pdc_freeze, +diff -urNp linux-2.6.35.4/drivers/ata/sata_qstor.c linux-2.6.35.4/drivers/ata/sata_qstor.c +--- linux-2.6.35.4/drivers/ata/sata_qstor.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_qstor.c 2010-09-17 20:12:09.000000000 -0400 +@@ -131,7 +131,7 @@ static struct scsi_host_template qs_ata_ + .dma_boundary = QS_DMA_BOUNDARY, + }; + +-static struct ata_port_operations qs_ata_ops = { ++static const struct ata_port_operations qs_ata_ops = { + .inherits = &ata_sff_port_ops, + + .check_atapi_dma = qs_check_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/sata_sil24.c linux-2.6.35.4/drivers/ata/sata_sil24.c +--- linux-2.6.35.4/drivers/ata/sata_sil24.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_sil24.c 2010-09-17 20:12:09.000000000 -0400 +@@ -389,7 +389,7 @@ static struct scsi_host_template sil24_s + .dma_boundary = ATA_DMA_BOUNDARY, + }; + +-static struct ata_port_operations sil24_ops = { ++static const struct ata_port_operations sil24_ops = { + .inherits = &sata_pmp_port_ops, + + .qc_defer = sil24_qc_defer, +diff -urNp linux-2.6.35.4/drivers/ata/sata_sil.c linux-2.6.35.4/drivers/ata/sata_sil.c +--- linux-2.6.35.4/drivers/ata/sata_sil.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_sil.c 2010-09-17 20:12:09.000000000 -0400 +@@ -182,7 +182,7 @@ static struct scsi_host_template sil_sht + .sg_tablesize = ATA_MAX_PRD + }; + +-static struct ata_port_operations sil_ops = { ++static const struct ata_port_operations sil_ops = { + .inherits = &ata_bmdma32_port_ops, + .dev_config = sil_dev_config, + .set_mode = sil_set_mode, +diff -urNp linux-2.6.35.4/drivers/ata/sata_sis.c linux-2.6.35.4/drivers/ata/sata_sis.c +--- linux-2.6.35.4/drivers/ata/sata_sis.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_sis.c 2010-09-17 20:12:09.000000000 -0400 +@@ -89,7 +89,7 @@ static struct scsi_host_template sis_sht + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations sis_ops = { ++static const struct ata_port_operations sis_ops = { + .inherits = &ata_bmdma_port_ops, + .scr_read = sis_scr_read, + .scr_write = sis_scr_write, +diff -urNp linux-2.6.35.4/drivers/ata/sata_svw.c linux-2.6.35.4/drivers/ata/sata_svw.c +--- linux-2.6.35.4/drivers/ata/sata_svw.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_svw.c 2010-09-17 20:12:09.000000000 -0400 +@@ -344,7 +344,7 @@ static struct scsi_host_template k2_sata + }; + + +-static struct ata_port_operations k2_sata_ops = { ++static const struct ata_port_operations k2_sata_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = k2_sata_tf_load, + .sff_tf_read = k2_sata_tf_read, +diff -urNp linux-2.6.35.4/drivers/ata/sata_sx4.c linux-2.6.35.4/drivers/ata/sata_sx4.c +--- linux-2.6.35.4/drivers/ata/sata_sx4.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_sx4.c 2010-09-17 
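
The change repeated across these PATA/SATA hunks is a single qualifier: each driver's ata_port_operations table gains const, which moves the function-pointer table into read-only memory so a kernel write primitive can no longer redirect its hooks. Below is a minimal userspace sketch of why that matters; the struct and function names are invented for illustration and are not from the patch.

	/* sketch: a const ops table lands in .rodata, so its function
	 * pointers cannot be silently overwritten at run time */
	#include <stdio.h>

	struct demo_port_ops {			/* stand-in for ata_port_operations */
		int (*cable_detect)(void);
	};

	static int demo_detect_40wire(void)
	{
		return 40;
	}

	/* "static const" is the pattern the patch applies driver by driver */
	static const struct demo_port_ops demo_ops = {
		.cable_detect = demo_detect_40wire,
	};

	int main(void)
	{
		printf("%d-wire cable\n", demo_ops.cable_detect());
		/* demo_ops.cable_detect = NULL;  -- rejected at compile time;
		 * a forced store through a cast would fault on the read-only page */
		return 0;
	}
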
20:12:09.000000000 -0400 +@@ -249,7 +249,7 @@ static struct scsi_host_template pdc_sat + }; + + /* TODO: inherit from base port_ops after converting to new EH */ +-static struct ata_port_operations pdc_20621_ops = { ++static const struct ata_port_operations pdc_20621_ops = { + .inherits = &ata_sff_port_ops, + + .check_atapi_dma = pdc_check_atapi_dma, +diff -urNp linux-2.6.35.4/drivers/ata/sata_uli.c linux-2.6.35.4/drivers/ata/sata_uli.c +--- linux-2.6.35.4/drivers/ata/sata_uli.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_uli.c 2010-09-17 20:12:09.000000000 -0400 +@@ -80,7 +80,7 @@ static struct scsi_host_template uli_sht + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations uli_ops = { ++static const struct ata_port_operations uli_ops = { + .inherits = &ata_bmdma_port_ops, + .scr_read = uli_scr_read, + .scr_write = uli_scr_write, +diff -urNp linux-2.6.35.4/drivers/ata/sata_via.c linux-2.6.35.4/drivers/ata/sata_via.c +--- linux-2.6.35.4/drivers/ata/sata_via.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_via.c 2010-09-17 20:12:09.000000000 -0400 +@@ -115,32 +115,32 @@ static struct scsi_host_template svia_sh + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations svia_base_ops = { ++static const struct ata_port_operations svia_base_ops = { + .inherits = &ata_bmdma_port_ops, + .sff_tf_load = svia_tf_load, + }; + +-static struct ata_port_operations vt6420_sata_ops = { ++static const struct ata_port_operations vt6420_sata_ops = { + .inherits = &svia_base_ops, + .freeze = svia_noop_freeze, + .prereset = vt6420_prereset, + .bmdma_start = vt6420_bmdma_start, + }; + +-static struct ata_port_operations vt6421_pata_ops = { ++static const struct ata_port_operations vt6421_pata_ops = { + .inherits = &svia_base_ops, + .cable_detect = vt6421_pata_cable_detect, + .set_piomode = vt6421_set_pio_mode, + .set_dmamode = vt6421_set_dma_mode, + }; + +-static struct ata_port_operations vt6421_sata_ops = { ++static const struct ata_port_operations vt6421_sata_ops = { + .inherits = &svia_base_ops, + .scr_read = svia_scr_read, + .scr_write = svia_scr_write, + }; + +-static struct ata_port_operations vt8251_ops = { ++static const struct ata_port_operations vt8251_ops = { + .inherits = &svia_base_ops, + .hardreset = sata_std_hardreset, + .scr_read = vt8251_scr_read, +diff -urNp linux-2.6.35.4/drivers/ata/sata_vsc.c linux-2.6.35.4/drivers/ata/sata_vsc.c +--- linux-2.6.35.4/drivers/ata/sata_vsc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ata/sata_vsc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -300,7 +300,7 @@ static struct scsi_host_template vsc_sat + }; + + +-static struct ata_port_operations vsc_sata_ops = { ++static const struct ata_port_operations vsc_sata_ops = { + .inherits = &ata_bmdma_port_ops, + /* The IRQ handling is not quite standard SFF behaviour so we + cannot use the default lost interrupt handler */ +diff -urNp linux-2.6.35.4/drivers/atm/adummy.c linux-2.6.35.4/drivers/atm/adummy.c +--- linux-2.6.35.4/drivers/atm/adummy.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/adummy.c 2010-09-17 20:12:09.000000000 -0400 +@@ -78,7 +78,7 @@ adummy_send(struct atm_vcc *vcc, struct + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +diff -urNp linux-2.6.35.4/drivers/atm/ambassador.c linux-2.6.35.4/drivers/atm/ambassador.c +--- linux-2.6.35.4/drivers/atm/ambassador.c 2010-08-26 
19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/ambassador.c 2010-09-17 20:12:09.000000000 -0400 +@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, + PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx); + + // VC layer stats +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + // free the descriptor + kfree (tx_descr); +@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, + dump_skb ("<<<", vc, skb); + + // VC layer stats +- atomic_inc(&atm_vcc->stats->rx); ++ atomic_inc_unchecked(&atm_vcc->stats->rx); + __net_timestamp(skb); + // end of our responsability + atm_vcc->push (atm_vcc, skb); +@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, + } else { + PRINTK (KERN_INFO, "dropped over-size frame"); + // should we count this? +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + } + + } else { +@@ -1342,7 +1342,7 @@ static int amb_send (struct atm_vcc * at + } + + if (check_area (skb->data, skb->len)) { +- atomic_inc(&atm_vcc->stats->tx_err); ++ atomic_inc_unchecked(&atm_vcc->stats->tx_err); + return -ENOMEM; // ? + } + +diff -urNp linux-2.6.35.4/drivers/atm/atmtcp.c linux-2.6.35.4/drivers/atm/atmtcp.c +--- linux-2.6.35.4/drivers/atm/atmtcp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/atmtcp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -207,7 +207,7 @@ static int atmtcp_v_send(struct atm_vcc + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); + if (dev_data) return 0; +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOLINK; + } + size = skb->len+sizeof(struct atmtcp_hdr); +@@ -215,7 +215,7 @@ static int atmtcp_v_send(struct atm_vcc + if (!new_skb) { + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOBUFS; + } + hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr)); +@@ -226,8 +226,8 @@ static int atmtcp_v_send(struct atm_vcc + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); + out_vcc->push(out_vcc,new_skb); +- atomic_inc(&vcc->stats->tx); +- atomic_inc(&out_vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->tx); ++ atomic_inc_unchecked(&out_vcc->stats->rx); + return 0; + } + +@@ -301,7 +301,7 @@ static int atmtcp_c_send(struct atm_vcc + out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci)); + read_unlock(&vcc_sklist_lock); + if (!out_vcc) { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + goto done; + } + skb_pull(skb,sizeof(struct atmtcp_hdr)); +@@ -313,8 +313,8 @@ static int atmtcp_c_send(struct atm_vcc + __net_timestamp(new_skb); + skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len); + out_vcc->push(out_vcc,new_skb); +- atomic_inc(&vcc->stats->tx); +- atomic_inc(&out_vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->tx); ++ atomic_inc_unchecked(&out_vcc->stats->rx); + done: + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb(skb); +diff -urNp linux-2.6.35.4/drivers/atm/eni.c linux-2.6.35.4/drivers/atm/eni.c +--- linux-2.6.35.4/drivers/atm/eni.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/eni.c 2010-09-17 20:12:09.000000000 -0400 +@@ -526,7 +526,7 @@ static int rx_aal0(struct atm_vcc *vcc) + DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n", + vcc->dev->number); + length = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + else { + 
length = ATM_CELL_SIZE-1; /* no HEC */ +@@ -581,7 +581,7 @@ static int rx_aal5(struct atm_vcc *vcc) + size); + } + eff = length = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + else { + size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2); +@@ -598,7 +598,7 @@ static int rx_aal5(struct atm_vcc *vcc) + "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n", + vcc->dev->number,vcc->vci,length,size << 2,descr); + length = eff = 0; +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + } + skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL; +@@ -771,7 +771,7 @@ rx_dequeued++; + vcc->push(vcc,skb); + pushed++; + } +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + wake_up(&eni_dev->rx_wait); + } +@@ -1228,7 +1228,7 @@ static void dequeue_tx(struct atm_dev *d + PCI_DMA_TODEVICE); + if (vcc->pop) vcc->pop(vcc,skb); + else dev_kfree_skb_irq(skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + wake_up(&eni_dev->tx_wait); + dma_complete++; + } +diff -urNp linux-2.6.35.4/drivers/atm/firestream.c linux-2.6.35.4/drivers/atm/firestream.c +--- linux-2.6.35.4/drivers/atm/firestream.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/firestream.c 2010-09-17 20:12:09.000000000 -0400 +@@ -749,7 +749,7 @@ static void process_txdone_queue (struct + } + } + +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + fs_dprintk (FS_DEBUG_TXMEM, "i"); + fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb); +@@ -816,7 +816,7 @@ static void process_incoming (struct fs_ + #endif + skb_put (skb, qe->p1 & 0xffff); + ATM_SKB(skb)->vcc = atm_vcc; +- atomic_inc(&atm_vcc->stats->rx); ++ atomic_inc_unchecked(&atm_vcc->stats->rx); + __net_timestamp(skb); + fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb); + atm_vcc->push (atm_vcc, skb); +@@ -837,12 +837,12 @@ static void process_incoming (struct fs_ + kfree (pe); + } + if (atm_vcc) +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + break; + case 0x1f: /* Reassembly abort: no buffers. */ + /* Silently increment error counter. */ + if (atm_vcc) +- atomic_inc(&atm_vcc->stats->rx_drop); ++ atomic_inc_unchecked(&atm_vcc->stats->rx_drop); + break; + default: /* Hmm. Haven't written the code to handle the others yet... 
-- REW */ + printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n", +diff -urNp linux-2.6.35.4/drivers/atm/fore200e.c linux-2.6.35.4/drivers/atm/fore200e.c +--- linux-2.6.35.4/drivers/atm/fore200e.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/fore200e.c 2010-09-17 20:12:09.000000000 -0400 +@@ -933,9 +933,9 @@ fore200e_tx_irq(struct fore200e* fore200 + #endif + /* check error condition */ + if (*entry->status & STATUS_ERROR) +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + else +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + } + } + +@@ -1084,7 +1084,7 @@ fore200e_push_rpd(struct fore200e* fore2 + if (skb == NULL) { + DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len); + +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return -ENOMEM; + } + +@@ -1127,14 +1127,14 @@ fore200e_push_rpd(struct fore200e* fore2 + + dev_kfree_skb_any(skb); + +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return -ENOMEM; + } + + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0); + +@@ -1212,7 +1212,7 @@ fore200e_rx_irq(struct fore200e* fore200 + DPRINTK(2, "damaged PDU on %d.%d.%d\n", + fore200e->atm_dev->number, + entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + } + } + +@@ -1657,7 +1657,7 @@ fore200e_send(struct atm_vcc *vcc, struc + goto retry_here; + } + +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + + fore200e->tx_sat++; + DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n", +diff -urNp linux-2.6.35.4/drivers/atm/he.c linux-2.6.35.4/drivers/atm/he.c +--- linux-2.6.35.4/drivers/atm/he.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/he.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1770,7 +1770,7 @@ he_service_rbrq(struct he_dev *he_dev, i + + if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) { + hprintk("HBUF_ERR! (cid 0x%x)\n", cid); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto return_host_buffers; + } + +@@ -1803,7 +1803,7 @@ he_service_rbrq(struct he_dev *he_dev, i + RBRQ_LEN_ERR(he_dev->rbrq_head) + ? 
"LEN_ERR" : "", + vcc->vpi, vcc->vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto return_host_buffers; + } + +@@ -1862,7 +1862,7 @@ he_service_rbrq(struct he_dev *he_dev, i + vcc->push(vcc, skb); + spin_lock(&he_dev->global_lock); + +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + return_host_buffers: + ++pdus_assembled; +@@ -2207,7 +2207,7 @@ __enqueue_tpd(struct he_dev *he_dev, str + tpd->vcc->pop(tpd->vcc, tpd->skb); + else + dev_kfree_skb_any(tpd->skb); +- atomic_inc(&tpd->vcc->stats->tx_err); ++ atomic_inc_unchecked(&tpd->vcc->stats->tx_err); + } + pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status)); + return; +@@ -2619,7 +2619,7 @@ he_send(struct atm_vcc *vcc, struct sk_b + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -EINVAL; + } + +@@ -2630,7 +2630,7 @@ he_send(struct atm_vcc *vcc, struct sk_b + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -EINVAL; + } + #endif +@@ -2642,7 +2642,7 @@ he_send(struct atm_vcc *vcc, struct sk_b + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + return -ENOMEM; + } +@@ -2684,7 +2684,7 @@ he_send(struct atm_vcc *vcc, struct sk_b + vcc->pop(vcc, skb); + else + dev_kfree_skb_any(skb); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + return -ENOMEM; + } +@@ -2715,7 +2715,7 @@ he_send(struct atm_vcc *vcc, struct sk_b + __enqueue_tpd(he_dev, tpd, cid); + spin_unlock_irqrestore(&he_dev->global_lock, flags); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +diff -urNp linux-2.6.35.4/drivers/atm/horizon.c linux-2.6.35.4/drivers/atm/horizon.c +--- linux-2.6.35.4/drivers/atm/horizon.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/horizon.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, + { + struct atm_vcc * vcc = ATM_SKB(skb)->vcc; + // VC layer stats +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + __net_timestamp(skb); + // end of our responsability + vcc->push (vcc, skb); +@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const + dev->tx_iovec = NULL; + + // VC layer stats +- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx); ++ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx); + + // free the skb + hrz_kfree_skb (skb); +diff -urNp linux-2.6.35.4/drivers/atm/idt77252.c linux-2.6.35.4/drivers/atm/idt77252.c +--- linux-2.6.35.4/drivers/atm/idt77252.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/idt77252.c 2010-09-17 20:12:09.000000000 -0400 +@@ -811,7 +811,7 @@ drain_scq(struct idt77252_dev *card, str + else + dev_kfree_skb(skb); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + } + + atomic_dec(&scq->used); +@@ -1074,13 +1074,13 @@ dequeue_rx(struct idt77252_dev *card, st + if ((sb = dev_alloc_skb(64)) == NULL) { + printk("%s: Can't allocate buffers for aal0.\n", + card->name); +- atomic_add(i, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i, &vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) { + RXPRINTK("%s: atm_charge() dropped aal0 
packets.\n", + card->name); +- atomic_add(i - 1, &vcc->stats->rx_drop); ++ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); + dev_kfree_skb(sb); + break; + } +@@ -1097,7 +1097,7 @@ dequeue_rx(struct idt77252_dev *card, st + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + cell += ATM_CELL_PAYLOAD; + } +@@ -1134,13 +1134,13 @@ dequeue_rx(struct idt77252_dev *card, st + "(CDC: %08x)\n", + card->name, len, rpp->len, readl(SAR_REG_CDC)); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (stat & SAR_RSQE_CRC) { + RXPRINTK("%s: AAL5 CRC error.\n", card->name); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (skb_queue_len(&rpp->queue) > 1) { +@@ -1151,7 +1151,7 @@ dequeue_rx(struct idt77252_dev *card, st + RXPRINTK("%s: Can't alloc RX skb.\n", + card->name); + recycle_rx_pool_skb(card, rpp); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + if (!atm_charge(vcc, skb->truesize)) { +@@ -1170,7 +1170,7 @@ dequeue_rx(struct idt77252_dev *card, st + __net_timestamp(skb); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + return; + } +@@ -1192,7 +1192,7 @@ dequeue_rx(struct idt77252_dev *card, st + __net_timestamp(skb); + + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + if (skb->truesize > SAR_FB_SIZE_3) + add_rx_skb(card, 3, SAR_FB_SIZE_3, 1); +@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *car + if (vcc->qos.aal != ATM_AAL0) { + RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n", + card->name, vpi, vci); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto drop; + } + + if ((sb = dev_alloc_skb(64)) == NULL) { + printk("%s: Can't allocate buffers for AAL0.\n", + card->name); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto drop; + } + +@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *car + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + + drop: + skb_pull(queue, 64); +@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, s + + if (vc == NULL) { + printk("%s: NULL connection in send().\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } + if (!test_bit(VCF_TX, &vc->flags)) { + printk("%s: Trying to transmit on a non-tx VC.\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } +@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, s + break; + default: + printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } + + if (skb_shinfo(skb)->nr_frags != 0) { + printk("%s: No scatter-gather yet.\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return -EINVAL; + } +@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, s + + err = queue_skb(card, vc, skb, oam); + if (err) { +- 
atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb(skb); + return err; + } +@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, v + skb = dev_alloc_skb(64); + if (!skb) { + printk("%s: Out of memory in send_oam().\n", card->name); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + return -ENOMEM; + } + atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc); +diff -urNp linux-2.6.35.4/drivers/atm/iphase.c linux-2.6.35.4/drivers/atm/iphase.c +--- linux-2.6.35.4/drivers/atm/iphase.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/iphase.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1124,7 +1124,7 @@ static int rx_pkt(struct atm_dev *dev) + status = (u_short) (buf_desc_ptr->desc_mode); + if (status & (RX_CER | RX_PTE | RX_OFL)) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + IF_ERR(printk("IA: bad packet, dropping it");) + if (status & RX_CER) { + IF_ERR(printk(" cause: packet CRC error\n");) +@@ -1147,7 +1147,7 @@ static int rx_pkt(struct atm_dev *dev) + len = dma_addr - buf_addr; + if (len > iadev->rx_buf_sz) { + printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out_free_desc; + } + +@@ -1297,7 +1297,7 @@ static void rx_dle_intr(struct atm_dev * + ia_vcc = INPH_IA_VCC(vcc); + if (ia_vcc == NULL) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + dev_kfree_skb_any(skb); + atm_return(vcc, atm_guess_pdu2truesize(len)); + goto INCR_DLE; +@@ -1309,7 +1309,7 @@ static void rx_dle_intr(struct atm_dev * + if ((length > iadev->rx_buf_sz) || (length > + (skb->len - sizeof(struct cpcs_trailer)))) + { +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)", + length, skb->len);) + dev_kfree_skb_any(skb); +@@ -1325,7 +1325,7 @@ static void rx_dle_intr(struct atm_dev * + + IF_RX(printk("rx_dle_intr: skb push");) + vcc->push(vcc,skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + iadev->rx_pkt_cnt++; + } + INCR_DLE: +@@ -2807,15 +2807,15 @@ static int ia_ioctl(struct atm_dev *dev, + { + struct k_sonet_stats *stats; + stats = &PRIV(_ia_dev[board])->sonet_stats; +- printk("section_bip: %d\n", atomic_read(&stats->section_bip)); +- printk("line_bip : %d\n", atomic_read(&stats->line_bip)); +- printk("path_bip : %d\n", atomic_read(&stats->path_bip)); +- printk("line_febe : %d\n", atomic_read(&stats->line_febe)); +- printk("path_febe : %d\n", atomic_read(&stats->path_febe)); +- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs)); +- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs)); +- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells)); +- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells)); ++ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip)); ++ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip)); ++ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip)); ++ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe)); ++ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe)); ++ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs)); ++ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs)); ++ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells)); ++ printk("rx_cells : 
%d\n", atomic_read_unchecked(&stats->rx_cells)); + } + ia_cmds.status = 0; + break; +@@ -2920,7 +2920,7 @@ static int ia_pkt_tx (struct atm_vcc *vc + if ((desc == 0) || (desc > iadev->num_tx_desc)) + { + IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);) +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + if (vcc->pop) + vcc->pop(vcc, skb); + else +@@ -3025,14 +3025,14 @@ static int ia_pkt_tx (struct atm_vcc *vc + ATM_DESC(skb) = vcc->vci; + skb_queue_tail(&iadev->tx_dma_q, skb); + +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + iadev->tx_pkt_cnt++; + /* Increment transaction counter */ + writel(2, iadev->dma+IPHASE5575_TX_COUNTER); + + #if 0 + /* add flow control logic */ +- if (atomic_read(&vcc->stats->tx) % 20 == 0) { ++ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) { + if (iavcc->vc_desc_cnt > 10) { + vcc->tx_quota = vcc->tx_quota * 3 / 4; + printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota ); +diff -urNp linux-2.6.35.4/drivers/atm/lanai.c linux-2.6.35.4/drivers/atm/lanai.c +--- linux-2.6.35.4/drivers/atm/lanai.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/lanai.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct l + vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0); + lanai_endtx(lanai, lvcc); + lanai_free_skb(lvcc->tx.atmvcc, skb); +- atomic_inc(&lvcc->tx.atmvcc->stats->tx); ++ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx); + } + + /* Try to fill the buffer - don't call unless there is backlog */ +@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc + ATM_SKB(skb)->vcc = lvcc->rx.atmvcc; + __net_timestamp(skb); + lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx); + out: + lvcc->rx.buf.ptr = end; + cardvcc_write(lvcc, endptr, vcc_rxreadptr); +@@ -1668,7 +1668,7 @@ static int handle_service(struct lanai_d + DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 " + "vcc %d\n", lanai->number, (unsigned int) s, vci); + lanai->stats.service_rxnotaal5++; +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + return 0; + } + if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) { +@@ -1680,7 +1680,7 @@ static int handle_service(struct lanai_d + int bytes; + read_unlock(&vcc_sklist_lock); + DPRINTK("got trashed rx pdu on vci %d\n", vci); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_trash++; + bytes = (SERVICE_GET_END(s) * 16) - + (((unsigned long) lvcc->rx.buf.ptr) - +@@ -1692,7 +1692,7 @@ static int handle_service(struct lanai_d + } + if (s & SERVICE_STREAM) { + read_unlock(&vcc_sklist_lock); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_stream++; + printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream " + "PDU on VCI %d!\n", lanai->number, vci); +@@ -1700,7 +1700,7 @@ static int handle_service(struct lanai_d + return 0; + } + DPRINTK("got rx crc error on vci %d\n", vci); +- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err); ++ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err); + lvcc->stats.x.aal5.service_rxcrc++; + lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4]; + cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr); +diff -urNp linux-2.6.35.4/drivers/atm/nicstar.c 
linux-2.6.35.4/drivers/atm/nicstar.c +--- linux-2.6.35.4/drivers/atm/nicstar.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/nicstar.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1722,7 +1722,7 @@ static int ns_send(struct atm_vcc *vcc, + if ((vc = (vc_map *) vcc->dev_data) == NULL) + { + printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n", card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1730,7 +1730,7 @@ static int ns_send(struct atm_vcc *vcc, + if (!vc->tx) + { + printk("nicstar%d: Trying to transmit on a non-tx VC.\n", card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1738,7 +1738,7 @@ static int ns_send(struct atm_vcc *vcc, + if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) + { + printk("nicstar%d: Only AAL0 and AAL5 are supported.\n", card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1746,7 +1746,7 @@ static int ns_send(struct atm_vcc *vcc, + if (skb_shinfo(skb)->nr_frags != 0) + { + printk("nicstar%d: No scatter-gather yet.\n", card->index); +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EINVAL; + } +@@ -1791,11 +1791,11 @@ static int ns_send(struct atm_vcc *vcc, + + if (push_scqe(card, vc, scq, &scqe, skb) != 0) + { +- atomic_inc(&vcc->stats->tx_err); ++ atomic_inc_unchecked(&vcc->stats->tx_err); + dev_kfree_skb_any(skb); + return -EIO; + } +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + return 0; + } +@@ -2110,14 +2110,14 @@ static void dequeue_rx(ns_dev *card, ns_ + { + printk("nicstar%d: Can't allocate buffers for aal0.\n", + card->index); +- atomic_add(i,&vcc->stats->rx_drop); ++ atomic_add_unchecked(i,&vcc->stats->rx_drop); + break; + } + if (!atm_charge(vcc, sb->truesize)) + { + RXPRINTK("nicstar%d: atm_charge() dropped aal0 packets.\n", + card->index); +- atomic_add(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ ++ atomic_add_unchecked(i-1,&vcc->stats->rx_drop); /* already increased by 1 */ + dev_kfree_skb_any(sb); + break; + } +@@ -2132,7 +2132,7 @@ static void dequeue_rx(ns_dev *card, ns_ + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + cell += ATM_CELL_PAYLOAD; + } + +@@ -2151,7 +2151,7 @@ static void dequeue_rx(ns_dev *card, ns_ + if (iovb == NULL) + { + printk("nicstar%d: Out of iovec buffers.\n", card->index); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + recycle_rx_buf(card, skb); + return; + } +@@ -2181,7 +2181,7 @@ static void dequeue_rx(ns_dev *card, ns_ + else if (NS_SKB(iovb)->iovcnt >= NS_MAX_IOVECS) + { + printk("nicstar%d: received too big AAL5 SDU.\n", card->index); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, NS_MAX_IOVECS); + NS_SKB(iovb)->iovcnt = 0; + iovb->len = 0; +@@ -2201,7 +2201,7 @@ static void dequeue_rx(ns_dev *card, ns_ + printk("nicstar%d: Expected a small buffer, and this is not one.\n", + card->index); + which_list(card, skb); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_rx_buf(card, skb); + vc->rx_iov = NULL; + recycle_iov_buf(card, 
iovb); +@@ -2215,7 +2215,7 @@ static void dequeue_rx(ns_dev *card, ns_ + printk("nicstar%d: Expected a large buffer, and this is not one.\n", + card->index); + which_list(card, skb); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, + NS_SKB(iovb)->iovcnt); + vc->rx_iov = NULL; +@@ -2239,7 +2239,7 @@ static void dequeue_rx(ns_dev *card, ns_ + printk(" - PDU size mismatch.\n"); + else + printk(".\n"); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, + NS_SKB(iovb)->iovcnt); + vc->rx_iov = NULL; +@@ -2255,7 +2255,7 @@ static void dequeue_rx(ns_dev *card, ns_ + if (!atm_charge(vcc, skb->truesize)) + { + push_rxbufs(card, skb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } + else + { +@@ -2267,7 +2267,7 @@ static void dequeue_rx(ns_dev *card, ns_ + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + } + else if (NS_SKB(iovb)->iovcnt == 2) /* One small plus one large buffer */ +@@ -2282,7 +2282,7 @@ static void dequeue_rx(ns_dev *card, ns_ + if (!atm_charge(vcc, sb->truesize)) + { + push_rxbufs(card, sb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } + else + { +@@ -2294,7 +2294,7 @@ static void dequeue_rx(ns_dev *card, ns_ + ATM_SKB(sb)->vcc = vcc; + __net_timestamp(sb); + vcc->push(vcc, sb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + + push_rxbufs(card, skb); +@@ -2305,7 +2305,7 @@ static void dequeue_rx(ns_dev *card, ns_ + if (!atm_charge(vcc, skb->truesize)) + { + push_rxbufs(card, skb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } + else + { +@@ -2319,7 +2319,7 @@ static void dequeue_rx(ns_dev *card, ns_ + ATM_SKB(skb)->vcc = vcc; + __net_timestamp(skb); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + + push_rxbufs(card, sb); +@@ -2341,7 +2341,7 @@ static void dequeue_rx(ns_dev *card, ns_ + if (hb == NULL) + { + printk("nicstar%d: Out of huge buffers.\n", card->index); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + recycle_iovec_rx_bufs(card, (struct iovec *) iovb->data, + NS_SKB(iovb)->iovcnt); + vc->rx_iov = NULL; +@@ -2392,7 +2392,7 @@ static void dequeue_rx(ns_dev *card, ns_ + } + else + dev_kfree_skb_any(hb); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + } + else + { +@@ -2426,7 +2426,7 @@ static void dequeue_rx(ns_dev *card, ns_ + #endif /* NS_USE_DESTRUCTORS */ + __net_timestamp(hb); + vcc->push(vcc, hb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + } + +diff -urNp linux-2.6.35.4/drivers/atm/solos-pci.c linux-2.6.35.4/drivers/atm/solos-pci.c +--- linux-2.6.35.4/drivers/atm/solos-pci.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/solos-pci.c 2010-09-17 20:12:09.000000000 -0400 +@@ -715,7 +715,7 @@ void solos_bh(unsigned long card_arg) + } + atm_charge(vcc, skb->truesize); + vcc->push(vcc, skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + break; + + case PKT_STATUS: +@@ -1023,7 +1023,7 @@ static uint32_t fpga_tx(struct solos_car + vcc = SKB_CB(oldskb)->vcc; + + if (vcc) { +- atomic_inc(&vcc->stats->tx); ++ 
atomic_inc_unchecked(&vcc->stats->tx); + solos_pop(vcc, oldskb); + } else + dev_kfree_skb_irq(oldskb); +diff -urNp linux-2.6.35.4/drivers/atm/suni.c linux-2.6.35.4/drivers/atm/suni.c +--- linux-2.6.35.4/drivers/atm/suni.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/suni.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(sunis_lock); + + + #define ADD_LIMITED(s,v) \ +- atomic_add((v),&stats->s); \ +- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX); ++ atomic_add_unchecked((v),&stats->s); \ ++ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX); + + + static void suni_hz(unsigned long from_timer) +diff -urNp linux-2.6.35.4/drivers/atm/uPD98402.c linux-2.6.35.4/drivers/atm/uPD98402.c +--- linux-2.6.35.4/drivers/atm/uPD98402.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/uPD98402.c 2010-09-17 20:12:09.000000000 -0400 +@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *d + struct sonet_stats tmp; + int error = 0; + +- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); ++ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs); + sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp); + if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp)); + if (zero && !error) { +@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev + + + #define ADD_LIMITED(s,v) \ +- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \ +- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \ +- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); } ++ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \ ++ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \ ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); } + + + static void stat_event(struct atm_dev *dev) +@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev + if (reason & uPD98402_INT_PFM) stat_event(dev); + if (reason & uPD98402_INT_PCO) { + (void) GET(PCOCR); /* clear interrupt cause */ +- atomic_add(GET(HECCT), ++ atomic_add_unchecked(GET(HECCT), + &PRIV(dev)->sonet_stats.uncorr_hcs); + } + if ((reason & uPD98402_INT_RFO) && +@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev + PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO | + uPD98402_INT_LOS),PIMR); /* enable them */ + (void) fetch_stats(dev,NULL,1); /* clear kernel counters */ +- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1); +- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1); +- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1); ++ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1); + return 0; + } + +diff -urNp linux-2.6.35.4/drivers/atm/zatm.c linux-2.6.35.4/drivers/atm/zatm.c +--- linux-2.6.35.4/drivers/atm/zatm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/atm/zatm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy + } + if (!size) { + dev_kfree_skb_irq(skb); +- if (vcc) atomic_inc(&vcc->stats->rx_err); ++ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err); + continue; + } + if (!atm_charge(vcc,skb->truesize)) { +@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy + skb->len = size; + ATM_SKB(skb)->vcc = vcc; + vcc->push(vcc,skb); +- atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + } + zout(pos & 0xffff,MTA(mbx)); + #if 0 /* probably a stupid idea */ +@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != 
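
The suni.c and uPD98402.c hunks above show why these SONET counters must stay on the unchecked primitives: their ADD_LIMITED macro deliberately lets a counter run past INT_MAX and then clamps it, a wrap the REFCOUNT trap would otherwise treat as an exploit attempt. A self-contained sketch of that saturation logic, with the same stand-in type as the previous example:

	#include <limits.h>
	#include <stdio.h>

	typedef struct { int counter; } atomic_unchecked_t;	/* stand-in type */

	/* sketch of ADD_LIMITED: add, then saturate at INT_MAX once the
	 * wrapped sum goes negative */
	static void add_limited(atomic_unchecked_t *s, int v)
	{
		/* atomic_add_unchecked() in the patch; the unsigned cast keeps
		 * the intentional wrap free of signed-overflow UB here */
		s->counter = (int)((unsigned int)s->counter + (unsigned int)v);
		if (s->counter < 0)
			s->counter = INT_MAX;	/* atomic_set_unchecked(..., INT_MAX) */
	}

	int main(void)
	{
		atomic_unchecked_t bip = { INT_MAX - 1 };
		add_limited(&bip, 5);
		printf("saturated at %d\n", bip.counter);	/* INT_MAX */
		return 0;
	}
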
(uPD98401_TXPD + skb_queue_head(&zatm_vcc->backlog,skb); + break; + } +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + wake_up(&zatm_vcc->tx_wait); + } + +diff -urNp linux-2.6.35.4/drivers/char/agp/frontend.c linux-2.6.35.4/drivers/char/agp/frontend.c +--- linux-2.6.35.4/drivers/char/agp/frontend.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/agp/frontend.c 2010-09-17 20:12:09.000000000 -0400 +@@ -818,7 +818,7 @@ static int agpioc_reserve_wrap(struct ag + if (copy_from_user(&reserve, arg, sizeof(struct agp_region))) + return -EFAULT; + +- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment)) ++ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv)) + return -EFAULT; + + client = agp_find_client_by_pid(reserve.pid); +diff -urNp linux-2.6.35.4/drivers/char/agp/intel-agp.c linux-2.6.35.4/drivers/char/agp/intel-agp.c +--- linux-2.6.35.4/drivers/char/agp/intel-agp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/agp/intel-agp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1036,7 +1036,7 @@ static struct pci_device_id agp_intel_pc + ID(PCI_DEVICE_ID_INTEL_IRONLAKE_MC2_HB), + ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_HB), + ID(PCI_DEVICE_ID_INTEL_SANDYBRIDGE_M_HB), +- { } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, agp_intel_pci_table); +diff -urNp linux-2.6.35.4/drivers/char/hpet.c linux-2.6.35.4/drivers/char/hpet.c +--- linux-2.6.35.4/drivers/char/hpet.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/hpet.c 2010-09-17 20:12:09.000000000 -0400 +@@ -429,7 +429,7 @@ static int hpet_release(struct inode *in + return 0; + } + +-static int hpet_ioctl_common(struct hpet_dev *, int, unsigned long, int); ++static int hpet_ioctl_common(struct hpet_dev *, unsigned int, unsigned long, int); + + static long hpet_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) +@@ -553,7 +553,7 @@ static inline unsigned long hpet_time_di + } + + static int +-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg, int kernel) ++hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg, int kernel) + { + struct hpet_timer __iomem *timer; + struct hpet __iomem *hpet; +@@ -998,7 +998,7 @@ static struct acpi_driver hpet_acpi_driv + }, + }; + +-static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops }; ++static struct miscdevice hpet_misc = { HPET_MINOR, "hpet", &hpet_fops, {NULL, NULL}, NULL, NULL }; + + static int __init hpet_init(void) + { +diff -urNp linux-2.6.35.4/drivers/char/hvc_console.h linux-2.6.35.4/drivers/char/hvc_console.h +--- linux-2.6.35.4/drivers/char/hvc_console.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/hvc_console.h 2010-09-17 20:12:09.000000000 -0400 +@@ -82,6 +82,7 @@ extern int hvc_instantiate(uint32_t vter + /* register a vterm for hvc tty operation (module_init or hotplug add) */ + extern struct hvc_struct * hvc_alloc(uint32_t vtermno, int data, + const struct hv_ops *ops, int outbuf_size); ++ + /* remove a vterm from hvc tty operation (module_exit or hotplug remove) */ + extern int hvc_remove(struct hvc_struct *hp); + +diff -urNp linux-2.6.35.4/drivers/char/hvcs.c linux-2.6.35.4/drivers/char/hvcs.c +--- linux-2.6.35.4/drivers/char/hvcs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/hvcs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -270,7 +270,7 @@ struct hvcs_struct { + unsigned int index; + + struct tty_struct *tty; +- int open_count; ++ atomic_t open_count; + + 
/* + * Used to tell the driver kernel_thread what operations need to take +@@ -420,7 +420,7 @@ static ssize_t hvcs_vterm_state_store(st + + spin_lock_irqsave(&hvcsd->lock, flags); + +- if (hvcsd->open_count > 0) { ++ if (atomic_read(&hvcsd->open_count) > 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + printk(KERN_INFO "HVCS: vterm state unchanged. " + "The hvcs device node is still in use.\n"); +@@ -1136,7 +1136,7 @@ static int hvcs_open(struct tty_struct * + if ((retval = hvcs_partner_connect(hvcsd))) + goto error_release; + +- hvcsd->open_count = 1; ++ atomic_set(&hvcsd->open_count, 1); + hvcsd->tty = tty; + tty->driver_data = hvcsd; + +@@ -1170,7 +1170,7 @@ fast_open: + + spin_lock_irqsave(&hvcsd->lock, flags); + kref_get(&hvcsd->kref); +- hvcsd->open_count++; ++ atomic_inc(&hvcsd->open_count); + hvcsd->todo_mask |= HVCS_SCHED_READ; + spin_unlock_irqrestore(&hvcsd->lock, flags); + +@@ -1214,7 +1214,7 @@ static void hvcs_close(struct tty_struct + hvcsd = tty->driver_data; + + spin_lock_irqsave(&hvcsd->lock, flags); +- if (--hvcsd->open_count == 0) { ++ if (atomic_dec_and_test(&hvcsd->open_count)) { + + vio_disable_interrupts(hvcsd->vdev); + +@@ -1240,10 +1240,10 @@ static void hvcs_close(struct tty_struct + free_irq(irq, hvcsd); + kref_put(&hvcsd->kref, destroy_hvcs_struct); + return; +- } else if (hvcsd->open_count < 0) { ++ } else if (atomic_read(&hvcsd->open_count) < 0) { + printk(KERN_ERR "HVCS: vty-server@%X open_count: %d" + " is missmanaged.\n", +- hvcsd->vdev->unit_address, hvcsd->open_count); ++ hvcsd->vdev->unit_address, atomic_read(&hvcsd->open_count)); + } + + spin_unlock_irqrestore(&hvcsd->lock, flags); +@@ -1259,7 +1259,7 @@ static void hvcs_hangup(struct tty_struc + + spin_lock_irqsave(&hvcsd->lock, flags); + /* Preserve this so that we know how many kref refs to put */ +- temp_open_count = hvcsd->open_count; ++ temp_open_count = atomic_read(&hvcsd->open_count); + + /* + * Don't kref put inside the spinlock because the destruction +@@ -1274,7 +1274,7 @@ static void hvcs_hangup(struct tty_struc + hvcsd->tty->driver_data = NULL; + hvcsd->tty = NULL; + +- hvcsd->open_count = 0; ++ atomic_set(&hvcsd->open_count, 0); + + /* This will drop any buffered data on the floor which is OK in a hangup + * scenario. */ +@@ -1345,7 +1345,7 @@ static int hvcs_write(struct tty_struct + * the middle of a write operation? This is a crummy place to do this + * but we want to keep it all in the spinlock. 
+ */ +- if (hvcsd->open_count <= 0) { ++ if (atomic_read(&hvcsd->open_count) <= 0) { + spin_unlock_irqrestore(&hvcsd->lock, flags); + return -ENODEV; + } +@@ -1419,7 +1419,7 @@ static int hvcs_write_room(struct tty_st + { + struct hvcs_struct *hvcsd = tty->driver_data; + +- if (!hvcsd || hvcsd->open_count <= 0) ++ if (!hvcsd || atomic_read(&hvcsd->open_count) <= 0) + return 0; + + return HVCS_BUFF_LEN - hvcsd->chars_in_buffer; +diff -urNp linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c +--- linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/ipmi/ipmi_msghandler.c 2010-09-17 20:12:09.000000000 -0400 +@@ -414,7 +414,7 @@ struct ipmi_smi { + struct proc_dir_entry *proc_dir; + char proc_dir_name[10]; + +- atomic_t stats[IPMI_NUM_STATS]; ++ atomic_unchecked_t stats[IPMI_NUM_STATS]; + + /* + * run_to_completion duplicate of smb_info, smi_info +@@ -447,9 +447,9 @@ static DEFINE_MUTEX(smi_watchers_mutex); + + + #define ipmi_inc_stat(intf, stat) \ +- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat]) ++ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]) + #define ipmi_get_stat(intf, stat) \ +- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat])) ++ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])) + + static int is_lan_addr(struct ipmi_addr *addr) + { +@@ -2817,7 +2817,7 @@ int ipmi_register_smi(struct ipmi_smi_ha + INIT_LIST_HEAD(&intf->cmd_rcvrs); + init_waitqueue_head(&intf->waitq); + for (i = 0; i < IPMI_NUM_STATS; i++) +- atomic_set(&intf->stats[i], 0); ++ atomic_set_unchecked(&intf->stats[i], 0); + + intf->proc_dir = NULL; + +diff -urNp linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c +--- linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/ipmi/ipmi_si_intf.c 2010-09-17 20:12:09.000000000 -0400 +@@ -286,7 +286,7 @@ struct smi_info { + unsigned char slave_addr; + + /* Counters and things for the proc filesystem. 
*/ +- atomic_t stats[SI_NUM_STATS]; ++ atomic_unchecked_t stats[SI_NUM_STATS]; + + struct task_struct *thread; + +@@ -294,9 +294,9 @@ struct smi_info { + }; + + #define smi_inc_stat(smi, stat) \ +- atomic_inc(&(smi)->stats[SI_STAT_ ## stat]) ++ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat]) + #define smi_get_stat(smi, stat) \ +- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat])) ++ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat])) + + #define SI_MAX_PARMS 4 + +@@ -3143,7 +3143,7 @@ static int try_smi_init(struct smi_info + atomic_set(&new_smi->req_events, 0); + new_smi->run_to_completion = 0; + for (i = 0; i < SI_NUM_STATS; i++) +- atomic_set(&new_smi->stats[i], 0); ++ atomic_set_unchecked(&new_smi->stats[i], 0); + + new_smi->interrupt_disabled = 1; + atomic_set(&new_smi->stop_operation, 0); +diff -urNp linux-2.6.35.4/drivers/char/keyboard.c linux-2.6.35.4/drivers/char/keyboard.c +--- linux-2.6.35.4/drivers/char/keyboard.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/keyboard.c 2010-09-17 20:12:37.000000000 -0400 +@@ -640,6 +640,16 @@ static void k_spec(struct vc_data *vc, u + kbd->kbdmode == VC_MEDIUMRAW) && + value != KVAL(K_SAK)) + return; /* SAK is allowed even in raw mode */ ++ ++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP) ++ { ++ void *func = fn_handler[value]; ++ if (func == fn_show_state || func == fn_show_ptregs || ++ func == fn_show_mem) ++ return; ++ } ++#endif ++ + fn_handler[value](vc); + } + +@@ -1392,7 +1402,7 @@ static const struct input_device_id kbd_ + .evbit = { BIT_MASK(EV_SND) }, + }, + +- { }, /* Terminating entry */ ++ { 0 }, /* Terminating entry */ + }; + + MODULE_DEVICE_TABLE(input, kbd_ids); +diff -urNp linux-2.6.35.4/drivers/char/mem.c linux-2.6.35.4/drivers/char/mem.c +--- linux-2.6.35.4/drivers/char/mem.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/mem.c 2010-09-17 20:12:37.000000000 -0400 +@@ -18,6 +18,7 @@ + #include <linux/raw.h> + #include <linux/tty.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include <linux/ptrace.h> + #include <linux/device.h> + #include <linux/highmem.h> +@@ -34,6 +35,10 @@ + # include <linux/efi.h> + #endif + ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++extern struct file_operations grsec_fops; ++#endif ++ + static inline unsigned long size_inside_page(unsigned long start, + unsigned long size) + { +@@ -120,6 +125,7 @@ static ssize_t read_mem(struct file *fil + + while (count > 0) { + unsigned long remaining; ++ char *temp; + + sz = size_inside_page(p, count); + +@@ -135,7 +141,23 @@ static ssize_t read_mem(struct file *fil + if (!ptr) + return -EFAULT; + +- remaining = copy_to_user(buf, ptr, sz); ++#ifdef CONFIG_PAX_USERCOPY ++ temp = kmalloc(sz, GFP_KERNEL); ++ if (!temp) { ++ unxlate_dev_mem_ptr(p, ptr); ++ return -ENOMEM; ++ } ++ memcpy(temp, ptr, sz); ++#else ++ temp = ptr; ++#endif ++ ++ remaining = copy_to_user(buf, temp, sz); ++ ++#ifdef CONFIG_PAX_USERCOPY ++ kfree(temp); ++#endif ++ + unxlate_dev_mem_ptr(p, ptr); + if (remaining) + return -EFAULT; +@@ -161,6 +183,11 @@ static ssize_t write_mem(struct file *fi + if (!valid_phys_addr_range(p, count)) + return -EFAULT; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_mem_write(); ++ return -EPERM; ++#endif ++ + written = 0; + + #ifdef __ARCH_HAS_NO_PAGE_ZERO_MAPPED +@@ -316,6 +343,11 @@ static int mmap_mem(struct file *file, s + &vma->vm_page_prot)) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ if 
(gr_handle_mem_mmap(vma->vm_pgoff << PAGE_SHIFT, vma)) ++ return -EPERM; ++#endif ++ + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_pgoff, + size, + vma->vm_page_prot); +@@ -398,9 +430,8 @@ static ssize_t read_kmem(struct file *fi + size_t count, loff_t *ppos) + { + unsigned long p = *ppos; +- ssize_t low_count, read, sz; ++ ssize_t low_count, read, sz, err = 0; + char * kbuf; /* k-addr because vread() takes vmlist_lock rwlock */ +- int err = 0; + + read = 0; + if (p < (unsigned long) high_memory) { +@@ -422,6 +453,8 @@ static ssize_t read_kmem(struct file *fi + } + #endif + while (low_count > 0) { ++ char *temp; ++ + sz = size_inside_page(p, low_count); + + /* +@@ -431,7 +464,22 @@ static ssize_t read_kmem(struct file *fi + */ + kbuf = xlate_dev_kmem_ptr((char *)p); + +- if (copy_to_user(buf, kbuf, sz)) ++#ifdef CONFIG_PAX_USERCOPY ++ temp = kmalloc(sz, GFP_KERNEL); ++ if (!temp) ++ return -ENOMEM; ++ memcpy(temp, kbuf, sz); ++#else ++ temp = kbuf; ++#endif ++ ++ err = copy_to_user(buf, temp, sz); ++ ++#ifdef CONFIG_PAX_USERCOPY ++ kfree(temp); ++#endif ++ ++ if (err) + return -EFAULT; + buf += sz; + p += sz; +@@ -530,6 +578,11 @@ static ssize_t write_kmem(struct file *f + char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */ + int err = 0; + ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_kmem_write(); ++ return -EPERM; ++#endif ++ + if (p < (unsigned long) high_memory) { + unsigned long to_write = min_t(unsigned long, count, + (unsigned long)high_memory - p); +@@ -731,6 +784,16 @@ static loff_t memory_lseek(struct file * + + static int open_port(struct inode * inode, struct file * filp) + { ++#ifdef CONFIG_GRKERNSEC_KMEM ++ gr_handle_open_port(); ++ return -EPERM; ++#endif ++ ++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM; ++} ++ ++static int open_mem(struct inode * inode, struct file * filp) ++{ + return capable(CAP_SYS_RAWIO) ? 
0 : -EPERM; + } + +@@ -738,7 +801,6 @@ static int open_port(struct inode * inod + #define full_lseek null_lseek + #define write_zero write_null + #define read_full read_zero +-#define open_mem open_port + #define open_kmem open_mem + #define open_oldmem open_mem + +@@ -854,6 +916,9 @@ static const struct memdev { + #ifdef CONFIG_CRASH_DUMP + [12] = { "oldmem", 0, &oldmem_fops, NULL }, + #endif ++#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC) ++ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL }, ++#endif + }; + + static int memory_open(struct inode *inode, struct file *filp) +diff -urNp linux-2.6.35.4/drivers/char/n_tty.c linux-2.6.35.4/drivers/char/n_tty.c +--- linux-2.6.35.4/drivers/char/n_tty.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/n_tty.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2105,6 +2105,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ + { + *ops = tty_ldisc_N_TTY; + ops->owner = NULL; +- ops->refcount = ops->flags = 0; ++ atomic_set(&ops->refcount, 0); ++ ops->flags = 0; + } + EXPORT_SYMBOL_GPL(n_tty_inherit_ops); +diff -urNp linux-2.6.35.4/drivers/char/nvram.c linux-2.6.35.4/drivers/char/nvram.c +--- linux-2.6.35.4/drivers/char/nvram.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/nvram.c 2010-09-17 20:12:09.000000000 -0400 +@@ -245,7 +245,7 @@ static ssize_t nvram_read(struct file *f + + spin_unlock_irq(&rtc_lock); + +- if (copy_to_user(buf, contents, tmp - contents)) ++ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents)) + return -EFAULT; + + *ppos = i; +@@ -434,7 +434,10 @@ static const struct file_operations nvra + static struct miscdevice nvram_dev = { + NVRAM_MINOR, + "nvram", +- &nvram_fops ++ &nvram_fops, ++ {NULL, NULL}, ++ NULL, ++ NULL + }; + + static int __init nvram_init(void) +diff -urNp linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c +--- linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/pcmcia/ipwireless/tty.c 2010-09-17 20:12:09.000000000 -0400 +@@ -51,7 +51,7 @@ struct ipw_tty { + int tty_type; + struct ipw_network *network; + struct tty_struct *linux_tty; +- int open_count; ++ atomic_t open_count; + unsigned int control_lines; + struct mutex ipw_tty_mutex; + int tx_bytes_queued; +@@ -127,10 +127,10 @@ static int ipw_open(struct tty_struct *l + mutex_unlock(&tty->ipw_tty_mutex); + return -ENODEV; + } +- if (tty->open_count == 0) ++ if (atomic_read(&tty->open_count) == 0) + tty->tx_bytes_queued = 0; + +- tty->open_count++; ++ atomic_inc(&tty->open_count); + + tty->linux_tty = linux_tty; + linux_tty->driver_data = tty; +@@ -146,9 +146,7 @@ static int ipw_open(struct tty_struct *l + + static void do_ipw_close(struct ipw_tty *tty) + { +- tty->open_count--; +- +- if (tty->open_count == 0) { ++ if (atomic_dec_return(&tty->open_count) == 0) { + struct tty_struct *linux_tty = tty->linux_tty; + + if (linux_tty != NULL) { +@@ -169,7 +167,7 @@ static void ipw_hangup(struct tty_struct + return; + + mutex_lock(&tty->ipw_tty_mutex); +- if (tty->open_count == 0) { ++ if (atomic_read(&tty->open_count) == 0) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } +@@ -198,7 +196,7 @@ void ipwireless_tty_received(struct ipw_ + return; + } + +- if (!tty->open_count) { ++ if (!atomic_read(&tty->open_count)) { + mutex_unlock(&tty->ipw_tty_mutex); + return; + } +@@ -240,7 +238,7 @@ static int ipw_write(struct tty_struct * + return -ENODEV; + + 
mutex_lock(&tty->ipw_tty_mutex); +- if (!tty->open_count) { ++ if (!atomic_read(&tty->open_count)) { + mutex_unlock(&tty->ipw_tty_mutex); + return -EINVAL; + } +@@ -280,7 +278,7 @@ static int ipw_write_room(struct tty_str + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!atomic_read(&tty->open_count)) + return -EINVAL; + + room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued; +@@ -322,7 +320,7 @@ static int ipw_chars_in_buffer(struct tt + if (!tty) + return 0; + +- if (!tty->open_count) ++ if (!atomic_read(&tty->open_count)) + return 0; + + return tty->tx_bytes_queued; +@@ -403,7 +401,7 @@ static int ipw_tiocmget(struct tty_struc + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!atomic_read(&tty->open_count)) + return -EINVAL; + + return get_control_lines(tty); +@@ -419,7 +417,7 @@ ipw_tiocmset(struct tty_struct *linux_tt + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!atomic_read(&tty->open_count)) + return -EINVAL; + + return set_control_lines(tty, set, clear); +@@ -433,7 +431,7 @@ static int ipw_ioctl(struct tty_struct * + if (!tty) + return -ENODEV; + +- if (!tty->open_count) ++ if (!atomic_read(&tty->open_count)) + return -EINVAL; + + /* FIXME: Exactly how is the tty object locked here .. */ +@@ -582,7 +580,7 @@ void ipwireless_tty_free(struct ipw_tty + against a parallel ioctl etc */ + mutex_lock(&ttyj->ipw_tty_mutex); + } +- while (ttyj->open_count) ++ while (atomic_read(&ttyj->open_count)) + do_ipw_close(ttyj); + ipwireless_disassociate_network_ttys(network, + ttyj->channel_idx); +diff -urNp linux-2.6.35.4/drivers/char/pty.c linux-2.6.35.4/drivers/char/pty.c +--- linux-2.6.35.4/drivers/char/pty.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/pty.c 2010-09-17 20:12:09.000000000 -0400 +@@ -677,7 +677,18 @@ static int ptmx_open(struct inode *inode + return ret; + } + +-static struct file_operations ptmx_fops; ++static const struct file_operations ptmx_fops = { ++ .llseek = no_llseek, ++ .read = tty_read, ++ .write = tty_write, ++ .poll = tty_poll, ++ .unlocked_ioctl = tty_ioctl, ++ .compat_ioctl = tty_compat_ioctl, ++ .open = ptmx_open, ++ .release = tty_release, ++ .fasync = tty_fasync, ++}; ++ + + static void __init unix98_pty_init(void) + { +@@ -731,9 +742,6 @@ static void __init unix98_pty_init(void) + register_sysctl_table(pty_root_table); + + /* Now create the /dev/ptmx special device */ +- tty_default_fops(&ptmx_fops); +- ptmx_fops.open = ptmx_open; +- + cdev_init(&ptmx_cdev, &ptmx_fops); + if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) || + register_chrdev_region(MKDEV(TTYAUX_MAJOR, 2), 1, "/dev/ptmx") < 0) +diff -urNp linux-2.6.35.4/drivers/char/random.c linux-2.6.35.4/drivers/char/random.c +--- linux-2.6.35.4/drivers/char/random.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/random.c 2010-09-17 20:24:41.000000000 -0400 +@@ -254,8 +254,13 @@ + /* + * Configuration information + */ ++#ifdef CONFIG_GRKERNSEC_RANDNET ++#define INPUT_POOL_WORDS 512 ++#define OUTPUT_POOL_WORDS 128 ++#else + #define INPUT_POOL_WORDS 128 + #define OUTPUT_POOL_WORDS 32 ++#endif + #define SEC_XFER_SIZE 512 + #define EXTRACT_SIZE 10 + +@@ -293,10 +298,17 @@ static struct poolinfo { + int poolwords; + int tap1, tap2, tap3, tap4, tap5; + } poolinfo_table[] = { ++#ifdef CONFIG_GRKERNSEC_RANDNET ++ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */ ++ { 512, 411, 308, 208, 104, 1 }, ++ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */ ++ { 128, 103, 76, 51, 25, 1 }, ++#else + /* x^128 + x^103 + 
x^76 + x^51 +x^25 + x + 1 -- 105 */ + { 128, 103, 76, 51, 25, 1 }, + /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */ + { 32, 26, 20, 14, 7, 1 }, ++#endif + #if 0 + /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */ + { 2048, 1638, 1231, 819, 411, 1 }, +@@ -902,7 +914,7 @@ static ssize_t extract_entropy_user(stru + + extract_buf(r, tmp); + i = min_t(int, nbytes, EXTRACT_SIZE); +- if (copy_to_user(buf, tmp, i)) { ++ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) { + ret = -EFAULT; + break; + } +@@ -1205,7 +1217,7 @@ EXPORT_SYMBOL(generate_random_uuid); + #include <linux/sysctl.h> + + static int min_read_thresh = 8, min_write_thresh; +-static int max_read_thresh = INPUT_POOL_WORDS * 32; ++static int max_read_thresh = OUTPUT_POOL_WORDS * 32; + static int max_write_thresh = INPUT_POOL_WORDS * 32; + static char sysctl_bootid[16]; + +diff -urNp linux-2.6.35.4/drivers/char/sonypi.c linux-2.6.35.4/drivers/char/sonypi.c +--- linux-2.6.35.4/drivers/char/sonypi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/sonypi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -491,7 +491,7 @@ static struct sonypi_device { + spinlock_t fifo_lock; + wait_queue_head_t fifo_proc_list; + struct fasync_struct *fifo_async; +- int open_count; ++ atomic_t open_count; + int model; + struct input_dev *input_jog_dev; + struct input_dev *input_key_dev; +@@ -898,7 +898,7 @@ static int sonypi_misc_fasync(int fd, st + static int sonypi_misc_release(struct inode *inode, struct file *file) + { + mutex_lock(&sonypi_device.lock); +- sonypi_device.open_count--; ++ atomic_dec(&sonypi_device.open_count); + mutex_unlock(&sonypi_device.lock); + return 0; + } +@@ -907,9 +907,9 @@ static int sonypi_misc_open(struct inode + { + mutex_lock(&sonypi_device.lock); + /* Flush input queue on first open */ +- if (!sonypi_device.open_count) ++ if (!atomic_read(&sonypi_device.open_count)) + kfifo_reset(&sonypi_device.fifo); +- sonypi_device.open_count++; ++ atomic_inc(&sonypi_device.open_count); + mutex_unlock(&sonypi_device.lock); + + return 0; +diff -urNp linux-2.6.35.4/drivers/char/tpm/tpm_bios.c linux-2.6.35.4/drivers/char/tpm/tpm_bios.c +--- linux-2.6.35.4/drivers/char/tpm/tpm_bios.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/tpm/tpm_bios.c 2010-09-17 20:12:09.000000000 -0400 +@@ -173,7 +173,7 @@ static void *tpm_bios_measurements_start + event = addr; + + if ((event->event_type == 0 && event->event_size == 0) || +- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit)) ++ (event->event_size >= limit - addr - sizeof(struct tcpa_event))) + return NULL; + + return addr; +@@ -198,7 +198,7 @@ static void *tpm_bios_measurements_next( + return NULL; + + if ((event->event_type == 0 && event->event_size == 0) || +- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit)) ++ (event->event_size >= limit - v - sizeof(struct tcpa_event))) + return NULL; + + (*pos)++; +@@ -291,7 +291,8 @@ static int tpm_binary_bios_measurements_ + int i; + + for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++) +- seq_putc(m, data[i]); ++ if (!seq_putc(m, data[i])) ++ return -EFAULT; + + return 0; + } +@@ -410,6 +411,11 @@ static int read_log(struct tpm_bios_log + log->bios_event_log_end = log->bios_event_log + len; + + virt = acpi_os_map_memory(start, len); ++ if (!virt) { ++ kfree(log->bios_event_log); ++ log->bios_event_log = NULL; ++ return -EFAULT; ++ } + + memcpy(log->bios_event_log, virt, len); + +diff -urNp linux-2.6.35.4/drivers/char/tty_io.c 
linux-2.6.35.4/drivers/char/tty_io.c +--- linux-2.6.35.4/drivers/char/tty_io.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/tty_io.c 2010-09-17 20:12:09.000000000 -0400 +@@ -136,20 +136,10 @@ LIST_HEAD(tty_drivers); /* linked list + DEFINE_MUTEX(tty_mutex); + EXPORT_SYMBOL(tty_mutex); + +-static ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); +-static ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); + ssize_t redirected_tty_write(struct file *, const char __user *, + size_t, loff_t *); +-static unsigned int tty_poll(struct file *, poll_table *); + static int tty_open(struct inode *, struct file *); + long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg); +-#ifdef CONFIG_COMPAT +-static long tty_compat_ioctl(struct file *file, unsigned int cmd, +- unsigned long arg); +-#else +-#define tty_compat_ioctl NULL +-#endif +-static int tty_fasync(int fd, struct file *filp, int on); + static void release_tty(struct tty_struct *tty, int idx); + static void __proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); + static void proc_set_tty(struct task_struct *tsk, struct tty_struct *tty); +@@ -871,7 +861,7 @@ EXPORT_SYMBOL(start_tty); + * read calls may be outstanding in parallel. + */ + +-static ssize_t tty_read(struct file *file, char __user *buf, size_t count, ++ssize_t tty_read(struct file *file, char __user *buf, size_t count, + loff_t *ppos) + { + int i; +@@ -899,6 +889,8 @@ static ssize_t tty_read(struct file *fil + return i; + } + ++EXPORT_SYMBOL(tty_read); ++ + void tty_write_unlock(struct tty_struct *tty) + { + mutex_unlock(&tty->atomic_write_lock); +@@ -1048,7 +1040,7 @@ void tty_write_message(struct tty_struct + * write method will not be invoked in parallel for each device. + */ + +-static ssize_t tty_write(struct file *file, const char __user *buf, ++ssize_t tty_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { + struct tty_struct *tty; +@@ -1075,6 +1067,8 @@ static ssize_t tty_write(struct file *fi + return ret; + } + ++EXPORT_SYMBOL(tty_write); ++ + ssize_t redirected_tty_write(struct file *file, const char __user *buf, + size_t count, loff_t *ppos) + { +@@ -1897,6 +1891,8 @@ got_driver: + + + ++EXPORT_SYMBOL(tty_release); ++ + /** + * tty_poll - check tty status + * @filp: file being polled +@@ -1909,7 +1905,7 @@ got_driver: + * may be re-entered freely by other callers. 
+ */ + +-static unsigned int tty_poll(struct file *filp, poll_table *wait) ++unsigned int tty_poll(struct file *filp, poll_table *wait) + { + struct tty_struct *tty; + struct tty_ldisc *ld; +@@ -1926,7 +1922,9 @@ static unsigned int tty_poll(struct file + return ret; + } + +-static int tty_fasync(int fd, struct file *filp, int on) ++EXPORT_SYMBOL(tty_poll); ++ ++int tty_fasync(int fd, struct file *filp, int on) + { + struct tty_struct *tty; + unsigned long flags; +@@ -1970,6 +1968,8 @@ out: + return retval; + } + ++EXPORT_SYMBOL(tty_fasync); ++ + /** + * tiocsti - fake input character + * @tty: tty to fake input into +@@ -2602,8 +2602,10 @@ long tty_ioctl(struct file *file, unsign + return retval; + } + ++EXPORT_SYMBOL(tty_ioctl); ++ + #ifdef CONFIG_COMPAT +-static long tty_compat_ioctl(struct file *file, unsigned int cmd, ++long tty_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long arg) + { + struct inode *inode = file->f_dentry->d_inode; +@@ -2627,6 +2629,9 @@ static long tty_compat_ioctl(struct file + + return retval; + } ++ ++EXPORT_SYMBOL(tty_compat_ioctl); ++ + #endif + + /* +@@ -3070,11 +3075,6 @@ struct tty_struct *get_current_tty(void) + } + EXPORT_SYMBOL_GPL(get_current_tty); + +-void tty_default_fops(struct file_operations *fops) +-{ +- *fops = tty_fops; +-} +- + /* + * Initialize the console device. This is called *early*, so + * we can't necessarily depend on lots of kernel help here. +diff -urNp linux-2.6.35.4/drivers/char/tty_ldisc.c linux-2.6.35.4/drivers/char/tty_ldisc.c +--- linux-2.6.35.4/drivers/char/tty_ldisc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/tty_ldisc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -75,7 +75,7 @@ static void put_ldisc(struct tty_ldisc * + if (atomic_dec_and_lock(&ld->users, &tty_ldisc_lock)) { + struct tty_ldisc_ops *ldo = ld->ops; + +- ldo->refcount--; ++ atomic_dec(&ldo->refcount); + module_put(ldo->owner); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + +@@ -109,7 +109,7 @@ int tty_register_ldisc(int disc, struct + spin_lock_irqsave(&tty_ldisc_lock, flags); + tty_ldiscs[disc] = new_ldisc; + new_ldisc->num = disc; +- new_ldisc->refcount = 0; ++ atomic_set(&new_ldisc->refcount, 0); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + + return ret; +@@ -137,7 +137,7 @@ int tty_unregister_ldisc(int disc) + return -EINVAL; + + spin_lock_irqsave(&tty_ldisc_lock, flags); +- if (tty_ldiscs[disc]->refcount) ++ if (atomic_read(&tty_ldiscs[disc]->refcount)) + ret = -EBUSY; + else + tty_ldiscs[disc] = NULL; +@@ -158,7 +158,7 @@ static struct tty_ldisc_ops *get_ldops(i + if (ldops) { + ret = ERR_PTR(-EAGAIN); + if (try_module_get(ldops->owner)) { +- ldops->refcount++; ++ atomic_inc(&ldops->refcount); + ret = ldops; + } + } +@@ -171,7 +171,7 @@ static void put_ldops(struct tty_ldisc_o + unsigned long flags; + + spin_lock_irqsave(&tty_ldisc_lock, flags); +- ldops->refcount--; ++ atomic_dec(&ldops->refcount); + module_put(ldops->owner); + spin_unlock_irqrestore(&tty_ldisc_lock, flags); + } +diff -urNp linux-2.6.35.4/drivers/char/vt_ioctl.c linux-2.6.35.4/drivers/char/vt_ioctl.c +--- linux-2.6.35.4/drivers/char/vt_ioctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/char/vt_ioctl.c 2010-09-17 20:12:37.000000000 -0400 +@@ -210,9 +210,6 @@ do_kdsk_ioctl(int cmd, struct kbentry __ + if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry))) + return -EFAULT; + +- if (!capable(CAP_SYS_TTY_CONFIG)) +- perm = 0; +- + switch (cmd) { + case KDGKBENT: + key_map = key_maps[s]; +@@ -224,8 +221,12 @@ 
do_kdsk_ioctl(int cmd, struct kbentry __ + val = (i ? K_HOLE : K_NOSUCHMAP); + return put_user(val, &user_kbe->kb_value); + case KDSKBENT: ++ if (!capable(CAP_SYS_TTY_CONFIG)) ++ perm = 0; ++ + if (!perm) + return -EPERM; ++ + if (!i && v == K_NOSUCHMAP) { + /* deallocate map */ + key_map = key_maps[s]; +@@ -325,9 +326,6 @@ do_kdgkb_ioctl(int cmd, struct kbsentry + int i, j, k; + int ret; + +- if (!capable(CAP_SYS_TTY_CONFIG)) +- perm = 0; +- + kbs = kmalloc(sizeof(*kbs), GFP_KERNEL); + if (!kbs) { + ret = -ENOMEM; +@@ -361,6 +359,9 @@ do_kdgkb_ioctl(int cmd, struct kbsentry + kfree(kbs); + return ((p && *p) ? -EOVERFLOW : 0); + case KDSKBSENT: ++ if (!capable(CAP_SYS_TTY_CONFIG)) ++ perm = 0; ++ + if (!perm) { + ret = -EPERM; + goto reterr; +diff -urNp linux-2.6.35.4/drivers/cpuidle/sysfs.c linux-2.6.35.4/drivers/cpuidle/sysfs.c +--- linux-2.6.35.4/drivers/cpuidle/sysfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/cpuidle/sysfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -300,7 +300,7 @@ static struct kobj_type ktype_state_cpui + .release = cpuidle_state_sysfs_release, + }; + +-static void inline cpuidle_free_state_kobj(struct cpuidle_device *device, int i) ++static inline void cpuidle_free_state_kobj(struct cpuidle_device *device, int i) + { + kobject_put(&device->kobjs[i]->kobj); + wait_for_completion(&device->kobjs[i]->kobj_unregister); +diff -urNp linux-2.6.35.4/drivers/edac/edac_core.h linux-2.6.35.4/drivers/edac/edac_core.h +--- linux-2.6.35.4/drivers/edac/edac_core.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/edac/edac_core.h 2010-09-17 20:12:09.000000000 -0400 +@@ -100,11 +100,11 @@ extern const char *edac_mem_types[]; + + #else /* !CONFIG_EDAC_DEBUG */ + +-#define debugf0( ... ) +-#define debugf1( ... ) +-#define debugf2( ... ) +-#define debugf3( ... ) +-#define debugf4( ... ) ++#define debugf0( ... ) do {} while (0) ++#define debugf1( ... ) do {} while (0) ++#define debugf2( ... ) do {} while (0) ++#define debugf3( ... ) do {} while (0) ++#define debugf4( ... 
) do {} while (0) + + #endif /* !CONFIG_EDAC_DEBUG */ + +diff -urNp linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c +--- linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/edac/edac_mc_sysfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -776,7 +776,7 @@ static void edac_inst_grp_release(struct + } + + /* Intermediate show/store table */ +-static struct sysfs_ops inst_grp_ops = { ++static const struct sysfs_ops inst_grp_ops = { + .show = inst_grp_show, + .store = inst_grp_store + }; +diff -urNp linux-2.6.35.4/drivers/firewire/core-cdev.c linux-2.6.35.4/drivers/firewire/core-cdev.c +--- linux-2.6.35.4/drivers/firewire/core-cdev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/firewire/core-cdev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1195,8 +1195,7 @@ static int init_iso_resource(struct clie + int ret; + + if ((request->channels == 0 && request->bandwidth == 0) || +- request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL || +- request->bandwidth < 0) ++ request->bandwidth > BANDWIDTH_AVAILABLE_INITIAL) + return -EINVAL; + + r = kmalloc(sizeof(*r), GFP_KERNEL); +diff -urNp linux-2.6.35.4/drivers/firmware/dmi_scan.c linux-2.6.35.4/drivers/firmware/dmi_scan.c +--- linux-2.6.35.4/drivers/firmware/dmi_scan.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/firmware/dmi_scan.c 2010-09-17 20:12:09.000000000 -0400 +@@ -387,11 +387,6 @@ void __init dmi_scan_machine(void) + } + } + else { +- /* +- * no iounmap() for that ioremap(); it would be a no-op, but +- * it's so early in setup that sucker gets confused into doing +- * what it shouldn't if we actually call it. +- */ + p = dmi_ioremap(0xF0000, 0x10000); + if (p == NULL) + goto error; +diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_drv.c linux-2.6.35.4/drivers/gpu/drm/drm_drv.c +--- linux-2.6.35.4/drivers/gpu/drm/drm_drv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/drm_drv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -449,7 +449,7 @@ long drm_ioctl(struct file *filp, + + dev = file_priv->minor->dev; + atomic_inc(&dev->ioctl_count); +- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]); + ++file_priv->ioctl_count; + + DRM_DEBUG("pid=%d, cmd=0x%02x, nr=0x%02x, dev 0x%lx, auth=%d\n", +diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_fops.c linux-2.6.35.4/drivers/gpu/drm/drm_fops.c +--- linux-2.6.35.4/drivers/gpu/drm/drm_fops.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/drm_fops.c 2010-09-17 20:12:09.000000000 -0400 +@@ -67,7 +67,7 @@ static int drm_setup(struct drm_device * + } + + for (i = 0; i < ARRAY_SIZE(dev->counts); i++) +- atomic_set(&dev->counts[i], 0); ++ atomic_set_unchecked(&dev->counts[i], 0); + + dev->sigdata.lock = NULL; + +@@ -131,9 +131,9 @@ int drm_open(struct inode *inode, struct + + retcode = drm_open_helper(inode, filp, dev); + if (!retcode) { +- atomic_inc(&dev->counts[_DRM_STAT_OPENS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]); + spin_lock(&dev->count_lock); +- if (!dev->open_count++) { ++ if (atomic_inc_return(&dev->open_count) == 1) { + spin_unlock(&dev->count_lock); + retcode = drm_setup(dev); + goto out; +@@ -474,7 +474,7 @@ int drm_release(struct inode *inode, str + + lock_kernel(); + +- DRM_DEBUG("open_count = %d\n", dev->open_count); ++ DRM_DEBUG("open_count = %d\n", atomic_read(&dev->open_count)); + + if (dev->driver->preclose) + dev->driver->preclose(dev, file_priv); +@@ -486,7 +486,7 @@ 
int drm_release(struct inode *inode, str + DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n", + task_pid_nr(current), + (long)old_encode_dev(file_priv->minor->device), +- dev->open_count); ++ atomic_read(&dev->open_count)); + + /* if the master has gone away we can't do anything with the lock */ + if (file_priv->minor->master) +@@ -567,9 +567,9 @@ int drm_release(struct inode *inode, str + * End inline drm_release + */ + +- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]); + spin_lock(&dev->count_lock); +- if (!--dev->open_count) { ++ if (atomic_dec_and_test(&dev->open_count)) { + if (atomic_read(&dev->ioctl_count)) { + DRM_ERROR("Device busy: %d\n", + atomic_read(&dev->ioctl_count)); +diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c +--- linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/drm_ioctl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -283,7 +283,7 @@ int drm_getstats(struct drm_device *dev, + stats->data[i].value = + (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0); + else +- stats->data[i].value = atomic_read(&dev->counts[i]); ++ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]); + stats->data[i].type = dev->types[i]; + } + +diff -urNp linux-2.6.35.4/drivers/gpu/drm/drm_lock.c linux-2.6.35.4/drivers/gpu/drm/drm_lock.c +--- linux-2.6.35.4/drivers/gpu/drm/drm_lock.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/drm_lock.c 2010-09-17 20:12:09.000000000 -0400 +@@ -87,7 +87,7 @@ int drm_lock(struct drm_device *dev, voi + if (drm_lock_take(&master->lock, lock->context)) { + master->lock.file_priv = file_priv; + master->lock.lock_time = jiffies; +- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]); + break; /* Got lock */ + } + +@@ -165,7 +165,7 @@ int drm_unlock(struct drm_device *dev, v + return -EINVAL; + } + +- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]); + + /* kernel_context_switch isn't used by any of the x86 drm + * modules but is required by the Sparc driver. 
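The DRM hunks above are one instance of the PaX REFCOUNT split that runs through this whole patch: counters that are pure statistics and may legitimately wrap (dev->counts[], and the per-interface IPMI stats earlier in this file) are retyped as atomic_unchecked_t with matching *_unchecked accessors, so the overflow trap the patch adds to ordinary atomic_t operations never fires on them, while true lifetime counts such as dev->open_count become checked atomic_t. The sketch below is a minimal userspace approximation of that split, assuming only the naming convention visible in the hunks; the patch's real definitions are arch-specific and live elsewhere in this diff.

#include <stdio.h>

/* Illustrative only: the real PaX atomic_unchecked_t lives in the
 * arch headers and is built on the kernel's asm implementations. */
typedef struct {
	int counter;
} atomic_unchecked_t;

static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* plain wrap-around increment, deliberately without the
	 * overflow check PaX adds to the instrumented atomic_t ops */
	__sync_fetch_and_add(&v->counter, 1);
}

static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
{
	return *(volatile const int *)&v->counter;
}

int main(void)
{
	atomic_unchecked_t opens = { 0 };  /* cf. dev->counts[_DRM_STAT_OPENS] */

	atomic_inc_unchecked(&opens);
	printf("opens = %d\n", atomic_read_unchecked(&opens));
	return 0;
}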
+diff -urNp linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c +--- linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i810/i810_dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -953,8 +953,8 @@ static int i810_dma_vertex(struct drm_de + dma->buflist[vertex->idx], + vertex->discard, vertex->used); + +- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); +- atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); + sarea_priv->last_enqueue = dev_priv->counter - 1; + sarea_priv->last_dispatch = (int)hw_status[5]; + +@@ -1116,8 +1116,8 @@ static int i810_dma_mc(struct drm_device + i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used, + mc->last_render); + +- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); +- atomic_inc(&dev->counts[_DRM_STAT_DMA]); ++ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]); ++ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]); + sarea_priv->last_enqueue = dev_priv->counter - 1; + sarea_priv->last_dispatch = (int)hw_status[5]; + +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7017.c 2010-09-17 20:12:09.000000000 -0400 +@@ -402,7 +402,7 @@ static void ch7017_destroy(struct intel_ + } + } + +-struct intel_dvo_dev_ops ch7017_ops = { ++const struct intel_dvo_dev_ops ch7017_ops = { + .init = ch7017_init, + .detect = ch7017_detect, + .mode_valid = ch7017_mode_valid, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ch7xxx.c 2010-09-17 20:12:09.000000000 -0400 +@@ -322,7 +322,7 @@ static void ch7xxx_destroy(struct intel_ + } + } + +-struct intel_dvo_dev_ops ch7xxx_ops = { ++const struct intel_dvo_dev_ops ch7xxx_ops = { + .init = ch7xxx_init, + .detect = ch7xxx_detect, + .mode_valid = ch7xxx_mode_valid, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h +--- linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo.h 2010-09-17 20:12:09.000000000 -0400 +@@ -125,23 +125,23 @@ struct intel_dvo_dev_ops { + * + * \return singly-linked list of modes or NULL if no modes found. 
+ */ +- struct drm_display_mode *(*get_modes)(struct intel_dvo_device *dvo); ++ struct drm_display_mode *(* const get_modes)(struct intel_dvo_device *dvo); + + /** + * Clean up driver-specific bits of the output + */ +- void (*destroy) (struct intel_dvo_device *dvo); ++ void (* const destroy) (struct intel_dvo_device *dvo); + + /** + * Debugging hook to dump device registers to log file + */ +- void (*dump_regs)(struct intel_dvo_device *dvo); ++ void (* const dump_regs)(struct intel_dvo_device *dvo); + }; + +-extern struct intel_dvo_dev_ops sil164_ops; +-extern struct intel_dvo_dev_ops ch7xxx_ops; +-extern struct intel_dvo_dev_ops ivch_ops; +-extern struct intel_dvo_dev_ops tfp410_ops; +-extern struct intel_dvo_dev_ops ch7017_ops; ++extern const struct intel_dvo_dev_ops sil164_ops; ++extern const struct intel_dvo_dev_ops ch7xxx_ops; ++extern const struct intel_dvo_dev_ops ivch_ops; ++extern const struct intel_dvo_dev_ops tfp410_ops; ++extern const struct intel_dvo_dev_ops ch7017_ops; + + #endif /* _INTEL_DVO_H */ +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_ivch.c 2010-09-17 20:12:09.000000000 -0400 +@@ -412,7 +412,7 @@ static void ivch_destroy(struct intel_dv + } + } + +-struct intel_dvo_dev_ops ivch_ops= { ++const struct intel_dvo_dev_ops ivch_ops= { + .init = ivch_init, + .dpms = ivch_dpms, + .mode_valid = ivch_mode_valid, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_sil164.c 2010-09-17 20:12:09.000000000 -0400 +@@ -254,7 +254,7 @@ static void sil164_destroy(struct intel_ + } + } + +-struct intel_dvo_dev_ops sil164_ops = { ++const struct intel_dvo_dev_ops sil164_ops = { + .init = sil164_init, + .detect = sil164_detect, + .mode_valid = sil164_mode_valid, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/dvo_tfp410.c 2010-09-17 20:12:09.000000000 -0400 +@@ -295,7 +295,7 @@ static void tfp410_destroy(struct intel_ + } + } + +-struct intel_dvo_dev_ops tfp410_ops = { ++const struct intel_dvo_dev_ops tfp410_ops = { + .init = tfp410_init, + .detect = tfp410_detect, + .mode_valid = tfp410_mode_valid, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/i915_dma.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1342,7 +1342,7 @@ static bool i915_switcheroo_can_switch(s + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (atomic_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff -urNp linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c +--- linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/i915/i915_drv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -491,7 +491,7 @@ const struct dev_pm_ops i915_pm_ops = { + .restore = i915_pm_resume, + }; + +-static 
struct vm_operations_struct i915_gem_vm_ops = { ++static const struct vm_operations_struct i915_gem_vm_ops = { + .fault = i915_gem_fault, + .open = drm_gem_vm_open, + .close = drm_gem_vm_close, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c +--- linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_backlight.c 2010-09-17 20:12:09.000000000 -0400 +@@ -58,7 +58,7 @@ static int nv40_set_intensity(struct bac + return 0; + } + +-static struct backlight_ops nv40_bl_ops = { ++static const struct backlight_ops nv40_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nv40_get_intensity, + .update_status = nv40_set_intensity, +@@ -81,7 +81,7 @@ static int nv50_set_intensity(struct bac + return 0; + } + +-static struct backlight_ops nv50_bl_ops = { ++static const struct backlight_ops nv50_bl_ops = { + .options = BL_CORE_SUSPENDRESUME, + .get_brightness = nv50_get_intensity, + .update_status = nv50_set_intensity, +diff -urNp linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c +--- linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/nouveau/nouveau_state.c 2010-09-17 20:12:09.000000000 -0400 +@@ -395,7 +395,7 @@ static bool nouveau_switcheroo_can_switc + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (atomic_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c +--- linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/radeon/mkregtable.c 2010-09-17 20:12:09.000000000 -0400 +@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, + regex_t mask_rex; + regmatch_t match[4]; + char buf[1024]; +- size_t end; ++ long end; + int len; + int done = 0; + int r; + unsigned o; + struct offset *offset; + char last_reg_s[10]; +- int last_reg; ++ unsigned long last_reg; + + if (regcomp + (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) { +diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c +--- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_device.c 2010-09-17 20:12:09.000000000 -0400 +@@ -562,7 +562,7 @@ static bool radeon_switcheroo_can_switch + bool can_switch; + + spin_lock(&dev->count_lock); +- can_switch = (dev->open_count == 0); ++ can_switch = (atomic_read(&dev->open_count) == 0); + spin_unlock(&dev->count_lock); + return can_switch; + } +diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c +--- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_display.c 2010-09-17 20:12:09.000000000 -0400 +@@ -559,7 +559,7 @@ static void radeon_compute_pll_legacy(st + + if (pll->flags & RADEON_PLL_PREFER_CLOSEST_LOWER) { + error = freq - current_freq; +- error = error < 0 ? 0xffffffff : error; ++ error = (int32_t)error < 0 ? 
0xffffffff : error; + } else + error = abs(current_freq - freq); + vco_diff = abs(vco - best_vco); +diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c +--- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_state.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_de + if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS) + sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS; + +- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, ++ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes, + sarea_priv->nbox * sizeof(depth_boxes[0]))) + return -EFAULT; + +@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm + { + drm_radeon_private_t *dev_priv = dev->dev_private; + drm_radeon_getparam_t *param = data; +- int value; ++ int value = 0; + + DRM_DEBUG("pid=%d\n", DRM_CURRENTPID); + +diff -urNp linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c +--- linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/radeon/radeon_ttm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -601,8 +601,9 @@ void radeon_ttm_fini(struct radeon_devic + DRM_INFO("radeon: ttm finalized\n"); + } + +-static struct vm_operations_struct radeon_ttm_vm_ops; +-static const struct vm_operations_struct *ttm_vm_ops = NULL; ++extern int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf); ++extern void ttm_bo_vm_open(struct vm_area_struct *vma); ++extern void ttm_bo_vm_close(struct vm_area_struct *vma); + + static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + { +@@ -610,17 +611,22 @@ static int radeon_ttm_fault(struct vm_ar + struct radeon_device *rdev; + int r; + +- bo = (struct ttm_buffer_object *)vma->vm_private_data; +- if (bo == NULL) { ++ bo = (struct ttm_buffer_object *)vma->vm_private_data; ++ if (!bo) + return VM_FAULT_NOPAGE; +- } + rdev = radeon_get_rdev(bo->bdev); + mutex_lock(&rdev->vram_mutex); +- r = ttm_vm_ops->fault(vma, vmf); ++ r = ttm_bo_vm_fault(vma, vmf); + mutex_unlock(&rdev->vram_mutex); + return r; + } + ++static const struct vm_operations_struct radeon_ttm_vm_ops = { ++ .fault = radeon_ttm_fault, ++ .open = ttm_bo_vm_open, ++ .close = ttm_bo_vm_close ++}; ++ + int radeon_mmap(struct file *filp, struct vm_area_struct *vma) + { + struct drm_file *file_priv; +@@ -633,18 +639,11 @@ int radeon_mmap(struct file *filp, struc + + file_priv = (struct drm_file *)filp->private_data; + rdev = file_priv->minor->dev->dev_private; +- if (rdev == NULL) { ++ if (!rdev) + return -EINVAL; +- } + r = ttm_bo_mmap(filp, vma, &rdev->mman.bdev); +- if (unlikely(r != 0)) { ++ if (r) + return r; +- } +- if (unlikely(ttm_vm_ops == NULL)) { +- ttm_vm_ops = vma->vm_ops; +- radeon_ttm_vm_ops = *ttm_vm_ops; +- radeon_ttm_vm_ops.fault = &radeon_ttm_fault; +- } + vma->vm_ops = &radeon_ttm_vm_ops; + return 0; + } +diff -urNp linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c +--- linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo.c 2010-09-17 20:12:09.000000000 -0400 +@@ -47,7 +47,7 @@ + #include <linux/module.h> + + #define TTM_ASSERT_LOCKED(param) +-#define TTM_DEBUG(fmt, arg...) ++#define TTM_DEBUG(fmt, arg...) 
do {} while (0) + #define TTM_BO_HASH_ORDER 13 + + static int ttm_bo_setup_vm(struct ttm_buffer_object *bo); +diff -urNp linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c +--- linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_bo_vm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -69,11 +69,11 @@ static struct ttm_buffer_object *ttm_bo_ + return best_bo; + } + +-static int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) ++int ttm_bo_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) + { + struct ttm_buffer_object *bo = (struct ttm_buffer_object *) + vma->vm_private_data; +- struct ttm_bo_device *bdev = bo->bdev; ++ struct ttm_bo_device *bdev; + unsigned long page_offset; + unsigned long page_last; + unsigned long pfn; +@@ -84,6 +84,10 @@ static int ttm_bo_vm_fault(struct vm_are + unsigned long address = (unsigned long)vmf->virtual_address; + int retval = VM_FAULT_NOPAGE; + ++ if (!bo) ++ return VM_FAULT_NOPAGE; ++ bdev = bo->bdev; ++ + /* + * Work around locking order reversal in fault / nopfn + * between mmap_sem and bo_reserve: Perform a trylock operation +@@ -213,7 +217,7 @@ out_unlock: + return retval; + } + +-static void ttm_bo_vm_open(struct vm_area_struct *vma) ++void ttm_bo_vm_open(struct vm_area_struct *vma) + { + struct ttm_buffer_object *bo = + (struct ttm_buffer_object *)vma->vm_private_data; +@@ -221,7 +225,7 @@ static void ttm_bo_vm_open(struct vm_are + (void)ttm_bo_reference(bo); + } + +-static void ttm_bo_vm_close(struct vm_area_struct *vma) ++void ttm_bo_vm_close(struct vm_area_struct *vma) + { + struct ttm_buffer_object *bo = (struct ttm_buffer_object *)vma->vm_private_data; + +diff -urNp linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c +--- linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/gpu/drm/ttm/ttm_global.c 2010-09-17 20:12:09.000000000 -0400 +@@ -36,7 +36,7 @@ + struct ttm_global_item { + struct mutex mutex; + void *object; +- int refcount; ++ atomic_t refcount; + }; + + static struct ttm_global_item glob[TTM_GLOBAL_NUM]; +@@ -49,7 +49,7 @@ void ttm_global_init(void) + struct ttm_global_item *item = &glob[i]; + mutex_init(&item->mutex); + item->object = NULL; +- item->refcount = 0; ++ atomic_set(&item->refcount, 0); + } + } + +@@ -59,7 +59,7 @@ void ttm_global_release(void) + for (i = 0; i < TTM_GLOBAL_NUM; ++i) { + struct ttm_global_item *item = &glob[i]; + BUG_ON(item->object != NULL); +- BUG_ON(item->refcount != 0); ++ BUG_ON(atomic_read(&item->refcount) != 0); + } + } + +@@ -70,7 +70,7 @@ int ttm_global_item_ref(struct ttm_globa + void *object; + + mutex_lock(&item->mutex); +- if (item->refcount == 0) { ++ if (atomic_read(&item->refcount) == 0) { + item->object = kzalloc(ref->size, GFP_KERNEL); + if (unlikely(item->object == NULL)) { + ret = -ENOMEM; +@@ -83,7 +83,7 @@ int ttm_global_item_ref(struct ttm_globa + goto out_err; + + } +- ++item->refcount; ++ atomic_inc(&item->refcount); + ref->object = item->object; + object = item->object; + mutex_unlock(&item->mutex); +@@ -100,9 +100,9 @@ void ttm_global_item_unref(struct ttm_gl + struct ttm_global_item *item = &glob[ref->global_type]; + + mutex_lock(&item->mutex); +- BUG_ON(item->refcount == 0); ++ BUG_ON(atomic_read(&item->refcount) == 0); + BUG_ON(ref->object != item->object); +- if (--item->refcount == 0) { ++ if (atomic_dec_and_test(&item->refcount)) { + 
ref->release(ref); + item->object = NULL; + } +diff -urNp linux-2.6.35.4/drivers/hid/usbhid/hiddev.c linux-2.6.35.4/drivers/hid/usbhid/hiddev.c +--- linux-2.6.35.4/drivers/hid/usbhid/hiddev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/hid/usbhid/hiddev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -616,7 +616,7 @@ static long hiddev_ioctl(struct file *fi + return put_user(HID_VERSION, (int __user *)arg); + + case HIDIOCAPPLICATION: +- if (arg < 0 || arg >= hid->maxapplication) ++ if (arg >= hid->maxapplication) + return -EINVAL; + + for (i = 0; i < hid->maxcollection; i++) +diff -urNp linux-2.6.35.4/drivers/hwmon/k8temp.c linux-2.6.35.4/drivers/hwmon/k8temp.c +--- linux-2.6.35.4/drivers/hwmon/k8temp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/hwmon/k8temp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -138,7 +138,7 @@ static DEVICE_ATTR(name, S_IRUGO, show_n + + static const struct pci_device_id k8temp_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) }, +- { 0 }, ++ { 0, 0, 0, 0, 0, 0, 0 }, + }; + + MODULE_DEVICE_TABLE(pci, k8temp_ids); +diff -urNp linux-2.6.35.4/drivers/hwmon/sis5595.c linux-2.6.35.4/drivers/hwmon/sis5595.c +--- linux-2.6.35.4/drivers/hwmon/sis5595.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/hwmon/sis5595.c 2010-09-17 20:12:09.000000000 -0400 +@@ -699,7 +699,7 @@ static struct sis5595_data *sis5595_upda + + static const struct pci_device_id sis5595_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, sis5595_pci_ids); +diff -urNp linux-2.6.35.4/drivers/hwmon/via686a.c linux-2.6.35.4/drivers/hwmon/via686a.c +--- linux-2.6.35.4/drivers/hwmon/via686a.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/hwmon/via686a.c 2010-09-17 20:12:09.000000000 -0400 +@@ -769,7 +769,7 @@ static struct via686a_data *via686a_upda + + static const struct pci_device_id via686a_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_82C686_4) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, via686a_pci_ids); +diff -urNp linux-2.6.35.4/drivers/hwmon/vt8231.c linux-2.6.35.4/drivers/hwmon/vt8231.c +--- linux-2.6.35.4/drivers/hwmon/vt8231.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/hwmon/vt8231.c 2010-09-17 20:12:09.000000000 -0400 +@@ -699,7 +699,7 @@ static struct platform_driver vt8231_dri + + static const struct pci_device_id vt8231_pci_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8231_4) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, vt8231_pci_ids); +diff -urNp linux-2.6.35.4/drivers/hwmon/w83791d.c linux-2.6.35.4/drivers/hwmon/w83791d.c +--- linux-2.6.35.4/drivers/hwmon/w83791d.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/hwmon/w83791d.c 2010-09-17 20:12:09.000000000 -0400 +@@ -329,8 +329,8 @@ static int w83791d_detect(struct i2c_cli + struct i2c_board_info *info); + static int w83791d_remove(struct i2c_client *client); + +-static int w83791d_read(struct i2c_client *client, u8 register); +-static int w83791d_write(struct i2c_client *client, u8 register, u8 value); ++static int w83791d_read(struct i2c_client *client, u8 reg); ++static int w83791d_write(struct i2c_client *client, u8 reg, u8 value); + static struct w83791d_data *w83791d_update_device(struct device *dev); + + #ifdef DEBUG +diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c +--- 
linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/i2c/busses/i2c-i801.c 2010-09-17 20:12:09.000000000 -0400 +@@ -592,7 +592,7 @@ static const struct pci_device_id i801_i + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_ICH10_5) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PCH_SMBUS) }, + { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CPT_SMBUS) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, i801_ids); +diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c +--- linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/i2c/busses/i2c-piix4.c 2010-09-17 20:12:09.000000000 -0400 +@@ -124,7 +124,7 @@ static struct dmi_system_id __devinitdat + .ident = "IBM", + .matches = { DMI_MATCH(DMI_SYS_VENDOR, "IBM"), }, + }, +- { }, ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + static int __devinit piix4_setup(struct pci_dev *PIIX4_dev, +@@ -491,7 +491,7 @@ static const struct pci_device_id piix4_ + PCI_DEVICE_ID_SERVERWORKS_HT1000SB) }, + { PCI_DEVICE(PCI_VENDOR_ID_SERVERWORKS, + PCI_DEVICE_ID_SERVERWORKS_HT1100LD) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE (pci, piix4_ids); +diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c +--- linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/i2c/busses/i2c-sis630.c 2010-09-17 20:12:09.000000000 -0400 +@@ -471,7 +471,7 @@ static struct i2c_adapter sis630_adapter + static const struct pci_device_id sis630_ids[] __devinitconst = { + { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_503) }, + { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_LPC) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE (pci, sis630_ids); +diff -urNp linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c +--- linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/i2c/busses/i2c-sis96x.c 2010-09-17 20:12:09.000000000 -0400 +@@ -247,7 +247,7 @@ static struct i2c_adapter sis96x_adapter + + static const struct pci_device_id sis96x_ids[] = { + { PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_SMBUS) }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE (pci, sis96x_ids); +diff -urNp linux-2.6.35.4/drivers/ide/ide-cd.c linux-2.6.35.4/drivers/ide/ide-cd.c +--- linux-2.6.35.4/drivers/ide/ide-cd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ide/ide-cd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -774,7 +774,7 @@ static void cdrom_do_block_pc(ide_drive_ + alignment = queue_dma_alignment(q) | q->dma_pad_mask; + if ((unsigned long)buf & alignment + || blk_rq_bytes(rq) & q->dma_pad_mask +- || object_is_on_stack(buf)) ++ || object_starts_on_stack(buf)) + drive->dma = 0; + } + } +diff -urNp linux-2.6.35.4/drivers/ieee1394/dv1394.c linux-2.6.35.4/drivers/ieee1394/dv1394.c +--- linux-2.6.35.4/drivers/ieee1394/dv1394.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/dv1394.c 2010-09-17 20:12:09.000000000 -0400 +@@ -739,7 +739,7 @@ static void frame_prepare(struct video_c + based upon DIF section and sequence + */ + +-static void inline ++static inline void + frame_put_packet (struct frame *f, struct packet *p) + { + int section_type = p->data[0] >> 5; /* section type is in bits 5 - 7 */ 
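A recurring change in the device-table hunks above (k8temp, sis5595, via686a, vt8231, i801, piix4, sis630, sis96x, and the ieee1394 tables that follow) replaces the brace-elided terminator { } or { 0, } with a fully spelled-out zero entry. Semantically the two forms are identical: both zero-initialize the sentinel that the MODULE_DEVICE_TABLE walkers stop on; the explicit form presumably just keeps the tables warning-clean under the patch's stricter initializer handling. A minimal illustration, using a made-up demo_device_id type rather than the real ieee1394_device_id:

#include <stdio.h>

/* Hypothetical stand-in for ieee1394_device_id / pci_device_id. */
struct demo_device_id {
	unsigned int match_flags;
	unsigned int specifier_id;
	unsigned int version;
};

static const struct demo_device_id demo_table[] = {
	{ .match_flags = 0x3, .specifier_id = 0x00005e, .version = 0x1 },
	{ 0, 0, 0 }	/* terminator: all-zero sentinel, same as { } */
};

int main(void)
{
	const struct demo_device_id *id;

	/* walk until the zeroed sentinel, as the device core does */
	for (id = demo_table; id->match_flags; id++)
		printf("entry: spec %#x ver %#x\n",
		       id->specifier_id, id->version);
	return 0;
}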
+@@ -2179,7 +2179,7 @@ static const struct ieee1394_device_id d + .specifier_id = AVC_UNIT_SPEC_ID_ENTRY & 0xffffff, + .version = AVC_SW_VERSION_ENTRY & 0xffffff + }, +- { } ++ { 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(ieee1394, dv1394_id_table); +diff -urNp linux-2.6.35.4/drivers/ieee1394/eth1394.c linux-2.6.35.4/drivers/ieee1394/eth1394.c +--- linux-2.6.35.4/drivers/ieee1394/eth1394.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/eth1394.c 2010-09-17 20:12:09.000000000 -0400 +@@ -446,7 +446,7 @@ static const struct ieee1394_device_id e + .specifier_id = ETHER1394_GASP_SPECIFIER_ID, + .version = ETHER1394_GASP_VERSION, + }, +- {} ++ { 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(ieee1394, eth1394_id_table); +diff -urNp linux-2.6.35.4/drivers/ieee1394/hosts.c linux-2.6.35.4/drivers/ieee1394/hosts.c +--- linux-2.6.35.4/drivers/ieee1394/hosts.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/hosts.c 2010-09-17 20:12:09.000000000 -0400 +@@ -78,6 +78,7 @@ static int dummy_isoctl(struct hpsb_iso + } + + static struct hpsb_host_driver dummy_driver = { ++ .name = "dummy", + .transmit_packet = dummy_transmit_packet, + .devctl = dummy_devctl, + .isoctl = dummy_isoctl +diff -urNp linux-2.6.35.4/drivers/ieee1394/ohci1394.c linux-2.6.35.4/drivers/ieee1394/ohci1394.c +--- linux-2.6.35.4/drivers/ieee1394/ohci1394.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/ohci1394.c 2010-09-17 20:12:09.000000000 -0400 +@@ -148,9 +148,9 @@ printk(level "%s: " fmt "\n" , OHCI1394_ + printk(level "%s: fw-host%d: " fmt "\n" , OHCI1394_DRIVER_NAME, ohci->host->id , ## args) + + /* Module Parameters */ +-static int phys_dma = 1; ++static int phys_dma; + module_param(phys_dma, int, 0444); +-MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 1)."); ++MODULE_PARM_DESC(phys_dma, "Enable physical DMA (default = 0)."); + + static void dma_trm_tasklet(unsigned long data); + static void dma_trm_reset(struct dma_trm_ctx *d); +@@ -3445,7 +3445,7 @@ static struct pci_device_id ohci1394_pci + .subvendor = PCI_ANY_ID, + .subdevice = PCI_ANY_ID, + }, +- { 0, }, ++ { 0, 0, 0, 0, 0, 0, 0 }, + }; + + MODULE_DEVICE_TABLE(pci, ohci1394_pci_tbl); +diff -urNp linux-2.6.35.4/drivers/ieee1394/raw1394.c linux-2.6.35.4/drivers/ieee1394/raw1394.c +--- linux-2.6.35.4/drivers/ieee1394/raw1394.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/raw1394.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3002,7 +3002,7 @@ static const struct ieee1394_device_id r + .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, + .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, + .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff}, +- {} ++ { 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(ieee1394, raw1394_id_table); +diff -urNp linux-2.6.35.4/drivers/ieee1394/sbp2.c linux-2.6.35.4/drivers/ieee1394/sbp2.c +--- linux-2.6.35.4/drivers/ieee1394/sbp2.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/sbp2.c 2010-09-17 20:12:09.000000000 -0400 +@@ -289,7 +289,7 @@ static const struct ieee1394_device_id s + .match_flags = IEEE1394_MATCH_SPECIFIER_ID | IEEE1394_MATCH_VERSION, + .specifier_id = SBP2_UNIT_SPEC_ID_ENTRY & 0xffffff, + .version = SBP2_SW_VERSION_ENTRY & 0xffffff}, +- {} ++ { 0, 0, 0, 0, 0, 0 } + }; + MODULE_DEVICE_TABLE(ieee1394, sbp2_id_table); + +@@ -2110,7 +2110,7 @@ MODULE_DESCRIPTION("IEEE-1394 SBP-2 prot + MODULE_SUPPORTED_DEVICE(SBP2_DEVICE_NAME); + MODULE_LICENSE("GPL"); + +-static int 
sbp2_module_init(void) ++static int __init sbp2_module_init(void) + { + int ret; + +diff -urNp linux-2.6.35.4/drivers/ieee1394/video1394.c linux-2.6.35.4/drivers/ieee1394/video1394.c +--- linux-2.6.35.4/drivers/ieee1394/video1394.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/ieee1394/video1394.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1312,7 +1312,7 @@ static const struct ieee1394_device_id v + .specifier_id = CAMERA_UNIT_SPEC_ID_ENTRY & 0xffffff, + .version = (CAMERA_SW_VERSION_ENTRY + 2) & 0xffffff + }, +- { } ++ { 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(ieee1394, video1394_id_table); +diff -urNp linux-2.6.35.4/drivers/infiniband/core/cm.c linux-2.6.35.4/drivers/infiniband/core/cm.c +--- linux-2.6.35.4/drivers/infiniband/core/cm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/infiniband/core/cm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -113,7 +113,7 @@ static char const counter_group_names[CM + + struct cm_counter_group { + struct kobject obj; +- atomic_long_t counter[CM_ATTR_COUNT]; ++ atomic_long_unchecked_t counter[CM_ATTR_COUNT]; + }; + + struct cm_counter_attribute { +@@ -1387,7 +1387,7 @@ static void cm_dup_req_handler(struct cm + struct ib_mad_send_buf *msg = NULL; + int ret; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REQ_COUNTER]); + + /* Quick state check to discard duplicate REQs. */ +@@ -1765,7 +1765,7 @@ static void cm_dup_rep_handler(struct cm + if (!cm_id_priv) + return; + +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_REP_COUNTER]); + ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg); + if (ret) +@@ -1932,7 +1932,7 @@ static int cm_rtu_handler(struct cm_work + if (cm_id_priv->id.state != IB_CM_REP_SENT && + cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) { + spin_unlock_irq(&cm_id_priv->lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_RTU_COUNTER]); + goto out; + } +@@ -2111,7 +2111,7 @@ static int cm_dreq_handler(struct cm_wor + cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id, + dreq_msg->local_comm_id); + if (!cm_id_priv) { +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + cm_issue_drep(work->port, work->mad_recv_wc); + return -EINVAL; +@@ -2132,7 +2132,7 @@ static int cm_dreq_handler(struct cm_wor + case IB_CM_MRA_REP_RCVD: + break; + case IB_CM_TIMEWAIT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_DREQ_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2146,7 +2146,7 @@ static int cm_dreq_handler(struct cm_wor + cm_free_msg(msg); + goto deref; + case IB_CM_DREQ_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. 
+ counter[CM_DREQ_COUNTER]); + goto unlock; + default: +@@ -2502,7 +2502,7 @@ static int cm_mra_handler(struct cm_work + ib_modify_mad(cm_id_priv->av.port->mad_agent, + cm_id_priv->msg, timeout)) { + if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD) +- atomic_long_inc(&work->port-> ++ atomic_long_inc_unchecked(&work->port-> + counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + goto out; +@@ -2511,7 +2511,7 @@ static int cm_mra_handler(struct cm_work + break; + case IB_CM_MRA_REQ_RCVD: + case IB_CM_MRA_REP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_MRA_COUNTER]); + /* fall through */ + default: +@@ -2673,7 +2673,7 @@ static int cm_lap_handler(struct cm_work + case IB_CM_LAP_IDLE: + break; + case IB_CM_MRA_LAP_SENT: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg)) + goto unlock; +@@ -2689,7 +2689,7 @@ static int cm_lap_handler(struct cm_work + cm_free_msg(msg); + goto deref; + case IB_CM_LAP_RCVD: +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_LAP_COUNTER]); + goto unlock; + default: +@@ -2973,7 +2973,7 @@ static int cm_sidr_req_handler(struct cm + cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv); + if (cur_cm_id_priv) { + spin_unlock_irq(&cm.lock); +- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES]. ++ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES]. + counter[CM_SIDR_REQ_COUNTER]); + goto out; /* Duplicate message. */ + } +@@ -3184,10 +3184,10 @@ static void cm_send_handler(struct ib_ma + if (!msg->context[0] && (attr_index != CM_REJ_COUNTER)) + msg->retries = 1; + +- atomic_long_add(1 + msg->retries, ++ atomic_long_add_unchecked(1 + msg->retries, + &port->counter_group[CM_XMIT].counter[attr_index]); + if (msg->retries) +- atomic_long_add(msg->retries, ++ atomic_long_add_unchecked(msg->retries, + &port->counter_group[CM_XMIT_RETRIES]. + counter[attr_index]); + +@@ -3397,7 +3397,7 @@ static void cm_recv_handler(struct ib_ma + } + + attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id); +- atomic_long_inc(&port->counter_group[CM_RECV]. ++ atomic_long_inc_unchecked(&port->counter_group[CM_RECV]. 
+ counter[attr_id - CM_ATTR_ID_OFFSET]); + + work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths, +@@ -3595,7 +3595,7 @@ static ssize_t cm_show_counter(struct ko + cm_attr = container_of(attr, struct cm_counter_attribute, attr); + + return sprintf(buf, "%ld\n", +- atomic_long_read(&group->counter[cm_attr->index])); ++ atomic_long_read_unchecked(&group->counter[cm_attr->index])); + } + + static const struct sysfs_ops cm_counter_ops = { +diff -urNp linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h +--- linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/infiniband/hw/qib/qib.h 2010-09-17 20:12:09.000000000 -0400 +@@ -50,6 +50,7 @@ + #include <linux/completion.h> + #include <linux/kref.h> + #include <linux/sched.h> ++#include <linux/slab.h> + + #include "qib_common.h" + #include "qib_verbs.h" +diff -urNp linux-2.6.35.4/drivers/input/keyboard/atkbd.c linux-2.6.35.4/drivers/input/keyboard/atkbd.c +--- linux-2.6.35.4/drivers/input/keyboard/atkbd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/keyboard/atkbd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1240,7 +1240,7 @@ static struct serio_device_id atkbd_seri + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, +- { 0 } ++ { 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(serio, atkbd_serio_ids); +diff -urNp linux-2.6.35.4/drivers/input/mouse/lifebook.c linux-2.6.35.4/drivers/input/mouse/lifebook.c +--- linux-2.6.35.4/drivers/input/mouse/lifebook.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/mouse/lifebook.c 2010-09-17 20:12:09.000000000 -0400 +@@ -123,7 +123,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_PRODUCT_NAME, "LifeBook B142"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL} + }; + + void __init lifebook_module_init(void) +diff -urNp linux-2.6.35.4/drivers/input/mouse/psmouse-base.c linux-2.6.35.4/drivers/input/mouse/psmouse-base.c +--- linux-2.6.35.4/drivers/input/mouse/psmouse-base.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/mouse/psmouse-base.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1460,7 +1460,7 @@ static struct serio_device_id psmouse_se + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, +- { 0 } ++ { 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(serio, psmouse_serio_ids); +diff -urNp linux-2.6.35.4/drivers/input/mouse/synaptics.c linux-2.6.35.4/drivers/input/mouse/synaptics.c +--- linux-2.6.35.4/drivers/input/mouse/synaptics.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/mouse/synaptics.c 2010-09-17 20:12:09.000000000 -0400 +@@ -476,7 +476,7 @@ static void synaptics_process_packet(str + break; + case 2: + if (SYN_MODEL_PEN(priv->model_id)) +- ; /* Nothing, treat a pen as a single finger */ ++ break; /* Nothing, treat a pen as a single finger */ + break; + case 4 ... 
15: + if (SYN_CAP_PALMDETECT(priv->capabilities)) +@@ -701,7 +701,6 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_SYS_VENDOR, "TOSHIBA"), + DMI_MATCH(DMI_PRODUCT_NAME, "PORTEGE M300"), + }, +- + }, + { + /* Toshiba Portege M300 */ +@@ -710,9 +709,8 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_PRODUCT_NAME, "Portable PC"), + DMI_MATCH(DMI_PRODUCT_VERSION, "Version 1.0"), + }, +- + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + #endif + }; + +diff -urNp linux-2.6.35.4/drivers/input/mousedev.c linux-2.6.35.4/drivers/input/mousedev.c +--- linux-2.6.35.4/drivers/input/mousedev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/mousedev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -754,7 +754,7 @@ static ssize_t mousedev_read(struct file + + spin_unlock_irq(&client->packet_lock); + +- if (copy_to_user(buffer, data, count)) ++ if (count > sizeof(data) || copy_to_user(buffer, data, count)) + return -EFAULT; + + return count; +@@ -1051,7 +1051,7 @@ static struct input_handler mousedev_han + + #ifdef CONFIG_INPUT_MOUSEDEV_PSAUX + static struct miscdevice psaux_mouse = { +- PSMOUSE_MINOR, "psaux", &mousedev_fops ++ PSMOUSE_MINOR, "psaux", &mousedev_fops, {NULL, NULL}, NULL, NULL + }; + static int psaux_registered; + #endif +diff -urNp linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h +--- linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/serio/i8042-x86ia64io.h 2010-09-17 20:12:09.000000000 -0400 +@@ -183,7 +183,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_PRODUCT_VERSION, "Rev 1"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + /* +@@ -413,7 +413,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_PRODUCT_VERSION, "0100"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + static const struct dmi_system_id __initconst i8042_dmi_reset_table[] = { +@@ -487,7 +487,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_PRODUCT_NAME, "Vostro 1720"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + #ifdef CONFIG_PNP +@@ -506,7 +506,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + static const struct dmi_system_id __initconst i8042_dmi_laptop_table[] = { +@@ -530,7 +530,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_CHASSIS_TYPE, "14"), /* Sub-Notebook */ + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + #endif + +@@ -604,7 +604,7 @@ static const struct dmi_system_id __init + DMI_MATCH(DMI_PRODUCT_NAME, "TravelMate 4280"), + }, + }, +- { } ++ { NULL, NULL, {DMI_MATCH(DMI_NONE, {0})}, NULL } + }; + + #endif /* CONFIG_X86 */ +diff -urNp linux-2.6.35.4/drivers/input/serio/serio_raw.c linux-2.6.35.4/drivers/input/serio/serio_raw.c +--- linux-2.6.35.4/drivers/input/serio/serio_raw.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/input/serio/serio_raw.c 2010-09-17 20:12:09.000000000 -0400 +@@ -376,7 +376,7 @@ static struct serio_device_id serio_raw_ + .id = SERIO_ANY, + .extra = SERIO_ANY, + }, +- { 0 } ++ { 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(serio, serio_raw_serio_ids); +diff -urNp linux-2.6.35.4/drivers/isdn/gigaset/common.c linux-2.6.35.4/drivers/isdn/gigaset/common.c +--- 
linux-2.6.35.4/drivers/isdn/gigaset/common.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/isdn/gigaset/common.c 2010-09-17 20:12:09.000000000 -0400 +@@ -723,7 +723,7 @@ struct cardstate *gigaset_initcs(struct + cs->commands_pending = 0; + cs->cur_at_seq = 0; + cs->gotfwver = -1; +- cs->open_count = 0; ++ atomic_set(&cs->open_count, 0); + cs->dev = NULL; + cs->tty = NULL; + cs->tty_dev = NULL; +diff -urNp linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h +--- linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/isdn/gigaset/gigaset.h 2010-09-17 20:12:09.000000000 -0400 +@@ -442,7 +442,7 @@ struct cardstate { + spinlock_t cmdlock; + unsigned curlen, cmdbytes; + +- unsigned open_count; ++ atomic_t open_count; + struct tty_struct *tty; + struct tasklet_struct if_wake_tasklet; + unsigned control_state; +diff -urNp linux-2.6.35.4/drivers/isdn/gigaset/interface.c linux-2.6.35.4/drivers/isdn/gigaset/interface.c +--- linux-2.6.35.4/drivers/isdn/gigaset/interface.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/isdn/gigaset/interface.c 2010-09-17 20:12:09.000000000 -0400 +@@ -160,9 +160,7 @@ static int if_open(struct tty_struct *tt + return -ERESTARTSYS; + tty->driver_data = cs; + +- ++cs->open_count; +- +- if (cs->open_count == 1) { ++ if (atomic_inc_return(&cs->open_count) == 1) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = tty; + spin_unlock_irqrestore(&cs->lock, flags); +@@ -190,10 +188,10 @@ static void if_close(struct tty_struct * + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { +- if (!--cs->open_count) { ++ if (!atomic_dec_return(&cs->open_count)) { + spin_lock_irqsave(&cs->lock, flags); + cs->tty = NULL; + spin_unlock_irqrestore(&cs->lock, flags); +@@ -228,7 +226,7 @@ static int if_ioctl(struct tty_struct *t + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else { + retval = 0; +@@ -355,7 +353,7 @@ static int if_write(struct tty_struct *t + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) { + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -389,7 +387,7 @@ static int if_write_room(struct tty_stru + if (!cs->connected) { + gig_dbg(DEBUG_IF, "not connected"); + retval = -ENODEV; +- } else if (!cs->open_count) ++ } else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) { + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -419,7 +417,7 @@ static int if_chars_in_buffer(struct tty + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); +- else if (!cs->open_count) ++ else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else if (cs->mstate != MS_LOCKED) + dev_warn(cs->dev, "can't write to unlocked device\n"); +@@ -447,7 +445,7 @@ static void if_throttle(struct tty_struc + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if 
(!cs->open_count) ++ else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else + gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); +@@ -471,7 +469,7 @@ static void if_unthrottle(struct tty_str + + if (!cs->connected) + gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ +- else if (!cs->open_count) ++ else if (!atomic_read(&cs->open_count)) + dev_warn(cs->dev, "%s: device not opened\n", __func__); + else + gig_dbg(DEBUG_IF, "%s: not implemented\n", __func__); +@@ -502,7 +500,7 @@ static void if_set_termios(struct tty_st + goto out; + } + +- if (!cs->open_count) { ++ if (!atomic_read(&cs->open_count)) { + dev_warn(cs->dev, "%s: device not opened\n", __func__); + goto out; + } +diff -urNp linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c +--- linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/isdn/hardware/avm/b1.c 2010-09-17 20:12:37.000000000 -0400 +@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capilo + } + if (left) { + if (t4file->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof(buf) || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capilo + } + if (left) { + if (config->user) { +- if (copy_from_user(buf, dp, left)) ++ if (left > sizeof(buf) || copy_from_user(buf, dp, left)) + return -EFAULT; + } else { + memcpy(buf, dp, left); +diff -urNp linux-2.6.35.4/drivers/isdn/icn/icn.c linux-2.6.35.4/drivers/isdn/icn/icn.c +--- linux-2.6.35.4/drivers/isdn/icn/icn.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/isdn/icn/icn.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char * buf, int len + if (count > len) + count = len; + if (user) { +- if (copy_from_user(msg, buf, count)) ++ if (count > sizeof(msg) || copy_from_user(msg, buf, count)) + return -EFAULT; + } else + memcpy(msg, buf, count); +diff -urNp linux-2.6.35.4/drivers/lguest/core.c linux-2.6.35.4/drivers/lguest/core.c +--- linux-2.6.35.4/drivers/lguest/core.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/lguest/core.c 2010-09-17 20:12:09.000000000 -0400 +@@ -92,9 +92,17 @@ static __init int map_switcher(void) + * it's worked so far. The end address needs +1 because __get_vm_area + * allocates an extra guard page, so we need space for that. 
+ */ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, ++ VM_ALLOC | VM_KERNEXEC, SWITCHER_ADDR, SWITCHER_ADDR ++ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#else + switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE, + VM_ALLOC, SWITCHER_ADDR, SWITCHER_ADDR + + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE); ++#endif ++ + if (!switcher_vma) { + err = -ENOMEM; + printk("lguest: could not map switcher pages high\n"); +diff -urNp linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c +--- linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/macintosh/via-pmu-backlight.c 2010-09-17 20:12:09.000000000 -0400 +@@ -15,7 +15,7 @@ + + #define MAX_PMU_LEVEL 0xFF + +-static struct backlight_ops pmu_backlight_data; ++static const struct backlight_ops pmu_backlight_data; + static DEFINE_SPINLOCK(pmu_backlight_lock); + static int sleeping, uses_pmu_bl; + static u8 bl_curve[FB_BACKLIGHT_LEVELS]; +@@ -115,7 +115,7 @@ static int pmu_backlight_get_brightness( + return bd->props.brightness; + } + +-static struct backlight_ops pmu_backlight_data = { ++static const struct backlight_ops pmu_backlight_data = { + .get_brightness = pmu_backlight_get_brightness, + .update_status = pmu_backlight_update_status, + +diff -urNp linux-2.6.35.4/drivers/macintosh/via-pmu.c linux-2.6.35.4/drivers/macintosh/via-pmu.c +--- linux-2.6.35.4/drivers/macintosh/via-pmu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/macintosh/via-pmu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2254,7 +2254,7 @@ static int pmu_sleep_valid(suspend_state + && (pmac_call_feature(PMAC_FTR_SLEEP_STATE, NULL, 0, -1) >= 0); + } + +-static struct platform_suspend_ops pmu_pm_ops = { ++static const struct platform_suspend_ops pmu_pm_ops = { + .enter = powerbook_sleep, + .valid = pmu_sleep_valid, + }; +diff -urNp linux-2.6.35.4/drivers/md/bitmap.c linux-2.6.35.4/drivers/md/bitmap.c +--- linux-2.6.35.4/drivers/md/bitmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/md/bitmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -58,7 +58,7 @@ + # if DEBUG > 0 + # define PRINTK(x...) printk(KERN_DEBUG x) + # else +-# define PRINTK(x...) ++# define PRINTK(x...) do {} while (0) + # endif + #endif + +diff -urNp linux-2.6.35.4/drivers/md/dm-table.c linux-2.6.35.4/drivers/md/dm-table.c +--- linux-2.6.35.4/drivers/md/dm-table.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/md/dm-table.c 2010-09-17 20:12:09.000000000 -0400 +@@ -363,7 +363,7 @@ static int device_area_is_invalid(struct + if (!dev_size) + return 0; + +- if ((start >= dev_size) || (start + len > dev_size)) { ++ if ((start >= dev_size) || (len > dev_size - start)) { + DMWARN("%s: %s too small for target: " + "start=%llu, len=%llu, dev_size=%llu", + dm_device_name(ti->table->md), bdevname(bdev, b), +diff -urNp linux-2.6.35.4/drivers/md/md.c linux-2.6.35.4/drivers/md/md.c +--- linux-2.6.35.4/drivers/md/md.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/md/md.c 2010-09-17 20:12:09.000000000 -0400 +@@ -6352,7 +6352,7 @@ static int md_seq_show(struct seq_file * + chunk_kb ? 
"KB" : "B"); + if (bitmap->file) { + seq_printf(seq, ", file: "); +- seq_path(seq, &bitmap->file->f_path, " \t\n"); ++ seq_path(seq, &bitmap->file->f_path, " \t\n\\"); + } + + seq_printf(seq, "\n"); +@@ -6446,7 +6446,7 @@ static int is_mddev_idle(mddev_t *mddev, + struct gendisk *disk = rdev->bdev->bd_contains->bd_disk; + curr_events = (int)part_stat_read(&disk->part0, sectors[0]) + + (int)part_stat_read(&disk->part0, sectors[1]) - +- atomic_read(&disk->sync_io); ++ atomic_read_unchecked(&disk->sync_io); + /* sync IO will cause sync_io to increase before the disk_stats + * as sync_io is counted when a request starts, and + * disk_stats is counted when it completes. +diff -urNp linux-2.6.35.4/drivers/md/md.h linux-2.6.35.4/drivers/md/md.h +--- linux-2.6.35.4/drivers/md/md.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/md/md.h 2010-09-17 20:12:09.000000000 -0400 +@@ -334,7 +334,7 @@ static inline void rdev_dec_pending(mdk_ + + static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors) + { +- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); ++ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io); + } + + struct mdk_personality +diff -urNp linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c +--- linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/media/dvb/dvb-core/dvbdev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -196,6 +196,7 @@ int dvb_register_device(struct dvb_adapt + const struct dvb_device *template, void *priv, int type) + { + struct dvb_device *dvbdev; ++ /* cannot be const, see this function */ + struct file_operations *dvbdevfops; + struct device *clsdev; + int minor; +diff -urNp linux-2.6.35.4/drivers/media/radio/radio-cadet.c linux-2.6.35.4/drivers/media/radio/radio-cadet.c +--- linux-2.6.35.4/drivers/media/radio/radio-cadet.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/media/radio/radio-cadet.c 2010-09-17 20:12:37.000000000 -0400 +@@ -347,7 +347,7 @@ static ssize_t cadet_read(struct file *f + while (i < count && dev->rdsin != dev->rdsout) + readbuf[i++] = dev->rdsbuf[dev->rdsout++]; + +- if (copy_to_user(data, readbuf, i)) ++ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i)) + return -EFAULT; + return i; + } +diff -urNp linux-2.6.35.4/drivers/message/fusion/mptbase.c linux-2.6.35.4/drivers/message/fusion/mptbase.c +--- linux-2.6.35.4/drivers/message/fusion/mptbase.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/message/fusion/mptbase.c 2010-09-17 20:12:37.000000000 -0400 +@@ -6715,8 +6715,14 @@ procmpt_iocinfo_read(char *buf, char **s + len += sprintf(buf+len, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth); + len += sprintf(buf+len, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", ++ NULL, NULL); ++#else + len += sprintf(buf+len, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", + (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma); ++#endif ++ + /* + * Rounding UP to nearest 4-kB boundary here... 
+ */ +diff -urNp linux-2.6.35.4/drivers/message/fusion/mptdebug.h linux-2.6.35.4/drivers/message/fusion/mptdebug.h +--- linux-2.6.35.4/drivers/message/fusion/mptdebug.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/message/fusion/mptdebug.h 2010-09-17 20:12:09.000000000 -0400 +@@ -71,7 +71,7 @@ + CMD; \ + } + #else +-#define MPT_CHECK_LOGGING(IOC, CMD, BITS) ++#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0) + #endif + + +diff -urNp linux-2.6.35.4/drivers/message/fusion/mptsas.c linux-2.6.35.4/drivers/message/fusion/mptsas.c +--- linux-2.6.35.4/drivers/message/fusion/mptsas.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/message/fusion/mptsas.c 2010-09-17 20:12:09.000000000 -0400 +@@ -437,6 +437,23 @@ mptsas_is_end_device(struct mptsas_devin + return 0; + } + ++static inline void ++mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) ++{ ++ if (phy_info->port_details) { ++ phy_info->port_details->rphy = rphy; ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", ++ ioc->name, rphy)); ++ } ++ ++ if (rphy) { ++ dsaswideprintk(ioc, dev_printk(KERN_DEBUG, ++ &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); ++ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", ++ ioc->name, rphy, rphy->dev.release)); ++ } ++} ++ + /* no mutex */ + static void + mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details) +@@ -475,23 +492,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *p + return NULL; + } + +-static inline void +-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy) +-{ +- if (phy_info->port_details) { +- phy_info->port_details->rphy = rphy; +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n", +- ioc->name, rphy)); +- } +- +- if (rphy) { +- dsaswideprintk(ioc, dev_printk(KERN_DEBUG, +- &rphy->dev, MYIOC_s_FMT "add:", ioc->name)); +- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n", +- ioc->name, rphy, rphy->dev.release)); +- } +-} +- + static inline struct sas_port * + mptsas_get_port(struct mptsas_phyinfo *phy_info) + { +diff -urNp linux-2.6.35.4/drivers/message/i2o/i2o_proc.c linux-2.6.35.4/drivers/message/i2o/i2o_proc.c +--- linux-2.6.35.4/drivers/message/i2o/i2o_proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/message/i2o/i2o_proc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -255,13 +255,6 @@ static char *scsi_devices[] = { + "Array Controller Device" + }; + +-static char *chtostr(u8 * chars, int n) +-{ +- char tmp[256]; +- tmp[0] = 0; +- return strncat(tmp, (char *)chars, n); +-} +- + static int i2o_report_query_status(struct seq_file *seq, int block_status, + char *group) + { +@@ -838,8 +831,7 @@ static int i2o_seq_show_ddm_table(struct + + seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id); + seq_printf(seq, "%-#8x", ddm_table.module_id); +- seq_printf(seq, "%-29s", +- chtostr(ddm_table.module_name_version, 28)); ++ seq_printf(seq, "%-.28s", ddm_table.module_name_version); + seq_printf(seq, "%9d ", ddm_table.data_size); + seq_printf(seq, "%8d", ddm_table.code_size); + +@@ -940,8 +932,8 @@ static int i2o_seq_show_drivers_stored(s + + seq_printf(seq, "%-#7x", dst->i2o_vendor_id); + seq_printf(seq, "%-#8x", dst->module_id); +- seq_printf(seq, "%-29s", chtostr(dst->module_name_version, 28)); +- seq_printf(seq, "%-9s", chtostr(dst->date, 8)); ++ seq_printf(seq, "%-.28s", dst->module_name_version); ++ seq_printf(seq, "%-.8s", dst->date); + seq_printf(seq, "%8d ", 
dst->module_size); + seq_printf(seq, "%8d ", dst->mpb_size); + seq_printf(seq, "0x%04x", dst->module_flags); +@@ -1272,14 +1264,10 @@ static int i2o_seq_show_dev_identity(str + seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0])); + seq_printf(seq, "Owner TID : %0#5x\n", work16[2]); + seq_printf(seq, "Parent TID : %0#5x\n", work16[3]); +- seq_printf(seq, "Vendor info : %s\n", +- chtostr((u8 *) (work32 + 2), 16)); +- seq_printf(seq, "Product info : %s\n", +- chtostr((u8 *) (work32 + 6), 16)); +- seq_printf(seq, "Description : %s\n", +- chtostr((u8 *) (work32 + 10), 16)); +- seq_printf(seq, "Product rev. : %s\n", +- chtostr((u8 *) (work32 + 14), 8)); ++ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2)); ++ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6)); ++ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10)); ++ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14)); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, (u8 *) (work32 + 16), +@@ -1324,10 +1312,8 @@ static int i2o_seq_show_ddm_identity(str + } + + seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid); +- seq_printf(seq, "Module name : %s\n", +- chtostr(result.module_name, 24)); +- seq_printf(seq, "Module revision : %s\n", +- chtostr(result.module_rev, 8)); ++ seq_printf(seq, "Module name : %.24s\n", result.module_name); ++ seq_printf(seq, "Module revision : %.8s\n", result.module_rev); + + seq_printf(seq, "Serial number : "); + print_serial_number(seq, result.serial_number, sizeof(result) - 36); +@@ -1358,14 +1344,10 @@ static int i2o_seq_show_uinfo(struct seq + return 0; + } + +- seq_printf(seq, "Device name : %s\n", +- chtostr(result.device_name, 64)); +- seq_printf(seq, "Service name : %s\n", +- chtostr(result.service_name, 64)); +- seq_printf(seq, "Physical name : %s\n", +- chtostr(result.physical_location, 64)); +- seq_printf(seq, "Instance number : %s\n", +- chtostr(result.instance_number, 4)); ++ seq_printf(seq, "Device name : %.64s\n", result.device_name); ++ seq_printf(seq, "Service name : %.64s\n", result.service_name); ++ seq_printf(seq, "Physical name : %.64s\n", result.physical_location); ++ seq_printf(seq, "Instance number : %.4s\n", result.instance_number); + + return 0; + } +diff -urNp linux-2.6.35.4/drivers/mfd/janz-cmodio.c linux-2.6.35.4/drivers/mfd/janz-cmodio.c +--- linux-2.6.35.4/drivers/mfd/janz-cmodio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/mfd/janz-cmodio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -13,6 +13,7 @@ + + #include <linux/kernel.h> + #include <linux/module.h> ++#include <linux/slab.h> + #include <linux/init.h> + #include <linux/pci.h> + #include <linux/interrupt.h> +diff -urNp linux-2.6.35.4/drivers/misc/kgdbts.c linux-2.6.35.4/drivers/misc/kgdbts.c +--- linux-2.6.35.4/drivers/misc/kgdbts.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/misc/kgdbts.c 2010-09-17 20:12:09.000000000 -0400 +@@ -118,7 +118,7 @@ + } while (0) + #define MAX_CONFIG_LEN 40 + +-static struct kgdb_io kgdbts_io_ops; ++static const struct kgdb_io kgdbts_io_ops; + static char get_buf[BUFMAX]; + static int get_buf_cnt; + static char put_buf[BUFMAX]; +@@ -1114,7 +1114,7 @@ static void kgdbts_post_exp_handler(void + module_put(THIS_MODULE); + } + +-static struct kgdb_io kgdbts_io_ops = { ++static const struct kgdb_io kgdbts_io_ops = { + .name = "kgdbts", + .read_char = kgdbts_get_char, + .write_char = kgdbts_put_char, +diff -urNp linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c 
linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c +--- linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/misc/sgi-gru/gruhandles.c 2010-09-17 20:12:09.000000000 -0400 +@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op + unsigned long nsec; + + nsec = CLKS2NSEC(clks); +- atomic_long_inc(&mcs_op_statistics[op].count); +- atomic_long_add(nsec, &mcs_op_statistics[op].total); ++ atomic_long_inc_unchecked(&mcs_op_statistics[op].count); ++ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total); + if (mcs_op_statistics[op].max < nsec) + mcs_op_statistics[op].max = nsec; + } +diff -urNp linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c +--- linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/misc/sgi-gru/gruprocfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -32,9 +32,9 @@ + + #define printstat(s, f) printstat_val(s, &gru_stats.f, #f) + +-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id) ++static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id) + { +- unsigned long val = atomic_long_read(v); ++ unsigned long val = atomic_long_read_unchecked(v); + + seq_printf(s, "%16lu %s\n", val, id); + } +@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct se + + seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks"); + for (op = 0; op < mcsop_last; op++) { +- count = atomic_long_read(&mcs_op_statistics[op].count); +- total = atomic_long_read(&mcs_op_statistics[op].total); ++ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count); ++ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total); + max = mcs_op_statistics[op].max; + seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count, + count ? total / count : 0, max); +diff -urNp linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h +--- linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/misc/sgi-gru/grutables.h 2010-09-17 20:12:09.000000000 -0400 +@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids; + * GRU statistics. 
+ */ + struct gru_stats_s { +- atomic_long_t vdata_alloc; +- atomic_long_t vdata_free; +- atomic_long_t gts_alloc; +- atomic_long_t gts_free; +- atomic_long_t gms_alloc; +- atomic_long_t gms_free; +- atomic_long_t gts_double_allocate; +- atomic_long_t assign_context; +- atomic_long_t assign_context_failed; +- atomic_long_t free_context; +- atomic_long_t load_user_context; +- atomic_long_t load_kernel_context; +- atomic_long_t lock_kernel_context; +- atomic_long_t unlock_kernel_context; +- atomic_long_t steal_user_context; +- atomic_long_t steal_kernel_context; +- atomic_long_t steal_context_failed; +- atomic_long_t nopfn; +- atomic_long_t asid_new; +- atomic_long_t asid_next; +- atomic_long_t asid_wrap; +- atomic_long_t asid_reuse; +- atomic_long_t intr; +- atomic_long_t intr_cbr; +- atomic_long_t intr_tfh; +- atomic_long_t intr_spurious; +- atomic_long_t intr_mm_lock_failed; +- atomic_long_t call_os; +- atomic_long_t call_os_wait_queue; +- atomic_long_t user_flush_tlb; +- atomic_long_t user_unload_context; +- atomic_long_t user_exception; +- atomic_long_t set_context_option; +- atomic_long_t check_context_retarget_intr; +- atomic_long_t check_context_unload; +- atomic_long_t tlb_dropin; +- atomic_long_t tlb_preload_page; +- atomic_long_t tlb_dropin_fail_no_asid; +- atomic_long_t tlb_dropin_fail_upm; +- atomic_long_t tlb_dropin_fail_invalid; +- atomic_long_t tlb_dropin_fail_range_active; +- atomic_long_t tlb_dropin_fail_idle; +- atomic_long_t tlb_dropin_fail_fmm; +- atomic_long_t tlb_dropin_fail_no_exception; +- atomic_long_t tfh_stale_on_fault; +- atomic_long_t mmu_invalidate_range; +- atomic_long_t mmu_invalidate_page; +- atomic_long_t flush_tlb; +- atomic_long_t flush_tlb_gru; +- atomic_long_t flush_tlb_gru_tgh; +- atomic_long_t flush_tlb_gru_zero_asid; +- +- atomic_long_t copy_gpa; +- atomic_long_t read_gpa; +- +- atomic_long_t mesq_receive; +- atomic_long_t mesq_receive_none; +- atomic_long_t mesq_send; +- atomic_long_t mesq_send_failed; +- atomic_long_t mesq_noop; +- atomic_long_t mesq_send_unexpected_error; +- atomic_long_t mesq_send_lb_overflow; +- atomic_long_t mesq_send_qlimit_reached; +- atomic_long_t mesq_send_amo_nacked; +- atomic_long_t mesq_send_put_nacked; +- atomic_long_t mesq_page_overflow; +- atomic_long_t mesq_qf_locked; +- atomic_long_t mesq_qf_noop_not_full; +- atomic_long_t mesq_qf_switch_head_failed; +- atomic_long_t mesq_qf_unexpected_error; +- atomic_long_t mesq_noop_unexpected_error; +- atomic_long_t mesq_noop_lb_overflow; +- atomic_long_t mesq_noop_qlimit_reached; +- atomic_long_t mesq_noop_amo_nacked; +- atomic_long_t mesq_noop_put_nacked; +- atomic_long_t mesq_noop_page_overflow; ++ atomic_long_unchecked_t vdata_alloc; ++ atomic_long_unchecked_t vdata_free; ++ atomic_long_unchecked_t gts_alloc; ++ atomic_long_unchecked_t gts_free; ++ atomic_long_unchecked_t gms_alloc; ++ atomic_long_unchecked_t gms_free; ++ atomic_long_unchecked_t gts_double_allocate; ++ atomic_long_unchecked_t assign_context; ++ atomic_long_unchecked_t assign_context_failed; ++ atomic_long_unchecked_t free_context; ++ atomic_long_unchecked_t load_user_context; ++ atomic_long_unchecked_t load_kernel_context; ++ atomic_long_unchecked_t lock_kernel_context; ++ atomic_long_unchecked_t unlock_kernel_context; ++ atomic_long_unchecked_t steal_user_context; ++ atomic_long_unchecked_t steal_kernel_context; ++ atomic_long_unchecked_t steal_context_failed; ++ atomic_long_unchecked_t nopfn; ++ atomic_long_unchecked_t asid_new; ++ atomic_long_unchecked_t asid_next; ++ atomic_long_unchecked_t asid_wrap; ++ 
atomic_long_unchecked_t asid_reuse; ++ atomic_long_unchecked_t intr; ++ atomic_long_unchecked_t intr_cbr; ++ atomic_long_unchecked_t intr_tfh; ++ atomic_long_unchecked_t intr_spurious; ++ atomic_long_unchecked_t intr_mm_lock_failed; ++ atomic_long_unchecked_t call_os; ++ atomic_long_unchecked_t call_os_wait_queue; ++ atomic_long_unchecked_t user_flush_tlb; ++ atomic_long_unchecked_t user_unload_context; ++ atomic_long_unchecked_t user_exception; ++ atomic_long_unchecked_t set_context_option; ++ atomic_long_unchecked_t check_context_retarget_intr; ++ atomic_long_unchecked_t check_context_unload; ++ atomic_long_unchecked_t tlb_dropin; ++ atomic_long_unchecked_t tlb_preload_page; ++ atomic_long_unchecked_t tlb_dropin_fail_no_asid; ++ atomic_long_unchecked_t tlb_dropin_fail_upm; ++ atomic_long_unchecked_t tlb_dropin_fail_invalid; ++ atomic_long_unchecked_t tlb_dropin_fail_range_active; ++ atomic_long_unchecked_t tlb_dropin_fail_idle; ++ atomic_long_unchecked_t tlb_dropin_fail_fmm; ++ atomic_long_unchecked_t tlb_dropin_fail_no_exception; ++ atomic_long_unchecked_t tfh_stale_on_fault; ++ atomic_long_unchecked_t mmu_invalidate_range; ++ atomic_long_unchecked_t mmu_invalidate_page; ++ atomic_long_unchecked_t flush_tlb; ++ atomic_long_unchecked_t flush_tlb_gru; ++ atomic_long_unchecked_t flush_tlb_gru_tgh; ++ atomic_long_unchecked_t flush_tlb_gru_zero_asid; ++ ++ atomic_long_unchecked_t copy_gpa; ++ atomic_long_unchecked_t read_gpa; ++ ++ atomic_long_unchecked_t mesq_receive; ++ atomic_long_unchecked_t mesq_receive_none; ++ atomic_long_unchecked_t mesq_send; ++ atomic_long_unchecked_t mesq_send_failed; ++ atomic_long_unchecked_t mesq_noop; ++ atomic_long_unchecked_t mesq_send_unexpected_error; ++ atomic_long_unchecked_t mesq_send_lb_overflow; ++ atomic_long_unchecked_t mesq_send_qlimit_reached; ++ atomic_long_unchecked_t mesq_send_amo_nacked; ++ atomic_long_unchecked_t mesq_send_put_nacked; ++ atomic_long_unchecked_t mesq_page_overflow; ++ atomic_long_unchecked_t mesq_qf_locked; ++ atomic_long_unchecked_t mesq_qf_noop_not_full; ++ atomic_long_unchecked_t mesq_qf_switch_head_failed; ++ atomic_long_unchecked_t mesq_qf_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_unexpected_error; ++ atomic_long_unchecked_t mesq_noop_lb_overflow; ++ atomic_long_unchecked_t mesq_noop_qlimit_reached; ++ atomic_long_unchecked_t mesq_noop_amo_nacked; ++ atomic_long_unchecked_t mesq_noop_put_nacked; ++ atomic_long_unchecked_t mesq_noop_page_overflow; + + }; + +@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start + tghop_invalidate, mcsop_last}; + + struct mcs_op_statistic { +- atomic_long_t count; +- atomic_long_t total; ++ atomic_long_unchecked_t count; ++ atomic_long_unchecked_t total; + unsigned long max; + }; + +@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_st + + #define STAT(id) do { \ + if (gru_options & OPT_STATS) \ +- atomic_long_inc(&gru_stats.id); \ ++ atomic_long_inc_unchecked(&gru_stats.id); \ + } while (0) + + #ifdef CONFIG_SGI_GRU_DEBUG +diff -urNp linux-2.6.35.4/drivers/mtd/devices/doc2000.c linux-2.6.35.4/drivers/mtd/devices/doc2000.c +--- linux-2.6.35.4/drivers/mtd/devices/doc2000.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/mtd/devices/doc2000.c 2010-09-17 20:12:09.000000000 -0400 +@@ -776,7 +776,7 @@ static int doc_write(struct mtd_info *mt + + /* The ECC will not be calculated correctly if less than 512 is written */ + /* DBB- +- if (len != 0x200 && eccbuf) ++ if (len != 0x200) + printk(KERN_WARNING + "ECC needs a full sector write (adr: %lx size 
%lx)\n", + (long) to, (long) len); +diff -urNp linux-2.6.35.4/drivers/mtd/devices/doc2001.c linux-2.6.35.4/drivers/mtd/devices/doc2001.c +--- linux-2.6.35.4/drivers/mtd/devices/doc2001.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/mtd/devices/doc2001.c 2010-09-17 20:12:09.000000000 -0400 +@@ -393,7 +393,7 @@ static int doc_read (struct mtd_info *mt + struct Nand *mychip = &this->chips[from >> (this->chipshift)]; + + /* Don't allow read past end of device */ +- if (from >= this->totlen) ++ if (from >= this->totlen || !len) + return -EINVAL; + + /* Don't allow a single read to cross a 512-byte block boundary */ +diff -urNp linux-2.6.35.4/drivers/mtd/nand/denali.c linux-2.6.35.4/drivers/mtd/nand/denali.c +--- linux-2.6.35.4/drivers/mtd/nand/denali.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/mtd/nand/denali.c 2010-09-17 20:12:09.000000000 -0400 +@@ -24,6 +24,7 @@ + #include <linux/pci.h> + #include <linux/mtd/mtd.h> + #include <linux/module.h> ++#include <linux/slab.h> + + #include "denali.h" + +diff -urNp linux-2.6.35.4/drivers/mtd/ubi/build.c linux-2.6.35.4/drivers/mtd/ubi/build.c +--- linux-2.6.35.4/drivers/mtd/ubi/build.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/mtd/ubi/build.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1282,7 +1282,7 @@ module_exit(ubi_exit); + static int __init bytes_str_to_int(const char *str) + { + char *endp; +- unsigned long result; ++ unsigned long result, scale = 1; + + result = simple_strtoul(str, &endp, 0); + if (str == endp || result >= INT_MAX) { +@@ -1293,11 +1293,11 @@ static int __init bytes_str_to_int(const + + switch (*endp) { + case 'G': +- result *= 1024; ++ scale *= 1024; + case 'M': +- result *= 1024; ++ scale *= 1024; + case 'K': +- result *= 1024; ++ scale *= 1024; + if (endp[1] == 'i' && endp[2] == 'B') + endp += 2; + case '\0': +@@ -1308,7 +1308,13 @@ static int __init bytes_str_to_int(const + return -EINVAL; + } + +- return result; ++ if ((intoverflow_t)result*scale >= INT_MAX) { ++ printk(KERN_ERR "UBI error: incorrect bytes count: \"%s\"\n", ++ str); ++ return -EINVAL; ++ } ++ ++ return result*scale; + } + + /** +diff -urNp linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c +--- linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/cxgb3/cxgb3_main.c 2010-09-17 20:12:37.000000000 -0400 +@@ -2296,6 +2296,8 @@ static int cxgb_extension_ioctl(struct n + case CHELSIO_GET_QSET_NUM:{ + struct ch_reg edata; + ++ memset(&edata, 0, sizeof(edata)); ++ + edata.cmd = CHELSIO_GET_QSET_NUM; + edata.val = pi->nqsets; + if (copy_to_user(useraddr, &edata, sizeof(edata))) +diff -urNp linux-2.6.35.4/drivers/net/e1000e/82571.c linux-2.6.35.4/drivers/net/e1000e/82571.c +--- linux-2.6.35.4/drivers/net/e1000e/82571.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/e1000e/82571.c 2010-09-17 20:12:09.000000000 -0400 +@@ -207,6 +207,7 @@ static s32 e1000_init_mac_params_82571(s + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; ++ /* cannot be const */ + struct e1000_mac_operations *func = &mac->ops; + u32 swsm = 0; + u32 swsm2 = 0; +@@ -1703,7 +1704,7 @@ static void e1000_clear_hw_cntrs_82571(s + er32(ICRXDMTC); + } + +-static struct e1000_mac_operations e82571_mac_ops = { ++static const struct e1000_mac_operations e82571_mac_ops = { + /* .check_mng_mode: mac type dependent */ + /* .check_for_link: media type dependent */ + .id_led_init = e1000e_id_led_init, 
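The e1000e hunk just above, like the es2lan, ich8lan and igb hunks that follow, constifies the adapter's operations tables: structures containing only function pointers are declared const so they land in the kernel's read-only data section (and stay read-only under PaX constification), removing them as targets for function-pointer overwrites; the one instance that must be written at runtime is annotated /* cannot be const */ instead. A reduced sketch of the pattern with illustrative names, not the driver's own:

/* An ops table holding nothing but function pointers. */
struct example_mac_operations {
	int (*reset_hw)(void *hw);
	int (*init_hw)(void *hw);
};

static int example_reset_hw(void *hw) { return 0; }
static int example_init_hw(void *hw) { return 0; }

/* was: static struct example_mac_operations example_mac_ops = { ... };
 * Adding const moves the table into .rodata, so the pointers cannot be
 * redirected by a runtime memory-corruption primitive. */
static const struct example_mac_operations example_mac_ops = {
	.reset_hw = example_reset_hw,
	.init_hw = example_init_hw,
};

Consumers then hold const struct ... * pointers, which is exactly what the e1000_info and e1000_nvm_operations hunks below adjust.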
+@@ -1725,7 +1726,7 @@ static struct e1000_mac_operations e8257 + .read_mac_addr = e1000_read_mac_addr_82571, + }; + +-static struct e1000_phy_operations e82_phy_ops_igp = { ++static const struct e1000_phy_operations e82_phy_ops_igp = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_igp, + .check_reset_block = e1000e_check_reset_block_generic, +@@ -1743,7 +1744,7 @@ static struct e1000_phy_operations e82_p + .cfg_on_link_up = NULL, + }; + +-static struct e1000_phy_operations e82_phy_ops_m88 = { ++static const struct e1000_phy_operations e82_phy_ops_m88 = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, +@@ -1761,7 +1762,7 @@ static struct e1000_phy_operations e82_p + .cfg_on_link_up = NULL, + }; + +-static struct e1000_phy_operations e82_phy_ops_bm = { ++static const struct e1000_phy_operations e82_phy_ops_bm = { + .acquire = e1000_get_hw_semaphore_82571, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, +@@ -1779,7 +1780,7 @@ static struct e1000_phy_operations e82_p + .cfg_on_link_up = NULL, + }; + +-static struct e1000_nvm_operations e82571_nvm_ops = { ++static const struct e1000_nvm_operations e82571_nvm_ops = { + .acquire = e1000_acquire_nvm_82571, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_82571, +diff -urNp linux-2.6.35.4/drivers/net/e1000e/e1000.h linux-2.6.35.4/drivers/net/e1000e/e1000.h +--- linux-2.6.35.4/drivers/net/e1000e/e1000.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/e1000e/e1000.h 2010-09-17 20:12:09.000000000 -0400 +@@ -377,9 +377,9 @@ struct e1000_info { + u32 pba; + u32 max_hw_frame_size; + s32 (*get_variants)(struct e1000_adapter *); +- struct e1000_mac_operations *mac_ops; +- struct e1000_phy_operations *phy_ops; +- struct e1000_nvm_operations *nvm_ops; ++ const struct e1000_mac_operations *mac_ops; ++ const struct e1000_phy_operations *phy_ops; ++ const struct e1000_nvm_operations *nvm_ops; + }; + + /* hardware capability, feature, and workaround flags */ +diff -urNp linux-2.6.35.4/drivers/net/e1000e/es2lan.c linux-2.6.35.4/drivers/net/e1000e/es2lan.c +--- linux-2.6.35.4/drivers/net/e1000e/es2lan.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/e1000e/es2lan.c 2010-09-17 20:12:09.000000000 -0400 +@@ -205,6 +205,7 @@ static s32 e1000_init_mac_params_80003es + { + struct e1000_hw *hw = &adapter->hw; + struct e1000_mac_info *mac = &hw->mac; ++ /* cannot be const */ + struct e1000_mac_operations *func = &mac->ops; + + /* Set media type */ +@@ -1431,7 +1432,7 @@ static void e1000_clear_hw_cntrs_80003es + er32(ICRXDMTC); + } + +-static struct e1000_mac_operations es2_mac_ops = { ++static const struct e1000_mac_operations es2_mac_ops = { + .read_mac_addr = e1000_read_mac_addr_80003es2lan, + .id_led_init = e1000e_id_led_init, + .check_mng_mode = e1000e_check_mng_mode_generic, +@@ -1453,7 +1454,7 @@ static struct e1000_mac_operations es2_m + .setup_led = e1000e_setup_led_generic, + }; + +-static struct e1000_phy_operations es2_phy_ops = { ++static const struct e1000_phy_operations es2_phy_ops = { + .acquire = e1000_acquire_phy_80003es2lan, + .check_polarity = e1000_check_polarity_m88, + .check_reset_block = e1000e_check_reset_block_generic, +@@ -1471,7 +1472,7 @@ static struct e1000_phy_operations es2_p + .cfg_on_link_up = e1000_cfg_on_link_up_80003es2lan, + }; + +-static struct e1000_nvm_operations es2_nvm_ops = { ++static 
const struct e1000_nvm_operations es2_nvm_ops = { + .acquire = e1000_acquire_nvm_80003es2lan, + .read = e1000e_read_nvm_eerd, + .release = e1000_release_nvm_80003es2lan, +diff -urNp linux-2.6.35.4/drivers/net/e1000e/hw.h linux-2.6.35.4/drivers/net/e1000e/hw.h +--- linux-2.6.35.4/drivers/net/e1000e/hw.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/e1000e/hw.h 2010-09-17 20:12:09.000000000 -0400 +@@ -791,13 +791,13 @@ struct e1000_phy_operations { + + /* Function pointers for the NVM. */ + struct e1000_nvm_operations { +- s32 (*acquire)(struct e1000_hw *); +- s32 (*read)(struct e1000_hw *, u16, u16, u16 *); +- void (*release)(struct e1000_hw *); +- s32 (*update)(struct e1000_hw *); +- s32 (*valid_led_default)(struct e1000_hw *, u16 *); +- s32 (*validate)(struct e1000_hw *); +- s32 (*write)(struct e1000_hw *, u16, u16, u16 *); ++ s32 (* const acquire)(struct e1000_hw *); ++ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *); ++ void (* const release)(struct e1000_hw *); ++ s32 (* const update)(struct e1000_hw *); ++ s32 (* const valid_led_default)(struct e1000_hw *, u16 *); ++ s32 (* const validate)(struct e1000_hw *); ++ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *); + }; + + struct e1000_mac_info { +@@ -877,6 +877,7 @@ struct e1000_phy_info { + }; + + struct e1000_nvm_info { ++ /* cannot be const */ + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; +diff -urNp linux-2.6.35.4/drivers/net/e1000e/ich8lan.c linux-2.6.35.4/drivers/net/e1000e/ich8lan.c +--- linux-2.6.35.4/drivers/net/e1000e/ich8lan.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/e1000e/ich8lan.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3388,7 +3388,7 @@ static void e1000_clear_hw_cntrs_ich8lan + } + } + +-static struct e1000_mac_operations ich8_mac_ops = { ++static const struct e1000_mac_operations ich8_mac_ops = { + .id_led_init = e1000e_id_led_init, + .check_mng_mode = e1000_check_mng_mode_ich8lan, + .check_for_link = e1000_check_for_copper_link_ich8lan, +@@ -3407,7 +3407,7 @@ static struct e1000_mac_operations ich8_ + /* id_led_init dependent on mac type */ + }; + +-static struct e1000_phy_operations ich8_phy_ops = { ++static const struct e1000_phy_operations ich8_phy_ops = { + .acquire = e1000_acquire_swflag_ich8lan, + .check_reset_block = e1000_check_reset_block_ich8lan, + .commit = NULL, +@@ -3421,7 +3421,7 @@ static struct e1000_phy_operations ich8_ + .write_reg = e1000e_write_phy_reg_igp, + }; + +-static struct e1000_nvm_operations ich8_nvm_ops = { ++static const struct e1000_nvm_operations ich8_nvm_ops = { + .acquire = e1000_acquire_nvm_ich8lan, + .read = e1000_read_nvm_ich8lan, + .release = e1000_release_nvm_ich8lan, +diff -urNp linux-2.6.35.4/drivers/net/eql.c linux-2.6.35.4/drivers/net/eql.c +--- linux-2.6.35.4/drivers/net/eql.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/eql.c 2010-09-17 20:12:37.000000000 -0400 +@@ -555,6 +555,8 @@ static int eql_g_master_cfg(struct net_d + equalizer_t *eql; + master_config_t mc; + ++ memset(&mc, 0, sizeof(mc)); ++ + if (eql_is_master(dev)) { + eql = netdev_priv(dev); + mc.max_slaves = eql->max_slaves; +diff -urNp linux-2.6.35.4/drivers/net/igb/e1000_82575.c linux-2.6.35.4/drivers/net/igb/e1000_82575.c +--- linux-2.6.35.4/drivers/net/igb/e1000_82575.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/igb/e1000_82575.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1597,7 +1597,7 @@ u16 igb_rxpbs_adjust_82580(u32 data) + return ret_val; + } + +-static struct 
e1000_mac_operations e1000_mac_ops_82575 = { ++static const struct e1000_mac_operations e1000_mac_ops_82575 = { + .init_hw = igb_init_hw_82575, + .check_for_link = igb_check_for_link_82575, + .rar_set = igb_rar_set, +@@ -1605,13 +1605,13 @@ static struct e1000_mac_operations e1000 + .get_speed_and_duplex = igb_get_speed_and_duplex_copper, + }; + +-static struct e1000_phy_operations e1000_phy_ops_82575 = { ++static const struct e1000_phy_operations e1000_phy_ops_82575 = { + .acquire = igb_acquire_phy_82575, + .get_cfg_done = igb_get_cfg_done_82575, + .release = igb_release_phy_82575, + }; + +-static struct e1000_nvm_operations e1000_nvm_ops_82575 = { ++static const struct e1000_nvm_operations e1000_nvm_ops_82575 = { + .acquire = igb_acquire_nvm_82575, + .read = igb_read_nvm_eerd, + .release = igb_release_nvm_82575, +diff -urNp linux-2.6.35.4/drivers/net/igb/e1000_hw.h linux-2.6.35.4/drivers/net/igb/e1000_hw.h +--- linux-2.6.35.4/drivers/net/igb/e1000_hw.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/igb/e1000_hw.h 2010-09-17 20:12:09.000000000 -0400 +@@ -323,17 +323,17 @@ struct e1000_phy_operations { + }; + + struct e1000_nvm_operations { +- s32 (*acquire)(struct e1000_hw *); +- s32 (*read)(struct e1000_hw *, u16, u16, u16 *); +- void (*release)(struct e1000_hw *); +- s32 (*write)(struct e1000_hw *, u16, u16, u16 *); ++ s32 (* const acquire)(struct e1000_hw *); ++ s32 (* const read)(struct e1000_hw *, u16, u16, u16 *); ++ void (* const release)(struct e1000_hw *); ++ s32 (* const write)(struct e1000_hw *, u16, u16, u16 *); + }; + + struct e1000_info { + s32 (*get_invariants)(struct e1000_hw *); +- struct e1000_mac_operations *mac_ops; +- struct e1000_phy_operations *phy_ops; +- struct e1000_nvm_operations *nvm_ops; ++ const struct e1000_mac_operations *mac_ops; ++ const struct e1000_phy_operations *phy_ops; ++ const struct e1000_nvm_operations *nvm_ops; + }; + + extern const struct e1000_info e1000_82575_info; +@@ -412,6 +412,7 @@ struct e1000_phy_info { + }; + + struct e1000_nvm_info { ++ /* cannot be const */ + struct e1000_nvm_operations ops; + + enum e1000_nvm_type type; +diff -urNp linux-2.6.35.4/drivers/net/irda/vlsi_ir.c linux-2.6.35.4/drivers/net/irda/vlsi_ir.c +--- linux-2.6.35.4/drivers/net/irda/vlsi_ir.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/irda/vlsi_ir.c 2010-09-17 20:12:09.000000000 -0400 +@@ -907,13 +907,12 @@ static netdev_tx_t vlsi_hard_start_xmit( + /* no race - tx-ring already empty */ + vlsi_set_baud(idev, iobase); + netif_wake_queue(ndev); +- } +- else +- ; ++ } else { + /* keep the speed change pending like it would + * for any len>0 packet. tx completion interrupt + * will apply it when the tx ring becomes empty. 
+ */ ++ } + spin_unlock_irqrestore(&idev->lock, flags); + dev_kfree_skb_any(skb); + return NETDEV_TX_OK; +diff -urNp linux-2.6.35.4/drivers/net/pcnet32.c linux-2.6.35.4/drivers/net/pcnet32.c +--- linux-2.6.35.4/drivers/net/pcnet32.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/pcnet32.c 2010-09-17 20:12:09.000000000 -0400 +@@ -82,7 +82,7 @@ static int cards_found; + /* + * VLB I/O addresses + */ +-static unsigned int pcnet32_portlist[] __initdata = ++static unsigned int pcnet32_portlist[] __devinitdata = + { 0x300, 0x320, 0x340, 0x360, 0 }; + + static int pcnet32_debug; +diff -urNp linux-2.6.35.4/drivers/net/ppp_generic.c linux-2.6.35.4/drivers/net/ppp_generic.c +--- linux-2.6.35.4/drivers/net/ppp_generic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/ppp_generic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -992,7 +992,6 @@ ppp_net_ioctl(struct net_device *dev, st + void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data; + struct ppp_stats stats; + struct ppp_comp_stats cstats; +- char *vers; + + switch (cmd) { + case SIOCGPPPSTATS: +@@ -1014,8 +1013,7 @@ ppp_net_ioctl(struct net_device *dev, st + break; + + case SIOCGPPPVER: +- vers = PPP_VERSION; +- if (copy_to_user(addr, vers, strlen(vers) + 1)) ++ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION))) + break; + err = 0; + break; +diff -urNp linux-2.6.35.4/drivers/net/tg3.c linux-2.6.35.4/drivers/net/tg3.c +--- linux-2.6.35.4/drivers/net/tg3.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/tg3.c 2010-09-17 20:12:09.000000000 -0400 +@@ -12410,7 +12410,7 @@ static void __devinit tg3_read_vpd(struc + cnt = pci_read_vpd(tp->pdev, pos, + TG3_NVM_VPD_LEN - pos, + &vpd_data[pos]); +- if (cnt == -ETIMEDOUT || -EINTR) ++ if (cnt == -ETIMEDOUT || cnt == -EINTR) + cnt = 0; + else if (cnt < 0) + goto out_not_found; +diff -urNp linux-2.6.35.4/drivers/net/tg3.h linux-2.6.35.4/drivers/net/tg3.h +--- linux-2.6.35.4/drivers/net/tg3.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/tg3.h 2010-09-17 20:12:09.000000000 -0400 +@@ -130,6 +130,7 @@ + #define CHIPREV_ID_5750_A0 0x4000 + #define CHIPREV_ID_5750_A1 0x4001 + #define CHIPREV_ID_5750_A3 0x4003 ++#define CHIPREV_ID_5750_C1 0x4201 + #define CHIPREV_ID_5750_C2 0x4202 + #define CHIPREV_ID_5752_A0_HW 0x5000 + #define CHIPREV_ID_5752_A0 0x6000 +diff -urNp linux-2.6.35.4/drivers/net/tulip/de4x5.c linux-2.6.35.4/drivers/net/tulip/de4x5.c +--- linux-2.6.35.4/drivers/net/tulip/de4x5.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/tulip/de4x5.c 2010-09-17 20:12:37.000000000 -0400 +@@ -5401,7 +5401,7 @@ de4x5_ioctl(struct net_device *dev, stru + for (i=0; i<ETH_ALEN; i++) { + tmp.addr[i] = dev->dev_addr[i]; + } +- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; ++ if (ioc->len > sizeof(tmp.addr) || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT; + break; + + case DE4X5_SET_HWADDR: /* Set the hardware address */ +@@ -5441,7 +5441,7 @@ de4x5_ioctl(struct net_device *dev, stru + spin_lock_irqsave(&lp->lock, flags); + memcpy(&statbuf, &lp->pktStats, ioc->len); + spin_unlock_irqrestore(&lp->lock, flags); +- if (copy_to_user(ioc->data, &statbuf, ioc->len)) ++ if (ioc->len > sizeof(statbuf) || copy_to_user(ioc->data, &statbuf, ioc->len)) + return -EFAULT; + break; + } +@@ -5474,7 +5474,7 @@ de4x5_ioctl(struct net_device *dev, stru + tmp.lval[6] = inl(DE4X5_STRR); j+=4; + tmp.lval[7] = inl(DE4X5_SIGR); j+=4; + ioc->len = j; +- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) 
return -EFAULT; ++ if (copy_to_user(ioc->data, tmp.lval, ioc->len)) return -EFAULT; + break; + + #define DE4X5_DUMP 0x0f /* Dump the DE4X5 Status */ +diff -urNp linux-2.6.35.4/drivers/net/usb/hso.c linux-2.6.35.4/drivers/net/usb/hso.c +--- linux-2.6.35.4/drivers/net/usb/hso.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/usb/hso.c 2010-09-17 20:12:37.000000000 -0400 +@@ -258,7 +258,7 @@ struct hso_serial { + + /* from usb_serial_port */ + struct tty_struct *tty; +- int open_count; ++ atomic_t open_count; + spinlock_t serial_lock; + + int (*write_data) (struct hso_serial *serial); +@@ -1201,7 +1201,7 @@ static void put_rxbuf_data_and_resubmit_ + struct urb *urb; + + urb = serial->rx_urb[0]; +- if (serial->open_count > 0) { ++ if (atomic_read(&serial->open_count) > 0) { + count = put_rxbuf_data(urb, serial); + if (count == -1) + return; +@@ -1237,7 +1237,7 @@ static void hso_std_serial_read_bulk_cal + DUMP1(urb->transfer_buffer, urb->actual_length); + + /* Anyone listening? */ +- if (serial->open_count == 0) ++ if (atomic_read(&serial->open_count) == 0) + return; + + if (status == 0) { +@@ -1332,8 +1332,7 @@ static int hso_serial_open(struct tty_st + spin_unlock_irq(&serial->serial_lock); + + /* check for port already opened, if not set the termios */ +- serial->open_count++; +- if (serial->open_count == 1) { ++ if (atomic_inc_return(&serial->open_count) == 1) { + serial->rx_state = RX_IDLE; + /* Force default termio settings */ + _hso_serial_set_termios(tty, NULL); +@@ -1345,7 +1344,7 @@ static int hso_serial_open(struct tty_st + result = hso_start_serial_device(serial->parent, GFP_KERNEL); + if (result) { + hso_stop_serial_device(serial->parent); +- serial->open_count--; ++ atomic_dec(&serial->open_count); + kref_put(&serial->parent->ref, hso_serial_ref_free); + } + } else { +@@ -1382,10 +1381,10 @@ static void hso_serial_close(struct tty_ + + /* reset the rts and dtr */ + /* do the actual close */ +- serial->open_count--; ++ atomic_dec(&serial->open_count); + +- if (serial->open_count <= 0) { +- serial->open_count = 0; ++ if (atomic_read(&serial->open_count) <= 0) { ++ atomic_set(&serial->open_count, 0); + spin_lock_irq(&serial->serial_lock); + if (serial->tty == tty) { + serial->tty->driver_data = NULL; +@@ -1467,7 +1466,7 @@ static void hso_serial_set_termios(struc + + /* the actual setup */ + spin_lock_irqsave(&serial->serial_lock, flags); +- if (serial->open_count) ++ if (atomic_read(&serial->open_count)) + _hso_serial_set_termios(tty, old); + else + tty->termios = old; +@@ -1655,6 +1654,9 @@ static int hso_get_count(struct hso_seri + + if (!tiocmget) + return -ENOENT; ++ ++ memset(&icount, 0, sizeof(icount)); ++ + spin_lock_irq(&serial->serial_lock); + memcpy(&cnow, &tiocmget->icount, sizeof(struct uart_icount)); + spin_unlock_irq(&serial->serial_lock); +@@ -1929,7 +1931,7 @@ static void intr_callback(struct urb *ur + D1("Pending read interrupt on port %d\n", i); + spin_lock(&serial->serial_lock); + if (serial->rx_state == RX_IDLE && +- serial->open_count > 0) { ++ atomic_read(&serial->open_count) > 0) { + /* Setup and send a ctrl req read on + * port i */ + if (!serial->rx_urb_filled[0]) { +@@ -3119,7 +3121,7 @@ static int hso_resume(struct usb_interfa + /* Start all serial ports */ + for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) { + if (serial_table[i] && (serial_table[i]->interface == iface)) { +- if (dev2ser(serial_table[i])->open_count) { ++ if (atomic_read(&dev2ser(serial_table[i])->open_count)) { + result = + hso_start_serial_device(serial_table[i], 
GFP_NOIO); + hso_kick_transmit(dev2ser(serial_table[i])); +diff -urNp linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c +--- linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/wireless/b43/debugfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,7 @@ static struct dentry *rootdir; + struct b43_debugfs_fops { + ssize_t (*read)(struct b43_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43_wldev *dev, const char *buf, size_t count); +- struct file_operations fops; ++ const struct file_operations fops; + /* Offset of struct b43_dfs_file in struct b43_dfsentry */ + size_t file_struct_offset; + }; +diff -urNp linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c +--- linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/wireless/b43legacy/debugfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -44,7 +44,7 @@ static struct dentry *rootdir; + struct b43legacy_debugfs_fops { + ssize_t (*read)(struct b43legacy_wldev *dev, char *buf, size_t bufsize); + int (*write)(struct b43legacy_wldev *dev, const char *buf, size_t count); +- struct file_operations fops; ++ const struct file_operations fops; + /* Offset of struct b43legacy_dfs_file in struct b43legacy_dfsentry */ + size_t file_struct_offset; + /* Take wl->irq_lock before calling read/write? */ +diff -urNp linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h +--- linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/wireless/iwlwifi/iwl-debug.h 2010-09-17 20:12:09.000000000 -0400 +@@ -68,8 +68,8 @@ do { + } while (0) + + #else +-#define IWL_DEBUG(__priv, level, fmt, args...) +-#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) ++#define IWL_DEBUG(__priv, level, fmt, args...) do {} while (0) ++#define IWL_DEBUG_LIMIT(__priv, level, fmt, args...) 
do {} while (0) + static inline void iwl_print_hex_dump(struct iwl_priv *priv, int level, + void *p, u32 len) + {} +diff -urNp linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c +--- linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/wireless/libertas/debugfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -718,7 +718,7 @@ out_unlock: + struct lbs_debugfs_files { + const char *name; + int perm; +- struct file_operations fops; ++ const struct file_operations fops; + }; + + static const struct lbs_debugfs_files debugfs_files[] = { +diff -urNp linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c +--- linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/net/wireless/rndis_wlan.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1235,7 +1235,7 @@ static int set_rts_threshold(struct usbn + + netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold); + +- if (rts_threshold < 0 || rts_threshold > 2347) ++ if (rts_threshold > 2347) + rts_threshold = 2347; + + tmp = cpu_to_le32(rts_threshold); +diff -urNp linux-2.6.35.4/drivers/oprofile/buffer_sync.c linux-2.6.35.4/drivers/oprofile/buffer_sync.c +--- linux-2.6.35.4/drivers/oprofile/buffer_sync.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/oprofile/buffer_sync.c 2010-09-17 20:12:09.000000000 -0400 +@@ -341,7 +341,7 @@ static void add_data(struct op_entry *en + if (cookie == NO_COOKIE) + offset = pc; + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + offset = pc; + } + if (cookie != last_cookie) { +@@ -385,14 +385,14 @@ add_sample(struct mm_struct *mm, struct + /* add userspace sample */ + + if (!mm) { +- atomic_inc(&oprofile_stats.sample_lost_no_mm); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm); + return 0; + } + + cookie = lookup_dcookie(mm, s->eip, &offset); + + if (cookie == INVALID_COOKIE) { +- atomic_inc(&oprofile_stats.sample_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping); + return 0; + } + +@@ -561,7 +561,7 @@ void sync_buffer(int cpu) + /* ignore backtraces if failed to add a sample */ + if (state == sb_bt_start) { + state = sb_bt_ignore; +- atomic_inc(&oprofile_stats.bt_lost_no_mapping); ++ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping); + } + } + release_mm(mm); +diff -urNp linux-2.6.35.4/drivers/oprofile/event_buffer.c linux-2.6.35.4/drivers/oprofile/event_buffer.c +--- linux-2.6.35.4/drivers/oprofile/event_buffer.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/oprofile/event_buffer.c 2010-09-17 20:12:09.000000000 -0400 +@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value + } + + if (buffer_pos == buffer_size) { +- atomic_inc(&oprofile_stats.event_lost_overflow); ++ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow); + return; + } + +diff -urNp linux-2.6.35.4/drivers/oprofile/oprof.c linux-2.6.35.4/drivers/oprofile/oprof.c +--- linux-2.6.35.4/drivers/oprofile/oprof.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/oprofile/oprof.c 2010-09-17 20:12:09.000000000 -0400 +@@ -110,7 +110,7 @@ static void switch_worker(struct work_st + if (oprofile_ops.switch_events()) + return; + +- atomic_inc(&oprofile_stats.multiplex_counter); ++ atomic_inc_unchecked(&oprofile_stats.multiplex_counter); + 
start_switch_worker(); + } + +diff -urNp linux-2.6.35.4/drivers/oprofile/oprofilefs.c linux-2.6.35.4/drivers/oprofile/oprofilefs.c +--- linux-2.6.35.4/drivers/oprofile/oprofilefs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/oprofile/oprofilefs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -187,7 +187,7 @@ static const struct file_operations atom + + + int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root, +- char const *name, atomic_t *val) ++ char const *name, atomic_unchecked_t *val) + { + struct dentry *d = __oprofilefs_create_file(sb, root, name, + &atomic_ro_fops, 0444); +diff -urNp linux-2.6.35.4/drivers/oprofile/oprofile_stats.c linux-2.6.35.4/drivers/oprofile/oprofile_stats.c +--- linux-2.6.35.4/drivers/oprofile/oprofile_stats.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/oprofile/oprofile_stats.c 2010-09-17 20:12:09.000000000 -0400 +@@ -30,11 +30,11 @@ void oprofile_reset_stats(void) + cpu_buf->sample_invalid_eip = 0; + } + +- atomic_set(&oprofile_stats.sample_lost_no_mm, 0); +- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.event_lost_overflow, 0); +- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0); +- atomic_set(&oprofile_stats.multiplex_counter, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0); ++ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0); ++ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0); ++ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0); + } + + +diff -urNp linux-2.6.35.4/drivers/oprofile/oprofile_stats.h linux-2.6.35.4/drivers/oprofile/oprofile_stats.h +--- linux-2.6.35.4/drivers/oprofile/oprofile_stats.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/oprofile/oprofile_stats.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,11 +13,11 @@ + #include <asm/atomic.h> + + struct oprofile_stat_struct { +- atomic_t sample_lost_no_mm; +- atomic_t sample_lost_no_mapping; +- atomic_t bt_lost_no_mapping; +- atomic_t event_lost_overflow; +- atomic_t multiplex_counter; ++ atomic_unchecked_t sample_lost_no_mm; ++ atomic_unchecked_t sample_lost_no_mapping; ++ atomic_unchecked_t bt_lost_no_mapping; ++ atomic_unchecked_t event_lost_overflow; ++ atomic_unchecked_t multiplex_counter; + }; + + extern struct oprofile_stat_struct oprofile_stats; +diff -urNp linux-2.6.35.4/drivers/parport/procfs.c linux-2.6.35.4/drivers/parport/procfs.c +--- linux-2.6.35.4/drivers/parport/procfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/parport/procfs.c 2010-09-17 20:12:37.000000000 -0400 +@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *t + + *ppos += len; + +- return copy_to_user(result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof(buffer) || copy_to_user(result, buffer, len)) ? -EFAULT : 0; + } + + #ifdef CONFIG_PARPORT_1284 +@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table + + *ppos += len; + +- return copy_to_user (result, buffer, len) ? -EFAULT : 0; ++ return (len > sizeof(buffer) || copy_to_user (result, buffer, len)) ? -EFAULT : 0; + } + #endif /* IEEE1284.3 support. 
*/ + +diff -urNp linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c +--- linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pci/hotplug/acpiphp_glue.c 2010-09-17 20:12:09.000000000 -0400 +@@ -110,7 +110,7 @@ static int post_dock_fixups(struct notif + } + + +-static struct acpi_dock_ops acpiphp_dock_ops = { ++static const struct acpi_dock_ops acpiphp_dock_ops = { + .handler = handle_hotplug_event_func, + }; + +diff -urNp linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c +--- linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pci/hotplug/cpqphp_nvram.c 2010-09-17 20:12:09.000000000 -0400 +@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_ + + void compaq_nvram_init (void __iomem *rom_start) + { ++ ++#ifndef CONFIG_PAX_KERNEXEC + if (rom_start) { + compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR); + } ++#endif ++ + dbg("int15 entry = %p\n", compaq_int15_entry_point); + + /* initialize our int15 lock */ +diff -urNp linux-2.6.35.4/drivers/pci/intel-iommu.c linux-2.6.35.4/drivers/pci/intel-iommu.c +--- linux-2.6.35.4/drivers/pci/intel-iommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pci/intel-iommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2938,7 +2938,7 @@ static int intel_mapping_error(struct de + return !dma_addr; + } + +-struct dma_map_ops intel_dma_ops = { ++const struct dma_map_ops intel_dma_ops = { + .alloc_coherent = intel_alloc_coherent, + .free_coherent = intel_free_coherent, + .map_sg = intel_map_sg, +diff -urNp linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c +--- linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pci/pcie/portdrv_pci.c 2010-09-17 20:12:09.000000000 -0400 +@@ -250,7 +250,7 @@ static void pcie_portdrv_err_resume(stru + static const struct pci_device_id port_pci_ids[] = { { + /* handle any PCI-Express port */ + PCI_DEVICE_CLASS(((PCI_CLASS_BRIDGE_PCI << 8) | 0x00), ~0), +- }, { /* end: all zeroes */ } ++ }, { 0, 0, 0, 0, 0, 0, 0 } + }; + MODULE_DEVICE_TABLE(pci, port_pci_ids); + +diff -urNp linux-2.6.35.4/drivers/pci/probe.c linux-2.6.35.4/drivers/pci/probe.c +--- linux-2.6.35.4/drivers/pci/probe.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pci/probe.c 2010-09-17 20:12:09.000000000 -0400 +@@ -62,14 +62,14 @@ static ssize_t pci_bus_show_cpuaffinity( + return ret; + } + +-static ssize_t inline pci_bus_show_cpumaskaffinity(struct device *dev, ++static inline ssize_t pci_bus_show_cpumaskaffinity(struct device *dev, + struct device_attribute *attr, + char *buf) + { + return pci_bus_show_cpuaffinity(dev, 0, attr, buf); + } + +-static ssize_t inline pci_bus_show_cpulistaffinity(struct device *dev, ++static inline ssize_t pci_bus_show_cpulistaffinity(struct device *dev, + struct device_attribute *attr, + char *buf) + { +diff -urNp linux-2.6.35.4/drivers/pci/proc.c linux-2.6.35.4/drivers/pci/proc.c +--- linux-2.6.35.4/drivers/pci/proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pci/proc.c 2010-09-17 20:12:37.000000000 -0400 +@@ -481,7 +481,16 @@ static const struct file_operations proc + static int __init pci_proc_init(void) + { + struct pci_dev *dev = NULL; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_bus_pci_dir = 
proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_bus_pci_dir = proc_mkdir("bus/pci", NULL); ++#endif + proc_create("devices", 0, proc_bus_pci_dir, + &proc_bus_pci_dev_operations); + proc_initialized = 1; +diff -urNp linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c +--- linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pcmcia/pcmcia_ioctl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -850,7 +850,7 @@ static int ds_ioctl(struct file *file, u + return -EFAULT; + } + } +- buf = kmalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); ++ buf = kzalloc(sizeof(ds_ioctl_arg_t), GFP_KERNEL); + if (!buf) + return -ENOMEM; + +diff -urNp linux-2.6.35.4/drivers/pcmcia/ti113x.h linux-2.6.35.4/drivers/pcmcia/ti113x.h +--- linux-2.6.35.4/drivers/pcmcia/ti113x.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pcmcia/ti113x.h 2010-09-17 20:12:09.000000000 -0400 +@@ -936,7 +936,7 @@ static struct pci_device_id ene_tune_tbl + DEVID(PCI_VENDOR_ID_MOTOROLA, 0x3410, 0xECC0, PCI_ANY_ID, + ENE_TEST_C9_TLTENABLE | ENE_TEST_C9_PFENABLE, ENE_TEST_C9_TLTENABLE), + +- {} ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + static void ene_tune_bridge(struct pcmcia_socket *sock, struct pci_bus *bus) +diff -urNp linux-2.6.35.4/drivers/pcmcia/yenta_socket.c linux-2.6.35.4/drivers/pcmcia/yenta_socket.c +--- linux-2.6.35.4/drivers/pcmcia/yenta_socket.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pcmcia/yenta_socket.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1428,7 +1428,7 @@ static struct pci_device_id yenta_table[ + + /* match any cardbus bridge */ + CB_ID(PCI_ANY_ID, PCI_ANY_ID, DEFAULT), +- { /* all zeroes */ } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + MODULE_DEVICE_TABLE(pci, yenta_table); + +diff -urNp linux-2.6.35.4/drivers/platform/x86/acer-wmi.c linux-2.6.35.4/drivers/platform/x86/acer-wmi.c +--- linux-2.6.35.4/drivers/platform/x86/acer-wmi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/acer-wmi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -916,7 +916,7 @@ static int update_bl_status(struct backl + return 0; + } + +-static struct backlight_ops acer_bl_ops = { ++static const struct backlight_ops acer_bl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/asus_acpi.c linux-2.6.35.4/drivers/platform/x86/asus_acpi.c +--- linux-2.6.35.4/drivers/platform/x86/asus_acpi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/asus_acpi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1464,7 +1464,7 @@ static int asus_hotk_remove(struct acpi_ + return 0; + } + +-static struct backlight_ops asus_backlight_data = { ++static const struct backlight_ops asus_backlight_data = { + .get_brightness = read_brightness, + .update_status = set_brightness_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/asus-laptop.c linux-2.6.35.4/drivers/platform/x86/asus-laptop.c +--- linux-2.6.35.4/drivers/platform/x86/asus-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/asus-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -224,7 +224,6 @@ struct asus_laptop { + struct asus_led gled; + struct asus_led kled; + struct workqueue_struct *led_workqueue; +- + int wireless_status; + bool have_rsts; + int lcd_state; +@@ 
-621,7 +620,7 @@ static int update_bl_status(struct backl + return asus_lcd_set(asus, value); + } + +-static struct backlight_ops asusbl_ops = { ++static const struct backlight_ops asusbl_ops = { + .get_brightness = asus_read_brightness, + .update_status = update_bl_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/compal-laptop.c linux-2.6.35.4/drivers/platform/x86/compal-laptop.c +--- linux-2.6.35.4/drivers/platform/x86/compal-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/compal-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -168,7 +168,7 @@ static int bl_update_status(struct backl + return set_lcd_level(b->props.brightness); + } + +-static struct backlight_ops compalbl_ops = { ++static const struct backlight_ops compalbl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/dell-laptop.c linux-2.6.35.4/drivers/platform/x86/dell-laptop.c +--- linux-2.6.35.4/drivers/platform/x86/dell-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/dell-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -469,7 +469,7 @@ out: + return buffer->output[1]; + } + +-static struct backlight_ops dell_ops = { ++static const struct backlight_ops dell_ops = { + .get_brightness = dell_get_intensity, + .update_status = dell_send_intensity, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c +--- linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/eeepc-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1114,7 +1114,7 @@ static int update_bl_status(struct backl + return set_brightness(bd, bd->props.brightness); + } + +-static struct backlight_ops eeepcbl_ops = { ++static const struct backlight_ops eeepcbl_ops = { + .get_brightness = read_brightness, + .update_status = update_bl_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c +--- linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/fujitsu-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -437,7 +437,7 @@ static int bl_update_status(struct backl + return ret; + } + +-static struct backlight_ops fujitsubl_ops = { ++static const struct backlight_ops fujitsubl_ops = { + .get_brightness = bl_get_brightness, + .update_status = bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/sony-laptop.c linux-2.6.35.4/drivers/platform/x86/sony-laptop.c +--- linux-2.6.35.4/drivers/platform/x86/sony-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/sony-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -857,7 +857,7 @@ static int sony_backlight_get_brightness + } + + static struct backlight_device *sony_backlight_device; +-static struct backlight_ops sony_backlight_ops = { ++static const struct backlight_ops sony_backlight_ops = { + .update_status = sony_backlight_update_status, + .get_brightness = sony_backlight_get_brightness, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c +--- linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/thinkpad_acpi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -6142,7 +6142,7 @@ static void 
tpacpi_brightness_notify_cha + BACKLIGHT_UPDATE_HOTKEY); + } + +-static struct backlight_ops ibm_backlight_data = { ++static const struct backlight_ops ibm_backlight_data = { + .get_brightness = brightness_get, + .update_status = brightness_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c +--- linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/platform/x86/toshiba_acpi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -741,7 +741,7 @@ static acpi_status remove_device(void) + return AE_OK; + } + +-static struct backlight_ops toshiba_backlight_data = { ++static const struct backlight_ops toshiba_backlight_data = { + .get_brightness = get_lcd, + .update_status = set_lcd_status, + }; +diff -urNp linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c +--- linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pnp/pnpbios/bioscalls.c 2010-09-17 20:12:09.000000000 -0400 +@@ -59,7 +59,7 @@ do { \ + set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \ + } while(0) + +-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092, ++static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093, + (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1); + + /* +@@ -96,7 +96,10 @@ static inline u16 call_pnp_bios(u16 func + + cpu = get_cpu(); + save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8]; ++ ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc; ++ pax_close_kernel(); + + /* On some boxes IRQ's during PnP BIOS calls are deadly. */ + spin_lock_irqsave(&pnp_bios_lock, flags); +@@ -134,7 +137,10 @@ static inline u16 call_pnp_bios(u16 func + :"memory"); + spin_unlock_irqrestore(&pnp_bios_lock, flags); + ++ pax_open_kernel(); + get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40; ++ pax_close_kernel(); ++ + put_cpu(); + + /* If we get here and this is set then the PnP BIOS faulted on us. 
*/ +@@ -468,7 +474,7 @@ int pnp_bios_read_escd(char *data, u32 n + return status; + } + +-void pnpbios_calls_init(union pnp_bios_install_struct *header) ++void __init pnpbios_calls_init(union pnp_bios_install_struct *header) + { + int i; + +@@ -476,6 +482,8 @@ void pnpbios_calls_init(union pnp_bios_i + pnp_bios_callpoint.offset = header->fields.pm16offset; + pnp_bios_callpoint.segment = PNP_CS16; + ++ pax_open_kernel(); ++ + for_each_possible_cpu(i) { + struct desc_struct *gdt = get_cpu_gdt_table(i); + if (!gdt) +@@ -487,4 +495,6 @@ void pnpbios_calls_init(union pnp_bios_i + set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS], + (unsigned long)__va(header->fields.pm16dseg)); + } ++ ++ pax_close_kernel(); + } +diff -urNp linux-2.6.35.4/drivers/pnp/quirks.c linux-2.6.35.4/drivers/pnp/quirks.c +--- linux-2.6.35.4/drivers/pnp/quirks.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pnp/quirks.c 2010-09-17 20:12:09.000000000 -0400 +@@ -322,7 +322,7 @@ static struct pnp_fixup pnp_fixups[] = { + /* PnP resources that might overlap PCI BARs */ + {"PNP0c01", quirk_system_pci_resources}, + {"PNP0c02", quirk_system_pci_resources}, +- {""} ++ {"", NULL} + }; + + void pnp_fixup_device(struct pnp_dev *dev) +diff -urNp linux-2.6.35.4/drivers/pnp/resource.c linux-2.6.35.4/drivers/pnp/resource.c +--- linux-2.6.35.4/drivers/pnp/resource.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/pnp/resource.c 2010-09-17 20:12:09.000000000 -0400 +@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, s + return 1; + + /* check if the resource is valid */ +- if (*irq < 0 || *irq > 15) ++ if (*irq > 15) + return 0; + + /* check if the resource is reserved */ +@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, s + return 1; + + /* check if the resource is valid */ +- if (*dma < 0 || *dma == 4 || *dma > 7) ++ if (*dma == 4 || *dma > 7) + return 0; + + /* check if the resource is reserved */ +diff -urNp linux-2.6.35.4/drivers/s390/cio/qdio_debug.c linux-2.6.35.4/drivers/s390/cio/qdio_debug.c +--- linux-2.6.35.4/drivers/s390/cio/qdio_debug.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/s390/cio/qdio_debug.c 2010-09-17 20:12:09.000000000 -0400 +@@ -233,7 +233,7 @@ static int qperf_seq_open(struct inode * + filp->f_path.dentry->d_inode->i_private); + } + +-static struct file_operations debugfs_perf_fops = { ++static const struct file_operations debugfs_perf_fops = { + .owner = THIS_MODULE, + .open = qperf_seq_open, + .read = seq_read, +diff -urNp linux-2.6.35.4/drivers/scsi/ipr.c linux-2.6.35.4/drivers/scsi/ipr.c +--- linux-2.6.35.4/drivers/scsi/ipr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/ipr.c 2010-09-17 20:12:09.000000000 -0400 +@@ -6091,7 +6091,7 @@ static bool ipr_qc_fill_rtf(struct ata_q + return true; + } + +-static struct ata_port_operations ipr_sata_ops = { ++static const struct ata_port_operations ipr_sata_ops = { + .phy_reset = ipr_ata_phy_reset, + .hardreset = ipr_sata_reset, + .post_internal_cmd = ipr_ata_post_internal, +diff -urNp linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c +--- linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/libfc/fc_exch.c 2010-09-17 20:12:09.000000000 -0400 +@@ -100,12 +100,12 @@ struct fc_exch_mgr { + * all together if not used XXX + */ + struct { +- atomic_t no_free_exch; +- atomic_t no_free_exch_xid; +- atomic_t xid_not_found; +- atomic_t xid_busy; +- atomic_t seq_not_found; +- atomic_t non_bls_resp; 
++ atomic_unchecked_t no_free_exch; ++ atomic_unchecked_t no_free_exch_xid; ++ atomic_unchecked_t xid_not_found; ++ atomic_unchecked_t xid_busy; ++ atomic_unchecked_t seq_not_found; ++ atomic_unchecked_t non_bls_resp; + } stats; + }; + #define fc_seq_exch(sp) container_of(sp, struct fc_exch, seq) +@@ -671,7 +671,7 @@ static struct fc_exch *fc_exch_em_alloc( + /* allocate memory for exchange */ + ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC); + if (!ep) { +- atomic_inc(&mp->stats.no_free_exch); ++ atomic_inc_unchecked(&mp->stats.no_free_exch); + goto out; + } + memset(ep, 0, sizeof(*ep)); +@@ -719,7 +719,7 @@ out: + return ep; + err: + spin_unlock_bh(&pool->lock); +- atomic_inc(&mp->stats.no_free_exch_xid); ++ atomic_inc_unchecked(&mp->stats.no_free_exch_xid); + mempool_free(ep, mp->ep_pool); + return NULL; + } +@@ -864,7 +864,7 @@ static enum fc_pf_rjt_reason fc_seq_look + xid = ntohs(fh->fh_ox_id); /* we originated exch */ + ep = fc_exch_find(mp, xid); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_OX_ID; + goto out; + } +@@ -894,7 +894,7 @@ static enum fc_pf_rjt_reason fc_seq_look + ep = fc_exch_find(mp, xid); + if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) { + if (ep) { +- atomic_inc(&mp->stats.xid_busy); ++ atomic_inc_unchecked(&mp->stats.xid_busy); + reject = FC_RJT_RX_ID; + goto rel; + } +@@ -905,7 +905,7 @@ static enum fc_pf_rjt_reason fc_seq_look + } + xid = ep->xid; /* get our XID */ + } else if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + reject = FC_RJT_RX_ID; /* XID not found */ + goto out; + } +@@ -922,7 +922,7 @@ static enum fc_pf_rjt_reason fc_seq_look + } else { + sp = &ep->seq; + if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + reject = FC_RJT_SEQ_ID; /* sequence/exch should exist */ + goto rel; + } +@@ -1303,22 +1303,22 @@ static void fc_exch_recv_seq_resp(struct + + ep = fc_exch_find(mp, ntohs(fh->fh_ox_id)); + if (!ep) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto out; + } + if (ep->esb_stat & ESB_ST_COMPLETE) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto out; + } + if (ep->rxid == FC_XID_UNKNOWN) + ep->rxid = ntohs(fh->fh_rx_id); + if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + if (ep->did != ntoh24(fh->fh_s_id) && + ep->did != FC_FID_FLOGI) { +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + goto rel; + } + sof = fr_sof(fp); +@@ -1327,7 +1327,7 @@ static void fc_exch_recv_seq_resp(struct + sp->ssb_stat |= SSB_ST_RESP; + sp->id = fh->fh_seq_id; + } else if (sp->id != fh->fh_seq_id) { +- atomic_inc(&mp->stats.seq_not_found); ++ atomic_inc_unchecked(&mp->stats.seq_not_found); + goto rel; + } + +@@ -1390,9 +1390,9 @@ static void fc_exch_recv_resp(struct fc_ + sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */ + + if (!sp) +- atomic_inc(&mp->stats.xid_not_found); ++ atomic_inc_unchecked(&mp->stats.xid_not_found); + else +- atomic_inc(&mp->stats.non_bls_resp); ++ atomic_inc_unchecked(&mp->stats.non_bls_resp); + + fc_frame_free(fp); + } +diff -urNp linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c +--- 
linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/libsas/sas_ata.c 2010-09-17 20:12:09.000000000 -0400 +@@ -344,7 +344,7 @@ static int sas_ata_scr_read(struct ata_l + } + } + +-static struct ata_port_operations sas_sata_ops = { ++static const struct ata_port_operations sas_sata_ops = { + .phy_reset = sas_ata_phy_reset, + .post_internal_cmd = sas_ata_post_internal, + .qc_prep = ata_noop_qc_prep, +diff -urNp linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h +--- linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/mpt2sas/mpt2sas_debug.h 2010-09-17 20:12:09.000000000 -0400 +@@ -79,7 +79,7 @@ + CMD; \ + } + #else +-#define MPT_CHECK_LOGGING(IOC, CMD, BITS) ++#define MPT_CHECK_LOGGING(IOC, CMD, BITS) do {} while (0) + #endif /* CONFIG_SCSI_MPT2SAS_LOGGING */ + + +diff -urNp linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c +--- linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/qla2xxx/qla_os.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3899,7 +3899,7 @@ static struct pci_driver qla2xxx_pci_dri + .err_handler = &qla2xxx_err_handler, + }; + +-static struct file_operations apidev_fops = { ++static const struct file_operations apidev_fops = { + .owner = THIS_MODULE, + }; + +diff -urNp linux-2.6.35.4/drivers/scsi/scsi_logging.h linux-2.6.35.4/drivers/scsi/scsi_logging.h +--- linux-2.6.35.4/drivers/scsi/scsi_logging.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/scsi_logging.h 2010-09-17 20:12:09.000000000 -0400 +@@ -51,7 +51,7 @@ do { \ + } while (0); \ + } while (0) + #else +-#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) ++#define SCSI_CHECK_LOGGING(SHIFT, BITS, LEVEL, CMD) do {} while (0) + #endif /* CONFIG_SCSI_LOGGING */ + + /* +diff -urNp linux-2.6.35.4/drivers/scsi/sg.c linux-2.6.35.4/drivers/scsi/sg.c +--- linux-2.6.35.4/drivers/scsi/sg.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/scsi/sg.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2302,7 +2302,7 @@ struct sg_proc_leaf { + const struct file_operations * fops; + }; + +-static struct sg_proc_leaf sg_proc_leaf_arr[] = { ++static const struct sg_proc_leaf sg_proc_leaf_arr[] = { + {"allow_dio", &adio_fops}, + {"debug", &debug_fops}, + {"def_reserved_size", &dressz_fops}, +@@ -2317,7 +2317,7 @@ sg_proc_init(void) + { + int k, mask; + int num_leaves = ARRAY_SIZE(sg_proc_leaf_arr); +- struct sg_proc_leaf * leaf; ++ const struct sg_proc_leaf * leaf; + + sg_proc_sgp = proc_mkdir(sg_proc_sg_dirname, NULL); + if (!sg_proc_sgp) +diff -urNp linux-2.6.35.4/drivers/serial/8250_pci.c linux-2.6.35.4/drivers/serial/8250_pci.c +--- linux-2.6.35.4/drivers/serial/8250_pci.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/serial/8250_pci.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3777,7 +3777,7 @@ static struct pci_device_id serial_pci_t + PCI_ANY_ID, PCI_ANY_ID, + PCI_CLASS_COMMUNICATION_MULTISERIAL << 8, + 0xffff00, pbn_default }, +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + static struct pci_driver serial_pci_driver = { +diff -urNp linux-2.6.35.4/drivers/serial/kgdboc.c linux-2.6.35.4/drivers/serial/kgdboc.c +--- linux-2.6.35.4/drivers/serial/kgdboc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/serial/kgdboc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -20,7 +20,7 @@ + + #define MAX_CONFIG_LEN 40 
+ +-static struct kgdb_io kgdboc_io_ops; ++static struct kgdb_io kgdboc_io_ops; + + /* -1 = init not run yet, 0 = unconfigured, 1 = configured. */ + static int configured = -1; +diff -urNp linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c +--- linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/comedi/comedi_fops.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1425,7 +1425,7 @@ static void comedi_unmap(struct vm_area_ + mutex_unlock(&dev->mutex); + } + +-static struct vm_operations_struct comedi_vm_ops = { ++static const struct vm_operations_struct comedi_vm_ops = { + .close = comedi_unmap, + }; + +diff -urNp linux-2.6.35.4/drivers/staging/dream/pmem.c linux-2.6.35.4/drivers/staging/dream/pmem.c +--- linux-2.6.35.4/drivers/staging/dream/pmem.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/pmem.c 2010-09-17 20:12:09.000000000 -0400 +@@ -175,7 +175,7 @@ static int pmem_mmap(struct file *, stru + static int pmem_open(struct inode *, struct file *); + static long pmem_ioctl(struct file *, unsigned int, unsigned long); + +-struct file_operations pmem_fops = { ++const struct file_operations pmem_fops = { + .release = pmem_release, + .mmap = pmem_mmap, + .open = pmem_open, +@@ -1201,7 +1201,7 @@ static ssize_t debug_read(struct file *f + return simple_read_from_buffer(buf, count, ppos, buffer, n); + } + +-static struct file_operations debug_fops = { ++static const struct file_operations debug_fops = { + .read = debug_read, + .open = debug_open, + }; +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/adsp_driver.c 2010-09-17 20:12:09.000000000 -0400 +@@ -577,7 +577,7 @@ static struct adsp_device *inode_to_devi + static dev_t adsp_devno; + static struct class *adsp_class; + +-static struct file_operations adsp_fops = { ++static const struct file_operations adsp_fops = { + .owner = THIS_MODULE, + .open = adsp_open, + .unlocked_ioctl = adsp_ioctl, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_aac.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1023,7 +1023,7 @@ done: + return rc; + } + +-static struct file_operations audio_aac_fops = { ++static const struct file_operations audio_aac_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_amrnb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -834,7 +834,7 @@ done: + return rc; + } + +-static struct file_operations audio_amrnb_fops = { ++static const struct file_operations audio_amrnb_fops = { + .owner = THIS_MODULE, + .open = audamrnb_open, + .release = audamrnb_release, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c 2010-08-26 
19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_evrc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -806,7 +806,7 @@ dma_fail: + return rc; + } + +-static struct file_operations audio_evrc_fops = { ++static const struct file_operations audio_evrc_fops = { + .owner = THIS_MODULE, + .open = audevrc_open, + .release = audevrc_release, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_in.c 2010-09-17 20:12:09.000000000 -0400 +@@ -914,7 +914,7 @@ static int audpre_open(struct inode *ino + return 0; + } + +-static struct file_operations audio_fops = { ++static const struct file_operations audio_fops = { + .owner = THIS_MODULE, + .open = audio_in_open, + .release = audio_in_release, +@@ -923,7 +923,7 @@ static struct file_operations audio_fops + .unlocked_ioctl = audio_in_ioctl, + }; + +-static struct file_operations audpre_fops = { ++static const struct file_operations audpre_fops = { + .owner = THIS_MODULE, + .open = audpre_open, + .unlocked_ioctl = audpre_ioctl, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_mp3.c 2010-09-17 20:12:09.000000000 -0400 +@@ -941,7 +941,7 @@ done: + return rc; + } + +-static struct file_operations audio_mp3_fops = { ++static const struct file_operations audio_mp3_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_out.c 2010-09-17 20:12:09.000000000 -0400 +@@ -800,7 +800,7 @@ static int audpp_open(struct inode *inod + return 0; + } + +-static struct file_operations audio_fops = { ++static const struct file_operations audio_fops = { + .owner = THIS_MODULE, + .open = audio_open, + .release = audio_release, +@@ -809,7 +809,7 @@ static struct file_operations audio_fops + .unlocked_ioctl = audio_ioctl, + }; + +-static struct file_operations audpp_fops = { ++static const struct file_operations audpp_fops = { + .owner = THIS_MODULE, + .open = audpp_open, + .unlocked_ioctl = audpp_ioctl, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/audio_qcelp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -817,7 +817,7 @@ err: + return rc; + } + +-static struct file_operations audio_qcelp_fops = { ++static const struct file_operations audio_qcelp_fops = { + .owner = THIS_MODULE, + .open = audqcelp_open, + .release = audqcelp_release, +diff -urNp linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c +--- linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dream/qdsp5/snd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -242,7 +242,7 @@ err: + return rc; + } + +-static struct file_operations snd_fops = { 
++static const struct file_operations snd_fops = { + .owner = THIS_MODULE, + .open = snd_open, + .release = snd_release, +diff -urNp linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c +--- linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/dt3155/dt3155_drv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -853,7 +853,7 @@ dt3155_unlocked_ioctl(struct file *file, + * needed by init_module + * register_chrdev + *****************************************************/ +-static struct file_operations dt3155_fops = { ++static const struct file_operations dt3155_fops = { + .read = dt3155_read, + .unlocked_ioctl = dt3155_unlocked_ioctl, + .mmap = dt3155_mmap, +diff -urNp linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c +--- linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/go7007/go7007-v4l2.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1673,7 +1673,7 @@ static int go7007_vm_fault(struct vm_are + return 0; + } + +-static struct vm_operations_struct go7007_vm_ops = { ++static const struct vm_operations_struct go7007_vm_ops = { + .open = go7007_vm_open, + .close = go7007_vm_close, + .fault = go7007_vm_fault, +diff -urNp linux-2.6.35.4/drivers/staging/hv/hv.c linux-2.6.35.4/drivers/staging/hv/hv.c +--- linux-2.6.35.4/drivers/staging/hv/hv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/hv/hv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -162,7 +162,7 @@ static u64 HvDoHypercall(u64 Control, vo + u64 outputAddress = (Output) ? virt_to_phys(Output) : 0; + u32 outputAddressHi = outputAddress >> 32; + u32 outputAddressLo = outputAddress & 0xFFFFFFFF; +- volatile void *hypercallPage = gHvContext.HypercallPage; ++ volatile void *hypercallPage = ktva_ktla(gHvContext.HypercallPage); + + DPRINT_DBG(VMBUS, "Hypercall <control %llx input %p output %p>", + Control, Input, Output); +diff -urNp linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c +--- linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/msm/msm_fb_bl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -42,7 +42,7 @@ static int msm_fb_bl_update_status(struc + return 0; + } + +-static struct backlight_ops msm_fb_bl_ops = { ++static const struct backlight_ops msm_fb_bl_ops = { + .get_brightness = msm_fb_bl_get_brightness, + .update_status = msm_fb_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/staging/panel/panel.c linux-2.6.35.4/drivers/staging/panel/panel.c +--- linux-2.6.35.4/drivers/staging/panel/panel.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/panel/panel.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1304,7 +1304,7 @@ static int lcd_release(struct inode *ino + return 0; + } + +-static struct file_operations lcd_fops = { ++static const struct file_operations lcd_fops = { + .write = lcd_write, + .open = lcd_open, + .release = lcd_release, +@@ -1564,7 +1564,7 @@ static int keypad_release(struct inode * + return 0; + } + +-static struct file_operations keypad_fops = { ++static const struct file_operations keypad_fops = { + .read = keypad_read, /* read */ + .open = keypad_open, /* open */ + .release = keypad_release, /* close */ +diff -urNp linux-2.6.35.4/drivers/staging/phison/phison.c linux-2.6.35.4/drivers/staging/phison/phison.c +--- 
linux-2.6.35.4/drivers/staging/phison/phison.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/phison/phison.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,7 @@ static struct scsi_host_template phison_ + ATA_BMDMA_SHT(DRV_NAME), + }; + +-static struct ata_port_operations phison_ops = { ++static const struct ata_port_operations phison_ops = { + .inherits = &ata_bmdma_port_ops, + .prereset = phison_pre_reset, + }; +diff -urNp linux-2.6.35.4/drivers/staging/pohmelfs/inode.c linux-2.6.35.4/drivers/staging/pohmelfs/inode.c +--- linux-2.6.35.4/drivers/staging/pohmelfs/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/pohmelfs/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1846,7 +1846,7 @@ static int pohmelfs_fill_super(struct su + mutex_init(&psb->mcache_lock); + psb->mcache_root = RB_ROOT; + psb->mcache_timeout = msecs_to_jiffies(5000); +- atomic_long_set(&psb->mcache_gen, 0); ++ atomic_long_set_unchecked(&psb->mcache_gen, 0); + + psb->trans_max_pages = 100; + +diff -urNp linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c +--- linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/pohmelfs/mcache.c 2010-09-17 20:12:09.000000000 -0400 +@@ -121,7 +121,7 @@ struct pohmelfs_mcache *pohmelfs_mcache_ + m->data = data; + m->start = start; + m->size = size; +- m->gen = atomic_long_inc_return(&psb->mcache_gen); ++ m->gen = atomic_long_inc_return_unchecked(&psb->mcache_gen); + + mutex_lock(&psb->mcache_lock); + err = pohmelfs_mcache_insert(psb, m); +diff -urNp linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h +--- linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/pohmelfs/netfs.h 2010-09-17 20:12:09.000000000 -0400 +@@ -571,7 +571,7 @@ struct pohmelfs_config; + struct pohmelfs_sb { + struct rb_root mcache_root; + struct mutex mcache_lock; +- atomic_long_t mcache_gen; ++ atomic_long_unchecked_t mcache_gen; + unsigned long mcache_timeout; + + unsigned int idx; +diff -urNp linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c +--- linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/ramzswap/ramzswap_drv.c 2010-09-17 20:12:09.000000000 -0400 +@@ -693,7 +693,7 @@ void ramzswap_slot_free_notify(struct bl + return; + } + +-static struct block_device_operations ramzswap_devops = { ++static const struct block_device_operations ramzswap_devops = { + .ioctl = ramzswap_ioctl, + .swap_slot_free_notify = ramzswap_slot_free_notify, + .owner = THIS_MODULE +diff -urNp linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c +--- linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/rtl8192u/ieee80211/proc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -99,7 +99,7 @@ static int crypto_info_open(struct inode + return seq_open(file, &crypto_seq_ops); + } + +-static struct file_operations proc_crypto_ops = { ++static const struct file_operations proc_crypto_ops = { + .open = crypto_info_open, + .read = seq_read, + .llseek = seq_lseek, +diff -urNp linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c +--- 
linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/samsung-laptop/samsung-laptop.c 2010-09-17 20:12:09.000000000 -0400 +@@ -269,7 +269,7 @@ static int update_status(struct backligh + return 0; + } + +-static struct backlight_ops backlight_ops = { ++static const struct backlight_ops backlight_ops = { + .get_brightness = get_brightness, + .update_status = update_status, + }; +diff -urNp linux-2.6.35.4/drivers/staging/sep/sep_driver.c linux-2.6.35.4/drivers/staging/sep/sep_driver.c +--- linux-2.6.35.4/drivers/staging/sep/sep_driver.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/sep/sep_driver.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2637,7 +2637,7 @@ static struct pci_driver sep_pci_driver + static dev_t sep_devno; + + /* the files operations structure of the driver */ +-static struct file_operations sep_file_operations = { ++static const struct file_operations sep_file_operations = { + .owner = THIS_MODULE, + .unlocked_ioctl = sep_ioctl, + .poll = sep_poll, +diff -urNp linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c +--- linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/staging/vme/devices/vme_user.c 2010-09-17 20:12:09.000000000 -0400 +@@ -136,7 +136,7 @@ static long vme_user_unlocked_ioctl(stru + static int __init vme_user_probe(struct device *, int, int); + static int __exit vme_user_remove(struct device *, int, int); + +-static struct file_operations vme_user_fops = { ++static const struct file_operations vme_user_fops = { + .open = vme_user_open, + .release = vme_user_release, + .read = vme_user_read, +diff -urNp linux-2.6.35.4/drivers/usb/atm/usbatm.c linux-2.6.35.4/drivers/usb/atm/usbatm.c +--- linux-2.6.35.4/drivers/usb/atm/usbatm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/atm/usbatm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(stru + if (printk_ratelimit()) + atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n", + __func__, vpi, vci); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + return; + } + +@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(stru + if (length > ATM_MAX_AAL5_PDU) { + atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n", + __func__, length, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + +@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(stru + if (sarb->len < pdu_length) { + atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n", + __func__, pdu_length, sarb->len, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + + if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) { + atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n", + __func__, vcc); +- atomic_inc(&vcc->stats->rx_err); ++ atomic_inc_unchecked(&vcc->stats->rx_err); + goto out; + } + +@@ -387,7 +387,7 @@ static void usbatm_extract_one_cell(stru + if (printk_ratelimit()) + atm_err(instance, "%s: no memory for skb (length: %u)!\n", + __func__, length); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + goto out; + } + +@@ -412,7 +412,7 @@ static void usbatm_extract_one_cell(stru + + vcc->push(vcc, skb); + +- 
atomic_inc(&vcc->stats->rx); ++ atomic_inc_unchecked(&vcc->stats->rx); + out: + skb_trim(sarb, 0); + } +@@ -616,7 +616,7 @@ static void usbatm_tx_process(unsigned l + struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc; + + usbatm_pop(vcc, skb); +- atomic_inc(&vcc->stats->tx); ++ atomic_inc_unchecked(&vcc->stats->tx); + + skb = skb_dequeue(&instance->sndqueue); + } +@@ -775,11 +775,11 @@ static int usbatm_atm_proc_read(struct a + if (!left--) + return sprintf(page, + "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n", +- atomic_read(&atm_dev->stats.aal5.tx), +- atomic_read(&atm_dev->stats.aal5.tx_err), +- atomic_read(&atm_dev->stats.aal5.rx), +- atomic_read(&atm_dev->stats.aal5.rx_err), +- atomic_read(&atm_dev->stats.aal5.rx_drop)); ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx), ++ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err), ++ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop)); + + if (!left--) { + if (instance->disconnected) +diff -urNp linux-2.6.35.4/drivers/usb/class/cdc-acm.c linux-2.6.35.4/drivers/usb/class/cdc-acm.c +--- linux-2.6.35.4/drivers/usb/class/cdc-acm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/class/cdc-acm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1619,7 +1619,7 @@ static const struct usb_device_id acm_id + { USB_INTERFACE_INFO(USB_CLASS_COMM, USB_CDC_SUBCLASS_ACM, + USB_CDC_ACM_PROTO_AT_CDMA) }, + +- { } ++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(usb, acm_ids); +diff -urNp linux-2.6.35.4/drivers/usb/class/cdc-wdm.c linux-2.6.35.4/drivers/usb/class/cdc-wdm.c +--- linux-2.6.35.4/drivers/usb/class/cdc-wdm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/class/cdc-wdm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -342,7 +342,7 @@ static ssize_t wdm_write + goto outnp; + } + +- if (!file->f_flags && O_NONBLOCK) ++ if (!(file->f_flags & O_NONBLOCK)) + r = wait_event_interruptible(desc->wait, !test_bit(WDM_IN_USE, + &desc->flags)); + else +diff -urNp linux-2.6.35.4/drivers/usb/class/usblp.c linux-2.6.35.4/drivers/usb/class/usblp.c +--- linux-2.6.35.4/drivers/usb/class/usblp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/class/usblp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -226,7 +226,7 @@ static const struct quirk_printer_struct + { 0x0482, 0x0010, USBLP_QUIRK_BIDIR }, /* Kyocera Mita FS 820, by zut <kernel@zut.de> */ + { 0x04f9, 0x000d, USBLP_QUIRK_BIDIR }, /* Brother Industries, Ltd HL-1440 Laser Printer */ + { 0x04b8, 0x0202, USBLP_QUIRK_BAD_CLASS }, /* Seiko Epson Receipt Printer M129C */ +- { 0, 0 } ++ { 0, 0, 0 } + }; + + static int usblp_wwait(struct usblp *usblp, int nonblock); +@@ -1398,7 +1398,7 @@ static const struct usb_device_id usblp_ + { USB_INTERFACE_INFO(7, 1, 2) }, + { USB_INTERFACE_INFO(7, 1, 3) }, + { USB_DEVICE(0x04b8, 0x0202) }, /* Seiko Epson Receipt Printer M129C */ +- { } /* Terminating entry */ ++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ + }; + + MODULE_DEVICE_TABLE (usb, usblp_ids); +diff -urNp linux-2.6.35.4/drivers/usb/core/hcd.c linux-2.6.35.4/drivers/usb/core/hcd.c +--- linux-2.6.35.4/drivers/usb/core/hcd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/core/hcd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2381,7 +2381,7 @@ EXPORT_SYMBOL_GPL(usb_hcd_platform_shutd + + #if defined(CONFIG_USB_MON) || defined(CONFIG_USB_MON_MODULE) + +-struct usb_mon_operations *mon_ops; ++const struct 
usb_mon_operations *mon_ops; + + /* + * The registration is unlocked. +@@ -2391,7 +2391,7 @@ struct usb_mon_operations *mon_ops; + * symbols from usbcore, usbcore gets referenced and cannot be unloaded first. + */ + +-int usb_mon_register (struct usb_mon_operations *ops) ++int usb_mon_register (const struct usb_mon_operations *ops) + { + + if (mon_ops) +diff -urNp linux-2.6.35.4/drivers/usb/core/hub.c linux-2.6.35.4/drivers/usb/core/hub.c +--- linux-2.6.35.4/drivers/usb/core/hub.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/core/hub.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3453,7 +3453,7 @@ static const struct usb_device_id hub_id + .bDeviceClass = USB_CLASS_HUB}, + { .match_flags = USB_DEVICE_ID_MATCH_INT_CLASS, + .bInterfaceClass = USB_CLASS_HUB}, +- { } /* Terminating entry */ ++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ + }; + + MODULE_DEVICE_TABLE (usb, hub_id_table); +diff -urNp linux-2.6.35.4/drivers/usb/core/message.c linux-2.6.35.4/drivers/usb/core/message.c +--- linux-2.6.35.4/drivers/usb/core/message.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/core/message.c 2010-09-17 20:12:09.000000000 -0400 +@@ -869,8 +869,8 @@ char *usb_cache_string(struct usb_device + buf = kmalloc(MAX_USB_STRING_SIZE, GFP_NOIO); + if (buf) { + len = usb_string(udev, index, buf, MAX_USB_STRING_SIZE); +- if (len > 0) { +- smallbuf = kmalloc(++len, GFP_NOIO); ++ if (len++ > 0) { ++ smallbuf = kmalloc(len, GFP_NOIO); + if (!smallbuf) + return buf; + memcpy(smallbuf, buf, len); +diff -urNp linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c +--- linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/early/ehci-dbgp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1026,6 +1026,7 @@ static void kgdbdbgp_write_char(u8 chr) + early_dbgp_write(NULL, &chr, 1); + } + ++/* cannot be const, see kgdbdbgp_parse_config() */ + static struct kgdb_io kgdbdbgp_io_ops = { + .name = "kgdbdbgp", + .read_char = kgdbdbgp_read_char, +diff -urNp linux-2.6.35.4/drivers/usb/host/ehci-pci.c linux-2.6.35.4/drivers/usb/host/ehci-pci.c +--- linux-2.6.35.4/drivers/usb/host/ehci-pci.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/host/ehci-pci.c 2010-09-17 20:12:09.000000000 -0400 +@@ -419,7 +419,7 @@ static const struct pci_device_id pci_id + PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_EHCI, ~0), + .driver_data = (unsigned long) &ehci_pci_hc_driver, + }, +- { /* end: all zeroes */ } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + MODULE_DEVICE_TABLE(pci, pci_ids); + +diff -urNp linux-2.6.35.4/drivers/usb/host/uhci-hcd.c linux-2.6.35.4/drivers/usb/host/uhci-hcd.c +--- linux-2.6.35.4/drivers/usb/host/uhci-hcd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/host/uhci-hcd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -941,7 +941,7 @@ static const struct pci_device_id uhci_p + /* handle any USB UHCI controller */ + PCI_DEVICE_CLASS(PCI_CLASS_SERIAL_USB_UHCI, ~0), + .driver_data = (unsigned long) &uhci_driver, +- }, { /* end: all zeroes */ } ++ }, { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, uhci_pci_ids); +diff -urNp linux-2.6.35.4/drivers/usb/mon/mon_main.c linux-2.6.35.4/drivers/usb/mon/mon_main.c +--- linux-2.6.35.4/drivers/usb/mon/mon_main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/mon/mon_main.c 2010-09-17 20:12:09.000000000 -0400 +@@ -240,7 +240,7 @@ static struct notifier_block mon_nb = { + /* + * Ops + */ +-static 
struct usb_mon_operations mon_ops_0 = { ++static const struct usb_mon_operations mon_ops_0 = { + .urb_submit = mon_submit, + .urb_submit_error = mon_submit_error, + .urb_complete = mon_complete, +diff -urNp linux-2.6.35.4/drivers/usb/storage/debug.h linux-2.6.35.4/drivers/usb/storage/debug.h +--- linux-2.6.35.4/drivers/usb/storage/debug.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/storage/debug.h 2010-09-17 20:12:09.000000000 -0400 +@@ -54,9 +54,9 @@ void usb_stor_show_sense( unsigned char + #define US_DEBUGPX(x...) printk( x ) + #define US_DEBUG(x) x + #else +-#define US_DEBUGP(x...) +-#define US_DEBUGPX(x...) +-#define US_DEBUG(x) ++#define US_DEBUGP(x...) do {} while (0) ++#define US_DEBUGPX(x...) do {} while (0) ++#define US_DEBUG(x) do {} while (0) + #endif + + #endif +diff -urNp linux-2.6.35.4/drivers/usb/storage/usb.c linux-2.6.35.4/drivers/usb/storage/usb.c +--- linux-2.6.35.4/drivers/usb/storage/usb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/storage/usb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -122,7 +122,7 @@ MODULE_PARM_DESC(quirks, "supplemental l + + static struct us_unusual_dev us_unusual_dev_list[] = { + # include "unusual_devs.h" +- { } /* Terminating entry */ ++ { NULL, NULL, 0, 0, NULL } /* Terminating entry */ + }; + + #undef UNUSUAL_DEV +diff -urNp linux-2.6.35.4/drivers/usb/storage/usual-tables.c linux-2.6.35.4/drivers/usb/storage/usual-tables.c +--- linux-2.6.35.4/drivers/usb/storage/usual-tables.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/usb/storage/usual-tables.c 2010-09-17 20:12:09.000000000 -0400 +@@ -48,7 +48,7 @@ + + struct usb_device_id usb_storage_usb_ids[] = { + # include "unusual_devs.h" +- { } /* Terminating entry */ ++ { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 } /* Terminating entry */ + }; + EXPORT_SYMBOL_GPL(usb_storage_usb_ids); + +diff -urNp linux-2.6.35.4/drivers/uwb/wlp/messages.c linux-2.6.35.4/drivers/uwb/wlp/messages.c +--- linux-2.6.35.4/drivers/uwb/wlp/messages.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/uwb/wlp/messages.c 2010-09-17 20:12:09.000000000 -0400 +@@ -920,7 +920,7 @@ int wlp_parse_f0(struct wlp *wlp, struct + size_t len = skb->len; + size_t used; + ssize_t result; +- struct wlp_nonce enonce, rnonce; ++ struct wlp_nonce enonce = {{0}}, rnonce = {{0}}; + enum wlp_assc_error assc_err; + char enonce_buf[WLP_WSS_NONCE_STRSIZE]; + char rnonce_buf[WLP_WSS_NONCE_STRSIZE]; +diff -urNp linux-2.6.35.4/drivers/vhost/vhost.c linux-2.6.35.4/drivers/vhost/vhost.c +--- linux-2.6.35.4/drivers/vhost/vhost.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/vhost/vhost.c 2010-09-17 20:12:09.000000000 -0400 +@@ -357,7 +357,7 @@ static int init_used(struct vhost_virtqu + return get_user(vq->last_used_idx, &used->idx); + } + +-static long vhost_set_vring(struct vhost_dev *d, int ioctl, void __user *argp) ++static long vhost_set_vring(struct vhost_dev *d, unsigned int ioctl, void __user *argp) + { + struct file *eventfp, *filep = NULL, + *pollstart = NULL, *pollstop = NULL; +diff -urNp linux-2.6.35.4/drivers/video/atmel_lcdfb.c linux-2.6.35.4/drivers/video/atmel_lcdfb.c +--- linux-2.6.35.4/drivers/video/atmel_lcdfb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/atmel_lcdfb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -111,7 +111,7 @@ static int atmel_bl_get_brightness(struc + return lcdc_readl(sinfo, ATMEL_LCDC_CONTRAST_VAL); + } + +-static struct backlight_ops atmel_lcdc_bl_ops = { ++static const struct backlight_ops 
atmel_lcdc_bl_ops = { + .update_status = atmel_bl_update_status, + .get_brightness = atmel_bl_get_brightness, + }; +diff -urNp linux-2.6.35.4/drivers/video/aty/aty128fb.c linux-2.6.35.4/drivers/video/aty/aty128fb.c +--- linux-2.6.35.4/drivers/video/aty/aty128fb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/aty/aty128fb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1786,7 +1786,7 @@ static int aty128_bl_get_brightness(stru + return bd->props.brightness; + } + +-static struct backlight_ops aty128_bl_data = { ++static const struct backlight_ops aty128_bl_data = { + .get_brightness = aty128_bl_get_brightness, + .update_status = aty128_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/video/aty/atyfb_base.c linux-2.6.35.4/drivers/video/aty/atyfb_base.c +--- linux-2.6.35.4/drivers/video/aty/atyfb_base.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/aty/atyfb_base.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2221,7 +2221,7 @@ static int aty_bl_get_brightness(struct + return bd->props.brightness; + } + +-static struct backlight_ops aty_bl_data = { ++static const struct backlight_ops aty_bl_data = { + .get_brightness = aty_bl_get_brightness, + .update_status = aty_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/video/aty/radeon_backlight.c linux-2.6.35.4/drivers/video/aty/radeon_backlight.c +--- linux-2.6.35.4/drivers/video/aty/radeon_backlight.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/aty/radeon_backlight.c 2010-09-17 20:12:09.000000000 -0400 +@@ -128,7 +128,7 @@ static int radeon_bl_get_brightness(stru + return bd->props.brightness; + } + +-static struct backlight_ops radeon_bl_data = { ++static const struct backlight_ops radeon_bl_data = { + .get_brightness = radeon_bl_get_brightness, + .update_status = radeon_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c +--- linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/backlight/88pm860x_bl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -155,7 +155,7 @@ out: + return -EINVAL; + } + +-static struct backlight_ops pm860x_backlight_ops = { ++static const struct backlight_ops pm860x_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, + .update_status = pm860x_backlight_update_status, + .get_brightness = pm860x_backlight_get_brightness, +diff -urNp linux-2.6.35.4/drivers/video/backlight/max8925_bl.c linux-2.6.35.4/drivers/video/backlight/max8925_bl.c +--- linux-2.6.35.4/drivers/video/backlight/max8925_bl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/backlight/max8925_bl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -92,7 +92,7 @@ static int max8925_backlight_get_brightn + return ret; + } + +-static struct backlight_ops max8925_backlight_ops = { ++static const struct backlight_ops max8925_backlight_ops = { + .options = BL_CORE_SUSPENDRESUME, + .update_status = max8925_backlight_update_status, + .get_brightness = max8925_backlight_get_brightness, +diff -urNp linux-2.6.35.4/drivers/video/fbcmap.c linux-2.6.35.4/drivers/video/fbcmap.c +--- linux-2.6.35.4/drivers/video/fbcmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/fbcmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -266,8 +266,7 @@ int fb_set_user_cmap(struct fb_cmap_user + rc = -ENODEV; + goto out; + } +- if (cmap->start < 0 || (!info->fbops->fb_setcolreg && +- !info->fbops->fb_setcmap)) { ++ if 
(!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) { + rc = -EINVAL; + goto out1; + } +diff -urNp linux-2.6.35.4/drivers/video/fbmem.c linux-2.6.35.4/drivers/video/fbmem.c +--- linux-2.6.35.4/drivers/video/fbmem.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/fbmem.c 2010-09-17 20:12:09.000000000 -0400 +@@ -403,7 +403,7 @@ static void fb_do_show_logo(struct fb_in + image->dx += image->width + 8; + } + } else if (rotate == FB_ROTATE_UD) { +- for (x = 0; x < num && image->dx >= 0; x++) { ++ for (x = 0; x < num && (__s32)image->dx >= 0; x++) { + info->fbops->fb_imageblit(info, image); + image->dx -= image->width + 8; + } +@@ -415,7 +415,7 @@ static void fb_do_show_logo(struct fb_in + image->dy += image->height + 8; + } + } else if (rotate == FB_ROTATE_CCW) { +- for (x = 0; x < num && image->dy >= 0; x++) { ++ for (x = 0; x < num && (__s32)image->dy >= 0; x++) { + info->fbops->fb_imageblit(info, image); + image->dy -= image->height + 8; + } +@@ -1119,7 +1119,7 @@ static long do_fb_ioctl(struct fb_info * + return -EFAULT; + if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES) + return -EINVAL; +- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX) ++ if (con2fb.framebuffer >= FB_MAX) + return -EINVAL; + if (!registered_fb[con2fb.framebuffer]) + request_module("fb%d", con2fb.framebuffer); +diff -urNp linux-2.6.35.4/drivers/video/fbmon.c linux-2.6.35.4/drivers/video/fbmon.c +--- linux-2.6.35.4/drivers/video/fbmon.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/fbmon.c 2010-09-17 20:12:09.000000000 -0400 +@@ -46,7 +46,7 @@ + #ifdef DEBUG + #define DPRINTK(fmt, args...) printk(fmt,## args) + #else +-#define DPRINTK(fmt, args...) ++#define DPRINTK(fmt, args...) do {} while (0) + #endif + + #define FBMON_FIX_HEADER 1 +diff -urNp linux-2.6.35.4/drivers/video/i810/i810_accel.c linux-2.6.35.4/drivers/video/i810/i810_accel.c +--- linux-2.6.35.4/drivers/video/i810/i810_accel.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/i810/i810_accel.c 2010-09-17 20:12:09.000000000 -0400 +@@ -73,6 +73,7 @@ static inline int wait_for_space(struct + } + } + printk("ringbuffer lockup!!!\n"); ++ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space); + i810_report_error(mmio); + par->dev_flags |= LOCKUP; + info->pixmap.scan_align = 1; +diff -urNp linux-2.6.35.4/drivers/video/i810/i810_main.c linux-2.6.35.4/drivers/video/i810/i810_main.c +--- linux-2.6.35.4/drivers/video/i810/i810_main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/i810/i810_main.c 2010-09-17 20:12:09.000000000 -0400 +@@ -120,7 +120,7 @@ static struct pci_device_id i810fb_pci_t + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 4 }, + { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82815_CGC, + PCI_ANY_ID, PCI_ANY_ID, 0, 0, 5 }, +- { 0 }, ++ { 0, 0, 0, 0, 0, 0, 0 }, + }; + + static struct pci_driver i810fb_driver = { +diff -urNp linux-2.6.35.4/drivers/video/modedb.c linux-2.6.35.4/drivers/video/modedb.c +--- linux-2.6.35.4/drivers/video/modedb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/modedb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -40,240 +40,240 @@ static const struct fb_videomode modedb[ + { + /* 640x400 @ 70 Hz, 31.5 kHz hsync */ + NULL, 70, 640, 400, 39721, 40, 24, 39, 9, 96, 2, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 640x480 @ 60 Hz, 31.5 kHz hsync */ + NULL, 60, 640, 480, 39721, 40, 24, 32, 11, 96, 2, +- 0, FB_VMODE_NONINTERLACED ++ 0, 
FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 800x600 @ 56 Hz, 35.15 kHz hsync */ + NULL, 56, 800, 600, 27777, 128, 24, 22, 1, 72, 2, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1024x768 @ 87 Hz interlaced, 35.5 kHz hsync */ + NULL, 87, 1024, 768, 22271, 56, 24, 33, 8, 160, 8, +- 0, FB_VMODE_INTERLACED ++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 640x400 @ 85 Hz, 37.86 kHz hsync */ + NULL, 85, 640, 400, 31746, 96, 32, 41, 1, 64, 3, +- FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 640x480 @ 72 Hz, 36.5 kHz hsync */ + NULL, 72, 640, 480, 31746, 144, 40, 30, 8, 40, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 640x480 @ 75 Hz, 37.50 kHz hsync */ + NULL, 75, 640, 480, 31746, 120, 16, 16, 1, 64, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 800x600 @ 60 Hz, 37.8 kHz hsync */ + NULL, 60, 800, 600, 25000, 88, 40, 23, 1, 128, 4, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 640x480 @ 85 Hz, 43.27 kHz hsync */ + NULL, 85, 640, 480, 27777, 80, 56, 25, 1, 56, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x864 @ 89 Hz interlaced, 44 kHz hsync */ + NULL, 89, 1152, 864, 15384, 96, 16, 110, 1, 216, 10, +- 0, FB_VMODE_INTERLACED ++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 800x600 @ 72 Hz, 48.0 kHz hsync */ + NULL, 72, 800, 600, 20000, 64, 56, 23, 37, 120, 6, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1024x768 @ 60 Hz, 48.4 kHz hsync */ + NULL, 60, 1024, 768, 15384, 168, 8, 29, 3, 144, 6, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 640x480 @ 100 Hz, 53.01 kHz hsync */ + NULL, 100, 640, 480, 21834, 96, 32, 36, 8, 96, 6, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x864 @ 60 Hz, 53.5 kHz hsync */ + NULL, 60, 1152, 864, 11123, 208, 64, 16, 4, 256, 8, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 800x600 @ 85 Hz, 55.84 kHz hsync */ + NULL, 85, 800, 600, 16460, 160, 64, 36, 16, 64, 5, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1024x768 @ 70 Hz, 56.5 kHz hsync */ + NULL, 70, 1024, 768, 13333, 144, 24, 29, 3, 136, 6, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 87 Hz interlaced, 51 kHz hsync */ + NULL, 87, 1280, 1024, 12500, 56, 16, 128, 1, 216, 12, +- 0, FB_VMODE_INTERLACED ++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 800x600 @ 100 Hz, 64.02 kHz hsync */ + NULL, 100, 800, 600, 14357, 160, 64, 30, 4, 64, 6, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1024x768 @ 76 Hz, 62.5 kHz hsync */ + NULL, 76, 1024, 768, 11764, 208, 8, 36, 16, 120, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x864 @ 70 Hz, 62.4 kHz hsync */ + NULL, 70, 1152, 864, 10869, 106, 56, 20, 1, 160, 10, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 61 Hz, 64.2 kHz hsync */ + NULL, 61, 1280, 1024, 
9090, 200, 48, 26, 1, 184, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1400x1050 @ 60Hz, 63.9 kHz hsync */ + NULL, 60, 1400, 1050, 9259, 136, 40, 13, 1, 112, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1400x1050 @ 75,107 Hz, 82,392 kHz +hsync +vsync*/ + NULL, 75, 1400, 1050, 7190, 120, 56, 23, 10, 112, 13, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1400x1050 @ 60 Hz, ? kHz +hsync +vsync*/ + NULL, 60, 1400, 1050, 9259, 128, 40, 12, 0, 112, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1024x768 @ 85 Hz, 70.24 kHz hsync */ + NULL, 85, 1024, 768, 10111, 192, 32, 34, 14, 160, 6, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x864 @ 78 Hz, 70.8 kHz hsync */ + NULL, 78, 1152, 864, 9090, 228, 88, 32, 0, 84, 12, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 70 Hz, 74.59 kHz hsync */ + NULL, 70, 1280, 1024, 7905, 224, 32, 28, 8, 160, 8, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1600x1200 @ 60Hz, 75.00 kHz hsync */ + NULL, 60, 1600, 1200, 6172, 304, 64, 46, 1, 192, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x864 @ 84 Hz, 76.0 kHz hsync */ + NULL, 84, 1152, 864, 7407, 184, 312, 32, 0, 128, 12, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 74 Hz, 78.85 kHz hsync */ + NULL, 74, 1280, 1024, 7407, 256, 32, 34, 3, 144, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1024x768 @ 100Hz, 80.21 kHz hsync */ + NULL, 100, 1024, 768, 8658, 192, 32, 21, 3, 192, 10, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 76 Hz, 81.13 kHz hsync */ + NULL, 76, 1280, 1024, 7407, 248, 32, 34, 3, 104, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1600x1200 @ 70 Hz, 87.50 kHz hsync */ + NULL, 70, 1600, 1200, 5291, 304, 64, 46, 1, 192, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x864 @ 100 Hz, 89.62 kHz hsync */ + NULL, 100, 1152, 864, 7264, 224, 32, 17, 2, 128, 19, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 85 Hz, 91.15 kHz hsync */ + NULL, 85, 1280, 1024, 6349, 224, 64, 44, 1, 160, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1600x1200 @ 75 Hz, 93.75 kHz hsync */ + NULL, 75, 1600, 1200, 4938, 304, 64, 46, 1, 192, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1680x1050 @ 60 Hz, 65.191 kHz hsync */ + NULL, 60, 1680, 1050, 6848, 280, 104, 30, 3, 176, 6, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1600x1200 @ 85 
Hz, 105.77 kHz hsync */ + NULL, 85, 1600, 1200, 4545, 272, 16, 37, 4, 192, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x1024 @ 100 Hz, 107.16 kHz hsync */ + NULL, 100, 1280, 1024, 5502, 256, 32, 26, 7, 128, 15, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1800x1440 @ 64Hz, 96.15 kHz hsync */ + NULL, 64, 1800, 1440, 4347, 304, 96, 46, 1, 192, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1800x1440 @ 70Hz, 104.52 kHz hsync */ + NULL, 70, 1800, 1440, 4000, 304, 96, 46, 1, 192, 3, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 512x384 @ 78 Hz, 31.50 kHz hsync */ + NULL, 78, 512, 384, 49603, 48, 16, 16, 1, 64, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 512x384 @ 85 Hz, 34.38 kHz hsync */ + NULL, 85, 512, 384, 45454, 48, 16, 16, 1, 64, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 320x200 @ 70 Hz, 31.5 kHz hsync, 8:5 aspect ratio */ + NULL, 70, 320, 200, 79440, 16, 16, 20, 4, 48, 1, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 320x240 @ 60 Hz, 31.5 kHz hsync, 4:3 aspect ratio */ + NULL, 60, 320, 240, 79440, 16, 16, 16, 5, 48, 1, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 320x240 @ 72 Hz, 36.5 kHz hsync */ + NULL, 72, 320, 240, 63492, 16, 16, 16, 4, 48, 2, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 400x300 @ 56 Hz, 35.2 kHz hsync, 4:3 aspect ratio */ + NULL, 56, 400, 300, 55555, 64, 16, 10, 1, 32, 1, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 400x300 @ 60 Hz, 37.8 kHz hsync */ + NULL, 60, 400, 300, 50000, 48, 16, 11, 1, 64, 2, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 400x300 @ 72 Hz, 48.0 kHz hsync */ + NULL, 72, 400, 300, 40000, 32, 24, 11, 19, 64, 3, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 480x300 @ 56 Hz, 35.2 kHz hsync, 8:5 aspect ratio */ + NULL, 56, 480, 300, 46176, 80, 16, 10, 1, 40, 1, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 480x300 @ 60 Hz, 37.8 kHz hsync */ + NULL, 60, 480, 300, 41858, 56, 16, 11, 1, 80, 2, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 480x300 @ 63 Hz, 39.6 kHz hsync */ + NULL, 63, 480, 300, 40000, 56, 16, 11, 1, 80, 2, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 480x300 @ 72 Hz, 48.0 kHz hsync */ + NULL, 72, 480, 300, 33386, 40, 24, 11, 19, 80, 3, +- 0, FB_VMODE_DOUBLE ++ 0, FB_VMODE_DOUBLE, FB_MODE_IS_UNKNOWN + }, { + /* 1920x1200 @ 60 Hz, 74.5 Khz hsync */ + NULL, 60, 1920, 1200, 5177, 128, 336, 1, 38, 208, 3, + FB_SYNC_HOR_HIGH_ACT | FB_SYNC_VERT_HIGH_ACT, +- FB_VMODE_NONINTERLACED ++ FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1152x768, 60 Hz, PowerBook G4 Titanium I and II */ + NULL, 60, 1152, 768, 14047, 158, 26, 29, 3, 136, 6, +- FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED ++ FB_SYNC_HOR_HIGH_ACT|FB_SYNC_VERT_HIGH_ACT, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1366x768, 60 Hz, 47.403 kHz hsync, WXGA 
16:9 aspect ratio */ + NULL, 60, 1366, 768, 13806, 120, 10, 14, 3, 32, 5, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 1280x800, 60 Hz, 47.403 kHz hsync, WXGA 16:10 aspect ratio */ + NULL, 60, 1280, 800, 12048, 200, 64, 24, 1, 136, 3, +- 0, FB_VMODE_NONINTERLACED ++ 0, FB_VMODE_NONINTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 720x576i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ + NULL, 50, 720, 576, 74074, 64, 16, 39, 5, 64, 5, +- 0, FB_VMODE_INTERLACED ++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN + }, { + /* 800x520i @ 50 Hz, 15.625 kHz hsync (PAL RGB) */ + NULL, 50, 800, 520, 58823, 144, 64, 72, 28, 80, 5, +- 0, FB_VMODE_INTERLACED ++ 0, FB_VMODE_INTERLACED, FB_MODE_IS_UNKNOWN + }, + }; + +diff -urNp linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c +--- linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/nvidia/nv_backlight.c 2010-09-17 20:12:09.000000000 -0400 +@@ -87,7 +87,7 @@ static int nvidia_bl_get_brightness(stru + return bd->props.brightness; + } + +-static struct backlight_ops nvidia_bl_ops = { ++static const struct backlight_ops nvidia_bl_ops = { + .get_brightness = nvidia_bl_get_brightness, + .update_status = nvidia_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c +--- linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/omap2/displays/panel-taal.c 2010-09-17 20:12:09.000000000 -0400 +@@ -319,7 +319,7 @@ static int taal_bl_get_intensity(struct + return 0; + } + +-static struct backlight_ops taal_bl_ops = { ++static const struct backlight_ops taal_bl_ops = { + .get_brightness = taal_bl_get_intensity, + .update_status = taal_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/video/riva/fbdev.c linux-2.6.35.4/drivers/video/riva/fbdev.c +--- linux-2.6.35.4/drivers/video/riva/fbdev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/riva/fbdev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -331,7 +331,7 @@ static int riva_bl_get_brightness(struct + return bd->props.brightness; + } + +-static struct backlight_ops riva_bl_ops = { ++static const struct backlight_ops riva_bl_ops = { + .get_brightness = riva_bl_get_brightness, + .update_status = riva_bl_update_status, + }; +diff -urNp linux-2.6.35.4/drivers/video/uvesafb.c linux-2.6.35.4/drivers/video/uvesafb.c +--- linux-2.6.35.4/drivers/video/uvesafb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/uvesafb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -19,6 +19,7 @@ + #include <linux/io.h> + #include <linux/mutex.h> + #include <linux/slab.h> ++#include <linux/moduleloader.h> + #include <video/edid.h> + #include <video/uvesafb.h> + #ifdef CONFIG_X86 +@@ -121,7 +122,7 @@ static int uvesafb_helper_start(void) + NULL, + }; + +- return call_usermodehelper(v86d_path, argv, envp, 1); ++ return call_usermodehelper(v86d_path, argv, envp, UMH_WAIT_PROC); + } + + /* +@@ -569,10 +570,32 @@ static int __devinit uvesafb_vbe_getpmi( + if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) { + par->pmi_setpal = par->ypan = 0; + } else { ++ ++#ifdef CONFIG_PAX_KERNEXEC ++#ifdef CONFIG_MODULES ++ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx); ++#endif ++ if (!par->pmi_code) { ++ par->pmi_setpal = par->ypan = 0; ++ return 0; ++ } ++#endif ++ + par->pmi_base = 
(u16 *)phys_to_virt(((u32)task->t.regs.es << 4) + + task->t.regs.edi); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pax_open_kernel(); ++ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx); ++ pax_close_kernel(); ++ ++ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]); ++ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]); ++#else + par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1]; + par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2]; ++#endif ++ + printk(KERN_INFO "uvesafb: protected mode interface info at " + "%04x:%04x\n", + (u16)task->t.regs.es, (u16)task->t.regs.edi); +@@ -1800,6 +1823,11 @@ out: + if (par->vbe_modes) + kfree(par->vbe_modes); + ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ if (par->pmi_code) ++ module_free_exec(NULL, par->pmi_code); ++#endif ++ + framebuffer_release(info); + return err; + } +@@ -1826,6 +1854,12 @@ static int uvesafb_remove(struct platfor + kfree(par->vbe_state_orig); + if (par->vbe_state_saved) + kfree(par->vbe_state_saved); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ if (par->pmi_code) ++ module_free_exec(NULL, par->pmi_code); ++#endif ++ + } + + framebuffer_release(info); +diff -urNp linux-2.6.35.4/drivers/video/vesafb.c linux-2.6.35.4/drivers/video/vesafb.c +--- linux-2.6.35.4/drivers/video/vesafb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/drivers/video/vesafb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -9,6 +9,7 @@ + */ + + #include <linux/module.h> ++#include <linux/moduleloader.h> + #include <linux/kernel.h> + #include <linux/errno.h> + #include <linux/string.h> +@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* + static int vram_total __initdata; /* Set total amount of memory */ + static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? 
*/ + static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */ +-static void (*pmi_start)(void) __read_mostly; +-static void (*pmi_pal) (void) __read_mostly; ++static void (*pmi_start)(void) __read_only; ++static void (*pmi_pal) (void) __read_only; + static int depth __read_mostly; + static int vga_compat __read_mostly; + /* --------------------------------------------------------------------- */ +@@ -232,6 +233,7 @@ static int __init vesafb_probe(struct pl + unsigned int size_vmode; + unsigned int size_remap; + unsigned int size_total; ++ void *pmi_code = NULL; + + if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB) + return -ENODEV; +@@ -274,10 +276,6 @@ static int __init vesafb_probe(struct pl + size_remap = size_total; + vesafb_fix.smem_len = size_remap; + +-#ifndef __i386__ +- screen_info.vesapm_seg = 0; +-#endif +- + if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) { + printk(KERN_WARNING + "vesafb: cannot reserve video memory at 0x%lx\n", +@@ -319,9 +317,21 @@ static int __init vesafb_probe(struct pl + printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n", + vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages); + ++#ifdef __i386__ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pmi_code = module_alloc_exec(screen_info.vesapm_size); ++ if (!pmi_code) ++#elif !defined(CONFIG_PAX_KERNEXEC) ++ if (0) ++#endif ++ ++#endif ++ screen_info.vesapm_seg = 0; ++ + if (screen_info.vesapm_seg) { +- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n", +- screen_info.vesapm_seg,screen_info.vesapm_off); ++ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n", ++ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size); + } + + if (screen_info.vesapm_seg < 0xc000) +@@ -329,9 +339,25 @@ static int __init vesafb_probe(struct pl + + if (ypan || pmi_setpal) { + unsigned short *pmi_base; +- pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); +- pmi_start = (void*)((char*)pmi_base + pmi_base[1]); +- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]); ++ ++ pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pax_open_kernel(); ++ memcpy(pmi_code, pmi_base, screen_info.vesapm_size); ++#else ++ pmi_code = pmi_base; ++#endif ++ ++ pmi_start = (void*)((char*)pmi_code + pmi_base[1]); ++ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ pmi_start = ktva_ktla(pmi_start); ++ pmi_pal = ktva_ktla(pmi_pal); ++ pax_close_kernel(); ++#endif ++ + printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal); + if (pmi_base[3]) { + printk(KERN_INFO "vesafb: pmi: ports = "); +@@ -473,6 +499,11 @@ static int __init vesafb_probe(struct pl + info->node, info->fix.id); + return 0; + err: ++ ++#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC) ++ module_free_exec(NULL, pmi_code); ++#endif ++ + if (info->screen_base) + iounmap(info->screen_base); + framebuffer_release(info); +diff -urNp linux-2.6.35.4/fs/9p/vfs_inode.c linux-2.6.35.4/fs/9p/vfs_inode.c +--- linux-2.6.35.4/fs/9p/vfs_inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/9p/vfs_inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1087,7 +1087,7 @@ static 
void *v9fs_vfs_follow_link(struct + static void + v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + + P9_DPRINTK(P9_DEBUG_VFS, " %s %s\n", dentry->d_name.name, + IS_ERR(s) ? "<error>" : s); +diff -urNp linux-2.6.35.4/fs/aio.c linux-2.6.35.4/fs/aio.c +--- linux-2.6.35.4/fs/aio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/aio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -130,7 +130,7 @@ static int aio_setup_ring(struct kioctx + size += sizeof(struct io_event) * nr_events; + nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT; + +- if (nr_pages < 0) ++ if (nr_pages <= 0) + return -EINVAL; + + nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event); +diff -urNp linux-2.6.35.4/fs/attr.c linux-2.6.35.4/fs/attr.c +--- linux-2.6.35.4/fs/attr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/attr.c 2010-09-17 20:12:37.000000000 -0400 +@@ -82,6 +82,7 @@ int inode_newsize_ok(const struct inode + unsigned long limit; + + limit = rlimit(RLIMIT_FSIZE); ++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1); + if (limit != RLIM_INFINITY && offset > limit) + goto out_sig; + if (offset > inode->i_sb->s_maxbytes) +diff -urNp linux-2.6.35.4/fs/autofs/root.c linux-2.6.35.4/fs/autofs/root.c +--- linux-2.6.35.4/fs/autofs/root.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/autofs/root.c 2010-09-17 20:12:09.000000000 -0400 +@@ -301,7 +301,8 @@ static int autofs_root_symlink(struct in + set_bit(n,sbi->symlink_bitmap); + sl = &sbi->symlink[n]; + sl->len = strlen(symname); +- sl->data = kmalloc(slsize = sl->len+1, GFP_KERNEL); ++ slsize = sl->len+1; ++ sl->data = kmalloc(slsize, GFP_KERNEL); + if (!sl->data) { + clear_bit(n,sbi->symlink_bitmap); + unlock_kernel(); +diff -urNp linux-2.6.35.4/fs/autofs4/symlink.c linux-2.6.35.4/fs/autofs4/symlink.c +--- linux-2.6.35.4/fs/autofs4/symlink.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/autofs4/symlink.c 2010-09-17 20:12:09.000000000 -0400 +@@ -15,7 +15,7 @@ + static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd) + { + struct autofs_info *ino = autofs4_dentry_ino(dentry); +- nd_set_link(nd, (char *)ino->u.symlink); ++ nd_set_link(nd, ino->u.symlink); + return NULL; + } + +diff -urNp linux-2.6.35.4/fs/befs/linuxvfs.c linux-2.6.35.4/fs/befs/linuxvfs.c +--- linux-2.6.35.4/fs/befs/linuxvfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/befs/linuxvfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -493,7 +493,7 @@ static void befs_put_link(struct dentry + { + befs_inode_info *befs_ino = BEFS_I(dentry->d_inode); + if (befs_ino->i_flags & BEFS_LONG_SYMLINK) { +- char *link = nd_get_link(nd); ++ const char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); + } +diff -urNp linux-2.6.35.4/fs/binfmt_aout.c linux-2.6.35.4/fs/binfmt_aout.c +--- linux-2.6.35.4/fs/binfmt_aout.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/binfmt_aout.c 2010-09-17 20:12:37.000000000 -0400 +@@ -16,6 +16,7 @@ + #include <linux/string.h> + #include <linux/fs.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/stat.h> + #include <linux/fcntl.h> + #include <linux/ptrace.h> +@@ -97,10 +98,12 @@ static int aout_core_dump(struct coredum + + /* If the size of the dump file exceeds the rlimit, then see what would happen + if we wrote the stack, but not the data area. 
*/ ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1); + if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit) + dump.u_dsize = 0; + + /* Make sure we have enough room to write the stack and data areas. */ ++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1); + if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit) + dump.u_ssize = 0; + +@@ -238,6 +241,8 @@ static int load_aout_binary(struct linux + rlim = rlimit(RLIMIT_DATA); + if (rlim >= RLIM_INFINITY) + rlim = ~0; ++ ++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1); + if (ex.a_data + ex.a_bss > rlim) + return -ENOMEM; + +@@ -266,6 +271,27 @@ static int load_aout_binary(struct linux + install_exec_creds(bprm); + current->flags &= ~PF_FORKNOEXEC; + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ current->mm->pax_flags = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) { ++ current->mm->pax_flags |= MF_PAX_PAGEEXEC; ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP) ++ current->mm->pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT)) ++ current->mm->pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++ } ++#endif ++ + if (N_MAGIC(ex) == OMAGIC) { + unsigned long text_addr, map_size; + loff_t pos; +@@ -338,7 +364,7 @@ static int load_aout_binary(struct linux + + down_write(&current->mm->mmap_sem); + error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data, +- PROT_READ | PROT_WRITE | PROT_EXEC, ++ PROT_READ | PROT_WRITE, + MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE, + fd_offset + ex.a_text); + up_write(&current->mm->mmap_sem); +diff -urNp linux-2.6.35.4/fs/binfmt_elf.c linux-2.6.35.4/fs/binfmt_elf.c +--- linux-2.6.35.4/fs/binfmt_elf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/binfmt_elf.c 2010-09-17 20:12:37.000000000 -0400 +@@ -51,6 +51,10 @@ static int elf_core_dump(struct coredump + #define elf_core_dump NULL + #endif + ++#ifdef CONFIG_PAX_MPROTECT ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags); ++#endif ++ + #if ELF_EXEC_PAGESIZE > PAGE_SIZE + #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE + #else +@@ -70,6 +74,11 @@ static struct linux_binfmt elf_format = + .load_binary = load_elf_binary, + .load_shlib = load_elf_library, + .core_dump = elf_core_dump, ++ ++#ifdef CONFIG_PAX_MPROTECT ++ .handle_mprotect= elf_handle_mprotect, ++#endif ++ + .min_coredump = ELF_EXEC_PAGESIZE, + .hasvdso = 1 + }; +@@ -78,6 +87,8 @@ static struct linux_binfmt elf_format = + + static int set_brk(unsigned long start, unsigned long end) + { ++ unsigned long e = end; ++ + start = ELF_PAGEALIGN(start); + end = ELF_PAGEALIGN(end); + if (end > start) { +@@ -88,7 +99,7 @@ static int set_brk(unsigned long start, + if (BAD_ADDR(addr)) + return addr; + } +- current->mm->start_brk = current->mm->brk = end; ++ current->mm->start_brk = current->mm->brk = e; + return 0; + } + +@@ -149,7 +160,7 @@ create_elf_tables(struct linux_binprm *b + elf_addr_t __user *u_rand_bytes; + const char *k_platform = ELF_PLATFORM; + const char *k_base_platform = ELF_BASE_PLATFORM; +- unsigned char k_rand_bytes[16]; ++ u32 k_rand_bytes[4]; + int items; + elf_addr_t *elf_info; + int ei_index = 0; +@@ -196,8 +207,12 @@ create_elf_tables(struct linux_binprm *b + * Generate 16 random bytes for userspace PRNG seeding.
+ */ + get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes)); +- u_rand_bytes = (elf_addr_t __user *) +- STACK_ALLOC(p, sizeof(k_rand_bytes)); ++ srandom32(k_rand_bytes[0] ^ random32()); ++ srandom32(k_rand_bytes[1] ^ random32()); ++ srandom32(k_rand_bytes[2] ^ random32()); ++ srandom32(k_rand_bytes[3] ^ random32()); ++ p = STACK_ROUND(p, sizeof(k_rand_bytes)); ++ u_rand_bytes = (elf_addr_t __user *) p; + if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes))) + return -EFAULT; + +@@ -386,10 +401,10 @@ static unsigned long load_elf_interp(str + { + struct elf_phdr *elf_phdata; + struct elf_phdr *eppnt; +- unsigned long load_addr = 0; ++ unsigned long load_addr = 0, pax_task_size = TASK_SIZE; + int load_addr_set = 0; + unsigned long last_bss = 0, elf_bss = 0; +- unsigned long error = ~0UL; ++ unsigned long error = -EINVAL; + unsigned long total_size; + int retval, i, size; + +@@ -435,6 +450,11 @@ static unsigned long load_elf_interp(str + goto out_close; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ + eppnt = elf_phdata; + for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) { + if (eppnt->p_type == PT_LOAD) { +@@ -478,8 +498,8 @@ static unsigned long load_elf_interp(str + k = load_addr + eppnt->p_vaddr; + if (BAD_ADDR(k) || + eppnt->p_filesz > eppnt->p_memsz || +- eppnt->p_memsz > TASK_SIZE || +- TASK_SIZE - eppnt->p_memsz < k) { ++ eppnt->p_memsz > pax_task_size || ++ pax_task_size - eppnt->p_memsz < k) { + error = -ENOMEM; + goto out_close; + } +@@ -533,6 +553,177 @@ out: + return error; + } + ++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE) ++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (elf_phdata->p_flags & PF_PAGEEXEC) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (elf_phdata->p_flags & PF_SEGMEXEC) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if (elf_phdata->p_flags & PF_EMUTRAMP) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (elf_phdata->p_flags & PF_MPROTECT) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef 
CONFIG_PAX_EMUTRAMP ++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (!(elf_phdata->p_flags & PF_NOMPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) ++ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++#ifdef CONFIG_PAX_EI_PAX ++static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC)) ++ pax_flags |= MF_PAX_PAGEEXEC; ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) ++ pax_flags |= MF_PAX_SEGMEXEC; ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC) ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ if ((__supported_pte_mask & _PAGE_NX)) ++ pax_flags &= ~MF_PAX_SEGMEXEC; ++ else ++ pax_flags &= ~MF_PAX_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP)) ++ pax_flags |= MF_PAX_EMUTRAMP; ++#endif ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT)) ++ pax_flags |= MF_PAX_MPROTECT; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP)) ++ pax_flags |= MF_PAX_RANDMMAP; ++#endif ++ ++ return pax_flags; ++} ++#endif ++ ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) ++static long pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata) ++{ ++ unsigned long pax_flags = 0UL; ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ unsigned long i; ++#endif ++ ++#ifdef CONFIG_PAX_EI_PAX ++ pax_flags = pax_parse_ei_pax(elf_ex); ++#endif ++ ++#ifdef CONFIG_PAX_PT_PAX_FLAGS ++ for (i = 0UL; i < elf_ex->e_phnum; i++) ++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) { ++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) || ++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) || ++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) || ++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) || ++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP))) ++ return -EINVAL; ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ if (pax_softmode) ++ pax_flags = pax_parse_softmode(&elf_phdata[i]); ++ else ++#endif ++ ++ pax_flags = pax_parse_hardmode(&elf_phdata[i]); ++ break; ++ } ++#endif ++ ++ if (0 > pax_check_flags(&pax_flags)) ++ return -EINVAL; ++ ++ current->mm->pax_flags = pax_flags; ++ return 0; ++} ++#endif ++ + /* + * These are the functions used to load ELF style executables and shared + * libraries. There is no binary dependent code anywhere else. 
+@@ -549,6 +740,11 @@ static unsigned long randomize_stack_top + { + unsigned int random_variable = 0; + ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (randomize_va_space) ++ return stack_top - current->mm->delta_stack; ++#endif ++ + if ((current->flags & PF_RANDOMIZE) && + !(current->personality & ADDR_NO_RANDOMIZE)) { + random_variable = get_random_int() & STACK_RND_MASK; +@@ -567,7 +763,7 @@ static int load_elf_binary(struct linux_ + unsigned long load_addr = 0, load_bias = 0; + int load_addr_set = 0; + char * elf_interpreter = NULL; +- unsigned long error; ++ unsigned long error = 0; + struct elf_phdr *elf_ppnt, *elf_phdata; + unsigned long elf_bss, elf_brk; + int retval, i; +@@ -577,11 +773,11 @@ static int load_elf_binary(struct linux_ + unsigned long start_code, end_code, start_data, end_data; + unsigned long reloc_func_desc = 0; + int executable_stack = EXSTACK_DEFAULT; +- unsigned long def_flags = 0; + struct { + struct elfhdr elf_ex; + struct elfhdr interp_elf_ex; + } *loc; ++ unsigned long pax_task_size = TASK_SIZE; + + loc = kmalloc(sizeof(*loc), GFP_KERNEL); + if (!loc) { +@@ -719,11 +915,80 @@ static int load_elf_binary(struct linux_ + + /* OK, This is the point of no return */ + current->flags &= ~PF_FORKNOEXEC; +- current->mm->def_flags = def_flags; ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ current->mm->pax_flags = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ current->mm->call_dl_resolve = 0UL; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) ++ current->mm->call_syscall = 0UL; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ current->mm->delta_mmap = 0UL; ++ current->mm->delta_stack = 0UL; ++#endif ++ ++ current->mm->def_flags = 0; ++ ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) ++ if (0 > pax_parse_elf_flags(&loc->elf_ex, elf_phdata)) { ++ send_sig(SIGKILL, current, 0); ++ goto out_free_dentry; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++ pax_set_initial_flags(bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++ if (pax_set_initial_flags_func) ++ (pax_set_initial_flags_func)(bprm); ++#endif ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) { ++ current->mm->context.user_cs_limit = PAGE_SIZE; ++ current->mm->def_flags |= VM_PAGEEXEC; ++ } ++#endif ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE; ++ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE; ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++ } ++#endif ++ ++#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC) ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu()); ++ put_cpu(); ++ } ++#endif + + /* Do this immediately, since STACK_TOP as used in setup_arg_pages + may depend on the personality. 
*/ + SET_PERSONALITY(loc->elf_ex); ++ ++#ifdef CONFIG_PAX_ASLR ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) { ++ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT; ++ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT; ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ executable_stack = EXSTACK_DISABLE_X; ++ current->personality &= ~READ_IMPLIES_EXEC; ++ } else ++#endif ++ + if (elf_read_implies_exec(loc->elf_ex, executable_stack)) + current->personality |= READ_IMPLIES_EXEC; + +@@ -805,6 +1070,20 @@ static int load_elf_binary(struct linux_ + #else + load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr); + #endif ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ /* PaX: randomize base address at the default exe base if requested */ ++ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) { ++#ifdef CONFIG_SPARC64 ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1); ++#else ++ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT; ++#endif ++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias); ++ elf_flags |= MAP_FIXED; ++ } ++#endif ++ + } + + error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, +@@ -837,9 +1116,9 @@ static int load_elf_binary(struct linux_ + * allowed task size. Note that p_filesz must always be + * <= p_memsz so it is only necessary to check p_memsz. + */ +- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz || +- elf_ppnt->p_memsz > TASK_SIZE || +- TASK_SIZE - elf_ppnt->p_memsz < k) { ++ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz || ++ elf_ppnt->p_memsz > pax_task_size || ++ pax_task_size - elf_ppnt->p_memsz < k) { + /* set_brk can never work. Avoid overflows. */ + send_sig(SIGKILL, current, 0); + retval = -EINVAL; +@@ -867,6 +1146,11 @@ static int load_elf_binary(struct linux_ + start_data += load_bias; + end_data += load_bias; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (current->mm->pax_flags & MF_PAX_RANDMMAP) ++ elf_brk += PAGE_SIZE + ((pax_get_random_long() & ~PAGE_MASK) << 4); ++#endif ++ + /* Calling set_brk effectively mmaps the pages that we need + * for the bss and break sections. We must do this before + * mapping in the interpreter, to make sure it doesn't wind +@@ -878,9 +1162,11 @@ static int load_elf_binary(struct linux_ + goto out_free_dentry; + } + if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) { +- send_sig(SIGSEGV, current, 0); +- retval = -EFAULT; /* Nobody gets to see this, but.. */ +- goto out_free_dentry; ++ /* ++ * This bss-zeroing can fail if the ELF ++ * file specifies odd protections. So ++ * we don't check the return value ++ */ + } + + if (elf_interpreter) { +@@ -1091,7 +1377,7 @@ out: + * Decide what to dump of a segment, part, all or none. 
+ */ + static unsigned long vma_dump_size(struct vm_area_struct *vma, +- unsigned long mm_flags) ++ unsigned long mm_flags, long signr) + { + #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type)) + +@@ -1125,7 +1411,7 @@ static unsigned long vma_dump_size(struc + if (vma->vm_file == NULL) + return 0; + +- if (FILTER(MAPPED_PRIVATE)) ++ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE)) + goto whole; + + /* +@@ -1347,9 +1633,9 @@ static void fill_auxv_note(struct memelf + { + elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv; + int i = 0; +- do ++ do { + i += 2; +- while (auxv[i - 2] != AT_NULL); ++ } while (auxv[i - 2] != AT_NULL); + fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv); + } + +@@ -1855,14 +2141,14 @@ static void fill_extnum_info(struct elfh + } + + static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma, +- unsigned long mm_flags) ++ struct coredump_params *cprm) + { + struct vm_area_struct *vma; + size_t size = 0; + + for (vma = first_vma(current, gate_vma); vma != NULL; + vma = next_vma(vma, gate_vma)) +- size += vma_dump_size(vma, mm_flags); ++ size += vma_dump_size(vma, cprm->mm_flags, cprm->signr); + return size; + } + +@@ -1956,7 +2242,7 @@ static int elf_core_dump(struct coredump + + dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE); + +- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags); ++ offset += elf_core_vma_data_size(gate_vma, cprm); + offset += elf_core_extra_data_size(); + e_shoff = offset; + +@@ -1970,10 +2256,12 @@ static int elf_core_dump(struct coredump + offset = dataoff; + + size += sizeof(*elf); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf))) + goto end_coredump; + + size += sizeof(*phdr4note); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit + || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note))) + goto end_coredump; +@@ -1987,7 +2275,7 @@ static int elf_core_dump(struct coredump + phdr.p_offset = offset; + phdr.p_vaddr = vma->vm_start; + phdr.p_paddr = 0; +- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags); ++ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->signr); + phdr.p_memsz = vma->vm_end - vma->vm_start; + offset += phdr.p_filesz; + phdr.p_flags = vma->vm_flags & VM_READ ? 
PF_R : 0; +@@ -1998,6 +2286,7 @@ static int elf_core_dump(struct coredump + phdr.p_align = ELF_EXEC_PAGESIZE; + + size += sizeof(phdr); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit + || !dump_write(cprm->file, &phdr, sizeof(phdr))) + goto end_coredump; +@@ -2022,7 +2311,7 @@ static int elf_core_dump(struct coredump + unsigned long addr; + unsigned long end; + +- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags); ++ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->signr); + + for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) { + struct page *page; +@@ -2031,6 +2320,7 @@ static int elf_core_dump(struct coredump + page = get_dump_page(addr); + if (page) { + void *kaddr = kmap(page); ++ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1); + stop = ((size += PAGE_SIZE) > cprm->limit) || + !dump_write(cprm->file, kaddr, + PAGE_SIZE); +@@ -2048,6 +2338,7 @@ static int elf_core_dump(struct coredump + + if (e_phnum == PN_XNUM) { + size += sizeof(*shdr4extnum); ++ gr_learn_resource(current, RLIMIT_CORE, size, 1); + if (size > cprm->limit + || !dump_write(cprm->file, shdr4extnum, + sizeof(*shdr4extnum))) +@@ -2068,6 +2359,97 @@ out: + + #endif /* CONFIG_ELF_CORE */ + ++#ifdef CONFIG_PAX_MPROTECT ++/* PaX: non-PIC ELF libraries need relocations on their executable segments ++ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly ++ * we'll remove VM_MAYWRITE for good on RELRO segments. ++ * ++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment ++ * basis because we want to allow the common case and not the special ones. ++ */ ++static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags) ++{ ++ struct elfhdr elf_h; ++ struct elf_phdr elf_p; ++ unsigned long i; ++ unsigned long oldflags; ++ bool is_textrel_rw, is_textrel_rx, is_relro; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT)) ++ return; ++ ++ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ); ++ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ; ++ ++#ifdef CONFIG_PAX_ELFRELOCS ++ /* possible TEXTREL */ ++ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ); ++ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ); ++#else ++ is_textrel_rw = false; ++ is_textrel_rx = false; ++#endif ++ ++ /* possible RELRO */ ++ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ); ++ ++ if (!is_textrel_rw && !is_textrel_rx && !is_relro) ++ return; ++ ++ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) || ++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) || ++ ++#ifdef CONFIG_PAX_ETEXECRELOCS ++ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || ++#else ++ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) || ++#endif ++ ++ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) || ++ !elf_check_arch(&elf_h) || ++ elf_h.e_phentsize != sizeof(struct elf_phdr) || ++ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr)) ++ return; ++ ++ for (i = 0UL; i < elf_h.e_phnum; i++) { ++ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, 
sizeof(elf_p))) ++ return; ++ switch (elf_p.p_type) { ++ case PT_DYNAMIC: ++ if (!is_textrel_rw && !is_textrel_rx) ++ continue; ++ i = 0UL; ++ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) { ++ elf_dyn dyn; ++ ++ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn))) ++ return; ++ if (dyn.d_tag == DT_NULL) ++ return; ++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) { ++ gr_log_textrel(vma); ++ if (is_textrel_rw) ++ vma->vm_flags |= VM_MAYWRITE; ++ else ++ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */ ++ vma->vm_flags &= ~VM_MAYWRITE; ++ return; ++ } ++ i++; ++ } ++ return; ++ ++ case PT_GNU_RELRO: ++ if (!is_relro) ++ continue; ++ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start) ++ vma->vm_flags &= ~VM_MAYWRITE; ++ return; ++ } ++ } ++} ++#endif ++ + static int __init init_elf_binfmt(void) + { + return register_binfmt(&elf_format); +diff -urNp linux-2.6.35.4/fs/binfmt_flat.c linux-2.6.35.4/fs/binfmt_flat.c +--- linux-2.6.35.4/fs/binfmt_flat.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/binfmt_flat.c 2010-09-17 20:12:09.000000000 -0400 +@@ -567,7 +567,9 @@ static int load_flat_file(struct linux_b + realdatastart = (unsigned long) -ENOMEM; + printk("Unable to allocate RAM for process data, errno %d\n", + (int)-realdatastart); ++ down_write(¤t->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len); ++ up_write(¤t->mm->mmap_sem); + ret = realdatastart; + goto err; + } +@@ -591,8 +593,10 @@ static int load_flat_file(struct linux_b + } + if (IS_ERR_VALUE(result)) { + printk("Unable to read data+bss, errno %d\n", (int)-result); ++ down_write(¤t->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len); + do_munmap(current->mm, realdatastart, len); ++ up_write(¤t->mm->mmap_sem); + ret = result; + goto err; + } +@@ -661,8 +665,10 @@ static int load_flat_file(struct linux_b + } + if (IS_ERR_VALUE(result)) { + printk("Unable to read code+data+bss, errno %d\n",(int)-result); ++ down_write(¤t->mm->mmap_sem); + do_munmap(current->mm, textpos, text_len + data_len + extra + + MAX_SHARED_LIBS * sizeof(unsigned long)); ++ up_write(¤t->mm->mmap_sem); + ret = result; + goto err; + } +diff -urNp linux-2.6.35.4/fs/binfmt_misc.c linux-2.6.35.4/fs/binfmt_misc.c +--- linux-2.6.35.4/fs/binfmt_misc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/binfmt_misc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -693,7 +693,7 @@ static int bm_fill_super(struct super_bl + static struct tree_descr bm_files[] = { + [2] = {"status", &bm_status_operations, S_IWUSR|S_IRUGO}, + [3] = {"register", &bm_register_operations, S_IWUSR}, +- /* last one */ {""} ++ /* last one */ {"", NULL, 0} + }; + int err = simple_fill_super(sb, 0x42494e4d, bm_files); + if (!err) +diff -urNp linux-2.6.35.4/fs/bio.c linux-2.6.35.4/fs/bio.c +--- linux-2.6.35.4/fs/bio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/bio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1213,7 +1213,7 @@ static void bio_copy_kern_endio(struct b + const int read = bio_data_dir(bio) == READ; + struct bio_map_data *bmd = bio->bi_private; + int i; +- char *p = bmd->sgvecs[0].iov_base; ++ char *p = (__force char *)bmd->sgvecs[0].iov_base; + + __bio_for_each_segment(bvec, bio, i, 0) { + char *addr = page_address(bvec->bv_page); +diff -urNp linux-2.6.35.4/fs/block_dev.c linux-2.6.35.4/fs/block_dev.c +--- linux-2.6.35.4/fs/block_dev.c 2010-08-26 
19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/block_dev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -647,7 +647,7 @@ static bool bd_may_claim(struct block_de + else if (bdev->bd_contains == bdev) + return true; /* is a whole device which isn't held */ + +- else if (whole->bd_holder == bd_claim) ++ else if (whole->bd_holder == (void *)bd_claim) + return true; /* is a partition of a device that is being partitioned */ + else if (whole->bd_holder != NULL) + return false; /* is a partition of a held device */ +diff -urNp linux-2.6.35.4/fs/btrfs/ctree.c linux-2.6.35.4/fs/btrfs/ctree.c +--- linux-2.6.35.4/fs/btrfs/ctree.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/btrfs/ctree.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3763,7 +3763,6 @@ setup_items_for_insert(struct btrfs_tran + + ret = 0; + if (slot == 0) { +- struct btrfs_disk_key disk_key; + btrfs_cpu_key_to_disk(&disk_key, cpu_key); + ret = fixup_low_keys(trans, root, path, &disk_key, 1); + } +diff -urNp linux-2.6.35.4/fs/btrfs/disk-io.c linux-2.6.35.4/fs/btrfs/disk-io.c +--- linux-2.6.35.4/fs/btrfs/disk-io.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/btrfs/disk-io.c 2010-09-17 20:12:09.000000000 -0400 +@@ -40,7 +40,7 @@ + #include "tree-log.h" + #include "free-space-cache.h" + +-static struct extent_io_ops btree_extent_io_ops; ++static const struct extent_io_ops btree_extent_io_ops; + static void end_workqueue_fn(struct btrfs_work *work); + static void free_fs_root(struct btrfs_root *root); + +@@ -2597,7 +2597,7 @@ out: + return 0; + } + +-static struct extent_io_ops btree_extent_io_ops = { ++static const struct extent_io_ops btree_extent_io_ops = { + .write_cache_pages_lock_hook = btree_lock_page_hook, + .readpage_end_io_hook = btree_readpage_end_io_hook, + .submit_bio_hook = btree_submit_bio_hook, +diff -urNp linux-2.6.35.4/fs/btrfs/extent_io.h linux-2.6.35.4/fs/btrfs/extent_io.h +--- linux-2.6.35.4/fs/btrfs/extent_io.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/btrfs/extent_io.h 2010-09-17 20:12:09.000000000 -0400 +@@ -51,36 +51,36 @@ typedef int (extent_submit_bio_hook_t)(s + struct bio *bio, int mirror_num, + unsigned long bio_flags, u64 bio_offset); + struct extent_io_ops { +- int (*fill_delalloc)(struct inode *inode, struct page *locked_page, ++ int (* const fill_delalloc)(struct inode *inode, struct page *locked_page, + u64 start, u64 end, int *page_started, + unsigned long *nr_written); +- int (*writepage_start_hook)(struct page *page, u64 start, u64 end); +- int (*writepage_io_hook)(struct page *page, u64 start, u64 end); ++ int (* const writepage_start_hook)(struct page *page, u64 start, u64 end); ++ int (* const writepage_io_hook)(struct page *page, u64 start, u64 end); + extent_submit_bio_hook_t *submit_bio_hook; +- int (*merge_bio_hook)(struct page *page, unsigned long offset, ++ int (* const merge_bio_hook)(struct page *page, unsigned long offset, + size_t size, struct bio *bio, + unsigned long bio_flags); +- int (*readpage_io_hook)(struct page *page, u64 start, u64 end); +- int (*readpage_io_failed_hook)(struct bio *bio, struct page *page, ++ int (* const readpage_io_hook)(struct page *page, u64 start, u64 end); ++ int (* const readpage_io_failed_hook)(struct bio *bio, struct page *page, + u64 start, u64 end, + struct extent_state *state); +- int (*writepage_io_failed_hook)(struct bio *bio, struct page *page, ++ int (* const writepage_io_failed_hook)(struct bio *bio, struct page *page, + u64 start, u64 end, + struct extent_state *state); +- int (*readpage_end_io_hook)(struct 
page *page, u64 start, u64 end, ++ int (* const readpage_end_io_hook)(struct page *page, u64 start, u64 end, + struct extent_state *state); +- int (*writepage_end_io_hook)(struct page *page, u64 start, u64 end, ++ int (* const writepage_end_io_hook)(struct page *page, u64 start, u64 end, + struct extent_state *state, int uptodate); +- int (*set_bit_hook)(struct inode *inode, struct extent_state *state, ++ int (* const set_bit_hook)(struct inode *inode, struct extent_state *state, + int *bits); +- int (*clear_bit_hook)(struct inode *inode, struct extent_state *state, ++ int (* const clear_bit_hook)(struct inode *inode, struct extent_state *state, + int *bits); +- int (*merge_extent_hook)(struct inode *inode, ++ int (* const merge_extent_hook)(struct inode *inode, + struct extent_state *new, + struct extent_state *other); +- int (*split_extent_hook)(struct inode *inode, ++ int (* const split_extent_hook)(struct inode *inode, + struct extent_state *orig, u64 split); +- int (*write_cache_pages_lock_hook)(struct page *page); ++ int (* const write_cache_pages_lock_hook)(struct page *page); + }; + + struct extent_io_tree { +@@ -90,7 +90,7 @@ struct extent_io_tree { + u64 dirty_bytes; + spinlock_t lock; + spinlock_t buffer_lock; +- struct extent_io_ops *ops; ++ const struct extent_io_ops *ops; + }; + + struct extent_state { +diff -urNp linux-2.6.35.4/fs/btrfs/free-space-cache.c linux-2.6.35.4/fs/btrfs/free-space-cache.c +--- linux-2.6.35.4/fs/btrfs/free-space-cache.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/btrfs/free-space-cache.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1075,8 +1075,6 @@ u64 btrfs_alloc_from_cluster(struct btrf + + while(1) { + if (entry->bytes < bytes || entry->offset < min_start) { +- struct rb_node *node; +- + node = rb_next(&entry->offset_index); + if (!node) + break; +@@ -1227,7 +1225,7 @@ again: + */ + while (entry->bitmap || found_bitmap || + (!entry->bitmap && entry->bytes < min_bytes)) { +- struct rb_node *node = rb_next(&entry->offset_index); ++ node = rb_next(&entry->offset_index); + + if (entry->bitmap && entry->bytes > bytes + empty_size) { + ret = btrfs_bitmap_cluster(block_group, entry, cluster, +diff -urNp linux-2.6.35.4/fs/btrfs/inode.c linux-2.6.35.4/fs/btrfs/inode.c +--- linux-2.6.35.4/fs/btrfs/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/btrfs/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -64,7 +64,7 @@ static const struct inode_operations btr + static const struct address_space_operations btrfs_aops; + static const struct address_space_operations btrfs_symlink_aops; + static const struct file_operations btrfs_dir_file_operations; +-static struct extent_io_ops btrfs_extent_io_ops; ++static const struct extent_io_ops btrfs_extent_io_ops; + + static struct kmem_cache *btrfs_inode_cachep; + struct kmem_cache *btrfs_trans_handle_cachep; +@@ -6958,7 +6958,7 @@ static const struct file_operations btrf + .fsync = btrfs_sync_file, + }; + +-static struct extent_io_ops btrfs_extent_io_ops = { ++static const struct extent_io_ops btrfs_extent_io_ops = { + .fill_delalloc = run_delalloc_range, + .submit_bio_hook = btrfs_submit_bio_hook, + .merge_bio_hook = btrfs_merge_bio_hook, +diff -urNp linux-2.6.35.4/fs/buffer.c linux-2.6.35.4/fs/buffer.c +--- linux-2.6.35.4/fs/buffer.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/buffer.c 2010-09-17 20:12:37.000000000 -0400 +@@ -25,6 +25,7 @@ + #include <linux/percpu.h> + #include <linux/slab.h> + #include <linux/capability.h> ++#include <linux/security.h> + #include 
<linux/blkdev.h> + #include <linux/file.h> + #include <linux/quotaops.h> +diff -urNp linux-2.6.35.4/fs/cachefiles/bind.c linux-2.6.35.4/fs/cachefiles/bind.c +--- linux-2.6.35.4/fs/cachefiles/bind.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/cachefiles/bind.c 2010-09-17 20:12:09.000000000 -0400 +@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachef + args); + + /* start by checking things over */ +- ASSERT(cache->fstop_percent >= 0 && +- cache->fstop_percent < cache->fcull_percent && ++ ASSERT(cache->fstop_percent < cache->fcull_percent && + cache->fcull_percent < cache->frun_percent && + cache->frun_percent < 100); + +- ASSERT(cache->bstop_percent >= 0 && +- cache->bstop_percent < cache->bcull_percent && ++ ASSERT(cache->bstop_percent < cache->bcull_percent && + cache->bcull_percent < cache->brun_percent && + cache->brun_percent < 100); + +diff -urNp linux-2.6.35.4/fs/cachefiles/daemon.c linux-2.6.35.4/fs/cachefiles/daemon.c +--- linux-2.6.35.4/fs/cachefiles/daemon.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/cachefiles/daemon.c 2010-09-17 20:12:09.000000000 -0400 +@@ -195,7 +195,7 @@ static ssize_t cachefiles_daemon_read(st + if (n > buflen) + return -EMSGSIZE; + +- if (copy_to_user(_buffer, buffer, n) != 0) ++ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0) + return -EFAULT; + + return n; +@@ -221,7 +221,7 @@ static ssize_t cachefiles_daemon_write(s + if (test_bit(CACHEFILES_DEAD, &cache->flags)) + return -EIO; + +- if (datalen < 0 || datalen > PAGE_SIZE - 1) ++ if (datalen > PAGE_SIZE - 1) + return -EOPNOTSUPP; + + /* drag the command string into the kernel so we can parse it */ +@@ -385,7 +385,7 @@ static int cachefiles_daemon_fstop(struc + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (fstop < 0 || fstop >= cache->fcull_percent) ++ if (fstop >= cache->fcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->fstop_percent = fstop; +@@ -457,7 +457,7 @@ static int cachefiles_daemon_bstop(struc + if (args[0] != '%' || args[1] != '\0') + return -EINVAL; + +- if (bstop < 0 || bstop >= cache->bcull_percent) ++ if (bstop >= cache->bcull_percent) + return cachefiles_daemon_range_error(cache, args); + + cache->bstop_percent = bstop; +diff -urNp linux-2.6.35.4/fs/cachefiles/rdwr.c linux-2.6.35.4/fs/cachefiles/rdwr.c +--- linux-2.6.35.4/fs/cachefiles/rdwr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/cachefiles/rdwr.c 2010-09-17 20:12:09.000000000 -0400 +@@ -945,7 +945,7 @@ int cachefiles_write_page(struct fscache + old_fs = get_fs(); + set_fs(KERNEL_DS); + ret = file->f_op->write( +- file, (const void __user *) data, len, &pos); ++ file, (__force const void __user *) data, len, &pos); + set_fs(old_fs); + kunmap(page); + if (ret != len) +diff -urNp linux-2.6.35.4/fs/cifs/cifs_uniupr.h linux-2.6.35.4/fs/cifs/cifs_uniupr.h +--- linux-2.6.35.4/fs/cifs/cifs_uniupr.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/cifs/cifs_uniupr.h 2010-09-17 20:12:09.000000000 -0400 +@@ -132,7 +132,7 @@ const struct UniCaseRange CifsUniUpperRa + {0x0490, 0x04cc, UniCaseRangeU0490}, + {0x1e00, 0x1ffc, UniCaseRangeU1e00}, + {0xff40, 0xff5a, UniCaseRangeUff40}, +- {0} ++ {0, 0, NULL} + }; + #endif + +diff -urNp linux-2.6.35.4/fs/cifs/link.c linux-2.6.35.4/fs/cifs/link.c +--- linux-2.6.35.4/fs/cifs/link.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/cifs/link.c 2010-09-17 20:12:09.000000000 -0400 +@@ -216,7 +216,7 @@ cifs_symlink(struct inode *inode, struct + + void 
cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie) + { +- char *p = nd_get_link(nd); ++ const char *p = nd_get_link(nd); + if (!IS_ERR(p)) + kfree(p); + } +diff -urNp linux-2.6.35.4/fs/compat_binfmt_elf.c linux-2.6.35.4/fs/compat_binfmt_elf.c +--- linux-2.6.35.4/fs/compat_binfmt_elf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/compat_binfmt_elf.c 2010-09-17 20:12:09.000000000 -0400 +@@ -30,11 +30,13 @@ + #undef elf_phdr + #undef elf_shdr + #undef elf_note ++#undef elf_dyn + #undef elf_addr_t + #define elfhdr elf32_hdr + #define elf_phdr elf32_phdr + #define elf_shdr elf32_shdr + #define elf_note elf32_note ++#define elf_dyn Elf32_Dyn + #define elf_addr_t Elf32_Addr + + /* +diff -urNp linux-2.6.35.4/fs/compat.c linux-2.6.35.4/fs/compat.c +--- linux-2.6.35.4/fs/compat.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/compat.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1433,14 +1433,12 @@ static int compat_copy_strings(int argc, + if (!kmapped_page || kpos != (pos & PAGE_MASK)) { + struct page *page; + +-#ifdef CONFIG_STACK_GROWSUP + ret = expand_stack_downwards(bprm->vma, pos); + if (ret < 0) { + /* We've exceed the stack rlimit. */ + ret = -E2BIG; + goto out; + } +-#endif + ret = get_user_pages(current, bprm->mm, pos, + 1, 1, 1, &page, NULL); + if (ret <= 0) { +@@ -1486,6 +1484,11 @@ int compat_do_execve(char * filename, + compat_uptr_t __user *envp, + struct pt_regs * regs) + { ++#ifdef CONFIG_GRKERNSEC ++ struct file *old_exec_file; ++ struct acl_subject_label *old_acl; ++ struct rlimit old_rlim[RLIM_NLIMITS]; ++#endif + struct linux_binprm *bprm; + struct file *file; + struct files_struct *displaced; +@@ -1522,6 +1525,14 @@ int compat_do_execve(char * filename, + bprm->filename = filename; + bprm->interp = filename; + ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->cred->user->processes), 1); ++ retval = -EAGAIN; ++ if (gr_handle_nproc()) ++ goto out_file; ++ retval = -EACCES; ++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) ++ goto out_file; ++ + retval = bprm_mm_init(bprm); + if (retval) + goto out_file; +@@ -1551,9 +1562,40 @@ int compat_do_execve(char * filename, + if (retval < 0) + goto out; + ++ if (!gr_tpe_allow(file)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ if (gr_check_crash_exec(file)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); ++ ++ gr_handle_exec_args(bprm, (char __user * __user *)argv); ++ ++#ifdef CONFIG_GRKERNSEC ++ old_acl = current->acl; ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); ++ old_exec_file = current->exec_file; ++ get_file(file); ++ current->exec_file = file; ++#endif ++ ++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt, ++ bprm->unsafe & LSM_UNSAFE_SHARE); ++ if (retval < 0) ++ goto out_fail; ++ + retval = search_binary_handler(bprm, regs); + if (retval < 0) +- goto out; ++ goto out_fail; ++#ifdef CONFIG_GRKERNSEC ++ if (old_exec_file) ++ fput(old_exec_file); ++#endif + + /* execve succeeded */ + current->fs->in_exec = 0; +@@ -1564,6 +1606,14 @@ int compat_do_execve(char * filename, + put_files_struct(displaced); + return retval; + ++out_fail: ++#ifdef CONFIG_GRKERNSEC ++ current->acl = old_acl; ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); ++ fput(current->exec_file); ++ current->exec_file = old_exec_file; ++#endif ++ + out: + if (bprm->mm) + mmput(bprm->mm); +diff -urNp linux-2.6.35.4/fs/debugfs/inode.c linux-2.6.35.4/fs/debugfs/inode.c +--- linux-2.6.35.4/fs/debugfs/inode.c 
2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/debugfs/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -129,7 +129,7 @@ static inline int debugfs_positive(struc + + static int debug_fill_super(struct super_block *sb, void *data, int silent) + { +- static struct tree_descr debug_files[] = {{""}}; ++ static struct tree_descr debug_files[] = {{"", NULL, 0}}; + + return simple_fill_super(sb, DEBUGFS_MAGIC, debug_files); + } +diff -urNp linux-2.6.35.4/fs/dlm/lockspace.c linux-2.6.35.4/fs/dlm/lockspace.c +--- linux-2.6.35.4/fs/dlm/lockspace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/dlm/lockspace.c 2010-09-17 20:12:09.000000000 -0400 +@@ -200,7 +200,7 @@ static int dlm_uevent(struct kset *kset, + return 0; + } + +-static struct kset_uevent_ops dlm_uevent_ops = { ++static const struct kset_uevent_ops dlm_uevent_ops = { + .uevent = dlm_uevent, + }; + +diff -urNp linux-2.6.35.4/fs/ecryptfs/inode.c linux-2.6.35.4/fs/ecryptfs/inode.c +--- linux-2.6.35.4/fs/ecryptfs/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ecryptfs/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -658,7 +658,7 @@ static int ecryptfs_readlink_lower(struc + old_fs = get_fs(); + set_fs(get_ds()); + rc = lower_dentry->d_inode->i_op->readlink(lower_dentry, +- (char __user *)lower_buf, ++ (__force char __user *)lower_buf, + lower_bufsiz); + set_fs(old_fs); + if (rc < 0) +@@ -704,7 +704,7 @@ static void *ecryptfs_follow_link(struct + } + old_fs = get_fs(); + set_fs(get_ds()); +- rc = dentry->d_inode->i_op->readlink(dentry, (char __user *)buf, len); ++ rc = dentry->d_inode->i_op->readlink(dentry, (__force char __user *)buf, len); + set_fs(old_fs); + if (rc < 0) { + kfree(buf); +@@ -719,7 +719,7 @@ out: + static void + ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr) + { +- char *buf = nd_get_link(nd); ++ const char *buf = nd_get_link(nd); + if (!IS_ERR(buf)) { + /* Free the char* */ + kfree(buf); +diff -urNp linux-2.6.35.4/fs/ecryptfs/miscdev.c linux-2.6.35.4/fs/ecryptfs/miscdev.c +--- linux-2.6.35.4/fs/ecryptfs/miscdev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ecryptfs/miscdev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -328,7 +328,7 @@ check_list: + goto out_unlock_msg_ctx; + i = 5; + if (msg_ctx->msg) { +- if (copy_to_user(&buf[i], packet_length, packet_length_size)) ++ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size)) + goto out_unlock_msg_ctx; + i += packet_length_size; + if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size)) +diff -urNp linux-2.6.35.4/fs/exec.c linux-2.6.35.4/fs/exec.c +--- linux-2.6.35.4/fs/exec.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/exec.c 2010-09-17 20:12:37.000000000 -0400 +@@ -55,12 +55,24 @@ + #include <linux/fsnotify.h> + #include <linux/fs_struct.h> + #include <linux/pipe_fs_i.h> ++#include <linux/random.h> ++#include <linux/seq_file.h> ++ ++#ifdef CONFIG_PAX_REFCOUNT ++#include <linux/kallsyms.h> ++#include <linux/kdebug.h> ++#endif + + #include <asm/uaccess.h> + #include <asm/mmu_context.h> + #include <asm/tlb.h> + #include "internal.h" + ++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS ++void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++EXPORT_SYMBOL(pax_set_initial_flags_func); ++#endif ++ + int core_uses_pid; + char core_pattern[CORENAME_MAX_SIZE] = "core"; + unsigned int core_pipe_limit; +@@ -114,7 +126,7 @@ SYSCALL_DEFINE1(uselib, const char __use + goto out; + + file = do_filp_open(AT_FDCWD, tmp, +- O_LARGEFILE | O_RDONLY | 
FMODE_EXEC, 0, ++ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0, + MAY_READ | MAY_EXEC | MAY_OPEN); + putname(tmp); + error = PTR_ERR(file); +@@ -162,18 +174,10 @@ static struct page *get_arg_page(struct + int write) + { + struct page *page; +- int ret; + +-#ifdef CONFIG_STACK_GROWSUP +- if (write) { +- ret = expand_stack_downwards(bprm->vma, pos); +- if (ret < 0) +- return NULL; +- } +-#endif +- ret = get_user_pages(current, bprm->mm, pos, +- 1, write, 1, &page, NULL); +- if (ret <= 0) ++ if (0 > expand_stack_downwards(bprm->vma, pos)) ++ return NULL; ++ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL)) + return NULL; + + if (write) { +@@ -246,6 +250,11 @@ static int __bprm_mm_init(struct linux_b + vma->vm_end = STACK_TOP_MAX; + vma->vm_start = vma->vm_end - PAGE_SIZE; + vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC); ++#endif ++ + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + INIT_LIST_HEAD(&vma->anon_vma_chain); + err = insert_vm_struct(mm, vma); +@@ -255,6 +264,12 @@ static int __bprm_mm_init(struct linux_b + mm->stack_vm = mm->total_vm = 1; + up_write(&mm->mmap_sem); + bprm->p = vma->vm_end - sizeof(void *); ++ ++#ifdef CONFIG_PAX_RANDUSTACK ++ if (randomize_va_space) ++ bprm->p ^= (pax_get_random_long() & ~15) & ~PAGE_MASK; ++#endif ++ + return 0; + err: + up_write(&mm->mmap_sem); +@@ -476,7 +491,7 @@ int copy_strings_kernel(int argc,char ** + int r; + mm_segment_t oldfs = get_fs(); + set_fs(KERNEL_DS); +- r = copy_strings(argc, (char __user * __user *)argv, bprm); ++ r = copy_strings(argc, (__force char __user * __user *)argv, bprm); + set_fs(oldfs); + return r; + } +@@ -506,7 +521,8 @@ static int shift_arg_pages(struct vm_are + unsigned long new_end = old_end - shift; + struct mmu_gather *tlb; + +- BUG_ON(new_start > new_end); ++ if (new_start >= new_end || new_start < mmap_min_addr) ++ return -EFAULT; + + /* + * ensure there are no vmas between where we want to go +@@ -515,6 +531,10 @@ static int shift_arg_pages(struct vm_are + if (vma != find_vma(mm, new_start)) + return -EFAULT; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ BUG_ON(pax_find_mirror_vma(vma)); ++#endif ++ + /* + * cover the whole range: [new_start, old_end) + */ +@@ -605,8 +625,28 @@ int setup_arg_pages(struct linux_binprm + bprm->exec -= stack_shift; + + down_write(&mm->mmap_sem); ++ ++ /* Move stack pages down in memory. */ ++ if (stack_shift) { ++ ret = shift_arg_pages(vma, stack_shift); ++ if (ret) ++ goto out_unlock; ++ } ++ + vm_flags = VM_STACK_FLAGS; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ vm_flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ vm_flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + /* + * Adjust stack execute permissions; explicitly enable for + * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone +@@ -625,13 +665,6 @@ int setup_arg_pages(struct linux_binprm + goto out_unlock; + BUG_ON(prev != vma); + +- /* Move stack pages down in memory. 
*/ +- if (stack_shift) { +- ret = shift_arg_pages(vma, stack_shift); +- if (ret) +- goto out_unlock; +- } +- + /* mprotect_fixup is overkill to remove the temporary stack flags */ + vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP; + +@@ -671,7 +704,7 @@ struct file *open_exec(const char *name) + int err; + + file = do_filp_open(AT_FDCWD, name, +- O_LARGEFILE | O_RDONLY | FMODE_EXEC, 0, ++ O_LARGEFILE | O_RDONLY | FMODE_EXEC | FMODE_GREXEC, 0, + MAY_EXEC | MAY_OPEN); + if (IS_ERR(file)) + goto out; +@@ -708,7 +741,7 @@ int kernel_read(struct file *file, loff_ + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- result = vfs_read(file, (void __user *)addr, count, &pos); ++ result = vfs_read(file, (__force void __user *)addr, count, &pos); + set_fs(old_fs); + return result; + } +@@ -1125,7 +1158,7 @@ int check_unsafe_exec(struct linux_binpr + } + rcu_read_unlock(); + +- if (p->fs->users > n_fs) { ++ if (atomic_read(&p->fs->users) > n_fs) { + bprm->unsafe |= LSM_UNSAFE_SHARE; + } else { + res = -EAGAIN; +@@ -1321,6 +1354,11 @@ int do_execve(char * filename, + char __user *__user *envp, + struct pt_regs * regs) + { ++#ifdef CONFIG_GRKERNSEC ++ struct file *old_exec_file; ++ struct acl_subject_label *old_acl; ++ struct rlimit old_rlim[RLIM_NLIMITS]; ++#endif + struct linux_binprm *bprm; + struct file *file; + struct files_struct *displaced; +@@ -1357,6 +1395,18 @@ int do_execve(char * filename, + bprm->filename = filename; + bprm->interp = filename; + ++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(¤t->cred->user->processes), 1); ++ ++ if (gr_handle_nproc()) { ++ retval = -EAGAIN; ++ goto out_file; ++ } ++ ++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) { ++ retval = -EACCES; ++ goto out_file; ++ } ++ + retval = bprm_mm_init(bprm); + if (retval) + goto out_file; +@@ -1386,10 +1436,41 @@ int do_execve(char * filename, + if (retval < 0) + goto out; + ++ if (!gr_tpe_allow(file)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ if (gr_check_crash_exec(file)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt); ++ ++ gr_handle_exec_args(bprm, argv); ++ ++#ifdef CONFIG_GRKERNSEC ++ old_acl = current->acl; ++ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim)); ++ old_exec_file = current->exec_file; ++ get_file(file); ++ current->exec_file = file; ++#endif ++ ++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt, ++ bprm->unsafe & LSM_UNSAFE_SHARE); ++ if (retval < 0) ++ goto out_fail; ++ + current->flags &= ~PF_KTHREAD; + retval = search_binary_handler(bprm,regs); + if (retval < 0) +- goto out; ++ goto out_fail; ++#ifdef CONFIG_GRKERNSEC ++ if (old_exec_file) ++ fput(old_exec_file); ++#endif + + /* execve succeeded */ + current->fs->in_exec = 0; +@@ -1400,6 +1481,14 @@ int do_execve(char * filename, + put_files_struct(displaced); + return retval; + ++out_fail: ++#ifdef CONFIG_GRKERNSEC ++ current->acl = old_acl; ++ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim)); ++ fput(current->exec_file); ++ current->exec_file = old_exec_file; ++#endif ++ + out: + if (bprm->mm) + mmput (bprm->mm); +@@ -1563,6 +1652,225 @@ out: + return ispipe; + } + ++int pax_check_flags(unsigned long *flags) ++{ ++ int retval = 0; ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC) ++ if (*flags & MF_PAX_SEGMEXEC) ++ { ++ *flags &= ~MF_PAX_SEGMEXEC; ++ retval = -EINVAL; ++ } ++#endif ++ ++ if ((*flags & MF_PAX_PAGEEXEC) ++ ++#ifdef CONFIG_PAX_PAGEEXEC ++ && (*flags & 
MF_PAX_SEGMEXEC) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_PAGEEXEC; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_MPROTECT) ++ ++#ifdef CONFIG_PAX_MPROTECT ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_MPROTECT; ++ retval = -EINVAL; ++ } ++ ++ if ((*flags & MF_PAX_EMUTRAMP) ++ ++#ifdef CONFIG_PAX_EMUTRAMP ++ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) ++#endif ++ ++ ) ++ { ++ *flags &= ~MF_PAX_EMUTRAMP; ++ retval = -EINVAL; ++ } ++ ++ return retval; ++} ++ ++EXPORT_SYMBOL(pax_check_flags); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp) ++{ ++ struct task_struct *tsk = current; ++ struct mm_struct *mm = current->mm; ++ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL); ++ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL); ++ char *path_exec = NULL; ++ char *path_fault = NULL; ++ unsigned long start = 0UL, end = 0UL, offset = 0UL; ++ ++ if (buffer_exec && buffer_fault) { ++ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL; ++ ++ down_read(&mm->mmap_sem); ++ vma = mm->mmap; ++ while (vma && (!vma_exec || !vma_fault)) { ++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file) ++ vma_exec = vma; ++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end) ++ vma_fault = vma; ++ vma = vma->vm_next; ++ } ++ if (vma_exec) { ++ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE); ++ if (IS_ERR(path_exec)) ++ path_exec = "<path too long>"; ++ else { ++ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\"); ++ if (path_exec) { ++ *path_exec = 0; ++ path_exec = buffer_exec; ++ } else ++ path_exec = "<path too long>"; ++ } ++ } ++ if (vma_fault) { ++ start = vma_fault->vm_start; ++ end = vma_fault->vm_end; ++ offset = vma_fault->vm_pgoff << PAGE_SHIFT; ++ if (vma_fault->vm_file) { ++ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE); ++ if (IS_ERR(path_fault)) ++ path_fault = "<path too long>"; ++ else { ++ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\"); ++ if (path_fault) { ++ *path_fault = 0; ++ path_fault = buffer_fault; ++ } else ++ path_fault = "<path too long>"; ++ } ++ } else ++ path_fault = "<anonymous mapping>"; ++ } ++ up_read(&mm->mmap_sem); ++ } ++ if (tsk->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset); ++ else ++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); ++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " ++ "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk), ++ task_uid(tsk), task_euid(tsk), pc, sp); ++ free_page((unsigned long)buffer_exec); ++ free_page((unsigned long)buffer_fault); ++ pax_report_insns(pc, sp); ++ do_coredump(SIGKILL, SIGKILL, regs); ++} ++#endif ++ ++#ifdef CONFIG_PAX_REFCOUNT ++void pax_report_refcount_overflow(struct pt_regs *regs) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ else ++ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", ++ current->comm, task_pid_nr(current), current_uid(), current_euid()); ++ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); ++ 
show_regs(regs); ++ force_sig_info(SIGKILL, SEND_SIG_FORCED, current); ++} ++#endif ++ ++#ifdef CONFIG_PAX_USERCOPY ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++struct stack_frame { ++ struct stack_frame *next_frame; ++ unsigned long return_address; ++}; ++#endif ++ ++/* 0: not at all, 1: fully, 2: fully inside frame, ++ -1: partially (implies an error) */ ++ ++int object_is_on_stack(const void *obj, unsigned long len) ++{ ++ const void *stack = task_stack_page(current); ++ const void *stackend = stack + THREAD_SIZE; ++ ++ if (obj + len < obj) ++ return -1; ++ ++ if (stack <= obj && obj + len <= stackend) { ++#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86) ++ void *frame = __builtin_frame_address(2); ++ void *oldframe = __builtin_frame_address(1); ++ /* ++ bottom ----------------------------------------------> top ++ [saved bp][saved ip][args][local vars][saved bp][saved ip] ++ ^----------------^ ++ allow copies only within here ++ */ ++ while (frame) { ++ /* if obj + len extends past the last frame, this ++ check won't pass and the next frame will be 0, ++ causing us to bail out and correctly report ++ the copy as invalid ++ */ ++ if (obj + len <= frame) { ++ if (obj >= (oldframe + (2 * sizeof(void *)))) ++ return 2; ++ else ++ return -1; ++ } ++ oldframe = frame; ++ frame = ((struct stack_frame *)frame)->next_frame; ++ } ++ return -1; ++#else ++ return 1; ++#endif ++ } ++ ++ if (obj + len <= stack || stackend <= obj) ++ return 0; ++ ++ return -1; ++} ++ ++ ++void pax_report_leak_to_user(const void *ptr, unsigned long len) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: kernel memory leak attempt detected from %p (%lu bytes)\n", ++ ¤t->signal->curr_ip, ptr, len); ++ else ++ printk(KERN_ERR "PAX: kernel memory leak attempt detected from %p (%lu bytes)\n", ptr, len); ++ dump_stack(); ++ do_group_exit(SIGKILL); ++} ++ ++void pax_report_overflow_from_user(const void *ptr, unsigned long len) ++{ ++ if (current->signal->curr_ip) ++ printk(KERN_ERR "PAX: From %pI4: kernel memory overflow attempt detected to %p (%lu bytes)\n", ++ ¤t->signal->curr_ip, ptr, len); ++ else ++ printk(KERN_ERR "PAX: kernel memory overflow attempt detected to %p (%lu bytes)\n", ptr, len); ++ dump_stack(); ++ do_group_exit(SIGKILL); ++} ++#endif ++ + static int zap_process(struct task_struct *start, int exit_code) + { + struct task_struct *t; +@@ -1773,17 +2081,17 @@ static void wait_for_dump_helpers(struct + pipe = file->f_path.dentry->d_inode->i_pipe; + + pipe_lock(pipe); +- pipe->readers++; +- pipe->writers--; ++ atomic_inc(&pipe->readers); ++ atomic_dec(&pipe->writers); + +- while ((pipe->readers > 1) && (!signal_pending(current))) { ++ while ((atomic_read(&pipe->readers) > 1) && (!signal_pending(current))) { + wake_up_interruptible_sync(&pipe->wait); + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + pipe_wait(pipe); + } + +- pipe->readers--; +- pipe->writers++; ++ atomic_dec(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe_unlock(pipe); + + } +@@ -1891,6 +2199,10 @@ void do_coredump(long signr, int exit_co + */ + clear_thread_flag(TIF_SIGPENDING); + ++ if (signr == SIGKILL || signr == SIGILL) ++ gr_handle_brute_attach(current); ++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1); ++ + /* + * lock_kernel() because format_corename() is controlled by sysctl, which + * uses lock_kernel() +diff -urNp linux-2.6.35.4/fs/ext2/balloc.c linux-2.6.35.4/fs/ext2/balloc.c +--- linux-2.6.35.4/fs/ext2/balloc.c 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/fs/ext2/balloc.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1193,7 +1193,7 @@ static int ext2_has_free_blocks(struct e + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && + sbi->s_resuid != current_fsuid() && + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { + return 0; +diff -urNp linux-2.6.35.4/fs/ext2/xattr.c linux-2.6.35.4/fs/ext2/xattr.c +--- linux-2.6.35.4/fs/ext2/xattr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext2/xattr.c 2010-09-17 20:12:09.000000000 -0400 +@@ -86,8 +86,8 @@ + printk("\n"); \ + } while (0) + #else +-# define ea_idebug(f...) +-# define ea_bdebug(f...) ++# define ea_idebug(inode, f...) do {} while (0) ++# define ea_bdebug(bh, f...) do {} while (0) + #endif + + static int ext2_xattr_set2(struct inode *, struct buffer_head *, +diff -urNp linux-2.6.35.4/fs/ext3/balloc.c linux-2.6.35.4/fs/ext3/balloc.c +--- linux-2.6.35.4/fs/ext3/balloc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext3/balloc.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1422,7 +1422,7 @@ static int ext3_has_free_blocks(struct e + + free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter); + root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count); +- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) && ++ if (free_blocks < root_blocks + 1 && !capable_nolog(CAP_SYS_RESOURCE) && + sbi->s_resuid != current_fsuid() && + (sbi->s_resgid == 0 || !in_group_p (sbi->s_resgid))) { + return 0; +diff -urNp linux-2.6.35.4/fs/ext3/namei.c linux-2.6.35.4/fs/ext3/namei.c +--- linux-2.6.35.4/fs/ext3/namei.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext3/namei.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1168,7 +1168,7 @@ static struct ext3_dir_entry_2 *do_split + char *data1 = (*bh)->b_data, *data2; + unsigned split, move, size; + struct ext3_dir_entry_2 *de = NULL, *de2; +- int err = 0, i; ++ int i, err = 0; + + bh2 = ext3_append (handle, dir, &newblock, &err); + if (!(bh2)) { +diff -urNp linux-2.6.35.4/fs/ext3/xattr.c linux-2.6.35.4/fs/ext3/xattr.c +--- linux-2.6.35.4/fs/ext3/xattr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext3/xattr.c 2010-09-17 20:12:09.000000000 -0400 +@@ -89,8 +89,8 @@ + printk("\n"); \ + } while (0) + #else +-# define ea_idebug(f...) +-# define ea_bdebug(f...) ++# define ea_idebug(f...) do {} while (0) ++# define ea_bdebug(f...) do {} while (0) + #endif + + static void ext3_xattr_cache_insert(struct buffer_head *); +diff -urNp linux-2.6.35.4/fs/ext4/balloc.c linux-2.6.35.4/fs/ext4/balloc.c +--- linux-2.6.35.4/fs/ext4/balloc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext4/balloc.c 2010-09-17 20:12:37.000000000 -0400 +@@ -522,7 +522,7 @@ int ext4_has_free_blocks(struct ext4_sb_ + /* Hm, nope. Are (enough) root reserved blocks available? 
*/ + if (sbi->s_resuid == current_fsuid() || + ((sbi->s_resgid != 0) && in_group_p(sbi->s_resgid)) || +- capable(CAP_SYS_RESOURCE)) { ++ capable_nolog(CAP_SYS_RESOURCE)) { + if (free_blocks >= (nblocks + dirty_blocks)) + return 1; + } +diff -urNp linux-2.6.35.4/fs/ext4/namei.c linux-2.6.35.4/fs/ext4/namei.c +--- linux-2.6.35.4/fs/ext4/namei.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext4/namei.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1197,7 +1197,7 @@ static struct ext4_dir_entry_2 *do_split + char *data1 = (*bh)->b_data, *data2; + unsigned split, move, size; + struct ext4_dir_entry_2 *de = NULL, *de2; +- int err = 0, i; ++ int i, err = 0; + + bh2 = ext4_append (handle, dir, &newblock, &err); + if (!(bh2)) { +diff -urNp linux-2.6.35.4/fs/ext4/xattr.c linux-2.6.35.4/fs/ext4/xattr.c +--- linux-2.6.35.4/fs/ext4/xattr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ext4/xattr.c 2010-09-17 20:12:09.000000000 -0400 +@@ -82,8 +82,8 @@ + printk("\n"); \ + } while (0) + #else +-# define ea_idebug(f...) +-# define ea_bdebug(f...) ++# define ea_idebug(inode, f...) do {} while (0) ++# define ea_bdebug(bh, f...) do {} while (0) + #endif + + static void ext4_xattr_cache_insert(struct buffer_head *); +diff -urNp linux-2.6.35.4/fs/fcntl.c linux-2.6.35.4/fs/fcntl.c +--- linux-2.6.35.4/fs/fcntl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fcntl.c 2010-09-17 20:12:37.000000000 -0400 +@@ -224,6 +224,11 @@ int __f_setown(struct file *filp, struct + if (err) + return err; + ++ if (gr_handle_chroot_fowner(pid, type)) ++ return -ENOENT; ++ if (gr_check_protected_task_fowner(pid, type)) ++ return -EACCES; ++ + f_modown(filp, pid, type, force); + return 0; + } +@@ -348,6 +353,7 @@ static long do_fcntl(int fd, unsigned in + switch (cmd) { + case F_DUPFD: + case F_DUPFD_CLOEXEC: ++ gr_learn_resource(current, RLIMIT_NOFILE, arg, 0); + if (arg >= rlimit(RLIMIT_NOFILE)) + break; + err = alloc_fd(arg, cmd == F_DUPFD_CLOEXEC ? O_CLOEXEC : 0); +diff -urNp linux-2.6.35.4/fs/fifo.c linux-2.6.35.4/fs/fifo.c +--- linux-2.6.35.4/fs/fifo.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fifo.c 2010-09-17 20:12:09.000000000 -0400 +@@ -58,10 +58,10 @@ static int fifo_open(struct inode *inode + */ + filp->f_op = &read_pipefifo_fops; + pipe->r_counter++; +- if (pipe->readers++ == 0) ++ if (atomic_inc_return(&pipe->readers) == 1) + wake_up_partner(inode); + +- if (!pipe->writers) { ++ if (!atomic_read(&pipe->writers)) { + if ((filp->f_flags & O_NONBLOCK)) { + /* suppress POLLHUP until we have + * seen a writer */ +@@ -82,15 +82,15 @@ static int fifo_open(struct inode *inode + * errno=ENXIO when there is no process reading the FIFO. 
+ */ + ret = -ENXIO; +- if ((filp->f_flags & O_NONBLOCK) && !pipe->readers) ++ if ((filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers)) + goto err; + + filp->f_op = &write_pipefifo_fops; + pipe->w_counter++; +- if (!pipe->writers++) ++ if (atomic_inc_return(&pipe->writers) == 1) + wake_up_partner(inode); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + wait_for_partner(inode, &pipe->r_counter); + if (signal_pending(current)) + goto err_wr; +@@ -106,11 +106,11 @@ static int fifo_open(struct inode *inode + */ + filp->f_op = &rdwr_pipefifo_fops; + +- pipe->readers++; +- pipe->writers++; ++ atomic_inc(&pipe->readers); ++ atomic_inc(&pipe->writers); + pipe->r_counter++; + pipe->w_counter++; +- if (pipe->readers == 1 || pipe->writers == 1) ++ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1) + wake_up_partner(inode); + break; + +@@ -124,19 +124,19 @@ static int fifo_open(struct inode *inode + return 0; + + err_rd: +- if (!--pipe->readers) ++ if (atomic_dec_and_test(&pipe->readers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err_wr: +- if (!--pipe->writers) ++ if (atomic_dec_and_test(&pipe->writers)) + wake_up_interruptible(&pipe->wait); + ret = -ERESTARTSYS; + goto err; + + err: +- if (!pipe->readers && !pipe->writers) ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) + free_pipe_info(inode); + + err_nocleanup: +diff -urNp linux-2.6.35.4/fs/file.c linux-2.6.35.4/fs/file.c +--- linux-2.6.35.4/fs/file.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/file.c 2010-09-17 20:12:37.000000000 -0400 +@@ -14,6 +14,7 @@ + #include <linux/slab.h> + #include <linux/vmalloc.h> + #include <linux/file.h> ++#include <linux/security.h> + #include <linux/fdtable.h> + #include <linux/bitops.h> + #include <linux/interrupt.h> +@@ -257,6 +258,7 @@ int expand_files(struct files_struct *fi + * N.B. For clone tasks sharing a files structure, this test + * will limit the total number of files that can be opened. + */ ++ gr_learn_resource(current, RLIMIT_NOFILE, nr, 0); + if (nr >= rlimit(RLIMIT_NOFILE)) + return -EMFILE; + +diff -urNp linux-2.6.35.4/fs/fs_struct.c linux-2.6.35.4/fs/fs_struct.c +--- linux-2.6.35.4/fs/fs_struct.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fs_struct.c 2010-09-17 20:12:37.000000000 -0400 +@@ -4,6 +4,7 @@ + #include <linux/path.h> + #include <linux/slab.h> + #include <linux/fs_struct.h> ++#include <linux/grsecurity.h> + + /* + * Replace the fs->{rootmnt,root} with {mnt,dentry}. Put the old values. 
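The fifo.c hunks above, together with the fs_struct.c changes that follow, convert plain integer reference counts (pipe->readers, pipe->writers, fs->users) into atomic_t and replace the open-coded ++/-- tests with atomic_inc_return()/atomic_dec_and_test(). That keeps the counters coherent independently of the surrounding locking and brings them under PaX's atomic_t overflow instrumentation (CONFIG_PAX_REFCOUNT). A minimal user-space sketch of the same pattern, with C11 atomic_int standing in for the kernel's atomic_t and puts() standing in for the wake-up calls:

/* Sketch only: atomic_int stands in for atomic_t. */
#include <stdatomic.h>
#include <stdio.h>

struct pipe_sketch {
    atomic_int readers;    /* a plain counter in the unpatched kernel */
};

static void open_reader(struct pipe_sketch *p)
{
    /* kernel: if (atomic_inc_return(&pipe->readers) == 1) wake_up_partner(inode); */
    if (atomic_fetch_add(&p->readers, 1) == 0)
        puts("first reader: wake up partner");
}

static void release_reader(struct pipe_sketch *p)
{
    /* kernel: if (atomic_dec_and_test(&pipe->readers)) wake_up_interruptible(&pipe->wait); */
    if (atomic_fetch_sub(&p->readers, 1) == 1)
        puts("last reader: wake up waiters");
}

int main(void)
{
    struct pipe_sketch p;
    atomic_init(&p.readers, 0);
    open_reader(&p);
    release_reader(&p);
    return 0;
}

atomic_fetch_add() returns the value before the increment, so the == 0 test is the same first-reader check that atomic_inc_return() == 1 performs in the patched fifo_open().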
+@@ -17,6 +18,7 @@ void set_fs_root(struct fs_struct *fs, s + old_root = fs->root; + fs->root = *path; + path_get(path); ++ gr_set_chroot_entries(current, path); + write_unlock(&fs->lock); + if (old_root.dentry) + path_put(&old_root); +@@ -56,6 +58,7 @@ void chroot_fs_refs(struct path *old_roo + && fs->root.mnt == old_root->mnt) { + path_get(new_root); + fs->root = *new_root; ++ gr_set_chroot_entries(p, new_root); + count++; + } + if (fs->pwd.dentry == old_root->dentry +@@ -89,7 +92,8 @@ void exit_fs(struct task_struct *tsk) + task_lock(tsk); + write_lock(&fs->lock); + tsk->fs = NULL; +- kill = !--fs->users; ++ gr_clear_chroot_entries(tsk); ++ kill = !atomic_dec_return(&fs->users); + write_unlock(&fs->lock); + task_unlock(tsk); + if (kill) +@@ -102,7 +106,7 @@ struct fs_struct *copy_fs_struct(struct + struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL); + /* We don't need to lock fs - think why ;-) */ + if (fs) { +- fs->users = 1; ++ atomic_set(&fs->users, 1); + fs->in_exec = 0; + rwlock_init(&fs->lock); + fs->umask = old->umask; +@@ -127,8 +131,9 @@ int unshare_fs_struct(void) + + task_lock(current); + write_lock(&fs->lock); +- kill = !--fs->users; ++ kill = !atomic_dec_return(&fs->users); + current->fs = new_fs; ++ gr_set_chroot_entries(current, &new_fs->root); + write_unlock(&fs->lock); + task_unlock(current); + +@@ -147,7 +152,7 @@ EXPORT_SYMBOL(current_umask); + + /* to be mentioned only in INIT_TASK */ + struct fs_struct init_fs = { +- .users = 1, ++ .users = ATOMIC_INIT(1), + .lock = __RW_LOCK_UNLOCKED(init_fs.lock), + .umask = 0022, + }; +@@ -162,12 +167,13 @@ void daemonize_fs_struct(void) + task_lock(current); + + write_lock(&init_fs.lock); +- init_fs.users++; ++ atomic_inc(&init_fs.users); + write_unlock(&init_fs.lock); + + write_lock(&fs->lock); + current->fs = &init_fs; +- kill = !--fs->users; ++ gr_set_chroot_entries(current, ¤t->fs->root); ++ kill = !atomic_dec_return(&fs->users); + write_unlock(&fs->lock); + + task_unlock(current); +diff -urNp linux-2.6.35.4/fs/fuse/control.c linux-2.6.35.4/fs/fuse/control.c +--- linux-2.6.35.4/fs/fuse/control.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fuse/control.c 2010-09-17 20:12:09.000000000 -0400 +@@ -293,7 +293,7 @@ void fuse_ctl_remove_conn(struct fuse_co + + static int fuse_ctl_fill_super(struct super_block *sb, void *data, int silent) + { +- struct tree_descr empty_descr = {""}; ++ struct tree_descr empty_descr = {"", NULL, 0}; + struct fuse_conn *fc; + int err; + +diff -urNp linux-2.6.35.4/fs/fuse/cuse.c linux-2.6.35.4/fs/fuse/cuse.c +--- linux-2.6.35.4/fs/fuse/cuse.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fuse/cuse.c 2010-09-17 20:12:09.000000000 -0400 +@@ -529,8 +529,18 @@ static int cuse_channel_release(struct i + return rc; + } + +-static struct file_operations cuse_channel_fops; /* initialized during init */ +- ++static const struct file_operations cuse_channel_fops = { /* initialized during init */ ++ .owner = THIS_MODULE, ++ .llseek = no_llseek, ++ .read = do_sync_read, ++ .aio_read = fuse_dev_read, ++ .write = do_sync_write, ++ .aio_write = fuse_dev_write, ++ .poll = fuse_dev_poll, ++ .open = cuse_channel_open, ++ .release = cuse_channel_release, ++ .fasync = fuse_dev_fasync, ++}; + + /************************************************************************** + * Misc stuff and module initializatiion +@@ -576,12 +586,6 @@ static int __init cuse_init(void) + for (i = 0; i < CUSE_CONNTBL_LEN; i++) + INIT_LIST_HEAD(&cuse_conntbl[i]); + +- /* inherit and extend 
fuse_dev_operations */ +- cuse_channel_fops = fuse_dev_operations; +- cuse_channel_fops.owner = THIS_MODULE; +- cuse_channel_fops.open = cuse_channel_open; +- cuse_channel_fops.release = cuse_channel_release; +- + cuse_class = class_create(THIS_MODULE, "cuse"); + if (IS_ERR(cuse_class)) + return PTR_ERR(cuse_class); +diff -urNp linux-2.6.35.4/fs/fuse/dev.c linux-2.6.35.4/fs/fuse/dev.c +--- linux-2.6.35.4/fs/fuse/dev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fuse/dev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1031,7 +1031,7 @@ static ssize_t fuse_dev_do_read(struct f + return err; + } + +-static ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, ++ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) + { + struct fuse_copy_state cs; +@@ -1045,6 +1045,8 @@ static ssize_t fuse_dev_read(struct kioc + return fuse_dev_do_read(fc, file, &cs, iov_length(iov, nr_segs)); + } + ++EXPORT_SYMBOL_GPL(fuse_dev_read); ++ + static int fuse_dev_pipe_buf_steal(struct pipe_inode_info *pipe, + struct pipe_buffer *buf) + { +@@ -1088,7 +1090,7 @@ static ssize_t fuse_dev_splice_read(stru + ret = 0; + pipe_lock(pipe); + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -1387,7 +1389,7 @@ static ssize_t fuse_dev_do_write(struct + return err; + } + +-static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, ++ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos) + { + struct fuse_copy_state cs; +@@ -1400,6 +1402,8 @@ static ssize_t fuse_dev_write(struct kio + return fuse_dev_do_write(fc, &cs, iov_length(iov, nr_segs)); + } + ++EXPORT_SYMBOL_GPL(fuse_dev_write); ++ + static ssize_t fuse_dev_splice_write(struct pipe_inode_info *pipe, + struct file *out, loff_t *ppos, + size_t len, unsigned int flags) +@@ -1478,7 +1482,7 @@ out: + return ret; + } + +-static unsigned fuse_dev_poll(struct file *file, poll_table *wait) ++unsigned fuse_dev_poll(struct file *file, poll_table *wait) + { + unsigned mask = POLLOUT | POLLWRNORM; + struct fuse_conn *fc = fuse_get_conn(file); +@@ -1497,6 +1501,8 @@ static unsigned fuse_dev_poll(struct fil + return mask; + } + ++EXPORT_SYMBOL_GPL(fuse_dev_poll); ++ + /* + * Abort all requests on the given list (pending or processing) + * +@@ -1604,7 +1610,7 @@ int fuse_dev_release(struct inode *inode + } + EXPORT_SYMBOL_GPL(fuse_dev_release); + +-static int fuse_dev_fasync(int fd, struct file *file, int on) ++int fuse_dev_fasync(int fd, struct file *file, int on) + { + struct fuse_conn *fc = fuse_get_conn(file); + if (!fc) +@@ -1614,6 +1620,8 @@ static int fuse_dev_fasync(int fd, struc + return fasync_helper(fd, file, on, &fc->fasync); + } + ++EXPORT_SYMBOL_GPL(fuse_dev_fasync); ++ + const struct file_operations fuse_dev_operations = { + .owner = THIS_MODULE, + .llseek = no_llseek, +diff -urNp linux-2.6.35.4/fs/fuse/dir.c linux-2.6.35.4/fs/fuse/dir.c +--- linux-2.6.35.4/fs/fuse/dir.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/fuse/dir.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1127,7 +1127,7 @@ static char *read_link(struct dentry *de + return link; + } + +-static void free_link(char *link) ++static void free_link(const char *link) + { + if (!IS_ERR(link)) + free_page((unsigned long) link); +diff -urNp linux-2.6.35.4/fs/fuse/fuse_i.h linux-2.6.35.4/fs/fuse/fuse_i.h +--- linux-2.6.35.4/fs/fuse/fuse_i.h 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/fs/fuse/fuse_i.h 2010-09-17 20:12:09.000000000 -0400 +@@ -524,6 +524,16 @@ extern const struct file_operations fuse + + extern const struct dentry_operations fuse_dentry_operations; + ++extern ssize_t fuse_dev_read(struct kiocb *iocb, const struct iovec *iov, ++ unsigned long nr_segs, loff_t pos); ++ ++extern ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov, ++ unsigned long nr_segs, loff_t pos); ++ ++extern unsigned fuse_dev_poll(struct file *file, poll_table *wait); ++ ++extern int fuse_dev_fasync(int fd, struct file *file, int on); ++ + /** + * Inode to nodeid comparison. + */ +diff -urNp linux-2.6.35.4/fs/hfs/inode.c linux-2.6.35.4/fs/hfs/inode.c +--- linux-2.6.35.4/fs/hfs/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/hfs/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -423,7 +423,7 @@ int hfs_write_inode(struct inode *inode, + + if (S_ISDIR(main_inode->i_mode)) { + if (fd.entrylength < sizeof(struct hfs_cat_dir)) +- /* panic? */; ++ {/* panic? */} + hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, + sizeof(struct hfs_cat_dir)); + if (rec.type != HFS_CDR_DIR || +@@ -444,7 +444,7 @@ int hfs_write_inode(struct inode *inode, + sizeof(struct hfs_cat_file)); + } else { + if (fd.entrylength < sizeof(struct hfs_cat_file)) +- /* panic? */; ++ {/* panic? */} + hfs_bnode_read(fd.bnode, &rec, fd.entryoffset, + sizeof(struct hfs_cat_file)); + if (rec.type != HFS_CDR_FIL || +diff -urNp linux-2.6.35.4/fs/hfsplus/inode.c linux-2.6.35.4/fs/hfsplus/inode.c +--- linux-2.6.35.4/fs/hfsplus/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/hfsplus/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -406,7 +406,7 @@ int hfsplus_cat_read_inode(struct inode + struct hfsplus_cat_folder *folder = &entry.folder; + + if (fd->entrylength < sizeof(struct hfsplus_cat_folder)) +- /* panic? */; ++ {/* panic? */} + hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, + sizeof(struct hfsplus_cat_folder)); + hfsplus_get_perms(inode, &folder->permissions, 1); +@@ -423,7 +423,7 @@ int hfsplus_cat_read_inode(struct inode + struct hfsplus_cat_file *file = &entry.file; + + if (fd->entrylength < sizeof(struct hfsplus_cat_file)) +- /* panic? */; ++ {/* panic? */} + hfs_bnode_read(fd->bnode, &entry, fd->entryoffset, + sizeof(struct hfsplus_cat_file)); + +@@ -479,7 +479,7 @@ int hfsplus_cat_write_inode(struct inode + struct hfsplus_cat_folder *folder = &entry.folder; + + if (fd.entrylength < sizeof(struct hfsplus_cat_folder)) +- /* panic? */; ++ {/* panic? */} + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, + sizeof(struct hfsplus_cat_folder)); + /* simple node checks? */ +@@ -501,7 +501,7 @@ int hfsplus_cat_write_inode(struct inode + struct hfsplus_cat_file *file = &entry.file; + + if (fd.entrylength < sizeof(struct hfsplus_cat_file)) +- /* panic? */; ++ {/* panic? 
*/} + hfs_bnode_read(fd.bnode, &entry, fd.entryoffset, + sizeof(struct hfsplus_cat_file)); + hfsplus_inode_write_fork(inode, &file->data_fork); +diff -urNp linux-2.6.35.4/fs/hugetlbfs/inode.c linux-2.6.35.4/fs/hugetlbfs/inode.c +--- linux-2.6.35.4/fs/hugetlbfs/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/hugetlbfs/inode.c 2010-09-17 20:12:37.000000000 -0400 +@@ -908,7 +908,7 @@ static struct file_system_type hugetlbfs + .kill_sb = kill_litter_super, + }; + +-static struct vfsmount *hugetlbfs_vfsmount; ++struct vfsmount *hugetlbfs_vfsmount; + + static int can_do_hugetlb_shm(void) + { +diff -urNp linux-2.6.35.4/fs/ioctl.c linux-2.6.35.4/fs/ioctl.c +--- linux-2.6.35.4/fs/ioctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ioctl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -97,7 +97,7 @@ int fiemap_fill_next_extent(struct fiema + u64 phys, u64 len, u32 flags) + { + struct fiemap_extent extent; +- struct fiemap_extent *dest = fieinfo->fi_extents_start; ++ struct fiemap_extent __user *dest = fieinfo->fi_extents_start; + + /* only count the extents */ + if (fieinfo->fi_extents_max == 0) { +@@ -207,7 +207,7 @@ static int ioctl_fiemap(struct file *fil + + fieinfo.fi_flags = fiemap.fm_flags; + fieinfo.fi_extents_max = fiemap.fm_extent_count; +- fieinfo.fi_extents_start = (struct fiemap_extent *)(arg + sizeof(fiemap)); ++ fieinfo.fi_extents_start = (struct fiemap_extent __user *)(arg + sizeof(fiemap)); + + if (fiemap.fm_extent_count != 0 && + !access_ok(VERIFY_WRITE, fieinfo.fi_extents_start, +@@ -220,7 +220,7 @@ static int ioctl_fiemap(struct file *fil + error = inode->i_op->fiemap(inode, &fieinfo, fiemap.fm_start, len); + fiemap.fm_flags = fieinfo.fi_flags; + fiemap.fm_mapped_extents = fieinfo.fi_extents_mapped; +- if (copy_to_user((char *)arg, &fiemap, sizeof(fiemap))) ++ if (copy_to_user((__force char __user *)arg, &fiemap, sizeof(fiemap))) + error = -EFAULT; + + return error; +diff -urNp linux-2.6.35.4/fs/jffs2/debug.h linux-2.6.35.4/fs/jffs2/debug.h +--- linux-2.6.35.4/fs/jffs2/debug.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/jffs2/debug.h 2010-09-17 20:12:09.000000000 -0400 +@@ -52,13 +52,13 @@ + #if CONFIG_JFFS2_FS_DEBUG > 0 + #define D1(x) x + #else +-#define D1(x) ++#define D1(x) do {} while (0); + #endif + + #if CONFIG_JFFS2_FS_DEBUG > 1 + #define D2(x) x + #else +-#define D2(x) ++#define D2(x) do {} while (0); + #endif + + /* The prefixes of JFFS2 messages */ +@@ -114,73 +114,73 @@ + #ifdef JFFS2_DBG_READINODE_MESSAGES + #define dbg_readinode(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_readinode(fmt, ...) ++#define dbg_readinode(fmt, ...) do {} while (0) + #endif + #ifdef JFFS2_DBG_READINODE2_MESSAGES + #define dbg_readinode2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_readinode2(fmt, ...) ++#define dbg_readinode2(fmt, ...) do {} while (0) + #endif + + /* Fragtree build debugging messages */ + #ifdef JFFS2_DBG_FRAGTREE_MESSAGES + #define dbg_fragtree(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_fragtree(fmt, ...) ++#define dbg_fragtree(fmt, ...) do {} while (0) + #endif + #ifdef JFFS2_DBG_FRAGTREE2_MESSAGES + #define dbg_fragtree2(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_fragtree2(fmt, ...) ++#define dbg_fragtree2(fmt, ...) do {} while (0) + #endif + + /* Directory entry list manilulation debugging messages */ + #ifdef JFFS2_DBG_DENTLIST_MESSAGES + #define dbg_dentlist(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_dentlist(fmt, ...) 
++#define dbg_dentlist(fmt, ...) do {} while (0) + #endif + + /* Print the messages about manipulating node_refs */ + #ifdef JFFS2_DBG_NODEREF_MESSAGES + #define dbg_noderef(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_noderef(fmt, ...) ++#define dbg_noderef(fmt, ...) do {} while (0) + #endif + + /* Manipulations with the list of inodes (JFFS2 inocache) */ + #ifdef JFFS2_DBG_INOCACHE_MESSAGES + #define dbg_inocache(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_inocache(fmt, ...) ++#define dbg_inocache(fmt, ...) do {} while (0) + #endif + + /* Summary debugging messages */ + #ifdef JFFS2_DBG_SUMMARY_MESSAGES + #define dbg_summary(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_summary(fmt, ...) ++#define dbg_summary(fmt, ...) do {} while (0) + #endif + + /* File system build messages */ + #ifdef JFFS2_DBG_FSBUILD_MESSAGES + #define dbg_fsbuild(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_fsbuild(fmt, ...) ++#define dbg_fsbuild(fmt, ...) do {} while (0) + #endif + + /* Watch the object allocations */ + #ifdef JFFS2_DBG_MEMALLOC_MESSAGES + #define dbg_memalloc(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_memalloc(fmt, ...) ++#define dbg_memalloc(fmt, ...) do {} while (0) + #endif + + /* Watch the XATTR subsystem */ + #ifdef JFFS2_DBG_XATTR_MESSAGES + #define dbg_xattr(fmt, ...) JFFS2_DEBUG(fmt, ##__VA_ARGS__) + #else +-#define dbg_xattr(fmt, ...) ++#define dbg_xattr(fmt, ...) do {} while (0) + #endif + + /* "Sanity" checks */ +diff -urNp linux-2.6.35.4/fs/jffs2/erase.c linux-2.6.35.4/fs/jffs2/erase.c +--- linux-2.6.35.4/fs/jffs2/erase.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/jffs2/erase.c 2010-09-17 20:12:09.000000000 -0400 +@@ -438,7 +438,8 @@ static void jffs2_mark_erased_block(stru + struct jffs2_unknown_node marker = { + .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), +- .totlen = cpu_to_je32(c->cleanmarker_size) ++ .totlen = cpu_to_je32(c->cleanmarker_size), ++ .hdr_crc = cpu_to_je32(0) + }; + + jffs2_prealloc_raw_node_refs(c, jeb, 1); +diff -urNp linux-2.6.35.4/fs/jffs2/summary.h linux-2.6.35.4/fs/jffs2/summary.h +--- linux-2.6.35.4/fs/jffs2/summary.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/jffs2/summary.h 2010-09-17 20:12:09.000000000 -0400 +@@ -194,18 +194,18 @@ int jffs2_sum_scan_sumnode(struct jffs2_ + + #define jffs2_sum_active() (0) + #define jffs2_sum_init(a) (0) +-#define jffs2_sum_exit(a) +-#define jffs2_sum_disable_collecting(a) ++#define jffs2_sum_exit(a) do {} while (0) ++#define jffs2_sum_disable_collecting(a) do {} while (0) + #define jffs2_sum_is_disabled(a) (0) +-#define jffs2_sum_reset_collected(a) ++#define jffs2_sum_reset_collected(a) do {} while (0) + #define jffs2_sum_add_kvec(a,b,c,d) (0) +-#define jffs2_sum_move_collected(a,b) ++#define jffs2_sum_move_collected(a,b) do {} while (0) + #define jffs2_sum_write_sumnode(a) (0) +-#define jffs2_sum_add_padding_mem(a,b) +-#define jffs2_sum_add_inode_mem(a,b,c) +-#define jffs2_sum_add_dirent_mem(a,b,c) +-#define jffs2_sum_add_xattr_mem(a,b,c) +-#define jffs2_sum_add_xref_mem(a,b,c) ++#define jffs2_sum_add_padding_mem(a,b) do {} while (0) ++#define jffs2_sum_add_inode_mem(a,b,c) do {} while (0) ++#define jffs2_sum_add_dirent_mem(a,b,c) do {} while (0) ++#define jffs2_sum_add_xattr_mem(a,b,c) do {} while (0) ++#define jffs2_sum_add_xref_mem(a,b,c) do {} while (0) + #define jffs2_sum_scan_sumnode(a,b,c,d,e) (0) + + #endif /* CONFIG_JFFS2_SUMMARY */ 
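The ext2/ext3/ext4 xattr and jffs2 hunks above give every stubbed-out debug macro a do {} while (0) body instead of an empty expansion. The do/while form is a single complete statement that still demands its trailing semicolon, so call sites parse identically whether debugging is compiled in or out, and empty-statement warnings (-Wempty-body) disappear. A small sketch of the idiom, with the macro name illustrative:

#include <stdio.h>

#ifdef DEBUG
# define dbg(fmt, ...) fprintf(stderr, fmt, ##__VA_ARGS__)
#else
# define dbg(fmt, ...) do {} while (0)    /* not an empty expansion */
#endif

int main(void)
{
    int err = 1;

    /* With an empty expansion this body collapses to a bare ';' and
     * the compiler may warn; with do {} while (0) it remains a normal
     * statement in both configurations. */
    if (err)
        dbg("error path: %d\n", err);
    else
        puts("no error");

    return 0;
}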
+diff -urNp linux-2.6.35.4/fs/jffs2/wbuf.c linux-2.6.35.4/fs/jffs2/wbuf.c +--- linux-2.6.35.4/fs/jffs2/wbuf.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/jffs2/wbuf.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1012,7 +1012,8 @@ static const struct jffs2_unknown_node o + { + .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK), + .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER), +- .totlen = constant_cpu_to_je32(8) ++ .totlen = constant_cpu_to_je32(8), ++ .hdr_crc = constant_cpu_to_je32(0) + }; + + /* +diff -urNp linux-2.6.35.4/fs/lockd/svc.c linux-2.6.35.4/fs/lockd/svc.c +--- linux-2.6.35.4/fs/lockd/svc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/lockd/svc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -42,7 +42,7 @@ + + static struct svc_program nlmsvc_program; + +-struct nlmsvc_binding * nlmsvc_ops; ++const struct nlmsvc_binding * nlmsvc_ops; + EXPORT_SYMBOL_GPL(nlmsvc_ops); + + static DEFINE_MUTEX(nlmsvc_mutex); +diff -urNp linux-2.6.35.4/fs/locks.c linux-2.6.35.4/fs/locks.c +--- linux-2.6.35.4/fs/locks.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/locks.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2008,16 +2008,16 @@ void locks_remove_flock(struct file *fil + return; + + if (filp->f_op && filp->f_op->flock) { +- struct file_lock fl = { ++ struct file_lock flock = { + .fl_pid = current->tgid, + .fl_file = filp, + .fl_flags = FL_FLOCK, + .fl_type = F_UNLCK, + .fl_end = OFFSET_MAX, + }; +- filp->f_op->flock(filp, F_SETLKW, &fl); +- if (fl.fl_ops && fl.fl_ops->fl_release_private) +- fl.fl_ops->fl_release_private(&fl); ++ filp->f_op->flock(filp, F_SETLKW, &flock); ++ if (flock.fl_ops && flock.fl_ops->fl_release_private) ++ flock.fl_ops->fl_release_private(&flock); + } + + lock_kernel(); +diff -urNp linux-2.6.35.4/fs/namei.c linux-2.6.35.4/fs/namei.c +--- linux-2.6.35.4/fs/namei.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/namei.c 2010-09-17 20:12:37.000000000 -0400 +@@ -548,7 +548,7 @@ __do_follow_link(struct path *path, stru + *p = dentry->d_inode->i_op->follow_link(dentry, nd); + error = PTR_ERR(*p); + if (!IS_ERR(*p)) { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + error = 0; + if (s) + error = __vfs_follow_link(nd, s); +@@ -581,6 +581,13 @@ static inline int do_follow_link(struct + err = security_inode_follow_link(path->dentry, nd); + if (err) + goto loop; ++ ++ if (gr_handle_follow_link(path->dentry->d_parent->d_inode, ++ path->dentry->d_inode, path->dentry, nd->path.mnt)) { ++ err = -EACCES; ++ goto loop; ++ } ++ + current->link_count++; + current->total_link_count++; + nd->depth++; +@@ -965,11 +972,18 @@ return_reval: + break; + } + return_base: ++ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) { ++ path_put(&nd->path); ++ return -ENOENT; ++ } + return 0; + out_dput: + path_put_conditional(&next, nd); + break; + } ++ if (!(nd->flags & LOOKUP_PARENT) && !gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) ++ err = -ENOENT; ++ + path_put(&nd->path); + return_err: + return err; +@@ -1506,12 +1520,19 @@ static int __open_namei_create(struct na + int error; + struct dentry *dir = nd->path.dentry; + ++ if (!gr_acl_handle_creat(path->dentry, nd->path.dentry, nd->path.mnt, open_flag, mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ + if (!IS_POSIXACL(dir->d_inode)) + mode &= ~current_umask(); + error = security_path_mknod(&nd->path, path->dentry, mode, 0); + if (error) + goto out_unlock; + error = vfs_create(dir->d_inode, path->dentry, mode, nd); 
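The fs/namei.c hunks in this stretch share one shape: a gr_acl_handle_*() policy check can veto the operation with -EACCES before the VFS call runs, and a gr_handle_create()-style hook records the event only after the call succeeds. A stand-alone sketch of that check-then-audit shape follows; every identifier in it is a stand-in, not the patch's implementation:

#include <errno.h>

static int policy_allows_create(void) { return 1; }   /* stand-in for gr_acl_handle_creat(): nonzero = permit */
static int vfs_create_stub(void)      { return 0; }   /* stand-in for vfs_create(): zero = success */
static void audit_create(void)        { }             /* stand-in for gr_handle_create() */

static int guarded_create(void)
{
    int error;

    if (!policy_allows_create())
        return -EACCES;          /* vetoed before the operation runs */

    error = vfs_create_stub();
    if (!error)
        audit_create();          /* log only operations that actually happened */
    return error;
}

int main(void) { return guarded_create(); }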
++ if (!error) ++ gr_handle_create(path->dentry, nd->path.mnt); + out_unlock: + mutex_unlock(&dir->d_inode->i_mutex); + dput(nd->path.dentry); +@@ -1614,6 +1635,7 @@ static struct file *do_last(struct namei + int mode, const char *pathname) + { + struct dentry *dir = nd->path.dentry; ++ int flag = open_to_namei_flags(open_flag); + struct file *filp; + int error = -EISDIR; + +@@ -1662,6 +1684,22 @@ static struct file *do_last(struct namei + } + path_to_nameidata(path, nd); + audit_inode(pathname, nd->path.dentry); ++ ++ if (gr_handle_rofs_blockwrite(nd->path.dentry, nd->path.mnt, acc_mode)) { ++ error = -EPERM; ++ goto exit; ++ } ++ ++ if (gr_handle_rawio(nd->path.dentry->d_inode)) { ++ error = -EPERM; ++ goto exit; ++ } ++ ++ if (!gr_acl_handle_open(nd->path.dentry, nd->path.mnt, flag)) { ++ error = -EACCES; ++ goto exit; ++ } ++ + goto ok; + } + +@@ -1714,6 +1752,24 @@ static struct file *do_last(struct namei + /* + * It already exists. + */ ++ ++ if (gr_handle_rofs_blockwrite(path->dentry, nd->path.mnt, acc_mode)) { ++ error = -EPERM; ++ goto exit_mutex_unlock; ++ } ++ if (gr_handle_rawio(path->dentry->d_inode)) { ++ error = -EPERM; ++ goto exit_mutex_unlock; ++ } ++ if (!gr_acl_handle_open(path->dentry, nd->path.mnt, flag)) { ++ error = -EACCES; ++ goto exit_mutex_unlock; ++ } ++ if (gr_handle_fifo(path->dentry, nd->path.mnt, dir, flag, acc_mode)) { ++ error = -EACCES; ++ goto exit_mutex_unlock; ++ } ++ + mutex_unlock(&dir->d_inode->i_mutex); + audit_inode(pathname, path->dentry); + +@@ -2034,6 +2090,17 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const + error = may_mknod(mode); + if (error) + goto out_dput; ++ ++ if (gr_handle_chroot_mknod(dentry, nd.path.mnt, mode)) { ++ error = -EPERM; ++ goto out_dput; ++ } ++ ++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ + error = mnt_want_write(nd.path.mnt); + if (error) + goto out_dput; +@@ -2054,6 +2121,9 @@ SYSCALL_DEFINE4(mknodat, int, dfd, const + } + out_drop_write: + mnt_drop_write(nd.path.mnt); ++ ++ if (!error) ++ gr_handle_create(dentry, nd.path.mnt); + out_dput: + dput(dentry); + out_unlock: +@@ -2106,6 +2176,11 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const + if (IS_ERR(dentry)) + goto out_unlock; + ++ if (!gr_acl_handle_mkdir(dentry, nd.path.dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ + if (!IS_POSIXACL(nd.path.dentry->d_inode)) + mode &= ~current_umask(); + error = mnt_want_write(nd.path.mnt); +@@ -2117,6 +2192,10 @@ SYSCALL_DEFINE3(mkdirat, int, dfd, const + error = vfs_mkdir(nd.path.dentry->d_inode, dentry, mode); + out_drop_write: + mnt_drop_write(nd.path.mnt); ++ ++ if (!error) ++ gr_handle_create(dentry, nd.path.mnt); ++ + out_dput: + dput(dentry); + out_unlock: +@@ -2198,6 +2277,8 @@ static long do_rmdir(int dfd, const char + char * name; + struct dentry *dentry; + struct nameidata nd; ++ ino_t saved_ino = 0; ++ dev_t saved_dev = 0; + + error = user_path_parent(dfd, pathname, &nd, &name); + if (error) +@@ -2222,6 +2303,19 @@ static long do_rmdir(int dfd, const char + error = PTR_ERR(dentry); + if (IS_ERR(dentry)) + goto exit2; ++ ++ if (dentry->d_inode != NULL) { ++ if (dentry->d_inode->i_nlink <= 1) { ++ saved_ino = dentry->d_inode->i_ino; ++ saved_dev = dentry->d_inode->i_sb->s_dev; ++ } ++ ++ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto exit3; ++ } ++ } ++ + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit3; +@@ -2229,6 +2323,8 @@ static long do_rmdir(int dfd, const char + if (error) + goto 
exit4; + error = vfs_rmdir(nd.path.dentry->d_inode, dentry); ++ if (!error && (saved_dev || saved_ino)) ++ gr_handle_delete(saved_ino, saved_dev); + exit4: + mnt_drop_write(nd.path.mnt); + exit3: +@@ -2291,6 +2387,8 @@ static long do_unlinkat(int dfd, const c + struct dentry *dentry; + struct nameidata nd; + struct inode *inode = NULL; ++ ino_t saved_ino = 0; ++ dev_t saved_dev = 0; + + error = user_path_parent(dfd, pathname, &nd, &name); + if (error) +@@ -2310,8 +2408,19 @@ static long do_unlinkat(int dfd, const c + if (nd.last.name[nd.last.len]) + goto slashes; + inode = dentry->d_inode; +- if (inode) ++ if (inode) { ++ if (inode->i_nlink <= 1) { ++ saved_ino = inode->i_ino; ++ saved_dev = inode->i_sb->s_dev; ++ } ++ + atomic_inc(&inode->i_count); ++ ++ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) { ++ error = -EACCES; ++ goto exit2; ++ } ++ } + error = mnt_want_write(nd.path.mnt); + if (error) + goto exit2; +@@ -2319,6 +2428,8 @@ static long do_unlinkat(int dfd, const c + if (error) + goto exit3; + error = vfs_unlink(nd.path.dentry->d_inode, dentry); ++ if (!error && (saved_ino || saved_dev)) ++ gr_handle_delete(saved_ino, saved_dev); + exit3: + mnt_drop_write(nd.path.mnt); + exit2: +@@ -2396,6 +2507,11 @@ SYSCALL_DEFINE3(symlinkat, const char __ + if (IS_ERR(dentry)) + goto out_unlock; + ++ if (!gr_acl_handle_symlink(dentry, nd.path.dentry, nd.path.mnt, from)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ + error = mnt_want_write(nd.path.mnt); + if (error) + goto out_dput; +@@ -2403,6 +2519,8 @@ SYSCALL_DEFINE3(symlinkat, const char __ + if (error) + goto out_drop_write; + error = vfs_symlink(nd.path.dentry->d_inode, dentry, from); ++ if (!error) ++ gr_handle_create(dentry, nd.path.mnt); + out_drop_write: + mnt_drop_write(nd.path.mnt); + out_dput: +@@ -2495,6 +2613,20 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con + error = PTR_ERR(new_dentry); + if (IS_ERR(new_dentry)) + goto out_unlock; ++ ++ if (gr_handle_hardlink(old_path.dentry, old_path.mnt, ++ old_path.dentry->d_inode, ++ old_path.dentry->d_inode->i_mode, to)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ ++ if (!gr_acl_handle_link(new_dentry, nd.path.dentry, nd.path.mnt, ++ old_path.dentry, old_path.mnt, to)) { ++ error = -EACCES; ++ goto out_dput; ++ } ++ + error = mnt_want_write(nd.path.mnt); + if (error) + goto out_dput; +@@ -2502,6 +2634,8 @@ SYSCALL_DEFINE5(linkat, int, olddfd, con + if (error) + goto out_drop_write; + error = vfs_link(old_path.dentry, nd.path.dentry->d_inode, new_dentry); ++ if (!error) ++ gr_handle_create(new_dentry, nd.path.mnt); + out_drop_write: + mnt_drop_write(nd.path.mnt); + out_dput: +@@ -2735,6 +2869,12 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c + if (new_dentry == trap) + goto exit5; + ++ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt, ++ old_dentry, old_dir->d_inode, oldnd.path.mnt, ++ to); ++ if (error) ++ goto exit5; ++ + error = mnt_want_write(oldnd.path.mnt); + if (error) + goto exit5; +@@ -2744,6 +2884,9 @@ SYSCALL_DEFINE4(renameat, int, olddfd, c + goto exit6; + error = vfs_rename(old_dir->d_inode, old_dentry, + new_dir->d_inode, new_dentry); ++ if (!error) ++ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry, ++ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 
1 : 0); + exit6: + mnt_drop_write(oldnd.path.mnt); + exit5: +diff -urNp linux-2.6.35.4/fs/namespace.c linux-2.6.35.4/fs/namespace.c +--- linux-2.6.35.4/fs/namespace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/namespace.c 2010-09-17 20:21:58.000000000 -0400 +@@ -1099,6 +1099,9 @@ static int do_umount(struct vfsmount *mn + if (!(sb->s_flags & MS_RDONLY)) + retval = do_remount_sb(sb, MS_RDONLY, NULL, 0); + up_write(&sb->s_umount); ++ ++ gr_log_remount(mnt->mnt_devname, retval); ++ + return retval; + } + +@@ -1118,6 +1121,9 @@ static int do_umount(struct vfsmount *mn + spin_unlock(&vfsmount_lock); + up_write(&namespace_sem); + release_mounts(&umount_list); ++ ++ gr_log_unmount(mnt->mnt_devname, retval); ++ + return retval; + } + +@@ -1988,6 +1994,16 @@ long do_mount(char *dev_name, char *dir_ + MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT | + MS_STRICTATIME); + ++ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) { ++ retval = -EPERM; ++ goto dput_out; ++ } ++ ++ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) { ++ retval = -EPERM; ++ goto dput_out; ++ } ++ + if (flags & MS_REMOUNT) + retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags, + data_page); +@@ -2002,6 +2018,9 @@ long do_mount(char *dev_name, char *dir_ + dev_name, data_page); + dput_out: + path_put(&path); ++ ++ gr_log_mount(dev_name, dir_name, retval); ++ + return retval; + } + +@@ -2208,6 +2227,12 @@ SYSCALL_DEFINE2(pivot_root, const char _ + goto out1; + } + ++ if (gr_handle_chroot_pivot()) { ++ error = -EPERM; ++ path_put(&old); ++ goto out1; ++ } ++ + read_lock(&current->fs->lock); + root = current->fs->root; + path_get(&current->fs->root); +diff -urNp linux-2.6.35.4/fs/nfs/inode.c linux-2.6.35.4/fs/nfs/inode.c +--- linux-2.6.35.4/fs/nfs/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/nfs/inode.c 2010-09-17 20:12:09.000000000 -0400 +@@ -915,16 +915,16 @@ static int nfs_size_need_update(const st + return nfs_size_to_loff_t(fattr->size) > i_size_read(inode); + } + +-static atomic_long_t nfs_attr_generation_counter; ++static atomic_long_unchecked_t nfs_attr_generation_counter; + + static unsigned long nfs_read_attr_generation_counter(void) + { +- return atomic_long_read(&nfs_attr_generation_counter); ++ return atomic_long_read_unchecked(&nfs_attr_generation_counter); + } + + unsigned long nfs_inc_attr_generation_counter(void) + { +- return atomic_long_inc_return(&nfs_attr_generation_counter); ++ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter); + } + + void nfs_fattr_init(struct nfs_fattr *fattr) +diff -urNp linux-2.6.35.4/fs/nfs/nfs4proc.c linux-2.6.35.4/fs/nfs/nfs4proc.c +--- linux-2.6.35.4/fs/nfs/nfs4proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/nfs/nfs4proc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1166,7 +1166,7 @@ static int _nfs4_do_open_reclaim(struct + static int nfs4_do_open_reclaim(struct nfs_open_context *ctx, struct nfs4_state *state) + { + struct nfs_server *server = NFS_SERVER(state->inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = _nfs4_do_open_reclaim(ctx, state); +@@ -1208,7 +1208,7 @@ static int _nfs4_open_delegation_recall( + + int nfs4_open_delegation_recall(struct nfs_open_context *ctx, struct nfs4_state *state, const nfs4_stateid *stateid) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + struct nfs_server *server = NFS_SERVER(state->inode); + int err; + do { +@@ -1581,7 +1581,7 @@ static int 
_nfs4_open_expired(struct nfs + static int nfs4_do_open_expired(struct nfs_open_context *ctx, struct nfs4_state *state) + { + struct nfs_server *server = NFS_SERVER(state->inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + do { +@@ -1697,7 +1697,7 @@ out_err: + + static struct nfs4_state *nfs4_do_open(struct inode *dir, struct path *path, fmode_t fmode, int flags, struct iattr *sattr, struct rpc_cred *cred) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + struct nfs4_state *res; + int status; + +@@ -1788,7 +1788,7 @@ static int nfs4_do_setattr(struct inode + struct nfs4_state *state) + { + struct nfs_server *server = NFS_SERVER(inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(server, +@@ -2166,7 +2166,7 @@ static int _nfs4_server_capabilities(str + + int nfs4_server_capabilities(struct nfs_server *server, struct nfs_fh *fhandle) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(server, +@@ -2200,7 +2200,7 @@ static int _nfs4_lookup_root(struct nfs_ + static int nfs4_lookup_root(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_fsinfo *info) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(server, +@@ -2289,7 +2289,7 @@ static int _nfs4_proc_getattr(struct nfs + + static int nfs4_proc_getattr(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fattr *fattr) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(server, +@@ -2377,7 +2377,7 @@ static int nfs4_proc_lookupfh(struct nfs + struct qstr *name, struct nfs_fh *fhandle, + struct nfs_fattr *fattr) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = _nfs4_proc_lookupfh(server, dirfh, name, fhandle, fattr); +@@ -2406,7 +2406,7 @@ static int _nfs4_proc_lookup(struct inod + + static int nfs4_proc_lookup(struct inode *dir, struct qstr *name, struct nfs_fh *fhandle, struct nfs_fattr *fattr) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), +@@ -2473,7 +2473,7 @@ static int _nfs4_proc_access(struct inod + + static int nfs4_proc_access(struct inode *inode, struct nfs_access_entry *entry) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), +@@ -2529,7 +2529,7 @@ static int _nfs4_proc_readlink(struct in + static int nfs4_proc_readlink(struct inode *inode, struct page *page, + unsigned int pgbase, unsigned int pglen) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), +@@ -2625,7 +2625,7 @@ out: + + static int nfs4_proc_remove(struct inode *dir, struct qstr *name) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), +@@ -2700,7 +2700,7 @@ out: + static int nfs4_proc_rename(struct inode *old_dir, struct qstr *old_name, + struct inode *new_dir, struct qstr *new_name) + { +- struct nfs4_exception 
exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(old_dir), +@@ -2749,7 +2749,7 @@ out: + + static int nfs4_proc_link(struct inode *inode, struct inode *dir, struct qstr *name) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), +@@ -2841,7 +2841,7 @@ out: + static int nfs4_proc_symlink(struct inode *dir, struct dentry *dentry, + struct page *page, unsigned int len, struct iattr *sattr) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), +@@ -2872,7 +2872,7 @@ out: + static int nfs4_proc_mkdir(struct inode *dir, struct dentry *dentry, + struct iattr *sattr) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), +@@ -2921,7 +2921,7 @@ static int _nfs4_proc_readdir(struct den + static int nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred, + u64 cookie, struct page *page, unsigned int count, int plus) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dentry->d_inode), +@@ -2969,7 +2969,7 @@ out: + static int nfs4_proc_mknod(struct inode *dir, struct dentry *dentry, + struct iattr *sattr, dev_t rdev) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(dir), +@@ -3001,7 +3001,7 @@ static int _nfs4_proc_statfs(struct nfs_ + + static int nfs4_proc_statfs(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsstat *fsstat) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(server, +@@ -3032,7 +3032,7 @@ static int _nfs4_do_fsinfo(struct nfs_se + + static int nfs4_do_fsinfo(struct nfs_server *server, struct nfs_fh *fhandle, struct nfs_fsinfo *fsinfo) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + do { +@@ -3078,7 +3078,7 @@ static int _nfs4_proc_pathconf(struct nf + static int nfs4_proc_pathconf(struct nfs_server *server, struct nfs_fh *fhandle, + struct nfs_pathconf *pathconf) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + do { +@@ -3399,7 +3399,7 @@ out_free: + + static ssize_t nfs4_get_acl_uncached(struct inode *inode, void *buf, size_t buflen) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + ssize_t ret; + do { + ret = __nfs4_get_acl_uncached(inode, buf, buflen); +@@ -3455,7 +3455,7 @@ static int __nfs4_proc_set_acl(struct in + + static int nfs4_proc_set_acl(struct inode *inode, const void *buf, size_t buflen) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = nfs4_handle_exception(NFS_SERVER(inode), +@@ -3745,7 +3745,7 @@ out: + int nfs4_proc_delegreturn(struct inode *inode, struct rpc_cred *cred, const nfs4_stateid *stateid, int issync) + { + struct nfs_server *server = NFS_SERVER(inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + do { + err = _nfs4_proc_delegreturn(inode, cred, stateid, issync); +@@ -3818,7 +3818,7 @@ out: + + static 
int nfs4_proc_getlk(struct nfs4_state *state, int cmd, struct file_lock *request) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + do { +@@ -4232,7 +4232,7 @@ static int _nfs4_do_setlk(struct nfs4_st + static int nfs4_lock_reclaim(struct nfs4_state *state, struct file_lock *request) + { + struct nfs_server *server = NFS_SERVER(state->inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + do { +@@ -4250,7 +4250,7 @@ static int nfs4_lock_reclaim(struct nfs4 + static int nfs4_lock_expired(struct nfs4_state *state, struct file_lock *request) + { + struct nfs_server *server = NFS_SERVER(state->inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + err = nfs4_set_lock_state(state, request); +@@ -4315,7 +4315,7 @@ out: + + static int nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock *request) + { +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + do { +@@ -4375,7 +4375,7 @@ nfs4_proc_lock(struct file *filp, int cm + int nfs4_lock_delegation_recall(struct nfs4_state *state, struct file_lock *fl) + { + struct nfs_server *server = NFS_SERVER(state->inode); +- struct nfs4_exception exception = { }; ++ struct nfs4_exception exception = {0, 0}; + int err; + + err = nfs4_set_lock_state(state, fl); +diff -urNp linux-2.6.35.4/fs/nfsd/lockd.c linux-2.6.35.4/fs/nfsd/lockd.c +--- linux-2.6.35.4/fs/nfsd/lockd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/nfsd/lockd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -61,7 +61,7 @@ nlm_fclose(struct file *filp) + fput(filp); + } + +-static struct nlmsvc_binding nfsd_nlm_ops = { ++static const struct nlmsvc_binding nfsd_nlm_ops = { + .fopen = nlm_fopen, /* open file for locking */ + .fclose = nlm_fclose, /* close file */ + }; +diff -urNp linux-2.6.35.4/fs/nfsd/nfsctl.c linux-2.6.35.4/fs/nfsd/nfsctl.c +--- linux-2.6.35.4/fs/nfsd/nfsctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/nfsd/nfsctl.c 2010-09-17 20:12:09.000000000 -0400 +@@ -163,7 +163,7 @@ static int export_features_open(struct i + return single_open(file, export_features_show, NULL); + } + +-static struct file_operations export_features_operations = { ++static const struct file_operations export_features_operations = { + .open = export_features_open, + .read = seq_read, + .llseek = seq_lseek, +diff -urNp linux-2.6.35.4/fs/nfsd/vfs.c linux-2.6.35.4/fs/nfsd/vfs.c +--- linux-2.6.35.4/fs/nfsd/vfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/nfsd/vfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -933,7 +933,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, st + } else { + oldfs = get_fs(); + set_fs(KERNEL_DS); +- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_readv(file, (__force struct iovec __user *)vec, vlen, &offset); + set_fs(oldfs); + } + +@@ -1056,7 +1056,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, s + + /* Write the data. 
*/ + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &offset); ++ host_err = vfs_writev(file, (__force struct iovec __user *)vec, vlen, &offset); + set_fs(oldfs); + if (host_err < 0) + goto out_nfserr; +@@ -1541,7 +1541,7 @@ nfsd_readlink(struct svc_rqst *rqstp, st + */ + + oldfs = get_fs(); set_fs(KERNEL_DS); +- host_err = inode->i_op->readlink(dentry, buf, *lenp); ++ host_err = inode->i_op->readlink(dentry, (__force char __user *)buf, *lenp); + set_fs(oldfs); + + if (host_err < 0) +diff -urNp linux-2.6.35.4/fs/nls/nls_base.c linux-2.6.35.4/fs/nls/nls_base.c +--- linux-2.6.35.4/fs/nls/nls_base.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/nls/nls_base.c 2010-09-17 20:12:09.000000000 -0400 +@@ -41,7 +41,7 @@ static const struct utf8_table utf8_tabl + {0xF8, 0xF0, 3*6, 0x1FFFFF, 0x10000, /* 4 byte sequence */}, + {0xFC, 0xF8, 4*6, 0x3FFFFFF, 0x200000, /* 5 byte sequence */}, + {0xFE, 0xFC, 5*6, 0x7FFFFFFF, 0x4000000, /* 6 byte sequence */}, +- {0, /* end of table */} ++ {0, 0, 0, 0, 0, /* end of table */} + }; + + #define UNICODE_MAX 0x0010ffff +diff -urNp linux-2.6.35.4/fs/ntfs/file.c linux-2.6.35.4/fs/ntfs/file.c +--- linux-2.6.35.4/fs/ntfs/file.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ntfs/file.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2223,6 +2223,6 @@ const struct inode_operations ntfs_file_ + #endif /* NTFS_RW */ + }; + +-const struct file_operations ntfs_empty_file_ops = {}; ++const struct file_operations ntfs_empty_file_ops __read_only; + +-const struct inode_operations ntfs_empty_inode_ops = {}; ++const struct inode_operations ntfs_empty_inode_ops __read_only; +diff -urNp linux-2.6.35.4/fs/ocfs2/localalloc.c linux-2.6.35.4/fs/ocfs2/localalloc.c +--- linux-2.6.35.4/fs/ocfs2/localalloc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ocfs2/localalloc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1307,7 +1307,7 @@ static int ocfs2_local_alloc_slide_windo + goto bail; + } + +- atomic_inc(&osb->alloc_stats.moves); ++ atomic_inc_unchecked(&osb->alloc_stats.moves); + + bail: + if (handle) +diff -urNp linux-2.6.35.4/fs/ocfs2/ocfs2.h linux-2.6.35.4/fs/ocfs2/ocfs2.h +--- linux-2.6.35.4/fs/ocfs2/ocfs2.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ocfs2/ocfs2.h 2010-09-17 20:12:09.000000000 -0400 +@@ -223,11 +223,11 @@ enum ocfs2_vol_state + + struct ocfs2_alloc_stats + { +- atomic_t moves; +- atomic_t local_data; +- atomic_t bitmap_data; +- atomic_t bg_allocs; +- atomic_t bg_extends; ++ atomic_unchecked_t moves; ++ atomic_unchecked_t local_data; ++ atomic_unchecked_t bitmap_data; ++ atomic_unchecked_t bg_allocs; ++ atomic_unchecked_t bg_extends; + }; + + enum ocfs2_local_alloc_state +diff -urNp linux-2.6.35.4/fs/ocfs2/suballoc.c linux-2.6.35.4/fs/ocfs2/suballoc.c +--- linux-2.6.35.4/fs/ocfs2/suballoc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ocfs2/suballoc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -856,7 +856,7 @@ static int ocfs2_reserve_suballoc_bits(s + mlog_errno(status); + goto bail; + } +- atomic_inc(&osb->alloc_stats.bg_extends); ++ atomic_inc_unchecked(&osb->alloc_stats.bg_extends); + + /* You should never ask for this much metadata */ + BUG_ON(bits_wanted > +@@ -1968,7 +1968,7 @@ int ocfs2_claim_metadata(handle_t *handl + mlog_errno(status); + goto bail; + } +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + *suballoc_loc = res.sr_bg_blkno; + 
*suballoc_bit_start = res.sr_bit_offset; +@@ -2045,7 +2045,7 @@ int ocfs2_claim_new_inode(handle_t *hand + mlog_errno(status); + goto bail; + } +- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); ++ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs); + + BUG_ON(res.sr_bits != 1); + +@@ -2150,7 +2150,7 @@ int __ocfs2_claim_clusters(handle_t *han + cluster_start, + num_clusters); + if (!status) +- atomic_inc(&osb->alloc_stats.local_data); ++ atomic_inc_unchecked(&osb->alloc_stats.local_data); + } else { + if (min_clusters > (osb->bitmap_cpg - 1)) { + /* The only paths asking for contiguousness +@@ -2176,7 +2176,7 @@ int __ocfs2_claim_clusters(handle_t *han + ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode, + res.sr_bg_blkno, + res.sr_bit_offset); +- atomic_inc(&osb->alloc_stats.bitmap_data); ++ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data); + *num_clusters = res.sr_bits; + } + } +diff -urNp linux-2.6.35.4/fs/ocfs2/super.c linux-2.6.35.4/fs/ocfs2/super.c +--- linux-2.6.35.4/fs/ocfs2/super.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ocfs2/super.c 2010-09-17 20:12:09.000000000 -0400 +@@ -293,11 +293,11 @@ static int ocfs2_osb_dump(struct ocfs2_s + "%10s => GlobalAllocs: %d LocalAllocs: %d " + "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n", + "Stats", +- atomic_read(&osb->alloc_stats.bitmap_data), +- atomic_read(&osb->alloc_stats.local_data), +- atomic_read(&osb->alloc_stats.bg_allocs), +- atomic_read(&osb->alloc_stats.moves), +- atomic_read(&osb->alloc_stats.bg_extends)); ++ atomic_read_unchecked(&osb->alloc_stats.bitmap_data), ++ atomic_read_unchecked(&osb->alloc_stats.local_data), ++ atomic_read_unchecked(&osb->alloc_stats.bg_allocs), ++ atomic_read_unchecked(&osb->alloc_stats.moves), ++ atomic_read_unchecked(&osb->alloc_stats.bg_extends)); + + out += snprintf(buf + out, len - out, + "%10s => State: %u Descriptor: %llu Size: %u bits " +@@ -2047,11 +2047,11 @@ static int ocfs2_initialize_super(struct + spin_lock_init(&osb->osb_xattr_lock); + ocfs2_init_steal_slots(osb); + +- atomic_set(&osb->alloc_stats.moves, 0); +- atomic_set(&osb->alloc_stats.local_data, 0); +- atomic_set(&osb->alloc_stats.bitmap_data, 0); +- atomic_set(&osb->alloc_stats.bg_allocs, 0); +- atomic_set(&osb->alloc_stats.bg_extends, 0); ++ atomic_set_unchecked(&osb->alloc_stats.moves, 0); ++ atomic_set_unchecked(&osb->alloc_stats.local_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0); ++ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0); + + /* Copy the blockcheck stats from the superblock probe */ + osb->osb_ecc_stats = *stats; +diff -urNp linux-2.6.35.4/fs/ocfs2/symlink.c linux-2.6.35.4/fs/ocfs2/symlink.c +--- linux-2.6.35.4/fs/ocfs2/symlink.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/ocfs2/symlink.c 2010-09-17 20:12:09.000000000 -0400 +@@ -148,7 +148,7 @@ bail: + + static void ocfs2_fast_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + { +- char *link = nd_get_link(nd); ++ const char *link = nd_get_link(nd); + if (!IS_ERR(link)) + kfree(link); + } +diff -urNp linux-2.6.35.4/fs/open.c linux-2.6.35.4/fs/open.c +--- linux-2.6.35.4/fs/open.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/open.c 2010-09-17 20:12:37.000000000 -0400 +@@ -42,6 +42,9 @@ int do_truncate(struct dentry *dentry, l + if (length < 0) + return -EINVAL; + ++ if (filp && !gr_acl_handle_truncate(dentry, filp->f_path.mnt)) ++ return -EACCES; ++ + newattrs.ia_size = length; 
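The nfs/inode.c and ocfs2 hunks above convert pure statistics counters from atomic_t to atomic_unchecked_t. The point of the split: under PaX's REFCOUNT hardening the ordinary atomic operations trap on overflow so a leaked reference count cannot be wrapped into a use-after-free, while counters whose wrap is harmless opt out via the unchecked variant. A conceptual user-space sketch, with stand-in types rather than the kernel implementation:

#include <limits.h>
#include <stdlib.h>

typedef struct { int counter; } checked_t;            /* stand-in for atomic_t under REFCOUNT */
typedef struct { unsigned int counter; } unchecked_t; /* stand-in for atomic_unchecked_t */

static void checked_inc(checked_t *v)
{
    if (v->counter == INT_MAX)
        abort();                 /* stand-in for the PaX overflow trap */
    v->counter++;
}

static void unchecked_inc(unchecked_t *v)
{
    v->counter++;                /* statistics counter: wrap is defined and harmless */
}

int main(void)
{
    checked_t refs = { 0 };
    unchecked_t stats = { UINT_MAX };

    checked_inc(&refs);
    unchecked_inc(&stats);       /* wraps to 0 without trapping */
    return 0;
}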
+ newattrs.ia_valid = ATTR_SIZE | time_attrs; + if (filp) { +@@ -345,6 +348,9 @@ SYSCALL_DEFINE3(faccessat, int, dfd, con + if (__mnt_is_readonly(path.mnt)) + res = -EROFS; + ++ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode)) ++ res = -EACCES; ++ + out_path_release: + path_put(&path); + out: +@@ -371,6 +377,8 @@ SYSCALL_DEFINE1(chdir, const char __user + if (error) + goto dput_and_out; + ++ gr_log_chdir(path.dentry, path.mnt); ++ + set_fs_pwd(current->fs, &path); + + dput_and_out: +@@ -397,6 +405,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd + goto out_putf; + + error = inode_permission(inode, MAY_EXEC | MAY_ACCESS); ++ ++ if (!error && !gr_chroot_fchdir(file->f_path.dentry, file->f_path.mnt)) ++ error = -EPERM; ++ ++ if (!error) ++ gr_log_chdir(file->f_path.dentry, file->f_path.mnt); ++ + if (!error) + set_fs_pwd(current->fs, &file->f_path); + out_putf: +@@ -425,7 +440,18 @@ SYSCALL_DEFINE1(chroot, const char __use + if (error) + goto dput_and_out; + ++ if (gr_handle_chroot_chroot(path.dentry, path.mnt)) ++ goto dput_and_out; ++ ++ if (gr_handle_chroot_caps(&path)) { ++ error = -ENOMEM; ++ goto dput_and_out; ++ } ++ + set_fs_root(current->fs, &path); ++ ++ gr_handle_chroot_chdir(&path); ++ + error = 0; + dput_and_out: + path_put(&path); +@@ -453,6 +479,12 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd + err = mnt_want_write_file(file); + if (err) + goto out_putf; ++ ++ if (!gr_acl_handle_fchmod(dentry, file->f_path.mnt, mode)) { ++ err = -EACCES; ++ goto out_drop_write; ++ } ++ + mutex_lock(&inode->i_mutex); + err = security_path_chmod(dentry, file->f_vfsmnt, mode); + if (err) +@@ -464,6 +496,7 @@ SYSCALL_DEFINE2(fchmod, unsigned int, fd + err = notify_change(dentry, &newattrs); + out_unlock: + mutex_unlock(&inode->i_mutex); ++out_drop_write: + mnt_drop_write(file->f_path.mnt); + out_putf: + fput(file); +@@ -486,17 +519,30 @@ SYSCALL_DEFINE3(fchmodat, int, dfd, cons + error = mnt_want_write(path.mnt); + if (error) + goto dput_and_out; ++ ++ if (!gr_acl_handle_chmod(path.dentry, path.mnt, mode)) { ++ error = -EACCES; ++ goto out_drop_write; ++ } ++ + mutex_lock(&inode->i_mutex); + error = security_path_chmod(path.dentry, path.mnt, mode); + if (error) + goto out_unlock; + if (mode == (mode_t) -1) + mode = inode->i_mode; ++ ++ if (gr_handle_chroot_chmod(path.dentry, path.mnt, mode)) { ++ error = -EACCES; ++ goto out_unlock; ++ } ++ + newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO); + newattrs.ia_valid = ATTR_MODE | ATTR_CTIME; + error = notify_change(path.dentry, &newattrs); + out_unlock: + mutex_unlock(&inode->i_mutex); ++out_drop_write: + mnt_drop_write(path.mnt); + dput_and_out: + path_put(&path); +@@ -515,6 +561,9 @@ static int chown_common(struct path *pat + int error; + struct iattr newattrs; + ++ if (!gr_acl_handle_chown(path->dentry, path->mnt)) ++ return -EACCES; ++ + newattrs.ia_valid = ATTR_CTIME; + if (user != (uid_t) -1) { + newattrs.ia_valid |= ATTR_UID; +diff -urNp linux-2.6.35.4/fs/pipe.c linux-2.6.35.4/fs/pipe.c +--- linux-2.6.35.4/fs/pipe.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/pipe.c 2010-09-17 20:12:37.000000000 -0400 +@@ -420,9 +420,9 @@ redo: + } + if (bufs) /* More to do? */ + continue; +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + /* syscall merging: Usually we must not sleep + * if O_NONBLOCK is set, or if we got some data. 
+ * But if a writer sleeps in kernel space, then +@@ -481,7 +481,7 @@ pipe_write(struct kiocb *iocb, const str + mutex_lock(&inode->i_mutex); + pipe = inode->i_pipe; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + goto out; +@@ -530,7 +530,7 @@ redo1: + for (;;) { + int bufs; + +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -616,9 +616,9 @@ redo2: + kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN); + do_wakeup = 0; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + out: + mutex_unlock(&inode->i_mutex); +@@ -685,7 +685,7 @@ pipe_poll(struct file *filp, poll_table + mask = 0; + if (filp->f_mode & FMODE_READ) { + mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0; +- if (!pipe->writers && filp->f_version != pipe->w_counter) ++ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter) + mask |= POLLHUP; + } + +@@ -695,7 +695,7 @@ pipe_poll(struct file *filp, poll_table + * Most Unices do not set POLLERR for FIFOs but on Linux they + * behave exactly like pipes for poll(). + */ +- if (!pipe->readers) ++ if (!atomic_read(&pipe->readers)) + mask |= POLLERR; + } + +@@ -709,10 +709,10 @@ pipe_release(struct inode *inode, int de + + mutex_lock(&inode->i_mutex); + pipe = inode->i_pipe; +- pipe->readers -= decr; +- pipe->writers -= decw; ++ atomic_sub(decr, &pipe->readers); ++ atomic_sub(decw, &pipe->writers); + +- if (!pipe->readers && !pipe->writers) { ++ if (!atomic_read(&pipe->readers) && !atomic_read(&pipe->writers)) { + free_pipe_info(inode); + } else { + wake_up_interruptible_sync(&pipe->wait); +@@ -802,7 +802,7 @@ pipe_read_open(struct inode *inode, stru + + if (inode->i_pipe) { + ret = 0; +- inode->i_pipe->readers++; ++ atomic_inc(&inode->i_pipe->readers); + } + + mutex_unlock(&inode->i_mutex); +@@ -819,7 +819,7 @@ pipe_write_open(struct inode *inode, str + + if (inode->i_pipe) { + ret = 0; +- inode->i_pipe->writers++; ++ atomic_inc(&inode->i_pipe->writers); + } + + mutex_unlock(&inode->i_mutex); +@@ -837,9 +837,9 @@ pipe_rdwr_open(struct inode *inode, stru + if (inode->i_pipe) { + ret = 0; + if (filp->f_mode & FMODE_READ) +- inode->i_pipe->readers++; ++ atomic_inc(&inode->i_pipe->readers); + if (filp->f_mode & FMODE_WRITE) +- inode->i_pipe->writers++; ++ atomic_inc(&inode->i_pipe->writers); + } + + mutex_unlock(&inode->i_mutex); +@@ -931,7 +931,7 @@ void free_pipe_info(struct inode *inode) + inode->i_pipe = NULL; + } + +-static struct vfsmount *pipe_mnt __read_mostly; ++struct vfsmount *pipe_mnt __read_mostly; + + /* + * pipefs_dname() is called from d_path(). 
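The fs/pipe.c hunks above mechanically route every pipe->readers, pipe->writers and pipe->waiting_writers access through the atomic API so the counts stay coherent even on paths not covered by i_mutex. A user-space analogue of the same access pattern, using C11 atomics as a stand-in for the kernel's atomic_t:

#include <stdatomic.h>

struct pipe_like {
    atomic_int readers;
    atomic_int writers;
    atomic_int waiting_writers;
};

static int pipe_like_has_writer(struct pipe_like *p)
{
    return atomic_load(&p->writers) != 0;    /* was: if (!pipe->writers) */
}

static void pipe_like_open_read(struct pipe_like *p)
{
    atomic_fetch_add(&p->readers, 1);        /* was: pipe->readers++ */
}

static void pipe_like_release(struct pipe_like *p, int decr, int decw)
{
    atomic_fetch_sub(&p->readers, decr);     /* was: pipe->readers -= decr */
    atomic_fetch_sub(&p->writers, decw);
}

int main(void)
{
    struct pipe_like p = { 1, 1, 0 };

    pipe_like_open_read(&p);
    pipe_like_release(&p, 2, 1);
    return pipe_like_has_writer(&p);
}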
+@@ -959,7 +959,8 @@ static struct inode * get_pipe_inode(voi + goto fail_iput; + inode->i_pipe = pipe; + +- pipe->readers = pipe->writers = 1; ++ atomic_set(&pipe->readers, 1); ++ atomic_set(&pipe->writers, 1); + inode->i_fop = &rdwr_pipefifo_fops; + + /* +diff -urNp linux-2.6.35.4/fs/proc/array.c linux-2.6.35.4/fs/proc/array.c +--- linux-2.6.35.4/fs/proc/array.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/array.c 2010-09-17 20:12:37.000000000 -0400 +@@ -337,6 +337,21 @@ static void task_cpus_allowed(struct seq + seq_printf(m, "\n"); + } + ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline void task_pax(struct seq_file *m, struct task_struct *p) ++{ ++ if (p->mm) ++ seq_printf(m, "PaX:\t%c%c%c%c%c\n", ++ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p', ++ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e', ++ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm', ++ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r', ++ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's'); ++ else ++ seq_printf(m, "PaX:\t-----\n"); ++} ++#endif ++ + int proc_pid_status(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task) + { +@@ -357,9 +372,20 @@ int proc_pid_status(struct seq_file *m, + task_show_regs(m, task); + #endif + task_context_switch_counts(m, task); ++ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ task_pax(m, task); ++#endif ++ + return 0; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int do_task_stat(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task, int whole) + { +@@ -452,6 +478,19 @@ static int do_task_stat(struct seq_file + gtime = task->gtime; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (PAX_RAND_FLAGS(mm)) { ++ eip = 0; ++ esp = 0; ++ wchan = 0; ++ } ++#endif ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ wchan = 0; ++ eip =0; ++ esp =0; ++#endif ++ + /* scale priority and nice values from timeslices to -20..20 */ + /* to make it look like a "normal" Unix priority/nice value */ + priority = task_prio(task); +@@ -492,9 +531,15 @@ static int do_task_stat(struct seq_file + vsize, + mm ? get_mm_rss(mm) : 0, + rsslim, ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->start_code : 0), ++ PAX_RAND_FLAGS(mm) ? 1 : (mm ? mm->end_code : 0), ++ PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0), ++#else + mm ? mm->start_code : 0, + mm ? mm->end_code : 0, + (permitted && mm) ? mm->start_stack : 0, ++#endif + esp, + eip, + /* The signal information here is obsolete. 
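task_pax() above renders the per-mm PaX flags as a five-letter line in /proc/<pid>/status, upper-case when the feature is active and lower-case when it is not. A stand-alone sketch of the same rendering; the flag values here are illustrative, not the kernel's MF_PAX_* constants:

#include <stdio.h>

#define F_PAGEEXEC 0x01u    /* illustrative values, not MF_PAX_PAGEEXEC etc. */
#define F_EMUTRAMP 0x02u
#define F_MPROTECT 0x04u
#define F_RANDMMAP 0x08u
#define F_SEGMEXEC 0x10u

static void print_pax_line(unsigned long flags)
{
    printf("PaX:\t%c%c%c%c%c\n",
           flags & F_PAGEEXEC ? 'P' : 'p',
           flags & F_EMUTRAMP ? 'E' : 'e',
           flags & F_MPROTECT ? 'M' : 'm',
           flags & F_RANDMMAP ? 'R' : 'r',
           flags & F_SEGMEXEC ? 'S' : 's');
}

int main(void)
{
    print_pax_line(F_PAGEEXEC | F_MPROTECT | F_RANDMMAP);   /* prints "PaX:  PeMRs" */
    return 0;
}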
+@@ -547,3 +592,10 @@ int proc_pid_statm(struct seq_file *m, s + + return 0; + } ++ ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++int proc_pid_ipaddr(struct task_struct *task, char *buffer) ++{ ++ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip); ++} ++#endif +diff -urNp linux-2.6.35.4/fs/proc/base.c linux-2.6.35.4/fs/proc/base.c +--- linux-2.6.35.4/fs/proc/base.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/base.c 2010-09-17 20:12:37.000000000 -0400 +@@ -103,6 +103,22 @@ struct pid_entry { + union proc_op op; + }; + ++struct getdents_callback { ++ struct linux_dirent __user * current_dir; ++ struct linux_dirent __user * previous; ++ struct file * file; ++ int count; ++ int error; ++}; ++ ++static int gr_fake_filldir(void * __buf, const char *name, int namlen, ++ loff_t offset, u64 ino, unsigned int d_type) ++{ ++ struct getdents_callback * buf = (struct getdents_callback *) __buf; ++ buf->error = -EINVAL; ++ return 0; ++} ++ + #define NOD(NAME, MODE, IOP, FOP, OP) { \ + .name = (NAME), \ + .len = sizeof(NAME) - 1, \ +@@ -202,6 +218,9 @@ static int check_mem_permission(struct t + if (task == current) + return 0; + ++ if (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task)) ++ return -EPERM; ++ + /* + * If current is actively ptrace'ing, and would also be + * permitted to freshly attach with ptrace now, permit it. +@@ -249,6 +268,9 @@ static int proc_pid_cmdline(struct task_ + if (!mm->arg_end) + goto out_mm; /* Shh! No looking before we're done */ + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out_mm; ++ + len = mm->arg_end - mm->arg_start; + + if (len > PAGE_SIZE) +@@ -276,12 +298,26 @@ out: + return res; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static int proc_pid_auxv(struct task_struct *task, char *buffer) + { + int res = 0; + struct mm_struct *mm = get_task_mm(task); + if (mm) { + unsigned int nwords = 0; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (PAX_RAND_FLAGS(mm)) { ++ mmput(mm); ++ return res; ++ } ++#endif ++ + do { + nwords += 2; + } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */ +@@ -317,7 +353,7 @@ static int proc_pid_wchan(struct task_st + } + #endif /* CONFIG_KALLSYMS */ + +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + + #define MAX_STACK_TRACE_DEPTH 64 + +@@ -511,7 +547,7 @@ static int proc_pid_limits(struct task_s + return count; + } + +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + static int proc_pid_syscall(struct task_struct *task, char *buffer) + { + long nr; +@@ -920,6 +956,9 @@ static ssize_t environ_read(struct file + if (!task) + goto out_no_task; + ++ if (gr_acl_handle_procpidmem(task)) ++ goto out; ++ + if (!ptrace_may_access(task, PTRACE_MODE_READ)) + goto out; + +@@ -1514,7 +1553,11 @@ static struct inode *proc_pid_make_inode + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } + security_task_to_inode(task, inode); +@@ -1532,6 +1575,9 @@ static int pid_getattr(struct vfsmount * + struct inode *inode = dentry->d_inode; + struct task_struct *task; + const struct cred *cred; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || 
defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *tmpcred = current_cred(); ++#endif + + generic_fillattr(inode, stat); + +@@ -1539,12 +1585,34 @@ static int pid_getattr(struct vfsmount * + stat->uid = 0; + stat->gid = 0; + task = pid_task(proc_pid(inode), PIDTYPE_PID); ++ ++ if (task && (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))) { ++ rcu_read_unlock(); ++ return -ENOENT; ++ } ++ + if (task) { ++ cred = __task_cred(task); ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (!tmpcred->uid || (tmpcred->uid == cred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ || in_group_p(CONFIG_GRKERNSEC_PROC_GID) ++#endif ++ ) ++#endif + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { +- cred = __task_cred(task); + stat->uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ stat->gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + stat->gid = cred->egid; ++#endif + } + } + rcu_read_unlock(); +@@ -1576,11 +1644,20 @@ static int pid_revalidate(struct dentry + + if (task) { + if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) || ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) || ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) || ++#endif + task_dumpable(task)) { + rcu_read_lock(); + cred = __task_cred(task); + inode->i_uid = cred->euid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = cred->egid; ++#endif + rcu_read_unlock(); + } else { + inode->i_uid = 0; +@@ -1701,7 +1778,8 @@ static int proc_fd_info(struct inode *in + int fd = proc_fd(inode); + + if (task) { +- files = get_files_struct(task); ++ if (!gr_acl_handle_procpidmem(task)) ++ files = get_files_struct(task); + put_task_struct(task); + } + if (files) { +@@ -1953,12 +2031,22 @@ static const struct file_operations proc + static int proc_fd_permission(struct inode *inode, int mask) + { + int rv; ++ struct task_struct *task; + + rv = generic_permission(inode, mask, NULL); +- if (rv == 0) +- return 0; ++ + if (task_pid(current) == proc_pid(inode)) + rv = 0; ++ ++ task = get_proc_task(inode); ++ if (task == NULL) ++ return rv; ++ ++ if (gr_acl_handle_procpidmem(task)) ++ rv = -EACCES; ++ ++ put_task_struct(task); ++ + return rv; + } + +@@ -2067,6 +2155,9 @@ static struct dentry *proc_pident_lookup + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + /* + * Yes, it does not scale. And it should not. Don't add + * new entries into /proc/<tgid>/ without very good reasons. 
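A single #ifdef pattern recurs through the fs/proc hunks above: with CONFIG_GRKERNSEC_PROC_USERGROUP the /proc inode's group is forced to the configured CONFIG_GRKERNSEC_PROC_GID instead of the task's egid, so one designated group keeps read access while everyone else is shut out by the tightened modes. A compressed stand-alone sketch of that override; the structure, macro names and GID value below are stand-ins:

struct inode_like { unsigned int uid, gid; };

#define DEMO_PROC_GID 10u    /* stand-in for CONFIG_GRKERNSEC_PROC_GID */

static void fill_proc_owner(struct inode_like *inode,
                            unsigned int euid, unsigned int egid)
{
    inode->uid = euid;
#ifdef DEMO_PROC_USERGROUP   /* models CONFIG_GRKERNSEC_PROC_USERGROUP */
    inode->gid = DEMO_PROC_GID;
    (void)egid;
#else
    inode->gid = egid;       /* stock behaviour: group is the task's egid */
#endif
}

int main(void)
{
    struct inode_like ino;

    fill_proc_owner(&ino, 1000u, 1000u);
    return ino.gid == 1000u ? 0 : 1;
}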
+@@ -2111,6 +2202,9 @@ static int proc_pident_readdir(struct fi + if (!task) + goto out_no_task; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out; ++ + ret = 0; + i = filp->f_pos; + switch (i) { +@@ -2380,7 +2474,7 @@ static void *proc_self_follow_link(struc + static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd, + void *cookie) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); + } +@@ -2580,7 +2674,7 @@ static const struct pid_entry tgid_base_ + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUSR, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2608,7 +2702,7 @@ static const struct pid_entry tgid_base_ + #ifdef CONFIG_KALLSYMS + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUSR, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +@@ -2638,6 +2732,9 @@ static const struct pid_entry tgid_base_ + #ifdef CONFIG_TASK_IO_ACCOUNTING + INF("io", S_IRUGO, proc_tgid_io_accounting), + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++ INF("ipaddr", S_IRUSR, proc_pid_ipaddr), ++#endif + }; + + static int proc_tgid_base_readdir(struct file * filp, +@@ -2762,7 +2859,14 @@ static struct dentry *proc_pid_instantia + if (!inode) + goto out; + ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP; ++#else + inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO; ++#endif + inode->i_op = &proc_tgid_base_inode_operations; + inode->i_fop = &proc_tgid_base_operations; + inode->i_flags|=S_IMMUTABLE; +@@ -2804,7 +2908,11 @@ struct dentry *proc_pid_lookup(struct in + if (!task) + goto out; + ++ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task)) ++ goto out_put_task; ++ + result = proc_pid_instantiate(dir, dentry, task, NULL); ++out_put_task: + put_task_struct(task); + out: + return result; +@@ -2869,6 +2977,11 @@ int proc_pid_readdir(struct file * filp, + { + unsigned int nr = filp->f_pos - FIRST_PROCESS_ENTRY; + struct task_struct *reaper = get_proc_task(filp->f_path.dentry->d_inode); ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *tmpcred = current_cred(); ++ const struct cred *itercred; ++#endif ++ filldir_t __filldir = filldir; + struct tgid_iter iter; + struct pid_namespace *ns; + +@@ -2887,8 +3000,27 @@ int proc_pid_readdir(struct file * filp, + for (iter = next_tgid(ns, iter); + iter.task; + iter.tgid += 1, iter = next_tgid(ns, iter)) { ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_lock(); ++ itercred = __task_cred(iter.task); ++#endif ++ if (gr_pid_is_chrooted(iter.task) || gr_check_hidden_task(iter.task) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ || (tmpcred->uid && (itercred->uid != tmpcred->uid) ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID) ++#endif ++ ) ++#endif ++ ) ++ __filldir = &gr_fake_filldir; ++ else ++ __filldir = filldir; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) 
|| defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ rcu_read_unlock(); ++#endif + filp->f_pos = iter.tgid + TGID_OFFSET; +- if (proc_pid_fill_cache(filp, dirent, filldir, iter) < 0) { ++ if (proc_pid_fill_cache(filp, dirent, __filldir, iter) < 0) { + put_task_struct(iter.task); + goto out; + } +@@ -2915,7 +3047,7 @@ static const struct pid_entry tid_base_s + REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations), + #endif + REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations), +-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK ++#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP) + INF("syscall", S_IRUSR, proc_pid_syscall), + #endif + INF("cmdline", S_IRUGO, proc_pid_cmdline), +@@ -2942,7 +3074,7 @@ static const struct pid_entry tid_base_s + #ifdef CONFIG_KALLSYMS + INF("wchan", S_IRUGO, proc_pid_wchan), + #endif +-#ifdef CONFIG_STACKTRACE ++#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM) + ONE("stack", S_IRUSR, proc_pid_stack), + #endif + #ifdef CONFIG_SCHEDSTATS +diff -urNp linux-2.6.35.4/fs/proc/cmdline.c linux-2.6.35.4/fs/proc/cmdline.c +--- linux-2.6.35.4/fs/proc/cmdline.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/cmdline.c 2010-09-17 20:12:37.000000000 -0400 +@@ -23,7 +23,11 @@ static const struct file_operations cmdl + + static int __init proc_cmdline_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops); ++#else + proc_create("cmdline", 0, NULL, &cmdline_proc_fops); ++#endif + return 0; + } + module_init(proc_cmdline_init); +diff -urNp linux-2.6.35.4/fs/proc/devices.c linux-2.6.35.4/fs/proc/devices.c +--- linux-2.6.35.4/fs/proc/devices.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/devices.c 2010-09-17 20:12:37.000000000 -0400 +@@ -64,7 +64,11 @@ static const struct file_operations proc + + static int __init proc_devices_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations); ++#else + proc_create("devices", 0, NULL, &proc_devinfo_operations); ++#endif + return 0; + } + module_init(proc_devices_init); +diff -urNp linux-2.6.35.4/fs/proc/inode.c linux-2.6.35.4/fs/proc/inode.c +--- linux-2.6.35.4/fs/proc/inode.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/inode.c 2010-09-17 20:12:37.000000000 -0400 +@@ -435,7 +435,11 @@ struct inode *proc_get_inode(struct supe + if (de->mode) { + inode->i_mode = de->mode; + inode->i_uid = de->uid; ++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP ++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID; ++#else + inode->i_gid = de->gid; ++#endif + } + if (de->size) + inode->i_size = de->size; +diff -urNp linux-2.6.35.4/fs/proc/internal.h linux-2.6.35.4/fs/proc/internal.h +--- linux-2.6.35.4/fs/proc/internal.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/internal.h 2010-09-17 20:12:37.000000000 -0400 +@@ -51,6 +51,9 @@ extern int proc_pid_status(struct seq_fi + struct pid *pid, struct task_struct *task); + extern int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns, + struct pid *pid, struct task_struct *task); ++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR ++extern int proc_pid_ipaddr(struct task_struct *task, char *buffer); ++#endif + extern loff_t mem_lseek(struct file *file, loff_t offset, int orig); + + extern const struct file_operations proc_maps_operations; +diff -urNp linux-2.6.35.4/fs/proc/Kconfig linux-2.6.35.4/fs/proc/Kconfig +--- linux-2.6.35.4/fs/proc/Kconfig 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/fs/proc/Kconfig 2010-09-17 20:12:37.000000000 -0400 +@@ -30,12 +30,12 @@ config PROC_FS + + config PROC_KCORE + bool "/proc/kcore support" if !ARM +- depends on PROC_FS && MMU ++ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD + + config PROC_VMCORE + bool "/proc/vmcore support (EXPERIMENTAL)" +- depends on PROC_FS && CRASH_DUMP +- default y ++ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC ++ default n + help + Exports the dump image of crashed kernel in ELF format. + +@@ -59,8 +59,8 @@ config PROC_SYSCTL + limited in memory. + + config PROC_PAGE_MONITOR +- default y +- depends on PROC_FS && MMU ++ default n ++ depends on PROC_FS && MMU && !GRKERNSEC + bool "Enable /proc page monitoring" if EMBEDDED + help + Various /proc files exist to monitor process memory utilization: +diff -urNp linux-2.6.35.4/fs/proc/kcore.c linux-2.6.35.4/fs/proc/kcore.c +--- linux-2.6.35.4/fs/proc/kcore.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/kcore.c 2010-09-17 20:12:37.000000000 -0400 +@@ -478,9 +478,10 @@ read_kcore(struct file *file, char __use + * the addresses in the elf_phdr on our list. + */ + start = kc_offset_to_vaddr(*fpos - elf_buflen); +- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen) ++ tsz = PAGE_SIZE - (start & ~PAGE_MASK); ++ if (tsz > buflen) + tsz = buflen; +- ++ + while (buflen) { + struct kcore_list *m; + +@@ -509,20 +510,18 @@ read_kcore(struct file *file, char __use + kfree(elf_buf); + } else { + if (kern_addr_valid(start)) { +- unsigned long n; ++ char *elf_buf; + +- n = copy_to_user(buffer, (char *)start, tsz); +- /* +- * We cannot distingush between fault on source +- * and fault on destination. When this happens +- * we clear too and hope it will trigger the +- * EFAULT again. +- */ +- if (n) { +- if (clear_user(buffer + tsz - n, +- n)) ++ elf_buf = kmalloc(tsz, GFP_KERNEL); ++ if (!elf_buf) ++ return -ENOMEM; ++ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) { ++ if (copy_to_user(buffer, elf_buf, tsz)) { ++ kfree(elf_buf); + return -EFAULT; ++ } + } ++ kfree(elf_buf); + } else { + if (clear_user(buffer, tsz)) + return -EFAULT; +@@ -542,6 +541,9 @@ read_kcore(struct file *file, char __use + + static int open_kcore(struct inode *inode, struct file *filp) + { ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ return -EPERM; ++#endif + if (!capable(CAP_SYS_RAWIO)) + return -EPERM; + if (kcore_need_update) +diff -urNp linux-2.6.35.4/fs/proc/meminfo.c linux-2.6.35.4/fs/proc/meminfo.c +--- linux-2.6.35.4/fs/proc/meminfo.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/meminfo.c 2010-09-17 20:12:09.000000000 -0400 +@@ -149,7 +149,7 @@ static int meminfo_proc_show(struct seq_ + vmi.used >> 10, + vmi.largest_chunk >> 10 + #ifdef CONFIG_MEMORY_FAILURE +- ,atomic_long_read(&mce_bad_pages) << (PAGE_SHIFT - 10) ++ ,atomic_long_read_unchecked(&mce_bad_pages) << (PAGE_SHIFT - 10) + #endif + ); + +diff -urNp linux-2.6.35.4/fs/proc/nommu.c linux-2.6.35.4/fs/proc/nommu.c +--- linux-2.6.35.4/fs/proc/nommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/nommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_ + if (len < 1) + len = 1; + seq_printf(m, "%*c", len, ' '); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\\"); + } + + seq_putc(m, '\n'); +diff -urNp linux-2.6.35.4/fs/proc/proc_net.c linux-2.6.35.4/fs/proc/proc_net.c +--- linux-2.6.35.4/fs/proc/proc_net.c 2010-08-26 19:47:12.000000000 -0400 ++++ 
linux-2.6.35.4/fs/proc/proc_net.c 2010-09-17 20:12:37.000000000 -0400 +@@ -105,6 +105,17 @@ static struct net *get_proc_task_net(str + struct task_struct *task; + struct nsproxy *ns; + struct net *net = NULL; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred = current_cred(); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (cred->fsuid) ++ return net; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (cred->fsuid && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)) ++ return net; ++#endif + + rcu_read_lock(); + task = pid_task(proc_pid(dir), PIDTYPE_PID); +diff -urNp linux-2.6.35.4/fs/proc/proc_sysctl.c linux-2.6.35.4/fs/proc/proc_sysctl.c +--- linux-2.6.35.4/fs/proc/proc_sysctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/proc_sysctl.c 2010-09-17 20:12:37.000000000 -0400 +@@ -7,6 +7,8 @@ + #include <linux/security.h> + #include "internal.h" + ++extern __u32 gr_handle_sysctl(const struct ctl_table *table, const int op); ++ + static const struct dentry_operations proc_sys_dentry_operations; + static const struct file_operations proc_sys_file_operations; + static const struct inode_operations proc_sys_inode_operations; +@@ -109,6 +111,9 @@ static struct dentry *proc_sys_lookup(st + if (!p) + goto out; + ++ if (gr_handle_sysctl(p, MAY_EXEC)) ++ goto out; ++ + err = ERR_PTR(-ENOMEM); + inode = proc_sys_make_inode(dir->i_sb, h ? h : head, p); + if (h) +@@ -228,6 +233,9 @@ static int scan(struct ctl_table_header + if (*pos < file->f_pos) + continue; + ++ if (gr_handle_sysctl(table, 0)) ++ continue; ++ + res = proc_sys_fill_cache(file, dirent, filldir, head, table); + if (res) + return res; +@@ -344,6 +352,9 @@ static int proc_sys_getattr(struct vfsmo + if (IS_ERR(head)) + return PTR_ERR(head); + ++ if (table && gr_handle_sysctl(table, MAY_EXEC)) ++ return -ENOENT; ++ + generic_fillattr(inode, stat); + if (table) + stat->mode = (stat->mode & S_IFMT) | table->mode; +diff -urNp linux-2.6.35.4/fs/proc/root.c linux-2.6.35.4/fs/proc/root.c +--- linux-2.6.35.4/fs/proc/root.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/root.c 2010-09-17 20:12:37.000000000 -0400 +@@ -133,7 +133,15 @@ void __init proc_root_init(void) + #ifdef CONFIG_PROC_DEVICETREE + proc_device_tree_init(); + #endif ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL); ++#endif ++#else + proc_mkdir("bus", NULL); ++#endif + proc_sys_init(); + } + +diff -urNp linux-2.6.35.4/fs/proc/task_mmu.c linux-2.6.35.4/fs/proc/task_mmu.c +--- linux-2.6.35.4/fs/proc/task_mmu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/task_mmu.c 2010-09-17 20:12:37.000000000 -0400 +@@ -49,8 +49,13 @@ void task_mem(struct seq_file *m, struct + "VmExe:\t%8lu kB\n" + "VmLib:\t%8lu kB\n" + "VmPTE:\t%8lu kB\n" +- "VmSwap:\t%8lu kB\n", +- hiwater_vm << (PAGE_SHIFT-10), ++ "VmSwap:\t%8lu kB\n" ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ "CsBase:\t%8lx\nCsLim:\t%8lx\n" ++#endif ++ ++ ,hiwater_vm << (PAGE_SHIFT-10), + (total_vm - mm->reserved_vm) << (PAGE_SHIFT-10), + mm->locked_vm << (PAGE_SHIFT-10), + hiwater_rss << (PAGE_SHIFT-10), +@@ -58,7 +63,13 @@ void task_mem(struct seq_file *m, struct + data << (PAGE_SHIFT-10), + mm->stack_vm << (PAGE_SHIFT-10), text, lib, + (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10, +- swap << (PAGE_SHIFT-10)); ++ swap << 
(PAGE_SHIFT-10) ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ , mm->context.user_cs_base, mm->context.user_cs_limit ++#endif ++ ++ ); + } + + unsigned long task_vsize(struct mm_struct *mm) +@@ -203,6 +214,12 @@ static int do_maps_open(struct inode *in + return ret; + } + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \ ++ (_mm->pax_flags & MF_PAX_RANDMMAP || \ ++ _mm->pax_flags & MF_PAX_SEGMEXEC)) ++#endif ++ + static void show_map_vma(struct seq_file *m, struct vm_area_struct *vma) + { + struct mm_struct *mm = vma->vm_mm; +@@ -210,7 +227,6 @@ static void show_map_vma(struct seq_file + int flags = vma->vm_flags; + unsigned long ino = 0; + unsigned long long pgoff = 0; +- unsigned long start; + dev_t dev = 0; + int len; + +@@ -221,19 +237,24 @@ static void show_map_vma(struct seq_file + pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; + } + +- /* We don't show the stack guard page in /proc/maps */ +- start = vma->vm_start; +- if (vma->vm_flags & VM_GROWSDOWN) +- start += PAGE_SIZE; + + seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n", +- start, ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start, ++ PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end, ++#else ++ vma->vm_start, + vma->vm_end, ++#endif + flags & VM_READ ? 'r' : '-', + flags & VM_WRITE ? 'w' : '-', + flags & VM_EXEC ? 'x' : '-', + flags & VM_MAYSHARE ? 's' : 'p', ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(mm) ? 0UL : pgoff, ++#else + pgoff, ++#endif + MAJOR(dev), MINOR(dev), ino, &len); + + /* +@@ -242,16 +263,16 @@ static void show_map_vma(struct seq_file + */ + if (file) { + pad_len_spaces(m, len); +- seq_path(m, &file->f_path, "\n"); ++ seq_path(m, &file->f_path, "\n\\"); + } else { + const char *name = arch_vma_name(vma); + if (!name) { + if (mm) { +- if (vma->vm_start <= mm->start_brk && +- vma->vm_end >= mm->brk) { ++ if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { + name = "[heap]"; +- } else if (vma->vm_start <= mm->start_stack && +- vma->vm_end >= mm->start_stack) { ++ } else if ((vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) || ++ (vma->vm_start <= mm->start_stack && ++ vma->vm_end >= mm->start_stack)) { + name = "[stack]"; + } + } else { +@@ -393,11 +414,16 @@ static int show_smap(struct seq_file *m, + }; + + memset(&mss, 0, sizeof mss); +- mss.vma = vma; +- /* mmap_sem is held in m_start */ +- if (vma->vm_mm && !is_vm_hugetlb_page(vma)) +- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); +- ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!PAX_RAND_FLAGS(vma->vm_mm)) { ++#endif ++ mss.vma = vma; ++ /* mmap_sem is held in m_start */ ++ if (vma->vm_mm && !is_vm_hugetlb_page(vma)) ++ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk); ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ } ++#endif + show_map_vma(m, vma); + + seq_printf(m, +@@ -412,7 +438,11 @@ static int show_smap(struct seq_file *m, + "Swap: %8lu kB\n" + "KernelPageSize: %8lu kB\n" + "MMUPageSize: %8lu kB\n", ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ PAX_RAND_FLAGS(vma->vm_mm) ? 
0UL : (vma->vm_end - vma->vm_start) >> 10, ++#else + (vma->vm_end - vma->vm_start) >> 10, ++#endif + mss.resident >> 10, + (unsigned long)(mss.pss >> (10 + PSS_SHIFT)), + mss.shared_clean >> 10, +diff -urNp linux-2.6.35.4/fs/proc/task_nommu.c linux-2.6.35.4/fs/proc/task_nommu.c +--- linux-2.6.35.4/fs/proc/task_nommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/proc/task_nommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct + else + bytes += kobjsize(mm); + +- if (current->fs && current->fs->users > 1) ++ if (current->fs && atomic_read(¤t->fs->users) > 1) + sbytes += kobjsize(current->fs); + else + bytes += kobjsize(current->fs); +@@ -165,7 +165,7 @@ static int nommu_vma_show(struct seq_fil + + if (file) { + pad_len_spaces(m, len); +- seq_path(m, &file->f_path, ""); ++ seq_path(m, &file->f_path, "\n\\"); + } else if (mm) { + if (vma->vm_start <= mm->start_stack && + vma->vm_end >= mm->start_stack) { +diff -urNp linux-2.6.35.4/fs/readdir.c linux-2.6.35.4/fs/readdir.c +--- linux-2.6.35.4/fs/readdir.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/readdir.c 2010-09-17 20:12:37.000000000 -0400 +@@ -16,6 +16,7 @@ + #include <linux/security.h> + #include <linux/syscalls.h> + #include <linux/unistd.h> ++#include <linux/namei.h> + + #include <asm/uaccess.h> + +@@ -67,6 +68,7 @@ struct old_linux_dirent { + + struct readdir_callback { + struct old_linux_dirent __user * dirent; ++ struct file * file; + int result; + }; + +@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, cons + buf->result = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + buf->result++; + dirent = buf->dirent; + if (!access_ok(VERIFY_WRITE, dirent, +@@ -116,6 +122,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned in + + buf.result = 0; + buf.dirent = dirent; ++ buf.file = file; + + error = vfs_readdir(file, fillonedir, &buf); + if (buf.result) +@@ -142,6 +149,7 @@ struct linux_dirent { + struct getdents_callback { + struct linux_dirent __user * current_dir; + struct linux_dirent __user * previous; ++ struct file * file; + int count; + int error; + }; +@@ -162,6 +170,10 @@ static int filldir(void * __buf, const c + buf->error = -EOVERFLOW; + return -EOVERFLOW; + } ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -209,6 +221,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, + buf.previous = NULL; + buf.count = count; + buf.error = 0; ++ buf.file = file; + + error = vfs_readdir(file, filldir, &buf); + if (error >= 0) +@@ -228,6 +241,7 @@ out: + struct getdents_callback64 { + struct linux_dirent64 __user * current_dir; + struct linux_dirent64 __user * previous; ++ struct file *file; + int count; + int error; + }; +@@ -242,6 +256,10 @@ static int filldir64(void * __buf, const + buf->error = -EINVAL; /* only used if we fail.. 
*/ + if (reclen > buf->count) + return -EINVAL; ++ ++ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino)) ++ return 0; ++ + dirent = buf->previous; + if (dirent) { + if (__put_user(offset, &dirent->d_off)) +@@ -289,6 +307,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int + + buf.current_dir = dirent; + buf.previous = NULL; ++ buf.file = file; + buf.count = count; + buf.error = 0; + +diff -urNp linux-2.6.35.4/fs/reiserfs/do_balan.c linux-2.6.35.4/fs/reiserfs/do_balan.c +--- linux-2.6.35.4/fs/reiserfs/do_balan.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/reiserfs/do_balan.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, + return; + } + +- atomic_inc(&(fs_generation(tb->tb_sb))); ++ atomic_inc_unchecked(&(fs_generation(tb->tb_sb))); + do_balance_starts(tb); + + /* balance leaf returns 0 except if combining L R and S into +diff -urNp linux-2.6.35.4/fs/reiserfs/item_ops.c linux-2.6.35.4/fs/reiserfs/item_ops.c +--- linux-2.6.35.4/fs/reiserfs/item_ops.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/reiserfs/item_ops.c 2010-09-17 20:12:09.000000000 -0400 +@@ -102,7 +102,7 @@ static void sd_print_vi(struct virtual_i + vi->vi_index, vi->vi_type, vi->vi_ih); + } + +-static struct item_operations stat_data_ops = { ++static const struct item_operations stat_data_ops = { + .bytes_number = sd_bytes_number, + .decrement_key = sd_decrement_key, + .is_left_mergeable = sd_is_left_mergeable, +@@ -196,7 +196,7 @@ static void direct_print_vi(struct virtu + vi->vi_index, vi->vi_type, vi->vi_ih); + } + +-static struct item_operations direct_ops = { ++static const struct item_operations direct_ops = { + .bytes_number = direct_bytes_number, + .decrement_key = direct_decrement_key, + .is_left_mergeable = direct_is_left_mergeable, +@@ -341,7 +341,7 @@ static void indirect_print_vi(struct vir + vi->vi_index, vi->vi_type, vi->vi_ih); + } + +-static struct item_operations indirect_ops = { ++static const struct item_operations indirect_ops = { + .bytes_number = indirect_bytes_number, + .decrement_key = indirect_decrement_key, + .is_left_mergeable = indirect_is_left_mergeable, +@@ -628,7 +628,7 @@ static void direntry_print_vi(struct vir + printk("\n"); + } + +-static struct item_operations direntry_ops = { ++static const struct item_operations direntry_ops = { + .bytes_number = direntry_bytes_number, + .decrement_key = direntry_decrement_key, + .is_left_mergeable = direntry_is_left_mergeable, +@@ -724,7 +724,7 @@ static void errcatch_print_vi(struct vir + "Invalid item type observed, run fsck ASAP"); + } + +-static struct item_operations errcatch_ops = { ++static const struct item_operations errcatch_ops = { + errcatch_bytes_number, + errcatch_decrement_key, + errcatch_is_left_mergeable, +@@ -746,7 +746,7 @@ static struct item_operations errcatch_o + #error Item types must use disk-format assigned values. + #endif + +-struct item_operations *item_ops[TYPE_ANY + 1] = { ++const struct item_operations * const item_ops[TYPE_ANY + 1] = { + &stat_data_ops, + &indirect_ops, + &direct_ops, +diff -urNp linux-2.6.35.4/fs/reiserfs/procfs.c linux-2.6.35.4/fs/reiserfs/procfs.c +--- linux-2.6.35.4/fs/reiserfs/procfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/reiserfs/procfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -113,7 +113,7 @@ static int show_super(struct seq_file *m + "SMALL_TAILS " : "NO_TAILS ", + replay_only(sb) ? "REPLAY_ONLY " : "", + convert_reiserfs(sb) ? 
"CONV " : "", +- atomic_read(&r->s_generation_counter), ++ atomic_read_unchecked(&r->s_generation_counter), + SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes), + SF(s_do_balance), SF(s_unneeded_left_neighbor), + SF(s_good_search_by_key_reada), SF(s_bmaps), +diff -urNp linux-2.6.35.4/fs/select.c linux-2.6.35.4/fs/select.c +--- linux-2.6.35.4/fs/select.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/select.c 2010-09-17 20:12:37.000000000 -0400 +@@ -20,6 +20,7 @@ + #include <linux/module.h> + #include <linux/slab.h> + #include <linux/poll.h> ++#include <linux/security.h> + #include <linux/personality.h> /* for STICKY_TIMEOUTS */ + #include <linux/file.h> + #include <linux/fdtable.h> +@@ -838,6 +839,7 @@ int do_sys_poll(struct pollfd __user *uf + struct poll_list *walk = head; + unsigned long todo = nfds; + ++ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1); + if (nfds > rlimit(RLIMIT_NOFILE)) + return -EINVAL; + +diff -urNp linux-2.6.35.4/fs/seq_file.c linux-2.6.35.4/fs/seq_file.c +--- linux-2.6.35.4/fs/seq_file.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/seq_file.c 2010-09-17 20:12:09.000000000 -0400 +@@ -76,7 +76,8 @@ static int traverse(struct seq_file *m, + return 0; + } + if (!m->buf) { +- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); ++ m->size = PAGE_SIZE; ++ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!m->buf) + return -ENOMEM; + } +@@ -116,7 +117,8 @@ static int traverse(struct seq_file *m, + Eoverflow: + m->op->stop(m, p); + kfree(m->buf); +- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); ++ m->size <<= 1; ++ m->buf = kmalloc(m->size, GFP_KERNEL); + return !m->buf ? -ENOMEM : -EAGAIN; + } + +@@ -169,7 +171,8 @@ ssize_t seq_read(struct file *file, char + m->version = file->f_version; + /* grab buffer if we didn't have one */ + if (!m->buf) { +- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL); ++ m->size = PAGE_SIZE; ++ m->buf = kmalloc(PAGE_SIZE, GFP_KERNEL); + if (!m->buf) + goto Enomem; + } +@@ -210,7 +213,8 @@ ssize_t seq_read(struct file *file, char + goto Fill; + m->op->stop(m, p); + kfree(m->buf); +- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL); ++ m->size <<= 1; ++ m->buf = kmalloc(m->size, GFP_KERNEL); + if (!m->buf) + goto Enomem; + m->count = 0; +diff -urNp linux-2.6.35.4/fs/smbfs/symlink.c linux-2.6.35.4/fs/smbfs/symlink.c +--- linux-2.6.35.4/fs/smbfs/symlink.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/smbfs/symlink.c 2010-09-17 20:12:09.000000000 -0400 +@@ -55,7 +55,7 @@ static void *smb_follow_link(struct dent + + static void smb_put_link(struct dentry *dentry, struct nameidata *nd, void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + if (!IS_ERR(s)) + __putname(s); + } +diff -urNp linux-2.6.35.4/fs/splice.c linux-2.6.35.4/fs/splice.c +--- linux-2.6.35.4/fs/splice.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/splice.c 2010-09-17 20:12:09.000000000 -0400 +@@ -186,7 +186,7 @@ ssize_t splice_to_pipe(struct pipe_inode + pipe_lock(pipe); + + for (;;) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -240,9 +240,9 @@ ssize_t splice_to_pipe(struct pipe_inode + do_wakeup = 0; + } + +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -566,7 +566,7 @@ static ssize_t kernel_readv(struct file + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is 
valid due to the set_fs() */ +- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos); ++ res = vfs_readv(file, (__force const struct iovec __user *)vec, vlen, &pos); + set_fs(old_fs); + + return res; +@@ -581,7 +581,7 @@ static ssize_t kernel_write(struct file + old_fs = get_fs(); + set_fs(get_ds()); + /* The cast to a user pointer is valid due to the set_fs() */ +- res = vfs_write(file, (const char __user *)buf, count, &pos); ++ res = vfs_write(file, (__force const char __user *)buf, count, &pos); + set_fs(old_fs); + + return res; +@@ -634,7 +634,7 @@ ssize_t default_file_splice_read(struct + goto err; + + this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset); +- vec[i].iov_base = (void __user *) page_address(page); ++ vec[i].iov_base = (__force void __user *) page_address(page); + vec[i].iov_len = this_len; + spd.pages[i] = page; + spd.nr_pages++; +@@ -861,10 +861,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed); + int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd) + { + while (!pipe->nrbufs) { +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + return 0; + +- if (!pipe->waiting_writers && sd->num_spliced) ++ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced) + return 0; + + if (sd->flags & SPLICE_F_NONBLOCK) +@@ -1201,7 +1201,7 @@ ssize_t splice_direct_to_actor(struct fi + * out of the pipe right after the splice_to_pipe(). So set + * PIPE_READERS appropriately. + */ +- pipe->readers = 1; ++ atomic_set(&pipe->readers, 1); + + current->splice_pipe = pipe; + } +@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_ + ret = -ERESTARTSYS; + break; + } +- if (!pipe->writers) ++ if (!atomic_read(&pipe->writers)) + break; +- if (!pipe->waiting_writers) { ++ if (!atomic_read(&pipe->waiting_writers)) { + if (flags & SPLICE_F_NONBLOCK) { + ret = -EAGAIN; + break; +@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_ + pipe_lock(pipe); + + while (pipe->nrbufs >= pipe->buffers) { +- if (!pipe->readers) { ++ if (!atomic_read(&pipe->readers)) { + send_sig(SIGPIPE, current, 0); + ret = -EPIPE; + break; +@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_ + ret = -ERESTARTSYS; + break; + } +- pipe->waiting_writers++; ++ atomic_inc(&pipe->waiting_writers); + pipe_wait(pipe); +- pipe->waiting_writers--; ++ atomic_dec(&pipe->waiting_writers); + } + + pipe_unlock(pipe); +@@ -1854,14 +1854,14 @@ retry: + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; + break; + } + +- if (!ipipe->nrbufs && !ipipe->writers) ++ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers)) + break; + + /* +@@ -1961,7 +1961,7 @@ static int link_pipe(struct pipe_inode_i + pipe_double_lock(ipipe, opipe); + + do { +- if (!opipe->readers) { ++ if (!atomic_read(&opipe->readers)) { + send_sig(SIGPIPE, current, 0); + if (!ret) + ret = -EPIPE; +@@ -2006,7 +2006,7 @@ static int link_pipe(struct pipe_inode_i + * return EAGAIN if we have the potential of some data in the + * future, otherwise just return 0 + */ +- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK)) ++ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK)) + ret = -EAGAIN; + + pipe_unlock(ipipe); +diff -urNp linux-2.6.35.4/fs/sysfs/symlink.c linux-2.6.35.4/fs/sysfs/symlink.c +--- linux-2.6.35.4/fs/sysfs/symlink.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/sysfs/symlink.c 2010-09-17 20:12:09.000000000 -0400 +@@ -286,7 
+286,7 @@ static void *sysfs_follow_link(struct de + + static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie) + { +- char *page = nd_get_link(nd); ++ const char *page = nd_get_link(nd); + if (!IS_ERR(page)) + free_page((unsigned long)page); + } +diff -urNp linux-2.6.35.4/fs/udf/misc.c linux-2.6.35.4/fs/udf/misc.c +--- linux-2.6.35.4/fs/udf/misc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/udf/misc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -142,8 +142,8 @@ struct genericFormat *udf_add_extendedat + iinfo->i_lenEAttr += size; + return (struct genericFormat *)&ea[offset]; + } +- if (loc & 0x02) +- ; ++ if (loc & 0x02) { ++ } + + return NULL; + } +diff -urNp linux-2.6.35.4/fs/udf/udfdecl.h linux-2.6.35.4/fs/udf/udfdecl.h +--- linux-2.6.35.4/fs/udf/udfdecl.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/udf/udfdecl.h 2010-09-17 20:12:09.000000000 -0400 +@@ -26,7 +26,7 @@ do { \ + printk(f, ##a); \ + } while (0) + #else +-#define udf_debug(f, a...) /**/ ++#define udf_debug(f, a...) do {} while (0) + #endif + + #define udf_info(f, a...) \ +diff -urNp linux-2.6.35.4/fs/utimes.c linux-2.6.35.4/fs/utimes.c +--- linux-2.6.35.4/fs/utimes.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/utimes.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1,6 +1,7 @@ + #include <linux/compiler.h> + #include <linux/file.h> + #include <linux/fs.h> ++#include <linux/security.h> + #include <linux/linkage.h> + #include <linux/mount.h> + #include <linux/namei.h> +@@ -101,6 +102,12 @@ static int utimes_common(struct path *pa + goto mnt_drop_write_and_out; + } + } ++ ++ if (!gr_acl_handle_utime(path->dentry, path->mnt)) { ++ error = -EACCES; ++ goto mnt_drop_write_and_out; ++ } ++ + mutex_lock(&inode->i_mutex); + error = notify_change(path->dentry, &newattrs); + mutex_unlock(&inode->i_mutex); +diff -urNp linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c +--- linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/xfs/linux-2.6/xfs_ioctl.c 2010-09-17 20:12:37.000000000 -0400 +@@ -136,7 +136,7 @@ xfs_find_handle( + } + + error = -EFAULT; +- if (copy_to_user(hreq->ohandle, &handle, hsize) || ++ if (hsize > sizeof(handle) || copy_to_user(hreq->ohandle, &handle, hsize) || + copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32))) + goto out_put; + +diff -urNp linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c +--- linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/xfs/linux-2.6/xfs_iops.c 2010-09-17 20:12:09.000000000 -0400 +@@ -480,7 +480,7 @@ xfs_vn_put_link( + struct nameidata *nd, + void *p) + { +- char *s = nd_get_link(nd); ++ const char *s = nd_get_link(nd); + + if (!IS_ERR(s)) + kfree(s); +diff -urNp linux-2.6.35.4/fs/xfs/xfs_bmap.c linux-2.6.35.4/fs/xfs/xfs_bmap.c +--- linux-2.6.35.4/fs/xfs/xfs_bmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/fs/xfs/xfs_bmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -296,7 +296,7 @@ xfs_bmap_validate_ret( + int nmap, + int ret_nmap); + #else +-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) ++#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do {} while (0) + #endif /* DEBUG */ + + STATIC int +diff -urNp linux-2.6.35.4/grsecurity/gracl_alloc.c linux-2.6.35.4/grsecurity/gracl_alloc.c +--- linux-2.6.35.4/grsecurity/gracl_alloc.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_alloc.c 2010-09-17 
20:12:37.000000000 -0400
+@@ -0,0 +1,105 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static unsigned long alloc_stack_next = 1;
++static unsigned long alloc_stack_size = 1;
++static void **alloc_stack;
++
++static __inline__ int
++alloc_pop(void)
++{
++	if (alloc_stack_next == 1)
++		return 0;
++
++	kfree(alloc_stack[alloc_stack_next - 2]);
++
++	alloc_stack_next--;
++
++	return 1;
++}
++
++static __inline__ int
++alloc_push(void *buf)
++{
++	if (alloc_stack_next >= alloc_stack_size)
++		return 1;
++
++	alloc_stack[alloc_stack_next - 1] = buf;
++
++	alloc_stack_next++;
++
++	return 0;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++	void *ret = NULL;
++
++	if (!len || len > PAGE_SIZE)
++		goto out;
++
++	ret = kmalloc(len, GFP_KERNEL);
++
++	if (ret) {
++		if (alloc_push(ret)) {
++			kfree(ret);
++			ret = NULL;
++		}
++	}
++
++out:
++	return ret;
++}
++
++void *
++acl_alloc_num(unsigned long num, unsigned long len)
++{
++	if (!len || (num > (PAGE_SIZE / len)))
++		return NULL;
++
++	return acl_alloc(num * len);
++}
++
++void
++acl_free_all(void)
++{
++	if (gr_acl_is_enabled() || !alloc_stack)
++		return;
++
++	while (alloc_pop()) ;
++
++	if (alloc_stack) {
++		if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++			kfree(alloc_stack);
++		else
++			vfree(alloc_stack);
++	}
++
++	alloc_stack = NULL;
++	alloc_stack_size = 1;
++	alloc_stack_next = 1;
++
++	return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++	if ((size * sizeof (void *)) <= PAGE_SIZE)
++		alloc_stack =
++		    (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++	else
++		alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++	alloc_stack_size = size;
++
++	if (!alloc_stack)
++		return 0;
++	else
++		return 1;
++}
+diff -urNp linux-2.6.35.4/grsecurity/gracl.c linux-2.6.35.4/grsecurity/gracl.c
+--- linux-2.6.35.4/grsecurity/gracl.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/gracl.c	2010-09-17 20:18:36.000000000 -0400
+@@ -0,0 +1,3922 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/sysctl.h>
++#include <linux/netdevice.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/pid_namespace.h>
++#include <linux/fdtable.h>
++#include <linux/percpu.h>
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++static struct acl_role_db acl_role_set;
++static struct name_db name_set;
++static struct inodev_db inodev_set;
++
++/* for keeping track of userspace pointers used for subjects, so we
++   can share references in the kernel as well
++*/
++
++static struct dentry *real_root;
++static struct vfsmount *real_root_mnt;
++
++static struct acl_subj_map_db subj_map_set;
++
++static struct acl_role_label *default_role;
++
++static struct acl_role_label *role_list;
++
++static u16 acl_sp_role_value;
++
++extern char *gr_shared_page[4];
++static DECLARE_MUTEX(gr_dev_sem);
++DEFINE_RWLOCK(gr_inode_lock);
++
++struct gr_arg *gr_usermode;
++
++static unsigned int gr_status __read_only = 
GR_STATUS_INIT; ++ ++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum); ++extern void gr_clear_learn_entries(void); ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++extern void gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt); ++#endif ++ ++unsigned char *gr_system_salt; ++unsigned char *gr_system_sum; ++ ++static struct sprole_pw **acl_special_roles = NULL; ++static __u16 num_sprole_pws = 0; ++ ++static struct acl_role_label *kernel_role = NULL; ++ ++static unsigned int gr_auth_attempts = 0; ++static unsigned long gr_auth_expires = 0UL; ++ ++extern struct vfsmount *sock_mnt; ++extern struct vfsmount *pipe_mnt; ++extern struct vfsmount *shm_mnt; ++#ifdef CONFIG_HUGETLBFS ++extern struct vfsmount *hugetlbfs_vfsmount; ++#endif ++ ++static struct acl_object_label *fakefs_obj; ++ ++extern int gr_init_uidset(void); ++extern void gr_free_uidset(void); ++extern void gr_remove_uid(uid_t uid); ++extern int gr_find_uid(uid_t uid); ++ ++extern spinlock_t vfsmount_lock; ++ ++__inline__ int ++gr_acl_is_enabled(void) ++{ ++ return (gr_status & GR_READY); ++} ++ ++char gr_roletype_to_char(void) ++{ ++ switch (current->role->roletype & ++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP | ++ GR_ROLE_SPECIAL)) { ++ case GR_ROLE_DEFAULT: ++ return 'D'; ++ case GR_ROLE_USER: ++ return 'U'; ++ case GR_ROLE_GROUP: ++ return 'G'; ++ case GR_ROLE_SPECIAL: ++ return 'S'; ++ } ++ ++ return 'X'; ++} ++ ++__inline__ int ++gr_acl_tpe_check(void) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ if (current->role->roletype & GR_ROLE_TPE) ++ return 1; ++ else ++ return 0; ++} ++ ++int ++gr_handle_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (inode && S_ISBLK(inode->i_mode) && ++ grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ !capable(CAP_SYS_RAWIO)) ++ return 1; ++#endif ++ return 0; ++} ++ ++static int ++gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb) ++{ ++ if (likely(lena != lenb)) ++ return 0; ++ ++ return !memcmp(a, b, lena); ++} ++ ++static char * __our_d_path(struct dentry *dentry, struct vfsmount *vfsmnt, ++ struct dentry *root, struct vfsmount *rootmnt, ++ char *buffer, int buflen) ++{ ++ char * end = buffer+buflen; ++ char * retval; ++ int namelen; ++ ++ spin_lock(&vfsmount_lock); ++ *--end = '\0'; ++ buflen--; ++ ++ if (buflen < 1) ++ goto Elong; ++ /* Get '/' right */ ++ retval = end-1; ++ *retval = '/'; ++ ++ for (;;) { ++ struct dentry * parent; ++ ++ if (dentry == root && vfsmnt == rootmnt) ++ break; ++ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) { ++ /* Global root? 
*/ ++ if (vfsmnt->mnt_parent == vfsmnt) { ++ goto global_root; ++ } ++ dentry = vfsmnt->mnt_mountpoint; ++ vfsmnt = vfsmnt->mnt_parent; ++ continue; ++ } ++ parent = dentry->d_parent; ++ prefetch(parent); ++ namelen = dentry->d_name.len; ++ buflen -= namelen + 1; ++ if (buflen < 0) ++ goto Elong; ++ end -= namelen; ++ memcpy(end, dentry->d_name.name, namelen); ++ *--end = '/'; ++ retval = end; ++ dentry = parent; ++ } ++ ++out: ++ spin_unlock(&vfsmount_lock); ++ return retval; ++ ++global_root: ++ namelen = dentry->d_name.len; ++ buflen -= namelen; ++ if (buflen < 0) ++ goto Elong; ++ retval -= namelen-1; /* hit the slash */ ++ memcpy(retval, dentry->d_name.name, namelen); ++ goto out; ++Elong: ++ retval = ERR_PTR(-ENAMETOOLONG); ++ goto out; ++} ++ ++static char * ++gen_full_path(struct dentry *dentry, struct vfsmount *vfsmnt, ++ struct dentry *root, struct vfsmount *rootmnt, char *buf, int buflen) ++{ ++ char *retval; ++ ++ retval = __our_d_path(dentry, vfsmnt, root, rootmnt, buf, buflen); ++ if (unlikely(IS_ERR(retval))) ++ retval = strcpy(buf, "<path too long>"); ++ else if (unlikely(retval[1] == '/' && retval[2] == '\0')) ++ retval[1] = '\0'; ++ ++ return retval; ++} ++ ++static char * ++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ char *res; ++ ++ /* we can use real_root, real_root_mnt, because this is only called ++ by the RBAC system */ ++ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, real_root, real_root_mnt, buf, buflen); ++ ++ return res; ++} ++ ++static char * ++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt, ++ char *buf, int buflen) ++{ ++ char *res; ++ struct dentry *root; ++ struct vfsmount *rootmnt; ++ struct task_struct *reaper = &init_task; ++ ++ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */ ++ read_lock(&reaper->fs->lock); ++ root = dget(reaper->fs->root.dentry); ++ rootmnt = mntget(reaper->fs->root.mnt); ++ read_unlock(&reaper->fs->lock); ++ ++ spin_lock(&dcache_lock); ++ res = gen_full_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen); ++ spin_unlock(&dcache_lock); ++ ++ dput(root); ++ mntput(rootmnt); ++ return res; ++} ++ ++static char * ++gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ char *ret; ++ spin_lock(&dcache_lock); ++ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++ spin_unlock(&dcache_lock); ++ return ret; ++} ++ ++char * ++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()), ++ PAGE_SIZE); ++} ++ ++char * ++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()), ++ PAGE_SIZE); ++} 
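
The __our_d_path() helper added above assembles the pathname right to left: it plants a NUL at the end of a fixed buffer and prepends one "/component" per parent hop, so the full path is produced in a single child-to-parent walk with no temporary reversal and no allocation (the gr_to_filename* wrappers appear to exist precisely so the per-CPU gr_shared_page buffers can serve as that fixed buffer). The following user-space sketch illustrates only that buffer strategy; struct node and build_path are invented stand-ins for the kernel's dentry/vfsmount walk, not code from the patch.

/*
 * Illustrative analogue of the right-to-left assembly in __our_d_path().
 * Hypothetical names throughout; only the buffer trick mirrors the patch.
 */
#include <stdio.h>
#include <string.h>

struct node {                      /* stand-in for a dentry */
	const char *name;
	struct node *parent;       /* NULL at the global root */
};

/* Build "/a/b/c" into buf by walking leaf -> root; NULL on overflow. */
static char *build_path(struct node *d, char *buf, int buflen)
{
	char *end = buf + buflen;
	char *retval;

	*--end = '\0';
	buflen--;
	if (buflen < 1)
		return NULL;
	retval = end - 1;
	*retval = '/';             /* lone "/" if d is already the root */

	while (d->parent) {
		int namelen = strlen(d->name);

		buflen -= namelen + 1;
		if (buflen < 0)
			return NULL;   /* the patch returns -ENAMETOOLONG here */
		end -= namelen;
		memcpy(end, d->name, namelen);
		*--end = '/';
		retval = end;
		d = d->parent;
	}
	return retval;
}

int main(void)
{
	struct node root = { "", NULL };
	struct node etc  = { "etc", &root };
	struct node pw   = { "passwd", &etc };
	char buf[64];

	printf("%s\n", build_path(&pw, buf, sizeof(buf)));  /* /etc/passwd */
	return 0;
}

Overflow is checked before every copy rather than after, which is why the kernel version can run safely into a page-sized scratch buffer while holding vfsmount_lock.
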
++ ++__inline__ __u32 ++to_gr_audit(const __u32 reqmode) ++{ ++ /* masks off auditable permission flags, then shifts them to create ++ auditing flags, and adds the special case of append auditing if ++ we're requesting write */ ++ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0)); ++} ++ ++struct acl_subject_label * ++lookup_subject_map(const struct acl_subject_label *userp) ++{ ++ unsigned int index = shash(userp, subj_map_set.s_size); ++ struct subject_map *match; ++ ++ match = subj_map_set.s_hash[index]; ++ ++ while (match && match->user != userp) ++ match = match->next; ++ ++ if (match != NULL) ++ return match->kernel; ++ else ++ return NULL; ++} ++ ++static void ++insert_subj_map_entry(struct subject_map *subjmap) ++{ ++ unsigned int index = shash(subjmap->user, subj_map_set.s_size); ++ struct subject_map **curr; ++ ++ subjmap->prev = NULL; ++ ++ curr = &subj_map_set.s_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = subjmap; ++ ++ subjmap->next = *curr; ++ *curr = subjmap; ++ ++ return; ++} ++ ++static struct acl_role_label * ++lookup_acl_role_label(const struct task_struct *task, const uid_t uid, ++ const gid_t gid) ++{ ++ unsigned int index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size); ++ struct acl_role_label *match; ++ struct role_allowed_ip *ipp; ++ unsigned int x; ++ ++ match = acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == uid) ++ goto found; ++ } ++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER) ++ break; ++ match = match->next; ++ } ++found: ++ if (match == NULL) { ++ try_group: ++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size); ++ match = acl_role_set.r_hash[index]; ++ ++ while (match) { ++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) { ++ for (x = 0; x < match->domain_child_num; x++) { ++ if (match->domain_children[x] == gid) ++ goto found2; ++ } ++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP) ++ break; ++ match = match->next; ++ } ++found2: ++ if (match == NULL) ++ match = default_role; ++ if (match->allowed_ips == NULL) ++ return match; ++ else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(task->signal->curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ match = default_role; ++ } ++ } else if (match->allowed_ips == NULL) { ++ return match; ++ } else { ++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) { ++ if (likely ++ ((ntohl(task->signal->curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask))) ++ return match; ++ } ++ goto try_group; ++ } ++ ++ return match; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = fhash(ino, dev, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++struct acl_subject_label * ++lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev, ++ const struct acl_role_label *role) ++{ ++ unsigned int index = fhash(ino, dev, role->subj_hash_size); 
++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct acl_object_label * ++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev, ++ const struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(ino, dev, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ !(match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && (match->mode & GR_DELETED)) ++ return match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != ino || match->device != dev || ++ (match->mode & GR_DELETED))) { ++ match = match->next; ++ } ++ ++ if (match && !(match->mode & GR_DELETED)) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct name_entry * ++lookup_name_entry(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ struct name_entry *match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len))) ++ match = match->next; ++ ++ return match; ++} ++ ++static struct name_entry * ++lookup_name_entry_create(const char *name) ++{ ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ struct name_entry *match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ !match->deleted)) ++ match = match->next; ++ ++ if (match && match->deleted) ++ return match; ++ ++ match = name_set.n_hash[index]; ++ ++ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) || ++ match->deleted)) ++ match = match->next; ++ ++ if (match && !match->deleted) ++ return match; ++ else ++ return NULL; ++} ++ ++static struct inodev_entry * ++lookup_inodev_entry(const ino_t ino, const dev_t dev) ++{ ++ unsigned int index = fhash(ino, dev, inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != ino || match->nentry->device != dev)) ++ match = match->next; ++ ++ return match; ++} ++ ++static void ++insert_inodev_entry(struct inodev_entry *entry) ++{ ++ unsigned int index = fhash(entry->nentry->inode, entry->nentry->device, ++ inodev_set.i_size); ++ struct inodev_entry **curr; ++ ++ entry->prev = NULL; ++ ++ curr = &inodev_set.i_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = entry; ++ ++ entry->next = *curr; ++ *curr = entry; ++ ++ return; ++} ++ ++static void ++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid) ++{ ++ unsigned int index = ++ rhash(uidgid, 
role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size); ++ struct acl_role_label **curr; ++ struct acl_role_label *tmp; ++ ++ curr = &acl_role_set.r_hash[index]; ++ ++ /* if role was already inserted due to domains and already has ++ a role in the same bucket as it attached, then we need to ++ combine these two buckets ++ */ ++ if (role->next) { ++ tmp = role->next; ++ while (tmp->next) ++ tmp = tmp->next; ++ tmp->next = *curr; ++ } else ++ role->next = *curr; ++ *curr = role; ++ ++ return; ++} ++ ++static void ++insert_acl_role_label(struct acl_role_label *role) ++{ ++ int i; ++ ++ if (role_list == NULL) { ++ role_list = role; ++ role->prev = NULL; ++ } else { ++ role->prev = role_list; ++ role_list = role; ++ } ++ ++ /* used for hash chains */ ++ role->next = NULL; ++ ++ if (role->roletype & GR_ROLE_DOMAIN) { ++ for (i = 0; i < role->domain_child_num; i++) ++ __insert_acl_role_label(role, role->domain_children[i]); ++ } else ++ __insert_acl_role_label(role, role->uidgid); ++} ++ ++static int ++insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted) ++{ ++ struct name_entry **curr, *nentry; ++ struct inodev_entry *ientry; ++ unsigned int len = strlen(name); ++ unsigned int key = full_name_hash(name, len); ++ unsigned int index = key % name_set.n_size; ++ ++ curr = &name_set.n_hash[index]; ++ ++ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len))) ++ curr = &((*curr)->next); ++ ++ if (*curr != NULL) ++ return 1; ++ ++ nentry = acl_alloc(sizeof (struct name_entry)); ++ if (nentry == NULL) ++ return 0; ++ ientry = acl_alloc(sizeof (struct inodev_entry)); ++ if (ientry == NULL) ++ return 0; ++ ientry->nentry = nentry; ++ ++ nentry->key = key; ++ nentry->name = name; ++ nentry->inode = inode; ++ nentry->device = device; ++ nentry->len = len; ++ nentry->deleted = deleted; ++ ++ nentry->prev = NULL; ++ curr = &name_set.n_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = nentry; ++ nentry->next = *curr; ++ *curr = nentry; ++ ++ /* insert us into the table searchable by inode/dev */ ++ insert_inodev_entry(ientry); ++ ++ return 1; ++} ++ ++static void ++insert_acl_obj_label(struct acl_object_label *obj, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = ++ fhash(obj->inode, obj->device, subj->obj_hash_size); ++ struct acl_object_label **curr; ++ ++ ++ obj->prev = NULL; ++ ++ curr = &subj->obj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++static void ++insert_acl_subj_label(struct acl_subject_label *obj, ++ struct acl_role_label *role) ++{ ++ unsigned int index = fhash(obj->inode, obj->device, role->subj_hash_size); ++ struct acl_subject_label **curr; ++ ++ obj->prev = NULL; ++ ++ curr = &role->subj_hash[index]; ++ if (*curr != NULL) ++ (*curr)->prev = obj; ++ ++ obj->next = *curr; ++ *curr = obj; ++ ++ return; ++} ++ ++/* allocating chained hash tables, so optimal size is where lambda ~ 1 */ ++ ++static void * ++create_table(__u32 * len, int elementsize) ++{ ++ unsigned int table_sizes[] = { ++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381, ++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143, ++ 4194301, 8388593, 16777213, 33554393, 67108859 ++ }; ++ void *newtable = NULL; ++ unsigned int pwr = 0; ++ ++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) && ++ table_sizes[pwr] <= *len) ++ pwr++; ++ ++ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize)) ++ return 
newtable; ++ ++ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE) ++ newtable = ++ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL); ++ else ++ newtable = vmalloc(table_sizes[pwr] * elementsize); ++ ++ *len = table_sizes[pwr]; ++ ++ return newtable; ++} ++ ++static int ++init_variables(const struct gr_arg *arg) ++{ ++ struct task_struct *reaper = &init_task; ++ unsigned int stacksize; ++ ++ subj_map_set.s_size = arg->role_db.num_subjects; ++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children; ++ name_set.n_size = arg->role_db.num_objects; ++ inodev_set.i_size = arg->role_db.num_objects; ++ ++ if (!subj_map_set.s_size || !acl_role_set.r_size || ++ !name_set.n_size || !inodev_set.i_size) ++ return 1; ++ ++ if (!gr_init_uidset()) ++ return 1; ++ ++ /* set up the stack that holds allocation info */ ++ ++ stacksize = arg->role_db.num_pointers + 5; ++ ++ if (!acl_alloc_stack_init(stacksize)) ++ return 1; ++ ++ /* grab reference for the real root dentry and vfsmount */ ++ read_lock(&reaper->fs->lock); ++ real_root_mnt = mntget(reaper->fs->root.mnt); ++ real_root = dget(reaper->fs->root.dentry); ++ read_unlock(&reaper->fs->lock); ++ ++ fakefs_obj = acl_alloc(sizeof(struct acl_object_label)); ++ if (fakefs_obj == NULL) ++ return 1; ++ fakefs_obj->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC; ++ ++ subj_map_set.s_hash = ++ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *)); ++ acl_role_set.r_hash = ++ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *)); ++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *)); ++ inodev_set.i_hash = ++ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *)); ++ ++ if (!subj_map_set.s_hash || !acl_role_set.r_hash || ++ !name_set.n_hash || !inodev_set.i_hash) ++ return 1; ++ ++ memset(subj_map_set.s_hash, 0, ++ sizeof(struct subject_map *) * subj_map_set.s_size); ++ memset(acl_role_set.r_hash, 0, ++ sizeof (struct acl_role_label *) * acl_role_set.r_size); ++ memset(name_set.n_hash, 0, ++ sizeof (struct name_entry *) * name_set.n_size); ++ memset(inodev_set.i_hash, 0, ++ sizeof (struct inodev_entry *) * inodev_set.i_size); ++ ++ return 0; ++} ++ ++/* free information not needed after startup ++ currently contains user->kernel pointer mappings for subjects ++*/ ++ ++static void ++free_init_variables(void) ++{ ++ __u32 i; ++ ++ if (subj_map_set.s_hash) { ++ for (i = 0; i < subj_map_set.s_size; i++) { ++ if (subj_map_set.s_hash[i]) { ++ kfree(subj_map_set.s_hash[i]); ++ subj_map_set.s_hash[i] = NULL; ++ } ++ } ++ ++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <= ++ PAGE_SIZE) ++ kfree(subj_map_set.s_hash); ++ else ++ vfree(subj_map_set.s_hash); ++ } ++ ++ return; ++} ++ ++static void ++free_variables(void) ++{ ++ struct acl_subject_label *s; ++ struct acl_role_label *r; ++ struct task_struct *task, *task2; ++ unsigned int x; ++ ++ gr_clear_learn_entries(); ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(task2, task) { ++ task->acl_sp_role = 0; ++ task->acl_role_id = 0; ++ task->acl = NULL; ++ task->role = NULL; ++ } while_each_thread(task2, task); ++ read_unlock(&tasklist_lock); ++ ++ /* release the reference to the real root dentry and vfsmount */ ++ if (real_root) ++ dput(real_root); ++ real_root = NULL; ++ if (real_root_mnt) ++ mntput(real_root_mnt); ++ real_root_mnt = NULL; ++ ++ /* free all object hash tables */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (r->subj_hash == NULL) ++ goto next_role; ++ FOR_EACH_SUBJECT_START(r, 
s, x) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_SUBJECT_END(s, x) ++ FOR_EACH_NESTED_SUBJECT_START(r, s) ++ if (s->obj_hash == NULL) ++ break; ++ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE) ++ kfree(s->obj_hash); ++ else ++ vfree(s->obj_hash); ++ FOR_EACH_NESTED_SUBJECT_END(s) ++ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE) ++ kfree(r->subj_hash); ++ else ++ vfree(r->subj_hash); ++ r->subj_hash = NULL; ++next_role: ++ FOR_EACH_ROLE_END(r) ++ ++ acl_free_all(); ++ ++ if (acl_role_set.r_hash) { ++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <= ++ PAGE_SIZE) ++ kfree(acl_role_set.r_hash); ++ else ++ vfree(acl_role_set.r_hash); ++ } ++ if (name_set.n_hash) { ++ if ((name_set.n_size * sizeof (struct name_entry *)) <= ++ PAGE_SIZE) ++ kfree(name_set.n_hash); ++ else ++ vfree(name_set.n_hash); ++ } ++ ++ if (inodev_set.i_hash) { ++ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <= ++ PAGE_SIZE) ++ kfree(inodev_set.i_hash); ++ else ++ vfree(inodev_set.i_hash); ++ } ++ ++ gr_free_uidset(); ++ ++ memset(&name_set, 0, sizeof (struct name_db)); ++ memset(&inodev_set, 0, sizeof (struct inodev_db)); ++ memset(&acl_role_set, 0, sizeof (struct acl_role_db)); ++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db)); ++ ++ default_role = NULL; ++ role_list = NULL; ++ ++ return; ++} ++ ++static __u32 ++count_user_objs(struct acl_object_label *userp) ++{ ++ struct acl_object_label o_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_from_user(&o_tmp, userp, ++ sizeof (struct acl_object_label))) ++ break; ++ ++ userp = o_tmp.prev; ++ num++; ++ } ++ ++ return num; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role); ++ ++static int ++copy_user_glob(struct acl_object_label *obj) ++{ ++ struct acl_object_label *g_tmp, **guser; ++ unsigned int len; ++ char *tmp; ++ ++ if (obj->globbed == NULL) ++ return 0; ++ ++ guser = &obj->globbed; ++ while (*guser) { ++ g_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label)); ++ if (g_tmp == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(g_tmp, *guser, ++ sizeof (struct acl_object_label))) ++ return -EFAULT; ++ ++ len = strnlen_user(g_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, g_tmp->filename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ g_tmp->filename = tmp; ++ ++ *guser = g_tmp; ++ guser = &(g_tmp->next); ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj, ++ struct acl_role_label *role) ++{ ++ struct acl_object_label *o_tmp; ++ unsigned int len; ++ int ret; ++ char *tmp; ++ ++ while (userp) { ++ if ((o_tmp = (struct acl_object_label *) ++ acl_alloc(sizeof (struct acl_object_label))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(o_tmp, userp, ++ sizeof (struct acl_object_label))) ++ return -EFAULT; ++ ++ userp = o_tmp->prev; ++ ++ len = strnlen_user(o_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, o_tmp->filename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ o_tmp->filename = tmp; ++ ++ 
insert_acl_obj_label(o_tmp, subj); ++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode, ++ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0)) ++ return -ENOMEM; ++ ++ ret = copy_user_glob(o_tmp); ++ if (ret) ++ return ret; ++ ++ if (o_tmp->nested) { ++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role); ++ if (IS_ERR(o_tmp->nested)) ++ return PTR_ERR(o_tmp->nested); ++ ++ /* insert into nested subject list */ ++ o_tmp->nested->next = role->hash->first; ++ role->hash->first = o_tmp->nested; ++ } ++ } ++ ++ return 0; ++} ++ ++static __u32 ++count_user_subjs(struct acl_subject_label *userp) ++{ ++ struct acl_subject_label s_tmp; ++ __u32 num = 0; ++ ++ while (userp) { ++ if (copy_from_user(&s_tmp, userp, ++ sizeof (struct acl_subject_label))) ++ break; ++ ++ userp = s_tmp.prev; ++ /* do not count nested subjects against this count, since ++ they are not included in the hash table, but are ++ attached to objects. We have already counted ++ the subjects in userspace for the allocation ++ stack ++ */ ++ if (!(s_tmp.mode & GR_NESTED)) ++ num++; ++ } ++ ++ return num; ++} ++ ++static int ++copy_user_allowedips(struct acl_role_label *rolep) ++{ ++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast; ++ ++ ruserip = rolep->allowed_ips; ++ ++ while (ruserip) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_allowed_ip *) ++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(rtmp, ruserip, ++ sizeof (struct role_allowed_ip))) ++ return -EFAULT; ++ ++ ruserip = rtmp->prev; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->allowed_ips = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!ruserip) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_transitions(struct acl_role_label *rolep) ++{ ++ struct role_transition *rusertp, *rtmp = NULL, *rlast; ++ ++ unsigned int len; ++ char *tmp; ++ ++ rusertp = rolep->transitions; ++ ++ while (rusertp) { ++ rlast = rtmp; ++ ++ if ((rtmp = (struct role_transition *) ++ acl_alloc(sizeof (struct role_transition))) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(rtmp, rusertp, ++ sizeof (struct role_transition))) ++ return -EFAULT; ++ ++ rusertp = rtmp->prev; ++ ++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= GR_SPROLE_LEN) ++ return -EINVAL; ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return -ENOMEM; ++ ++ if (copy_from_user(tmp, rtmp->rolename, len)) ++ return -EFAULT; ++ tmp[len-1] = '\0'; ++ rtmp->rolename = tmp; ++ ++ if (!rlast) { ++ rtmp->prev = NULL; ++ rolep->transitions = rtmp; ++ } else { ++ rlast->next = rtmp; ++ rtmp->prev = rlast; ++ } ++ ++ if (!rusertp) ++ rtmp->next = NULL; ++ } ++ ++ return 0; ++} ++ ++static struct acl_subject_label * ++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label *s_tmp = NULL, *s_tmp2; ++ unsigned int len; ++ char *tmp; ++ __u32 num_objs; ++ struct acl_ip_label **i_tmp, *i_utmp2; ++ struct gr_hash_struct ghash; ++ struct subject_map *subjmap; ++ unsigned int i_num; ++ int err; ++ ++ s_tmp = lookup_subject_map(userp); ++ ++ /* we've already copied this subject into the kernel, just return ++ the reference to it, and don't copy it over again ++ */ ++ if (s_tmp) ++ return(s_tmp); ++ ++ if ((s_tmp = (struct acl_subject_label *) ++ acl_alloc(sizeof (struct acl_subject_label))) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL); ++ if (subjmap 
== NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ subjmap->user = userp; ++ subjmap->kernel = s_tmp; ++ insert_subj_map_entry(subjmap); ++ ++ if (copy_from_user(s_tmp, userp, ++ sizeof (struct acl_subject_label))) ++ return ERR_PTR(-EFAULT); ++ ++ len = strnlen_user(s_tmp->filename, PATH_MAX); ++ ++ if (!len || len >= PATH_MAX) ++ return ERR_PTR(-EINVAL); ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user(tmp, s_tmp->filename, len)) ++ return ERR_PTR(-EFAULT); ++ tmp[len-1] = '\0'; ++ s_tmp->filename = tmp; ++ ++ if (!strcmp(s_tmp->filename, "/")) ++ role->root_label = s_tmp; ++ ++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct))) ++ return ERR_PTR(-EFAULT); ++ ++ /* copy user and group transition tables */ ++ ++ if (s_tmp->user_trans_num) { ++ uid_t *uidlist; ++ ++ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t)); ++ if (uidlist == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->user_transitions = uidlist; ++ } ++ ++ if (s_tmp->group_trans_num) { ++ gid_t *gidlist; ++ ++ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t)); ++ if (gidlist == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t))) ++ return ERR_PTR(-EFAULT); ++ ++ s_tmp->group_transitions = gidlist; ++ } ++ ++ /* set up object hash table */ ++ num_objs = count_user_objs(ghash.first); ++ ++ s_tmp->obj_hash_size = num_objs; ++ s_tmp->obj_hash = ++ (struct acl_object_label **) ++ create_table(&(s_tmp->obj_hash_size), sizeof(void *)); ++ ++ if (!s_tmp->obj_hash) ++ return ERR_PTR(-ENOMEM); ++ ++ memset(s_tmp->obj_hash, 0, ++ s_tmp->obj_hash_size * ++ sizeof (struct acl_object_label *)); ++ ++ /* add in objects */ ++ err = copy_user_objs(ghash.first, s_tmp, role); ++ ++ if (err) ++ return ERR_PTR(err); ++ ++ /* set pointer for parent subject */ ++ if (s_tmp->parent_subject) { ++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role); ++ ++ if (IS_ERR(s_tmp2)) ++ return s_tmp2; ++ ++ s_tmp->parent_subject = s_tmp2; ++ } ++ ++ /* add in ip acls */ ++ ++ if (!s_tmp->ip_num) { ++ s_tmp->ips = NULL; ++ goto insert; ++ } ++ ++ i_tmp = ++ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num, ++ sizeof (struct acl_ip_label *)); ++ ++ if (!i_tmp) ++ return ERR_PTR(-ENOMEM); ++ ++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) { ++ *(i_tmp + i_num) = ++ (struct acl_ip_label *) ++ acl_alloc(sizeof (struct acl_ip_label)); ++ if (!*(i_tmp + i_num)) ++ return ERR_PTR(-ENOMEM); ++ ++ if (copy_from_user ++ (&i_utmp2, s_tmp->ips + i_num, ++ sizeof (struct acl_ip_label *))) ++ return ERR_PTR(-EFAULT); ++ ++ if (copy_from_user ++ (*(i_tmp + i_num), i_utmp2, ++ sizeof (struct acl_ip_label))) ++ return ERR_PTR(-EFAULT); ++ ++ if ((*(i_tmp + i_num))->iface == NULL) ++ continue; ++ ++ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ); ++ if (!len || len >= IFNAMSIZ) ++ return ERR_PTR(-EINVAL); ++ tmp = acl_alloc(len); ++ if (tmp == NULL) ++ return ERR_PTR(-ENOMEM); ++ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len)) ++ return ERR_PTR(-EFAULT); ++ (*(i_tmp + i_num))->iface = tmp; ++ } ++ ++ s_tmp->ips = i_tmp; ++ ++insert: ++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode, ++ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 
1 : 0)) ++ return ERR_PTR(-ENOMEM); ++ ++ return s_tmp; ++} ++ ++static int ++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role) ++{ ++ struct acl_subject_label s_pre; ++ struct acl_subject_label * ret; ++ int err; ++ ++ while (userp) { ++ if (copy_from_user(&s_pre, userp, ++ sizeof (struct acl_subject_label))) ++ return -EFAULT; ++ ++ /* do not add nested subjects here, add ++ while parsing objects ++ */ ++ ++ if (s_pre.mode & GR_NESTED) { ++ userp = s_pre.prev; ++ continue; ++ } ++ ++ ret = do_copy_user_subj(userp, role); ++ ++ err = PTR_ERR(ret); ++ if (IS_ERR(ret)) ++ return err; ++ ++ insert_acl_subj_label(ret, role); ++ ++ userp = s_pre.prev; ++ } ++ ++ return 0; ++} ++ ++static int ++copy_user_acl(struct gr_arg *arg) ++{ ++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2; ++ struct sprole_pw *sptmp; ++ struct gr_hash_struct *ghash; ++ uid_t *domainlist; ++ unsigned int r_num; ++ unsigned int len; ++ char *tmp; ++ int err = 0; ++ __u16 i; ++ __u32 num_subjs; ++ ++ /* we need a default and kernel role */ ++ if (arg->role_db.num_roles < 2) ++ return -EINVAL; ++ ++ /* copy special role authentication info from userspace */ ++ ++ num_sprole_pws = arg->num_sprole_pws; ++ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *)); ++ ++ if (!acl_special_roles) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ ++ for (i = 0; i < num_sprole_pws; i++) { ++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw)); ++ if (!sptmp) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ if (copy_from_user(sptmp, arg->sprole_pws + i, ++ sizeof (struct sprole_pw))) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ ++ len = ++ strnlen_user(sptmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= GR_SPROLE_LEN) { ++ err = -EINVAL; ++ goto cleanup; ++ } ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ ++ if (copy_from_user(tmp, sptmp->rolename, len)) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ tmp[len-1] = '\0'; ++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG ++ printk(KERN_ALERT "Copying special role %s\n", tmp); ++#endif ++ sptmp->rolename = tmp; ++ acl_special_roles[i] = sptmp; ++ } ++ ++ r_utmp = (struct acl_role_label **) arg->role_db.r_table; ++ ++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) { ++ r_tmp = acl_alloc(sizeof (struct acl_role_label)); ++ ++ if (!r_tmp) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ ++ if (copy_from_user(&r_utmp2, r_utmp + r_num, ++ sizeof (struct acl_role_label *))) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ ++ if (copy_from_user(r_tmp, r_utmp2, ++ sizeof (struct acl_role_label))) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ ++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN); ++ ++ if (!len || len >= PATH_MAX) { ++ err = -EINVAL; ++ goto cleanup; ++ } ++ ++ if ((tmp = (char *) acl_alloc(len)) == NULL) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ if (copy_from_user(tmp, r_tmp->rolename, len)) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ tmp[len-1] = '\0'; ++ r_tmp->rolename = tmp; ++ ++ if (!strcmp(r_tmp->rolename, "default") ++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) { ++ default_role = r_tmp; ++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) { ++ kernel_role = r_tmp; ++ } ++ ++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ ++ r_tmp->hash = 
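++ /* r_tmp->hash still points at the userland gr_hash_struct copied
++    raw from the role; swap in the kernel-side copy before anything
++    dereferences it.  The subject table is then rebuilt below from
++    hash->first with a size chosen by create_table(), rather than
++    trusting any table layout supplied by userland. */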
ghash; ++ ++ num_subjs = count_user_subjs(r_tmp->hash->first); ++ ++ r_tmp->subj_hash_size = num_subjs; ++ r_tmp->subj_hash = ++ (struct acl_subject_label **) ++ create_table(&(r_tmp->subj_hash_size), sizeof(void *)); ++ ++ if (!r_tmp->subj_hash) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ ++ err = copy_user_allowedips(r_tmp); ++ if (err) ++ goto cleanup; ++ ++ /* copy domain info */ ++ if (r_tmp->domain_children != NULL) { ++ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t)); ++ if (domainlist == NULL) { ++ err = -ENOMEM; ++ goto cleanup; ++ } ++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) { ++ err = -EFAULT; ++ goto cleanup; ++ } ++ r_tmp->domain_children = domainlist; ++ } ++ ++ err = copy_user_transitions(r_tmp); ++ if (err) ++ goto cleanup; ++ ++ memset(r_tmp->subj_hash, 0, ++ r_tmp->subj_hash_size * ++ sizeof (struct acl_subject_label *)); ++ ++ err = copy_user_subjs(r_tmp->hash->first, r_tmp); ++ ++ if (err) ++ goto cleanup; ++ ++ /* set nested subject list to null */ ++ r_tmp->hash->first = NULL; ++ ++ insert_acl_role_label(r_tmp); ++ } ++ ++ goto return_err; ++ cleanup: ++ free_variables(); ++ return_err: ++ return err; ++ ++} ++ ++static int ++gracl_init(struct gr_arg *args) ++{ ++ int error = 0; ++ ++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN); ++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN); ++ ++ if (init_variables(args)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION); ++ error = -ENOMEM; ++ free_variables(); ++ goto out; ++ } ++ ++ error = copy_user_acl(args); ++ free_init_variables(); ++ if (error) { ++ free_variables(); ++ goto out; ++ } ++ ++ if ((error = gr_set_acls(0))) { ++ free_variables(); ++ goto out; ++ } ++ ++ pax_open_kernel(); ++ gr_status |= GR_READY; ++ pax_close_kernel(); ++ ++ out: ++ return error; ++} ++ ++/* derived from glibc fnmatch() 0: match, 1: no match*/ ++ ++static int ++glob_match(const char *p, const char *n) ++{ ++ char c; ++ ++ while ((c = *p++) != '\0') { ++ switch (c) { ++ case '?': ++ if (*n == '\0') ++ return 1; ++ else if (*n == '/') ++ return 1; ++ break; ++ case '\\': ++ if (*n != c) ++ return 1; ++ break; ++ case '*': ++ for (c = *p++; c == '?' || c == '*'; c = *p++) { ++ if (*n == '/') ++ return 1; ++ else if (c == '?') { ++ if (*n == '\0') ++ return 1; ++ else ++ ++n; ++ } ++ } ++ if (c == '\0') { ++ return 0; ++ } else { ++ const char *endp; ++ ++ if ((endp = strchr(n, '/')) == NULL) ++ endp = n + strlen(n); ++ ++ if (c == '[') { ++ for (--p; n < endp; ++n) ++ if (!glob_match(p, n)) ++ return 0; ++ } else if (c == '/') { ++ while (*n != '\0' && *n != '/') ++ ++n; ++ if (*n == '/' && !glob_match(p, n + 1)) ++ return 0; ++ } else { ++ for (--p; n < endp; ++n) ++ if (*n == c && !glob_match(p, n)) ++ return 0; ++ } ++ ++ return 1; ++ } ++ case '[': ++ { ++ int not; ++ char cold; ++ ++ if (*n == '\0' || *n == '/') ++ return 1; ++ ++ not = (*p == '!' 
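++ /* strcmp-style return convention: 0 means the pattern matched,
++    1 means it did not.  Note '?' and character classes refuse to
++    match '/', so a single wildcard cannot silently cross a
++    directory boundary; classes accept both '!' and '^' as negation,
++    e.g. "[!.]*" for entries not starting with a dot. */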
|| *p == '^'); ++ if (not) ++ ++p; ++ ++ c = *p++; ++ for (;;) { ++ unsigned char fn = (unsigned char)*n; ++ ++ if (c == '\0') ++ return 1; ++ else { ++ if (c == fn) ++ goto matched; ++ cold = c; ++ c = *p++; ++ ++ if (c == '-' && *p != ']') { ++ unsigned char cend = *p++; ++ ++ if (cend == '\0') ++ return 1; ++ ++ if (cold <= fn && fn <= cend) ++ goto matched; ++ ++ c = *p++; ++ } ++ } ++ ++ if (c == ']') ++ break; ++ } ++ if (!not) ++ return 1; ++ break; ++ matched: ++ while (c != ']') { ++ if (c == '\0') ++ return 1; ++ ++ c = *p++; ++ } ++ if (not) ++ return 1; ++ } ++ break; ++ default: ++ if (c != *n) ++ return 1; ++ } ++ ++ ++n; ++ } ++ ++ if (*n == '\0') ++ return 0; ++ ++ if (*n == '/') ++ return 0; ++ ++ return 1; ++} ++ ++static struct acl_object_label * ++chk_glob_label(struct acl_object_label *globbed, ++ struct dentry *dentry, struct vfsmount *mnt, char **path) ++{ ++ struct acl_object_label *tmp; ++ ++ if (*path == NULL) ++ *path = gr_to_filename_nolock(dentry, mnt); ++ ++ tmp = globbed; ++ ++ while (tmp) { ++ if (!glob_match(tmp->filename, *path)) ++ return tmp; ++ tmp = tmp->next; ++ } ++ ++ return NULL; ++} ++ ++static struct acl_object_label * ++__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ const ino_t curr_ino, const dev_t curr_dev, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ struct acl_subject_label *tmpsubj; ++ struct acl_object_label *retval; ++ struct acl_object_label *retval2; ++ ++ tmpsubj = (struct acl_subject_label *) subj; ++ read_lock(&gr_inode_lock); ++ do { ++ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj); ++ if (retval) { ++ if (checkglob && retval->globbed) { ++ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry, ++ (struct vfsmount *)orig_mnt, path); ++ if (retval2) ++ retval = retval2; ++ } ++ break; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++ read_unlock(&gr_inode_lock); ++ ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt, ++ const struct dentry *curr_dentry, ++ const struct acl_subject_label *subj, char **path, const int checkglob) ++{ ++ return __full_lookup(orig_dentry, orig_mnt, ++ curr_dentry->d_inode->i_ino, ++ curr_dentry->d_inode->i_sb->s_dev, subj, path, checkglob); ++} ++ ++static struct acl_object_label * ++__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path, const int checkglob) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct acl_object_label *retval; ++ ++ spin_lock(&dcache_lock); ++ ++ if (unlikely(mnt == shm_mnt || mnt == pipe_mnt || mnt == sock_mnt || ++#ifdef CONFIG_HUGETLBFS ++ mnt == hugetlbfs_vfsmount || ++#endif ++ /* ignore Eric Biederman */ ++ IS_PRIVATE(l_dentry->d_inode))) { ++ retval = fakefs_obj; ++ goto out; ++ } ++ ++ for (;;) { ++ if (dentry == real_root && mnt == real_root_mnt) ++ break; ++ ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ ++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = dentry->d_parent; ++ } ++ ++ retval = 
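++ /* Fallthrough for the (chrooted) root case: the loop above walked
++    dentry -> parent, hopping mountpoints, and asked full_lookup()
++    for an (inode, device) object label at each step, so the nearest
++    labelled ancestor wins.  If the walk ended with no hit, try the
++    final dentry here and then real_root as a last resort. */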
full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob); ++ ++ if (retval == NULL) ++ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path, checkglob); ++out: ++ spin_unlock(&dcache_lock); ++ return retval; ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, 1); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj) ++{ ++ char *path = NULL; ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, 0); ++} ++ ++static __inline__ struct acl_object_label * ++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_subject_label *subj, char *path) ++{ ++ return __chk_obj_label(l_dentry, l_mnt, subj, path, 1); ++} ++ ++static struct acl_subject_label * ++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt, ++ const struct acl_role_label *role) ++{ ++ struct dentry *dentry = (struct dentry *) l_dentry; ++ struct vfsmount *mnt = (struct vfsmount *) l_mnt; ++ struct acl_subject_label *retval; ++ ++ spin_lock(&dcache_lock); ++ ++ for (;;) { ++ if (dentry == real_root && mnt == real_root_mnt) ++ break; ++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ ++ read_lock(&gr_inode_lock); ++ retval = ++ lookup_acl_subj_label(dentry->d_inode->i_ino, ++ dentry->d_inode->i_sb->s_dev, role); ++ read_unlock(&gr_inode_lock); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ dentry->d_inode->i_sb->s_dev, role); ++ read_unlock(&gr_inode_lock); ++ if (retval != NULL) ++ goto out; ++ ++ dentry = dentry->d_parent; ++ } ++ ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(dentry->d_inode->i_ino, ++ dentry->d_inode->i_sb->s_dev, role); ++ read_unlock(&gr_inode_lock); ++ ++ if (unlikely(retval == NULL)) { ++ read_lock(&gr_inode_lock); ++ retval = lookup_acl_subj_label(real_root->d_inode->i_ino, ++ real_root->d_inode->i_sb->s_dev, role); ++ read_unlock(&gr_inode_lock); ++ } ++out: ++ spin_unlock(&dcache_lock); ++ ++ return retval; ++} ++ ++static void ++gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->curr_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_sysctl(const char *path, const __u32 mode) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? 
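++ /* The three learn loggers (file, sysctl, id-change) all emit the
++    same header -- role, ids, executable, subject -- through
++    security_learn(), presumably so the userspace learning tool can
++    correlate events from a single subject however triggered. */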
gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ 1UL, 1UL, path, (unsigned long) mode, &task->signal->curr_ip); ++ ++ return; ++} ++ ++static void ++gr_log_learn_id_change(const char type, const unsigned int real, ++ const unsigned int effective, const unsigned int fs) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ ++ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype, ++ cred->uid, cred->gid, task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename, ++ type, real, effective, fs, &task->signal->curr_ip); ++ ++ return; ++} ++ ++__u32 ++gr_check_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, const struct vfsmount * old_mnt) ++{ ++ struct acl_object_label *obj; ++ __u32 oldmode, newmode; ++ __u32 needmode; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (GR_CREATE | GR_LINK); ++ ++ obj = chk_obj_label(old_dentry, old_mnt, current->acl); ++ oldmode = obj->mode; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ oldmode |= (GR_CREATE | GR_LINK); ++ ++ needmode = GR_CREATE | GR_AUDIT_CREATE | GR_SUPPRESS; ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) ++ needmode |= GR_SETID | GR_AUDIT_SETID; ++ ++ newmode = ++ gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ oldmode | needmode); ++ ++ needmode = newmode & (GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | ++ GR_SETID | GR_READ | GR_FIND | GR_DELETE | ++ GR_INHERIT | GR_AUDIT_INHERIT); ++ ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID) && !(newmode & GR_SETID)) ++ goto bad; ++ ++ if ((oldmode & needmode) != needmode) ++ goto bad; ++ ++ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS); ++ if ((newmode & needmode) != needmode) ++ goto bad; ++ ++ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK)) ++ return newmode; ++bad: ++ needmode = oldmode; ++ if (old_dentry->d_inode->i_mode & (S_ISUID | S_ISGID)) ++ needmode |= GR_SETID; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ gr_log_learn(old_dentry, old_mnt, needmode); ++ return (GR_CREATE | GR_LINK); ++ } else if (newmode & GR_SUPPRESS) ++ return GR_SUPPRESS; ++ else ++ return 0; ++} ++ ++__u32 ++gr_search_file(const struct dentry * dentry, const __u32 mode, ++ const struct vfsmount * mnt) ++{ ++ __u32 retval = mode; ++ struct acl_subject_label *curracl; ++ struct acl_object_label *currobj; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ curracl = current->acl; ++ ++ currobj = chk_obj_label(dentry, mnt, curracl); ++ retval = currobj->mode & mode; ++ ++ if (unlikely ++ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE) ++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ retval = new_mode; ++ ++ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN) ++ new_mode |= GR_INHERIT; ++ ++ if (!(mode & GR_NOLEARN)) ++ gr_log_learn(dentry, mnt, new_mode); ++ } ++ ++ return retval; ++} ++ ++__u32 ++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent, ++ const struct vfsmount * mnt, const __u32 mode) ++{ ++ struct name_entry *match; ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *curracl; ++ char 
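++ /* Create-time check: the target may not exist yet, so there is no
++    inode to hash on.  Resolve the canonical pathname against the
++    name_entry table first (an object the policy names explicitly);
++    only if that misses does the label of the parent directory
++    decide, via chk_obj_create_label() below. */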
*path; ++ __u32 retval; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return (mode & ~GR_AUDITS); ++ ++ preempt_disable(); ++ path = gr_to_filename_rbac(new_dentry, mnt); ++ match = lookup_name_entry_create(path); ++ ++ if (!match) ++ goto check_parent; ++ ++ curracl = current->acl; ++ ++ read_lock(&gr_inode_lock); ++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl); ++ read_unlock(&gr_inode_lock); ++ ++ if (matchpo) { ++ if ((matchpo->mode & mode) != ++ (mode & ~(GR_AUDITS | GR_SUPPRESS)) ++ && curracl->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ gr_log_learn(new_dentry, mnt, new_mode); ++ ++ preempt_enable(); ++ return new_mode; ++ } ++ preempt_enable(); ++ return (matchpo->mode & mode); ++ } ++ ++ check_parent: ++ curracl = current->acl; ++ ++ matchpo = chk_obj_create_label(parent, mnt, curracl, path); ++ retval = matchpo->mode & mode; ++ ++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))) ++ && (curracl->mode & (GR_LEARN | GR_INHERITLEARN))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ gr_log_learn(new_dentry, mnt, new_mode); ++ preempt_enable(); ++ return new_mode; ++ } ++ ++ preempt_enable(); ++ return retval; ++} ++ ++int ++gr_check_hidden_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW)) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task(const struct task_struct *task) ++{ ++ if (unlikely(!(gr_status & GR_READY) || !task)) ++ return 0; ++ ++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ task->acl != current->acl) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type) ++{ ++ struct task_struct *p; ++ int ret = 0; ++ ++ if (unlikely(!(gr_status & GR_READY) || !pid)) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) && ++ p->acl != current->acl) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ ++ return ret; ++} ++ ++void ++gr_copy_label(struct task_struct *tsk) ++{ ++ tsk->signal->used_accept = 0; ++ tsk->acl_sp_role = 0; ++ tsk->acl_role_id = current->acl_role_id; ++ tsk->acl = current->acl; ++ tsk->role = current->role; ++ tsk->signal->curr_ip = current->signal->curr_ip; ++ if (current->exec_file) ++ get_file(current->exec_file); ++ tsk->exec_file = current->exec_file; ++ tsk->is_writable = current->is_writable; ++ if (unlikely(current->signal->used_accept)) ++ current->signal->curr_ip = 0; ++ ++ return; ++} ++ ++static void ++gr_set_proc_res(struct task_struct *task) ++{ ++ struct acl_subject_label *proc; ++ unsigned short i; ++ ++ proc = task->acl; ++ ++ if (proc->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return; ++ ++ for (i = 0; i < RLIM_NLIMITS; i++) { ++ if (!(proc->resmask & (1 << i))) ++ continue; ++ ++ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur; ++ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max; ++ } ++ ++ return; ++} ++ ++int ++gr_check_user_change(int real, int effective, int fs) ++{ ++ unsigned int i; ++ __u16 num; ++ uid_t *uidlist; ++ int curuid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | 
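++ /* uid transition policy (the gid twin below follows the same
++    shape): an id of -1 means "leave unchanged" and always passes.
++    GR_ID_ALLOW lists are whitelists -- every requested id must
++    appear; GR_ID_DENY lists are blacklists -- any hit vetoes the
++    whole change.  Non-zero return denies, logging the offending
++    id. */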
GR_INHERITLEARN)) ++ gr_log_learn_id_change('u', real, effective, fs); ++ ++ num = current->acl->user_trans_num; ++ uidlist = current->acl->user_transitions; ++ ++ if (uidlist == NULL) ++ return 0; ++ ++ if (real == -1) ++ realok = 1; ++ if (effective == -1) ++ effectiveok = 1; ++ if (fs == -1) ++ fsok = 1; ++ ++ if (current->acl->user_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curuid = (int)uidlist[i]; ++ if (real == curuid) ++ realok = 1; ++ if (effective == curuid) ++ effectiveok = 1; ++ if (fs == curuid) ++ fsok = 1; ++ } ++ } else if (current->acl->user_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curuid = (int)uidlist[i]; ++ if (real == curuid) ++ break; ++ if (effective == curuid) ++ break; ++ if (fs == curuid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real); ++ return 1; ++ } ++} ++ ++int ++gr_check_group_change(int real, int effective, int fs) ++{ ++ unsigned int i; ++ __u16 num; ++ gid_t *gidlist; ++ int curgid; ++ int realok = 0; ++ int effectiveok = 0; ++ int fsok = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ gr_log_learn_id_change('g', real, effective, fs); ++ ++ num = current->acl->group_trans_num; ++ gidlist = current->acl->group_transitions; ++ ++ if (gidlist == NULL) ++ return 0; ++ ++ if (real == -1) ++ realok = 1; ++ if (effective == -1) ++ effectiveok = 1; ++ if (fs == -1) ++ fsok = 1; ++ ++ if (current->acl->group_trans_type & GR_ID_ALLOW) { ++ for (i = 0; i < num; i++) { ++ curgid = (int)gidlist[i]; ++ if (real == curgid) ++ realok = 1; ++ if (effective == curgid) ++ effectiveok = 1; ++ if (fs == curgid) ++ fsok = 1; ++ } ++ } else if (current->acl->group_trans_type & GR_ID_DENY) { ++ for (i = 0; i < num; i++) { ++ curgid = (int)gidlist[i]; ++ if (real == curgid) ++ break; ++ if (effective == curgid) ++ break; ++ if (fs == curgid) ++ break; ++ } ++ /* not in deny list */ ++ if (i == num) { ++ realok = 1; ++ effectiveok = 1; ++ fsok = 1; ++ } ++ } ++ ++ if (realok && effectiveok && fsok) ++ return 0; ++ else { ++ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 
0 : fs) : effective) : real); ++ return 1; ++ } ++} ++ ++void ++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid) ++{ ++ struct acl_role_label *role = task->role; ++ struct acl_subject_label *subj = NULL; ++ struct acl_object_label *obj; ++ struct file *filp; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ filp = task->exec_file; ++ ++ /* kernel process, we'll give them the kernel role */ ++ if (unlikely(!filp)) { ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++ return; ++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL)) ++ role = lookup_acl_role_label(task, uid, gid); ++ ++ /* perform subject lookup in possibly new role ++ we can use this result below in the case where role == task->role ++ */ ++ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role); ++ ++ /* if we changed uid/gid, but result in the same role ++ and are using inheritance, don't lose the inherited subject ++ if current subject is other than what normal lookup ++ would result in, we arrived via inheritance, don't ++ lose subject ++ */ ++ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) && ++ (subj == task->acl))) ++ task->acl = subj; ++ ++ task->role = role; ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG ++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ ++ gr_set_proc_res(task); ++ ++ return; ++} ++ ++int ++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt, ++ const int unsafe_share) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *newacl; ++ struct acl_object_label *obj; ++ __u32 retmode; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ newacl = chk_subj_label(dentry, mnt, task->role); ++ ++ task_lock(task); ++ if ((((task->ptrace & PT_PTRACED) || unsafe_share) && ++ !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) && ++ !(task->role->roletype & GR_ROLE_GOD) && ++ !gr_search_file(dentry, GR_PTRACERD, mnt) && ++ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) { ++ task_unlock(task); ++ if (unsafe_share) ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt); ++ else ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt); ++ return -EACCES; ++ } ++ task_unlock(task); ++ ++ obj = chk_obj_label(dentry, mnt, task->acl); ++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT); ++ ++ if (!(task->acl->mode & GR_INHERITLEARN) && ++ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) { ++ if (obj->nested) ++ task->acl = obj->nested; ++ else ++ task->acl = newacl; ++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) ++ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt); ++ ++ task->is_writable = 0; ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(dentry, mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = 
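++ /* Second writability probe, now against the current role's root
++    subject; a GR_WRITE hit from either probe flags the task so
++    is_writable_mmap() will not veto its PROT_EXEC mappings later. */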
chk_obj_label(dentry, mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG ++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ return 0; ++} ++ ++/* always called with valid inodev ptr */ ++static void ++do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev) ++{ ++ struct acl_object_label *matchpo; ++ struct acl_subject_label *matchps; ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL) ++ matchpo->mode |= GR_DELETED; ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if (subj->inode == ino && subj->device == dev) ++ subj->mode |= GR_DELETED; ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL) ++ matchps->mode |= GR_DELETED; ++ FOR_EACH_ROLE_END(role) ++ ++ inodev->nentry->deleted = 1; ++ ++ return; ++} ++ ++void ++gr_handle_delete(const ino_t ino, const dev_t dev) ++{ ++ struct inodev_entry *inodev; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ write_lock(&gr_inode_lock); ++ inodev = lookup_inodev_entry(ino, dev); ++ if (inodev != NULL) ++ do_handle_delete(inodev, ino, dev); ++ write_unlock(&gr_inode_lock); ++ ++ return; ++} ++ ++static void ++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_subject_label *subj) ++{ ++ unsigned int index = fhash(oldinode, olddevice, subj->obj_hash_size); ++ struct acl_object_label *match; ++ ++ match = subj->obj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ subj->obj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ insert_acl_obj_label(match, subj); ++ } ++ ++ return; ++} ++ ++static void ++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice, ++ struct acl_role_label *role) ++{ ++ unsigned int index = fhash(oldinode, olddevice, role->subj_hash_size); ++ struct acl_subject_label *match; ++ ++ match = role->subj_hash[index]; ++ ++ while (match && (match->inode != oldinode || ++ match->device != olddevice || ++ !(match->mode & GR_DELETED))) ++ match = match->next; ++ ++ if (match && (match->inode == oldinode) ++ && (match->device == olddevice) ++ && (match->mode & GR_DELETED)) { ++ if (match->prev == NULL) { ++ role->subj_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->inode = newinode; ++ match->device = newdevice; ++ match->mode &= ~GR_DELETED; ++ ++ 
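++ /* Re-hash under the new (inode, device) identity: the entry was
++    unlinked from its old bucket above and marked live again, so a
++    file recreated at a watched pathname immediately regains the
++    ACL that its deleted predecessor carried. */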
insert_acl_subj_label(match, role); ++ } ++ ++ return; ++} ++ ++static void ++update_inodev_entry(const ino_t oldinode, const dev_t olddevice, ++ const ino_t newinode, const dev_t newdevice) ++{ ++ unsigned int index = fhash(oldinode, olddevice, inodev_set.i_size); ++ struct inodev_entry *match; ++ ++ match = inodev_set.i_hash[index]; ++ ++ while (match && (match->nentry->inode != oldinode || ++ match->nentry->device != olddevice || !match->nentry->deleted)) ++ match = match->next; ++ ++ if (match && (match->nentry->inode == oldinode) ++ && (match->nentry->device == olddevice) && ++ match->nentry->deleted) { ++ if (match->prev == NULL) { ++ inodev_set.i_hash[index] = match->next; ++ if (match->next != NULL) ++ match->next->prev = NULL; ++ } else { ++ match->prev->next = match->next; ++ if (match->next != NULL) ++ match->next->prev = match->prev; ++ } ++ match->prev = NULL; ++ match->next = NULL; ++ match->nentry->inode = newinode; ++ match->nentry->device = newdevice; ++ match->nentry->deleted = 0; ++ ++ insert_inodev_entry(match); ++ } ++ ++ return; ++} ++ ++static void ++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry, ++ const struct vfsmount *mnt) ++{ ++ struct acl_subject_label *subj; ++ struct acl_role_label *role; ++ unsigned int x; ++ ++ FOR_EACH_ROLE_START(role) ++ update_acl_subj_label(matchn->inode, matchn->device, ++ dentry->d_inode->i_ino, ++ dentry->d_inode->i_sb->s_dev, role); ++ ++ FOR_EACH_NESTED_SUBJECT_START(role, subj) ++ if ((subj->inode == dentry->d_inode->i_ino) && ++ (subj->device == dentry->d_inode->i_sb->s_dev)) { ++ subj->inode = dentry->d_inode->i_ino; ++ subj->device = dentry->d_inode->i_sb->s_dev; ++ } ++ FOR_EACH_NESTED_SUBJECT_END(subj) ++ FOR_EACH_SUBJECT_START(role, subj, x) ++ update_acl_obj_label(matchn->inode, matchn->device, ++ dentry->d_inode->i_ino, ++ dentry->d_inode->i_sb->s_dev, subj); ++ FOR_EACH_SUBJECT_END(subj,x) ++ FOR_EACH_ROLE_END(role) ++ ++ update_inodev_entry(matchn->inode, matchn->device, ++ dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev); ++ ++ return; ++} ++ ++void ++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ struct name_entry *matchn; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt)); ++ ++ if (unlikely((unsigned long)matchn)) { ++ write_lock(&gr_inode_lock); ++ do_handle_create(matchn, dentry, mnt); ++ write_unlock(&gr_inode_lock); ++ } ++ preempt_enable(); ++ ++ return; ++} ++ ++void ++gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace) ++{ ++ struct name_entry *matchn; ++ struct inodev_entry *inodev; ++ ++ /* vfs_rename swaps the name and parent link for old_dentry and ++ new_dentry ++ at this point, old_dentry has the new name, parent link, and inode ++ for the renamed file ++ if a file is being replaced by a rename, new_dentry has the inode ++ and name for the replaced file ++ */ ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ preempt_disable(); ++ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt)); ++ ++ /* we wouldn't have to check d_inode if it weren't for ++ NFS silly-renaming ++ */ ++ ++ write_lock(&gr_inode_lock); ++ if (unlikely(replace && new_dentry->d_inode)) { ++ inodev = lookup_inodev_entry(new_dentry->d_inode->i_ino, ++ new_dentry->d_inode->i_sb->s_dev); ++ if (inodev != NULL && (new_dentry->d_inode->i_nlink 
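++ /* Only treat the displaced inode as deleted when this was its
++    last link; with other hard links still live, its labels must
++    stay attached to the surviving names. */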
<= 1)) ++ do_handle_delete(inodev, new_dentry->d_inode->i_ino, ++ new_dentry->d_inode->i_sb->s_dev); ++ } ++ ++ inodev = lookup_inodev_entry(old_dentry->d_inode->i_ino, ++ old_dentry->d_inode->i_sb->s_dev); ++ if (inodev != NULL && (old_dentry->d_inode->i_nlink <= 1)) ++ do_handle_delete(inodev, old_dentry->d_inode->i_ino, ++ old_dentry->d_inode->i_sb->s_dev); ++ ++ if (unlikely((unsigned long)matchn)) ++ do_handle_create(matchn, old_dentry, mnt); ++ ++ write_unlock(&gr_inode_lock); ++ preempt_enable(); ++ ++ return; ++} ++ ++static int ++lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt, ++ unsigned char **sum) ++{ ++ struct acl_role_label *r; ++ struct role_allowed_ip *ipp; ++ struct role_transition *trans; ++ unsigned int i; ++ int found = 0; ++ ++ /* check transition table */ ++ ++ for (trans = current->role->transitions; trans; trans = trans->next) { ++ if (!strcmp(rolename, trans->rolename)) { ++ found = 1; ++ break; ++ } ++ } ++ ++ if (!found) ++ return 0; ++ ++ /* handle special roles that do not require authentication ++ and check ip */ ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ found = 0; ++ if (r->allowed_ips != NULL) { ++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) { ++ if ((ntohl(current->signal->curr_ip) & ipp->netmask) == ++ (ntohl(ipp->addr) & ipp->netmask)) ++ found = 1; ++ } ++ } else ++ found = 2; ++ if (!found) ++ return 0; ++ ++ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) || ++ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) { ++ *salt = NULL; ++ *sum = NULL; ++ return 1; ++ } ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ for (i = 0; i < num_sprole_pws; i++) { ++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) { ++ *salt = acl_special_roles[i]->salt; ++ *sum = acl_special_roles[i]->sum; ++ return 1; ++ } ++ } ++ ++ return 0; ++} ++ ++static void ++assign_special_role(char *rolename) ++{ ++ struct acl_object_label *obj; ++ struct acl_role_label *r; ++ struct acl_role_label *assigned = NULL; ++ struct task_struct *tsk; ++ struct file *filp; ++ ++ FOR_EACH_ROLE_START(r) ++ if (!strcmp(rolename, r->rolename) && ++ (r->roletype & GR_ROLE_SPECIAL)) { ++ assigned = r; ++ break; ++ } ++ FOR_EACH_ROLE_END(r) ++ ++ if (!assigned) ++ return; ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ ++ tsk = current->parent; ++ if (tsk == NULL) ++ goto out_unlock; ++ ++ filp = tsk->exec_file; ++ if (filp == NULL) ++ goto out_unlock; ++ ++ tsk->is_writable = 0; ++ ++ tsk->acl_sp_role = 1; ++ tsk->acl_role_id = ++acl_sp_role_value; ++ tsk->role = assigned; ++ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role); ++ ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ tsk->is_writable = 1; ++ ++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG ++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid); ++#endif ++ ++out_unlock: ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return; ++} ++ ++int gr_check_secure_terminal(struct task_struct *task) ++{ ++ struct task_struct *p, *p2, *p3; ++ struct files_struct *files; ++ 
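++ /* A terminal is "secure" if no unrelated process holds an fd onto
++    this task's tty.  The scan below finds our own tty fd, then
++    walks every other thread's fd table for a char device with the
++    same rdev; ancestors of the caller (and tasks on the same tty)
++    are exempt, anything else raises a ttysniff alert. */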
struct fdtable *fdt; ++ struct file *our_file = NULL, *file; ++ int i; ++ ++ if (task->signal->tty == NULL) ++ return 1; ++ ++ files = get_files_struct(task); ++ if (files != NULL) { ++ rcu_read_lock(); ++ fdt = files_fdtable(files); ++ for (i=0; i < fdt->max_fds; i++) { ++ file = fcheck_files(files, i); ++ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) { ++ get_file(file); ++ our_file = file; ++ } ++ } ++ rcu_read_unlock(); ++ put_files_struct(files); ++ } ++ ++ if (our_file == NULL) ++ return 1; ++ ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ files = get_files_struct(p); ++ if (files == NULL || ++ (p->signal && p->signal->tty == task->signal->tty)) { ++ if (files != NULL) ++ put_files_struct(files); ++ continue; ++ } ++ rcu_read_lock(); ++ fdt = files_fdtable(files); ++ for (i=0; i < fdt->max_fds; i++) { ++ file = fcheck_files(files, i); ++ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) && ++ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) { ++ p3 = task; ++ while (p3->pid > 0) { ++ if (p3 == p) ++ break; ++ p3 = p3->parent; ++ } ++ if (p3 == p) ++ break; ++ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p); ++ gr_handle_alertkill(p); ++ rcu_read_unlock(); ++ put_files_struct(files); ++ read_unlock(&tasklist_lock); ++ fput(our_file); ++ return 0; ++ } ++ } ++ rcu_read_unlock(); ++ put_files_struct(files); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ ++ fput(our_file); ++ return 1; ++} ++ ++ssize_t ++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos) ++{ ++ struct gr_arg_wrapper uwrap; ++ unsigned char *sprole_salt = NULL; ++ unsigned char *sprole_sum = NULL; ++ int error = sizeof (struct gr_arg_wrapper); ++ int error2 = 0; ++ ++ down(&gr_dev_sem); ++ ++ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ if (count != sizeof (struct gr_arg_wrapper)) { ++ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper)); ++ error = -EINVAL; ++ goto out; ++ } ++ ++ ++ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) { ++ gr_auth_expires = 0; ++ gr_auth_attempts = 0; ++ } ++ ++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) { ++ error = -EFAULT; ++ goto out; ++ } ++ ++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) { ++ error = -EINVAL; ++ goto out; ++ } ++ ++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) { ++ error = -EFAULT; ++ goto out; ++ } ++ ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM && ++ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(gr_auth_expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ /* if non-root trying to do anything other than use a special role, ++ do not attempt authentication, do not count towards authentication ++ locking ++ */ ++ ++ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS && ++ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM && ++ current_uid()) { ++ error = -EPERM; ++ goto out; ++ } ++ ++ /* ensure pw and special role name are null terminated */ ++ ++ gr_usermode->pw[GR_PW_LEN - 1] = '\0'; ++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0'; ++ ++ /* Okay. ++ * We have our enough of the argument structure..(we have yet ++ * to copy_from_user the tables themselves) . 
Copy the tables ++ * only if we need them, i.e. for loading operations. */ ++ ++ switch (gr_usermode->mode) { ++ case GR_STATUS: ++ if (gr_status & GR_READY) { ++ error = 1; ++ if (!gr_check_secure_terminal(current)) ++ error = 3; ++ } else ++ error = 2; ++ goto out; ++ case GR_SHUTDOWN: ++ if ((gr_status & GR_READY) ++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG); ++ free_variables(); ++ memset(gr_usermode, 0, sizeof (struct gr_arg)); ++ memset(gr_system_salt, 0, GR_SALT_LEN); ++ memset(gr_system_sum, 0, GR_SHA_LEN); ++ } else if (gr_status & GR_READY) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG); ++ error = -EPERM; ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG); ++ error = -EAGAIN; ++ } ++ break; ++ case GR_ENABLE: ++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode))) ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION); ++ else { ++ if (gr_status & GR_READY) ++ error = -EAGAIN; ++ else ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION); ++ } ++ break; ++ case GR_RELOAD: ++ if (!(gr_status & GR_READY)) { ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION); ++ error = -EAGAIN; ++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ lock_kernel(); ++ ++ pax_open_kernel(); ++ gr_status &= ~GR_READY; ++ pax_close_kernel(); ++ ++ free_variables(); ++ if (!(error2 = gracl_init(gr_usermode))) { ++ unlock_kernel(); ++ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION); ++ } else { ++ unlock_kernel(); ++ error = error2; ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ } ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION); ++ error = -EPERM; ++ } ++ break; ++ case GR_SEGVMOD: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG); ++ if (gr_usermode->segv_device && gr_usermode->segv_inode) { ++ struct acl_subject_label *segvacl; ++ segvacl = ++ lookup_acl_subj_label(gr_usermode->segv_inode, ++ gr_usermode->segv_device, ++ current->role); ++ if (segvacl) { ++ segvacl->crashes = 0; ++ segvacl->expires = 0; ++ } ++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) { ++ gr_remove_uid(gr_usermode->segv_uid); ++ } ++ } else { ++ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG); ++ error = -EPERM; ++ } ++ break; ++ case GR_SPROLE: ++ case GR_SPROLEPAM: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) { ++ current->role->expires = 0; ++ current->role->auth_attempts = 0; ++ } ++ ++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES && ++ time_after(current->role->expires, get_seconds())) { ++ error = -EBUSY; ++ goto out; ++ } ++ ++ if (lookup_special_role_auth ++ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum) ++ && ((!sprole_salt && !sprole_sum) ++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) { ++ char *p = ""; ++ assign_special_role(gr_usermode->sp_role); ++ read_lock(&tasklist_lock); ++ if (current->parent) ++ p = current->parent->role->rolename; ++ read_unlock(&tasklist_lock); 
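++ /* Success: log the parent's previous role alongside the new
++    acl_role_id.  Failed attempts below feed a per-role lockout
++    mirroring the global one: after CONFIG_GRKERNSEC_ACL_MAXTRIES
++    bad passwords the role answers -EBUSY until its timeout runs
++    out. */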
++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG, ++ p, acl_sp_role_value); ++ } else { ++ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role); ++ error = -EPERM; ++ if(!(current->role->auth_attempts++)) ++ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ goto out; ++ } ++ break; ++ case GR_UNSPROLE: ++ if (unlikely(!(gr_status & GR_READY))) { ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG); ++ error = -EAGAIN; ++ break; ++ } ++ ++ if (current->role->roletype & GR_ROLE_SPECIAL) { ++ char *p = ""; ++ int i = 0; ++ ++ read_lock(&tasklist_lock); ++ if (current->parent) { ++ p = current->parent->role->rolename; ++ i = current->parent->acl_role_id; ++ } ++ read_unlock(&tasklist_lock); ++ ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i); ++ gr_set_acls(1); ++ } else { ++ error = -EPERM; ++ goto out; ++ } ++ break; ++ default: ++ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode); ++ error = -EINVAL; ++ break; ++ } ++ ++ if (error != -EPERM) ++ goto out; ++ ++ if(!(gr_auth_attempts++)) ++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT; ++ ++ out: ++ up(&gr_dev_sem); ++ return error; ++} ++ ++int ++gr_set_acls(const int type) ++{ ++ struct acl_object_label *obj; ++ struct task_struct *task, *task2; ++ struct file *filp; ++ struct acl_role_label *role = current->role; ++ __u16 acl_role_id = current->acl_role_id; ++ const struct cred *cred; ++ char *tmpname; ++ struct name_entry *nmatch; ++ struct acl_subject_label *tmpsubj; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ do_each_thread(task2, task) { ++ /* check to see if we're called from the exit handler, ++ if so, only replace ACLs that have inherited the admin ++ ACL */ ++ ++ if (type && (task->role != role || ++ task->acl_role_id != acl_role_id)) ++ continue; ++ ++ task->acl_role_id = 0; ++ task->acl_sp_role = 0; ++ ++ if ((filp = task->exec_file)) { ++ cred = __task_cred(task); ++ task->role = lookup_acl_role_label(task, cred->uid, cred->gid); ++ ++ /* the following is to apply the correct subject ++ on binaries running when the RBAC system ++ is enabled, when the binaries have been ++ replaced or deleted since their execution ++ ----- ++ when the RBAC system starts, the inode/dev ++ from exec_file will be one the RBAC system ++ is unaware of. It only knows the inode/dev ++ of the present file on disk, or the absence ++ of it. 
++ */ ++ preempt_disable(); ++ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt); ++ ++ nmatch = lookup_name_entry(tmpname); ++ preempt_enable(); ++ tmpsubj = NULL; ++ if (nmatch) { ++ if (nmatch->deleted) ++ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role); ++ else ++ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role); ++ if (tmpsubj != NULL) ++ task->acl = tmpsubj; ++ } ++ if (tmpsubj == NULL) ++ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role); ++ if (task->acl) { ++ struct acl_subject_label *curr; ++ curr = task->acl; ++ ++ task->is_writable = 0; ++ /* ignore additional mmap checks for processes that are writable ++ by the default ACL */ ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label); ++ if (unlikely(obj->mode & GR_WRITE)) ++ task->is_writable = 1; ++ ++ gr_set_proc_res(task); ++ ++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG ++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename); ++#endif ++ } else { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task->pid); ++ return 1; ++ } ++ } else { ++ // it's a kernel process ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN ++ task->acl->mode &= ~GR_PROCFIND; ++#endif ++ } ++ } while_each_thread(task2, task); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 0; ++} ++ ++void ++gr_learn_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ struct acl_subject_label *acl; ++ const struct cred *cred; ++ ++ if (unlikely((gr_status & GR_READY) && ++ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)))) ++ goto skip_reslog; ++ ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ gr_log_resource(task, res, wanted, gt); ++#endif ++ skip_reslog: ++ ++ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS)) ++ return; ++ ++ acl = task->acl; ++ ++ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) || ++ !(acl->resmask & (1 << (unsigned short) res)))) ++ return; ++ ++ if (wanted >= acl->res[res].rlim_cur) { ++ unsigned long res_add; ++ ++ res_add = wanted; ++ switch (res) { ++ case RLIMIT_CPU: ++ res_add += GR_RLIM_CPU_BUMP; ++ break; ++ case RLIMIT_FSIZE: ++ res_add += GR_RLIM_FSIZE_BUMP; ++ break; ++ case RLIMIT_DATA: ++ res_add += GR_RLIM_DATA_BUMP; ++ break; ++ case RLIMIT_STACK: ++ res_add += GR_RLIM_STACK_BUMP; ++ break; ++ case RLIMIT_CORE: ++ res_add += GR_RLIM_CORE_BUMP; ++ break; ++ case RLIMIT_RSS: ++ res_add += GR_RLIM_RSS_BUMP; ++ break; ++ case RLIMIT_NPROC: ++ res_add += GR_RLIM_NPROC_BUMP; ++ break; ++ case RLIMIT_NOFILE: ++ res_add += GR_RLIM_NOFILE_BUMP; ++ break; ++ case RLIMIT_MEMLOCK: ++ res_add += GR_RLIM_MEMLOCK_BUMP; ++ break; ++ case RLIMIT_AS: ++ res_add += GR_RLIM_AS_BUMP; ++ break; ++ case RLIMIT_LOCKS: ++ res_add += GR_RLIM_LOCKS_BUMP; ++ break; ++ case RLIMIT_SIGPENDING: ++ res_add += GR_RLIM_SIGPENDING_BUMP; ++ break; ++ case RLIMIT_MSGQUEUE: ++ res_add += GR_RLIM_MSGQUEUE_BUMP; ++ break; ++ case RLIMIT_NICE: ++ res_add += GR_RLIM_NICE_BUMP; ++ break; ++ case 
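++ /* Learning mode does not record the exact peak; each resource
++    class gets its own cushion (GR_RLIM_*_BUMP) added to the
++    observed want, so the generated policy will not sit right at
++    the high-water mark and fail the next marginal request. */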
RLIMIT_RTPRIO: ++ res_add += GR_RLIM_RTPRIO_BUMP; ++ break; ++ case RLIMIT_RTTIME: ++ res_add += GR_RLIM_RTTIME_BUMP; ++ break; ++ } ++ ++ acl->res[res].rlim_cur = res_add; ++ ++ if (wanted > acl->res[res].rlim_max) ++ acl->res[res].rlim_max = res_add; ++ ++ /* only log the subject filename, since resource logging is supported for ++ single-subject learning only */ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, cred->uid, cred->gid, acl->filename, ++ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max, ++ "", (unsigned long) res, &task->signal->curr_ip); ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)) ++void ++pax_set_initial_flags(struct linux_binprm *bprm) ++{ ++ struct task_struct *task = current; ++ struct acl_subject_label *proc; ++ unsigned long flags; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return; ++ ++ flags = pax_get_flags(task); ++ ++ proc = task->acl; ++ ++ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC) ++ flags &= ~MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC) ++ flags &= ~MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP) ++ flags &= ~MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP) ++ flags &= ~MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT) ++ flags &= ~MF_PAX_MPROTECT; ++ ++ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC) ++ flags |= MF_PAX_PAGEEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC) ++ flags |= MF_PAX_SEGMEXEC; ++ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP) ++ flags |= MF_PAX_RANDMMAP; ++ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP) ++ flags |= MF_PAX_EMUTRAMP; ++ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT) ++ flags |= MF_PAX_MPROTECT; ++ ++ pax_set_flags(task, flags); ++ ++ return; ++} ++#endif ++ ++#ifdef CONFIG_SYSCTL ++/* Eric Biederman likes breaking userland ABI and every inode-based security ++ system to save 35kb of memory */ ++ ++/* we modify the passed in filename, but adjust it back before returning */ ++static struct acl_object_label *gr_lookup_by_name(char *name, unsigned int len) ++{ ++ struct name_entry *nmatch; ++ char *p, *lastp = NULL; ++ struct acl_object_label *obj = NULL, *tmp; ++ struct acl_subject_label *tmpsubj; ++ char c = '\0'; ++ ++ read_lock(&gr_inode_lock); ++ ++ p = name + len - 1; ++ do { ++ nmatch = lookup_name_entry(name); ++ if (lastp != NULL) ++ *lastp = c; ++ ++ if (nmatch == NULL) ++ goto next_component; ++ tmpsubj = current->acl; ++ do { ++ obj = lookup_acl_obj_label(nmatch->inode, nmatch->device, tmpsubj); ++ if (obj != NULL) { ++ tmp = obj->globbed; ++ while (tmp) { ++ if (!glob_match(tmp->filename, name)) { ++ obj = tmp; ++ goto found_obj; ++ } ++ tmp = tmp->next; ++ } ++ goto found_obj; ++ } ++ } while ((tmpsubj = tmpsubj->parent_subject)); ++next_component: ++ /* end case */ ++ if (p == name) ++ break; ++ ++ while (*p != '/') ++ p--; ++ if (p == name) ++ lastp = p + 1; ++ else { ++ lastp = p; ++ p--; ++ } ++ c = *lastp; ++ *lastp = '\0'; ++ } while (1); ++found_obj: ++ read_unlock(&gr_inode_lock); ++ /* obj returned will always be non-null */ ++ return obj; ++} ++ ++/* returns 0 when allowing, non-zero on error ++ op of 0 is used for readdir, so we don't log the names of hidden files ++*/ ++__u32 ++gr_handle_sysctl(const struct ctl_table *table, const int op) ++{ ++ struct ctl_table *tmp; ++ const char *proc_sys = "/proc/sys"; ++ char 
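++ /* gr_lookup_by_name() above walks the pathname right-to-left:
++    try the full name, then truncate at each '/' (restoring the
++    byte afterwards) until some subject in the inheritance chain
++    recognises the component; the non-NULL guarantee noted there
++    rests on a "/" object always existing. */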
*path; ++ struct acl_object_label *obj; ++ unsigned short len = 0, pos = 0, depth = 0, i; ++ __u32 err = 0; ++ __u32 mode = 0; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++ ++ /* for now, ignore operations on non-sysctl entries if it's not a ++ readdir*/ ++ if (table->child != NULL && op != 0) ++ return 0; ++ ++ mode |= GR_FIND; ++ /* it's only a read if it's an entry, read on dirs is for readdir */ ++ if (op & MAY_READ) ++ mode |= GR_READ; ++ if (op & MAY_WRITE) ++ mode |= GR_WRITE; ++ ++ preempt_disable(); ++ ++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id()); ++ ++ /* it's only a read/write if it's an actual entry, not a dir ++ (which are opened for readdir) ++ */ ++ ++ /* convert the requested sysctl entry into a pathname */ ++ ++ for (tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) { ++ len += strlen(tmp->procname); ++ len++; ++ depth++; ++ } ++ ++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE) { ++ /* deny */ ++ goto out; ++ } ++ ++ memset(path, 0, PAGE_SIZE); ++ ++ memcpy(path, proc_sys, strlen(proc_sys)); ++ ++ pos += strlen(proc_sys); ++ ++ for (; depth > 0; depth--) { ++ path[pos] = '/'; ++ pos++; ++ for (i = 1, tmp = (struct ctl_table *)table; tmp != NULL; tmp = tmp->parent) { ++ if (depth == i) { ++ memcpy(path + pos, tmp->procname, ++ strlen(tmp->procname)); ++ pos += strlen(tmp->procname); ++ } ++ i++; ++ } ++ } ++ ++ obj = gr_lookup_by_name(path, pos); ++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS); ++ ++ if (unlikely((current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) && ++ ((err & mode) != mode))) { ++ __u32 new_mode = mode; ++ ++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS); ++ ++ err = 0; ++ gr_log_learn_sysctl(path, new_mode); ++ } else if (!(err & GR_FIND) && !(err & GR_SUPPRESS) && op != 0) { ++ gr_log_hidden_sysctl(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, path); ++ err = -ENOENT; ++ } else if (!(err & GR_FIND)) { ++ err = -ENOENT; ++ } else if (((err & mode) & ~GR_FIND) != (mode & ~GR_FIND) && !(err & GR_SUPPRESS)) { ++ gr_log_str4(GR_DONT_AUDIT, GR_SYSCTL_ACL_MSG, "denied", ++ path, (mode & GR_READ) ? " reading" : "", ++ (mode & GR_WRITE) ? " writing" : ""); ++ err = -EACCES; ++ } else if ((err & mode) != mode) { ++ err = -EACCES; ++ } else if ((((err & mode) & ~GR_FIND) == (mode & ~GR_FIND)) && (err & GR_AUDITS)) { ++ gr_log_str4(GR_DO_AUDIT, GR_SYSCTL_ACL_MSG, "successful", ++ path, (mode & GR_READ) ? " reading" : "", ++ (mode & GR_WRITE) ? 
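++ /* Decision ladder: entries lacking GR_FIND come back -ENOENT
++    rather than -EACCES, so a denied reader cannot even infer that
++    the sysctl exists, and GR_SUPPRESS mutes the denial log for
++    probes the policy anticipates. */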
" writing" : ""); ++ err = 0; ++ } else ++ err = 0; ++ ++ out: ++ preempt_enable(); ++ ++ return err; ++} ++#endif ++ ++int ++gr_handle_proc_ptrace(struct task_struct *task) ++{ ++ struct file *filp; ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ filp = task->exec_file; ++ ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->parent; ++ } ++ ++ if (!filp || (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 1; ++ } ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) { ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ return 0; ++ } ++#endif ++ ++ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ read_unlock(&tasklist_lock); ++ ++ if (retmode & GR_NOPTRACE) ++ return 1; ++ ++ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD) ++ && (current->acl != task->acl || (current->acl != current->role->root_label ++ && current->pid != task->pid))) ++ return 1; ++ ++ return 0; ++} ++ ++int ++gr_handle_ptrace(struct task_struct *task, const long request) ++{ ++ struct task_struct *tmp = task; ++ struct task_struct *curtemp = current; ++ __u32 retmode; ++ ++#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (unlikely(!(gr_status & GR_READY))) ++ return 0; ++#endif ++ ++ read_lock(&tasklist_lock); ++ while (tmp->pid > 0) { ++ if (tmp == curtemp) ++ break; ++ tmp = tmp->parent; ++ } ++ ++ if (tmp->pid == 0 && ((grsec_enable_harden_ptrace && current_uid() && !(gr_status & GR_READY)) || ++ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) { ++ read_unlock(&tasklist_lock); ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ read_unlock(&tasklist_lock); ++ ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ if (!(gr_status & GR_READY)) ++ return 0; ++#endif ++ ++ read_lock(&grsec_exec_file_lock); ++ if (unlikely(!task->exec_file)) { ++ read_unlock(&grsec_exec_file_lock); ++ return 0; ++ } ++ ++ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt); ++ read_unlock(&grsec_exec_file_lock); ++ ++ if (retmode & GR_NOPTRACE) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ if (retmode & GR_PTRACERD) { ++ switch (request) { ++ case PTRACE_POKETEXT: ++ case PTRACE_POKEDATA: ++ case PTRACE_POKEUSR: ++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64) ++ case PTRACE_SETREGS: ++ case PTRACE_SETFPREGS: ++#endif ++#ifdef CONFIG_X86 ++ case PTRACE_SETFPXREGS: ++#endif ++#ifdef CONFIG_ALTIVEC ++ case PTRACE_SETVRREGS: ++#endif ++ return 1; ++ default: ++ return 0; ++ } ++ } else if (!(current->acl->mode & GR_POVERRIDE) && ++ !(current->role->roletype & GR_ROLE_GOD) && ++ (current->acl != task->acl)) { ++ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task); ++ return 1; ++ } ++ ++ return 0; ++} ++ ++static int is_writable_mmap(const struct file *filp) ++{ ++ struct task_struct *task = current; ++ struct acl_object_label *obj, *obj2; 
++ ++ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) && ++ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && filp->f_path.mnt != shm_mnt) { ++ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label); ++ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, ++ task->role->root_label); ++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt); ++ return 1; ++ } ++ } ++ return 0; ++} ++ ++int ++gr_acl_handle_mmap(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++int ++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot) ++{ ++ __u32 mode; ++ ++ if (unlikely(!file || !(prot & PROT_EXEC))) ++ return 1; ++ ++ if (is_writable_mmap(file)) ++ return 0; ++ ++ mode = ++ gr_search_file(file->f_path.dentry, ++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS, ++ file->f_path.mnt); ++ ++ if (!gr_tpe_allow(file)) ++ return 0; ++ ++ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_EXEC))) { ++ return 0; ++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 1; ++ } ++ ++ return 1; ++} ++ ++void ++gr_acl_handle_psacct(struct task_struct *task, const long code) ++{ ++ unsigned long runtime; ++ unsigned long cputime; ++ unsigned int wday, cday; ++ __u8 whr, chr; ++ __u8 wmin, cmin; ++ __u8 wsec, csec; ++ struct timespec timeval; ++ ++ if (unlikely(!(gr_status & GR_READY) || !task->acl || ++ !(task->acl->mode & GR_PROCACCT))) ++ return; ++ ++ do_posix_clock_monotonic_gettime(&timeval); ++ runtime = timeval.tv_sec - task->start_time.tv_sec; ++ wday = runtime / (3600 * 24); ++ runtime -= wday * (3600 * 24); ++ whr = runtime / 3600; ++ runtime -= whr * 3600; ++ wmin = runtime / 60; ++ runtime -= wmin * 60; ++ wsec = runtime; ++ ++ cputime = (task->utime + task->stime) / HZ; ++ cday = cputime / (3600 * 24); ++ cputime -= cday * (3600 * 24); ++ chr = cputime / 3600; ++ cputime -= chr * 3600; ++ cmin = cputime / 60; ++ cputime -= cmin * 60; ++ csec = cputime; ++ ++ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code); ++ ++ return; ++} ++ ++void gr_set_kernel_label(struct task_struct *task) ++{ ++ if (gr_status & GR_READY) { ++ task->role = kernel_role; ++ task->acl = kernel_role->root_label; ++ } ++ return; ++} ++ ++#ifdef CONFIG_TASKSTATS ++int gr_is_taskstats_denied(int pid) ++{ ++ struct task_struct *task; ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || 
defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ const struct cred *cred; ++#endif ++ int ret = 0; ++ ++ /* restrict taskstats viewing to un-chrooted root users ++ who have the 'view' subject flag if the RBAC system is enabled ++ */ ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ task = find_task_by_vpid(pid); ++ if (task) { ++#ifdef CONFIG_GRKERNSEC_CHROOT ++ if (proc_is_chrooted(task)) ++ ret = -EACCES; ++#endif ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ cred = __task_cred(task); ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ if (cred->uid != 0) ++ ret = -EACCES; ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ if (cred->uid != 0 && !groups_search(cred->group_info, CONFIG_GRKERNSEC_PROC_GID)) ++ ret = -EACCES; ++#endif ++#endif ++ if (gr_status & GR_READY) { ++ if (!(task->acl->mode & GR_VIEW)) ++ ret = -EACCES; ++ } ++ } else ++ ret = -ENOENT; ++ ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return ret; ++} ++#endif ++ ++int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino) ++{ ++ struct task_struct *task = current; ++ struct dentry *dentry = file->f_path.dentry; ++ struct vfsmount *mnt = file->f_path.mnt; ++ struct acl_object_label *obj, *tmp; ++ struct acl_subject_label *subj; ++ unsigned int bufsize; ++ int is_not_root; ++ char *path; ++ ++ if (unlikely(!(gr_status & GR_READY))) ++ return 1; ++ ++ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ return 1; ++ ++ /* ignore Eric Biederman */ ++ if (IS_PRIVATE(dentry->d_inode)) ++ return 1; ++ ++ subj = task->acl; ++ do { ++ obj = lookup_acl_obj_label(ino, dentry->d_inode->i_sb->s_dev, subj); ++ if (obj != NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ } while ((subj = subj->parent_subject)); ++ ++ /* this is purely an optimization since we're looking for an object ++ for the directory we're doing a readdir on ++ if it's possible for any globbed object to match the entry we're ++ filling into the directory, then the object we find here will be ++ an anchor point with attached globbed objects ++ */ ++ obj = chk_obj_label_noglob(dentry, mnt, task->acl); ++ if (obj->globbed == NULL) ++ return (obj->mode & GR_FIND) ? 1 : 0; ++ ++ is_not_root = ((obj->filename[0] == '/') && ++ (obj->filename[1] == '\0')) ? 0 : 1; ++ bufsize = PAGE_SIZE - namelen - is_not_root; ++ ++ /* check bufsize > PAGE_SIZE || bufsize == 0 */ ++ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1))) ++ return 1; ++ ++ preempt_disable(); ++ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()), ++ bufsize); ++ ++ bufsize = strlen(path); ++ ++ /* if base is "/", don't append an additional slash */ ++ if (is_not_root) ++ *(path + bufsize) = '/'; ++ memcpy(path + bufsize + is_not_root, name, namelen); ++ *(path + bufsize + namelen + is_not_root) = '\0'; ++ ++ tmp = obj->globbed; ++ while (tmp) { ++ if (!glob_match(tmp->filename, path)) { ++ preempt_enable(); ++ return (tmp->mode & GR_FIND) ? 1 : 0; ++ } ++ tmp = tmp->next; ++ } ++ preempt_enable(); ++ return (obj->mode & GR_FIND) ? 
1 : 0; ++} ++ ++EXPORT_SYMBOL(gr_learn_resource); ++EXPORT_SYMBOL(gr_set_kernel_label); ++#ifdef CONFIG_SECURITY ++EXPORT_SYMBOL(gr_check_user_change); ++EXPORT_SYMBOL(gr_check_group_change); ++#endif ++ +diff -urNp linux-2.6.35.4/grsecurity/gracl_cap.c linux-2.6.35.4/grsecurity/gracl_cap.c +--- linux-2.6.35.4/grsecurity/gracl_cap.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_cap.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,138 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++static const char *captab_log[] = { ++ "CAP_CHOWN", ++ "CAP_DAC_OVERRIDE", ++ "CAP_DAC_READ_SEARCH", ++ "CAP_FOWNER", ++ "CAP_FSETID", ++ "CAP_KILL", ++ "CAP_SETGID", ++ "CAP_SETUID", ++ "CAP_SETPCAP", ++ "CAP_LINUX_IMMUTABLE", ++ "CAP_NET_BIND_SERVICE", ++ "CAP_NET_BROADCAST", ++ "CAP_NET_ADMIN", ++ "CAP_NET_RAW", ++ "CAP_IPC_LOCK", ++ "CAP_IPC_OWNER", ++ "CAP_SYS_MODULE", ++ "CAP_SYS_RAWIO", ++ "CAP_SYS_CHROOT", ++ "CAP_SYS_PTRACE", ++ "CAP_SYS_PACCT", ++ "CAP_SYS_ADMIN", ++ "CAP_SYS_BOOT", ++ "CAP_SYS_NICE", ++ "CAP_SYS_RESOURCE", ++ "CAP_SYS_TIME", ++ "CAP_SYS_TTY_CONFIG", ++ "CAP_MKNOD", ++ "CAP_LEASE", ++ "CAP_AUDIT_WRITE", ++ "CAP_AUDIT_CONTROL", ++ "CAP_SETFCAP", ++ "CAP_MAC_OVERRIDE", ++ "CAP_MAC_ADMIN" ++}; ++ ++EXPORT_SYMBOL(gr_is_capable); ++EXPORT_SYMBOL(gr_is_capable_nolog); ++ ++int ++gr_is_capable(const int cap) ++{ ++ struct task_struct *task = current; ++ const struct cred *cred = current_cred(); ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ kernel_cap_t cap_audit = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = task->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ cap_audit = curracl->cap_invert_audit; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ if (cap_raised(curracl->cap_invert_audit, cap)) ++ cap_raise(cap_audit, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) { ++ if (cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]); ++ return 1; ++ } ++ ++ curracl = task->acl; ++ ++ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) ++ && cap_raised(cred->cap_effective, cap)) { ++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, ++ task->role->roletype, cred->uid, ++ cred->gid, task->exec_file ? 
++ gr_to_filename(task->exec_file->f_path.dentry, ++ task->exec_file->f_path.mnt) : curracl->filename, ++ curracl->filename, 0UL, ++ 0UL, "", (unsigned long) cap, &task->signal->curr_ip); ++ return 1; ++ } ++ ++ if ((cap >= 0) && (cap < (sizeof(captab_log)/sizeof(captab_log[0]))) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap)) ++ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]); ++ return 0; ++} ++ ++int ++gr_is_capable_nolog(const int cap) ++{ ++ struct acl_subject_label *curracl; ++ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ curracl = current->acl; ++ ++ cap_drop = curracl->cap_lower; ++ cap_mask = curracl->cap_mask; ++ ++ while ((curracl = curracl->parent_subject)) { ++ /* if the cap isn't specified in the current computed mask but is specified in the ++ current level subject, and is lowered in the current level subject, then add ++ it to the set of dropped capabilities ++ otherwise, add the current level subject's mask to the current computed mask ++ */ ++ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) { ++ cap_raise(cap_mask, cap); ++ if (cap_raised(curracl->cap_lower, cap)) ++ cap_raise(cap_drop, cap); ++ } ++ } ++ ++ if (!cap_raised(cap_drop, cap)) ++ return 1; ++ ++ return 0; ++} ++ +diff -urNp linux-2.6.35.4/grsecurity/gracl_fs.c linux-2.6.35.4/grsecurity/gracl_fs.c +--- linux-2.6.35.4/grsecurity/gracl_fs.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_fs.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,424 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/types.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/stat.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++__u32 ++gr_acl_handle_hidden_file(const struct dentry * dentry, ++ const struct vfsmount * mnt) ++{ ++ __u32 mode; ++ ++ if (unlikely(!dentry->d_inode)) ++ return GR_FIND; ++ ++ mode = ++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt); ++ ++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return mode; ++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt); ++ return 0; ++ } else if (unlikely(!(mode & GR_FIND))) ++ return 0; ++ ++ return GR_FIND; ++} ++ ++__u32 ++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt, ++ const int fmode) ++{ ++ __u32 reqmode = GR_FIND; ++ __u32 mode; ++ ++ if (unlikely(!dentry->d_inode)) ++ return reqmode; ++ ++ if (unlikely(fmode & O_APPEND)) ++ reqmode |= GR_APPEND; ++ else if (unlikely(fmode & FMODE_WRITE)) ++ reqmode |= GR_WRITE; ++ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))) ++ reqmode |= GR_READ; ++ if ((fmode & FMODE_GREXEC) && (fmode & FMODE_EXEC)) ++ reqmode &= ~GR_READ; ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? 
" appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_creat(const struct dentry * dentry, ++ const struct dentry * p_dentry, ++ const struct vfsmount * p_mnt, const int fmode, ++ const int imode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ if (unlikely(fmode & O_APPEND)) ++ reqmode |= GR_APPEND; ++ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY))) ++ reqmode |= GR_READ; ++ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID)))) ++ reqmode |= GR_SETID; ++ ++ mode = ++ gr_check_create(dentry, p_dentry, p_mnt, ++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : reqmode & ++ GR_APPEND ? " appending" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++__u32 ++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt, ++ const int fmode) ++{ ++ __u32 mode, reqmode = GR_FIND; ++ ++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode)) ++ reqmode |= GR_EXEC; ++ if (fmode & S_IWOTH) ++ reqmode |= GR_WRITE; ++ if (fmode & S_IROTH) ++ reqmode |= GR_READ; ++ ++ mode = ++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, ++ mnt); ++ ++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? " executing" : ""); ++ return reqmode; ++ } else ++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) ++ { ++ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt, ++ reqmode & GR_READ ? " reading" : "", ++ reqmode & GR_WRITE ? " writing" : "", ++ reqmode & GR_EXEC ? 
" executing" : ""); ++ return 0; ++ } else if (unlikely((mode & reqmode) != reqmode)) ++ return 0; ++ ++ return reqmode; ++} ++ ++static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt, ++ mode_t mode) ++{ ++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode))) ++ return 1; ++ ++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, ++ GR_FCHMOD_ACL_MSG); ++ } else { ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG); ++ } ++} ++ ++__u32 ++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt, ++ mode_t mode) ++{ ++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) { ++ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID, ++ GR_CHMOD_ACL_MSG); ++ } else { ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG); ++ } ++} ++ ++__u32 ++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE, ++ GR_UNIXCONNECT_ACL_MSG); ++} ++ ++/* hardlinks require at minimum create permission, ++ any additional privilege required is based on the ++ privilege of the file being linked to ++*/ ++__u32 ++gr_acl_handle_link(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const struct dentry * old_dentry, ++ const struct vfsmount * old_mnt, const char *to) ++{ ++ __u32 mode; ++ __u32 needmode = GR_CREATE | GR_LINK; ++ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK; ++ ++ mode = ++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry, ++ old_mnt); ++ ++ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) { ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, 
to); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return 1; ++} ++ ++__u32 ++gr_acl_handle_symlink(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, const char *from) ++{ ++ __u32 needmode = GR_WRITE | GR_CREATE; ++ __u32 mode; ++ ++ mode = ++ gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_CREATE | GR_AUDIT_CREATE | ++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS); ++ ++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) { ++ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & needmode) != needmode)) ++ return 0; ++ ++ return (GR_WRITE | GR_CREATE); ++} ++ ++static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt) ++{ ++ __u32 mode; ++ ++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); ++ ++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { ++ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt); ++ return mode; ++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { ++ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt); ++ return 0; ++ } else if (unlikely((mode & (reqmode)) != (reqmode))) ++ return 0; ++ ++ return (reqmode); ++} ++ ++__u32 ++gr_acl_handle_mknod(const struct dentry * new_dentry, ++ const struct dentry * parent_dentry, ++ const struct vfsmount * parent_mnt, ++ const int mode) ++{ ++ __u32 reqmode = GR_WRITE | GR_CREATE; ++ if (unlikely(mode & (S_ISUID | S_ISGID))) ++ reqmode |= GR_SETID; ++ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ reqmode, GR_MKNOD_ACL_MSG); ++} ++ ++__u32 ++gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt) ++{ ++ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, ++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG); ++} ++ ++#define RENAME_CHECK_SUCCESS(old, new) \ ++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \ ++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ))) ++ ++int ++gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const char *newname) ++{ ++ __u32 comp1, comp2; ++ int error = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (!new_dentry->d_inode) { ++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt, ++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ | ++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS); ++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | GR_AUDIT_WRITE | ++ GR_SUPPRESS, old_mnt); ++ } else { ++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE | ++ GR_CREATE | GR_DELETE | ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | ++ GR_AUDIT_READ | 
GR_AUDIT_WRITE | ++ GR_SUPPRESS, parent_mnt); ++ comp2 = ++ gr_search_file(old_dentry, ++ GR_READ | GR_WRITE | GR_AUDIT_READ | ++ GR_DELETE | GR_AUDIT_DELETE | ++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt); ++ } ++ ++ if (RENAME_CHECK_SUCCESS(comp1, comp2) && ++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS))) ++ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); ++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS) ++ && !(comp2 & GR_SUPPRESS)) { ++ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname); ++ error = -EACCES; ++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2))) ++ error = -EACCES; ++ ++ return error; ++} ++ ++void ++gr_acl_handle_exit(void) ++{ ++ u16 id; ++ char *rolename; ++ struct file *exec_file; ++ ++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) { ++ id = current->acl_role_id; ++ rolename = current->role->rolename; ++ gr_set_acls(1); ++ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id); ++ } ++ ++ write_lock(&grsec_exec_file_lock); ++ exec_file = current->exec_file; ++ current->exec_file = NULL; ++ write_unlock(&grsec_exec_file_lock); ++ ++ if (exec_file) ++ fput(exec_file); ++} ++ ++int ++gr_acl_handle_procpidmem(const struct task_struct *task) ++{ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ if (task != current && task->acl->mode & GR_PROTPROCFD) ++ return -EACCES; ++ ++ return 0; ++} +diff -urNp linux-2.6.35.4/grsecurity/gracl_ip.c linux-2.6.35.4/grsecurity/gracl_ip.c +--- linux-2.6.35.4/grsecurity/gracl_ip.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_ip.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,339 @@ ++#include <linux/kernel.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/skbuff.h> ++#include <linux/ip.h> ++#include <linux/udp.h> ++#include <linux/smp_lock.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/netdevice.h> ++#include <linux/inetdevice.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++#define GR_BIND 0x01 ++#define GR_CONNECT 0x02 ++#define GR_INVERT 0x04 ++#define GR_BINDOVERRIDE 0x08 ++#define GR_CONNECTOVERRIDE 0x10 ++ ++static const char * gr_protocols[256] = { ++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt", ++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet", ++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1", ++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp", ++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++", ++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre", ++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile", ++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63", ++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv", ++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak", ++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf", ++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp", ++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim", ++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip", ++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", 
"iatp", "stp", "srp", ++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup", ++ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135", ++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143", ++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151", ++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159", ++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167", ++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175", ++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183", ++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191", ++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199", ++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207", ++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215", ++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223", ++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231", ++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239", ++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247", ++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255", ++ }; ++ ++static const char * gr_socktypes[11] = { ++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6", ++ "unknown:7", "unknown:8", "unknown:9", "packet" ++ }; ++ ++const char * ++gr_proto_to_name(unsigned char proto) ++{ ++ return gr_protocols[proto]; ++} ++ ++const char * ++gr_socktype_to_name(unsigned char type) ++{ ++ return gr_socktypes[type]; ++} ++ ++int ++gr_search_socket(const int domain, const int type, const int protocol) ++{ ++ struct acl_subject_label *curr; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ goto exit; ++ ++ if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET) ++ || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255)) ++ goto exit; // let the kernel handle it ++ ++ curr = current->acl; ++ ++ if (!curr->ips) ++ goto exit; ++ ++ if ((curr->ip_type & (1 << type)) && ++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32)))) ++ goto exit; ++ ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ /* we don't place acls on raw sockets , and sometimes ++ dgram/ip sockets are opened for ioctl and not ++ bind/connect, so we'll fake a bind learn log */ ++ if (type == SOCK_RAW || type == SOCK_PACKET) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? 
++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, 0, type, ++ protocol, GR_CONNECT, &current->signal->curr_ip); ++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) { ++ __u32 fakeip = 0; ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? ++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &fakeip, 0, type, ++ protocol, GR_BIND, &current->signal->curr_ip); ++ } ++ /* we'll log when they use connect or bind */ ++ goto exit; ++ } ++ ++ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, "inet", ++ gr_socktype_to_name(type), gr_proto_to_name(protocol)); ++ ++ return 0; ++ exit: ++ return 1; ++} ++ ++int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask) ++{ ++ if ((ip->mode & mode) && ++ (ip_port >= ip->low) && ++ (ip_port <= ip->high) && ++ ((ntohl(ip_addr) & our_netmask) == ++ (ntohl(our_addr) & our_netmask)) ++ && (ip->proto[protocol / 32] & (1 << (protocol % 32))) ++ && (ip->type & (1 << type))) { ++ if (ip->mode & GR_INVERT) ++ return 2; // specifically denied ++ else ++ return 1; // allowed ++ } ++ ++ return 0; // not specifically allowed, may continue parsing ++} ++ ++static int ++gr_search_connectbind(const int full_mode, struct sock *sk, ++ struct sockaddr_in *addr, const int type) ++{ ++ char iface[IFNAMSIZ] = {0}; ++ struct acl_subject_label *curr; ++ struct acl_ip_label *ip; ++ struct inet_sock *isk; ++ struct net_device *dev; ++ struct in_device *idev; ++ unsigned long i; ++ int ret; ++ int mode = full_mode & (GR_BIND | GR_CONNECT); ++ __u32 ip_addr = 0; ++ __u32 our_addr; ++ __u32 our_netmask; ++ char *p; ++ __u16 ip_port = 0; ++ const struct cred *cred = current_cred(); ++ ++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET)) ++ return 0; ++ ++ curr = current->acl; ++ isk = inet_sk(sk); ++ ++ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */ ++ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) ++ addr->sin_addr.s_addr = curr->inaddr_any_override; ++ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) { ++ struct sockaddr_in saddr; ++ int err; ++ ++ saddr.sin_family = AF_INET; ++ saddr.sin_addr.s_addr = curr->inaddr_any_override; ++ saddr.sin_port = isk->inet_sport; ++ ++ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); ++ if (err) ++ return err; ++ ++ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in)); ++ if (err) ++ return err; ++ } ++ ++ if (!curr->ips) ++ return 0; ++ ++ ip_addr = addr->sin_addr.s_addr; ++ ip_port = ntohs(addr->sin_port); ++ ++ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) { ++ security_learn(GR_IP_LEARN_MSG, current->role->rolename, ++ current->role->roletype, cred->uid, ++ cred->gid, current->exec_file ? 
++ gr_to_filename(current->exec_file->f_path.dentry, ++ current->exec_file->f_path.mnt) : ++ curr->filename, curr->filename, ++ &ip_addr, ip_port, type, ++ sk->sk_protocol, mode, &current->signal->curr_ip); ++ return 0; ++ } ++ ++ for (i = 0; i < curr->ip_num; i++) { ++ ip = *(curr->ips + i); ++ if (ip->iface != NULL) { ++ strncpy(iface, ip->iface, IFNAMSIZ - 1); ++ p = strchr(iface, ':'); ++ if (p != NULL) ++ *p = '\0'; ++ dev = dev_get_by_name(sock_net(sk), iface); ++ if (dev == NULL) ++ continue; ++ idev = in_dev_get(dev); ++ if (idev == NULL) { ++ dev_put(dev); ++ continue; ++ } ++ rcu_read_lock(); ++ for_ifa(idev) { ++ if (!strcmp(ip->iface, ifa->ifa_label)) { ++ our_addr = ifa->ifa_address; ++ our_netmask = 0xffffffff; ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); ++ if (ret == 1) { ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ return 0; ++ } else if (ret == 2) { ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ goto denied; ++ } ++ } ++ } endfor_ifa(idev); ++ rcu_read_unlock(); ++ in_dev_put(idev); ++ dev_put(dev); ++ } else { ++ our_addr = ip->addr; ++ our_netmask = ip->netmask; ++ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask); ++ if (ret == 1) ++ return 0; ++ else if (ret == 2) ++ goto denied; ++ } ++ } ++ ++denied: ++ if (mode == GR_BIND) ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); ++ else if (mode == GR_CONNECT) ++ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol)); ++ ++ return -EACCES; ++} ++ ++int ++gr_search_connect(struct socket *sock, struct sockaddr_in *addr) ++{ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type); ++} ++ ++int ++gr_search_bind(struct socket *sock, struct sockaddr_in *addr) ++{ ++ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type); ++} ++ ++int gr_search_listen(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct sockaddr_in addr; ++ ++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; ++ addr.sin_port = inet_sk(sk)->inet_sport; ++ ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); ++} ++ ++int gr_search_accept(struct socket *sock) ++{ ++ struct sock *sk = sock->sk; ++ struct sockaddr_in addr; ++ ++ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr; ++ addr.sin_port = inet_sk(sk)->inet_sport; ++ ++ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type); ++} ++ ++int ++gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr) ++{ ++ if (addr) ++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM); ++ else { ++ struct sockaddr_in sin; ++ const struct inet_sock *inet = inet_sk(sk); ++ ++ sin.sin_addr.s_addr = inet->inet_daddr; ++ sin.sin_port = inet->inet_dport; ++ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); ++ } ++} ++ ++int ++gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb) ++{ ++ struct sockaddr_in sin; ++ ++ if (unlikely(skb->len < sizeof (struct udphdr))) ++ return 0; // skip this packet ++ ++ sin.sin_addr.s_addr = ip_hdr(skb)->saddr; ++ sin.sin_port = udp_hdr(skb)->source; ++ ++ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM); ++} +diff -urNp linux-2.6.35.4/grsecurity/gracl_learn.c 
linux-2.6.35.4/grsecurity/gracl_learn.c +--- linux-2.6.35.4/grsecurity/gracl_learn.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_learn.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,211 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/poll.h> ++#include <linux/smp_lock.h> ++#include <linux/string.h> ++#include <linux/file.h> ++#include <linux/types.h> ++#include <linux/vmalloc.h> ++#include <linux/grinternal.h> ++ ++extern ssize_t write_grsec_handler(struct file * file, const char __user * buf, ++ size_t count, loff_t *ppos); ++extern int gr_acl_is_enabled(void); ++ ++static DECLARE_WAIT_QUEUE_HEAD(learn_wait); ++static int gr_learn_attached; ++ ++/* use a 512k buffer */ ++#define LEARN_BUFFER_SIZE (512 * 1024) ++ ++static DEFINE_SPINLOCK(gr_learn_lock); ++static DECLARE_MUTEX(gr_learn_user_sem); ++ ++/* we need to maintain two buffers, so that the kernel context of grlearn ++ uses a semaphore around the userspace copying, and the other kernel contexts ++ use a spinlock when copying into the buffer, since they cannot sleep ++*/ ++static char *learn_buffer; ++static char *learn_buffer_user; ++static int learn_buffer_len; ++static int learn_buffer_user_len; ++ ++static ssize_t ++read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos) ++{ ++ DECLARE_WAITQUEUE(wait, current); ++ ssize_t retval = 0; ++ ++ add_wait_queue(&learn_wait, &wait); ++ set_current_state(TASK_INTERRUPTIBLE); ++ do { ++ down(&gr_learn_user_sem); ++ spin_lock(&gr_learn_lock); ++ if (learn_buffer_len) ++ break; ++ spin_unlock(&gr_learn_lock); ++ up(&gr_learn_user_sem); ++ if (file->f_flags & O_NONBLOCK) { ++ retval = -EAGAIN; ++ goto out; ++ } ++ if (signal_pending(current)) { ++ retval = -ERESTARTSYS; ++ goto out; ++ } ++ ++ schedule(); ++ } while (1); ++ ++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len); ++ learn_buffer_user_len = learn_buffer_len; ++ retval = learn_buffer_len; ++ learn_buffer_len = 0; ++ ++ spin_unlock(&gr_learn_lock); ++ ++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len)) ++ retval = -EFAULT; ++ ++ up(&gr_learn_user_sem); ++out: ++ set_current_state(TASK_RUNNING); ++ remove_wait_queue(&learn_wait, &wait); ++ return retval; ++} ++ ++static unsigned int ++poll_learn(struct file * file, poll_table * wait) ++{ ++ poll_wait(file, &learn_wait, wait); ++ ++ if (learn_buffer_len) ++ return (POLLIN | POLLRDNORM); ++ ++ return 0; ++} ++ ++void ++gr_clear_learn_entries(void) ++{ ++ char *tmp; ++ ++ down(&gr_learn_user_sem); ++ if (learn_buffer != NULL) { ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ vfree(tmp); ++ } ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ up(&gr_learn_user_sem); ++ ++ return; ++} ++ ++void ++gr_add_learn_entry(const char *fmt, ...) 
++{ ++ va_list args; ++ unsigned int len; ++ ++ if (!gr_learn_attached) ++ return; ++ ++ spin_lock(&gr_learn_lock); ++ ++ /* leave a gap at the end so we know when it's "full" but don't have to ++ compute the exact length of the string we're trying to append ++ */ ++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) { ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ return; ++ } ++ if (learn_buffer == NULL) { ++ spin_unlock(&gr_learn_lock); ++ return; ++ } ++ ++ va_start(args, fmt); ++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args); ++ va_end(args); ++ ++ learn_buffer_len += len + 1; ++ ++ spin_unlock(&gr_learn_lock); ++ wake_up_interruptible(&learn_wait); ++ ++ return; ++} ++ ++static int ++open_learn(struct inode *inode, struct file *file) ++{ ++ if (file->f_mode & FMODE_READ && gr_learn_attached) ++ return -EBUSY; ++ if (file->f_mode & FMODE_READ) { ++ int retval = 0; ++ down(&gr_learn_user_sem); ++ if (learn_buffer == NULL) ++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer_user == NULL) ++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE); ++ if (learn_buffer == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ if (learn_buffer_user == NULL) { ++ retval = -ENOMEM; ++ goto out_error; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 1; ++out_error: ++ up(&gr_learn_user_sem); ++ return retval; ++ } ++ return 0; ++} ++ ++static int ++close_learn(struct inode *inode, struct file *file) ++{ ++ char *tmp; ++ ++ if (file->f_mode & FMODE_READ) { ++ down(&gr_learn_user_sem); ++ if (learn_buffer != NULL) { ++ spin_lock(&gr_learn_lock); ++ tmp = learn_buffer; ++ learn_buffer = NULL; ++ spin_unlock(&gr_learn_lock); ++ vfree(tmp); ++ } ++ if (learn_buffer_user != NULL) { ++ vfree(learn_buffer_user); ++ learn_buffer_user = NULL; ++ } ++ learn_buffer_len = 0; ++ learn_buffer_user_len = 0; ++ gr_learn_attached = 0; ++ up(&gr_learn_user_sem); ++ } ++ ++ return 0; ++} ++ ++const struct file_operations grsec_fops = { ++ .read = read_learn, ++ .write = write_grsec_handler, ++ .open = open_learn, ++ .release = close_learn, ++ .poll = poll_learn, ++}; +diff -urNp linux-2.6.35.4/grsecurity/gracl_res.c linux-2.6.35.4/grsecurity/gracl_res.c +--- linux-2.6.35.4/grsecurity/gracl_res.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_res.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,68 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/gracl.h> ++#include <linux/grinternal.h> ++ ++static const char *restab_log[] = { ++ [RLIMIT_CPU] = "RLIMIT_CPU", ++ [RLIMIT_FSIZE] = "RLIMIT_FSIZE", ++ [RLIMIT_DATA] = "RLIMIT_DATA", ++ [RLIMIT_STACK] = "RLIMIT_STACK", ++ [RLIMIT_CORE] = "RLIMIT_CORE", ++ [RLIMIT_RSS] = "RLIMIT_RSS", ++ [RLIMIT_NPROC] = "RLIMIT_NPROC", ++ [RLIMIT_NOFILE] = "RLIMIT_NOFILE", ++ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK", ++ [RLIMIT_AS] = "RLIMIT_AS", ++ [RLIMIT_LOCKS] = "RLIMIT_LOCKS", ++ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING", ++ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE", ++ [RLIMIT_NICE] = "RLIMIT_NICE", ++ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO", ++ [RLIMIT_RTTIME] = "RLIMIT_RTTIME", ++ [GR_CRASH_RES] = "RLIMIT_CRASH" ++}; ++ ++void ++gr_log_resource(const struct task_struct *task, ++ const int res, const unsigned long wanted, const int gt) ++{ ++ const struct cred *cred; ++ unsigned long rlim; ++ ++ if (!gr_acl_is_enabled() && !grsec_resource_logging) ++ return; ++ ++ // not yet supported resource ++ if (unlikely(!restab_log[res])) 
++ return; ++ ++ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME) ++ rlim = task_rlimit_max(task, res); ++ else ++ rlim = task_rlimit(task, res); ++ ++ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim))) ++ return; ++ ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ ++ if (res == RLIMIT_NPROC && ++ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) || ++ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE))) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_MEMLOCK && ++ cap_raised(cred->cap_effective, CAP_IPC_LOCK)) ++ goto out_rcu_unlock; ++ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE)) ++ goto out_rcu_unlock; ++ rcu_read_unlock(); ++ ++ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim); ++ ++ return; ++out_rcu_unlock: ++ rcu_read_unlock(); ++ return; ++} +diff -urNp linux-2.6.35.4/grsecurity/gracl_segv.c linux-2.6.35.4/grsecurity/gracl_segv.c +--- linux-2.6.35.4/grsecurity/gracl_segv.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_segv.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,310 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <asm/uaccess.h> ++#include <asm/errno.h> ++#include <asm/mman.h> ++#include <net/sock.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/smp_lock.h> ++#include <linux/slab.h> ++#include <linux/types.h> ++#include <linux/sched.h> ++#include <linux/timer.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++static struct crash_uid *uid_set; ++static unsigned short uid_used; ++static DEFINE_SPINLOCK(gr_uid_lock); ++extern rwlock_t gr_inode_lock; ++extern struct acl_subject_label * ++ lookup_acl_subj_label(const ino_t inode, const dev_t dev, ++ struct acl_role_label *role); ++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t); ++ ++int ++gr_init_uidset(void) ++{ ++ uid_set = ++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL); ++ uid_used = 0; ++ ++ return uid_set ? 
1 : 0; ++} ++ ++void ++gr_free_uidset(void) ++{ ++ if (uid_set) ++ kfree(uid_set); ++ ++ return; ++} ++ ++int ++gr_find_uid(const uid_t uid) ++{ ++ struct crash_uid *tmp = uid_set; ++ uid_t buid; ++ int low = 0, high = uid_used - 1, mid; ++ ++ while (high >= low) { ++ mid = (low + high) >> 1; ++ buid = tmp[mid].uid; ++ if (buid == uid) ++ return mid; ++ if (buid > uid) ++ high = mid - 1; ++ if (buid < uid) ++ low = mid + 1; ++ } ++ ++ return -1; ++} ++ ++static __inline__ void ++gr_insertsort(void) ++{ ++ unsigned short i, j; ++ struct crash_uid index; ++ ++ for (i = 1; i < uid_used; i++) { ++ index = uid_set[i]; ++ j = i; ++ while ((j > 0) && uid_set[j - 1].uid > index.uid) { ++ uid_set[j] = uid_set[j - 1]; ++ j--; ++ } ++ uid_set[j] = index; ++ } ++ ++ return; ++} ++ ++static __inline__ void ++gr_insert_uid(const uid_t uid, const unsigned long expires) ++{ ++ int loc; ++ ++ if (uid_used == GR_UIDTABLE_MAX) ++ return; ++ ++ loc = gr_find_uid(uid); ++ ++ if (loc >= 0) { ++ uid_set[loc].expires = expires; ++ return; ++ } ++ ++ uid_set[uid_used].uid = uid; ++ uid_set[uid_used].expires = expires; ++ uid_used++; ++ ++ gr_insertsort(); ++ ++ return; ++} ++ ++void ++gr_remove_uid(const unsigned short loc) ++{ ++ unsigned short i; ++ ++ for (i = loc + 1; i < uid_used; i++) ++ uid_set[i - 1] = uid_set[i]; ++ ++ uid_used--; ++ ++ return; ++} ++ ++int ++gr_check_crash_uid(const uid_t uid) ++{ ++ int loc; ++ int ret = 0; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ spin_lock(&gr_uid_lock); ++ loc = gr_find_uid(uid); ++ ++ if (loc < 0) ++ goto out_unlock; ++ ++ if (time_before_eq(uid_set[loc].expires, get_seconds())) ++ gr_remove_uid(loc); ++ else ++ ret = 1; ++ ++out_unlock: ++ spin_unlock(&gr_uid_lock); ++ return ret; ++} ++ ++static __inline__ int ++proc_is_setxid(const struct cred *cred) ++{ ++ if (cred->uid != cred->euid || cred->uid != cred->suid || ++ cred->uid != cred->fsuid) ++ return 1; ++ if (cred->gid != cred->egid || cred->gid != cred->sgid || ++ cred->gid != cred->fsgid) ++ return 1; ++ ++ return 0; ++} ++static __inline__ int ++gr_fake_force_sig(int sig, struct task_struct *t) ++{ ++ unsigned long int flags; ++ int ret, blocked, ignored; ++ struct k_sigaction *action; ++ ++ spin_lock_irqsave(&t->sighand->siglock, flags); ++ action = &t->sighand->action[sig-1]; ++ ignored = action->sa.sa_handler == SIG_IGN; ++ blocked = sigismember(&t->blocked, sig); ++ if (blocked || ignored) { ++ action->sa.sa_handler = SIG_DFL; ++ if (blocked) { ++ sigdelset(&t->blocked, sig); ++ recalc_sigpending_and_wake(t); ++ } ++ } ++ if (action->sa.sa_handler == SIG_DFL) ++ t->signal->flags &= ~SIGNAL_UNKILLABLE; ++ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t); ++ ++ spin_unlock_irqrestore(&t->sighand->siglock, flags); ++ ++ return ret; ++} ++ ++void ++gr_handle_crash(struct task_struct *task, const int sig) ++{ ++ struct acl_subject_label *curr; ++ struct acl_subject_label *curr2; ++ struct task_struct *tsk, *tsk2; ++ const struct cred *cred; ++ const struct cred *cred2; ++ ++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL) ++ return; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curr = task->acl; ++ ++ if (!(curr->resmask & (1 << GR_CRASH_RES))) ++ return; ++ ++ if (time_before_eq(curr->expires, get_seconds())) { ++ curr->expires = 0; ++ curr->crashes = 0; ++ } ++ ++ curr->crashes++; ++ ++ if (!curr->expires) ++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ 
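/* the crash threshold was hit inside the expiry window */ ++ 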
time_after(curr->expires, get_seconds())) { ++ rcu_read_lock(); ++ cred = __task_cred(task); ++ if (cred->uid && proc_is_setxid(cred)) { ++ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ spin_lock(&gr_uid_lock); ++ gr_insert_uid(cred->uid, curr->expires); ++ spin_unlock(&gr_uid_lock); ++ curr->expires = 0; ++ curr->crashes = 0; ++ read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ cred2 = __task_cred(tsk); ++ if (tsk != task && cred2->uid == cred->uid) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } else { ++ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max); ++ read_lock(&tasklist_lock); ++ do_each_thread(tsk2, tsk) { ++ if (likely(tsk != task)) { ++ curr2 = tsk->acl; ++ ++ if (curr2->device == curr->device && ++ curr2->inode == curr->inode) ++ gr_fake_force_sig(SIGKILL, tsk); ++ } ++ } while_each_thread(tsk2, tsk); ++ read_unlock(&tasklist_lock); ++ } ++ rcu_read_unlock(); ++ } ++ ++ return; ++} ++ ++int ++gr_check_crash_exec(const struct file *filp) ++{ ++ struct acl_subject_label *curr; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return 0; ++ ++ read_lock(&gr_inode_lock); ++ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino, ++ filp->f_path.dentry->d_inode->i_sb->s_dev, ++ current->role); ++ read_unlock(&gr_inode_lock); ++ ++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) || ++ (!curr->crashes && !curr->expires)) ++ return 0; ++ ++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) && ++ time_after(curr->expires, get_seconds())) ++ return 1; ++ else if (time_before_eq(curr->expires, get_seconds())) { ++ curr->crashes = 0; ++ curr->expires = 0; ++ } ++ ++ return 0; ++} ++ ++void ++gr_handle_alertkill(struct task_struct *task) ++{ ++ struct acl_subject_label *curracl; ++ __u32 curr_ip; ++ struct task_struct *p, *p2; ++ ++ if (unlikely(!gr_acl_is_enabled())) ++ return; ++ ++ curracl = task->acl; ++ curr_ip = task->signal->curr_ip; ++ ++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) { ++ read_lock(&tasklist_lock); ++ do_each_thread(p2, p) { ++ if (p->signal->curr_ip == curr_ip) ++ gr_fake_force_sig(SIGKILL, p); ++ } while_each_thread(p2, p); ++ read_unlock(&tasklist_lock); ++ } else if (curracl->mode & GR_KILLPROC) ++ gr_fake_force_sig(SIGKILL, task); ++ ++ return; ++} +diff -urNp linux-2.6.35.4/grsecurity/gracl_shm.c linux-2.6.35.4/grsecurity/gracl_shm.c +--- linux-2.6.35.4/grsecurity/gracl_shm.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/gracl_shm.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,40 @@ ++#include <linux/kernel.h> ++#include <linux/mm.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/ipc.h> ++#include <linux/gracl.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++int ++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, const int shmid) ++{ ++ struct task_struct *task; ++ ++ if (!gr_acl_is_enabled()) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ task = find_task_by_vpid(shm_cprid); ++ ++ if (unlikely(!task)) ++ task = find_task_by_vpid(shm_lapid); ++ ++ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) || ++ (task->pid == shm_lapid)) && ++ (task->acl->mode & GR_PROTSHM) && ++ (task->acl != current->acl))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ 
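/* deny: the task that created or last attached this segment is GR_PROTSHM-protected and runs under a different subject */ ++ 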
gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid); ++ return 0; ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ ++ return 1; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsec_chdir.c linux-2.6.35.4/grsecurity/grsec_chdir.c +--- linux-2.6.35.4/grsecurity/grsec_chdir.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_chdir.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,19 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/fs.h> ++#include <linux/file.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ if ((grsec_enable_chdir && grsec_enable_group && ++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir && ++ !grsec_enable_group)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt); ++ } ++#endif ++ return; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsec_chroot.c linux-2.6.35.4/grsecurity/grsec_chroot.c +--- linux-2.6.35.4/grsecurity/grsec_chroot.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_chroot.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,389 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/mount.h> ++#include <linux/types.h> ++#include <linux/pid_namespace.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++void gr_set_chroot_entries(struct task_struct *task, struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (task->pid > 1 && path->dentry != init_task.fs->root.dentry && ++ path->dentry != task->nsproxy->mnt_ns->root->mnt_root) ++ task->gr_is_chrooted = 1; ++ else ++ task->gr_is_chrooted = 0; ++ ++ task->gr_chroot_dentry = path->dentry; ++#endif ++ return; ++} ++ ++void gr_clear_chroot_entries(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC ++ task->gr_is_chrooted = 0; ++ task->gr_chroot_dentry = NULL; ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_unix(const pid_t pid) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ struct pid *spid = NULL; ++ ++ if (unlikely(!grsec_enable_chroot_unix)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ spid = find_vpid(pid); ++ if (spid) { ++ struct task_struct *p; ++ p = pid_task(spid, PIDTYPE_PID); ++ if (unlikely(!have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG); ++ return 0; ++ } ++ } ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++int ++gr_handle_chroot_nice(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ if (grsec_enable_chroot_nice && (niceval < task_nice(p)) ++ && proc_is_chrooted(current)) { ++ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, p->pid); ++ return -EACCES; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_rawio(const struct inode *inode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) && ++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO)) ++ 
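/* deny: raw block-device access from inside a chroot without CAP_SYS_RAWIO */ ++ 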
return 1; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_fowner(struct pid *pid, enum pid_type type) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ struct task_struct *p; ++ int ret = 0; ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid) ++ return ret; ++ ++ read_lock(&tasklist_lock); ++ do_each_pid_task(pid, type, p) { ++ if (!have_same_root(current, p)) { ++ ret = 1; ++ goto out; ++ } ++ } while_each_pid_task(pid, type, p); ++out: ++ read_unlock(&tasklist_lock); ++ return ret; ++#endif ++ return 0; ++} ++ ++int ++gr_pid_is_chrooted(struct task_struct *p) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL) ++ return 0; ++ ++ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) || ++ !have_same_root(current, p)) { ++ return 1; ++ } ++#endif ++ return 0; ++} ++ ++EXPORT_SYMBOL(gr_pid_is_chrooted); ++ ++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR) ++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt) ++{ ++ struct dentry *dentry = (struct dentry *)u_dentry; ++ struct vfsmount *mnt = (struct vfsmount *)u_mnt; ++ struct dentry *realroot; ++ struct vfsmount *realrootmnt; ++ struct dentry *currentroot; ++ struct vfsmount *currentmnt; ++ struct task_struct *reaper = &init_task; ++ int ret = 1; ++ ++ read_lock(&reaper->fs->lock); ++ realrootmnt = mntget(reaper->fs->root.mnt); ++ realroot = dget(reaper->fs->root.dentry); ++ read_unlock(&reaper->fs->lock); ++ ++ read_lock(&current->fs->lock); ++ currentmnt = mntget(current->fs->root.mnt); ++ currentroot = dget(current->fs->root.dentry); ++ read_unlock(&current->fs->lock); ++ ++ spin_lock(&dcache_lock); ++ for (;;) { ++ if (unlikely((dentry == realroot && mnt == realrootmnt) ++ || (dentry == currentroot && mnt == currentmnt))) ++ break; ++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) { ++ if (mnt->mnt_parent == mnt) ++ break; ++ dentry = mnt->mnt_mountpoint; ++ mnt = mnt->mnt_parent; ++ continue; ++ } ++ dentry = dentry->d_parent; ++ } ++ spin_unlock(&dcache_lock); ++ ++ dput(currentroot); ++ mntput(currentmnt); ++ ++ /* access is outside of chroot */ ++ if (dentry == realroot && mnt == realrootmnt) ++ ret = 0; ++ ++ dput(realroot); ++ mntput(realrootmnt); ++ return ret; ++} ++#endif ++ ++int ++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ if (!grsec_enable_chroot_fchdir) ++ return 1; ++ ++ if (!proc_is_chrooted(current)) ++ return 1; ++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt); ++ return 0; ++ } ++#endif ++ return 1; ++} ++ ++int ++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT ++ struct pid *pid = NULL; ++ time_t starttime; ++ ++ if (unlikely(!grsec_enable_chroot_shmat)) ++ return 1; ++ ++ if (likely(!proc_is_chrooted(current))) ++ return 1; ++ ++ rcu_read_lock(); ++ read_lock(&tasklist_lock); ++ ++ pid = find_vpid(shm_cprid); ++ if (pid) { ++ struct task_struct *p; ++ p = pid_task(pid, PIDTYPE_PID); ++ starttime = p->start_time.tv_sec; ++ if (unlikely(!have_same_root(current, p) && ++ time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); ++ return 0; ++ } ++ } else { ++ pid = find_vpid(shm_lapid); ++ if 
(pid) { ++ struct task_struct *p; ++ p = pid_task(pid, PIDTYPE_PID); ++ if (unlikely(!have_same_root(current, p))) { ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG); ++ return 0; ++ } ++ } ++ } ++ ++ read_unlock(&tasklist_lock); ++ rcu_read_unlock(); ++#endif ++ return 1; ++} ++ ++void ++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current)) ++ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt); ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) && ++ proc_is_chrooted(current)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_mount(const struct dentry *dentry, ++ const struct vfsmount *mnt, const char *dev_name) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) { ++ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_pivot(void) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) { ++ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ if (grsec_enable_chroot_double && proc_is_chrooted(current) && ++ !gr_is_outside_chroot(dentry, mnt)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_caps(struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ if (grsec_enable_chroot_caps && current->pid > 1 && current->fs != NULL && ++ (init_task.fs->root.dentry != path->dentry) && ++ (current->nsproxy->mnt_ns->root->mnt_root != path->dentry)) { ++ ++ kernel_cap_t chroot_caps = GR_CHROOT_CAPS; ++ const struct cred *old = current_cred(); ++ struct cred *new = prepare_creds(); ++ if (new == NULL) ++ return 1; ++ ++ new->cap_permitted = cap_drop(old->cap_permitted, ++ chroot_caps); ++ new->cap_inheritable = cap_drop(old->cap_inheritable, ++ chroot_caps); ++ new->cap_effective = cap_drop(old->cap_effective, ++ chroot_caps); ++ ++ commit_creds(new); ++ ++ return 0; ++ } ++#endif ++ return 0; ++} ++ ++int ++gr_handle_chroot_sysctl(const int op) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) && ++ proc_is_chrooted(current)) ++ return -EACCES; ++#endif ++ return 0; ++} ++ ++void ++gr_handle_chroot_chdir(struct path *path) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ if (grsec_enable_chroot_chdir) ++ set_fs_pwd(current->fs, path); ++#endif ++ return; ++} ++ ++int ++gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode) ++{ ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ if (grsec_enable_chroot_chmod && ++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) && ++ proc_is_chrooted(current)) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt); ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ 
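Editor's note: the chroot handlers in grsec_chroot.c above all share one shape: a sysctl-backed toggle, a proc_is_chrooted(current) test, and a log-and-deny. The mode test used by gr_handle_chroot_chmod is easy to miss; the following standalone userspace sketch (illustrative only, not part of the patch; the function name is invented) shows exactly which mode bits trigger the denial:

    /* Mirrors the bit test in gr_handle_chroot_chmod above:
     * deny setuid, or setgid combined with group-execute. */
    #include <stdio.h>
    #include <sys/stat.h>

    static int would_deny_chroot_chmod(mode_t mode)
    {
            return (mode & S_ISUID) ||
                   ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
    }

    int main(void)
    {
            printf("04755 -> %s\n", would_deny_chroot_chmod(04755) ? "denied" : "allowed");
            printf("02755 -> %s\n", would_deny_chroot_chmod(02755) ? "denied" : "allowed");
            printf("00644 -> %s\n", would_deny_chroot_chmod(00644) ? "denied" : "allowed");
            return 0;
    }

A setgid-without-group-execute file (02644) passes, since such a file gains no privilege when executed.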
+diff -urNp linux-2.6.35.4/grsecurity/grsec_disabled.c linux-2.6.35.4/grsecurity/grsec_disabled.c
+--- linux-2.6.35.4/grsecurity/grsec_disabled.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_disabled.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,431 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_initial_flags(struct linux_binprm *bprm)
++{
++	return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__u32
++gr_handle_sysctl(const struct ctl_table * table, const int op)
++{
++	return 0;
++}
++#endif
++
++#ifdef CONFIG_TASKSTATS
++int gr_is_taskstats_denied(int pid)
++{
++	return 0;
++}
++#endif
++
++int
++gr_acl_is_enabled(void)
++{
++	return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++	return 0;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++	return;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++	return 0;
++}
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++	return 0;
++}
++
++void
++gr_learn_resource(const struct task_struct *task,
++		  const int res, const unsigned long wanted, const int gt)
++{
++	return;
++}
++
++int
++gr_set_acls(const int type)
++{
++	return 0;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++	return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++	return 0;
++}
++
++int
++gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
++{
++	return 0;
++}
++
++void
++gr_copy_label(struct task_struct *tsk)
++{
++	return;
++}
++
++void
++gr_set_pax_flags(struct task_struct *task)
++{
++	return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
++		  const int unsafe_share)
++{
++	return 0;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++	return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++	return;
++}
++
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++	return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++	return 0;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++	return 0;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++		 struct dentry *old_dentry,
++		 struct dentry *new_dentry,
++		 struct vfsmount *mnt, const __u8 replace)
++{
++	return;
++}
++
++int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++	return 1;
++}
++
++int
++gr_search_connectbind(const int mode, const struct socket *sock,
++		      const struct sockaddr_in *addr)
++{
++	return 0;
++}
++
++int
++gr_is_capable(const int cap)
++{
++	return 1;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++	return 1;
++}
++
++void
++gr_handle_alertkill(struct task_struct *task)
++{
++	return;
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++			  const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++		   const int fmode)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++		   unsigned int *vm_flags)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++		       const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry,
++		     const struct vfsmount * mnt, const int fmode)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
++		     mode_t mode)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++		    mode_t mode)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++void
++grsecurity_init(void)
++{
++	return;
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++		    const struct dentry * parent_dentry,
++		    const struct vfsmount * parent_mnt,
++		    const int mode)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++		    const struct dentry * parent_dentry,
++		    const struct vfsmount * parent_mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++		      const struct dentry * parent_dentry,
++		      const struct vfsmount * parent_mnt, const char *from)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++		   const struct dentry * parent_dentry,
++		   const struct vfsmount * parent_mnt,
++		   const struct dentry * old_dentry,
++		   const struct vfsmount * old_mnt, const char *to)
++{
++	return 1;
++}
++
++int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++		     const struct dentry *parent_dentry,
++		     const struct vfsmount *parent_mnt,
++		     const struct dentry *old_dentry,
++		     const struct inode *old_parent_inode,
++		     const struct vfsmount *old_mnt, const char *newname)
++{
++	return 0;
++}
++
++int
++gr_acl_handle_filldir(const struct file *file, const char *name,
++		      const int namelen, const ino_t ino)
++{
++	return 1;
++}
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++		const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++	return 1;
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++	return 0;
++}
++
++int
++gr_search_accept(const struct socket *sock)
++{
++	return 0;
++}
++
++int
++gr_search_listen(const struct socket *sock)
++{
++	return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++	return 0;
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++	return 1;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++		    const struct dentry * p_dentry,
++		    const struct vfsmount * p_mnt, const int fmode,
++		    const int imode)
++{
++	return 1;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++	return;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++	return 1;
++}
++
++void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++	return;
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++	return 0;
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++	return 0;
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++	return 0;
++}
++
++void
++gr_set_kernel_label(struct task_struct *task)
++{
++	return;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++	return 0;
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++	return 0;
++}
++
++EXPORT_SYMBOL(gr_is_capable);
++EXPORT_SYMBOL(gr_is_capable_nolog);
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++#ifdef CONFIG_SECURITY
++EXPORT_SYMBOL(gr_check_user_change);
++EXPORT_SYMBOL(gr_check_group_change);
++#endif
+diff -urNp linux-2.6.35.4/grsecurity/grsec_exec.c linux-2.6.35.4/grsecurity/grsec_exec.c
+--- linux-2.6.35.4/grsecurity/grsec_exec.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_exec.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,88 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/smp_lock.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++
++#include <asm/uaccess.h>
++
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++static char gr_exec_arg_buf[132];
++static DECLARE_MUTEX(gr_exec_arg_sem);
++#endif
++
++int
++gr_handle_nproc(void)
++{
++#ifdef CONFIG_GRKERNSEC_EXECVE
++	const struct cred *cred = current_cred();
++	if (grsec_enable_execve && cred->user &&
++	    (atomic_read(&cred->user->processes) > rlimit(RLIMIT_NPROC)) &&
++	    !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
++		gr_log_noargs(GR_DONT_AUDIT, GR_NPROC_MSG);
++		return -EAGAIN;
++	}
++#endif
++	return 0;
++}
++
++void
++gr_handle_exec_args(struct linux_binprm *bprm, const char __user *__user *argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++	char *grarg = gr_exec_arg_buf;
++	unsigned int i, x, execlen = 0;
++	char c;
++
++	if (!((grsec_enable_execlog && grsec_enable_group &&
++	       in_group_p(grsec_audit_gid))
++	      || (grsec_enable_execlog && !grsec_enable_group)))
++		return;
++
++	down(&gr_exec_arg_sem);
++	memset(grarg, 0, sizeof(gr_exec_arg_buf));
++
++	if (unlikely(argv == NULL))
++		goto log;
++
++	for (i = 0; i < bprm->argc && execlen < 128; i++) {
++		const char __user *p;
++		unsigned int len;
++
++		if (copy_from_user(&p, argv + i, sizeof(p)))
++			goto log;
++		if (!p)
++			goto log;
++		len = strnlen_user(p, 128 - execlen);
++		if (len > 128 - execlen)
++			len = 128 - execlen;
++		else if (len > 0)
++			len--;
++		if (copy_from_user(grarg + execlen, p, len))
++			goto log;
++
++		/* rewrite unprintable characters */
++		for (x = 0; x < len; x++) {
++			c = *(grarg + execlen + x);
++			if (c < 32 || c > 126)
++				*(grarg + execlen + x) = ' ';
++		}
++
++		execlen += len;
++		*(grarg + execlen) = ' ';
++		*(grarg + execlen + 1) = '\0';
++		execlen++;
++	}
++
++      log:
++	gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
++		      bprm->file->f_path.mnt, grarg);
++	up(&gr_exec_arg_sem);
++#endif
++	return;
++}
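Editor's note: gr_handle_exec_args above copies at most 128 bytes of argv into a static buffer and rewrites anything outside the printable ASCII range to a space before the line hits the kernel log. A minimal userspace sketch of that sanitizing step (illustrative only; sanitize_args is an invented name):

    /* Mirrors the "rewrite unprintable characters" loop in
     * gr_handle_exec_args: bytes outside 32..126 become spaces. */
    #include <stdio.h>
    #include <string.h>

    static void sanitize_args(char *buf, size_t len)
    {
            size_t i;
            for (i = 0; i < len; i++)
                    if (buf[i] < 32 || buf[i] > 126)
                            buf[i] = ' ';
    }

    int main(void)
    {
            char arg[] = "ls\t-l\x01/tmp";   /* tab and a control byte */
            sanitize_args(arg, strlen(arg));
            printf("logged as: \"%s\"\n", arg);
            return 0;
    }

This keeps log-injection via embedded newlines or escape sequences out of the audit trail.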
+diff -urNp linux-2.6.35.4/grsecurity/grsec_fifo.c linux-2.6.35.4/grsecurity/grsec_fifo.c
+--- linux-2.6.35.4/grsecurity/grsec_fifo.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_fifo.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++	       const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++	const struct cred *cred = current_cred();
++
++	if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++	    !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++	    (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++	    (cred->fsuid != dentry->d_inode->i_uid)) {
++		if (!generic_permission(dentry->d_inode, acc_mode, NULL))
++			gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, dentry->d_inode->i_uid, dentry->d_inode->i_gid);
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
+diff -urNp linux-2.6.35.4/grsecurity/grsec_fork.c linux-2.6.35.4/grsecurity/grsec_fork.c
+--- linux-2.6.35.4/grsecurity/grsec_fork.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_fork.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,15 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/errno.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++	if (grsec_enable_forkfail && retval != -ERESTARTNOINTR)
++		gr_log_int(GR_DONT_AUDIT, GR_FAILFORK_MSG, retval);
++#endif
++	return;
++}
+diff -urNp linux-2.6.35.4/grsecurity/grsec_init.c linux-2.6.35.4/grsecurity/grsec_init.c
+--- linux-2.6.35.4/grsecurity/grsec_init.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_init.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,266 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp_lock.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++#include <linux/module.h>
++
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_harden_ptrace;
++int grsec_enable_fifo;
++int grsec_enable_execve;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_audit_ptrace;
++int grsec_enable_time;
++int grsec_enable_audit_textrel;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_mount;
++int grsec_enable_rofs;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_blackhole;
++#ifdef CONFIG_IPV6_MODULE
++EXPORT_SYMBOL(grsec_enable_blackhole);
++#endif
++int grsec_lastack_retries;
++int grsec_enable_tpe_all;
++int grsec_enable_tpe_invert;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_resource_logging;
++int grsec_disable_privio;
++int grsec_lock;
++
++DEFINE_SPINLOCK(grsec_alert_lock);
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++DEFINE_SPINLOCK(grsec_audit_lock);
++
++DEFINE_RWLOCK(grsec_exec_file_lock);
++
++char *gr_shared_page[4];
++
++char *gr_alert_log_fmt;
++char *gr_audit_log_fmt;
++char *gr_alert_log_buf;
++char *gr_audit_log_buf;
++
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++
++void __init
++grsecurity_init(void)
++{
++	int j;
++	/* create the per-cpu shared pages */
++
++#ifdef CONFIG_X86
++	memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
++#endif
++
++	for (j = 0; j < 4; j++) {
++		gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
++		if (gr_shared_page[j] == NULL) {
++			panic("Unable to allocate grsecurity shared page");
++			return;
++		}
++	}
++
++	/* allocate log buffers */
++	gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
++	if (!gr_alert_log_fmt) {
++		panic("Unable to allocate grsecurity alert log format buffer");
++		return;
++	}
++	gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
++	if (!gr_audit_log_fmt) {
++		panic("Unable to allocate grsecurity audit log format buffer");
++		return;
++	}
++	gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++	if (!gr_alert_log_buf) {
++		panic("Unable to allocate grsecurity alert log buffer");
++		return;
++	}
++	gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
++	if (!gr_audit_log_buf) {
++		panic("Unable to allocate grsecurity audit log buffer");
++		return;
++	}
++
++	/* allocate memory for authentication structure */
++	gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++	gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++	gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++	if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++		panic("Unable to allocate grsecurity authentication structure");
++		return;
++	}
++
++
++#ifdef CONFIG_GRKERNSEC_IO
++#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
++	grsec_disable_privio = 1;
++#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++	grsec_disable_privio = 1;
++#else
++	grsec_disable_privio = 0;
++#endif
++#endif
++
++#ifdef CONFIG_GRKERNSEC_TPE_INVERT
++	/* for backward compatibility, tpe_invert always defaults to on if
++	   enabled in the kernel
++	*/
++	grsec_enable_tpe_invert = 1;
++#endif
++
++#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++	grsec_lock = 1;
++#endif
++
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++	grsec_enable_audit_textrel = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++	grsec_enable_group = 1;
++	grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++	grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
++	grsec_enable_harden_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++	grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++	grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++	grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_BLACKHOLE
++	grsec_enable_blackhole = 1;
++	grsec_lastack_retries = 4;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++	grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++	grsec_enable_execve = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++	grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++	grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++	grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++	grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RESLOG
++	grsec_resource_logging = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++	grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++	grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++	grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++	grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++	grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
++	grsec_enable_audit_ptrace = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++	grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++	grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++	grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++	grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++	grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++	grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++	grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++	grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++	grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++	grsec_enable_tpe = 1;
++	grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++	grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++	grsec_enable_socket_all = 1;
++	grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++	grsec_enable_socket_client = 1;
++	grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++	grsec_enable_socket_server = 1;
++	grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++
++	return;
++}
+diff -urNp linux-2.6.35.4/grsecurity/grsec_link.c linux-2.6.35.4/grsecurity/grsec_link.c
+--- linux-2.6.35.4/grsecurity/grsec_link.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_link.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,43 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_follow_link(const struct inode *parent,
++		      const struct inode *inode,
++		      const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++	const struct cred *cred = current_cred();
++
++	if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++	    (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++	    (parent->i_mode & S_IWOTH) && (cred->fsuid != inode->i_uid)) {
++		gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++		   const struct vfsmount *mnt,
++		   struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++	const struct cred *cred = current_cred();
++
++	if (grsec_enable_link && cred->fsuid != inode->i_uid &&
++	    (!S_ISREG(mode) || (mode & S_ISUID) ||
++	     ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
++	     (generic_permission(inode, MAY_READ | MAY_WRITE, NULL))) &&
++	    !capable(CAP_FOWNER) && cred->uid) {
++		gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to);
++		return -EPERM;
++	}
++#endif
++	return 0;
++}
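Editor's note: gr_handle_follow_link above blocks the classic /tmp symlink race; the predicate is dense, so here it is restated as a standalone userspace sketch (illustrative only; deny_follow is an invented name, and only the uid/mode inputs of the real check are modeled):

    /* The symlink-following condition: sticky, world-writable parent
     * directory, link owner differs from directory owner, and the
     * follower is not the link owner. */
    #include <stdio.h>
    #include <sys/stat.h>

    static int deny_follow(mode_t dir_mode, uid_t dir_uid,
                           uid_t link_uid, uid_t fsuid)
    {
            return (dir_mode & S_ISVTX) &&   /* sticky parent, e.g. /tmp */
                   (dir_mode & S_IWOTH) &&   /* world-writable parent */
                   dir_uid != link_uid &&    /* link not owned by dir owner */
                   fsuid != link_uid;        /* follower is not link owner */
    }

    int main(void)
    {
            /* /tmp-style directory: mode 1777, owned by root (uid 0) */
            printf("%s\n", deny_follow(01777, 0, 1000, 1001) ? "denied" : "allowed");
            printf("%s\n", deny_follow(01777, 0, 1000, 1000) ? "denied" : "allowed");
            return 0;
    }

Following your own symlink stays allowed, so ordinary self-inflicted uses in /tmp keep working.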
+diff -urNp linux-2.6.35.4/grsecurity/grsec_log.c linux-2.6.35.4/grsecurity/grsec_log.c
+--- linux-2.6.35.4/grsecurity/grsec_log.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_log.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,306 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/tty.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++#ifdef CONFIG_TREE_PREEMPT_RCU
++#define DISABLE_PREEMPT() preempt_disable()
++#define ENABLE_PREEMPT() preempt_enable()
++#else
++#define DISABLE_PREEMPT()
++#define ENABLE_PREEMPT()
++#endif
++
++#define BEGIN_LOCKS(x) \
++	DISABLE_PREEMPT(); \
++	rcu_read_lock(); \
++	read_lock(&tasklist_lock); \
++	read_lock(&grsec_exec_file_lock); \
++	if (x != GR_DO_AUDIT) \
++		spin_lock(&grsec_alert_lock); \
++	else \
++		spin_lock(&grsec_audit_lock)
++
++#define END_LOCKS(x) \
++	if (x != GR_DO_AUDIT) \
++		spin_unlock(&grsec_alert_lock); \
++	else \
++		spin_unlock(&grsec_audit_lock); \
++	read_unlock(&grsec_exec_file_lock); \
++	read_unlock(&tasklist_lock); \
++	rcu_read_unlock(); \
++	ENABLE_PREEMPT(); \
++	if (x == GR_DONT_AUDIT) \
++		gr_handle_alertkill(current)
++
++enum {
++	FLOODING,
++	NO_FLOODING
++};
++
++extern char *gr_alert_log_fmt;
++extern char *gr_audit_log_fmt;
++extern char *gr_alert_log_buf;
++extern char *gr_audit_log_buf;
++
++static int gr_log_start(int audit)
++{
++	char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
++	char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
++	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++
++	if (audit == GR_DO_AUDIT)
++		goto set_fmt;
++
++	if (!grsec_alert_wtime || jiffies - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME * HZ) {
++		grsec_alert_wtime = jiffies;
++		grsec_alert_fyet = 0;
++	} else if ((jiffies - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME * HZ) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
++		grsec_alert_fyet++;
++	} else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
++		grsec_alert_wtime = jiffies;
++		grsec_alert_fyet++;
++		printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
++		return FLOODING;
++	} else return FLOODING;
++
++set_fmt:
++	memset(buf, 0, PAGE_SIZE);
++	if (current->signal->curr_ip && gr_acl_is_enabled()) {
++		sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
++		snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++	} else if (current->signal->curr_ip) {
++		sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
++		snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
++	} else if (gr_acl_is_enabled()) {
++		sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
++		snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
++	} else {
++		sprintf(fmt, "%s%s", loglevel, "grsec: ");
++		strcpy(buf, fmt);
++	}
++
++	return NO_FLOODING;
++}
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++	__attribute__ ((format (printf, 2, 0)));
++
++static void gr_log_middle(int audit, const char *msg, va_list ap)
++{
++	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++	unsigned int len = strlen(buf);
++
++	vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++
++	return;
++}
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++	__attribute__ ((format (printf, 2, 3)));
++
++static void gr_log_middle_varargs(int audit, const char *msg, ...)
++{
++	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++	unsigned int len = strlen(buf);
++	va_list ap;
++
++	va_start(ap, msg);
++	vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
++	va_end(ap);
++
++	return;
++}
++
++static void gr_log_end(int audit)
++{
++	char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
++	unsigned int len = strlen(buf);
++
++	snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, DEFAULTSECARGS(current, current_cred(), __task_cred(current->parent)));
++	printk("%s\n", buf);
++
++	return;
++}
++
++void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
++{
++	int logtype;
++	char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
++	char *str1, *str2, *str3;
++	void *voidptr;
++	int num1, num2;
++	unsigned long ulong1, ulong2;
++	struct dentry *dentry;
++	struct vfsmount *mnt;
++	struct file *file;
++	struct task_struct *task;
++	const struct cred *cred, *pcred;
++	va_list ap;
++
++	BEGIN_LOCKS(audit);
++	logtype = gr_log_start(audit);
++	if (logtype == FLOODING) {
++		END_LOCKS(audit);
++		return;
++	}
++	va_start(ap, argtypes);
++	switch (argtypes) {
++	case GR_TTYSNIFF:
++		task = va_arg(ap, struct task_struct *);
++		gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task->pid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid);
++		break;
++	case GR_SYSCTL_HIDDEN:
++		str1 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, result, str1);
++		break;
++	case GR_RBAC:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
++		break;
++	case GR_RBAC_STR:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		str1 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
++		break;
++	case GR_STR_RBAC:
++		str1 = va_arg(ap, char *);
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
++		break;
++	case GR_RBAC_MODE2:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		str1 = va_arg(ap, char *);
++		str2 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
++		break;
++	case GR_RBAC_MODE3:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		str1 = va_arg(ap, char *);
++		str2 = va_arg(ap, char *);
++		str3 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
++		break;
++	case GR_FILENAME:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
++		break;
++	case GR_STR_FILENAME:
++		str1 = va_arg(ap, char *);
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
++		break;
++	case GR_FILENAME_STR:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		str1 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
++		break;
++	case GR_FILENAME_TWO_INT:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		num1 = va_arg(ap, int);
++		num2 = va_arg(ap, int);
++		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
++		break;
++	case GR_FILENAME_TWO_INT_STR:
++		dentry = va_arg(ap, struct dentry *);
++		mnt = va_arg(ap, struct vfsmount *);
++		num1 = va_arg(ap, int);
++		num2 = va_arg(ap, int);
++		str1 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
++		break;
++	case GR_TEXTREL:
++		file = va_arg(ap, struct file *);
++		ulong1 = va_arg(ap, unsigned long);
++		ulong2 = va_arg(ap, unsigned long);
++		gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
++		break;
++	case GR_PTRACE:
++		task = va_arg(ap, struct task_struct *);
++		gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task->pid);
++		break;
++	case GR_RESOURCE:
++		task = va_arg(ap, struct task_struct *);
++		cred = __task_cred(task);
++		pcred = __task_cred(task->parent);
++		ulong1 = va_arg(ap, unsigned long);
++		str1 = va_arg(ap, char *);
++		ulong2 = va_arg(ap, unsigned long);
++		gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++		break;
++	case GR_CAP:
++		task = va_arg(ap, struct task_struct *);
++		cred = __task_cred(task);
++		pcred = __task_cred(task->parent);
++		str1 = va_arg(ap, char *);
++		gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++		break;
++	case GR_SIG:
++		str1 = va_arg(ap, char *);
++		voidptr = va_arg(ap, void *);
++		gr_log_middle_varargs(audit, msg, str1, voidptr);
++		break;
++	case GR_SIG2:
++		task = va_arg(ap, struct task_struct *);
++		cred = __task_cred(task);
++		pcred = __task_cred(task->parent);
++		num1 = va_arg(ap, int);
++		gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath0(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++		break;
++	case GR_CRASH1:
++		task = va_arg(ap, struct task_struct *);
++		cred = __task_cred(task);
++		pcred = __task_cred(task->parent);
++		ulong1 = va_arg(ap, unsigned long);
++		gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, cred->uid, ulong1);
++		break;
++	case GR_CRASH2:
++		task = va_arg(ap, struct task_struct *);
++		cred = __task_cred(task);
++		pcred = __task_cred(task->parent);
++		ulong1 = va_arg(ap, unsigned long);
++		gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, cred->uid, cred->euid, cred->gid, cred->egid, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, pcred->uid, pcred->euid, pcred->gid, pcred->egid, ulong1);
++		break;
++	case GR_PSACCT:
++		{
++			unsigned int wday, cday;
++			__u8 whr, chr;
++			__u8 wmin, cmin;
++			__u8 wsec, csec;
++			char cur_tty[64] = { 0 };
++			char parent_tty[64] = { 0 };
++
++			task = va_arg(ap, struct task_struct *);
++			wday = va_arg(ap, unsigned int);
++			cday = va_arg(ap, unsigned int);
++			whr = va_arg(ap, int);
++			chr = va_arg(ap, int);
++			wmin = va_arg(ap, int);
++			cmin = va_arg(ap, int);
++			wsec = va_arg(ap, int);
++			csec = va_arg(ap, int);
++			ulong1 = va_arg(ap, unsigned long);
++			cred = __task_cred(task);
++			pcred = __task_cred(task->parent);
++
++			gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task->pid, &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), cred->uid, cred->euid, cred->gid, cred->egid, wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->parent->comm, task->parent->pid, &task->parent->signal->curr_ip, tty_name(task->parent->signal->tty, parent_tty), pcred->uid, pcred->euid, pcred->gid, pcred->egid);
++		}
++		break;
++	default:
++		gr_log_middle(audit, msg, ap);
++	}
++	va_end(ap);
++	gr_log_end(audit);
++	END_LOCKS(audit);
++}
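Editor's note: the flood control in gr_log_start above allows a burst of alerts per time window, then suppresses everything until the window expires. The following self-contained sketch (illustrative only; jiffies is replaced by a caller-supplied tick and FLOODTIME/FLOODBURST stand in for the CONFIG_GRKERNSEC_* values) shows the state machine:

    #include <stdio.h>

    #define FLOODTIME  10    /* length of the window, in ticks */
    #define FLOODBURST 4     /* alerts allowed within one window */

    static unsigned long wtime, fyet;

    static int flooding(unsigned long now)
    {
            if (!wtime || now - wtime > FLOODTIME) {
                    wtime = now;       /* window expired: start fresh */
                    fyet = 0;
            } else if (fyet < FLOODBURST) {
                    fyet++;            /* still inside the burst budget */
            } else {
                    return 1;          /* budget exhausted: suppress */
            }
            return 0;
    }

    int main(void)
    {
            unsigned long t;
            for (t = 1; t <= 8; t++)
                    printf("t=%lu -> %s\n", t, flooding(t) ? "suppressed" : "logged");
            return 0;
    }

The first burst of alerts is logged; anything beyond it inside the same window is dropped, which keeps an attacker from using denials to fill the log.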
+diff -urNp linux-2.6.35.4/grsecurity/grsec_mem.c linux-2.6.35.4/grsecurity/grsec_mem.c
+--- linux-2.6.35.4/grsecurity/grsec_mem.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_mem.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,85 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++	gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
++	return;
++}
++
++void
++gr_handle_iopl(void)
++{
++	gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
++	return;
++}
++
++void
++gr_handle_mem_write(void)
++{
++	gr_log_noargs(GR_DONT_AUDIT, GR_MEM_WRITE_MSG);
++	return;
++}
++
++void
++gr_handle_kmem_write(void)
++{
++	gr_log_noargs(GR_DONT_AUDIT, GR_KMEM_MSG);
++	return;
++}
++
++void
++gr_handle_open_port(void)
++{
++	gr_log_noargs(GR_DONT_AUDIT, GR_PORT_OPEN_MSG);
++	return;
++}
++
++int
++gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
++{
++	unsigned long start, end;
++
++	start = offset;
++	end = start + vma->vm_end - vma->vm_start;
++
++	if (start > end) {
++		gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
++		return -EPERM;
++	}
++
++	/* allowed ranges : ISA I/O BIOS */
++	if ((start >= __pa(high_memory))
++#if defined(CONFIG_X86) || defined(CONFIG_PPC)
++	    || (start >= 0x000a0000 && end <= 0x00100000)
++	    || (start >= 0x00000000 && end <= 0x00001000)
++#endif
++	)
++		return 0;
++
++	if (vma->vm_flags & VM_WRITE) {
++		gr_log_noargs(GR_DONT_AUDIT, GR_MEM_MMAP_MSG);
++		return -EPERM;
++	} else
++		vma->vm_flags &= ~VM_MAYWRITE;
++
++	return 0;
++}
++
++void
++gr_log_nonroot_mod_load(const char *modname)
++{
++	gr_log_str(GR_DONT_AUDIT, GR_NONROOT_MODLOAD_MSG, modname);
++	return;
++}
++
++void
++gr_handle_vm86(void)
++{
++	gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
++	return;
++}
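Editor's note: gr_handle_mem_mmap above whitelists only the legacy ISA/VGA/BIOS windows and addresses above physical RAM for /dev/mem mappings, and forces anything inside RAM to be read-only. A self-contained restatement of that policy (illustrative only; HIGH_MEMORY_PA is an assumed stand-in for __pa(high_memory)):

    #include <stdio.h>

    #define HIGH_MEMORY_PA 0x40000000UL   /* assumption: 1 GiB of RAM */

    static int mmap_allowed(unsigned long start, unsigned long end, int write)
    {
            if (start > end)
                    return 0;                                 /* wrapped */
            if (start >= HIGH_MEMORY_PA ||                    /* above RAM */
                (start >= 0x000a0000 && end <= 0x00100000) || /* ISA/VGA/BIOS */
                (end <= 0x00001000))                          /* first page */
                    return 1;
            return !write;   /* inside RAM: read-only mappings only */
    }

    int main(void)
    {
            printf("VGA window, write: %d\n", mmap_allowed(0xa0000, 0xc0000, 1));
            printf("kernel RAM, write: %d\n", mmap_allowed(0x1000000, 0x1001000, 1));
            return 0;
    }

The net effect: X servers and BIOS tools keep working, while a root-owned process can no longer patch live kernel memory through /dev/mem.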
devname : "none"); ++#endif ++ return; ++} ++ ++void ++gr_log_unmount(const char *devname, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none"); ++#endif ++ return; ++} ++ ++void ++gr_log_mount(const char *from, const char *to, const int retval) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ if (grsec_enable_mount && (retval >= 0)) ++ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from, to); ++#endif ++ return; ++} ++ ++int ++gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} ++ ++int ++gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode) ++{ ++#ifdef CONFIG_GRKERNSEC_ROFS ++ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) && ++ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) { ++ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt); ++ return -EPERM; ++ } else ++ return 0; ++#endif ++ return 0; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsec_ptrace.c linux-2.6.35.4/grsecurity/grsec_ptrace.c +--- linux-2.6.35.4/grsecurity/grsec_ptrace.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_ptrace.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,14 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++#include <linux/grsecurity.h> ++ ++void ++gr_audit_ptrace(struct task_struct *task) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ if (grsec_enable_audit_ptrace) ++ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task); ++#endif ++ return; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsec_sig.c linux-2.6.35.4/grsecurity/grsec_sig.c +--- linux-2.6.35.4/grsecurity/grsec_sig.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_sig.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,65 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/delay.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++char *signames[] = { ++ [SIGSEGV] = "Segmentation fault", ++ [SIGILL] = "Illegal instruction", ++ [SIGABRT] = "Abort", ++ [SIGBUS] = "Invalid alignment/Bus error" ++}; ++ ++void ++gr_log_signal(const int sig, const void *addr, const struct task_struct *t) ++{ ++#ifdef CONFIG_GRKERNSEC_SIGNAL ++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) || ++ (sig == SIGABRT) || (sig == SIGBUS))) { ++ if (t->pid == current->pid) { ++ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr); ++ } else { ++ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig); ++ } ++ } ++#endif ++ return; ++} ++ ++int ++gr_handle_signal(const struct task_struct *p, const int sig) ++{ ++#ifdef CONFIG_GRKERNSEC ++ if (current->pid > 1 && gr_check_protected_task(p)) { ++ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig); ++ return -EPERM; ++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) { ++ return -EPERM; ++ } ++#endif ++ return 0; ++} ++ ++void gr_handle_brute_attach(struct task_struct *p) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ read_lock(&tasklist_lock); ++ read_lock(&grsec_exec_file_lock); ++ if (p->parent && p->parent->exec_file == p->exec_file) ++ p->parent->brute = 1; ++ read_unlock(&grsec_exec_file_lock); ++ 
read_unlock(&tasklist_lock); ++#endif ++ return; ++} ++ ++void gr_handle_brute_check(void) ++{ ++#ifdef CONFIG_GRKERNSEC_BRUTE ++ if (current->brute) ++ msleep(30 * 1000); ++#endif ++ return; ++} ++ +diff -urNp linux-2.6.35.4/grsecurity/grsec_sock.c linux-2.6.35.4/grsecurity/grsec_sock.c +--- linux-2.6.35.4/grsecurity/grsec_sock.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_sock.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,271 @@ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/net.h> ++#include <linux/in.h> ++#include <linux/ip.h> ++#include <net/sock.h> ++#include <net/inet_sock.h> ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++#include <linux/gracl.h> ++ ++kernel_cap_t gr_cap_rtnetlink(struct sock *sock); ++EXPORT_SYMBOL(gr_cap_rtnetlink); ++ ++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr); ++ ++EXPORT_SYMBOL(gr_search_udp_recvmsg); ++EXPORT_SYMBOL(gr_search_udp_sendmsg); ++ ++#ifdef CONFIG_UNIX_MODULE ++EXPORT_SYMBOL(gr_acl_handle_unix); ++EXPORT_SYMBOL(gr_acl_handle_mknod); ++EXPORT_SYMBOL(gr_handle_chroot_unix); ++EXPORT_SYMBOL(gr_handle_create); ++#endif ++ ++#ifdef CONFIG_GRKERNSEC ++#define gr_conn_table_size 32749 ++struct conn_table_entry { ++ struct conn_table_entry *next; ++ struct signal_struct *sig; ++}; ++ ++struct conn_table_entry *gr_conn_table[gr_conn_table_size]; ++DEFINE_SPINLOCK(gr_conn_table_lock); ++ ++extern const char * gr_socktype_to_name(unsigned char type); ++extern const char * gr_proto_to_name(unsigned char proto); ++ ++static __inline__ int ++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size) ++{ ++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size); ++} ++ ++static __inline__ int ++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr, ++ __u16 sport, __u16 dport) ++{ ++ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr && ++ sig->gr_sport == sport && sig->gr_dport == dport)) ++ return 1; ++ else ++ return 0; ++} ++ ++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent) ++{ ++ struct conn_table_entry **match; ++ unsigned int index; ++ ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr, ++ sig->gr_sport, sig->gr_dport, ++ gr_conn_table_size); ++ ++ newent->sig = sig; ++ ++ match = &gr_conn_table[index]; ++ newent->next = *match; ++ *match = newent; ++ ++ return; ++} ++ ++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig) ++{ ++ struct conn_table_entry *match, *last = NULL; ++ unsigned int index; ++ ++ index = conn_hash(sig->gr_saddr, sig->gr_daddr, ++ sig->gr_sport, sig->gr_dport, ++ gr_conn_table_size); ++ ++ match = gr_conn_table[index]; ++ while (match && !conn_match(match->sig, ++ sig->gr_saddr, sig->gr_daddr, sig->gr_sport, ++ sig->gr_dport)) { ++ last = match; ++ match = match->next; ++ } ++ ++ if (match) { ++ if (last) ++ last->next = match->next; ++ else ++ gr_conn_table[index] = NULL; ++ kfree(match); ++ } ++ ++ return; ++} ++ ++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr, ++ __u16 sport, __u16 dport) ++{ ++ struct conn_table_entry *match; ++ unsigned int index; ++ ++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size); ++ ++ match = gr_conn_table[index]; ++ while (match && !conn_match(match->sig, saddr, daddr, 
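Editor's note: the brute-force deterrence above is just a 30-second msleep() on every fork of a flagged binary, but the arithmetic is worth seeing. A back-of-the-envelope sketch (illustrative only; the per-second fork rate is an assumption):

    #include <stdio.h>

    int main(void)
    {
            double per_sec_unthrottled = 1000.0;  /* assumed forks/sec */
            double delay = 30.0;                  /* msleep(30 * 1000) */

            printf("unthrottled: %.0f attempts/hour\n", per_sec_unthrottled * 3600);
            printf("throttled:   %.0f attempts/hour\n", 3600.0 / delay);
            return 0;
    }

Dropping from millions of crash-and-refork attempts per hour to 120 makes brute-forcing ASLR through a forking daemon impractical in any realistic window.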
+diff -urNp linux-2.6.35.4/grsecurity/grsec_sock.c linux-2.6.35.4/grsecurity/grsec_sock.c
+--- linux-2.6.35.4/grsecurity/grsec_sock.c	1969-12-31 19:00:00.000000000 -0500
++++ linux-2.6.35.4/grsecurity/grsec_sock.c	2010-09-17 20:12:37.000000000 -0400
+@@ -0,0 +1,271 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <net/inet_sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++kernel_cap_t gr_cap_rtnetlink(struct sock *sock);
++EXPORT_SYMBOL(gr_cap_rtnetlink);
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++#define gr_conn_table_size 32749
++struct conn_table_entry {
++	struct conn_table_entry *next;
++	struct signal_struct *sig;
++};
++
++struct conn_table_entry *gr_conn_table[gr_conn_table_size];
++DEFINE_SPINLOCK(gr_conn_table_lock);
++
++extern const char * gr_socktype_to_name(unsigned char type);
++extern const char * gr_proto_to_name(unsigned char proto);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++	return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
++	   __u16 sport, __u16 dport)
++{
++	if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
++		     sig->gr_sport == sport && sig->gr_dport == dport))
++		return 1;
++	else
++		return 0;
++}
++
++static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
++{
++	struct conn_table_entry **match;
++	unsigned int index;
++
++	index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++			  sig->gr_sport, sig->gr_dport,
++			  gr_conn_table_size);
++
++	newent->sig = sig;
++
++	match = &gr_conn_table[index];
++	newent->next = *match;
++	*match = newent;
++
++	return;
++}
++
++static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
++{
++	struct conn_table_entry *match, *last = NULL;
++	unsigned int index;
++
++	index = conn_hash(sig->gr_saddr, sig->gr_daddr,
++			  sig->gr_sport, sig->gr_dport,
++			  gr_conn_table_size);
++
++	match = gr_conn_table[index];
++	while (match && !conn_match(match->sig,
++		sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
++		sig->gr_dport)) {
++		last = match;
++		match = match->next;
++	}
++
++	if (match) {
++		if (last)
++			last->next = match->next;
++		else
++			gr_conn_table[index] = NULL;
++		kfree(match);
++	}
++
++	return;
++}
++
++static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++					     __u16 sport, __u16 dport)
++{
++	struct conn_table_entry *match;
++	unsigned int index;
++
++	index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++	match = gr_conn_table[index];
++	while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
++		match = match->next;
++
++	if (match)
++		return match->sig;
++	else
++		return NULL;
++}
++
++#endif
++
++void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
++{
++#ifdef CONFIG_GRKERNSEC
++	struct signal_struct *sig = task->signal;
++	struct conn_table_entry *newent;
++
++	newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
++	if (newent == NULL)
++		return;
++	/* no bh lock needed since we are called with bh disabled */
++	spin_lock(&gr_conn_table_lock);
++	gr_del_task_from_ip_table_nolock(sig);
++	sig->gr_saddr = inet->inet_rcv_saddr;
++	sig->gr_daddr = inet->inet_daddr;
++	sig->gr_sport = inet->inet_sport;
++	sig->gr_dport = inet->inet_dport;
++	gr_add_to_task_ip_table_nolock(sig, newent);
++	spin_unlock(&gr_conn_table_lock);
++#endif
++	return;
++}
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++	spin_lock_bh(&gr_conn_table_lock);
++	gr_del_task_from_ip_table_nolock(task->signal);
++	spin_unlock_bh(&gr_conn_table_lock);
++#endif
++	return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++	struct signal_struct *p, *set;
++	const struct inet_sock *inet = inet_sk(sk);
++
++	if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++		return;
++
++	set = current->signal;
++
++	spin_lock_bh(&gr_conn_table_lock);
++	p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
++				    inet->inet_dport, inet->inet_sport);
++	if (unlikely(p != NULL)) {
++		set->curr_ip = p->curr_ip;
++		set->used_accept = 1;
++		gr_del_task_from_ip_table_nolock(p);
++		spin_unlock_bh(&gr_conn_table_lock);
++		return;
++	}
++	spin_unlock_bh(&gr_conn_table_lock);
++
++	set->curr_ip = inet->inet_daddr;
++	set->used_accept = 1;
++#endif
++	return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++	if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++	    (family != AF_UNIX) && (family != AF_LOCAL)) {
++		gr_log_int_str2(GR_DONT_AUDIT, GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol));
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++	if (grsec_enable_socket_server &&
++	    in_group_p(grsec_socket_server_gid) &&
++	    sck && (sck->sa_family != AF_UNIX) &&
++	    (sck->sa_family != AF_LOCAL)) {
++		gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
++
++int
++gr_handle_sock_server_other(const struct sock *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++	if (grsec_enable_socket_server &&
++	    in_group_p(grsec_socket_server_gid) &&
++	    sck && (sck->sk_family != AF_UNIX) &&
++	    (sck->sk_family != AF_LOCAL)) {
++		gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++	if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++	    sck && (sck->sa_family != AF_UNIX) &&
++	    (sck->sa_family != AF_LOCAL)) {
++		gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
++		return -EACCES;
++	}
++#endif
++	return 0;
++}
++
++kernel_cap_t
++gr_cap_rtnetlink(struct sock *sock)
++{
++#ifdef CONFIG_GRKERNSEC
++	if (!gr_acl_is_enabled())
++		return current_cap();
++	else if (sock->sk_protocol == NETLINK_ISCSI &&
++		 cap_raised(current_cap(), CAP_SYS_ADMIN) &&
++		 gr_is_capable(CAP_SYS_ADMIN))
++		return current_cap();
++	else if (sock->sk_protocol == NETLINK_AUDIT &&
++		 cap_raised(current_cap(), CAP_AUDIT_WRITE) &&
++		 gr_is_capable(CAP_AUDIT_WRITE) &&
++		 cap_raised(current_cap(), CAP_AUDIT_CONTROL) &&
++		 gr_is_capable(CAP_AUDIT_CONTROL))
++		return current_cap();
++	else if (cap_raised(current_cap(), CAP_NET_ADMIN) &&
++		 ((sock->sk_protocol == NETLINK_ROUTE) ?
++		  gr_is_capable_nolog(CAP_NET_ADMIN) :
++		  gr_is_capable(CAP_NET_ADMIN)))
++		return current_cap();
++	else
++		return __cap_empty_set;
++#else
++	return current_cap();
++#endif
++}
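Editor's note: the connection table in grsec_sock.c above keys each TCP 4-tuple to the signal_struct of the task that created it, so that an accept()ing server can be tagged with the peer's IP for logging. The bucket function is trivial; exercising it in userspace (illustrative only; host byte order used for brevity):

    /* conn_hash from grsec_sock.c, run standalone to show how a
     * 4-tuple maps onto the 32749-bucket table. */
    #include <stdio.h>
    #include <stdint.h>

    #define TABLE_SIZE 32749

    static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
                                  uint16_t sport, uint16_t dport)
    {
            return (daddr + saddr + ((uint32_t)sport << 8) +
                    ((uint32_t)dport << 16)) % TABLE_SIZE;
    }

    int main(void)
    {
            /* 192.168.1.10:40000 -> 10.0.0.1:22 */
            printf("bucket: %u\n", conn_hash(0xc0a8010a, 0x0a000001, 40000, 22));
            return 0;
    }

A prime table size keeps the modulo from degenerating on the power-of-two patterns common in IP addresses and ports.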
"chroot_deny_shmat", ++ .data = &grsec_enable_chroot_shmat, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ { ++ .procname = "chroot_deny_unix", ++ .data = &grsec_enable_chroot_unix, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT ++ { ++ .procname = "chroot_deny_mount", ++ .data = &grsec_enable_chroot_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR ++ { ++ .procname = "chroot_deny_fchdir", ++ .data = &grsec_enable_chroot_fchdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE ++ { ++ .procname = "chroot_deny_chroot", ++ .data = &grsec_enable_chroot_double, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT ++ { ++ .procname = "chroot_deny_pivot", ++ .data = &grsec_enable_chroot_pivot, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR ++ { ++ .procname = "chroot_enforce_chdir", ++ .data = &grsec_enable_chroot_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD ++ { ++ .procname = "chroot_deny_chmod", ++ .data = &grsec_enable_chroot_chmod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD ++ { ++ .procname = "chroot_deny_mknod", ++ .data = &grsec_enable_chroot_mknod, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE ++ { ++ .procname = "chroot_restrict_nice", ++ .data = &grsec_enable_chroot_nice, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG ++ { ++ .procname = "chroot_execlog", ++ .data = &grsec_enable_chroot_execlog, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS ++ { ++ .procname = "chroot_caps", ++ .data = &grsec_enable_chroot_caps, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL ++ { ++ .procname = "chroot_deny_sysctl", ++ .data = &grsec_enable_chroot_sysctl, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE ++ { ++ .procname = "tpe", ++ .data = &grsec_enable_tpe, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "tpe_gid", ++ .data = &grsec_tpe_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ { ++ .procname = "tpe_invert", ++ .data = &grsec_enable_tpe_invert, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ { ++ .procname = "tpe_restrict_all", ++ .data = &grsec_enable_tpe_all, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL ++ { ++ .procname = "socket_all", ++ .data = &grsec_enable_socket_all, ++ .maxlen = 
sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_all_gid", ++ .data = &grsec_socket_all_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT ++ { ++ .procname = "socket_client", ++ .data = &grsec_enable_socket_client, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_client_gid", ++ .data = &grsec_socket_client_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER ++ { ++ .procname = "socket_server", ++ .data = &grsec_enable_socket_server, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "socket_server_gid", ++ .data = &grsec_socket_server_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP ++ { ++ .procname = "audit_group", ++ .data = &grsec_enable_group, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ { ++ .procname = "audit_gid", ++ .data = &grsec_audit_gid, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR ++ { ++ .procname = "audit_chdir", ++ .data = &grsec_enable_chdir, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT ++ { ++ .procname = "audit_mount", ++ .data = &grsec_enable_mount, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ { ++ .procname = "audit_textrel", ++ .data = &grsec_enable_audit_textrel, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_DMESG ++ { ++ .procname = "dmesg", ++ .data = &grsec_enable_dmesg, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK ++ { ++ .procname = "chroot_findtask", ++ .data = &grsec_enable_chroot_findtask, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_RESLOG ++ { ++ .procname = "resource_logging", ++ .data = &grsec_resource_logging, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE ++ { ++ .procname = "audit_ptrace", ++ .data = &grsec_enable_audit_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE ++ { ++ .procname = "harden_ptrace", ++ .data = &grsec_enable_harden_ptrace, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++ { ++ .procname = "grsec_lock", ++ .data = &grsec_lock, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++#endif ++#ifdef CONFIG_GRKERNSEC_ROFS ++ { ++ .procname = "romount_protect", ++ .data = &grsec_enable_rofs, ++ .maxlen = sizeof(int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec_minmax, ++ .extra1 = &one, ++ .extra2 = &one, ++ }, ++#endif ++ { } ++}; ++#endif +diff -urNp linux-2.6.35.4/grsecurity/grsec_textrel.c linux-2.6.35.4/grsecurity/grsec_textrel.c +--- linux-2.6.35.4/grsecurity/grsec_textrel.c 1969-12-31 19:00:00.000000000 -0500 ++++ 
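Editor's note: each entry in grsecurity_table above becomes a root-only (mode 0600) file under the sysctl tree, and gr_handle_sysctl_mod makes the whole "grsecurity" directory read-only once grsec_lock is set. A userspace sketch of flipping the lock (illustrative only; the path assumes the table is registered under kernel/grsecurity, the directory name gr_handle_sysctl_mod checks for):

    #include <stdio.h>

    int main(void)
    {
            const char *path = "/proc/sys/kernel/grsecurity/grsec_lock";
            FILE *f = fopen(path, "w");

            if (!f) {
                    perror(path);   /* also fails once grsec_lock is set */
                    return 1;
            }
            fputs("1\n", f);        /* one-way: cannot be cleared at runtime */
            fclose(f);
            return 0;
    }

Setting grsec_lock is deliberately irreversible without a reboot, so a compromised root account cannot quietly disable the protections.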
linux-2.6.35.4/grsecurity/grsec_textrel.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,16 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/file.h> ++#include <linux/grinternal.h> ++#include <linux/grsecurity.h> ++ ++void ++gr_log_textrel(struct vm_area_struct * vma) ++{ ++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL ++ if (grsec_enable_audit_textrel) ++ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff); ++#endif ++ return; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsec_time.c linux-2.6.35.4/grsecurity/grsec_time.c +--- linux-2.6.35.4/grsecurity/grsec_time.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_time.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,13 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/grinternal.h> ++ ++void ++gr_log_timechange(void) ++{ ++#ifdef CONFIG_GRKERNSEC_TIME ++ if (grsec_enable_time) ++ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG); ++#endif ++ return; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsec_tpe.c linux-2.6.35.4/grsecurity/grsec_tpe.c +--- linux-2.6.35.4/grsecurity/grsec_tpe.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsec_tpe.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,39 @@ ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/file.h> ++#include <linux/fs.h> ++#include <linux/grinternal.h> ++ ++extern int gr_acl_tpe_check(void); ++ ++int ++gr_tpe_allow(const struct file *file) ++{ ++#ifdef CONFIG_GRKERNSEC ++ struct inode *inode = file->f_path.dentry->d_parent->d_inode; ++ const struct cred *cred = current_cred(); ++ ++ if (cred->uid && ((grsec_enable_tpe && ++#ifdef CONFIG_GRKERNSEC_TPE_INVERT ++ ((grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid)) || ++ (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))) ++#else ++ in_group_p(grsec_tpe_gid) ++#endif ++ ) || gr_acl_tpe_check()) && ++ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) || ++ (inode->i_mode & S_IWOTH))))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++#ifdef CONFIG_GRKERNSEC_TPE_ALL ++ if (cred->uid && grsec_enable_tpe && grsec_enable_tpe_all && ++ ((inode->i_uid && (inode->i_uid != cred->uid)) || ++ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) { ++ gr_log_fs_generic(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, file->f_path.dentry, file->f_path.mnt); ++ return 0; ++ } ++#endif ++#endif ++ return 1; ++} +diff -urNp linux-2.6.35.4/grsecurity/grsum.c linux-2.6.35.4/grsecurity/grsum.c +--- linux-2.6.35.4/grsecurity/grsum.c 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/grsum.c 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,61 @@ ++#include <linux/err.h> ++#include <linux/kernel.h> ++#include <linux/sched.h> ++#include <linux/mm.h> ++#include <linux/scatterlist.h> ++#include <linux/crypto.h> ++#include <linux/gracl.h> ++ ++ ++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE) ++#error "crypto and sha256 must be built into the kernel" ++#endif ++ ++int ++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum) ++{ ++ char *p; ++ struct crypto_hash *tfm; ++ struct hash_desc desc; ++ struct scatterlist sg; ++ unsigned char temp_sum[GR_SHA_LEN]; ++ volatile int retval = 0; ++ volatile int dummy = 0; ++ unsigned int i; ++ ++ sg_init_table(&sg, 1); ++ ++ tfm = crypto_alloc_hash("sha256", 
0, CRYPTO_ALG_ASYNC); ++ if (IS_ERR(tfm)) { ++ /* should never happen, since sha256 should be built in */ ++ return 1; ++ } ++ ++ desc.tfm = tfm; ++ desc.flags = 0; ++ ++ crypto_hash_init(&desc); ++ ++ p = salt; ++ sg_set_buf(&sg, p, GR_SALT_LEN); ++ crypto_hash_update(&desc, &sg, sg.length); ++ ++ p = entry->pw; ++ sg_set_buf(&sg, p, strlen(p)); ++ ++ crypto_hash_update(&desc, &sg, sg.length); ++ ++ crypto_hash_final(&desc, temp_sum); ++ ++ memset(entry->pw, 0, GR_PW_LEN); ++ ++ for (i = 0; i < GR_SHA_LEN; i++) ++ if (sum[i] != temp_sum[i]) ++ retval = 1; ++ else ++ dummy = 1; // waste a cycle ++ ++ crypto_free_hash(tfm); ++ ++ return retval; ++} +diff -urNp linux-2.6.35.4/grsecurity/Kconfig linux-2.6.35.4/grsecurity/Kconfig +--- linux-2.6.35.4/grsecurity/Kconfig 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/Kconfig 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,986 @@ ++# ++# grsecurity configuration ++# ++ ++menu "Grsecurity" ++ ++config GRKERNSEC ++ bool "Grsecurity" ++ select CRYPTO ++ select CRYPTO_SHA256 ++ help ++ If you say Y here, you will be able to configure many features ++ that will enhance the security of your system. It is highly ++ recommended that you say Y here and read through the help ++ for each option so that you fully understand the features and ++ can evaluate their usefulness for your machine. ++ ++choice ++ prompt "Security Level" ++ depends on GRKERNSEC ++ default GRKERNSEC_CUSTOM ++ ++config GRKERNSEC_LOW ++ bool "Low" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_EXECVE ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_CHDIR ++ ++ help ++ If you choose this option, several of the grsecurity options will ++ be enabled that will give you greater protection against a number ++ of attacks, while ensuring that none of your software will have any ++ conflicts with the additional security measures. If you run a lot ++ of unusual software, or you are having problems with the higher ++ security levels, you should say Y here. With this option, the ++ following features are enabled: ++ ++ - Linking restrictions ++ - FIFO restrictions ++ - Enforcing RLIMIT_NPROC on execve ++ - Restricted dmesg ++ - Enforced chdir("/") on chroot ++ - Runtime module disabling ++ ++config GRKERNSEC_MEDIUM ++ bool "Medium" ++ select PAX ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_EXECVE ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_USERGROUP ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || SPARC32 || SPARC64 || PPC) && (SLAB || SLUB || SLOB)) ++ ++ help ++ If you say Y here, several features in addition to those included ++ in the low additional security level will be enabled.
These ++ features provide even more security to your system, though in rare ++ cases they may be incompatible with very old or poorly written ++ software. If you enable this option, make sure that your auth ++ service (identd) is running as gid 1001. With this option, ++ the following features (in addition to those provided in the ++ low additional security level) will be enabled: ++ ++ - Failed fork logging ++ - Time change logging ++ - Signal logging ++ - Deny mounts in chroot ++ - Deny double chrooting ++ - Deny sysctl writes in chroot ++ - Deny mknod in chroot ++ - Deny access to abstract AF_UNIX sockets out of chroot ++ - Deny pivot_root in chroot ++ - Denied writes of /dev/kmem, /dev/mem, and /dev/port ++ - /proc restrictions with special GID set to 10 (usually wheel) ++ - Address Space Layout Randomization (ASLR) ++ - Prevent exploitation of most refcount overflows ++ - Bounds checking of copying between the kernel and userland ++ ++config GRKERNSEC_HIGH ++ bool "High" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_EXECVE ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SHMAT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_FCHDIR ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_CHROOT_CAPS ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_CHROOT_FINDTASK ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_HIDESYM ++ select GRKERNSEC_BRUTE ++ select GRKERNSEC_PROC_USERGROUP ++ select GRKERNSEC_KMEM ++ select GRKERNSEC_RESLOG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_PROC_ADD ++ select GRKERNSEC_CHROOT_CHMOD ++ select GRKERNSEC_CHROOT_NICE ++ select GRKERNSEC_AUDIT_MOUNT ++ select GRKERNSEC_MODHARDEN if (MODULES) ++ select GRKERNSEC_HARDEN_PTRACE ++ select GRKERNSEC_VM86 if (X86_32) ++ select PAX ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_NOEXEC ++ select PAX_MPROTECT ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select PAX_KERNEXEC if ((PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN) ++ select PAX_MEMORY_UDEREF if (X86 && !XEN) ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64) ++ select PAX_SEGMEXEC if (X86_32) ++ select PAX_PAGEEXEC ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64) ++ select PAX_EMUTRAMP if (PARISC) ++ select PAX_EMUSIGRT if (PARISC) ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS if (PAX_ETEXECRELOCS || (IA64 || PPC || X86)) ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || PPC || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB)) ++ help ++ If you say Y here, many of the features of grsecurity will be ++ enabled, which will protect you against many kinds of attacks ++ against your system. The heightened security comes at a cost ++ of an increased chance of incompatibilities with rare software ++ on your machine. Since this security level enables PaX, you should ++ view <http://pax.grsecurity.net> and read about the PaX ++ project. While you are there, download chpax and run it on ++ binaries that cause problems with PaX. Also remember that ++ since the /proc restrictions are enabled, you must run your ++ identd as gid 1001. 
This security level enables the following ++ features in addition to those listed in the low and medium ++ security levels: ++ ++ - Additional /proc restrictions ++ - Chmod restrictions in chroot ++ - No signals, ptrace, or viewing of processes outside of chroot ++ - Capability restrictions in chroot ++ - Deny fchdir out of chroot ++ - Priority restrictions in chroot ++ - Segmentation-based implementation of PaX ++ - Mprotect restrictions ++ - Removal of addresses from /proc/<pid>/[smaps|maps|stat] ++ - Kernel stack randomization ++ - Mount/unmount/remount logging ++ - Kernel symbol hiding ++ - Prevention of memory exhaustion-based exploits ++ - Hardening of module auto-loading ++ - Ptrace restrictions ++ - Restricted vm86 mode ++ ++config GRKERNSEC_CUSTOM ++ bool "Custom" ++ help ++ If you say Y here, you will be able to configure every grsecurity ++ option, which allows you to enable many more features that aren't ++ covered in the basic security levels. These additional features ++ include TPE, socket restrictions, and the sysctl system for ++ grsecurity. It is advised that you read through the help for ++ each option to determine its usefulness in your situation. ++ ++endchoice ++ ++menu "Address Space Protection" ++depends on GRKERNSEC ++ ++config GRKERNSEC_KMEM ++ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port" ++ help ++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to ++ be written to via mmap or otherwise to modify the running kernel. ++ /dev/port will also not be allowed to be opened. If you have module ++ support disabled, enabling this will close up four ways that are ++ currently used to insert malicious code into the running kernel. ++ Even with all these features enabled, we still highly recommend that ++ you use the RBAC system, as it is still possible for an attacker to ++ modify the running kernel through privileged I/O granted by ioperm/iopl. ++ If you are not using XFree86, you may be able to stop this additional ++ case by enabling the 'Disable privileged I/O' option. Though nothing ++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem, ++ but only to video memory, which is the only writing we allow in this ++ case. If /dev/kmem or /dev/mem are mmapped without PROT_WRITE, they ++ cannot later be mprotected with PROT_WRITE. ++ It is highly recommended that you say Y here if you meet all the ++ conditions above. ++ ++config GRKERNSEC_VM86 ++ bool "Restrict VM86 mode" ++ depends on X86_32 ++ ++ help ++ If you say Y here, only processes with CAP_SYS_RAWIO will be able to ++ make use of a special execution mode on 32-bit x86 processors called ++ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain ++ video cards and will still work with this option enabled. The purpose ++ of the option is to prevent exploitation of emulation errors in ++ virtualization of vm86 mode like the one discovered in VMware in 2009. ++ Nearly all users should be able to enable this option. ++ ++config GRKERNSEC_IO ++ bool "Disable privileged I/O" ++ depends on X86 ++ select RTC_CLASS ++ select RTC_INTF_DEV ++ select RTC_DRV_CMOS ++ ++ help ++ If you say Y here, all ioperm and iopl calls will return an error. ++ Ioperm and iopl can be used to modify the running kernel. ++ Unfortunately, some programs need this access to operate properly, ++ the most notable of which are XFree86 and hwclock.
The hwclock problem can be ++ remedied by having RTC support in the kernel, so real-time ++ clock support is enabled alongside this option to ensure ++ that hwclock operates correctly. XFree86 still will not ++ operate correctly with this option enabled, so DO NOT CHOOSE Y ++ IF YOU USE XFree86. If you use XFree86 and you still want to ++ protect your kernel against modification, use the RBAC system. ++ ++config GRKERNSEC_PROC_MEMMAP ++ bool "Remove addresses from /proc/<pid>/[smaps|maps|stat]" ++ default y if (PAX_NOEXEC || PAX_ASLR) ++ depends on PAX_NOEXEC || PAX_ASLR ++ help ++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will ++ give no information about the addresses of a task's mappings if ++ PaX features that rely on random addresses are enabled on that task. ++ If you use PaX it is strongly recommended that you say Y here as it ++ closes up a hole that makes the full ASLR useless for suid ++ binaries. ++ ++config GRKERNSEC_BRUTE ++ bool "Deter exploit bruteforcing" ++ help ++ If you say Y here, attempts to bruteforce exploits against forking ++ daemons such as apache or sshd will be deterred. When a child of a ++ forking daemon is killed by PaX or crashes due to an illegal ++ instruction, the parent process will be delayed 30 seconds upon every ++ subsequent fork until the administrator is able to assess the ++ situation and restart the daemon. It is recommended that you also ++ enable signal logging in the auditing section so that logs are ++ generated when a process performs an illegal instruction. ++ ++config GRKERNSEC_MODHARDEN ++ bool "Harden module auto-loading" ++ depends on MODULES ++ help ++ If you say Y here, module auto-loading in response to use of some ++ feature implemented by an unloaded module will be restricted to ++ root users. Enabling this option helps defend against attacks ++ by unprivileged users who abuse the auto-loading behavior to ++ cause a vulnerable module to load that is then exploited. ++ ++ If this option prevents a legitimate use of auto-loading for a ++ non-root user, the administrator can execute modprobe manually ++ with the exact name of the module mentioned in the alert log. ++ Alternatively, the administrator can add the module to the list ++ of modules loaded at boot by modifying init scripts. ++ ++ Modification of init scripts will most likely be needed on ++ Ubuntu servers with encrypted home directory support enabled, ++ as the first non-root user logging in will cause the ecb(aes), ++ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded. ++ ++config GRKERNSEC_HIDESYM ++ bool "Hide kernel symbols" ++ help ++ If you say Y here, getting information on loaded modules and ++ displaying all kernel symbols through a syscall will be restricted ++ to users with CAP_SYS_MODULE. For software compatibility reasons, ++ /proc/kallsyms will be restricted to the root user. The RBAC ++ system can hide that entry even from root. ++ ++ This option also prevents leaking of kernel addresses through ++ several /proc entries. ++ ++ Note that this option is only effective provided the following ++ conditions are met: ++ 1) The kernel using grsecurity is not precompiled by some distribution ++ 2) You are using the RBAC system and hiding other files such as your ++ kernel image and System.map. Alternatively, enabling this option ++ causes the permissions on /boot, /lib/modules, and the kernel ++ source directory to change at compile time to prevent ++ reading by non-root users.
++ If the above conditions are met, this option will aid in providing ++ useful protection against local kernel exploitation of overflows ++ and arbitrary read/write vulnerabilities. ++ ++endmenu ++menu "Role Based Access Control Options" ++depends on GRKERNSEC ++ ++config GRKERNSEC_NO_RBAC ++ bool "Disable RBAC system" ++ help ++ If you say Y here, the /dev/grsec device will be removed from the kernel, ++ preventing the RBAC system from being enabled. You should only say Y ++ here if you have no intention of using the RBAC system, so as to prevent ++ an attacker with root access from misusing the RBAC system to hide files ++ and processes when loadable module support and /dev/[k]mem have been ++ locked down. ++ ++config GRKERNSEC_ACL_HIDEKERN ++ bool "Hide kernel processes" ++ help ++ If you say Y here, all kernel threads will be hidden from all ++ processes except those whose subject has the "view hidden processes" ++ flag. ++ ++config GRKERNSEC_ACL_MAXTRIES ++ int "Maximum tries before password lockout" ++ default 3 ++ help ++ This option enforces the maximum number of times a user can attempt ++ to authorize themselves with the grsecurity RBAC system before being ++ denied the ability to attempt authorization again for a specified time. ++ The lower the number, the harder it will be to brute-force a password. ++ ++config GRKERNSEC_ACL_TIMEOUT ++ int "Time to wait after max password tries, in seconds" ++ default 30 ++ help ++ This option specifies the time the user must wait after attempting to ++ authorize to the RBAC system with the maximum number of invalid ++ passwords. The higher the number, the harder it will be to brute-force ++ a password. ++ ++endmenu ++menu "Filesystem Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_PROC ++ bool "Proc restrictions" ++ help ++ If you say Y here, the permissions of the /proc filesystem ++ will be altered to enhance system security and privacy. You MUST ++ choose either a user-only restriction or a user-and-group restriction. ++ Depending upon the option you choose, you can either restrict users to ++ seeing only the processes they themselves run, or choose a special group ++ that can view all processes and files normally restricted to root. ++ NOTE: If you're running identd as ++ a non-root user, you will have to run it as the group you specify here. ++ ++config GRKERNSEC_PROC_USER ++ bool "Restrict /proc to user only" ++ depends on GRKERNSEC_PROC ++ help ++ If you say Y here, non-root users will only be able to view their own ++ processes, and will be restricted from viewing network-related information ++ and kernel symbol and module information. ++ ++config GRKERNSEC_PROC_USERGROUP ++ bool "Allow special group" ++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER ++ help ++ If you say Y here, you will be able to select a group that will be ++ able to view all processes, network-related information, and ++ kernel symbol and module information. This option is useful if you want ++ to run identd as a non-root user. ++ ++config GRKERNSEC_PROC_GID ++ int "GID for special group" ++ depends on GRKERNSEC_PROC_USERGROUP ++ default 1001 ++ ++config GRKERNSEC_PROC_ADD ++ bool "Additional restrictions" ++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP ++ help ++ If you say Y here, additional restrictions will be placed on ++ /proc that keep normal users from viewing device information and ++ slabinfo information that could be useful for exploits.
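Taken together, the /proc options above reduce to a small visibility predicate. The following userspace sketch models that predicate under stated assumptions: the special group mirrors CONFIG_GRKERNSEC_PROC_GID (default 1001), a primary-GID comparison stands in for the kernel's supplementary-group check, and all names are illustrative rather than symbols from the patch.

#include <stdbool.h>
#include <sys/types.h>

/* Sketch of the /proc visibility policy described above; illustrative
 * names only. user_only mirrors GRKERNSEC_PROC_USER, special_gid mirrors
 * CONFIG_GRKERNSEC_PROC_GID. The primary GID stands in for the kernel's
 * supplementary-group test. */
static bool proc_entry_visible(uid_t viewer_uid, gid_t viewer_gid,
                               uid_t task_uid, bool user_only,
                               gid_t special_gid)
{
	if (viewer_uid == 0)
		return true;                 /* root is unrestricted */
	if (viewer_uid == task_uid)
		return true;                 /* users see their own processes */
	if (!user_only && viewer_gid == special_gid)
		return true;                 /* the special group sees everything */
	return false;                        /* everyone else: entry hidden */
}

int main(void)
{
	/* uid 1000, in the gid-1001 special group, viewing uid 1002's task */
	return proc_entry_visible(1000, 1001, 1002, false, 1001) ? 0 : 1;
}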
++ ++config GRKERNSEC_LINK ++ bool "Linking restrictions" ++ help ++ If you say Y here, /tmp race exploits will be prevented, since users ++ will no longer be able to follow symlinks owned by other users in ++ world-writable +t directories (e.g. /tmp), unless the owner of the ++ symlink is the owner of the directory. Users will also not be ++ able to hardlink to files they do not own. If the sysctl option is ++ enabled, a sysctl option with name "linking_restrictions" is created. ++ ++config GRKERNSEC_FIFO ++ bool "FIFO restrictions" ++ help ++ If you say Y here, users will not be able to write to FIFOs they don't ++ own in world-writable +t directories (e.g. /tmp), unless the owner of ++ the FIFO is the same as the owner of the directory it's held in. If the sysctl ++ option is enabled, a sysctl option with name "fifo_restrictions" is ++ created. ++ ++config GRKERNSEC_ROFS ++ bool "Runtime read-only mount protection" ++ help ++ If you say Y here, a sysctl option with name "romount_protect" will ++ be created. By setting this option to 1 at runtime, filesystems ++ will be protected in the following ways: ++ * No new writable mounts will be allowed ++ * Existing read-only mounts cannot be remounted read/write ++ * Write operations will be denied on all block devices ++ This option acts independently of grsec_lock: once it is set to 1, ++ it cannot be turned off. Therefore, please be mindful of the resulting ++ behavior if this option is enabled in an init script on a read-only ++ filesystem. This feature is mainly intended for secure embedded systems. ++ ++config GRKERNSEC_CHROOT ++ bool "Chroot jail restrictions" ++ help ++ If you say Y here, you will be able to choose several options that will ++ make breaking out of a chrooted jail much more difficult. If you ++ encounter no software incompatibilities with the following options, it ++ is recommended that you enable each one. ++ ++config GRKERNSEC_CHROOT_MOUNT ++ bool "Deny mounts" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ mount or remount filesystems. If the sysctl option is enabled, a ++ sysctl option with name "chroot_deny_mount" is created. ++ ++config GRKERNSEC_CHROOT_DOUBLE ++ bool "Deny double-chroots" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chroot ++ again outside the chroot. This is a widely used method of breaking ++ out of a chroot jail and should not be allowed. If the sysctl ++ option is enabled, a sysctl option with name ++ "chroot_deny_chroot" is created. ++ ++config GRKERNSEC_CHROOT_PIVOT ++ bool "Deny pivot_root in chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to use ++ a function called pivot_root() that was introduced in Linux 2.3.41. It ++ works similarly to chroot in that it changes the root filesystem. This ++ function could be misused in a chrooted process to attempt to break out ++ of the chroot, and therefore should not be allowed. If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_pivot" is ++ created. ++ ++config GRKERNSEC_CHROOT_CHDIR ++ bool "Enforce chdir(\"/\") on all chroots" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the current working directory of all newly-chrooted ++ applications will be set to the root directory of the chroot. ++ The man page on chroot(2) states: ++ Note that this call does not change the current working ++ directory, so that `.'
can be outside the tree rooted at ++ `/'. In particular, the super-user can escape from a ++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'. ++ ++ It is recommended that you say Y here, since it's not known to break ++ any software. If the sysctl option is enabled, a sysctl option with ++ name "chroot_enforce_chdir" is created. ++ ++config GRKERNSEC_CHROOT_CHMOD ++ bool "Deny (f)chmod +s" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to chmod ++ or fchmod files to make them have suid or sgid bits. This protects ++ against another published method of breaking a chroot. If the sysctl ++ option is enabled, a sysctl option with name "chroot_deny_chmod" is ++ created. ++ ++config GRKERNSEC_CHROOT_FCHDIR ++ bool "Deny fchdir out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, a well-known method of breaking chroots by fchdir'ing ++ to a file descriptor of the chrooting process that points to a directory ++ outside the filesystem will be stopped. If the sysctl option ++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created. ++ ++config GRKERNSEC_CHROOT_MKNOD ++ bool "Deny mknod" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be allowed to ++ mknod. The problem with using mknod inside a chroot is that it ++ would allow an attacker to create a device entry that is the same ++ as one on the physical root of your system, which could be ++ anything from the console device to a device for your hard drive (which ++ they could then use to wipe the drive or steal data). It is recommended ++ that you say Y here, unless you run into software incompatibilities. ++ If the sysctl option is enabled, a sysctl option with name ++ "chroot_deny_mknod" is created. ++ ++config GRKERNSEC_CHROOT_SHMAT ++ bool "Deny shmat() out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to attach ++ to shared memory segments that were created outside of the chroot jail. ++ It is recommended that you say Y here. If the sysctl option is enabled, ++ a sysctl option with name "chroot_deny_shmat" is created. ++ ++config GRKERNSEC_CHROOT_UNIX ++ bool "Deny access to abstract AF_UNIX sockets out of chroot" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ connect to abstract (meaning not belonging to a filesystem) Unix ++ domain sockets that were bound outside of a chroot. It is recommended ++ that you say Y here. If the sysctl option is enabled, a sysctl option ++ with name "chroot_deny_unix" is created. ++ ++config GRKERNSEC_CHROOT_FINDTASK ++ bool "Protect outside processes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to ++ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid, ++ getsid, or view any process outside of the chroot. If the sysctl ++ option is enabled, a sysctl option with name "chroot_findtask" is ++ created. ++ ++config GRKERNSEC_CHROOT_NICE ++ bool "Restrict priority changes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, processes inside a chroot will not be able to raise ++ the priority of processes in the chroot, or alter the priority of ++ processes outside the chroot. This provides more security than simply ++ removing CAP_SYS_NICE from the process' capability set.
If the ++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice" ++ is created. ++ ++config GRKERNSEC_CHROOT_SYSCTL ++ bool "Deny sysctl writes" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, an attacker in a chroot will not be able to ++ write to sysctl entries, either by sysctl(2) or through a /proc ++ interface. It is strongly recommended that you say Y here. If the ++ sysctl option is enabled, a sysctl option with name ++ "chroot_deny_sysctl" is created. ++ ++config GRKERNSEC_CHROOT_CAPS ++ bool "Capability restrictions" ++ depends on GRKERNSEC_CHROOT ++ help ++ If you say Y here, the capabilities on all root processes within a ++ chroot jail will be lowered to stop module insertion, raw I/O, ++ system and net admin tasks, rebooting the system, modifying immutable ++ files, modifying IPC owned by another, and changing the system time. ++ This is left as an option because it can break some apps. Disable this ++ if your chrooted apps are having problems performing those kinds of ++ tasks. If the sysctl option is enabled, a sysctl option with ++ name "chroot_caps" is created. ++ ++endmenu ++menu "Kernel Auditing" ++depends on GRKERNSEC ++ ++config GRKERNSEC_AUDIT_GROUP ++ bool "Single group for auditing" ++ help ++ If you say Y here, the exec, chdir, and (un)mount logging features ++ will only operate on a group you specify. This option is recommended ++ if you only want to watch certain users instead of having a large ++ amount of logs from the entire system. If the sysctl option is enabled, ++ a sysctl option with name "audit_group" is created. ++ ++config GRKERNSEC_AUDIT_GID ++ int "GID for auditing" ++ depends on GRKERNSEC_AUDIT_GROUP ++ default 1007 ++ ++config GRKERNSEC_EXECLOG ++ bool "Exec logging" ++ help ++ If you say Y here, all execve() calls will be logged (since the ++ other exec*() calls are frontends to execve(), all execution ++ will be logged). Useful for shell servers that like to keep track ++ of their users. If the sysctl option is enabled, a sysctl option with ++ name "exec_logging" is created. ++ WARNING: This option, when enabled, will produce a LOT of logs, especially ++ on an active system. ++ ++config GRKERNSEC_RESLOG ++ bool "Resource logging" ++ help ++ If you say Y here, all attempts to overstep resource limits will ++ be logged with the resource name, the requested size, and the current ++ limit. It is highly recommended that you say Y here. If the sysctl ++ option is enabled, a sysctl option with name "resource_logging" is ++ created. If the RBAC system is enabled, the sysctl value is ignored. ++ ++config GRKERNSEC_CHROOT_EXECLOG ++ bool "Log execs within chroot" ++ help ++ If you say Y here, all executions inside a chroot jail will be logged ++ to syslog. This can cause a large amount of logs if certain ++ applications (e.g. djb's daemontools) are installed on the system, and ++ is therefore left as an option. If the sysctl option is enabled, a ++ sysctl option with name "chroot_execlog" is created. ++ ++config GRKERNSEC_AUDIT_PTRACE ++ bool "Ptrace logging" ++ help ++ If you say Y here, all attempts to attach to a process via ptrace ++ will be logged. If the sysctl option is enabled, a sysctl option ++ with name "audit_ptrace" is created. ++ ++config GRKERNSEC_AUDIT_CHDIR ++ bool "Chdir logging" ++ help ++ If you say Y here, all chdir() calls will be logged. If the sysctl ++ option is enabled, a sysctl option with name "audit_chdir" is created.
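The audit options above share one gate: when "Single group for auditing" is enabled, an event is logged only for processes carrying the audit GID. Below is a minimal userspace model of that gate, assuming the default GID of 1007 and using getgroups() as a stand-in for the kernel's in_group_p(); the function names are illustrative, not the patch's API.

#include <stdbool.h>
#include <stdio.h>
#include <sys/types.h>
#include <unistd.h>

/* Model of the "single group for auditing" gate: log either when group
 * filtering is off, or when the process carries the audit GID (default
 * 1007) among its supplementary groups. */
static bool should_audit(bool audit_group_enabled, gid_t audit_gid)
{
	gid_t groups[64];
	int n, i;

	if (!audit_group_enabled)
		return true;              /* no filtering: audit everyone */
	n = getgroups(64, groups);        /* userspace stand-in for in_group_p() */
	for (i = 0; i < n; i++)
		if (groups[i] == audit_gid)
			return true;      /* member of the audit group */
	return false;                     /* not watched: skip the log */
}

int main(void)
{
	if (should_audit(true, 1007))
		puts("exec/chdir/mount events from this process would be logged");
	return 0;
}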
++ ++config GRKERNSEC_AUDIT_MOUNT ++ bool "(Un)Mount logging" ++ help ++ If you say Y here, all mounts and unmounts will be logged. If the ++ sysctl option is enabled, a sysctl option with name "audit_mount" is ++ created. ++ ++config GRKERNSEC_SIGNAL ++ bool "Signal logging" ++ help ++ If you say Y here, certain important signals will be logged, such as ++ SIGSEGV, which will, as a result, inform you when an error in a program ++ occurs, which in some cases could indicate an exploit attempt. ++ If the sysctl option is enabled, a sysctl option with name ++ "signal_logging" is created. ++ ++config GRKERNSEC_FORKFAIL ++ bool "Fork failure logging" ++ help ++ If you say Y here, all failed fork() attempts will be logged. ++ This could suggest a fork bomb, or someone attempting to overstep ++ their process limit. If the sysctl option is enabled, a sysctl option ++ with name "forkfail_logging" is created. ++ ++config GRKERNSEC_TIME ++ bool "Time change logging" ++ help ++ If you say Y here, any changes of the system clock will be logged. ++ If the sysctl option is enabled, a sysctl option with name ++ "timechange_logging" is created. ++ ++config GRKERNSEC_PROC_IPADDR ++ bool "/proc/<pid>/ipaddr support" ++ help ++ If you say Y here, a new entry will be added to each /proc/<pid> ++ directory that contains the IP address of the person using the task. ++ The IP is carried across local TCP and AF_UNIX stream sockets. ++ This information can be useful for IDS/IPSes to perform remote response ++ to a local attack. The entry is readable by only the owner of the ++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via ++ the RBAC system), and thus does not create privacy concerns. ++ ++config GRKERNSEC_AUDIT_TEXTREL ++ bool 'ELF text relocations logging (READ HELP)' ++ depends on PAX_MPROTECT ++ help ++ If you say Y here, text relocations will be logged with the filename ++ of the offending library or binary. The purpose of the feature is ++ to help Linux distribution developers get rid of libraries and ++ binaries that need text relocations, which hinder the future progress ++ of PaX. Only Linux distribution developers should say Y here, and ++ never on a production machine, as this option creates an information ++ leak that could aid an attacker in defeating the randomization of ++ a single memory region. If the sysctl option is enabled, a sysctl ++ option with name "audit_textrel" is created. ++ ++endmenu ++ ++menu "Executable Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_EXECVE ++ bool "Enforce RLIMIT_NPROC on execs" ++ help ++ If you say Y here, users with a resource limit on processes will ++ have the value checked during execve() calls. The current system ++ only checks the system limit during fork() calls. If the sysctl option ++ is enabled, a sysctl option with name "execve_limiting" is created. ++ ++config GRKERNSEC_DMESG ++ bool "Dmesg(8) restriction" ++ help ++ If you say Y here, non-root users will not be able to use dmesg(8) ++ to view up to the last 4KB of messages in the kernel's log buffer. ++ If the sysctl option is enabled, a sysctl option with name "dmesg" is ++ created. ++ ++config GRKERNSEC_HARDEN_PTRACE ++ bool "Deter ptrace-based process snooping" ++ help ++ If you say Y here, TTY sniffers and other malicious monitoring ++ programs implemented through ptrace will be defeated. If you ++ have been using the RBAC system, this option has already been ++ enabled for several years for all users, with the ability to make ++ fine-grained exceptions.
++ ++ This option only affects the ability of non-root users to ptrace ++ processes that are not descendants of the ptracing process. ++ This means that strace ./binary and gdb ./binary will still work, ++ but attaching to arbitrary processes will not. If the sysctl ++ option is enabled, a sysctl option with name "harden_ptrace" is ++ created. ++ ++config GRKERNSEC_TPE ++ bool "Trusted Path Execution (TPE)" ++ help ++ If you say Y here, you will be able to choose a GID to add to the ++ supplementary groups of users you want to mark as "untrusted." ++ These users will not be able to execute any files that are not in ++ root-owned directories writable only by root. If the sysctl option ++ is enabled, a sysctl option with name "tpe" is created. ++ ++config GRKERNSEC_TPE_ALL ++ bool "Partially restrict all non-root users" ++ depends on GRKERNSEC_TPE ++ help ++ If you say Y here, all non-root users will be covered under ++ a weaker TPE restriction. This is separate from, and in addition to, ++ the main TPE options that you have selected elsewhere. Thus, if a ++ "trusted" GID is chosen, this restriction applies even to that GID. ++ Under this restriction, all non-root users will only be allowed to ++ execute files in directories they own that are not group or ++ world-writable, or in directories owned by root and writable only by ++ root. If the sysctl option is enabled, a sysctl option with name ++ "tpe_restrict_all" is created. ++ ++config GRKERNSEC_TPE_INVERT ++ bool "Invert GID option" ++ depends on GRKERNSEC_TPE ++ help ++ If you say Y here, the group you specify in the TPE configuration will ++ decide what group TPE restrictions will be *disabled* for. This ++ option is useful if you want TPE restrictions to be applied to most ++ users on the system. If the sysctl option is enabled, a sysctl option ++ with name "tpe_invert" is created. Unlike other sysctl options, this ++ entry will default to on for backward compatibility. ++ ++config GRKERNSEC_TPE_GID ++ int "GID for untrusted users" ++ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *enabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++ ++config GRKERNSEC_TPE_GID ++ int "GID for trusted users" ++ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT ++ default 1005 ++ help ++ Setting this GID determines what group TPE restrictions will be ++ *disabled* for. If the sysctl option is enabled, a sysctl option ++ with name "tpe_gid" is created. ++ ++endmenu ++menu "Network Protections" ++depends on GRKERNSEC ++ ++config GRKERNSEC_RANDNET ++ bool "Larger entropy pools" ++ help ++ If you say Y here, the entropy pools used for many features of Linux ++ and grsecurity will be doubled in size. Since several grsecurity ++ features use additional randomness, it is recommended that you say Y ++ here. Saying Y here has a similar effect to modifying ++ /proc/sys/kernel/random/poolsize. ++ ++config GRKERNSEC_BLACKHOLE ++ bool "TCP/UDP blackhole and LAST_ACK DoS prevention" ++ help ++ If you say Y here, neither TCP resets nor ICMP ++ destination-unreachable packets will be sent in response to packets ++ sent to ports for which no associated listening process exists. ++ This feature supports both IPv4 and IPv6 and exempts the ++ loopback interface from blackholing. Enabling this feature ++ makes a host more resilient to DoS attacks and reduces network ++ visibility against scanners.
++ ++ The blackhole feature as implemented is equivalent to the FreeBSD ++ blackhole feature, as it prevents RST responses to all packets, not ++ just SYNs. Under most application behavior this causes no ++ problems, but applications (like haproxy) may not close certain ++ connections in a way that cleanly terminates them on the remote ++ end, leaving the remote host in LAST_ACK state. Because of this ++ side effect and to prevent intentional LAST_ACK DoSes, this ++ feature also adds automatic mitigation against such attacks. ++ The mitigation drastically reduces the amount of time a socket ++ can spend in LAST_ACK state. If you're using haproxy and not ++ all servers it connects to have this option enabled, consider ++ disabling this feature on the haproxy host. ++ ++ If the sysctl option is enabled, two sysctl options with names ++ "ip_blackhole" and "lastack_retries" will be created. ++ While "ip_blackhole" takes the standard zero/non-zero on/off ++ toggle, "lastack_retries" uses the same kinds of values as ++ "tcp_retries1" and "tcp_retries2". The default value of 4 ++ prevents a socket from lasting more than 45 seconds in LAST_ACK ++ state. ++ ++config GRKERNSEC_SOCKET ++ bool "Socket restrictions" ++ help ++ If you say Y here, you will be able to choose from several options. ++ If you assign a GID on your system and add it to the supplementary ++ groups of users you want to restrict socket access to, this patch ++ will perform up to three things, based on the option(s) you choose. ++ ++config GRKERNSEC_SOCKET_ALL ++ bool "Deny any sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID whose users will ++ be unable to connect to other hosts from your machine or run server ++ applications from your machine. If the sysctl option is enabled, a ++ sysctl option with name "socket_all" is created. ++ ++config GRKERNSEC_SOCKET_ALL_GID ++ int "GID to deny all sockets for" ++ depends on GRKERNSEC_SOCKET_ALL ++ default 1004 ++ help ++ Here you can choose the GID to disable socket access for. Remember to ++ add the users you want socket access disabled for to the GID ++ specified here. If the sysctl option is enabled, a sysctl option ++ with name "socket_all_gid" is created. ++ ++config GRKERNSEC_SOCKET_CLIENT ++ bool "Deny client sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID whose users will ++ be unable to connect to other hosts from your machine, but will be ++ able to run servers. If this option is enabled, all users in the group ++ you specify will have to use passive mode when initiating FTP transfers ++ from the shell on your machine. If the sysctl option is enabled, a ++ sysctl option with name "socket_client" is created. ++ ++config GRKERNSEC_SOCKET_CLIENT_GID ++ int "GID to deny client sockets for" ++ depends on GRKERNSEC_SOCKET_CLIENT ++ default 1003 ++ help ++ Here you can choose the GID to disable client socket access for. ++ Remember to add the users you want client socket access disabled for to ++ the GID specified here. If the sysctl option is enabled, a sysctl ++ option with name "socket_client_gid" is created. ++ ++config GRKERNSEC_SOCKET_SERVER ++ bool "Deny server sockets to group" ++ depends on GRKERNSEC_SOCKET ++ help ++ If you say Y here, you will be able to choose a GID whose users will ++ be unable to run server applications from your machine. If the sysctl ++ option is enabled, a sysctl option with name "socket_server" is created.
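The three socket restrictions combine into one decision per socket operation. The following is a hedged sketch of that decision, assuming the default GIDs (1004/1003/1002) and treating "client" as connecting out and "server" as listening; the enum and function names are invented for illustration and are not the patch's API.

#include <stdbool.h>
#include <sys/types.h>

/* Sketch of the combined socket policy: the *_gid values mirror the
 * defaults above; is_in() is a caller-supplied group membership test
 * standing in for the kernel's in_group_p(). Illustrative only. */
enum sock_attempt { SOCK_CONNECT, SOCK_LISTEN };

static bool socket_allowed(enum sock_attempt kind, bool (*is_in)(gid_t),
                           gid_t all_gid, gid_t client_gid, gid_t server_gid)
{
	if (is_in(all_gid))
		return false;                 /* no sockets at all */
	if (kind == SOCK_CONNECT && is_in(client_gid))
		return false;                 /* may serve, but not connect out */
	if (kind == SOCK_LISTEN && is_in(server_gid))
		return false;                 /* may connect out, but not serve */
	return true;
}

static bool in_no_groups(gid_t g) { (void)g; return false; }

int main(void)
{
	/* a process in none of the restricted groups may connect out */
	return socket_allowed(SOCK_CONNECT, in_no_groups, 1004, 1003, 1002) ? 0 : 1;
}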
++ ++config GRKERNSEC_SOCKET_SERVER_GID ++ int "GID to deny server sockets for" ++ depends on GRKERNSEC_SOCKET_SERVER ++ default 1002 ++ help ++ Here you can choose the GID to disable server socket access for. ++ Remember to add the users you want server socket access disabled for to ++ the GID specified here. If the sysctl option is enabled, a sysctl ++ option with name "socket_server_gid" is created. ++ ++endmenu ++menu "Sysctl support" ++depends on GRKERNSEC && SYSCTL ++ ++config GRKERNSEC_SYSCTL ++ bool "Sysctl support" ++ help ++ If you say Y here, you will be able to change the options that ++ grsecurity runs with at bootup, without having to recompile your ++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity ++ to enable (1) or disable (0) various features. All the sysctl entries ++ are mutable until the "grsec_lock" entry is set to a non-zero value. ++ All features enabled in the kernel configuration are disabled at boot ++ if you do not say Y to the "Turn on features by default" option. ++ All options should be set at startup, and the grsec_lock entry should ++ be set to a non-zero value after all the options are set. ++ *THIS IS EXTREMELY IMPORTANT* ++ ++config GRKERNSEC_SYSCTL_DISTRO ++ bool "Extra sysctl support for distro makers (READ HELP)" ++ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO ++ help ++ If you say Y here, additional sysctl options will be created ++ for features that affect processes running as root. Therefore, ++ it is critical when using this option that the grsec_lock entry be ++ enabled after boot. Only distros that ship prebuilt kernel packages ++ with this option enabled, and that can ensure grsec_lock is enabled ++ after boot, should use this option. ++ *Failure to set grsec_lock after boot makes all grsec features ++ this option covers useless* ++ ++ Currently this option creates the following sysctl entries: ++ "Disable Privileged I/O": "disable_priv_io" ++ ++config GRKERNSEC_SYSCTL_ON ++ bool "Turn on features by default" ++ depends on GRKERNSEC_SYSCTL ++ help ++ If you say Y here, instead of having all features enabled in the ++ kernel configuration disabled at boot time, the features will be ++ enabled at boot time. It is recommended that you say Y here unless ++ there is some reason you would want all sysctl-tunable features to ++ be disabled by default. As mentioned elsewhere, it is important ++ to enable the grsec_lock entry once you have finished modifying ++ the sysctl entries. ++ ++endmenu ++menu "Logging Options" ++depends on GRKERNSEC ++ ++config GRKERNSEC_FLOODTIME ++ int "Seconds in between log messages (minimum)" ++ default 10 ++ help ++ This option allows you to enforce the minimum number of seconds between ++ grsecurity log messages. The default should be suitable for most ++ people; however, if you choose to change it, choose a value small enough ++ to allow informative logs to be produced, but large enough to ++ prevent flooding. ++ ++config GRKERNSEC_FLOODBURST ++ int "Number of messages in a burst (maximum)" ++ default 4 ++ help ++ This option allows you to choose the maximum number of messages allowed ++ within the flood time interval you chose in a separate option. The ++ default should be suitable for most people; however, if you find that ++ many of your logs are being interpreted as flooding, you may want to ++ raise this value.
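FLOODTIME and FLOODBURST together describe a burst-within-window rate limiter: at most FLOODBURST messages per FLOODTIME seconds, with everything beyond that suppressed until the window rolls over. Below is a self-contained userspace model using the default values (10 seconds, 4 messages); the struct and function names are illustrative, not taken from the patch.

#include <stdbool.h>
#include <time.h>

/* Userspace model of the FLOODTIME/FLOODBURST limiter: at most `burst`
 * messages per `interval`-second window. */
struct flood_state {
	time_t window_start;
	int emitted;
};

static bool flood_ok(struct flood_state *st, int interval, int burst)
{
	time_t now = time(NULL);

	if (now - st->window_start >= interval) {
		st->window_start = now;   /* window expired: start a new one */
		st->emitted = 0;
	}
	if (st->emitted < burst) {
		st->emitted++;
		return true;              /* within the burst: emit the message */
	}
	return false;                     /* flooding: suppress the message */
}

int main(void)
{
	struct flood_state st = { 0, 0 };
	int i, passed = 0;

	for (i = 0; i < 10; i++)
		if (flood_ok(&st, 10, 4))
			passed++;
	return passed == 4 ? 0 : 1;       /* only the 4-message burst passes */
}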
++ ++endmenu ++ ++endmenu +diff -urNp linux-2.6.35.4/grsecurity/Makefile linux-2.6.35.4/grsecurity/Makefile +--- linux-2.6.35.4/grsecurity/Makefile 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/grsecurity/Makefile 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,29 @@ ++# grsecurity's ACL system was originally written in 2001 by Michael Dalton ++# during 2001-2009 it has been completely redesigned by Brad Spengler ++# into an RBAC system ++# ++# All code in this directory and various hooks inserted throughout the kernel ++# are copyright Brad Spengler - Open Source Security, Inc., and released ++# under the GPL v2 or higher ++ ++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \ ++ grsec_mount.o grsec_sig.o grsec_sock.o grsec_sysctl.o \ ++ grsec_time.o grsec_tpe.o grsec_link.o grsec_textrel.o grsec_ptrace.o ++ ++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o \ ++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \ ++ gracl_learn.o grsec_log.o ++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o ++ ++ifndef CONFIG_GRKERNSEC ++obj-y += grsec_disabled.o ++endif ++ ++ifdef CONFIG_GRKERNSEC_HIDESYM ++extra-y := grsec_hidesym.o ++$(obj)/grsec_hidesym.o: ++ @-chmod -f 500 /boot ++ @-chmod -f 500 /lib/modules ++ @-chmod -f 700 . ++ @echo ' grsec: protected kernel image paths' ++endif +diff -urNp linux-2.6.35.4/include/acpi/acoutput.h linux-2.6.35.4/include/acpi/acoutput.h +--- linux-2.6.35.4/include/acpi/acoutput.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/acpi/acoutput.h 2010-09-17 20:12:09.000000000 -0400 +@@ -268,8 +268,8 @@ + * leaving no executable debug code! + */ + #define ACPI_FUNCTION_NAME(a) +-#define ACPI_DEBUG_PRINT(pl) +-#define ACPI_DEBUG_PRINT_RAW(pl) ++#define ACPI_DEBUG_PRINT(pl) do {} while (0) ++#define ACPI_DEBUG_PRINT_RAW(pl) do {} while (0) + + #endif /* ACPI_DEBUG_OUTPUT */ + +diff -urNp linux-2.6.35.4/include/acpi/acpi_drivers.h linux-2.6.35.4/include/acpi/acpi_drivers.h +--- linux-2.6.35.4/include/acpi/acpi_drivers.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/acpi/acpi_drivers.h 2010-09-17 20:12:09.000000000 -0400 +@@ -121,8 +121,8 @@ int acpi_processor_set_thermal_limit(acp + Dock Station + -------------------------------------------------------------------------- */ + struct acpi_dock_ops { +- acpi_notify_handler handler; +- acpi_notify_handler uevent; ++ const acpi_notify_handler handler; ++ const acpi_notify_handler uevent; + }; + + #if defined(CONFIG_ACPI_DOCK) || defined(CONFIG_ACPI_DOCK_MODULE) +@@ -130,7 +130,7 @@ extern int is_dock_device(acpi_handle ha + extern int register_dock_notifier(struct notifier_block *nb); + extern void unregister_dock_notifier(struct notifier_block *nb); + extern int register_hotplug_dock_device(acpi_handle handle, +- struct acpi_dock_ops *ops, ++ const struct acpi_dock_ops *ops, + void *context); + extern void unregister_hotplug_dock_device(acpi_handle handle); + #else +@@ -146,7 +146,7 @@ static inline void unregister_dock_notif + { + } + static inline int register_hotplug_dock_device(acpi_handle handle, +- struct acpi_dock_ops *ops, ++ const struct acpi_dock_ops *ops, + void *context) + { + return -ENODEV; +diff -urNp linux-2.6.35.4/include/asm-generic/atomic-long.h linux-2.6.35.4/include/asm-generic/atomic-long.h +--- linux-2.6.35.4/include/asm-generic/atomic-long.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/atomic-long.h 2010-09-17 20:12:09.000000000 -0400 +@@ -22,6 +22,12 @@ + + 
typedef atomic64_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic64_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic64_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i) + + static inline long atomic_long_read(atomic_long_t *l) +@@ -31,6 +37,15 @@ static inline long atomic_long_read(atom + return (long)atomic64_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic64_t *v = (atomic64_t *)l; +@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomi + atomic64_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomi + atomic64_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomi + atomic64_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -59,6 +101,15 @@ static inline void atomic_long_add(long + atomic64_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ atomic64_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -115,6 +166,15 @@ static inline long atomic_long_inc_retur + return (long)atomic64_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l; ++ ++ return (long)atomic64_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic64_t *v = (atomic64_t *)l; +@@ -140,6 +200,12 @@ static inline long atomic_long_add_unles + + typedef atomic_t atomic_long_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef atomic_unchecked_t atomic_long_unchecked_t; ++#else ++typedef atomic_t atomic_long_unchecked_t; ++#endif ++ + #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i) + static inline long atomic_long_read(atomic_long_t *l) + { +@@ -148,6 +214,15 @@ static inline long atomic_long_read(atom + return (long)atomic_read(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_read_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_set(atomic_long_t *l, long i) + { + atomic_t *v = (atomic_t 
*)l; +@@ -155,6 +230,15 @@ static inline void atomic_long_set(atomi + atomic_set(v, i); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_set_unchecked(v, i); ++} ++#endif ++ + static inline void atomic_long_inc(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -162,6 +246,15 @@ static inline void atomic_long_inc(atomi + atomic_inc(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_inc_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_dec(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -169,6 +262,15 @@ static inline void atomic_long_dec(atomi + atomic_dec(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_dec_unchecked(v); ++} ++#endif ++ + static inline void atomic_long_add(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -176,6 +278,15 @@ static inline void atomic_long_add(long + atomic_add(i, v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ atomic_add_unchecked(i, v); ++} ++#endif ++ + static inline void atomic_long_sub(long i, atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -232,6 +343,15 @@ static inline long atomic_long_inc_retur + return (long)atomic_inc_return(v); + } + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l) ++{ ++ atomic_unchecked_t *v = (atomic_unchecked_t *)l; ++ ++ return (long)atomic_inc_return_unchecked(v); ++} ++#endif ++ + static inline long atomic_long_dec_return(atomic_long_t *l) + { + atomic_t *v = (atomic_t *)l; +@@ -255,4 +375,37 @@ static inline long atomic_long_add_unles + + #endif /* BITS_PER_LONG == 64 */ + ++#ifdef CONFIG_PAX_REFCOUNT ++static inline void pax_refcount_needs_these_functions(void) ++{ ++ atomic_read_unchecked((atomic_unchecked_t *)NULL); ++ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0); ++ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL); ++ atomic_inc_unchecked((atomic_unchecked_t *)NULL); ++ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL); ++ ++ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0); ++ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL); ++ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL); ++} ++#else ++#define atomic_read_unchecked(v) atomic_read(v) ++#define atomic_set_unchecked(v, i) atomic_set((v), (i)) ++#define atomic_add_unchecked(i, v) atomic_add((i), (v)) ++#define atomic_sub_unchecked(i, v) atomic_sub((i), (v)) ++#define atomic_inc_unchecked(v) atomic_inc(v) ++#define atomic_inc_return_unchecked(v) atomic_inc_return(v) ++ ++#define atomic_long_read_unchecked(v) atomic_long_read(v) ++#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i)) ++#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v)) ++#define atomic_long_inc_unchecked(v) atomic_long_inc(v) 
++#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v) ++#define atomic_long_dec_unchecked(v) atomic_long_dec(v) ++#endif ++ + #endif /* _ASM_GENERIC_ATOMIC_LONG_H */ +diff -urNp linux-2.6.35.4/include/asm-generic/dma-mapping-common.h linux-2.6.35.4/include/asm-generic/dma-mapping-common.h +--- linux-2.6.35.4/include/asm-generic/dma-mapping-common.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/dma-mapping-common.h 2010-09-17 20:12:09.000000000 -0400 +@@ -11,7 +11,7 @@ static inline dma_addr_t dma_map_single_ + enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(ptr, size); +@@ -30,7 +30,7 @@ static inline void dma_unmap_single_attr + enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) +@@ -42,7 +42,7 @@ static inline int dma_map_sg_attrs(struc + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + int i, ents; + struct scatterlist *s; + +@@ -59,7 +59,7 @@ static inline void dma_unmap_sg_attrs(st + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + debug_dma_unmap_sg(dev, sg, nents, dir); +@@ -71,7 +71,7 @@ static inline dma_addr_t dma_map_page(st + size_t offset, size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + dma_addr_t addr; + + kmemcheck_mark_initialized(page_address(page) + offset, size); +@@ -85,7 +85,7 @@ static inline dma_addr_t dma_map_page(st + static inline void dma_unmap_page(struct device *dev, dma_addr_t addr, + size_t size, enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->unmap_page) +@@ -97,7 +97,7 @@ static inline void dma_sync_single_for_c + size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_cpu) +@@ -109,7 +109,7 @@ static inline void dma_sync_single_for_d + dma_addr_t addr, size_t size, + enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_single_for_device) +@@ -139,7 +139,7 @@ static inline void + dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_cpu) +@@ -151,7 +151,7 @@ static inline void + dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, + int nelems, enum dma_data_direction dir) + { +- struct dma_map_ops *ops = get_dma_ops(dev); ++ const struct dma_map_ops *ops = get_dma_ops(dev); + + BUG_ON(!valid_dma_direction(dir)); + if (ops->sync_sg_for_device) +diff 
-urNp linux-2.6.35.4/include/asm-generic/futex.h linux-2.6.35.4/include/asm-generic/futex.h +--- linux-2.6.35.4/include/asm-generic/futex.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/futex.h 2010-09-17 20:12:09.000000000 -0400 +@@ -6,7 +6,7 @@ + #include <asm/errno.h> + + static inline int +-futex_atomic_op_inuser (int encoded_op, int __user *uaddr) ++futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) + { + int op = (encoded_op >> 28) & 7; + int cmp = (encoded_op >> 24) & 15; +@@ -48,7 +48,7 @@ futex_atomic_op_inuser (int encoded_op, + } + + static inline int +-futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) ++futex_atomic_cmpxchg_inatomic(u32 __user *uaddr, int oldval, int newval) + { + return -ENOSYS; + } +diff -urNp linux-2.6.35.4/include/asm-generic/int-l64.h linux-2.6.35.4/include/asm-generic/int-l64.h +--- linux-2.6.35.4/include/asm-generic/int-l64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/int-l64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -46,6 +46,8 @@ typedef unsigned int u32; + typedef signed long s64; + typedef unsigned long u64; + ++typedef unsigned int intoverflow_t __attribute__ ((mode(TI))); ++ + #define S8_C(x) x + #define U8_C(x) x ## U + #define S16_C(x) x +diff -urNp linux-2.6.35.4/include/asm-generic/int-ll64.h linux-2.6.35.4/include/asm-generic/int-ll64.h +--- linux-2.6.35.4/include/asm-generic/int-ll64.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/int-ll64.h 2010-09-17 20:12:09.000000000 -0400 +@@ -51,6 +51,8 @@ typedef unsigned int u32; + typedef signed long long s64; + typedef unsigned long long u64; + ++typedef unsigned long long intoverflow_t; ++ + #define S8_C(x) x + #define U8_C(x) x ## U + #define S16_C(x) x +diff -urNp linux-2.6.35.4/include/asm-generic/kmap_types.h linux-2.6.35.4/include/asm-generic/kmap_types.h +--- linux-2.6.35.4/include/asm-generic/kmap_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/kmap_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -29,10 +29,11 @@ KMAP_D(16) KM_IRQ_PTE, + KMAP_D(17) KM_NMI, + KMAP_D(18) KM_NMI_PTE, + KMAP_D(19) KM_KDB, ++KMAP_D(20) KM_CLEARPAGE, + /* + * Remember to update debug_kmap_atomic() when adding new kmap types! 
+ */ +-KMAP_D(20) KM_TYPE_NR ++KMAP_D(21) KM_TYPE_NR + }; + + #undef KMAP_D +diff -urNp linux-2.6.35.4/include/asm-generic/pgtable.h linux-2.6.35.4/include/asm-generic/pgtable.h +--- linux-2.6.35.4/include/asm-generic/pgtable.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/pgtable.h 2010-09-17 20:12:09.000000000 -0400 +@@ -344,6 +344,14 @@ extern void untrack_pfn_vma(struct vm_ar + unsigned long size); + #endif + ++#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL ++static inline unsigned long pax_open_kernel(void) { return 0; } ++#endif ++ ++#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL ++static inline unsigned long pax_close_kernel(void) { return 0; } ++#endif ++ + #endif /* !__ASSEMBLY__ */ + + #endif /* _ASM_GENERIC_PGTABLE_H */ +diff -urNp linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h +--- linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/pgtable-nopmd.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1,14 +1,19 @@ + #ifndef _PGTABLE_NOPMD_H + #define _PGTABLE_NOPMD_H + +-#ifndef __ASSEMBLY__ +- + #include <asm-generic/pgtable-nopud.h> + +-struct mm_struct; +- + #define __PAGETABLE_PMD_FOLDED + ++#define PMD_SHIFT PUD_SHIFT ++#define PTRS_PER_PMD 1 ++#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT) ++#define PMD_MASK (~(PMD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ ++struct mm_struct; ++ + /* + * Having the pmd type consist of a pud gets the size right, and allows + * us to conceptually access the pud entry that this pmd is folded into +@@ -16,11 +21,6 @@ struct mm_struct; + */ + typedef struct { pud_t pud; } pmd_t; + +-#define PMD_SHIFT PUD_SHIFT +-#define PTRS_PER_PMD 1 +-#define PMD_SIZE (1UL << PMD_SHIFT) +-#define PMD_MASK (~(PMD_SIZE-1)) +- + /* + * The "pud_xxx()" functions here are trivial for a folded two-level + * setup: the pmd is never bad, and a pmd always exists (as it's folded +diff -urNp linux-2.6.35.4/include/asm-generic/pgtable-nopud.h linux-2.6.35.4/include/asm-generic/pgtable-nopud.h +--- linux-2.6.35.4/include/asm-generic/pgtable-nopud.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/pgtable-nopud.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1,10 +1,15 @@ + #ifndef _PGTABLE_NOPUD_H + #define _PGTABLE_NOPUD_H + +-#ifndef __ASSEMBLY__ +- + #define __PAGETABLE_PUD_FOLDED + ++#define PUD_SHIFT PGDIR_SHIFT ++#define PTRS_PER_PUD 1 ++#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT) ++#define PUD_MASK (~(PUD_SIZE-1)) ++ ++#ifndef __ASSEMBLY__ ++ + /* + * Having the pud type consist of a pgd gets the size right, and allows + * us to conceptually access the pgd entry that this pud is folded into +@@ -12,11 +17,6 @@ + */ + typedef struct { pgd_t pgd; } pud_t; + +-#define PUD_SHIFT PGDIR_SHIFT +-#define PTRS_PER_PUD 1 +-#define PUD_SIZE (1UL << PUD_SHIFT) +-#define PUD_MASK (~(PUD_SIZE-1)) +- + /* + * The "pgd_xxx()" functions here are trivial for a folded two-level + * setup: the pud is never bad, and a pud always exists (as it's folded +diff -urNp linux-2.6.35.4/include/asm-generic/vmlinux.lds.h linux-2.6.35.4/include/asm-generic/vmlinux.lds.h +--- linux-2.6.35.4/include/asm-generic/vmlinux.lds.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/asm-generic/vmlinux.lds.h 2010-09-17 20:12:09.000000000 -0400 +@@ -213,6 +213,7 @@ + .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \ + VMLINUX_SYMBOL(__start_rodata) = .; \ + *(.rodata) *(.rodata.*) \ ++ *(.data..read_only) \ + *(__vermagic) /* Kernel version magic */ \ + 
*(__markers_strings) /* Markers: strings */ \ + *(__tracepoints_strings)/* Tracepoints: strings */ \ +@@ -670,22 +671,24 @@ + * section in the linker script will go there too. @phdr should have + * a leading colon. + * +- * Note that this macros defines __per_cpu_load as an absolute symbol. ++ * Note that this macros defines per_cpu_load as an absolute symbol. + * If there is no need to put the percpu section at a predetermined + * address, use PERCPU(). + */ + #define PERCPU_VADDR(vaddr, phdr) \ +- VMLINUX_SYMBOL(__per_cpu_load) = .; \ +- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \ ++ per_cpu_load = .; \ ++ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \ + - LOAD_OFFSET) { \ ++ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \ + VMLINUX_SYMBOL(__per_cpu_start) = .; \ + *(.data..percpu..first) \ +- *(.data..percpu..page_aligned) \ + *(.data..percpu) \ ++ . = ALIGN(PAGE_SIZE); \ ++ *(.data..percpu..page_aligned) \ + *(.data..percpu..shared_aligned) \ + VMLINUX_SYMBOL(__per_cpu_end) = .; \ + } phdr \ +- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu); ++ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu); + + /** + * PERCPU - define output section for percpu area, simple version +diff -urNp linux-2.6.35.4/include/drm/drm_pciids.h linux-2.6.35.4/include/drm/drm_pciids.h +--- linux-2.6.35.4/include/drm/drm_pciids.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/drm/drm_pciids.h 2010-09-17 20:12:09.000000000 -0400 +@@ -419,7 +419,7 @@ + {0x1002, 0x9713, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9714, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ + {0x1002, 0x9715, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RS880|RADEON_NEW_MEMMAP|RADEON_IS_IGP}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define r128_PCI_IDS \ + {0x1002, 0x4c45, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +@@ -459,14 +459,14 @@ + {0x1002, 0x5446, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x544C, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x5452, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define mga_PCI_IDS \ + {0x102b, 0x0520, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0521, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G200}, \ + {0x102b, 0x0525, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G400}, \ + {0x102b, 0x2527, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MGA_CARD_TYPE_G550}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define mach64_PCI_IDS \ + {0x1002, 0x4749, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +@@ -489,7 +489,7 @@ + {0x1002, 0x4c53, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c4d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1002, 0x4c4e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define sisdrv_PCI_IDS \ + {0x1039, 0x0300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +@@ -500,7 +500,7 @@ + {0x1039, 0x7300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x18CA, 0x0040, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ + {0x18CA, 0x0042, PCI_ANY_ID, PCI_ANY_ID, 0, 0, SIS_CHIP_315}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define tdfx_PCI_IDS \ + {0x121a, 0x0003, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +@@ -509,7 +509,7 @@ + {0x121a, 0x0007, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x121a, 0x000b, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define viadrv_PCI_IDS \ + {0x1106, 0x3022, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +@@ -521,14 +521,14 @@ + 
{0x1106, 0x3343, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x1106, 0x3230, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_DX9_0}, \ + {0x1106, 0x3157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VIA_PRO_GROUP_A}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define i810_PCI_IDS \ + {0x8086, 0x7121, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7123, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x7125, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x1132, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define i830_PCI_IDS \ + {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +@@ -536,11 +536,11 @@ + {0x8086, 0x3582, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x2572, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ + {0x8086, 0x358e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define gamma_PCI_IDS \ + {0x3d3d, 0x0008, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define savage_PCI_IDS \ + {0x5333, 0x8a20, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_SAVAGE3D}, \ +@@ -566,10 +566,10 @@ + {0x5333, 0x8d02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_TWISTER}, \ + {0x5333, 0x8d03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ + {0x5333, 0x8d04, PCI_ANY_ID, PCI_ANY_ID, 0, 0, S3_PROSAVAGEDDR}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define ffb_PCI_IDS \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} + + #define i915_PCI_IDS \ + {0x8086, 0x3577, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ +@@ -603,4 +603,4 @@ + {0x8086, 0x0042, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x0046, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ + {0x8086, 0x0102, PCI_ANY_ID, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA << 8, 0xffff00, 0}, \ +- {0, 0, 0} ++ {0, 0, 0, 0, 0, 0} +diff -urNp linux-2.6.35.4/include/drm/drmP.h linux-2.6.35.4/include/drm/drmP.h +--- linux-2.6.35.4/include/drm/drmP.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/drm/drmP.h 2010-09-17 20:12:09.000000000 -0400 +@@ -808,7 +808,7 @@ struct drm_driver { + void (*vgaarb_irq)(struct drm_device *dev, bool state); + + /* Driver private ops for this object */ +- struct vm_operations_struct *gem_vm_ops; ++ const struct vm_operations_struct *gem_vm_ops; + + int major; + int minor; +@@ -917,7 +917,7 @@ struct drm_device { + + /** \name Usage Counters */ + /*@{ */ +- int open_count; /**< Outstanding files open */ ++ atomic_t open_count; /**< Outstanding files open */ + atomic_t ioctl_count; /**< Outstanding IOCTLs pending */ + atomic_t vma_count; /**< Outstanding vma areas open */ + int buf_use; /**< Buffers in use -- cannot alloc */ +@@ -928,7 +928,7 @@ struct drm_device { + /*@{ */ + unsigned long counters; + enum drm_stat_type types[15]; +- atomic_t counts[15]; ++ atomic_unchecked_t counts[15]; + /*@} */ + + struct list_head filelist; +diff -urNp linux-2.6.35.4/include/linux/a.out.h linux-2.6.35.4/include/linux/a.out.h +--- linux-2.6.35.4/include/linux/a.out.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/a.out.h 2010-09-17 20:12:09.000000000 -0400 +@@ -39,6 +39,14 @@ enum machine_type { + M_MIPS2 = 152 /* MIPS R6000/R4000 binary */ + }; + ++/* Constants for the N_FLAGS field */ ++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define F_PAX_SEGMEXEC 32 /* Segmentation based 
non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff -urNp linux-2.6.35.4/include/linux/atmdev.h linux-2.6.35.4/include/linux/atmdev.h
+--- linux-2.6.35.4/include/linux/atmdev.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/atmdev.h 2010-09-17 20:12:09.000000000 -0400
+@@ -237,7 +237,7 @@ struct compat_atm_iobuf {
+ #endif
+
+ struct k_atm_aal_stats {
+-#define __HANDLE_ITEM(i) atomic_t i
++#define __HANDLE_ITEM(i) atomic_unchecked_t i
+ __AAL_STAT_ITEMS
+ #undef __HANDLE_ITEM
+ };
+diff -urNp linux-2.6.35.4/include/linux/binfmts.h linux-2.6.35.4/include/linux/binfmts.h
+--- linux-2.6.35.4/include/linux/binfmts.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/binfmts.h 2010-09-17 20:12:09.000000000 -0400
+@@ -87,6 +87,7 @@ struct linux_binfmt {
+ int (*load_binary)(struct linux_binprm *, struct pt_regs * regs);
+ int (*load_shlib)(struct file *);
+ int (*core_dump)(struct coredump_params *cprm);
++ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
+ unsigned long min_coredump; /* minimal dump size */
+ int hasvdso;
+ };
+diff -urNp linux-2.6.35.4/include/linux/blkdev.h linux-2.6.35.4/include/linux/blkdev.h
+--- linux-2.6.35.4/include/linux/blkdev.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/blkdev.h 2010-09-17 20:12:09.000000000 -0400
+@@ -1331,20 +1331,20 @@ static inline int blk_integrity_rq(struc
+ #endif /* CONFIG_BLK_DEV_INTEGRITY */
+
+ struct block_device_operations {
+- int (*open) (struct block_device *, fmode_t);
+- int (*release) (struct gendisk *, fmode_t);
+- int (*locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+- int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+- int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
+- int (*direct_access) (struct block_device *, sector_t,
++ int (* const open) (struct block_device *, fmode_t);
++ int (* const release) (struct gendisk *, fmode_t);
++ int (* const locked_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
++ int (* const ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
++ int (* const compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
++ int (* const direct_access) (struct block_device *, sector_t,
+ void **, unsigned long *);
+- int (*media_changed) (struct gendisk *);
+- void (*unlock_native_capacity) (struct gendisk *);
+- int (*revalidate_disk) (struct gendisk *);
+- int (*getgeo)(struct block_device *, struct hd_geometry *);
++ int (* const media_changed) (struct gendisk *);
++ void (* const unlock_native_capacity) (struct gendisk *);
++ int (* const revalidate_disk) (struct gendisk *);
++ int (*const getgeo)(struct block_device *, struct hd_geometry *);
+ /* this callback is with swap_lock and sometimes page table lock held */
+- void (*swap_slot_free_notify) (struct block_device *, unsigned long);
+- struct module *owner;
++ void (* const swap_slot_free_notify) (struct block_device *, unsigned long);
++ struct module * const owner;
+ };
+
+ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
+diff -urNp linux-2.6.35.4/include/linux/cache.h linux-2.6.35.4/include/linux/cache.h
+--- linux-2.6.35.4/include/linux/cache.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/cache.h 2010-09-17 20:12:09.000000000 -0400
+@@ -16,6 +16,10 @@
+ #define __read_mostly
+ #endif
+
++#ifndef __read_only
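/*
 * [Editor's aside; an illustrative sketch, not part of the patch text.]
 * The fallback completed on the next diff line maps __read_only to plain
 * __read_mostly; under PaX the annotation instead places the object in
 * .data..read_only, which the vmlinux.lds.h hunk earlier folds into
 * .rodata so it is write-protected after boot. Code that must still
 * update such an object brackets the store with the pax_open_kernel()/
 * pax_close_kernel() helpers stubbed in the pgtable.h hunk above. The
 * variable and function below are the editor's invention:
 */
static int hardening_enabled __read_only = 1;

static void late_disable(void)
{
	pax_open_kernel();	/* temporarily lift kernel write protection */
	hardening_enabled = 0;
	pax_close_kernel();
}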
++#define __read_only __read_mostly
++#endif
++
+ #ifndef ____cacheline_aligned
+ #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
+ #endif
+diff -urNp linux-2.6.35.4/include/linux/capability.h linux-2.6.35.4/include/linux/capability.h
+--- linux-2.6.35.4/include/linux/capability.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/capability.h 2010-09-17 20:12:37.000000000 -0400
+@@ -561,6 +561,7 @@ extern const kernel_cap_t __cap_init_eff
+ (security_real_capable_noaudit((t), (cap)) == 0)
+
+ extern int capable(int cap);
++int capable_nolog(int cap);
+
+ /* audit system wants to get cap info from files as well */
+ struct dentry;
+diff -urNp linux-2.6.35.4/include/linux/compat.h linux-2.6.35.4/include/linux/compat.h
+--- linux-2.6.35.4/include/linux/compat.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/compat.h 2010-09-17 20:12:37.000000000 -0400
+@@ -360,5 +360,8 @@ extern ssize_t compat_rw_copy_check_uvec
+ const struct compat_iovec __user *uvector, unsigned long nr_segs,
+ unsigned long fast_segs, struct iovec *fast_pointer,
+ struct iovec **ret_pointer);
++
++extern void __user *compat_alloc_user_space(unsigned long len);
++
+ #endif /* CONFIG_COMPAT */
+ #endif /* _LINUX_COMPAT_H */
+diff -urNp linux-2.6.35.4/include/linux/compiler-gcc4.h linux-2.6.35.4/include/linux/compiler-gcc4.h
+--- linux-2.6.35.4/include/linux/compiler-gcc4.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/compiler-gcc4.h 2010-09-17 20:12:09.000000000 -0400
+@@ -54,6 +54,10 @@
+
+ #endif
+
++#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
++#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
++#define __bos0(ptr) __bos((ptr), 0)
++#define __bos1(ptr) __bos((ptr), 1)
+ #endif
+
+ #if __GNUC_MINOR__ > 0
+diff -urNp linux-2.6.35.4/include/linux/compiler.h linux-2.6.35.4/include/linux/compiler.h
+--- linux-2.6.35.4/include/linux/compiler.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/compiler.h 2010-09-17 20:12:09.000000000 -0400
+@@ -267,6 +267,22 @@ void ftrace_likely_update(struct ftrace_
+ #define __cold
+ #endif
+
++#ifndef __alloc_size
++#define __alloc_size
++#endif
++
++#ifndef __bos
++#define __bos
++#endif
++
++#ifndef __bos0
++#define __bos0
++#endif
++
++#ifndef __bos1
++#define __bos1
++#endif
++
+ /* Simple shorthand for a section definition */
+ #ifndef __section
+ # define __section(S) __attribute__ ((__section__(#S)))
+diff -urNp linux-2.6.35.4/include/linux/decompress/mm.h linux-2.6.35.4/include/linux/decompress/mm.h
+--- linux-2.6.35.4/include/linux/decompress/mm.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/decompress/mm.h 2010-09-17 20:12:09.000000000 -0400
+@@ -78,7 +78,7 @@ static void free(void *where)
+ * warnings when not needed (indeed large_malloc / large_free are not
+ * needed by inflate */
+
+-#define malloc(a) kmalloc(a, GFP_KERNEL)
++#define malloc(a) kmalloc((a), GFP_KERNEL)
+ #define free(a) kfree(a)
+
+ #define large_malloc(a) vmalloc(a)
+diff -urNp linux-2.6.35.4/include/linux/dma-mapping.h linux-2.6.35.4/include/linux/dma-mapping.h
+--- linux-2.6.35.4/include/linux/dma-mapping.h 2010-08-26 19:47:12.000000000 -0400
++++ linux-2.6.35.4/include/linux/dma-mapping.h 2010-09-17 20:12:09.000000000 -0400
+@@ -16,40 +16,40 @@ enum dma_data_direction {
+ };
+
+ struct dma_map_ops {
+- void* (*alloc_coherent)(struct device *dev, size_t size,
++ void* (* const alloc_coherent)(struct device *dev, size_t size,
+ dma_addr_t
*dma_handle, gfp_t gfp); +- void (*free_coherent)(struct device *dev, size_t size, ++ void (* const free_coherent)(struct device *dev, size_t size, + void *vaddr, dma_addr_t dma_handle); +- dma_addr_t (*map_page)(struct device *dev, struct page *page, ++ dma_addr_t (* const map_page)(struct device *dev, struct page *page, + unsigned long offset, size_t size, + enum dma_data_direction dir, + struct dma_attrs *attrs); +- void (*unmap_page)(struct device *dev, dma_addr_t dma_handle, ++ void (* const unmap_page)(struct device *dev, dma_addr_t dma_handle, + size_t size, enum dma_data_direction dir, + struct dma_attrs *attrs); +- int (*map_sg)(struct device *dev, struct scatterlist *sg, ++ int (* const map_sg)(struct device *dev, struct scatterlist *sg, + int nents, enum dma_data_direction dir, + struct dma_attrs *attrs); +- void (*unmap_sg)(struct device *dev, ++ void (* const unmap_sg)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir, + struct dma_attrs *attrs); +- void (*sync_single_for_cpu)(struct device *dev, ++ void (* const sync_single_for_cpu)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); +- void (*sync_single_for_device)(struct device *dev, ++ void (* const sync_single_for_device)(struct device *dev, + dma_addr_t dma_handle, size_t size, + enum dma_data_direction dir); +- void (*sync_sg_for_cpu)(struct device *dev, ++ void (* const sync_sg_for_cpu)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); +- void (*sync_sg_for_device)(struct device *dev, ++ void (* const sync_sg_for_device)(struct device *dev, + struct scatterlist *sg, int nents, + enum dma_data_direction dir); +- int (*mapping_error)(struct device *dev, dma_addr_t dma_addr); +- int (*dma_supported)(struct device *dev, u64 mask); +- int (*set_dma_mask)(struct device *dev, u64 mask); +- int is_phys; ++ int (* const mapping_error)(struct device *dev, dma_addr_t dma_addr); ++ int (* const dma_supported)(struct device *dev, u64 mask); ++ int (* set_dma_mask)(struct device *dev, u64 mask); ++ const int is_phys; + }; + + #define DMA_BIT_MASK(n) (((n) == 64) ? 
~0ULL : ((1ULL<<(n))-1)) +diff -urNp linux-2.6.35.4/include/linux/elf.h linux-2.6.35.4/include/linux/elf.h +--- linux-2.6.35.4/include/linux/elf.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/elf.h 2010-09-17 20:12:09.000000000 -0400 +@@ -49,6 +49,17 @@ typedef __s64 Elf64_Sxword; + #define PT_GNU_EH_FRAME 0x6474e550 + + #define PT_GNU_STACK (PT_LOOS + 0x474e551) ++#define PT_GNU_RELRO (PT_LOOS + 0x474e552) ++ ++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580) ++ ++/* Constants for the e_flags field */ ++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */ ++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */ ++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */ ++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */ ++/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */ ++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */ + + /* + * Extended Numbering +@@ -106,6 +117,8 @@ typedef __s64 Elf64_Sxword; + #define DT_DEBUG 21 + #define DT_TEXTREL 22 + #define DT_JMPREL 23 ++#define DT_FLAGS 30 ++ #define DF_TEXTREL 0x00000004 + #define DT_ENCODING 32 + #define OLD_DT_LOOS 0x60000000 + #define DT_LOOS 0x6000000d +@@ -252,6 +265,19 @@ typedef struct elf64_hdr { + #define PF_W 0x2 + #define PF_X 0x1 + ++#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */ ++#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */ ++#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */ ++#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */ ++#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */ ++#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */ ++/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */ ++/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */ ++#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */ ++#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */ ++#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */ ++#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */ ++ + typedef struct elf32_phdr{ + Elf32_Word p_type; + Elf32_Off p_offset; +@@ -344,6 +370,8 @@ typedef struct elf64_shdr { + #define EI_OSABI 7 + #define EI_PAD 8 + ++#define EI_PAX 14 ++ + #define ELFMAG0 0x7f /* EI_MAG */ + #define ELFMAG1 'E' + #define ELFMAG2 'L' +@@ -421,6 +449,7 @@ extern Elf32_Dyn _DYNAMIC []; + #define elf_note elf32_note + #define elf_addr_t Elf32_Off + #define Elf_Half Elf32_Half ++#define elf_dyn Elf32_Dyn + + #else + +@@ -431,6 +460,7 @@ extern Elf64_Dyn _DYNAMIC []; + #define elf_note elf64_note + #define elf_addr_t Elf64_Off + #define Elf_Half Elf64_Half ++#define elf_dyn Elf64_Dyn + + #endif + +diff -urNp linux-2.6.35.4/include/linux/fs.h linux-2.6.35.4/include/linux/fs.h +--- linux-2.6.35.4/include/linux/fs.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/fs.h 2010-09-17 20:12:37.000000000 -0400 +@@ -90,6 +90,11 @@ struct inodes_stat_t { + /* Expect random access pattern */ + #define FMODE_RANDOM ((__force fmode_t)0x1000) + ++/* Hack for grsec so as not to require read permission simply to execute ++ * a binary ++ */ ++#define FMODE_GREXEC ((__force fmode_t)0x2000) ++ + /* + * The below are the various read and write types that we support. 
Some of + * them include behavioral modifiers that send information down to the +@@ -571,41 +576,41 @@ typedef int (*read_actor_t)(read_descrip + unsigned long, unsigned long); + + struct address_space_operations { +- int (*writepage)(struct page *page, struct writeback_control *wbc); +- int (*readpage)(struct file *, struct page *); +- void (*sync_page)(struct page *); ++ int (* const writepage)(struct page *page, struct writeback_control *wbc); ++ int (* const readpage)(struct file *, struct page *); ++ void (* const sync_page)(struct page *); + + /* Write back some dirty pages from this mapping. */ +- int (*writepages)(struct address_space *, struct writeback_control *); ++ int (* const writepages)(struct address_space *, struct writeback_control *); + + /* Set a page dirty. Return true if this dirtied it */ +- int (*set_page_dirty)(struct page *page); ++ int (* const set_page_dirty)(struct page *page); + +- int (*readpages)(struct file *filp, struct address_space *mapping, ++ int (* const readpages)(struct file *filp, struct address_space *mapping, + struct list_head *pages, unsigned nr_pages); + +- int (*write_begin)(struct file *, struct address_space *mapping, ++ int (* const write_begin)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned flags, + struct page **pagep, void **fsdata); +- int (*write_end)(struct file *, struct address_space *mapping, ++ int (* const write_end)(struct file *, struct address_space *mapping, + loff_t pos, unsigned len, unsigned copied, + struct page *page, void *fsdata); + + /* Unfortunately this kludge is needed for FIBMAP. Don't use it */ +- sector_t (*bmap)(struct address_space *, sector_t); +- void (*invalidatepage) (struct page *, unsigned long); +- int (*releasepage) (struct page *, gfp_t); +- ssize_t (*direct_IO)(int, struct kiocb *, const struct iovec *iov, ++ sector_t (* const bmap)(struct address_space *, sector_t); ++ void (* const invalidatepage) (struct page *, unsigned long); ++ int (* const releasepage) (struct page *, gfp_t); ++ ssize_t (* const direct_IO)(int, struct kiocb *, const struct iovec *iov, + loff_t offset, unsigned long nr_segs); +- int (*get_xip_mem)(struct address_space *, pgoff_t, int, ++ int (* const get_xip_mem)(struct address_space *, pgoff_t, int, + void **, unsigned long *); + /* migrate the contents of a page to the specified target */ +- int (*migratepage) (struct address_space *, ++ int (* const migratepage) (struct address_space *, + struct page *, struct page *); +- int (*launder_page) (struct page *); +- int (*is_partially_uptodate) (struct page *, read_descriptor_t *, ++ int (* const launder_page) (struct page *); ++ int (* const is_partially_uptodate) (struct page *, read_descriptor_t *, + unsigned long); +- int (*error_remove_page)(struct address_space *, struct page *); ++ int (* const error_remove_page)(struct address_space *, struct page *); + }; + + /* +@@ -1035,19 +1040,19 @@ static inline int file_check_writeable(s + typedef struct files_struct *fl_owner_t; + + struct file_lock_operations { +- void (*fl_copy_lock)(struct file_lock *, struct file_lock *); +- void (*fl_release_private)(struct file_lock *); ++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *); ++ void (* const fl_release_private)(struct file_lock *); + }; + + struct lock_manager_operations { +- int (*fl_compare_owner)(struct file_lock *, struct file_lock *); +- void (*fl_notify)(struct file_lock *); /* unblock callback */ +- int (*fl_grant)(struct file_lock *, struct file_lock *, int); +- 
void (*fl_copy_lock)(struct file_lock *, struct file_lock *); +- void (*fl_release_private)(struct file_lock *); +- void (*fl_break)(struct file_lock *); +- int (*fl_mylease)(struct file_lock *, struct file_lock *); +- int (*fl_change)(struct file_lock **, int); ++ int (* const fl_compare_owner)(struct file_lock *, struct file_lock *); ++ void (* const fl_notify)(struct file_lock *); /* unblock callback */ ++ int (* const fl_grant)(struct file_lock *, struct file_lock *, int); ++ void (* const fl_copy_lock)(struct file_lock *, struct file_lock *); ++ void (* const fl_release_private)(struct file_lock *); ++ void (* const fl_break)(struct file_lock *); ++ int (* const fl_mylease)(struct file_lock *, struct file_lock *); ++ int (* const fl_change)(struct file_lock **, int); + }; + + struct lock_manager { +@@ -1440,7 +1445,7 @@ struct fiemap_extent_info { + unsigned int fi_flags; /* Flags as passed from user */ + unsigned int fi_extents_mapped; /* Number of mapped extents */ + unsigned int fi_extents_max; /* Size of fiemap_extent array */ +- struct fiemap_extent *fi_extents_start; /* Start of fiemap_extent ++ struct fiemap_extent __user *fi_extents_start; /* Start of fiemap_extent + * array */ + }; + int fiemap_fill_next_extent(struct fiemap_extent_info *info, u64 logical, +@@ -1557,30 +1562,30 @@ extern ssize_t vfs_writev(struct file *, + unsigned long, loff_t *); + + struct super_operations { +- struct inode *(*alloc_inode)(struct super_block *sb); +- void (*destroy_inode)(struct inode *); ++ struct inode *(* const alloc_inode)(struct super_block *sb); ++ void (* const destroy_inode)(struct inode *); + +- void (*dirty_inode) (struct inode *); +- int (*write_inode) (struct inode *, struct writeback_control *wbc); +- void (*drop_inode) (struct inode *); +- void (*delete_inode) (struct inode *); +- void (*put_super) (struct super_block *); +- void (*write_super) (struct super_block *); +- int (*sync_fs)(struct super_block *sb, int wait); +- int (*freeze_fs) (struct super_block *); +- int (*unfreeze_fs) (struct super_block *); +- int (*statfs) (struct dentry *, struct kstatfs *); +- int (*remount_fs) (struct super_block *, int *, char *); +- void (*clear_inode) (struct inode *); +- void (*umount_begin) (struct super_block *); ++ void (* const dirty_inode) (struct inode *); ++ int (* const write_inode) (struct inode *, struct writeback_control *wbc); ++ void (* const drop_inode) (struct inode *); ++ void (* const delete_inode) (struct inode *); ++ void (* const put_super) (struct super_block *); ++ void (* const write_super) (struct super_block *); ++ int (* const sync_fs)(struct super_block *sb, int wait); ++ int (* const freeze_fs) (struct super_block *); ++ int (* const unfreeze_fs) (struct super_block *); ++ int (* const statfs) (struct dentry *, struct kstatfs *); ++ int (* const remount_fs) (struct super_block *, int *, char *); ++ void (* const clear_inode) (struct inode *); ++ void (* const umount_begin) (struct super_block *); + +- int (*show_options)(struct seq_file *, struct vfsmount *); +- int (*show_stats)(struct seq_file *, struct vfsmount *); ++ int (* const show_options)(struct seq_file *, struct vfsmount *); ++ int (* const show_stats)(struct seq_file *, struct vfsmount *); + #ifdef CONFIG_QUOTA +- ssize_t (*quota_read)(struct super_block *, int, char *, size_t, loff_t); +- ssize_t (*quota_write)(struct super_block *, int, const char *, size_t, loff_t); ++ ssize_t (* const quota_read)(struct super_block *, int, char *, size_t, loff_t); ++ ssize_t (* const quota_write)(struct 
super_block *, int, const char *, size_t, loff_t); + #endif +- int (*bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); ++ int (* const bdev_try_to_free_page)(struct super_block*, struct page*, gfp_t); + }; + + /* +diff -urNp linux-2.6.35.4/include/linux/fs_struct.h linux-2.6.35.4/include/linux/fs_struct.h +--- linux-2.6.35.4/include/linux/fs_struct.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/fs_struct.h 2010-09-17 20:12:09.000000000 -0400 +@@ -4,7 +4,7 @@ + #include <linux/path.h> + + struct fs_struct { +- int users; ++ atomic_t users; + rwlock_t lock; + int umask; + int in_exec; +diff -urNp linux-2.6.35.4/include/linux/genhd.h linux-2.6.35.4/include/linux/genhd.h +--- linux-2.6.35.4/include/linux/genhd.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/genhd.h 2010-09-17 20:12:09.000000000 -0400 +@@ -162,7 +162,7 @@ struct gendisk { + + struct timer_rand_state *random; + +- atomic_t sync_io; /* RAID */ ++ atomic_unchecked_t sync_io; /* RAID */ + struct work_struct async_notify; + #ifdef CONFIG_BLK_DEV_INTEGRITY + struct blk_integrity *integrity; +diff -urNp linux-2.6.35.4/include/linux/gracl.h linux-2.6.35.4/include/linux/gracl.h +--- linux-2.6.35.4/include/linux/gracl.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/gracl.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,310 @@ ++#ifndef GR_ACL_H ++#define GR_ACL_H ++ ++#include <linux/grdefs.h> ++#include <linux/resource.h> ++#include <linux/capability.h> ++#include <linux/dcache.h> ++#include <asm/resource.h> ++ ++/* Major status information */ ++ ++#define GR_VERSION "grsecurity 2.2.0" ++#define GRSECURITY_VERSION 0x2200 ++ ++enum { ++ GR_SHUTDOWN = 0, ++ GR_ENABLE = 1, ++ GR_SPROLE = 2, ++ GR_RELOAD = 3, ++ GR_SEGVMOD = 4, ++ GR_STATUS = 5, ++ GR_UNSPROLE = 6, ++ GR_PASSSET = 7, ++ GR_SPROLEPAM = 8, ++}; ++ ++/* Password setup definitions ++ * kernel/grhash.c */ ++enum { ++ GR_PW_LEN = 128, ++ GR_SALT_LEN = 16, ++ GR_SHA_LEN = 32, ++}; ++ ++enum { ++ GR_SPROLE_LEN = 64, ++}; ++ ++#define GR_NLIMITS 32 ++ ++/* Begin Data Structures */ ++ ++struct sprole_pw { ++ unsigned char *rolename; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */ ++}; ++ ++struct name_entry { ++ __u32 key; ++ ino_t inode; ++ dev_t device; ++ char *name; ++ __u16 len; ++ __u8 deleted; ++ struct name_entry *prev; ++ struct name_entry *next; ++}; ++ ++struct inodev_entry { ++ struct name_entry *nentry; ++ struct inodev_entry *prev; ++ struct inodev_entry *next; ++}; ++ ++struct acl_role_db { ++ struct acl_role_label **r_hash; ++ __u32 r_size; ++}; ++ ++struct inodev_db { ++ struct inodev_entry **i_hash; ++ __u32 i_size; ++}; ++ ++struct name_db { ++ struct name_entry **n_hash; ++ __u32 n_size; ++}; ++ ++struct crash_uid { ++ uid_t uid; ++ unsigned long expires; ++}; ++ ++struct gr_hash_struct { ++ void **table; ++ void **nametable; ++ void *first; ++ __u32 table_size; ++ __u32 used_size; ++ int type; ++}; ++ ++/* Userspace Grsecurity ACL data structures */ ++ ++struct acl_subject_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ kernel_cap_t cap_mask; ++ kernel_cap_t cap_lower; ++ kernel_cap_t cap_invert_audit; ++ ++ struct rlimit res[GR_NLIMITS]; ++ __u32 resmask; ++ ++ __u8 user_trans_type; ++ __u8 group_trans_type; ++ uid_t *user_transitions; ++ gid_t *group_transitions; ++ __u16 user_trans_num; ++ __u16 group_trans_num; ++ ++ __u32 ip_proto[8]; ++ __u32 ip_type; ++ struct acl_ip_label 
**ips; ++ __u32 ip_num; ++ __u32 inaddr_any_override; ++ ++ __u32 crashes; ++ unsigned long expires; ++ ++ struct acl_subject_label *parent_subject; ++ struct gr_hash_struct *hash; ++ struct acl_subject_label *prev; ++ struct acl_subject_label *next; ++ ++ struct acl_object_label **obj_hash; ++ __u32 obj_hash_size; ++ __u16 pax_flags; ++}; ++ ++struct role_allowed_ip { ++ __u32 addr; ++ __u32 netmask; ++ ++ struct role_allowed_ip *prev; ++ struct role_allowed_ip *next; ++}; ++ ++struct role_transition { ++ char *rolename; ++ ++ struct role_transition *prev; ++ struct role_transition *next; ++}; ++ ++struct acl_role_label { ++ char *rolename; ++ uid_t uidgid; ++ __u16 roletype; ++ ++ __u16 auth_attempts; ++ unsigned long expires; ++ ++ struct acl_subject_label *root_label; ++ struct gr_hash_struct *hash; ++ ++ struct acl_role_label *prev; ++ struct acl_role_label *next; ++ ++ struct role_transition *transitions; ++ struct role_allowed_ip *allowed_ips; ++ uid_t *domain_children; ++ __u16 domain_child_num; ++ ++ struct acl_subject_label **subj_hash; ++ __u32 subj_hash_size; ++}; ++ ++struct user_acl_role_db { ++ struct acl_role_label **r_table; ++ __u32 num_pointers; /* Number of allocations to track */ ++ __u32 num_roles; /* Number of roles */ ++ __u32 num_domain_children; /* Number of domain children */ ++ __u32 num_subjects; /* Number of subjects */ ++ __u32 num_objects; /* Number of objects */ ++}; ++ ++struct acl_object_label { ++ char *filename; ++ ino_t inode; ++ dev_t device; ++ __u32 mode; ++ ++ struct acl_subject_label *nested; ++ struct acl_object_label *globbed; ++ ++ /* next two structures not used */ ++ ++ struct acl_object_label *prev; ++ struct acl_object_label *next; ++}; ++ ++struct acl_ip_label { ++ char *iface; ++ __u32 addr; ++ __u32 netmask; ++ __u16 low, high; ++ __u8 mode; ++ __u32 type; ++ __u32 proto[8]; ++ ++ /* next two structures not used */ ++ ++ struct acl_ip_label *prev; ++ struct acl_ip_label *next; ++}; ++ ++struct gr_arg { ++ struct user_acl_role_db role_db; ++ unsigned char pw[GR_PW_LEN]; ++ unsigned char salt[GR_SALT_LEN]; ++ unsigned char sum[GR_SHA_LEN]; ++ unsigned char sp_role[GR_SPROLE_LEN]; ++ struct sprole_pw *sprole_pws; ++ dev_t segv_device; ++ ino_t segv_inode; ++ uid_t segv_uid; ++ __u16 num_sprole_pws; ++ __u16 mode; ++}; ++ ++struct gr_arg_wrapper { ++ struct gr_arg *arg; ++ __u32 version; ++ __u32 size; ++}; ++ ++struct subject_map { ++ struct acl_subject_label *user; ++ struct acl_subject_label *kernel; ++ struct subject_map *prev; ++ struct subject_map *next; ++}; ++ ++struct acl_subj_map_db { ++ struct subject_map **s_hash; ++ __u32 s_size; ++}; ++ ++/* End Data Structures Section */ ++ ++/* Hash functions generated by empirical testing by Brad Spengler ++ Makes good use of the low bits of the inode. Generally 0-1 times ++ in loop for successful match. 0-3 for unsuccessful match. 
++ Shift/add algorithm with modulus of table size and an XOR*/ ++ ++static __inline__ unsigned int ++rhash(const uid_t uid, const __u16 type, const unsigned int sz) ++{ ++ return ((((uid + type) << (16 + type)) ^ uid) % sz); ++} ++ ++ static __inline__ unsigned int ++shash(const struct acl_subject_label *userp, const unsigned int sz) ++{ ++ return ((const unsigned long)userp % sz); ++} ++ ++static __inline__ unsigned int ++fhash(const ino_t ino, const dev_t dev, const unsigned int sz) ++{ ++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz); ++} ++ ++static __inline__ unsigned int ++nhash(const char *name, const __u16 len, const unsigned int sz) ++{ ++ return full_name_hash((const unsigned char *)name, len) % sz; ++} ++ ++#define FOR_EACH_ROLE_START(role) \ ++ role = role_list; \ ++ while (role) { ++ ++#define FOR_EACH_ROLE_END(role) \ ++ role = role->prev; \ ++ } ++ ++#define FOR_EACH_SUBJECT_START(role,subj,iter) \ ++ subj = NULL; \ ++ iter = 0; \ ++ while (iter < role->subj_hash_size) { \ ++ if (subj == NULL) \ ++ subj = role->subj_hash[iter]; \ ++ if (subj == NULL) { \ ++ iter++; \ ++ continue; \ ++ } ++ ++#define FOR_EACH_SUBJECT_END(subj,iter) \ ++ subj = subj->next; \ ++ if (subj == NULL) \ ++ iter++; \ ++ } ++ ++ ++#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \ ++ subj = role->hash->first; \ ++ while (subj != NULL) { ++ ++#define FOR_EACH_NESTED_SUBJECT_END(subj) \ ++ subj = subj->next; \ ++ } ++ ++#endif ++ +diff -urNp linux-2.6.35.4/include/linux/gralloc.h linux-2.6.35.4/include/linux/gralloc.h +--- linux-2.6.35.4/include/linux/gralloc.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/gralloc.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,9 @@ ++#ifndef __GRALLOC_H ++#define __GRALLOC_H ++ ++void acl_free_all(void); ++int acl_alloc_stack_init(unsigned long size); ++void *acl_alloc(unsigned long len); ++void *acl_alloc_num(unsigned long num, unsigned long len); ++ ++#endif +diff -urNp linux-2.6.35.4/include/linux/grdefs.h linux-2.6.35.4/include/linux/grdefs.h +--- linux-2.6.35.4/include/linux/grdefs.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/grdefs.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,136 @@ ++#ifndef GRDEFS_H ++#define GRDEFS_H ++ ++/* Begin grsecurity status declarations */ ++ ++enum { ++ GR_READY = 0x01, ++ GR_STATUS_INIT = 0x00 // disabled state ++}; ++ ++/* Begin ACL declarations */ ++ ++/* Role flags */ ++ ++enum { ++ GR_ROLE_USER = 0x0001, ++ GR_ROLE_GROUP = 0x0002, ++ GR_ROLE_DEFAULT = 0x0004, ++ GR_ROLE_SPECIAL = 0x0008, ++ GR_ROLE_AUTH = 0x0010, ++ GR_ROLE_NOPW = 0x0020, ++ GR_ROLE_GOD = 0x0040, ++ GR_ROLE_LEARN = 0x0080, ++ GR_ROLE_TPE = 0x0100, ++ GR_ROLE_DOMAIN = 0x0200, ++ GR_ROLE_PAM = 0x0400 ++}; ++ ++/* ACL Subject and Object mode flags */ ++enum { ++ GR_DELETED = 0x80000000 ++}; ++ ++/* ACL Object-only mode flags */ ++enum { ++ GR_READ = 0x00000001, ++ GR_APPEND = 0x00000002, ++ GR_WRITE = 0x00000004, ++ GR_EXEC = 0x00000008, ++ GR_FIND = 0x00000010, ++ GR_INHERIT = 0x00000020, ++ GR_SETID = 0x00000040, ++ GR_CREATE = 0x00000080, ++ GR_DELETE = 0x00000100, ++ GR_LINK = 0x00000200, ++ GR_AUDIT_READ = 0x00000400, ++ GR_AUDIT_APPEND = 0x00000800, ++ GR_AUDIT_WRITE = 0x00001000, ++ GR_AUDIT_EXEC = 0x00002000, ++ GR_AUDIT_FIND = 0x00004000, ++ GR_AUDIT_INHERIT= 0x00008000, ++ GR_AUDIT_SETID = 0x00010000, ++ GR_AUDIT_CREATE = 0x00020000, ++ GR_AUDIT_DELETE = 0x00040000, ++ GR_AUDIT_LINK = 0x00080000, ++ GR_PTRACERD = 0x00100000, ++ GR_NOPTRACE = 0x00200000, ++ GR_SUPPRESS = 
0x00400000, ++ GR_NOLEARN = 0x00800000 ++}; ++ ++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \ ++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \ ++ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK) ++ ++/* ACL subject-only mode flags */ ++enum { ++ GR_KILL = 0x00000001, ++ GR_VIEW = 0x00000002, ++ GR_PROTECTED = 0x00000004, ++ GR_LEARN = 0x00000008, ++ GR_OVERRIDE = 0x00000010, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_DUMMY = 0x00000020, ++ GR_PROTSHM = 0x00000040, ++ GR_KILLPROC = 0x00000080, ++ GR_KILLIPPROC = 0x00000100, ++ /* just a placeholder, this mode is only used in userspace */ ++ GR_NOTROJAN = 0x00000200, ++ GR_PROTPROCFD = 0x00000400, ++ GR_PROCACCT = 0x00000800, ++ GR_RELAXPTRACE = 0x00001000, ++ GR_NESTED = 0x00002000, ++ GR_INHERITLEARN = 0x00004000, ++ GR_PROCFIND = 0x00008000, ++ GR_POVERRIDE = 0x00010000, ++ GR_KERNELAUTH = 0x00020000, ++}; ++ ++enum { ++ GR_PAX_ENABLE_SEGMEXEC = 0x0001, ++ GR_PAX_ENABLE_PAGEEXEC = 0x0002, ++ GR_PAX_ENABLE_MPROTECT = 0x0004, ++ GR_PAX_ENABLE_RANDMMAP = 0x0008, ++ GR_PAX_ENABLE_EMUTRAMP = 0x0010, ++ GR_PAX_DISABLE_SEGMEXEC = 0x0100, ++ GR_PAX_DISABLE_PAGEEXEC = 0x0200, ++ GR_PAX_DISABLE_MPROTECT = 0x0400, ++ GR_PAX_DISABLE_RANDMMAP = 0x0800, ++ GR_PAX_DISABLE_EMUTRAMP = 0x1000, ++}; ++ ++enum { ++ GR_ID_USER = 0x01, ++ GR_ID_GROUP = 0x02, ++}; ++ ++enum { ++ GR_ID_ALLOW = 0x01, ++ GR_ID_DENY = 0x02, ++}; ++ ++#define GR_CRASH_RES 31 ++#define GR_UIDTABLE_MAX 500 ++ ++/* begin resource learning section */ ++enum { ++ GR_RLIM_CPU_BUMP = 60, ++ GR_RLIM_FSIZE_BUMP = 50000, ++ GR_RLIM_DATA_BUMP = 10000, ++ GR_RLIM_STACK_BUMP = 1000, ++ GR_RLIM_CORE_BUMP = 10000, ++ GR_RLIM_RSS_BUMP = 500000, ++ GR_RLIM_NPROC_BUMP = 1, ++ GR_RLIM_NOFILE_BUMP = 5, ++ GR_RLIM_MEMLOCK_BUMP = 50000, ++ GR_RLIM_AS_BUMP = 500000, ++ GR_RLIM_LOCKS_BUMP = 2, ++ GR_RLIM_SIGPENDING_BUMP = 5, ++ GR_RLIM_MSGQUEUE_BUMP = 10000, ++ GR_RLIM_NICE_BUMP = 1, ++ GR_RLIM_RTPRIO_BUMP = 1, ++ GR_RLIM_RTTIME_BUMP = 1000000 ++}; ++ ++#endif +diff -urNp linux-2.6.35.4/include/linux/grinternal.h linux-2.6.35.4/include/linux/grinternal.h +--- linux-2.6.35.4/include/linux/grinternal.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/grinternal.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,211 @@ ++#ifndef __GRINTERNAL_H ++#define __GRINTERNAL_H ++ ++#ifdef CONFIG_GRKERNSEC ++ ++#include <linux/fs.h> ++#include <linux/mnt_namespace.h> ++#include <linux/nsproxy.h> ++#include <linux/gracl.h> ++#include <linux/grdefs.h> ++#include <linux/grmsg.h> ++ ++void gr_add_learn_entry(const char *fmt, ...) 
++ __attribute__ ((format (printf, 1, 2))); ++__u32 gr_search_file(const struct dentry *dentry, const __u32 mode, ++ const struct vfsmount *mnt); ++__u32 gr_check_create(const struct dentry *new_dentry, ++ const struct dentry *parent, ++ const struct vfsmount *mnt, const __u32 mode); ++int gr_check_protected_task(const struct task_struct *task); ++__u32 to_gr_audit(const __u32 reqmode); ++int gr_set_acls(const int type); ++ ++int gr_acl_is_enabled(void); ++char gr_roletype_to_char(void); ++ ++void gr_handle_alertkill(struct task_struct *task); ++char *gr_to_filename(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename1(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename2(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++char *gr_to_filename3(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++ ++extern int grsec_enable_harden_ptrace; ++extern int grsec_enable_link; ++extern int grsec_enable_fifo; ++extern int grsec_enable_execve; ++extern int grsec_enable_shm; ++extern int grsec_enable_execlog; ++extern int grsec_enable_signal; ++extern int grsec_enable_audit_ptrace; ++extern int grsec_enable_forkfail; ++extern int grsec_enable_time; ++extern int grsec_enable_rofs; ++extern int grsec_enable_chroot_shmat; ++extern int grsec_enable_chroot_findtask; ++extern int grsec_enable_chroot_mount; ++extern int grsec_enable_chroot_double; ++extern int grsec_enable_chroot_pivot; ++extern int grsec_enable_chroot_chdir; ++extern int grsec_enable_chroot_chmod; ++extern int grsec_enable_chroot_mknod; ++extern int grsec_enable_chroot_fchdir; ++extern int grsec_enable_chroot_nice; ++extern int grsec_enable_chroot_execlog; ++extern int grsec_enable_chroot_caps; ++extern int grsec_enable_chroot_sysctl; ++extern int grsec_enable_chroot_unix; ++extern int grsec_enable_tpe; ++extern int grsec_tpe_gid; ++extern int grsec_enable_tpe_all; ++extern int grsec_enable_tpe_invert; ++extern int grsec_enable_socket_all; ++extern int grsec_socket_all_gid; ++extern int grsec_enable_socket_client; ++extern int grsec_socket_client_gid; ++extern int grsec_enable_socket_server; ++extern int grsec_socket_server_gid; ++extern int grsec_audit_gid; ++extern int grsec_enable_group; ++extern int grsec_enable_audit_textrel; ++extern int grsec_enable_mount; ++extern int grsec_enable_chdir; ++extern int grsec_resource_logging; ++extern int grsec_enable_blackhole; ++extern int grsec_lastack_retries; ++extern int grsec_lock; ++ ++extern spinlock_t grsec_alert_lock; ++extern unsigned long grsec_alert_wtime; ++extern unsigned long grsec_alert_fyet; ++ ++extern spinlock_t grsec_audit_lock; ++ ++extern rwlock_t grsec_exec_file_lock; ++ ++#define gr_task_fullpath(tsk) (tsk->exec_file ? \ ++ gr_to_filename2(tsk->exec_file->f_path.dentry, \ ++ tsk->exec_file->f_vfsmnt) : "/") ++ ++#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \ ++ gr_to_filename3(tsk->parent->exec_file->f_path.dentry, \ ++ tsk->parent->exec_file->f_vfsmnt) : "/") ++ ++#define gr_task_fullpath0(tsk) (tsk->exec_file ? \ ++ gr_to_filename(tsk->exec_file->f_path.dentry, \ ++ tsk->exec_file->f_vfsmnt) : "/") ++ ++#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? 
\ ++ gr_to_filename1(tsk->parent->exec_file->f_path.dentry, \ ++ tsk->parent->exec_file->f_vfsmnt) : "/") ++ ++#define proc_is_chrooted(tsk_a) (tsk_a->gr_is_chrooted) ++ ++#define have_same_root(tsk_a,tsk_b) (tsk_a->gr_chroot_dentry == tsk_b->gr_chroot_dentry) ++ ++#define DEFAULTSECARGS(task, cred, pcred) gr_task_fullpath(task), task->comm, \ ++ task->pid, cred->uid, \ ++ cred->euid, cred->gid, cred->egid, \ ++ gr_parent_task_fullpath(task), \ ++ task->parent->comm, task->parent->pid, \ ++ pcred->uid, pcred->euid, \ ++ pcred->gid, pcred->egid ++ ++#define GR_CHROOT_CAPS {{ \ ++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \ ++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \ ++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \ ++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \ ++ CAP_TO_MASK(CAP_IPC_OWNER) , 0 }} ++ ++#define security_learn(normal_msg,args...) \ ++({ \ ++ read_lock(&grsec_exec_file_lock); \ ++ gr_add_learn_entry(normal_msg "\n", ## args); \ ++ read_unlock(&grsec_exec_file_lock); \ ++}) ++ ++enum { ++ GR_DO_AUDIT, ++ GR_DONT_AUDIT, ++ GR_DONT_AUDIT_GOOD ++}; ++ ++enum { ++ GR_TTYSNIFF, ++ GR_RBAC, ++ GR_RBAC_STR, ++ GR_STR_RBAC, ++ GR_RBAC_MODE2, ++ GR_RBAC_MODE3, ++ GR_FILENAME, ++ GR_SYSCTL_HIDDEN, ++ GR_NOARGS, ++ GR_ONE_INT, ++ GR_ONE_INT_TWO_STR, ++ GR_ONE_STR, ++ GR_STR_INT, ++ GR_TWO_INT, ++ GR_THREE_INT, ++ GR_FIVE_INT_TWO_STR, ++ GR_TWO_STR, ++ GR_THREE_STR, ++ GR_FOUR_STR, ++ GR_STR_FILENAME, ++ GR_FILENAME_STR, ++ GR_FILENAME_TWO_INT, ++ GR_FILENAME_TWO_INT_STR, ++ GR_TEXTREL, ++ GR_PTRACE, ++ GR_RESOURCE, ++ GR_CAP, ++ GR_SIG, ++ GR_SIG2, ++ GR_CRASH1, ++ GR_CRASH2, ++ GR_PSACCT ++}; ++ ++#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str) ++#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task) ++#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt) ++#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str) ++#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt) ++#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2) ++#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3) ++#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt) ++#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS) ++#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num) ++#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2) ++#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str) ++#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num) ++#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2) ++#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3) ++#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2) ++#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, 
GR_TWO_STR, str1, str2) ++#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3) ++#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4) ++#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt) ++#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str) ++#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2) ++#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str) ++#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2) ++#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task) ++#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2) ++#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str) ++#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr) ++#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num) ++#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong) ++#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1) ++#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) ++ ++void gr_log_varargs(int audit, const char *msg, int argtypes, ...); ++ ++#endif ++ ++#endif +diff -urNp linux-2.6.35.4/include/linux/grmsg.h linux-2.6.35.4/include/linux/grmsg.h +--- linux-2.6.35.4/include/linux/grmsg.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/grmsg.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,108 @@ ++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u" ++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " ++#define GR_STOPMOD_MSG "denied modification of module state by " ++#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by " ++#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by " ++#define GR_IOPERM_MSG "denied use of ioperm() by " ++#define GR_IOPL_MSG "denied use of iopl() by " ++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " ++#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by " ++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " ++#define GR_KMEM_MSG "denied write of /dev/kmem by " ++#define GR_PORT_OPEN_MSG "denied open of /dev/port by " ++#define GR_MEM_WRITE_MSG "denied write of /dev/mem by " ++#define GR_MEM_MMAP_MSG "denied mmap write of /dev/[k]mem by " ++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " ++#define GR_LEARN_AUDIT_MSG 
"%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4" ++#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4" ++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " ++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " ++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " ++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " ++#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by " ++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " ++#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by " ++#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against " ++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " ++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " ++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " ++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " ++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " ++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " ++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " ++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " ++#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by " ++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " ++#define GR_NPROC_MSG "denied overstep of process limit by " ++#define GR_EXEC_ACL_MSG "%s execution of %.950s by " ++#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " ++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds" ++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds" ++#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by " ++#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by " ++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " ++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " ++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " ++#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by " ++#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " ++#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by " ++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " ++#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by " ++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " ++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " ++#define GR_INITF_ACL_MSG "init_variables() failed %s by " ++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. 
To disable acls at startup use <kernel image name> gracl=off from your boot loader" ++#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbaged by " ++#define GR_SHUTS_ACL_MSG "shutdown auth success for " ++#define GR_SHUTF_ACL_MSG "shutdown auth failure for " ++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " ++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " ++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " ++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " ++#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by " ++#define GR_ENABLEF_ACL_MSG "unable to load %s for " ++#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system" ++#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by " ++#define GR_RELOADF_ACL_MSG "failed reload of %s for " ++#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for " ++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " ++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " ++#define GR_SPROLEF_ACL_MSG "special role %s failure for " ++#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for " ++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " ++#define GR_INVMODE_ACL_MSG "invalid mode %d by " ++#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by " ++#define GR_FAILFORK_MSG "failed fork with errno %d by " ++#define GR_NICE_CHROOT_MSG "denied priority change by " ++#define GR_UNISIGLOG_MSG "%.32s occurred at %p in " ++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " ++#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by " ++#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by " ++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " ++#define GR_TIME_MSG "time set by " ++#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by " ++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " ++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " ++#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by " ++#define GR_SOCK2_MSG "denied socket(%d,%.16s,%.16s) by " ++#define GR_BIND_MSG "denied bind() by " ++#define GR_CONNECT_MSG "denied connect() by " ++#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by " ++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4" ++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " ++#define GR_CAP_ACL_MSG "use of %s denied for " ++#define GR_CAP_ACL_MSG2 "use of %s permitted for " ++#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for " ++#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for " ++#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by " ++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by " ++#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by " ++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " ++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by " ++#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for " ++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " ++#define GR_NONROOT_MODLOAD_MSG "denied kernel module auto-load of %.64s by " ++#define GR_VM86_MSG 
"denied use of vm86 by " ++#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by " +diff -urNp linux-2.6.35.4/include/linux/grsecurity.h linux-2.6.35.4/include/linux/grsecurity.h +--- linux-2.6.35.4/include/linux/grsecurity.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/grsecurity.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,203 @@ ++#ifndef GR_SECURITY_H ++#define GR_SECURITY_H ++#include <linux/fs.h> ++#include <linux/fs_struct.h> ++#include <linux/binfmts.h> ++#include <linux/gracl.h> ++ ++/* notify of brain-dead configs */ ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC) ++#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled." ++#endif ++#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS) ++#error "CONFIG_PAX_NOEXEC enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled." ++#endif ++#if defined(CONFIG_PAX_ASLR) && (defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)) && !defined(CONFIG_PAX_EI_PAX) && !defined(CONFIG_PAX_PT_PAX_FLAGS) ++#error "CONFIG_PAX_ASLR enabled, but neither CONFIG_PAX_EI_PAX nor CONFIG_PAX_PT_PAX_FLAGS are enabled." ++#endif ++#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP) ++#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled." ++#endif ++#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR) ++#error "CONFIG_PAX enabled, but no PaX options are enabled." ++#endif ++ ++void gr_handle_brute_attach(struct task_struct *p); ++void gr_handle_brute_check(void); ++ ++char gr_roletype_to_char(void); ++ ++int gr_check_user_change(int real, int effective, int fs); ++int gr_check_group_change(int real, int effective, int fs); ++ ++void gr_del_task_from_ip_table(struct task_struct *p); ++ ++int gr_pid_is_chrooted(struct task_struct *p); ++int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type); ++int gr_handle_chroot_nice(void); ++int gr_handle_chroot_sysctl(const int op); ++int gr_handle_chroot_setpriority(struct task_struct *p, ++ const int niceval); ++int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt); ++int gr_handle_chroot_chroot(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_chroot_caps(struct path *path); ++void gr_handle_chroot_chdir(struct path *path); ++int gr_handle_chroot_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mknod(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int mode); ++int gr_handle_chroot_mount(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const char *dev_name); ++int gr_handle_chroot_pivot(void); ++int gr_handle_chroot_unix(const pid_t pid); ++ ++int gr_handle_rawio(const struct inode *inode); ++int gr_handle_nproc(void); ++ ++void gr_handle_ioperm(void); ++void gr_handle_iopl(void); ++ ++int gr_tpe_allow(const struct file *file); ++ ++void gr_set_chroot_entries(struct task_struct *task, struct path *path); ++void gr_clear_chroot_entries(struct task_struct *task); ++ ++void gr_log_forkfail(const int retval); ++void gr_log_timechange(void); ++void gr_log_signal(const int sig, const void *addr, const struct task_struct *t); ++void gr_log_chdir(const struct dentry *dentry, ++ const struct vfsmount *mnt); 
++void gr_log_chroot_exec(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_exec_args(struct linux_binprm *bprm, char **argv); ++void gr_log_remount(const char *devname, const int retval); ++void gr_log_unmount(const char *devname, const int retval); ++void gr_log_mount(const char *from, const char *to, const int retval); ++void gr_log_textrel(struct vm_area_struct *vma); ++ ++int gr_handle_follow_link(const struct inode *parent, ++ const struct inode *inode, ++ const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_fifo(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const struct dentry *dir, const int flag, ++ const int acc_mode); ++int gr_handle_hardlink(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ struct inode *inode, ++ const int mode, const char *to); ++ ++int gr_is_capable(const int cap); ++int gr_is_capable_nolog(const int cap); ++void gr_learn_resource(const struct task_struct *task, const int limit, ++ const unsigned long wanted, const int gt); ++void gr_copy_label(struct task_struct *tsk); ++void gr_handle_crash(struct task_struct *task, const int sig); ++int gr_handle_signal(const struct task_struct *p, const int sig); ++int gr_check_crash_uid(const uid_t uid); ++int gr_check_protected_task(const struct task_struct *task); ++int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type); ++int gr_acl_handle_mmap(const struct file *file, ++ const unsigned long prot); ++int gr_acl_handle_mprotect(const struct file *file, ++ const unsigned long prot); ++int gr_check_hidden_task(const struct task_struct *tsk); ++__u32 gr_acl_handle_truncate(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_utime(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_access(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int fmode); ++__u32 gr_acl_handle_fchmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, mode_t mode); ++__u32 gr_acl_handle_chmod(const struct dentry *dentry, ++ const struct vfsmount *mnt, mode_t mode); ++__u32 gr_acl_handle_chown(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_handle_ptrace(struct task_struct *task, const long request); ++int gr_handle_proc_ptrace(struct task_struct *task); ++__u32 gr_acl_handle_execve(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++int gr_check_crash_exec(const struct file *filp); ++int gr_acl_is_enabled(void); ++void gr_set_kernel_label(struct task_struct *task); ++void gr_set_role_label(struct task_struct *task, const uid_t uid, ++ const gid_t gid); ++int gr_set_proc_label(const struct dentry *dentry, ++ const struct vfsmount *mnt, ++ const int unsafe_share); ++__u32 gr_acl_handle_hidden_file(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_open(const struct dentry *dentry, ++ const struct vfsmount *mnt, const int fmode); ++__u32 gr_acl_handle_creat(const struct dentry *dentry, ++ const struct dentry *p_dentry, ++ const struct vfsmount *p_mnt, const int fmode, ++ const int imode); ++void gr_handle_create(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_mknod(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const int mode); ++__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt); ++__u32 gr_acl_handle_rmdir(const 
struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_handle_delete(const ino_t ino, const dev_t dev); ++__u32 gr_acl_handle_unlink(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++__u32 gr_acl_handle_symlink(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const char *from); ++__u32 gr_acl_handle_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt, const char *to); ++int gr_acl_handle_rename(struct dentry *new_dentry, ++ struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ struct dentry *old_dentry, ++ struct inode *old_parent_inode, ++ struct vfsmount *old_mnt, const char *newname); ++void gr_handle_rename(struct inode *old_dir, struct inode *new_dir, ++ struct dentry *old_dentry, ++ struct dentry *new_dentry, ++ struct vfsmount *mnt, const __u8 replace); ++__u32 gr_check_link(const struct dentry *new_dentry, ++ const struct dentry *parent_dentry, ++ const struct vfsmount *parent_mnt, ++ const struct dentry *old_dentry, ++ const struct vfsmount *old_mnt); ++int gr_acl_handle_filldir(const struct file *file, const char *name, ++ const unsigned int namelen, const ino_t ino); ++ ++__u32 gr_acl_handle_unix(const struct dentry *dentry, ++ const struct vfsmount *mnt); ++void gr_acl_handle_exit(void); ++void gr_acl_handle_psacct(struct task_struct *task, const long code); ++int gr_acl_handle_procpidmem(const struct task_struct *task); ++int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags); ++int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode); ++void gr_audit_ptrace(struct task_struct *task); ++ ++#ifdef CONFIG_GRKERNSEC ++void gr_log_nonroot_mod_load(const char *modname); ++void gr_handle_vm86(void); ++void gr_handle_mem_write(void); ++void gr_handle_kmem_write(void); ++void gr_handle_open_port(void); ++int gr_handle_mem_mmap(const unsigned long offset, ++ struct vm_area_struct *vma); ++ ++extern int grsec_enable_dmesg; ++extern int grsec_disable_privio; ++#endif ++ ++#endif +diff -urNp linux-2.6.35.4/include/linux/grsock.h linux-2.6.35.4/include/linux/grsock.h +--- linux-2.6.35.4/include/linux/grsock.h 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/include/linux/grsock.h 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1,19 @@ ++#ifndef __GRSOCK_H ++#define __GRSOCK_H ++ ++extern void gr_attach_curr_ip(const struct sock *sk); ++extern int gr_handle_sock_all(const int family, const int type, ++ const int protocol); ++extern int gr_handle_sock_server(const struct sockaddr *sck); ++extern int gr_handle_sock_server_other(const struct sock *sck); ++extern int gr_handle_sock_client(const struct sockaddr *sck); ++extern int gr_search_connect(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_bind(struct socket * sock, ++ struct sockaddr_in * addr); ++extern int gr_search_listen(struct socket * sock); ++extern int gr_search_accept(struct socket * sock); ++extern int gr_search_socket(const int domain, const int type, ++ const int protocol); ++ ++#endif +diff -urNp linux-2.6.35.4/include/linux/highmem.h linux-2.6.35.4/include/linux/highmem.h +--- linux-2.6.35.4/include/linux/highmem.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/highmem.h 2010-09-17 20:12:09.000000000 -0400 +@@ -143,6 +143,18 @@ static inline void clear_highpage(struct + 
kunmap_atomic(kaddr, KM_USER0); + } + ++static inline void sanitize_highpage(struct page *page) ++{ ++ void *kaddr; ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ kaddr = kmap_atomic(page, KM_CLEARPAGE); ++ clear_page(kaddr); ++ kunmap_atomic(kaddr, KM_CLEARPAGE); ++ local_irq_restore(flags); ++} ++ + static inline void zero_user_segments(struct page *page, + unsigned start1, unsigned end1, + unsigned start2, unsigned end2) +diff -urNp linux-2.6.35.4/include/linux/interrupt.h linux-2.6.35.4/include/linux/interrupt.h +--- linux-2.6.35.4/include/linux/interrupt.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/interrupt.h 2010-09-17 20:12:09.000000000 -0400 +@@ -392,7 +392,7 @@ enum + /* map softirq index to softirq name. update 'softirq_to_name' in + * kernel/softirq.c when adding a new softirq. + */ +-extern char *softirq_to_name[NR_SOFTIRQS]; ++extern const char * const softirq_to_name[NR_SOFTIRQS]; + + /* softirq mask and active fields moved to irq_cpustat_t in + * asm/hardirq.h to get better cache usage. KAO +@@ -400,12 +400,12 @@ extern char *softirq_to_name[NR_SOFTIRQS + + struct softirq_action + { +- void (*action)(struct softirq_action *); ++ void (*action)(void); + }; + + asmlinkage void do_softirq(void); + asmlinkage void __do_softirq(void); +-extern void open_softirq(int nr, void (*action)(struct softirq_action *)); ++extern void open_softirq(int nr, void (*action)(void)); + extern void softirq_init(void); + #define __raise_softirq_irqoff(nr) do { or_softirq_pending(1UL << (nr)); } while (0) + extern void raise_softirq_irqoff(unsigned int nr); +diff -urNp linux-2.6.35.4/include/linux/jbd2.h linux-2.6.35.4/include/linux/jbd2.h +--- linux-2.6.35.4/include/linux/jbd2.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/jbd2.h 2010-09-17 20:12:09.000000000 -0400 +@@ -67,7 +67,7 @@ extern u8 jbd2_journal_enable_debug; + } \ + } while (0) + #else +-#define jbd_debug(f, a...) /**/ ++#define jbd_debug(f, a...) do {} while (0) + #endif + + extern void *jbd2_alloc(size_t size, gfp_t flags); +diff -urNp linux-2.6.35.4/include/linux/jbd.h linux-2.6.35.4/include/linux/jbd.h +--- linux-2.6.35.4/include/linux/jbd.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/jbd.h 2010-09-17 20:12:09.000000000 -0400 +@@ -67,7 +67,7 @@ extern u8 journal_enable_debug; + } \ + } while (0) + #else +-#define jbd_debug(f, a...) /**/ ++#define jbd_debug(f, a...) do {} while (0) + #endif + + static inline void *jbd_alloc(size_t size, gfp_t flags) +diff -urNp linux-2.6.35.4/include/linux/kallsyms.h linux-2.6.35.4/include/linux/kallsyms.h +--- linux-2.6.35.4/include/linux/kallsyms.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/kallsyms.h 2010-09-17 20:12:37.000000000 -0400 +@@ -15,7 +15,8 @@ + + struct module; + +-#ifdef CONFIG_KALLSYMS ++#ifndef __INCLUDED_BY_HIDESYM ++#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM) + /* Lookup the address for a symbol. Returns 0 if not found. */ + unsigned long kallsyms_lookup_name(const char *name); + +@@ -92,6 +93,9 @@ static inline int lookup_symbol_attrs(un + /* Stupid that this does nothing, but I didn't create this mess. */ + #define __print_symbol(fmt, addr) + #endif /*CONFIG_KALLSYMS*/ ++#else /* when included by kallsyms.c, with HIDESYM enabled */ ++extern void __print_symbol(const char *fmt, unsigned long address); ++#endif + + /* This macro allows us to keep printk typechecking */ + static void __check_printsym_format(const char *fmt, ...) 
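The jbd.h/jbd2.h hunks above (and the libata one further down) replace debug macros that expand to /**/ or to nothing with do {} while (0). A macro whose expansion is not a single statement misbehaves as soon as it is used as the body of an if or gains a second statement; the do/while(0) form is always exactly one statement. A small userspace illustration (LOG_BAD and LOG_GOOD are illustrative names, not kernel macros):

#include <stdio.h>

/* Broken style: the second statement escapes the macro, so in
 * "if (x) LOG_BAD(...);" the fflush() runs unconditionally, and an
 * else clause after the call will not even compile. */
#define LOG_BAD(msg)  printf("%s\n", msg); fflush(stdout)

/* The do { } while (0) form used by the patch: the body is exactly
 * one statement, consumes the trailing ';' and nests in if/else.
 * An empty variant, do {} while (0), stays a valid statement too. */
#define LOG_GOOD(msg) do { printf("%s\n", msg); fflush(stdout); } while (0)

int main(void)
{
    int quiet = 1;

    if (!quiet)
        LOG_BAD("bad");   /* fflush() still runs despite quiet */

    if (!quiet)
        LOG_GOOD("good"); /* whole body skipped, else pairs correctly */
    else
        printf("quiet mode\n");

    return 0;
}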
+diff -urNp linux-2.6.35.4/include/linux/kgdb.h linux-2.6.35.4/include/linux/kgdb.h +--- linux-2.6.35.4/include/linux/kgdb.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/kgdb.h 2010-09-17 20:12:09.000000000 -0400 +@@ -263,22 +263,22 @@ struct kgdb_arch { + */ + struct kgdb_io { + const char *name; +- int (*read_char) (void); +- void (*write_char) (u8); +- void (*flush) (void); +- int (*init) (void); +- void (*pre_exception) (void); +- void (*post_exception) (void); ++ int (* const read_char) (void); ++ void (* const write_char) (u8); ++ void (* const flush) (void); ++ int (* const init) (void); ++ void (* const pre_exception) (void); ++ void (* const post_exception) (void); + int is_console; + }; + +-extern struct kgdb_arch arch_kgdb_ops; ++extern const struct kgdb_arch arch_kgdb_ops; + + extern unsigned long __weak kgdb_arch_pc(int exception, struct pt_regs *regs); + +-extern int kgdb_register_io_module(struct kgdb_io *local_kgdb_io_ops); +-extern void kgdb_unregister_io_module(struct kgdb_io *local_kgdb_io_ops); +-extern struct kgdb_io *dbg_io_ops; ++extern int kgdb_register_io_module(const struct kgdb_io *local_kgdb_io_ops); ++extern void kgdb_unregister_io_module(const struct kgdb_io *local_kgdb_io_ops); ++extern const struct kgdb_io *dbg_io_ops; + + extern int kgdb_hex2long(char **ptr, unsigned long *long_val); + extern int kgdb_mem2hex(char *mem, char *buf, int count); +diff -urNp linux-2.6.35.4/include/linux/kvm_host.h linux-2.6.35.4/include/linux/kvm_host.h +--- linux-2.6.35.4/include/linux/kvm_host.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/kvm_host.h 2010-09-17 20:12:09.000000000 -0400 +@@ -243,7 +243,7 @@ void kvm_vcpu_uninit(struct kvm_vcpu *vc + void vcpu_load(struct kvm_vcpu *vcpu); + void vcpu_put(struct kvm_vcpu *vcpu); + +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module); + void kvm_exit(void); + +@@ -367,7 +367,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug( + struct kvm_guest_debug *dbg); + int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run); + +-int kvm_arch_init(void *opaque); ++int kvm_arch_init(const void *opaque); + void kvm_arch_exit(void); + + int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu); +diff -urNp linux-2.6.35.4/include/linux/libata.h linux-2.6.35.4/include/linux/libata.h +--- linux-2.6.35.4/include/linux/libata.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/libata.h 2010-09-17 20:12:09.000000000 -0400 +@@ -64,11 +64,11 @@ + #ifdef ATA_VERBOSE_DEBUG + #define VPRINTK(fmt, args...) printk(KERN_ERR "%s: " fmt, __func__, ## args) + #else +-#define VPRINTK(fmt, args...) ++#define VPRINTK(fmt, args...) do {} while (0) + #endif /* ATA_VERBOSE_DEBUG */ + #else +-#define DPRINTK(fmt, args...) +-#define VPRINTK(fmt, args...) ++#define DPRINTK(fmt, args...) do {} while (0) ++#define VPRINTK(fmt, args...) do {} while (0) + #endif /* ATA_DEBUG */ + + #define BPRINTK(fmt, args...) 
if (ap->flags & ATA_FLAG_DEBUGMSG) printk(KERN_ERR "%s: " fmt, __func__, ## args) +@@ -523,11 +523,11 @@ struct ata_ioports { + + struct ata_host { + spinlock_t lock; +- struct device *dev; ++ struct device *dev; + void __iomem * const *iomap; + unsigned int n_ports; + void *private_data; +- struct ata_port_operations *ops; ++ const struct ata_port_operations *ops; + unsigned long flags; + #ifdef CONFIG_ATA_ACPI + acpi_handle acpi_handle; +@@ -709,7 +709,7 @@ struct ata_link { + + struct ata_port { + struct Scsi_Host *scsi_host; /* our co-allocated scsi host */ +- struct ata_port_operations *ops; ++ const struct ata_port_operations *ops; + spinlock_t *lock; + /* Flags owned by the EH context. Only EH should touch these once the + port is active */ +@@ -894,7 +894,7 @@ struct ata_port_info { + unsigned long pio_mask; + unsigned long mwdma_mask; + unsigned long udma_mask; +- struct ata_port_operations *port_ops; ++ const struct ata_port_operations *port_ops; + void *private_data; + }; + +@@ -918,7 +918,7 @@ extern const unsigned long sata_deb_timi + extern const unsigned long sata_deb_timing_hotplug[]; + extern const unsigned long sata_deb_timing_long[]; + +-extern struct ata_port_operations ata_dummy_port_ops; ++extern const struct ata_port_operations ata_dummy_port_ops; + extern const struct ata_port_info ata_dummy_port_info; + + static inline const unsigned long * +@@ -962,7 +962,7 @@ extern int ata_host_activate(struct ata_ + struct scsi_host_template *sht); + extern void ata_host_detach(struct ata_host *host); + extern void ata_host_init(struct ata_host *, struct device *, +- unsigned long, struct ata_port_operations *); ++ unsigned long, const struct ata_port_operations *); + extern int ata_scsi_detect(struct scsi_host_template *sht); + extern int ata_scsi_ioctl(struct scsi_device *dev, int cmd, void __user *arg); + extern int ata_scsi_queuecmd(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *)); +diff -urNp linux-2.6.35.4/include/linux/lockd/bind.h linux-2.6.35.4/include/linux/lockd/bind.h +--- linux-2.6.35.4/include/linux/lockd/bind.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/lockd/bind.h 2010-09-17 20:12:09.000000000 -0400 +@@ -23,13 +23,13 @@ struct svc_rqst; + * This is the set of functions for lockd->nfsd communication + */ + struct nlmsvc_binding { +- __be32 (*fopen)(struct svc_rqst *, ++ __be32 (* const fopen)(struct svc_rqst *, + struct nfs_fh *, + struct file **); +- void (*fclose)(struct file *); ++ void (* const fclose)(struct file *); + }; + +-extern struct nlmsvc_binding * nlmsvc_ops; ++extern const struct nlmsvc_binding * nlmsvc_ops; + + /* + * Similar to nfs_client_initdata, but without the NFS-specific +diff -urNp linux-2.6.35.4/include/linux/mm.h linux-2.6.35.4/include/linux/mm.h +--- linux-2.6.35.4/include/linux/mm.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/mm.h 2010-09-17 20:12:09.000000000 -0400 +@@ -103,7 +103,14 @@ extern unsigned int kobjsize(const void + + #define VM_CAN_NONLINEAR 0x08000000 /* Has ->fault & does nonlinear pages */ + #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */ ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++#define VM_SAO 0x00000000 /* Strong Access Ordering (powerpc) */ ++#define VM_PAGEEXEC 0x20000000 /* vma->vm_page_prot needs special handling */ ++#else + #define VM_SAO 0x20000000 /* Strong Access Ordering (powerpc) */ ++#endif ++ + #define VM_PFN_AT_MMAP 0x40000000 /* PFNMAP vma that is fully mapped at mmap time */ + 
#define VM_MERGEABLE 0x80000000 /* KSM may merge identical pages */ + +@@ -1010,6 +1017,8 @@ struct shrinker { + extern void register_shrinker(struct shrinker *); + extern void unregister_shrinker(struct shrinker *); + ++pgprot_t vm_get_page_prot(unsigned long vm_flags); ++ + int vma_wants_writenotify(struct vm_area_struct *vma); + + extern pte_t *get_locked_pte(struct mm_struct *mm, unsigned long addr, spinlock_t **ptl); +@@ -1286,6 +1295,7 @@ out: + } + + extern int do_munmap(struct mm_struct *, unsigned long, size_t); ++extern int __do_munmap(struct mm_struct *, unsigned long, size_t); + + extern unsigned long do_brk(unsigned long, unsigned long); + +@@ -1340,6 +1350,10 @@ extern struct vm_area_struct * find_vma( + extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr, + struct vm_area_struct **pprev); + ++extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma); ++extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma); ++extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl); ++ + /* Look up the first VMA which intersects the interval start_addr..end_addr-1, + NULL if none. Assume start_addr < end_addr. */ + static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr) +@@ -1356,7 +1370,6 @@ static inline unsigned long vma_pages(st + return (vma->vm_end - vma->vm_start) >> PAGE_SHIFT; + } + +-pgprot_t vm_get_page_prot(unsigned long vm_flags); + struct vm_area_struct *find_extend_vma(struct mm_struct *, unsigned long addr); + int remap_pfn_range(struct vm_area_struct *, unsigned long addr, + unsigned long pfn, unsigned long size, pgprot_t); +@@ -1463,10 +1476,16 @@ extern int unpoison_memory(unsigned long + extern int sysctl_memory_failure_early_kill; + extern int sysctl_memory_failure_recovery; + extern void shake_page(struct page *p, int access); +-extern atomic_long_t mce_bad_pages; ++extern atomic_long_unchecked_t mce_bad_pages; + extern int soft_offline_page(struct page *page, int flags); + + extern void dump_page(struct page *page); + ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot); ++#else ++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {} ++#endif ++ + #endif /* __KERNEL__ */ + #endif /* _LINUX_MM_H */ +diff -urNp linux-2.6.35.4/include/linux/mm_types.h linux-2.6.35.4/include/linux/mm_types.h +--- linux-2.6.35.4/include/linux/mm_types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/mm_types.h 2010-09-17 20:12:09.000000000 -0400 +@@ -183,6 +183,8 @@ struct vm_area_struct { + #ifdef CONFIG_NUMA + struct mempolicy *vm_policy; /* NUMA policy for the VMA */ + #endif ++ ++ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */ + }; + + struct core_thread { +@@ -310,6 +312,24 @@ struct mm_struct { + #ifdef CONFIG_MMU_NOTIFIER + struct mmu_notifier_mm *mmu_notifier_mm; + #endif ++ ++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS) || defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++ unsigned long pax_flags; ++#endif ++ ++#ifdef CONFIG_PAX_DLRESOLVE ++ unsigned long call_dl_resolve; ++#endif ++ ++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT) ++ unsigned long call_syscall; ++#endif ++ ++#ifdef CONFIG_PAX_ASLR ++ 
unsigned long delta_mmap; /* randomized offset */ ++ unsigned long delta_stack; /* randomized offset */ ++#endif ++ + }; + + /* Future-safe accessor for struct mm_struct's cpu_vm_mask. */ +diff -urNp linux-2.6.35.4/include/linux/mmu_notifier.h linux-2.6.35.4/include/linux/mmu_notifier.h +--- linux-2.6.35.4/include/linux/mmu_notifier.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/mmu_notifier.h 2010-09-17 20:12:09.000000000 -0400 +@@ -235,12 +235,12 @@ static inline void mmu_notifier_mm_destr + */ + #define ptep_clear_flush_notify(__vma, __address, __ptep) \ + ({ \ +- pte_t __pte; \ ++ pte_t ___pte; \ + struct vm_area_struct *___vma = __vma; \ + unsigned long ___address = __address; \ +- __pte = ptep_clear_flush(___vma, ___address, __ptep); \ ++ ___pte = ptep_clear_flush(___vma, ___address, __ptep); \ + mmu_notifier_invalidate_page(___vma->vm_mm, ___address); \ +- __pte; \ ++ ___pte; \ + }) + + #define ptep_clear_flush_young_notify(__vma, __address, __ptep) \ +diff -urNp linux-2.6.35.4/include/linux/mmzone.h linux-2.6.35.4/include/linux/mmzone.h +--- linux-2.6.35.4/include/linux/mmzone.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/mmzone.h 2010-09-17 20:12:09.000000000 -0400 +@@ -345,7 +345,7 @@ struct zone { + unsigned long flags; /* zone flags, see below */ + + /* Zone statistics */ +- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + /* + * prev_priority holds the scanning priority for this zone. It is +diff -urNp linux-2.6.35.4/include/linux/mod_devicetable.h linux-2.6.35.4/include/linux/mod_devicetable.h +--- linux-2.6.35.4/include/linux/mod_devicetable.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/mod_devicetable.h 2010-09-17 20:12:09.000000000 -0400 +@@ -12,7 +12,7 @@ + typedef unsigned long kernel_ulong_t; + #endif + +-#define PCI_ANY_ID (~0) ++#define PCI_ANY_ID ((__u16)~0) + + struct pci_device_id { + __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/ +@@ -131,7 +131,7 @@ struct usb_device_id { + #define USB_DEVICE_ID_MATCH_INT_SUBCLASS 0x0100 + #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200 + +-#define HID_ANY_ID (~0) ++#define HID_ANY_ID (~0U) + + struct hid_device_id { + __u16 bus; +diff -urNp linux-2.6.35.4/include/linux/module.h linux-2.6.35.4/include/linux/module.h +--- linux-2.6.35.4/include/linux/module.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/module.h 2010-09-17 20:12:09.000000000 -0400 +@@ -297,16 +297,16 @@ struct module + int (*init)(void); + + /* If this is non-NULL, vfree after init() returns */ +- void *module_init; ++ void *module_init_rx, *module_init_rw; + + /* Here is the actual code + data, vfree'd on unload. */ +- void *module_core; ++ void *module_core_rx, *module_core_rw; + + /* Here are the sizes of the init and core sections */ +- unsigned int init_size, core_size; ++ unsigned int init_size_rw, core_size_rw; + + /* The size of the executable code in each section. 
*/ +- unsigned int init_text_size, core_text_size; ++ unsigned int init_size_rx, core_size_rx; + + /* Arch-specific module values */ + struct mod_arch_specific arch; +@@ -408,16 +408,46 @@ bool is_module_address(unsigned long add + bool is_module_percpu_address(unsigned long addr); + bool is_module_text_address(unsigned long addr); + ++static inline int within_module_range(unsigned long addr, void *start, unsigned long size) ++{ ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (ktla_ktva(addr) >= (unsigned long)start && ++ ktla_ktva(addr) < (unsigned long)start + size) ++ return 1; ++#endif ++ ++ return ((void *)addr >= start && (void *)addr < start + size); ++} ++ ++static inline int within_module_core_rx(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx); ++} ++ ++static inline int within_module_core_rw(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw); ++} ++ ++static inline int within_module_init_rx(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx); ++} ++ ++static inline int within_module_init_rw(unsigned long addr, struct module *mod) ++{ ++ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw); ++} ++ + static inline int within_module_core(unsigned long addr, struct module *mod) + { +- return (unsigned long)mod->module_core <= addr && +- addr < (unsigned long)mod->module_core + mod->core_size; ++ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod); + } + + static inline int within_module_init(unsigned long addr, struct module *mod) + { +- return (unsigned long)mod->module_init <= addr && +- addr < (unsigned long)mod->module_init + mod->init_size; ++ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod); + } + + /* Search for module by name: must hold module_mutex. */ +diff -urNp linux-2.6.35.4/include/linux/moduleloader.h linux-2.6.35.4/include/linux/moduleloader.h +--- linux-2.6.35.4/include/linux/moduleloader.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/moduleloader.h 2010-09-17 20:12:09.000000000 -0400 +@@ -20,9 +20,21 @@ unsigned int arch_mod_section_prepend(st + sections. Returns NULL on failure. */ + void *module_alloc(unsigned long size); + ++#ifdef CONFIG_PAX_KERNEXEC ++void *module_alloc_exec(unsigned long size); ++#else ++#define module_alloc_exec(x) module_alloc(x) ++#endif ++ + /* Free memory returned from module_alloc. */ + void module_free(struct module *mod, void *module_region); + ++#ifdef CONFIG_PAX_KERNEXEC ++void module_free_exec(struct module *mod, void *module_region); ++#else ++#define module_free_exec(x, y) module_free((x), (y)) ++#endif ++ + /* Apply the given relocation to the (simplified) ELF. Return -error + or 0. 
*/ + int apply_relocate(Elf_Shdr *sechdrs, +diff -urNp linux-2.6.35.4/include/linux/namei.h linux-2.6.35.4/include/linux/namei.h +--- linux-2.6.35.4/include/linux/namei.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/namei.h 2010-09-17 20:12:09.000000000 -0400 +@@ -22,7 +22,7 @@ struct nameidata { + unsigned int flags; + int last_type; + unsigned depth; +- char *saved_names[MAX_NESTED_LINKS + 1]; ++ const char *saved_names[MAX_NESTED_LINKS + 1]; + + /* Intent data */ + union { +@@ -81,12 +81,12 @@ extern int follow_up(struct path *); + extern struct dentry *lock_rename(struct dentry *, struct dentry *); + extern void unlock_rename(struct dentry *, struct dentry *); + +-static inline void nd_set_link(struct nameidata *nd, char *path) ++static inline void nd_set_link(struct nameidata *nd, const char *path) + { + nd->saved_names[nd->depth] = path; + } + +-static inline char *nd_get_link(struct nameidata *nd) ++static inline const char *nd_get_link(const struct nameidata *nd) + { + return nd->saved_names[nd->depth]; + } +diff -urNp linux-2.6.35.4/include/linux/oprofile.h linux-2.6.35.4/include/linux/oprofile.h +--- linux-2.6.35.4/include/linux/oprofile.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/oprofile.h 2010-09-17 20:12:09.000000000 -0400 +@@ -129,9 +129,9 @@ int oprofilefs_create_ulong(struct super + int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root, + char const * name, ulong * val); + +-/** Create a file for read-only access to an atomic_t. */ ++/** Create a file for read-only access to an atomic_unchecked_t. */ + int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root, +- char const * name, atomic_t * val); ++ char const * name, atomic_unchecked_t * val); + + /** create a directory */ + struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root, +diff -urNp linux-2.6.35.4/include/linux/pipe_fs_i.h linux-2.6.35.4/include/linux/pipe_fs_i.h +--- linux-2.6.35.4/include/linux/pipe_fs_i.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/pipe_fs_i.h 2010-09-17 20:12:09.000000000 -0400 +@@ -45,9 +45,9 @@ struct pipe_buffer { + struct pipe_inode_info { + wait_queue_head_t wait; + unsigned int nrbufs, curbuf, buffers; +- unsigned int readers; +- unsigned int writers; +- unsigned int waiting_writers; ++ atomic_t readers; ++ atomic_t writers; ++ atomic_t waiting_writers; + unsigned int r_counter; + unsigned int w_counter; + struct page *tmp_page; +diff -urNp linux-2.6.35.4/include/linux/poison.h linux-2.6.35.4/include/linux/poison.h +--- linux-2.6.35.4/include/linux/poison.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/poison.h 2010-09-17 20:12:09.000000000 -0400 +@@ -19,8 +19,8 @@ + * under normal circumstances, used to verify that nobody uses + * non-initialized list entries. 
+ */ +-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA) +-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA) ++#define LIST_POISON1 ((void *) (long)0xFFFFFF01) ++#define LIST_POISON2 ((void *) (long)0xFFFFFF02) + + /********** include/linux/timer.h **********/ + /* +diff -urNp linux-2.6.35.4/include/linux/proc_fs.h linux-2.6.35.4/include/linux/proc_fs.h +--- linux-2.6.35.4/include/linux/proc_fs.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/proc_fs.h 2010-09-17 20:12:37.000000000 -0400 +@@ -155,6 +155,19 @@ static inline struct proc_dir_entry *pro + return proc_create_data(name, mode, parent, proc_fops, NULL); + } + ++static inline struct proc_dir_entry *proc_create_grsec(const char *name, mode_t mode, ++ struct proc_dir_entry *parent, const struct file_operations *proc_fops) ++{ ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL); ++#else ++ return proc_create_data(name, mode, parent, proc_fops, NULL); ++#endif ++} ++ ++ + static inline struct proc_dir_entry *create_proc_read_entry(const char *name, + mode_t mode, struct proc_dir_entry *base, + read_proc_t *read_proc, void * data) +diff -urNp linux-2.6.35.4/include/linux/random.h linux-2.6.35.4/include/linux/random.h +--- linux-2.6.35.4/include/linux/random.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/random.h 2010-09-17 20:12:09.000000000 -0400 +@@ -80,12 +80,17 @@ void srandom32(u32 seed); + + u32 prandom32(struct rnd_state *); + ++static inline unsigned long pax_get_random_long(void) ++{ ++ return random32() + (sizeof(long) > 4 ? (unsigned long)random32() << 32 : 0); ++} ++ + /* + * Handle minimum values for seeds + */ + static inline u32 __seed(u32 x, u32 m) + { +- return (x < m) ? x + m : x; ++ return (x <= m) ? 
x + m + 1 : x; + } + + /** +diff -urNp linux-2.6.35.4/include/linux/reiserfs_fs.h linux-2.6.35.4/include/linux/reiserfs_fs.h +--- linux-2.6.35.4/include/linux/reiserfs_fs.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/reiserfs_fs.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1404,7 +1404,7 @@ static inline loff_t max_reiserfs_offset + #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */ + + #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter) +-#define get_generation(s) atomic_read (&fs_generation(s)) ++#define get_generation(s) atomic_read_unchecked (&fs_generation(s)) + #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen) + #define __fs_changed(gen,s) (gen != get_generation (s)) + #define fs_changed(gen,s) \ +@@ -1616,24 +1616,24 @@ static inline struct super_block *sb_fro + */ + + struct item_operations { +- int (*bytes_number) (struct item_head * ih, int block_size); +- void (*decrement_key) (struct cpu_key *); +- int (*is_left_mergeable) (struct reiserfs_key * ih, ++ int (* const bytes_number) (struct item_head * ih, int block_size); ++ void (* const decrement_key) (struct cpu_key *); ++ int (* const is_left_mergeable) (struct reiserfs_key * ih, + unsigned long bsize); +- void (*print_item) (struct item_head *, char *item); +- void (*check_item) (struct item_head *, char *item); ++ void (* const print_item) (struct item_head *, char *item); ++ void (* const check_item) (struct item_head *, char *item); + +- int (*create_vi) (struct virtual_node * vn, struct virtual_item * vi, ++ int (* const create_vi) (struct virtual_node * vn, struct virtual_item * vi, + int is_affected, int insert_size); +- int (*check_left) (struct virtual_item * vi, int free, ++ int (* const check_left) (struct virtual_item * vi, int free, + int start_skip, int end_skip); +- int (*check_right) (struct virtual_item * vi, int free); +- int (*part_size) (struct virtual_item * vi, int from, int to); +- int (*unit_num) (struct virtual_item * vi); +- void (*print_vi) (struct virtual_item * vi); ++ int (* const check_right) (struct virtual_item * vi, int free); ++ int (* const part_size) (struct virtual_item * vi, int from, int to); ++ int (* const unit_num) (struct virtual_item * vi); ++ void (* const print_vi) (struct virtual_item * vi); + }; + +-extern struct item_operations *item_ops[TYPE_ANY + 1]; ++extern const struct item_operations * const item_ops[TYPE_ANY + 1]; + + #define op_bytes_number(ih,bsize) item_ops[le_ih_k_type (ih)]->bytes_number (ih, bsize) + #define op_is_left_mergeable(key,bsize) item_ops[le_key_k_type (le_key_version (key), key)]->is_left_mergeable (key, bsize) +diff -urNp linux-2.6.35.4/include/linux/reiserfs_fs_sb.h linux-2.6.35.4/include/linux/reiserfs_fs_sb.h +--- linux-2.6.35.4/include/linux/reiserfs_fs_sb.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/reiserfs_fs_sb.h 2010-09-17 20:12:09.000000000 -0400 +@@ -386,7 +386,7 @@ struct reiserfs_sb_info { + /* Comment? -Hans */ + wait_queue_head_t s_wait; + /* To be obsoleted soon by per buffer seals.. -Hans */ +- atomic_t s_generation_counter; // increased by one every time the ++ atomic_unchecked_t s_generation_counter; // increased by one every time the + // tree gets re-balanced + unsigned long s_properties; /* File system properties. 
Currently holds + on-disk FS format */ +diff -urNp linux-2.6.35.4/include/linux/rmap.h linux-2.6.35.4/include/linux/rmap.h +--- linux-2.6.35.4/include/linux/rmap.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/rmap.h 2010-09-17 20:12:09.000000000 -0400 +@@ -119,8 +119,8 @@ static inline void anon_vma_unlock(struc + void anon_vma_init(void); /* create anon_vma_cachep */ + int anon_vma_prepare(struct vm_area_struct *); + void unlink_anon_vmas(struct vm_area_struct *); +-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *); +-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *); ++int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *); ++int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *); + void __anon_vma_link(struct vm_area_struct *); + void anon_vma_free(struct anon_vma *); + +diff -urNp linux-2.6.35.4/include/linux/sched.h linux-2.6.35.4/include/linux/sched.h +--- linux-2.6.35.4/include/linux/sched.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/sched.h 2010-09-17 20:12:37.000000000 -0400 +@@ -100,6 +100,7 @@ struct robust_list_head; + struct bio_list; + struct fs_struct; + struct perf_event_context; ++struct linux_binprm; + + /* + * List of flags we want to share for kernel threads, +@@ -381,10 +382,12 @@ struct user_namespace; + #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN) + + extern int sysctl_max_map_count; ++extern unsigned long sysctl_heap_stack_gap; + + #include <linux/aio.h> + + #ifdef CONFIG_MMU ++extern bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len); + extern void arch_pick_mmap_layout(struct mm_struct *mm); + extern unsigned long + arch_get_unmapped_area(struct file *, unsigned long, unsigned long, +@@ -628,6 +631,15 @@ struct signal_struct { + struct tty_audit_buf *tty_audit_buf; + #endif + ++#ifdef CONFIG_GRKERNSEC ++ u32 curr_ip; ++ u32 gr_saddr; ++ u32 gr_daddr; ++ u16 gr_sport; ++ u16 gr_dport; ++ u8 used_accept:1; ++#endif ++ + int oom_adj; /* OOM kill score adjustment (bit shift) */ + }; + +@@ -1166,7 +1178,7 @@ struct rcu_node; + + struct task_struct { + volatile long state; /* -1 unrunnable, 0 runnable, >0 stopped */ +- void *stack; ++ struct thread_info *stack; + atomic_t usage; + unsigned int flags; /* per process flags, defined below */ + unsigned int ptrace; +@@ -1274,8 +1286,8 @@ struct task_struct { + struct list_head thread_group; + + struct completion *vfork_done; /* for vfork() */ +- int __user *set_child_tid; /* CLONE_CHILD_SETTID */ +- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ ++ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */ ++ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */ + + cputime_t utime, stime, utimescaled, stimescaled; + cputime_t gtime; +@@ -1291,16 +1303,6 @@ struct task_struct { + struct task_cputime cputime_expires; + struct list_head cpu_timers[3]; + +-/* process credentials */ +- const struct cred *real_cred; /* objective and real subjective task +- * credentials (COW) */ +- const struct cred *cred; /* effective (overridable) subjective task +- * credentials (COW) */ +- struct mutex cred_guard_mutex; /* guard against foreign influences on +- * credential calculations +- * (notably. 
ptrace) */ +- struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ +- + char comm[TASK_COMM_LEN]; /* executable name excluding path + - access with [gs]et_task_comm (which lock + it with task_lock()) +@@ -1384,6 +1386,15 @@ struct task_struct { + int softirqs_enabled; + int softirq_context; + #endif ++ ++/* process credentials */ ++ const struct cred *real_cred; /* objective and real subjective task ++ * credentials (COW) */ ++ struct mutex cred_guard_mutex; /* guard against foreign influences on ++ * credential calculations ++ * (notably. ptrace) */ ++ struct cred *replacement_session_keyring; /* for KEYCTL_SESSION_TO_PARENT */ ++ + #ifdef CONFIG_LOCKDEP + # define MAX_LOCK_DEPTH 48UL + u64 curr_chain_key; +@@ -1404,6 +1415,9 @@ struct task_struct { + + struct backing_dev_info *backing_dev_info; + ++ const struct cred *cred; /* effective (overridable) subjective task ++ * credentials (COW) */ ++ + struct io_context *io_context; + + unsigned long ptrace_message; +@@ -1469,6 +1483,20 @@ struct task_struct { + unsigned long default_timer_slack_ns; + + struct list_head *scm_work_list; ++ ++#ifdef CONFIG_GRKERNSEC ++ /* grsecurity */ ++ struct dentry *gr_chroot_dentry; ++ struct acl_subject_label *acl; ++ struct acl_role_label *role; ++ struct file *exec_file; ++ u16 acl_role_id; ++ u8 acl_sp_role; ++ u8 is_writable; ++ u8 brute; ++ u8 gr_is_chrooted; ++#endif ++ + #ifdef CONFIG_FUNCTION_GRAPH_TRACER + /* Index of current stored address in ret_stack */ + int curr_ret_stack; +@@ -1500,6 +1528,52 @@ struct task_struct { + #endif + }; + ++#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */ ++#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */ ++#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */ ++#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */ ++/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */ ++#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */ ++ ++#ifdef CONFIG_PAX_SOFTMODE ++extern unsigned int pax_softmode; ++#endif ++ ++extern int pax_check_flags(unsigned long *); ++ ++/* if tsk != current then task_lock must be held on it */ ++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR) ++static inline unsigned long pax_get_flags(struct task_struct *tsk) ++{ ++ if (likely(tsk->mm)) ++ return tsk->mm->pax_flags; ++ else ++ return 0UL; ++} ++ ++/* if tsk != current then task_lock must be held on it */ ++static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags) ++{ ++ if (likely(tsk->mm)) { ++ tsk->mm->pax_flags = flags; ++ return 0; ++ } ++ return -EINVAL; ++} ++#endif ++ ++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS ++extern void pax_set_initial_flags(struct linux_binprm *bprm); ++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS) ++extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm); ++#endif ++ ++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp); ++void pax_report_insns(void *pc, void *sp); ++void pax_report_refcount_overflow(struct pt_regs *regs); ++void pax_report_leak_to_user(const void *ptr, unsigned long len); ++void pax_report_overflow_from_user(const void *ptr, unsigned long len); ++ + /* Future-safe accessor for struct task_struct's cpus_allowed. 
*/ + #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed) + +@@ -2101,7 +2175,7 @@ extern void __cleanup_sighand(struct sig + extern void exit_itimers(struct signal_struct *); + extern void flush_itimer_signals(void); + +-extern NORET_TYPE void do_group_exit(int); ++extern NORET_TYPE void do_group_exit(int) ATTRIB_NORET; + + extern void daemonize(const char *, ...); + extern int allow_signal(int); +@@ -2217,8 +2291,8 @@ static inline void unlock_task_sighand(s + + #ifndef __HAVE_THREAD_FUNCTIONS + +-#define task_thread_info(task) ((struct thread_info *)(task)->stack) +-#define task_stack_page(task) ((task)->stack) ++#define task_thread_info(task) ((task)->stack) ++#define task_stack_page(task) ((void *)(task)->stack) + + static inline void setup_thread_stack(struct task_struct *p, struct task_struct *org) + { +@@ -2233,13 +2307,17 @@ static inline unsigned long *end_of_stac + + #endif + +-static inline int object_is_on_stack(void *obj) ++static inline int object_starts_on_stack(void *obj) + { +- void *stack = task_stack_page(current); ++ const void *stack = task_stack_page(current); + + return (obj >= stack) && (obj < (stack + THREAD_SIZE)); + } + ++#ifdef CONFIG_PAX_USERCOPY ++extern int object_is_on_stack(const void *obj, unsigned long len); ++#endif ++ + extern void thread_info_cache_init(void); + + #ifdef CONFIG_DEBUG_STACK_USAGE +diff -urNp linux-2.6.35.4/include/linux/screen_info.h linux-2.6.35.4/include/linux/screen_info.h +--- linux-2.6.35.4/include/linux/screen_info.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/screen_info.h 2010-09-17 20:12:09.000000000 -0400 +@@ -43,7 +43,8 @@ struct screen_info { + __u16 pages; /* 0x32 */ + __u16 vesa_attributes; /* 0x34 */ + __u32 capabilities; /* 0x36 */ +- __u8 _reserved[6]; /* 0x3a */ ++ __u16 vesapm_size; /* 0x3a */ ++ __u8 _reserved[4]; /* 0x3c */ + } __attribute__((packed)); + + #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */ +diff -urNp linux-2.6.35.4/include/linux/security.h linux-2.6.35.4/include/linux/security.h +--- linux-2.6.35.4/include/linux/security.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/security.h 2010-09-17 20:12:37.000000000 -0400 +@@ -34,6 +34,7 @@ + #include <linux/key.h> + #include <linux/xfrm.h> + #include <linux/slab.h> ++#include <linux/grsecurity.h> + #include <net/flow.h> + + /* Maximum number of letters for an LSM name string */ +diff -urNp linux-2.6.35.4/include/linux/shm.h linux-2.6.35.4/include/linux/shm.h +--- linux-2.6.35.4/include/linux/shm.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/shm.h 2010-09-17 20:12:37.000000000 -0400 +@@ -95,6 +95,10 @@ struct shmid_kernel /* private to the ke + pid_t shm_cprid; + pid_t shm_lprid; + struct user_struct *mlock_user; ++#ifdef CONFIG_GRKERNSEC ++ time_t shm_createtime; ++ pid_t shm_lapid; ++#endif + }; + + /* shm_mode upper byte flags */ +diff -urNp linux-2.6.35.4/include/linux/slab.h linux-2.6.35.4/include/linux/slab.h +--- linux-2.6.35.4/include/linux/slab.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/slab.h 2010-09-17 20:12:09.000000000 -0400 +@@ -11,6 +11,7 @@ + + #include <linux/gfp.h> + #include <linux/types.h> ++#include <linux/err.h> + + /* + * Flags to pass to kmem_cache_create(). +@@ -87,10 +88,13 @@ + * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can. + * Both make kfree a no-op. 
+ */ +-#define ZERO_SIZE_PTR ((void *)16) ++#define ZERO_SIZE_PTR \ ++({ \ ++ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\ ++ (void *)(-MAX_ERRNO-1L); \ ++}) + +-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \ +- (unsigned long)ZERO_SIZE_PTR) ++#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1) + + /* + * struct kmem_cache related prototypes +@@ -144,6 +148,7 @@ void * __must_check krealloc(const void + void kfree(const void *); + void kzfree(const void *); + size_t ksize(const void *); ++void check_object_size(const void *ptr, unsigned long n, bool to); + + /* + * Allocator specific definitions. These are mainly used to establish optimized +@@ -334,4 +339,37 @@ static inline void *kzalloc_node(size_t + + void __init kmem_cache_init_late(void); + ++#define kmalloc(x, y) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "kmalloc size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = kmalloc((size_t)___x, (y)); \ ++ ___retval; \ ++}) ++ ++#define kmalloc_node(x, y, z) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "kmalloc_node size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = kmalloc_node((size_t)___x, (y), (z));\ ++ ___retval; \ ++}) ++ ++#define kzalloc(x, y) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "kzalloc size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = kzalloc((size_t)___x, (y)); \ ++ ___retval; \ ++}) ++ + #endif /* _LINUX_SLAB_H */ +diff -urNp linux-2.6.35.4/include/linux/slub_def.h linux-2.6.35.4/include/linux/slub_def.h +--- linux-2.6.35.4/include/linux/slub_def.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/slub_def.h 2010-09-17 20:12:09.000000000 -0400 +@@ -79,7 +79,7 @@ struct kmem_cache { + struct kmem_cache_order_objects max; + struct kmem_cache_order_objects min; + gfp_t allocflags; /* gfp flags to use on each alloc */ +- int refcount; /* Refcount for slab cache destroy */ ++ atomic_t refcount; /* Refcount for slab cache destroy */ + void (*ctor)(void *); + int inuse; /* Offset to metadata */ + int align; /* Alignment */ +diff -urNp linux-2.6.35.4/include/linux/sonet.h linux-2.6.35.4/include/linux/sonet.h +--- linux-2.6.35.4/include/linux/sonet.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/sonet.h 2010-09-17 20:12:09.000000000 -0400 +@@ -61,7 +61,7 @@ struct sonet_stats { + #include <asm/atomic.h> + + struct k_sonet_stats { +-#define __HANDLE_ITEM(i) atomic_t i ++#define __HANDLE_ITEM(i) atomic_unchecked_t i + __SONET_ITEMS + #undef __HANDLE_ITEM + }; +diff -urNp linux-2.6.35.4/include/linux/suspend.h linux-2.6.35.4/include/linux/suspend.h +--- linux-2.6.35.4/include/linux/suspend.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/suspend.h 2010-09-17 20:12:09.000000000 -0400 +@@ -104,15 +104,15 @@ typedef int __bitwise suspend_state_t; + * which require special recovery actions in that situation. 
+ */ + struct platform_suspend_ops { +- int (*valid)(suspend_state_t state); +- int (*begin)(suspend_state_t state); +- int (*prepare)(void); +- int (*prepare_late)(void); +- int (*enter)(suspend_state_t state); +- void (*wake)(void); +- void (*finish)(void); +- void (*end)(void); +- void (*recover)(void); ++ int (* const valid)(suspend_state_t state); ++ int (* const begin)(suspend_state_t state); ++ int (* const prepare)(void); ++ int (* const prepare_late)(void); ++ int (* const enter)(suspend_state_t state); ++ void (* const wake)(void); ++ void (* const finish)(void); ++ void (* const end)(void); ++ void (* const recover)(void); + }; + + #ifdef CONFIG_SUSPEND +@@ -120,7 +120,7 @@ struct platform_suspend_ops { + * suspend_set_ops - set platform dependent suspend operations + * @ops: The new suspend operations to set. + */ +-extern void suspend_set_ops(struct platform_suspend_ops *ops); ++extern void suspend_set_ops(const struct platform_suspend_ops *ops); + extern int suspend_valid_only_mem(suspend_state_t state); + + /** +@@ -145,7 +145,7 @@ extern int pm_suspend(suspend_state_t st + #else /* !CONFIG_SUSPEND */ + #define suspend_valid_only_mem NULL + +-static inline void suspend_set_ops(struct platform_suspend_ops *ops) {} ++static inline void suspend_set_ops(const struct platform_suspend_ops *ops) {} + static inline int pm_suspend(suspend_state_t state) { return -ENOSYS; } + #endif /* !CONFIG_SUSPEND */ + +@@ -215,16 +215,16 @@ extern void mark_free_pages(struct zone + * platforms which require special recovery actions in that situation. + */ + struct platform_hibernation_ops { +- int (*begin)(void); +- void (*end)(void); +- int (*pre_snapshot)(void); +- void (*finish)(void); +- int (*prepare)(void); +- int (*enter)(void); +- void (*leave)(void); +- int (*pre_restore)(void); +- void (*restore_cleanup)(void); +- void (*recover)(void); ++ int (* const begin)(void); ++ void (* const end)(void); ++ int (* const pre_snapshot)(void); ++ void (* const finish)(void); ++ int (* const prepare)(void); ++ int (* const enter)(void); ++ void (* const leave)(void); ++ int (* const pre_restore)(void); ++ void (* const restore_cleanup)(void); ++ void (* const recover)(void); + }; + + #ifdef CONFIG_HIBERNATION +@@ -243,7 +243,7 @@ extern void swsusp_set_page_free(struct + extern void swsusp_unset_page_free(struct page *); + extern unsigned long get_safe_page(gfp_t gfp_mask); + +-extern void hibernation_set_ops(struct platform_hibernation_ops *ops); ++extern void hibernation_set_ops(const struct platform_hibernation_ops *ops); + extern int hibernate(void); + extern bool system_entering_hibernation(void); + #else /* CONFIG_HIBERNATION */ +@@ -251,7 +251,7 @@ static inline int swsusp_page_is_forbidd + static inline void swsusp_set_page_free(struct page *p) {} + static inline void swsusp_unset_page_free(struct page *p) {} + +-static inline void hibernation_set_ops(struct platform_hibernation_ops *ops) {} ++static inline void hibernation_set_ops(const struct platform_hibernation_ops *ops) {} + static inline int hibernate(void) { return -ENOSYS; } + static inline bool system_entering_hibernation(void) { return false; } + #endif /* CONFIG_HIBERNATION */ +diff -urNp linux-2.6.35.4/include/linux/sysctl.h linux-2.6.35.4/include/linux/sysctl.h +--- linux-2.6.35.4/include/linux/sysctl.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/sysctl.h 2010-09-17 20:12:09.000000000 -0400 +@@ -155,7 +155,11 @@ enum + KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */ + }; + +- 
++#ifdef CONFIG_PAX_SOFTMODE ++enum { ++ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */ ++}; ++#endif + + /* CTL_VM names: */ + enum +diff -urNp linux-2.6.35.4/include/linux/sysfs.h linux-2.6.35.4/include/linux/sysfs.h +--- linux-2.6.35.4/include/linux/sysfs.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/sysfs.h 2010-09-17 20:12:09.000000000 -0400 +@@ -115,8 +115,8 @@ struct bin_attribute { + #define sysfs_bin_attr_init(bin_attr) sysfs_attr_init(&(bin_attr)->attr) + + struct sysfs_ops { +- ssize_t (*show)(struct kobject *, struct attribute *,char *); +- ssize_t (*store)(struct kobject *,struct attribute *,const char *, size_t); ++ ssize_t (* const show)(struct kobject *, struct attribute *,char *); ++ ssize_t (* const store)(struct kobject *,struct attribute *,const char *, size_t); + }; + + struct sysfs_dirent; +diff -urNp linux-2.6.35.4/include/linux/thread_info.h linux-2.6.35.4/include/linux/thread_info.h +--- linux-2.6.35.4/include/linux/thread_info.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/thread_info.h 2010-09-17 20:12:09.000000000 -0400 +@@ -23,7 +23,7 @@ struct restart_block { + }; + /* For futex_wait and futex_wait_requeue_pi */ + struct { +- u32 *uaddr; ++ u32 __user *uaddr; + u32 val; + u32 flags; + u32 bitset; +diff -urNp linux-2.6.35.4/include/linux/tty.h linux-2.6.35.4/include/linux/tty.h +--- linux-2.6.35.4/include/linux/tty.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/tty.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,6 +13,7 @@ + #include <linux/tty_driver.h> + #include <linux/tty_ldisc.h> + #include <linux/mutex.h> ++#include <linux/poll.h> + + #include <asm/system.h> + +@@ -453,7 +454,6 @@ extern int tty_perform_flush(struct tty_ + extern dev_t tty_devnum(struct tty_struct *tty); + extern void proc_clear_tty(struct task_struct *p); + extern struct tty_struct *get_current_tty(void); +-extern void tty_default_fops(struct file_operations *fops); + extern struct tty_struct *alloc_tty_struct(void); + extern void free_tty_struct(struct tty_struct *tty); + extern void initialize_tty_struct(struct tty_struct *tty, +@@ -514,6 +514,18 @@ extern void tty_ldisc_begin(void); + /* This last one is just for the tty layer internals and shouldn't be used elsewhere */ + extern void tty_ldisc_enable(struct tty_struct *tty); + ++/* tty_io.c */ ++extern ssize_t tty_read(struct file *, char __user *, size_t, loff_t *); ++extern ssize_t tty_write(struct file *, const char __user *, size_t, loff_t *); ++extern unsigned int tty_poll(struct file *, poll_table *); ++#ifdef CONFIG_COMPAT ++extern long tty_compat_ioctl(struct file *file, unsigned int cmd, ++ unsigned long arg); ++#else ++#define tty_compat_ioctl NULL ++#endif ++extern int tty_release(struct inode *, struct file *); ++extern int tty_fasync(int fd, struct file *filp, int on); + + /* n_tty.c */ + extern struct tty_ldisc_ops tty_ldisc_N_TTY; +diff -urNp linux-2.6.35.4/include/linux/tty_ldisc.h linux-2.6.35.4/include/linux/tty_ldisc.h +--- linux-2.6.35.4/include/linux/tty_ldisc.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/tty_ldisc.h 2010-09-17 20:12:09.000000000 -0400 +@@ -147,7 +147,7 @@ struct tty_ldisc_ops { + + struct module *owner; + +- int refcount; ++ atomic_t refcount; + }; + + struct tty_ldisc { +diff -urNp linux-2.6.35.4/include/linux/types.h linux-2.6.35.4/include/linux/types.h +--- linux-2.6.35.4/include/linux/types.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/types.h 2010-09-17 
20:12:09.000000000 -0400 +@@ -191,10 +191,26 @@ typedef struct { + int counter; + } atomic_t; + ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ int counter; ++} atomic_unchecked_t; ++#else ++typedef atomic_t atomic_unchecked_t; ++#endif ++ + #ifdef CONFIG_64BIT + typedef struct { + long counter; + } atomic64_t; ++ ++#ifdef CONFIG_PAX_REFCOUNT ++typedef struct { ++ long counter; ++} atomic64_unchecked_t; ++#else ++typedef atomic64_t atomic64_unchecked_t; ++#endif + #endif + + struct ustat { +diff -urNp linux-2.6.35.4/include/linux/uaccess.h linux-2.6.35.4/include/linux/uaccess.h +--- linux-2.6.35.4/include/linux/uaccess.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/uaccess.h 2010-09-17 20:12:09.000000000 -0400 +@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_ + long ret; \ + mm_segment_t old_fs = get_fs(); \ + \ +- set_fs(KERNEL_DS); \ + pagefault_disable(); \ ++ set_fs(KERNEL_DS); \ + ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \ +- pagefault_enable(); \ + set_fs(old_fs); \ ++ pagefault_enable(); \ + ret; \ + }) + +@@ -93,8 +93,8 @@ static inline unsigned long __copy_from_ + * Safely read from address @src to the buffer at @dst. If a kernel fault + * happens, handle that and return -EFAULT. + */ +-extern long probe_kernel_read(void *dst, void *src, size_t size); +-extern long __probe_kernel_read(void *dst, void *src, size_t size); ++extern long probe_kernel_read(void *dst, const void *src, size_t size); ++extern long __probe_kernel_read(void *dst, const void *src, size_t size); + + /* + * probe_kernel_write(): safely attempt to write to a location +@@ -105,7 +105,7 @@ extern long __probe_kernel_read(void *ds + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. 
+ */ +-extern long notrace probe_kernel_write(void *dst, void *src, size_t size); +-extern long notrace __probe_kernel_write(void *dst, void *src, size_t size); ++extern long notrace probe_kernel_write(void *dst, const void *src, size_t size); ++extern long notrace __probe_kernel_write(void *dst, const void *src, size_t size); + + #endif /* __LINUX_UACCESS_H__ */ +diff -urNp linux-2.6.35.4/include/linux/usb/hcd.h linux-2.6.35.4/include/linux/usb/hcd.h +--- linux-2.6.35.4/include/linux/usb/hcd.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/usb/hcd.h 2010-09-17 20:12:09.000000000 -0400 +@@ -559,7 +559,7 @@ struct usb_mon_operations { + /* void (*urb_unlink)(struct usb_bus *bus, struct urb *urb); */ + }; + +-extern struct usb_mon_operations *mon_ops; ++extern const struct usb_mon_operations *mon_ops; + + static inline void usbmon_urb_submit(struct usb_bus *bus, struct urb *urb) + { +@@ -581,7 +581,7 @@ static inline void usbmon_urb_complete(s + (*mon_ops->urb_complete)(bus, urb, status); + } + +-int usb_mon_register(struct usb_mon_operations *ops); ++int usb_mon_register(const struct usb_mon_operations *ops); + void usb_mon_deregister(void); + + #else +diff -urNp linux-2.6.35.4/include/linux/vmalloc.h linux-2.6.35.4/include/linux/vmalloc.h +--- linux-2.6.35.4/include/linux/vmalloc.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/vmalloc.h 2010-09-17 20:12:09.000000000 -0400 +@@ -13,6 +13,11 @@ struct vm_area_struct; /* vma defining + #define VM_MAP 0x00000004 /* vmap()ed pages */ + #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */ + #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */ ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++#define VM_KERNEXEC 0x00000020 /* allocate from executable kernel memory range */ ++#endif ++ + /* bits [20..32] reserved for arch specific ioremap internals */ + + /* +@@ -121,4 +126,81 @@ struct vm_struct **pcpu_get_vm_areas(con + + void pcpu_free_vm_areas(struct vm_struct **vms, int nr_vms); + ++#define vmalloc(x) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "vmalloc size overflow\n")) \ ++ ___retval = NULL; \ ++ else \ ++ ___retval = vmalloc((unsigned long)___x); \ ++ ___retval; \ ++}) ++ ++#define __vmalloc(x, y, z) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "__vmalloc size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = __vmalloc((unsigned long)___x, (y), (z));\ ++ ___retval; \ ++}) ++ ++#define vmalloc_user(x) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "vmalloc_user size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = vmalloc_user((unsigned long)___x); \ ++ ___retval; \ ++}) ++ ++#define vmalloc_exec(x) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "vmalloc_exec size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = vmalloc_exec((unsigned long)___x); \ ++ ___retval; \ ++}) ++ ++#define vmalloc_node(x, y) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "vmalloc_node size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = vmalloc_node((unsigned long)___x, (y));\ ++ ___retval; \ ++}) ++ ++#define vmalloc_32(x) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if 
(WARN(___x > ULONG_MAX, "vmalloc_32 size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = vmalloc_32((unsigned long)___x); \ ++ ___retval; \ ++}) ++ ++#define vmalloc_32_user(x) \ ++({ \ ++ void *___retval; \ ++ intoverflow_t ___x = (intoverflow_t)x; \ ++ if (WARN(___x > ULONG_MAX, "vmalloc_32_user size overflow\n"))\ ++ ___retval = NULL; \ ++ else \ ++ ___retval = vmalloc_32_user((unsigned long)___x);\ ++ ___retval; \ ++}) ++ + #endif /* _LINUX_VMALLOC_H */ +diff -urNp linux-2.6.35.4/include/linux/vmstat.h linux-2.6.35.4/include/linux/vmstat.h +--- linux-2.6.35.4/include/linux/vmstat.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/linux/vmstat.h 2010-09-17 20:12:09.000000000 -0400 +@@ -140,18 +140,18 @@ static inline void vm_events_fold_cpu(in + /* + * Zone based page accounting with per cpu differentials. + */ +-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + + static inline void zone_page_state_add(long x, struct zone *zone, + enum zone_stat_item item) + { +- atomic_long_add(x, &zone->vm_stat[item]); +- atomic_long_add(x, &vm_stat[item]); ++ atomic_long_add_unchecked(x, &zone->vm_stat[item]); ++ atomic_long_add_unchecked(x, &vm_stat[item]); + } + + static inline unsigned long global_page_state(enum zone_stat_item item) + { +- long x = atomic_long_read(&vm_stat[item]); ++ long x = atomic_long_read_unchecked(&vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -162,7 +162,7 @@ static inline unsigned long global_page_ + static inline unsigned long zone_page_state(struct zone *zone, + enum zone_stat_item item) + { +- long x = atomic_long_read(&zone->vm_stat[item]); ++ long x = atomic_long_read_unchecked(&zone->vm_stat[item]); + #ifdef CONFIG_SMP + if (x < 0) + x = 0; +@@ -246,8 +246,8 @@ static inline void __mod_zone_page_state + + static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_inc(&zone->vm_stat[item]); +- atomic_long_inc(&vm_stat[item]); ++ atomic_long_inc_unchecked(&zone->vm_stat[item]); ++ atomic_long_inc_unchecked(&vm_stat[item]); + } + + static inline void __inc_zone_page_state(struct page *page, +@@ -258,8 +258,8 @@ static inline void __inc_zone_page_state + + static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item) + { +- atomic_long_dec(&zone->vm_stat[item]); +- atomic_long_dec(&vm_stat[item]); ++ atomic_long_dec_unchecked(&zone->vm_stat[item]); ++ atomic_long_dec_unchecked(&vm_stat[item]); + } + + static inline void __dec_zone_page_state(struct page *page, +diff -urNp linux-2.6.35.4/include/net/irda/ircomm_tty.h linux-2.6.35.4/include/net/irda/ircomm_tty.h +--- linux-2.6.35.4/include/net/irda/ircomm_tty.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/net/irda/ircomm_tty.h 2010-09-17 20:12:09.000000000 -0400 +@@ -105,8 +105,8 @@ struct ircomm_tty_cb { + unsigned short close_delay; + unsigned short closing_wait; /* time to wait before closing */ + +- int open_count; +- int blocked_open; /* # of blocked opens */ ++ atomic_t open_count; ++ atomic_t blocked_open; /* # of blocked opens */ + + /* Protect concurent access to : + * o self->open_count +diff -urNp linux-2.6.35.4/include/net/neighbour.h linux-2.6.35.4/include/net/neighbour.h +--- linux-2.6.35.4/include/net/neighbour.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/net/neighbour.h 2010-09-17 20:12:09.000000000 -0400 +@@ -116,12 +116,12 @@ struct neighbour { + + struct neigh_ops { + int family; +- void 
(*solicit)(struct neighbour *, struct sk_buff*); +- void (*error_report)(struct neighbour *, struct sk_buff*); +- int (*output)(struct sk_buff*); +- int (*connected_output)(struct sk_buff*); +- int (*hh_output)(struct sk_buff*); +- int (*queue_xmit)(struct sk_buff*); ++ void (* const solicit)(struct neighbour *, struct sk_buff*); ++ void (* const error_report)(struct neighbour *, struct sk_buff*); ++ int (* const output)(struct sk_buff*); ++ int (* const connected_output)(struct sk_buff*); ++ int (* const hh_output)(struct sk_buff*); ++ int (* const queue_xmit)(struct sk_buff*); + }; + + struct pneigh_entry { +diff -urNp linux-2.6.35.4/include/net/sctp/sctp.h linux-2.6.35.4/include/net/sctp/sctp.h +--- linux-2.6.35.4/include/net/sctp/sctp.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/net/sctp/sctp.h 2010-09-17 20:12:09.000000000 -0400 +@@ -305,8 +305,8 @@ extern int sctp_debug_flag; + + #else /* SCTP_DEBUG */ + +-#define SCTP_DEBUG_PRINTK(whatever...) +-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) ++#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0) ++#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0) + #define SCTP_ENABLE_DEBUG + #define SCTP_DISABLE_DEBUG + #define SCTP_ASSERT(expr, str, func) +diff -urNp linux-2.6.35.4/include/net/tcp.h linux-2.6.35.4/include/net/tcp.h +--- linux-2.6.35.4/include/net/tcp.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/net/tcp.h 2010-09-17 20:12:09.000000000 -0400 +@@ -1404,6 +1404,7 @@ enum tcp_seq_states { + struct tcp_seq_afinfo { + char *name; + sa_family_t family; ++ /* cannot be const */ + struct file_operations seq_fops; + struct seq_operations seq_ops; + }; +diff -urNp linux-2.6.35.4/include/net/udp.h linux-2.6.35.4/include/net/udp.h +--- linux-2.6.35.4/include/net/udp.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/net/udp.h 2010-09-17 20:12:09.000000000 -0400 +@@ -221,6 +221,7 @@ struct udp_seq_afinfo { + char *name; + sa_family_t family; + struct udp_table *udp_table; ++ /* cannot be const */ + struct file_operations seq_fops; + struct seq_operations seq_ops; + }; +diff -urNp linux-2.6.35.4/include/sound/ac97_codec.h linux-2.6.35.4/include/sound/ac97_codec.h +--- linux-2.6.35.4/include/sound/ac97_codec.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/sound/ac97_codec.h 2010-09-17 20:12:09.000000000 -0400 +@@ -419,15 +419,15 @@ + struct snd_ac97; + + struct snd_ac97_build_ops { +- int (*build_3d) (struct snd_ac97 *ac97); +- int (*build_specific) (struct snd_ac97 *ac97); +- int (*build_spdif) (struct snd_ac97 *ac97); +- int (*build_post_spdif) (struct snd_ac97 *ac97); ++ int (* const build_3d) (struct snd_ac97 *ac97); ++ int (* const build_specific) (struct snd_ac97 *ac97); ++ int (* const build_spdif) (struct snd_ac97 *ac97); ++ int (* const build_post_spdif) (struct snd_ac97 *ac97); + #ifdef CONFIG_PM +- void (*suspend) (struct snd_ac97 *ac97); +- void (*resume) (struct snd_ac97 *ac97); ++ void (* const suspend) (struct snd_ac97 *ac97); ++ void (* const resume) (struct snd_ac97 *ac97); + #endif +- void (*update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */ ++ void (* const update_jacks) (struct snd_ac97 *ac97); /* for jack-sharing */ + }; + + struct snd_ac97_bus_ops { +@@ -477,7 +477,7 @@ struct snd_ac97_template { + + struct snd_ac97 { + /* -- lowlevel (hardware) driver specific -- */ +- struct snd_ac97_build_ops * build_ops; ++ const struct snd_ac97_build_ops * build_ops; + void *private_data; + void (*private_free) (struct snd_ac97 *ac97); + /* 
--- */ +diff -urNp linux-2.6.35.4/include/trace/events/irq.h linux-2.6.35.4/include/trace/events/irq.h +--- linux-2.6.35.4/include/trace/events/irq.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/trace/events/irq.h 2010-09-17 20:12:09.000000000 -0400 +@@ -34,7 +34,7 @@ + */ + TRACE_EVENT(irq_handler_entry, + +- TP_PROTO(int irq, struct irqaction *action), ++ TP_PROTO(int irq, const struct irqaction *action), + + TP_ARGS(irq, action), + +@@ -64,7 +64,7 @@ TRACE_EVENT(irq_handler_entry, + */ + TRACE_EVENT(irq_handler_exit, + +- TP_PROTO(int irq, struct irqaction *action, int ret), ++ TP_PROTO(int irq, const struct irqaction *action, int ret), + + TP_ARGS(irq, action, ret), + +@@ -84,7 +84,7 @@ TRACE_EVENT(irq_handler_exit, + + DECLARE_EVENT_CLASS(softirq, + +- TP_PROTO(struct softirq_action *h, struct softirq_action *vec), ++ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec), + + TP_ARGS(h, vec), + +@@ -113,7 +113,7 @@ DECLARE_EVENT_CLASS(softirq, + */ + DEFINE_EVENT(softirq, softirq_entry, + +- TP_PROTO(struct softirq_action *h, struct softirq_action *vec), ++ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec), + + TP_ARGS(h, vec) + ); +@@ -131,7 +131,7 @@ DEFINE_EVENT(softirq, softirq_entry, + */ + DEFINE_EVENT(softirq, softirq_exit, + +- TP_PROTO(struct softirq_action *h, struct softirq_action *vec), ++ TP_PROTO(const struct softirq_action *h, const struct softirq_action *vec), + + TP_ARGS(h, vec) + ); +diff -urNp linux-2.6.35.4/include/video/uvesafb.h linux-2.6.35.4/include/video/uvesafb.h +--- linux-2.6.35.4/include/video/uvesafb.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/include/video/uvesafb.h 2010-09-17 20:12:09.000000000 -0400 +@@ -177,6 +177,7 @@ struct uvesafb_par { + u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */ + u8 pmi_setpal; /* PMI for palette changes */ + u16 *pmi_base; /* protected mode interface location */ ++ u8 *pmi_code; /* protected mode code location */ + void *pmi_start; + void *pmi_pal; + u8 *vbe_state_orig; /* +diff -urNp linux-2.6.35.4/init/do_mounts.c linux-2.6.35.4/init/do_mounts.c +--- linux-2.6.35.4/init/do_mounts.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/do_mounts.c 2010-09-17 20:12:09.000000000 -0400 +@@ -217,11 +217,11 @@ static void __init get_fs_names(char *pa + + static int __init do_mount_root(char *name, char *fs, int flags, void *data) + { +- int err = sys_mount(name, "/root", fs, flags, data); ++ int err = sys_mount((__force char __user *)name, (__force char __user *)"/root", (__force char __user *)fs, flags, (__force void __user *)data); + if (err) + return err; + +- sys_chdir("/root"); ++ sys_chdir((__force char __user *)"/root"); + ROOT_DEV = current->fs->pwd.mnt->mnt_sb->s_dev; + printk("VFS: Mounted root (%s filesystem)%s on device %u:%u.\n", + current->fs->pwd.mnt->mnt_sb->s_type->name, +@@ -312,18 +312,18 @@ void __init change_floppy(char *fmt, ... 
+ va_start(args, fmt); + vsprintf(buf, fmt, args); + va_end(args); +- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0); ++ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0); + if (fd >= 0) { + sys_ioctl(fd, FDEJECT, 0); + sys_close(fd); + } + printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf); +- fd = sys_open("/dev/console", O_RDWR, 0); ++ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0); + if (fd >= 0) { + sys_ioctl(fd, TCGETS, (long)&termios); + termios.c_lflag &= ~ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); +- sys_read(fd, &c, 1); ++ sys_read(fd, (char __user *)&c, 1); + termios.c_lflag |= ICANON; + sys_ioctl(fd, TCSETSF, (long)&termios); + sys_close(fd); +@@ -417,6 +417,6 @@ void __init prepare_namespace(void) + mount_root(); + out: + devtmpfs_mount("dev"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((__force char __user *)"."); + } +diff -urNp linux-2.6.35.4/init/do_mounts.h linux-2.6.35.4/init/do_mounts.h +--- linux-2.6.35.4/init/do_mounts.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/do_mounts.h 2010-09-17 20:12:09.000000000 -0400 +@@ -15,15 +15,15 @@ extern int root_mountflags; + + static inline int create_dev(char *name, dev_t dev) + { +- sys_unlink(name); +- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev)); ++ sys_unlink((__force char __user *)name); ++ return sys_mknod((__force char __user *)name, S_IFBLK|0600, new_encode_dev(dev)); + } + + #if BITS_PER_LONG == 32 + static inline u32 bstat(char *name) + { + struct stat64 stat; +- if (sys_stat64(name, &stat) != 0) ++ if (sys_stat64((__force char __user *)name, (__force struct stat64 __user *)&stat) != 0) + return 0; + if (!S_ISBLK(stat.st_mode)) + return 0; +diff -urNp linux-2.6.35.4/init/do_mounts_initrd.c linux-2.6.35.4/init/do_mounts_initrd.c +--- linux-2.6.35.4/init/do_mounts_initrd.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/do_mounts_initrd.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,13 +43,13 @@ static void __init handle_initrd(void) + create_dev("/dev/root.old", Root_RAM0); + /* mount initrd on rootfs' /root */ + mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY); +- sys_mkdir("/old", 0700); +- root_fd = sys_open("/", 0, 0); +- old_fd = sys_open("/old", 0, 0); ++ sys_mkdir((__force const char __user *)"/old", 0700); ++ root_fd = sys_open((__force const char __user *)"/", 0, 0); ++ old_fd = sys_open((__force const char __user *)"/old", 0, 0); + /* move initrd over / and chdir/chroot in initrd root */ +- sys_chdir("/root"); +- sys_mount(".", "/", NULL, MS_MOVE, NULL); +- sys_chroot("."); ++ sys_chdir((__force const char __user *)"/root"); ++ sys_mount((__force char __user *)".", (__force char __user *)"/", NULL, MS_MOVE, NULL); ++ sys_chroot((__force const char __user *)"."); + + /* + * In case that a resume from disk is carried out by linuxrc or one of +@@ -66,15 +66,15 @@ static void __init handle_initrd(void) + + /* move initrd to rootfs' /old */ + sys_fchdir(old_fd); +- sys_mount("/", ".", NULL, MS_MOVE, NULL); ++ sys_mount((__force char __user *)"/", (__force char __user *)".", NULL, MS_MOVE, NULL); + /* switch root and cwd back to / of rootfs */ + sys_fchdir(root_fd); +- sys_chroot("."); ++ sys_chroot((__force const char __user *)"."); + sys_close(old_fd); + sys_close(root_fd); + + if (new_decode_dev(real_root_dev) == Root_RAM0) { +- sys_chdir("/old"); ++ sys_chdir((__force const char 
__user *)"/old"); + return; + } + +@@ -82,17 +82,17 @@ static void __init handle_initrd(void) + mount_root(); + + printk(KERN_NOTICE "Trying to move old root to /initrd ... "); +- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL); ++ error = sys_mount((__force char __user *)"/old", (__force char __user *)"/root/initrd", NULL, MS_MOVE, NULL); + if (!error) + printk("okay\n"); + else { +- int fd = sys_open("/dev/root.old", O_RDWR, 0); ++ int fd = sys_open((__force const char __user *)"/dev/root.old", O_RDWR, 0); + if (error == -ENOENT) + printk("/initrd does not exist. Ignored.\n"); + else + printk("failed\n"); + printk(KERN_NOTICE "Unmounting old root\n"); +- sys_umount("/old", MNT_DETACH); ++ sys_umount((__force char __user *)"/old", MNT_DETACH); + printk(KERN_NOTICE "Trying to free ramdisk memory ... "); + if (fd < 0) { + error = fd; +@@ -115,11 +115,11 @@ int __init initrd_load(void) + * mounted in the normal path. + */ + if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) { +- sys_unlink("/initrd.image"); ++ sys_unlink((__force const char __user *)"/initrd.image"); + handle_initrd(); + return 1; + } + } +- sys_unlink("/initrd.image"); ++ sys_unlink((__force const char __user *)"/initrd.image"); + return 0; + } +diff -urNp linux-2.6.35.4/init/do_mounts_md.c linux-2.6.35.4/init/do_mounts_md.c +--- linux-2.6.35.4/init/do_mounts_md.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/do_mounts_md.c 2010-09-17 20:12:09.000000000 -0400 +@@ -170,7 +170,7 @@ static void __init md_setup_drive(void) + partitioned ? "_d" : "", minor, + md_setup_args[ent].device_names); + +- fd = sys_open(name, 0, 0); ++ fd = sys_open((__force char __user *)name, 0, 0); + if (fd < 0) { + printk(KERN_ERR "md: open failed - cannot start " + "array %s\n", name); +@@ -233,7 +233,7 @@ static void __init md_setup_drive(void) + * array without it + */ + sys_close(fd); +- fd = sys_open(name, 0, 0); ++ fd = sys_open((__force char __user *)name, 0, 0); + sys_ioctl(fd, BLKRRPART, 0); + } + sys_close(fd); +@@ -283,7 +283,7 @@ static void __init autodetect_raid(void) + + wait_for_device_probe(); + +- fd = sys_open("/dev/md0", 0, 0); ++ fd = sys_open((__force char __user *)"/dev/md0", 0, 0); + if (fd >= 0) { + sys_ioctl(fd, RAID_AUTORUN, raid_autopart); + sys_close(fd); +diff -urNp linux-2.6.35.4/init/initramfs.c linux-2.6.35.4/init/initramfs.c +--- linux-2.6.35.4/init/initramfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/initramfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -74,7 +74,7 @@ static void __init free_hash(void) + } + } + +-static long __init do_utime(char __user *filename, time_t mtime) ++static long __init do_utime(__force char __user *filename, time_t mtime) + { + struct timespec t[2]; + +@@ -109,7 +109,7 @@ static void __init dir_utime(void) + struct dir_entry *de, *tmp; + list_for_each_entry_safe(de, tmp, &dir_list, list) { + list_del(&de->list); +- do_utime(de->name, de->mtime); ++ do_utime((__force char __user *)de->name, de->mtime); + kfree(de->name); + kfree(de); + } +@@ -271,7 +271,7 @@ static int __init maybe_link(void) + if (nlink >= 2) { + char *old = find_link(major, minor, ino, mode, collected); + if (old) +- return (sys_link(old, collected) < 0) ? -1 : 1; ++ return (sys_link((__force char __user *)old, (__force char __user *)collected) < 0) ? 
-1 : 1; + } + return 0; + } +@@ -280,11 +280,11 @@ static void __init clean_path(char *path + { + struct stat st; + +- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) { ++ if (!sys_newlstat((__force char __user *)path, (__force struct stat __user *)&st) && (st.st_mode^mode) & S_IFMT) { + if (S_ISDIR(st.st_mode)) +- sys_rmdir(path); ++ sys_rmdir((__force char __user *)path); + else +- sys_unlink(path); ++ sys_unlink((__force char __user *)path); + } + } + +@@ -305,7 +305,7 @@ static int __init do_name(void) + int openflags = O_WRONLY|O_CREAT; + if (ml != 1) + openflags |= O_TRUNC; +- wfd = sys_open(collected, openflags, mode); ++ wfd = sys_open((__force char __user *)collected, openflags, mode); + + if (wfd >= 0) { + sys_fchown(wfd, uid, gid); +@@ -317,17 +317,17 @@ static int __init do_name(void) + } + } + } else if (S_ISDIR(mode)) { +- sys_mkdir(collected, mode); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); ++ sys_mkdir((__force char __user *)collected, mode); ++ sys_chown((__force char __user *)collected, uid, gid); ++ sys_chmod((__force char __user *)collected, mode); + dir_add(collected, mtime); + } else if (S_ISBLK(mode) || S_ISCHR(mode) || + S_ISFIFO(mode) || S_ISSOCK(mode)) { + if (maybe_link() == 0) { +- sys_mknod(collected, mode, rdev); +- sys_chown(collected, uid, gid); +- sys_chmod(collected, mode); +- do_utime(collected, mtime); ++ sys_mknod((__force char __user *)collected, mode, rdev); ++ sys_chown((__force char __user *)collected, uid, gid); ++ sys_chmod((__force char __user *)collected, mode); ++ do_utime((__force char __user *)collected, mtime); + } + } + return 0; +@@ -336,15 +336,15 @@ static int __init do_name(void) + static int __init do_copy(void) + { + if (count >= body_len) { +- sys_write(wfd, victim, body_len); ++ sys_write(wfd, (__force char __user *)victim, body_len); + sys_close(wfd); +- do_utime(vcollected, mtime); ++ do_utime((__force char __user *)vcollected, mtime); + kfree(vcollected); + eat(body_len); + state = SkipIt; + return 0; + } else { +- sys_write(wfd, victim, count); ++ sys_write(wfd, (__force char __user *)victim, count); + body_len -= count; + eat(count); + return 1; +@@ -355,9 +355,9 @@ static int __init do_symlink(void) + { + collected[N_ALIGN(name_len) + body_len] = '\0'; + clean_path(collected, 0); +- sys_symlink(collected + N_ALIGN(name_len), collected); +- sys_lchown(collected, uid, gid); +- do_utime(collected, mtime); ++ sys_symlink((__force char __user *)collected + N_ALIGN(name_len), (__force char __user *)collected); ++ sys_lchown((__force char __user *)collected, uid, gid); ++ do_utime((__force char __user *)collected, mtime); + state = SkipIt; + next_state = Reset; + return 0; +diff -urNp linux-2.6.35.4/init/Kconfig linux-2.6.35.4/init/Kconfig +--- linux-2.6.35.4/init/Kconfig 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/Kconfig 2010-09-17 20:12:09.000000000 -0400 +@@ -1063,7 +1063,7 @@ config SLUB_DEBUG + + config COMPAT_BRK + bool "Disable heap randomization" +- default y ++ default n + help + Randomizing heap placement makes heap exploits harder, but it + also breaks ancient binaries (including anything libc5 based). 
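Editorial note on the init/ hunks above: the (__force ... __user *) casts are annotations for the sparse static checker and have no runtime effect. Early-init code calls sys_* entry points directly with kernel-space pointers, which sparse would otherwise report as an address-space violation. A minimal sketch of the mechanism follows, with a hypothetical sys_demo() standing in for a real syscall; the macro definitions mirror the kernel's compiler.h:

	#ifdef __CHECKER__
	# define __user		__attribute__((noderef, address_space(1)))
	# define __force	__attribute__((force))
	#else
	# define __user
	# define __force
	#endif

	/* syscall-style prototype: declared to take a userland pointer */
	long sys_demo(const char __user *path);

	static void call_from_early_init(void)
	{
		/*
		 * "/dev/console" is a kernel-space string, so passing it
		 * here violates the declared address space. The __force
		 * cast tells sparse the mismatch is deliberate; the
		 * compiled object code is identical with or without it.
		 */
		sys_demo((__force const char __user *)"/dev/console");
	}

Later kernels resolve the same tension differently, adding ksys_*/init_* helpers so early-init code never calls the __user-typed entry points at all; the casts here keep the 2.6.35 call sites unchanged while silencing the checker.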
+diff -urNp linux-2.6.35.4/init/main.c linux-2.6.35.4/init/main.c +--- linux-2.6.35.4/init/main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/main.c 2010-09-17 20:12:37.000000000 -0400 +@@ -98,6 +98,7 @@ static inline void mark_rodata_ro(void) + #ifdef CONFIG_TC + extern void tc_init(void); + #endif ++extern void grsecurity_init(void); + + enum system_states system_state __read_mostly; + EXPORT_SYMBOL(system_state); +@@ -200,6 +201,50 @@ static int __init set_reset_devices(char + + __setup("reset_devices", set_reset_devices); + ++#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF) ++extern void pax_enter_kernel_user(void); ++extern void pax_exit_kernel_user(void); ++extern pgdval_t clone_pgd_mask; ++#endif ++ ++#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF) ++static int __init setup_pax_nouderef(char *str) ++{ ++#ifdef CONFIG_X86_32 ++ unsigned int cpu; ++ ++ for (cpu = 0; cpu < NR_CPUS; cpu++) { ++ get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].type = 3; ++ get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].limit = 0xf; ++ } ++ asm("mov %0, %%ds" : : "r" (__KERNEL_DS) : "memory"); ++ asm("mov %0, %%es" : : "r" (__KERNEL_DS) : "memory"); ++ asm("mov %0, %%ss" : : "r" (__KERNEL_DS) : "memory"); ++#else ++ char *p; ++ p = (char *)pax_enter_kernel_user; ++ *p = 0xc3; ++ p = (char *)pax_exit_kernel_user; ++ *p = 0xc3; ++ clone_pgd_mask = ~(pgdval_t)0UL; ++#endif ++ ++ return 0; ++} ++early_param("pax_nouderef", setup_pax_nouderef); ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++unsigned int pax_softmode; ++ ++static int __init setup_pax_softmode(char *str) ++{ ++ get_option(&str, &pax_softmode); ++ return 1; ++} ++__setup("pax_softmode=", setup_pax_softmode); ++#endif ++ + static char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, }; + char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, }; + static const char *panic_later, *panic_param; +@@ -725,52 +770,53 @@ int initcall_debug; + core_param(initcall_debug, initcall_debug, bool, 0644); + + static char msgbuf[64]; +-static struct boot_trace_call call; +-static struct boot_trace_ret ret; ++static struct boot_trace_call trace_call; ++static struct boot_trace_ret trace_ret; + + int do_one_initcall(initcall_t fn) + { + int count = preempt_count(); + ktime_t calltime, delta, rettime; ++ const char *msg1 = "", *msg2 = ""; + + if (initcall_debug) { +- call.caller = task_pid_nr(current); +- printk("calling %pF @ %i\n", fn, call.caller); ++ trace_call.caller = task_pid_nr(current); ++ printk("calling %pF @ %i\n", fn, trace_call.caller); + calltime = ktime_get(); +- trace_boot_call(&call, fn); ++ trace_boot_call(&trace_call, fn); + enable_boot_trace(); + } + +- ret.result = fn(); ++ trace_ret.result = fn(); + + if (initcall_debug) { + disable_boot_trace(); + rettime = ktime_get(); + delta = ktime_sub(rettime, calltime); +- ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10; +- trace_boot_ret(&ret, fn); ++ trace_ret.duration = (unsigned long long) ktime_to_ns(delta) >> 10; ++ trace_boot_ret(&trace_ret, fn); + printk("initcall %pF returned %d after %Ld usecs\n", fn, +- ret.result, ret.duration); ++ trace_ret.result, trace_ret.duration); + } + + msgbuf[0] = 0; + +- if (ret.result && ret.result != -ENODEV && initcall_debug) +- sprintf(msgbuf, "error code %d ", ret.result); ++ if (trace_ret.result && trace_ret.result != -ENODEV && initcall_debug) ++ sprintf(msgbuf, "error code %d ", trace_ret.result); + + if (preempt_count() != count) { +- strlcat(msgbuf, "preemption imbalance ", sizeof(msgbuf)); ++ msg1 
= " preemption imbalance"; + preempt_count() = count; + } + if (irqs_disabled()) { +- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf)); ++ msg2 = " disabled interrupts"; + local_irq_enable(); + } +- if (msgbuf[0]) { +- printk("initcall %pF returned with %s\n", fn, msgbuf); ++ if (msgbuf[0] || *msg1 || *msg2) { ++ printk("initcall %pF returned with %s%s%s\n", fn, msgbuf, msg1, msg2); + } + +- return ret.result; ++ return trace_ret.result; + } + + +@@ -902,7 +948,7 @@ static int __init kernel_init(void * unu + do_basic_setup(); + + /* Open the /dev/console on the rootfs, this should never fail */ +- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0) ++ if (sys_open((__force const char __user *) "/dev/console", O_RDWR, 0) < 0) + printk(KERN_WARNING "Warning: unable to open an initial console.\n"); + + (void) sys_dup(0); +@@ -915,11 +961,13 @@ static int __init kernel_init(void * unu + if (!ramdisk_execute_command) + ramdisk_execute_command = "/init"; + +- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) { ++ if (sys_access((__force const char __user *) ramdisk_execute_command, 0) != 0) { + ramdisk_execute_command = NULL; + prepare_namespace(); + } + ++ grsecurity_init(); ++ + /* + * Ok, we have completed the initial bootup, and + * we're essentially up and running. Get rid of the +diff -urNp linux-2.6.35.4/init/noinitramfs.c linux-2.6.35.4/init/noinitramfs.c +--- linux-2.6.35.4/init/noinitramfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/init/noinitramfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -29,17 +29,17 @@ static int __init default_rootfs(void) + { + int err; + +- err = sys_mkdir("/dev", 0755); ++ err = sys_mkdir((const char __user *)"/dev", 0755); + if (err < 0) + goto out; + +- err = sys_mknod((const char __user *) "/dev/console", ++ err = sys_mknod((__force const char __user *) "/dev/console", + S_IFCHR | S_IRUSR | S_IWUSR, + new_encode_dev(MKDEV(5, 1))); + if (err < 0) + goto out; + +- err = sys_mkdir("/root", 0700); ++ err = sys_mkdir((const char __user *)"/root", 0700); + if (err < 0) + goto out; + +diff -urNp linux-2.6.35.4/ipc/mqueue.c linux-2.6.35.4/ipc/mqueue.c +--- linux-2.6.35.4/ipc/mqueue.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/ipc/mqueue.c 2010-09-17 20:12:37.000000000 -0400 +@@ -153,6 +153,7 @@ static struct inode *mqueue_get_inode(st + mq_bytes = (mq_msg_tblsz + + (info->attr.mq_maxmsg * info->attr.mq_msgsize)); + ++ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1); + spin_lock(&mq_lock); + if (u->mq_bytes + mq_bytes < u->mq_bytes || + u->mq_bytes + mq_bytes > +diff -urNp linux-2.6.35.4/ipc/shm.c linux-2.6.35.4/ipc/shm.c +--- linux-2.6.35.4/ipc/shm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/ipc/shm.c 2010-09-17 20:12:37.000000000 -0400 +@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_name + static int sysvipc_shm_proc_show(struct seq_file *s, void *it); + #endif + ++#ifdef CONFIG_GRKERNSEC ++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime, const uid_t cuid, ++ const int shmid); ++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid, ++ const time_t shm_createtime); ++#endif ++ + void shm_init_ns(struct ipc_namespace *ns) + { + ns->shm_ctlmax = SHMMAX; +@@ -395,6 +403,14 @@ static int newseg(struct ipc_namespace * + shp->shm_lprid = 0; + shp->shm_atim = shp->shm_dtim = 0; + shp->shm_ctim = get_seconds(); ++#ifdef CONFIG_GRKERNSEC ++ { ++ struct timespec timeval; ++ 
do_posix_clock_monotonic_gettime(&timeval); ++ ++ shp->shm_createtime = timeval.tv_sec; ++ } ++#endif + shp->shm_segsz = size; + shp->shm_nattch = 0; + shp->shm_file = file; +@@ -877,9 +893,21 @@ long do_shmat(int shmid, char __user *sh + if (err) + goto out_unlock; + ++#ifdef CONFIG_GRKERNSEC ++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime, ++ shp->shm_perm.cuid, shmid) || ++ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) { ++ err = -EACCES; ++ goto out_unlock; ++ } ++#endif ++ + path = shp->shm_file->f_path; + path_get(&path); + shp->shm_nattch++; ++#ifdef CONFIG_GRKERNSEC ++ shp->shm_lapid = current->pid; ++#endif + size = i_size_read(path.dentry->d_inode); + shm_unlock(shp); + +diff -urNp linux-2.6.35.4/kernel/acct.c linux-2.6.35.4/kernel/acct.c +--- linux-2.6.35.4/kernel/acct.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/acct.c 2010-09-17 20:12:09.000000000 -0400 +@@ -570,7 +570,7 @@ static void do_acct_process(struct bsd_a + */ + flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur; + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY; +- file->f_op->write(file, (char *)&ac, ++ file->f_op->write(file, (__force char __user *)&ac, + sizeof(acct_t), &file->f_pos); + current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim; + set_fs(fs); +diff -urNp linux-2.6.35.4/kernel/capability.c linux-2.6.35.4/kernel/capability.c +--- linux-2.6.35.4/kernel/capability.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/capability.c 2010-09-17 20:12:37.000000000 -0400 +@@ -205,6 +205,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_ + * before modification is attempted and the application + * fails. + */ ++ if (tocopy > ARRAY_SIZE(kdata)) ++ return -EFAULT; ++ + if (copy_to_user(dataptr, kdata, tocopy + * sizeof(struct __user_cap_data_struct))) { + return -EFAULT; +@@ -306,10 +309,21 @@ int capable(int cap) + BUG(); + } + +- if (security_capable(cap) == 0) { ++ if (security_capable(cap) == 0 && gr_is_capable(cap)) { ++ current->flags |= PF_SUPERPRIV; ++ return 1; ++ } ++ return 0; ++} ++ ++int capable_nolog(int cap) ++{ ++ if (security_capable(cap) == 0 && gr_is_capable_nolog(cap)) { + current->flags |= PF_SUPERPRIV; + return 1; + } + return 0; + } ++ + EXPORT_SYMBOL(capable); ++EXPORT_SYMBOL(capable_nolog); +diff -urNp linux-2.6.35.4/kernel/compat.c linux-2.6.35.4/kernel/compat.c +--- linux-2.6.35.4/kernel/compat.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/compat.c 2010-09-17 20:12:37.000000000 -0400 +@@ -13,6 +13,7 @@ + + #include <linux/linkage.h> + #include <linux/compat.h> ++#include <linux/module.h> + #include <linux/errno.h> + #include <linux/time.h> + #include <linux/signal.h> +@@ -1137,3 +1138,24 @@ compat_sys_sysinfo(struct compat_sysinfo + + return 0; + } ++ ++/* ++ * Allocate user-space memory for the duration of a single system call, ++ * in order to marshall parameters inside a compat thunk. ++ */ ++void __user *compat_alloc_user_space(unsigned long len) ++{ ++ void __user *ptr; ++ ++ /* If len would occupy more than half of the entire compat space... 
*/ ++ if (unlikely(len > (((compat_uptr_t)~0) >> 1))) ++ return NULL; ++ ++ ptr = arch_compat_alloc_user_space(len); ++ ++ if (unlikely(!access_ok(VERIFY_WRITE, ptr, len))) ++ return NULL; ++ ++ return ptr; ++} ++EXPORT_SYMBOL_GPL(compat_alloc_user_space); +diff -urNp linux-2.6.35.4/kernel/configs.c linux-2.6.35.4/kernel/configs.c +--- linux-2.6.35.4/kernel/configs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/configs.c 2010-09-17 20:12:37.000000000 -0400 +@@ -73,8 +73,19 @@ static int __init ikconfig_init(void) + struct proc_dir_entry *entry; + + /* create the current config file */ ++#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM) ++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL, ++ &ikconfig_file_ops); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL, ++ &ikconfig_file_ops); ++#endif ++#else + entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL, + &ikconfig_file_ops); ++#endif ++ + if (!entry) + return -ENOMEM; + +diff -urNp linux-2.6.35.4/kernel/cred.c linux-2.6.35.4/kernel/cred.c +--- linux-2.6.35.4/kernel/cred.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/cred.c 2010-09-17 20:12:37.000000000 -0400 +@@ -489,6 +489,8 @@ int commit_creds(struct cred *new) + + get_cred(new); /* we will require a ref for the subj creds too */ + ++ gr_set_role_label(task, new->uid, new->gid); ++ + /* dumpability changes */ + if (old->euid != new->euid || + old->egid != new->egid || +diff -urNp linux-2.6.35.4/kernel/debug/debug_core.c linux-2.6.35.4/kernel/debug/debug_core.c +--- linux-2.6.35.4/kernel/debug/debug_core.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/debug/debug_core.c 2010-09-17 20:12:09.000000000 -0400 +@@ -71,7 +71,7 @@ int kgdb_io_module_registered; + /* Guard for recursive entry */ + static int exception_level; + +-struct kgdb_io *dbg_io_ops; ++const struct kgdb_io *dbg_io_ops; + static DEFINE_SPINLOCK(kgdb_registration_lock); + + /* kgdb console driver is loaded */ +@@ -871,7 +871,7 @@ static void kgdb_initial_breakpoint(void + * + * Register it with the KGDB core. + */ +-int kgdb_register_io_module(struct kgdb_io *new_dbg_io_ops) ++int kgdb_register_io_module(const struct kgdb_io *new_dbg_io_ops) + { + int err; + +@@ -916,7 +916,7 @@ EXPORT_SYMBOL_GPL(kgdb_register_io_modul + * + * Unregister it with the KGDB core. 
+ */ +-void kgdb_unregister_io_module(struct kgdb_io *old_dbg_io_ops) ++void kgdb_unregister_io_module(const struct kgdb_io *old_dbg_io_ops) + { + BUG_ON(kgdb_connected); + +diff -urNp linux-2.6.35.4/kernel/debug/kdb/kdb_main.c linux-2.6.35.4/kernel/debug/kdb/kdb_main.c +--- linux-2.6.35.4/kernel/debug/kdb/kdb_main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/debug/kdb/kdb_main.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1872,7 +1872,7 @@ static int kdb_lsmod(int argc, const cha + list_for_each_entry(mod, kdb_modules, list) { + + kdb_printf("%-20s%8u 0x%p ", mod->name, +- mod->core_size, (void *)mod); ++ mod->core_size_rx + mod->core_size_rw, (void *)mod); + #ifdef CONFIG_MODULE_UNLOAD + kdb_printf("%4d ", module_refcount(mod)); + #endif +@@ -1882,7 +1882,7 @@ static int kdb_lsmod(int argc, const cha + kdb_printf(" (Loading)"); + else + kdb_printf(" (Live)"); +- kdb_printf(" 0x%p", mod->module_core); ++ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); + + #ifdef CONFIG_MODULE_UNLOAD + { +diff -urNp linux-2.6.35.4/kernel/exit.c linux-2.6.35.4/kernel/exit.c +--- linux-2.6.35.4/kernel/exit.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/exit.c 2010-09-17 20:13:49.000000000 -0400 +@@ -56,6 +56,10 @@ + #include <asm/pgtable.h> + #include <asm/mmu_context.h> + ++#ifdef CONFIG_GRKERNSEC ++extern rwlock_t grsec_exec_file_lock; ++#endif ++ + static void exit_mm(struct task_struct * tsk); + + static void __unhash_process(struct task_struct *p, bool group_dead) +@@ -162,6 +166,8 @@ void release_task(struct task_struct * p + struct task_struct *leader; + int zap_leader; + repeat: ++ gr_del_task_from_ip_table(p); ++ + tracehook_prepare_release_task(p); + /* don't need to get the RCU readlock here - the process is dead and + * can't be modifying its own credentials. But shut RCU-lockdep up */ +@@ -331,11 +337,22 @@ static void reparent_to_kthreadd(void) + { + write_lock_irq(&tasklist_lock); + ++#ifdef CONFIG_GRKERNSEC ++ write_lock(&grsec_exec_file_lock); ++ if (current->exec_file) { ++ fput(current->exec_file); ++ current->exec_file = NULL; ++ } ++ write_unlock(&grsec_exec_file_lock); ++#endif ++ + ptrace_unlink(current); + /* Reparent to init */ + current->real_parent = current->parent = kthreadd_task; + list_move_tail(¤t->sibling, ¤t->real_parent->children); + ++ gr_set_kernel_label(current); ++ + /* Set the exit signal to SIGCHLD so we signal init on exit */ + current->exit_signal = SIGCHLD; + +@@ -387,7 +404,7 @@ int allow_signal(int sig) + * know it'll be handled, so that they don't get converted to + * SIGKILL or just silently dropped. + */ +- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2; ++ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2; + recalc_sigpending(); + spin_unlock_irq(¤t->sighand->siglock); + return 0; +@@ -423,6 +440,17 @@ void daemonize(const char *name, ...) + vsnprintf(current->comm, sizeof(current->comm), name, args); + va_end(args); + ++#ifdef CONFIG_GRKERNSEC ++ write_lock(&grsec_exec_file_lock); ++ if (current->exec_file) { ++ fput(current->exec_file); ++ current->exec_file = NULL; ++ } ++ write_unlock(&grsec_exec_file_lock); ++#endif ++ ++ gr_set_kernel_label(current); ++ + /* + * If we were started as result of loading a module, close all of the + * user space pages. 
We don't need them, and if we didn't close them +@@ -960,6 +988,9 @@ NORET_TYPE void do_exit(long code) + tsk->exit_code = code; + taskstats_exit(tsk, group_dead); + ++ gr_acl_handle_psacct(tsk, code); ++ gr_acl_handle_exit(); ++ + exit_mm(tsk); + + if (group_dead) +diff -urNp linux-2.6.35.4/kernel/fork.c linux-2.6.35.4/kernel/fork.c +--- linux-2.6.35.4/kernel/fork.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/fork.c 2010-09-17 20:12:37.000000000 -0400 +@@ -276,7 +276,7 @@ static struct task_struct *dup_task_stru + *stackend = STACK_END_MAGIC; /* for overflow detection */ + + #ifdef CONFIG_CC_STACKPROTECTOR +- tsk->stack_canary = get_random_int(); ++ tsk->stack_canary = pax_get_random_long(); + #endif + + /* One for us, one for whoever does the "release_task()" (usually parent) */ +@@ -298,13 +298,78 @@ out: + } + + #ifdef CONFIG_MMU ++static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct vm_area_struct *mpnt) ++{ ++ struct vm_area_struct *tmp; ++ unsigned long charge; ++ struct mempolicy *pol; ++ struct file *file; ++ ++ charge = 0; ++ if (mpnt->vm_flags & VM_ACCOUNT) { ++ unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; ++ if (security_vm_enough_memory(len)) ++ goto fail_nomem; ++ charge = len; ++ } ++ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!tmp) ++ goto fail_nomem; ++ *tmp = *mpnt; ++ tmp->vm_mm = mm; ++ INIT_LIST_HEAD(&tmp->anon_vma_chain); ++ pol = mpol_dup(vma_policy(mpnt)); ++ if (IS_ERR(pol)) ++ goto fail_nomem_policy; ++ vma_set_policy(tmp, pol); ++ if (anon_vma_fork(tmp, mpnt)) ++ goto fail_nomem_anon_vma_fork; ++ tmp->vm_flags &= ~VM_LOCKED; ++ tmp->vm_next = NULL; ++ tmp->vm_mirror = NULL; ++ file = tmp->vm_file; ++ if (file) { ++ struct inode *inode = file->f_path.dentry->d_inode; ++ struct address_space *mapping = file->f_mapping; ++ ++ get_file(file); ++ if (tmp->vm_flags & VM_DENYWRITE) ++ atomic_dec(&inode->i_writecount); ++ spin_lock(&mapping->i_mmap_lock); ++ if (tmp->vm_flags & VM_SHARED) ++ mapping->i_mmap_writable++; ++ tmp->vm_truncate_count = mpnt->vm_truncate_count; ++ flush_dcache_mmap_lock(mapping); ++ /* insert tmp into the share list, just after mpnt */ ++ vma_prio_tree_add(tmp, mpnt); ++ flush_dcache_mmap_unlock(mapping); ++ spin_unlock(&mapping->i_mmap_lock); ++ } ++ ++ /* ++ * Clear hugetlb-related page reserves for children. This only ++ * affects MAP_PRIVATE mappings. 
Faults generated by the child ++ * are not guaranteed to succeed, even if read-only ++ */ ++ if (is_vm_hugetlb_page(tmp)) ++ reset_vma_resv_huge_pages(tmp); ++ ++ return tmp; ++ ++fail_nomem_anon_vma_fork: ++ mpol_put(pol); ++fail_nomem_policy: ++ kmem_cache_free(vm_area_cachep, tmp); ++fail_nomem: ++ vm_unacct_memory(charge); ++ return NULL; ++} ++ + static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) + { + struct vm_area_struct *mpnt, *tmp, *prev, **pprev; + struct rb_node **rb_link, *rb_parent; + int retval; +- unsigned long charge; +- struct mempolicy *pol; + + down_write(&oldmm->mmap_sem); + flush_cache_dup_mm(oldmm); +@@ -316,8 +381,8 @@ static int dup_mmap(struct mm_struct *mm + mm->locked_vm = 0; + mm->mmap = NULL; + mm->mmap_cache = NULL; +- mm->free_area_cache = oldmm->mmap_base; +- mm->cached_hole_size = ~0UL; ++ mm->free_area_cache = oldmm->free_area_cache; ++ mm->cached_hole_size = oldmm->cached_hole_size; + mm->map_count = 0; + cpumask_clear(mm_cpumask(mm)); + mm->mm_rb = RB_ROOT; +@@ -330,8 +395,6 @@ static int dup_mmap(struct mm_struct *mm + + prev = NULL; + for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) { +- struct file *file; +- + if (mpnt->vm_flags & VM_DONTCOPY) { + long pages = vma_pages(mpnt); + mm->total_vm -= pages; +@@ -339,56 +402,13 @@ static int dup_mmap(struct mm_struct *mm + -pages); + continue; + } +- charge = 0; +- if (mpnt->vm_flags & VM_ACCOUNT) { +- unsigned int len = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT; +- if (security_vm_enough_memory(len)) +- goto fail_nomem; +- charge = len; +- } +- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); +- if (!tmp) +- goto fail_nomem; +- *tmp = *mpnt; +- INIT_LIST_HEAD(&tmp->anon_vma_chain); +- pol = mpol_dup(vma_policy(mpnt)); +- retval = PTR_ERR(pol); +- if (IS_ERR(pol)) +- goto fail_nomem_policy; +- vma_set_policy(tmp, pol); +- if (anon_vma_fork(tmp, mpnt)) +- goto fail_nomem_anon_vma_fork; +- tmp->vm_flags &= ~VM_LOCKED; +- tmp->vm_mm = mm; +- tmp->vm_next = tmp->vm_prev = NULL; +- file = tmp->vm_file; +- if (file) { +- struct inode *inode = file->f_path.dentry->d_inode; +- struct address_space *mapping = file->f_mapping; +- +- get_file(file); +- if (tmp->vm_flags & VM_DENYWRITE) +- atomic_dec(&inode->i_writecount); +- spin_lock(&mapping->i_mmap_lock); +- if (tmp->vm_flags & VM_SHARED) +- mapping->i_mmap_writable++; +- tmp->vm_truncate_count = mpnt->vm_truncate_count; +- flush_dcache_mmap_lock(mapping); +- /* insert tmp into the share list, just after mpnt */ +- vma_prio_tree_add(tmp, mpnt); +- flush_dcache_mmap_unlock(mapping); +- spin_unlock(&mapping->i_mmap_lock); ++ tmp = dup_vma(mm, mpnt); ++ if (!tmp) { ++ retval = -ENOMEM; ++ goto out; + } + + /* +- * Clear hugetlb-related page reserves for children. This only +- * affects MAP_PRIVATE mappings. Faults generated by the child +- * are not guaranteed to succeed, even if read-only +- */ +- if (is_vm_hugetlb_page(tmp)) +- reset_vma_resv_huge_pages(tmp); +- +- /* + * Link in the new vma and copy the page table entries. 
+ */ + *pprev = tmp; +@@ -409,6 +429,31 @@ static int dup_mmap(struct mm_struct *mm + if (retval) + goto out; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) { ++ struct vm_area_struct *mpnt_m; ++ ++ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) { ++ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm); ++ ++ if (!mpnt->vm_mirror) ++ continue; ++ ++ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) { ++ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt); ++ mpnt->vm_mirror = mpnt_m; ++ } else { ++ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm); ++ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror; ++ mpnt_m->vm_mirror->vm_mirror = mpnt_m; ++ mpnt->vm_mirror->vm_mirror = mpnt; ++ } ++ } ++ BUG_ON(mpnt_m); ++ } ++#endif ++ + /* a new mm has just been created */ + arch_dup_mmap(oldmm, mm); + retval = 0; +@@ -417,14 +462,6 @@ out: + flush_tlb_mm(oldmm); + up_write(&oldmm->mmap_sem); + return retval; +-fail_nomem_anon_vma_fork: +- mpol_put(pol); +-fail_nomem_policy: +- kmem_cache_free(vm_area_cachep, tmp); +-fail_nomem: +- retval = -ENOMEM; +- vm_unacct_memory(charge); +- goto out; + } + + static inline int mm_alloc_pgd(struct mm_struct * mm) +@@ -760,13 +797,14 @@ static int copy_fs(unsigned long clone_f + write_unlock(&fs->lock); + return -EAGAIN; + } +- fs->users++; ++ atomic_inc(&fs->users); + write_unlock(&fs->lock); + return 0; + } + tsk->fs = copy_fs_struct(fs); + if (!tsk->fs) + return -ENOMEM; ++ gr_set_chroot_entries(tsk, &tsk->fs->root); + return 0; + } + +@@ -1019,10 +1057,13 @@ static struct task_struct *copy_process( + DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled); + #endif + retval = -EAGAIN; ++ ++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0); ++ + if (atomic_read(&p->real_cred->user->processes) >= + task_rlimit(p, RLIMIT_NPROC)) { +- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) && +- p->real_cred->user != INIT_USER) ++ if (p->real_cred->user != INIT_USER && ++ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) + goto bad_fork_free; + } + +@@ -1176,6 +1217,8 @@ static struct task_struct *copy_process( + goto bad_fork_free_pid; + } + ++ gr_copy_label(p); ++ + p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL; + /* + * Clear TID on mm_release()? 
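Editorial note on the refcount conversions in this file (fs->users in the copy_fs() hunk above, mirroring the int-to-atomic_t switches in slub_def.h and tty_ldisc.h earlier): a plain int counter mutated from several contexts can lose updates, because ++ compiles to a non-atomic read-modify-write. A toy sketch of the pattern under that assumption — demo_ref is illustrative, not the real fs_struct:

	#include <asm/atomic.h>	/* 2.6.35-era header; linux/atomic.h today */

	struct demo_ref {
		atomic_t users;		/* was: int users; */
	};

	static void demo_get(struct demo_ref *d)
	{
		/* indivisible increment: no lost updates between CPUs */
		atomic_inc(&d->users);
	}

	static int demo_put(struct demo_ref *d)
	{
		/*
		 * atomic_dec_return() makes "who saw zero" unambiguous,
		 * so exactly one caller tears the object down -- the same
		 * test the unshare() hunk below applies to fs->users.
		 */
		return atomic_dec_return(&d->users) == 0;
	}

The payoff is visible in the unshare_fs() hunk that follows: once the counter is atomic, the fast-path check no longer needs to take fs->lock at all.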
+@@ -1328,6 +1371,8 @@ bad_fork_cleanup_count: + bad_fork_free: + free_task(p); + fork_out: ++ gr_log_forkfail(retval); ++ + return ERR_PTR(retval); + } + +@@ -1433,6 +1478,8 @@ long do_fork(unsigned long clone_flags, + if (clone_flags & CLONE_PARENT_SETTID) + put_user(nr, parent_tidptr); + ++ gr_handle_brute_check(); ++ + if (clone_flags & CLONE_VFORK) { + p->vfork_done = &vfork; + init_completion(&vfork); +@@ -1557,7 +1604,7 @@ static int unshare_fs(unsigned long unsh + return 0; + + /* don't need lock here; in the worst case we'll do useless copy */ +- if (fs->users == 1) ++ if (atomic_read(&fs->users) == 1) + return 0; + + *new_fsp = copy_fs_struct(fs); +@@ -1680,7 +1727,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, + fs = current->fs; + write_lock(&fs->lock); + current->fs = new_fs; +- if (--fs->users) ++ gr_set_chroot_entries(current, ¤t->fs->root); ++ if (atomic_dec_return(&fs->users)) + new_fs = NULL; + else + new_fs = fs; +diff -urNp linux-2.6.35.4/kernel/futex.c linux-2.6.35.4/kernel/futex.c +--- linux-2.6.35.4/kernel/futex.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/futex.c 2010-09-17 20:12:37.000000000 -0400 +@@ -54,6 +54,7 @@ + #include <linux/mount.h> + #include <linux/pagemap.h> + #include <linux/syscalls.h> ++#include <linux/ptrace.h> + #include <linux/signal.h> + #include <linux/module.h> + #include <linux/magic.h> +@@ -221,6 +222,11 @@ get_futex_key(u32 __user *uaddr, int fsh + struct page *page; + int err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE) ++ return -EFAULT; ++#endif ++ + /* + * The futex address must be "naturally" aligned. + */ +@@ -1843,7 +1849,7 @@ retry: + + restart = ¤t_thread_info()->restart_block; + restart->fn = futex_wait_restart; +- restart->futex.uaddr = (u32 *)uaddr; ++ restart->futex.uaddr = uaddr; + restart->futex.val = val; + restart->futex.time = abs_time->tv64; + restart->futex.bitset = bitset; +@@ -2376,7 +2382,9 @@ SYSCALL_DEFINE3(get_robust_list, int, pi + { + struct robust_list_head __user *head; + unsigned long ret; ++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP + const struct cred *cred = current_cred(), *pcred; ++#endif + + if (!futex_cmpxchg_enabled) + return -ENOSYS; +@@ -2392,11 +2400,16 @@ SYSCALL_DEFINE3(get_robust_list, int, pi + if (!p) + goto err_unlock; + ret = -EPERM; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ goto err_unlock; ++#else + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && + !capable(CAP_SYS_PTRACE)) + goto err_unlock; ++#endif + head = p->robust_list; + rcu_read_unlock(); + } +@@ -2458,7 +2471,7 @@ retry: + */ + static inline int fetch_robust_entry(struct robust_list __user **entry, + struct robust_list __user * __user *head, +- int *pi) ++ unsigned int *pi) + { + unsigned long uentry; + +diff -urNp linux-2.6.35.4/kernel/futex_compat.c linux-2.6.35.4/kernel/futex_compat.c +--- linux-2.6.35.4/kernel/futex_compat.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/futex_compat.c 2010-09-17 20:12:37.000000000 -0400 +@@ -10,6 +10,7 @@ + #include <linux/compat.h> + #include <linux/nsproxy.h> + #include <linux/futex.h> ++#include <linux/ptrace.h> + + #include <asm/uaccess.h> + +@@ -135,7 +136,10 @@ compat_sys_get_robust_list(int pid, comp + { + struct compat_robust_list_head __user *head; + unsigned long ret; +- const struct cred *cred = current_cred(), *pcred; ++#ifndef CONFIG_GRKERNSEC_PROC_MEMMAP ++ const struct cred *cred = current_cred(); ++ 
const struct cred *pcred; ++#endif + + if (!futex_cmpxchg_enabled) + return -ENOSYS; +@@ -151,11 +155,16 @@ compat_sys_get_robust_list(int pid, comp + if (!p) + goto err_unlock; + ret = -EPERM; ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (!ptrace_may_access(p, PTRACE_MODE_READ)) ++ goto err_unlock; ++#else + pcred = __task_cred(p); + if (cred->euid != pcred->euid && + cred->euid != pcred->uid && + !capable(CAP_SYS_PTRACE)) + goto err_unlock; ++#endif + head = p->compat_robust_list; + rcu_read_unlock(); + } +diff -urNp linux-2.6.35.4/kernel/gcov/base.c linux-2.6.35.4/kernel/gcov/base.c +--- linux-2.6.35.4/kernel/gcov/base.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/gcov/base.c 2010-09-17 20:12:09.000000000 -0400 +@@ -102,11 +102,6 @@ void gcov_enable_events(void) + } + + #ifdef CONFIG_MODULES +-static inline int within(void *addr, void *start, unsigned long size) +-{ +- return ((addr >= start) && (addr < start + size)); +-} +- + /* Update list and generate events when modules are unloaded. */ + static int gcov_module_notifier(struct notifier_block *nb, unsigned long event, + void *data) +@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct n + prev = NULL; + /* Remove entries located in module from linked list. */ + for (info = gcov_info_head; info; info = info->next) { +- if (within(info, mod->module_core, mod->core_size)) { ++ if (within_module_core_rw((unsigned long)info, mod)) { + if (prev) + prev->next = info->next; + else +diff -urNp linux-2.6.35.4/kernel/hrtimer.c linux-2.6.35.4/kernel/hrtimer.c +--- linux-2.6.35.4/kernel/hrtimer.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/hrtimer.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1398,7 +1398,7 @@ void hrtimer_peek_ahead_timers(void) + local_irq_restore(flags); + } + +-static void run_hrtimer_softirq(struct softirq_action *h) ++static void run_hrtimer_softirq(void) + { + hrtimer_peek_ahead_timers(); + } +diff -urNp linux-2.6.35.4/kernel/kallsyms.c linux-2.6.35.4/kernel/kallsyms.c +--- linux-2.6.35.4/kernel/kallsyms.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/kallsyms.c 2010-09-17 20:12:37.000000000 -0400 +@@ -11,6 +11,9 @@ + * Changed the compression method from stem compression to "table lookup" + * compression (see scripts/kallsyms.c for a more complete description) + */ ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++#define __INCLUDED_BY_HIDESYM 1 ++#endif + #include <linux/kallsyms.h> + #include <linux/module.h> + #include <linux/init.h> +@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_mark + + static inline int is_kernel_inittext(unsigned long addr) + { ++ if (system_state != SYSTEM_BOOTING) ++ return 0; ++ + if (addr >= (unsigned long)_sinittext + && addr <= (unsigned long)_einittext) + return 1; + return 0; + } + ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++#ifdef CONFIG_MODULES ++static inline int is_module_text(unsigned long addr) ++{ ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END) ++ return 1; ++ ++ addr = ktla_ktva(addr); ++ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END; ++} ++#else ++static inline int is_module_text(unsigned long addr) ++{ ++ return 0; ++} ++#endif ++#endif ++ + static inline int is_kernel_text(unsigned long addr) + { + if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) || +@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigne + + static inline int is_kernel(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && 
defined(CONFIG_PAX_KERNEXEC) ++ if (is_kernel_text(addr) || is_kernel_inittext(addr)) ++ return 1; ++ ++ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end) ++#else + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end) ++#endif ++ + return 1; + return in_gate_area_no_task(addr); + } + + static int is_ksym_addr(unsigned long addr) + { ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (is_module_text(addr)) ++ return 0; ++#endif ++ + if (all_var) + return is_kernel(addr); + +@@ -416,7 +455,6 @@ static unsigned long get_ksymbol_core(st + + static void reset_iter(struct kallsym_iter *iter, loff_t new_pos) + { +- iter->name[0] = '\0'; + iter->nameoff = get_symbol_offset(new_pos); + iter->pos = new_pos; + } +@@ -464,6 +502,11 @@ static int s_show(struct seq_file *m, vo + { + struct kallsym_iter *iter = m->private; + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ if (current_uid()) ++ return 0; ++#endif ++ + /* Some debugging symbols have no name. Ignore them. */ + if (!iter->name[0]) + return 0; +@@ -504,7 +547,7 @@ static int kallsyms_open(struct inode *i + struct kallsym_iter *iter; + int ret; + +- iter = kmalloc(sizeof(*iter), GFP_KERNEL); ++ iter = kzalloc(sizeof(*iter), GFP_KERNEL); + if (!iter) + return -ENOMEM; + reset_iter(iter, 0); +diff -urNp linux-2.6.35.4/kernel/kmod.c linux-2.6.35.4/kernel/kmod.c +--- linux-2.6.35.4/kernel/kmod.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/kmod.c 2010-09-17 20:12:37.000000000 -0400 +@@ -90,6 +90,18 @@ int __request_module(bool wait, const ch + if (ret) + return ret; + ++#ifdef CONFIG_GRKERNSEC_MODHARDEN ++ /* we could do a tighter check here, but some distros ++ are taking it upon themselves to remove CAP_SYS_MODULE ++ from even root-running apps which cause modules to be ++ auto-loaded ++ */ ++ if (current_uid()) { ++ gr_log_nonroot_mod_load(module_name); ++ return -EPERM; ++ } ++#endif ++ + /* If modprobe needs a service that is in a module, we get a recursive + * loop. Limit the number of running kmod threads to max_threads/2 or + * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method +diff -urNp linux-2.6.35.4/kernel/kprobes.c linux-2.6.35.4/kernel/kprobes.c +--- linux-2.6.35.4/kernel/kprobes.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/kprobes.c 2010-09-17 20:12:09.000000000 -0400 +@@ -183,7 +183,7 @@ static kprobe_opcode_t __kprobes *__get_ + * kernel image and loaded module images reside. This is required + * so x86_64 can correctly handle the %rip-relative fixups. 
+ */ +- kip->insns = module_alloc(PAGE_SIZE); ++ kip->insns = module_alloc_exec(PAGE_SIZE); + if (!kip->insns) { + kfree(kip); + return NULL; +@@ -223,7 +223,7 @@ static int __kprobes collect_one_slot(st + */ + if (!list_is_singular(&kip->list)) { + list_del(&kip->list); +- module_free(NULL, kip->insns); ++ module_free_exec(NULL, kip->insns); + kfree(kip); + } + return 1; +@@ -1709,7 +1709,7 @@ static int __init init_kprobes(void) + { + int i, err = 0; + unsigned long offset = 0, size = 0; +- char *modname, namebuf[128]; ++ char *modname, namebuf[KSYM_NAME_LEN]; + const char *symbol_name; + void *addr; + struct kprobe_blackpoint *kb; +@@ -1835,7 +1835,7 @@ static int __kprobes show_kprobe_addr(st + const char *sym = NULL; + unsigned int i = *(loff_t *) v; + unsigned long offset = 0; +- char *modname, namebuf[128]; ++ char *modname, namebuf[KSYM_NAME_LEN]; + + head = &kprobe_table[i]; + preempt_disable(); +diff -urNp linux-2.6.35.4/kernel/lockdep.c linux-2.6.35.4/kernel/lockdep.c +--- linux-2.6.35.4/kernel/lockdep.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/lockdep.c 2010-09-17 20:12:09.000000000 -0400 +@@ -571,6 +571,10 @@ static int static_obj(void *obj) + end = (unsigned long) &_end, + addr = (unsigned long) obj; + ++#ifdef CONFIG_PAX_KERNEXEC ++ start = ktla_ktva(start); ++#endif ++ + /* + * static variable? + */ +@@ -696,6 +700,7 @@ register_lock_class(struct lockdep_map * + if (!static_obj(lock->key)) { + debug_locks_off(); + printk("INFO: trying to register non-static key.\n"); ++ printk("lock:%pS key:%pS.\n", lock, lock->key); + printk("the code is fine but needs lockdep annotation.\n"); + printk("turning off the locking correctness validator.\n"); + dump_stack(); +diff -urNp linux-2.6.35.4/kernel/lockdep_proc.c linux-2.6.35.4/kernel/lockdep_proc.c +--- linux-2.6.35.4/kernel/lockdep_proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/lockdep_proc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -39,7 +39,7 @@ static void l_stop(struct seq_file *m, v + + static void print_name(struct seq_file *m, struct lock_class *class) + { +- char str[128]; ++ char str[KSYM_NAME_LEN]; + const char *name = class->name; + + if (!name) { +diff -urNp linux-2.6.35.4/kernel/module.c linux-2.6.35.4/kernel/module.c +--- linux-2.6.35.4/kernel/module.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/module.c 2010-09-17 20:12:37.000000000 -0400 +@@ -96,7 +96,8 @@ static BLOCKING_NOTIFIER_HEAD(module_not + + /* Bounds of module allocation, for speeding __module_address. + * Protected by module_mutex. 
*/ +-static unsigned long module_addr_min = -1UL, module_addr_max = 0; ++static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0; ++static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0; + + int register_module_notifier(struct notifier_block * nb) + { +@@ -250,7 +251,7 @@ bool each_symbol(bool (*fn)(const struct + return true; + + list_for_each_entry_rcu(mod, &modules, list) { +- struct symsearch arr[] = { ++ struct symsearch modarr[] = { + { mod->syms, mod->syms + mod->num_syms, mod->crcs, + NOT_GPL_ONLY, false }, + { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms, +@@ -272,7 +273,7 @@ bool each_symbol(bool (*fn)(const struct + #endif + }; + +- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data)) ++ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data)) + return true; + } + return false; +@@ -383,7 +384,7 @@ static inline void __percpu *mod_percpu( + static int percpu_modalloc(struct module *mod, + unsigned long size, unsigned long align) + { +- if (align > PAGE_SIZE) { ++ if (align-1 >= PAGE_SIZE) { + printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n", + mod->name, align, PAGE_SIZE); + align = PAGE_SIZE; +@@ -1562,7 +1563,8 @@ static void free_module(struct module *m + destroy_params(mod->kp, mod->num_kp); + + /* This may be NULL, but that's OK */ +- module_free(mod, mod->module_init); ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); + kfree(mod->args); + percpu_modfree(mod); + #if defined(CONFIG_MODULE_UNLOAD) +@@ -1570,10 +1572,12 @@ static void free_module(struct module *m + free_percpu(mod->refptr); + #endif + /* Free lock-classes: */ +- lockdep_free_key_range(mod->module_core, mod->core_size); ++ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx); ++ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw); + + /* Finally, free the core (containing the module structure) */ +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_core_rx); ++ module_free(mod, mod->module_core_rw); + + #ifdef CONFIG_MPU + update_protections(current->mm); +@@ -1670,7 +1674,9 @@ static int simplify_symbols(Elf_Shdr *se + mod); + /* Ok if resolved. 
*/ + if (ksym && !IS_ERR(ksym)) { ++ pax_open_kernel(); + sym[i].st_value = ksym->value; ++ pax_close_kernel(); + break; + } + +@@ -1690,7 +1696,9 @@ static int simplify_symbols(Elf_Shdr *se + secbase = (unsigned long)mod_percpu(mod); + else + secbase = sechdrs[sym[i].st_shndx].sh_addr; ++ pax_open_kernel(); + sym[i].st_value += secbase; ++ pax_close_kernel(); + break; + } + } +@@ -1751,11 +1759,12 @@ static void layout_sections(struct modul + || s->sh_entsize != ~0UL + || strstarts(secstrings + s->sh_name, ".init")) + continue; +- s->sh_entsize = get_offset(mod, &mod->core_size, s, i); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i); + DEBUGP("\t%s\n", secstrings + s->sh_name); + } +- if (m == 0) +- mod->core_text_size = mod->core_size; + } + + DEBUGP("Init section allocation order:\n"); +@@ -1768,12 +1777,13 @@ static void layout_sections(struct modul + || s->sh_entsize != ~0UL + || !strstarts(secstrings + s->sh_name, ".init")) + continue; +- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i) +- | INIT_OFFSET_MASK); ++ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC)) ++ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i); ++ else ++ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i); ++ s->sh_entsize |= INIT_OFFSET_MASK; + DEBUGP("\t%s\n", secstrings + s->sh_name); + } +- if (m == 0) +- mod->init_text_size = mod->init_size; + } + } + +@@ -1877,9 +1887,8 @@ static int is_exported(const char *name, + + /* As per nm */ + static char elf_type(const Elf_Sym *sym, +- Elf_Shdr *sechdrs, +- const char *secstrings, +- struct module *mod) ++ const Elf_Shdr *sechdrs, ++ const char *secstrings) + { + if (ELF_ST_BIND(sym->st_info) == STB_WEAK) { + if (ELF_ST_TYPE(sym->st_info) == STT_OBJECT) +@@ -1954,7 +1963,7 @@ static unsigned long layout_symtab(struc + + /* Put symbol section at end of init part of module. */ + symsect->sh_flags |= SHF_ALLOC; +- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect, ++ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect, + symindex) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", secstrings + symsect->sh_name); + +@@ -1971,19 +1980,19 @@ static unsigned long layout_symtab(struc + } + + /* Append room for core symbols at end of core part. */ +- symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1); +- mod->core_size = symoffs + ndst * sizeof(Elf_Sym); ++ symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1); ++ mod->core_size_rx = symoffs + ndst * sizeof(Elf_Sym); + + /* Put string table section at end of init part of module. */ + strsect->sh_flags |= SHF_ALLOC; +- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect, ++ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect, + strindex) | INIT_OFFSET_MASK; + DEBUGP("\t%s\n", secstrings + strsect->sh_name); + + /* Append room for core symbols' strings at end of core part. */ +- *pstroffs = mod->core_size; ++ *pstroffs = mod->core_size_rx; + __set_bit(0, strmap); +- mod->core_size += bitmap_weight(strmap, strsect->sh_size); ++ mod->core_size_rx += bitmap_weight(strmap, strsect->sh_size); + + return symoffs; + } +@@ -2007,12 +2016,14 @@ static void add_kallsyms(struct module * + mod->num_symtab = sechdrs[symindex].sh_size / sizeof(Elf_Sym); + mod->strtab = (void *)sechdrs[strindex].sh_addr; + ++ pax_open_kernel(); ++ + /* Set types up while we still have access to sections. 
*/ + for (i = 0; i < mod->num_symtab; i++) + mod->symtab[i].st_info +- = elf_type(&mod->symtab[i], sechdrs, secstrings, mod); ++ = elf_type(&mod->symtab[i], sechdrs, secstrings); + +- mod->core_symtab = dst = mod->module_core + symoffs; ++ mod->core_symtab = dst = mod->module_core_rx + symoffs; + src = mod->symtab; + *dst = *src; + for (ndst = i = 1; i < mod->num_symtab; ++i, ++src) { +@@ -2024,10 +2035,12 @@ static void add_kallsyms(struct module * + } + mod->core_num_syms = ndst; + +- mod->core_strtab = s = mod->module_core + stroffs; ++ mod->core_strtab = s = mod->module_core_rx + stroffs; + for (*s = 0, i = 1; i < sechdrs[strindex].sh_size; ++i) + if (test_bit(i, strmap)) + *++s = mod->strtab[i]; ++ ++ pax_close_kernel(); + } + #else + static inline unsigned long layout_symtab(struct module *mod, +@@ -2070,17 +2083,33 @@ static void dynamic_debug_remove(struct + ddebug_remove_module(debug->modname); + } + +-static void *module_alloc_update_bounds(unsigned long size) ++static void *module_alloc_update_bounds_rw(unsigned long size) + { + void *ret = module_alloc(size); + + if (ret) { + mutex_lock(&module_mutex); + /* Update module bounds. */ +- if ((unsigned long)ret < module_addr_min) +- module_addr_min = (unsigned long)ret; +- if ((unsigned long)ret + size > module_addr_max) +- module_addr_max = (unsigned long)ret + size; ++ if ((unsigned long)ret < module_addr_min_rw) ++ module_addr_min_rw = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rw) ++ module_addr_max_rw = (unsigned long)ret + size; ++ mutex_unlock(&module_mutex); ++ } ++ return ret; ++} ++ ++static void *module_alloc_update_bounds_rx(unsigned long size) ++{ ++ void *ret = module_alloc_exec(size); ++ ++ if (ret) { ++ mutex_lock(&module_mutex); ++ /* Update module bounds. */ ++ if ((unsigned long)ret < module_addr_min_rx) ++ module_addr_min_rx = (unsigned long)ret; ++ if ((unsigned long)ret + size > module_addr_max_rx) ++ module_addr_max_rx = (unsigned long)ret + size; + mutex_unlock(&module_mutex); + } + return ret; +@@ -2284,7 +2313,7 @@ static noinline struct module *load_modu + secstrings, &stroffs, strmap); + + /* Do the allocs. */ +- ptr = module_alloc_update_bounds(mod->core_size); ++ ptr = module_alloc_update_bounds_rw(mod->core_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. Just mark it as not being a +@@ -2295,23 +2324,47 @@ static noinline struct module *load_modu + err = -ENOMEM; + goto free_percpu; + } +- memset(ptr, 0, mod->core_size); +- mod->module_core = ptr; ++ memset(ptr, 0, mod->core_size_rw); ++ mod->module_core_rw = ptr; + +- ptr = module_alloc_update_bounds(mod->init_size); ++ ptr = module_alloc_update_bounds_rw(mod->init_size_rw); + /* + * The pointer to this block is stored in the module structure + * which is inside the block. This block doesn't need to be + * scanned as it contains data and code that will be freed + * after the module is initialized. 
+ */ +- kmemleak_ignore(ptr); +- if (!ptr && mod->init_size) { ++ kmemleak_not_leak(ptr); ++ if (!ptr && mod->init_size_rw) { ++ err = -ENOMEM; ++ goto free_core_rw; ++ } ++ memset(ptr, 0, mod->init_size_rw); ++ mod->module_init_rw = ptr; ++ ++ ptr = module_alloc_update_bounds_rx(mod->core_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr) { ++ err = -ENOMEM; ++ goto free_init_rw; ++ } ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->core_size_rx); ++ pax_close_kernel(); ++ mod->module_core_rx = ptr; ++ ++ ptr = module_alloc_update_bounds_rx(mod->init_size_rx); ++ kmemleak_not_leak(ptr); ++ if (!ptr && mod->init_size_rx) { + err = -ENOMEM; +- goto free_core; ++ goto free_core_rx; + } +- memset(ptr, 0, mod->init_size); +- mod->module_init = ptr; ++ ++ pax_open_kernel(); ++ memset(ptr, 0, mod->init_size_rx); ++ pax_close_kernel(); ++ mod->module_init_rx = ptr; + + /* Transfer each section which specifies SHF_ALLOC */ + DEBUGP("final section addresses:\n"); +@@ -2321,17 +2374,41 @@ static noinline struct module *load_modu + if (!(sechdrs[i].sh_flags & SHF_ALLOC)) + continue; + +- if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) +- dest = mod->module_init +- + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); +- else +- dest = mod->module_core + sechdrs[i].sh_entsize; ++ if (sechdrs[i].sh_entsize & INIT_OFFSET_MASK) { ++ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC)) ++ dest = mod->module_init_rw ++ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); ++ else ++ dest = mod->module_init_rx ++ + (sechdrs[i].sh_entsize & ~INIT_OFFSET_MASK); ++ } else { ++ if ((sechdrs[i].sh_flags & SHF_WRITE) || !(sechdrs[i].sh_flags & SHF_ALLOC)) ++ dest = mod->module_core_rw + sechdrs[i].sh_entsize; ++ else ++ dest = mod->module_core_rx + sechdrs[i].sh_entsize; ++ } ++ ++ if (sechdrs[i].sh_type != SHT_NOBITS) { + +- if (sechdrs[i].sh_type != SHT_NOBITS) +- memcpy(dest, (void *)sechdrs[i].sh_addr, +- sechdrs[i].sh_size); ++#ifdef CONFIG_PAX_KERNEXEC ++ if (!(sechdrs[i].sh_flags & SHF_WRITE) && (sechdrs[i].sh_flags & SHF_ALLOC)) { ++ pax_open_kernel(); ++ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size); ++ pax_close_kernel(); ++ } else ++#endif ++ ++ memcpy(dest, (void *)sechdrs[i].sh_addr, sechdrs[i].sh_size); ++ } + /* Update sh_addr to point to copy in image. */ +- sechdrs[i].sh_addr = (unsigned long)dest; ++ ++#ifdef CONFIG_PAX_KERNEXEC ++ if (sechdrs[i].sh_flags & SHF_EXECINSTR) ++ sechdrs[i].sh_addr = ktva_ktla((unsigned long)dest); ++ else ++#endif ++ ++ sechdrs[i].sh_addr = (unsigned long)dest; + DEBUGP("\t0x%lx %s\n", sechdrs[i].sh_addr, secstrings + sechdrs[i].sh_name); + } + /* Module has been moved. */ +@@ -2342,7 +2419,7 @@ static noinline struct module *load_modu + mod->refptr = alloc_percpu(struct module_ref); + if (!mod->refptr) { + err = -ENOMEM; +- goto free_init; ++ goto free_init_rx; + } + #endif + /* Now we've moved module, initialize linked lists, etc. */ +@@ -2452,8 +2529,8 @@ static noinline struct module *load_modu + + /* Now do relocations. */ + for (i = 1; i < hdr->e_shnum; i++) { +- const char *strtab = (char *)sechdrs[strindex].sh_addr; + unsigned int info = sechdrs[i].sh_info; ++ strtab = (char *)sechdrs[strindex].sh_addr; + + /* Not a valid relocation section? */ + if (info >= hdr->e_shnum) +@@ -2503,12 +2580,12 @@ static noinline struct module *load_modu + * Do it before processing of module parameters, so the module + * can provide parameter accessor functions of its own. 
+ */ +- if (mod->module_init) +- flush_icache_range((unsigned long)mod->module_init, +- (unsigned long)mod->module_init +- + mod->init_size); +- flush_icache_range((unsigned long)mod->module_core, +- (unsigned long)mod->module_core + mod->core_size); ++ if (mod->module_init_rx) ++ flush_icache_range((unsigned long)mod->module_init_rx, ++ (unsigned long)mod->module_init_rx ++ + mod->init_size_rx); ++ flush_icache_range((unsigned long)mod->module_core_rx, ++ (unsigned long)mod->module_core_rx + mod->core_size_rx); + + set_fs(old_fs); + +@@ -2574,12 +2651,16 @@ static noinline struct module *load_modu + free_modinfo(mod); + module_unload_free(mod); + #if defined(CONFIG_MODULE_UNLOAD) ++ free_init_rx: + free_percpu(mod->refptr); +- free_init: + #endif +- module_free(mod, mod->module_init); +- free_core: +- module_free(mod, mod->module_core); ++ module_free_exec(mod, mod->module_init_rx); ++ free_core_rx: ++ module_free_exec(mod, mod->module_core_rx); ++ free_init_rw: ++ module_free(mod, mod->module_init_rw); ++ free_core_rw: ++ module_free(mod, mod->module_core_rw); + /* mod will be freed with core. Don't access it beyond this line! */ + free_percpu: + free_percpu(percpu); +@@ -2669,10 +2750,12 @@ SYSCALL_DEFINE3(init_module, void __user + mod->symtab = mod->core_symtab; + mod->strtab = mod->core_strtab; + #endif +- module_free(mod, mod->module_init); +- mod->module_init = NULL; +- mod->init_size = 0; +- mod->init_text_size = 0; ++ module_free(mod, mod->module_init_rw); ++ module_free_exec(mod, mod->module_init_rx); ++ mod->module_init_rw = NULL; ++ mod->module_init_rx = NULL; ++ mod->init_size_rw = 0; ++ mod->init_size_rx = 0; + mutex_unlock(&module_mutex); + + return 0; +@@ -2703,10 +2786,16 @@ static const char *get_ksymbol(struct mo + unsigned long nextval; + + /* At worse, next value is at end of module */ +- if (within_module_init(addr, mod)) +- nextval = (unsigned long)mod->module_init+mod->init_text_size; ++ if (within_module_init_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx; ++ else if (within_module_init_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw; ++ else if (within_module_core_rx(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx; ++ else if (within_module_core_rw(addr, mod)) ++ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw; + else +- nextval = (unsigned long)mod->module_core+mod->core_text_size; ++ return NULL; + + /* Scan for closest preceeding symbol, and next symbol. (ELF + starts real symbols at 1). */ +@@ -2952,7 +3041,7 @@ static int m_show(struct seq_file *m, vo + char buf[8]; + + seq_printf(m, "%s %u", +- mod->name, mod->init_size + mod->core_size); ++ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw); + print_unload_info(m, mod); + + /* Informative for users. */ +@@ -2961,7 +3050,7 @@ static int m_show(struct seq_file *m, vo + mod->state == MODULE_STATE_COMING ? "Loading": + "Live"); + /* Used by oprofile and other similar tools. 
*/ +- seq_printf(m, " 0x%p", mod->module_core); ++ seq_printf(m, " 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw); + + /* Taints info */ + if (mod->taints) +@@ -2997,7 +3086,17 @@ static const struct file_operations proc + + static int __init proc_modules_init(void) + { ++#ifndef CONFIG_GRKERNSEC_HIDESYM ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations); ++#else + proc_create("modules", 0, NULL, &proc_modules_operations); ++#endif ++#else ++ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations); ++#endif + return 0; + } + module_init(proc_modules_init); +@@ -3056,12 +3155,12 @@ struct module *__module_address(unsigned + { + struct module *mod; + +- if (addr < module_addr_min || addr > module_addr_max) ++ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) && ++ (addr < module_addr_min_rw || addr > module_addr_max_rw)) + return NULL; + + list_for_each_entry_rcu(mod, &modules, list) +- if (within_module_core(addr, mod) +- || within_module_init(addr, mod)) ++ if (within_module_init(addr, mod) || within_module_core(addr, mod)) + return mod; + return NULL; + } +@@ -3095,11 +3194,20 @@ bool is_module_text_address(unsigned lon + */ + struct module *__module_text_address(unsigned long addr) + { +- struct module *mod = __module_address(addr); ++ struct module *mod; ++ ++#ifdef CONFIG_X86_32 ++ addr = ktla_ktva(addr); ++#endif ++ ++ if (addr < module_addr_min_rx || addr > module_addr_max_rx) ++ return NULL; ++ ++ mod = __module_address(addr); ++ + if (mod) { + /* Make sure it's within the text section. */ +- if (!within(addr, mod->module_init, mod->init_text_size) +- && !within(addr, mod->module_core, mod->core_text_size)) ++ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod)) + mod = NULL; + } + return mod; +diff -urNp linux-2.6.35.4/kernel/panic.c linux-2.6.35.4/kernel/panic.c +--- linux-2.6.35.4/kernel/panic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/panic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -429,7 +429,8 @@ EXPORT_SYMBOL(warn_slowpath_null); + */ + void __stack_chk_fail(void) + { +- panic("stack-protector: Kernel stack is corrupted in: %p\n", ++ dump_stack(); ++ panic("stack-protector: Kernel stack is corrupted in: %pS\n", + __builtin_return_address(0)); + } + EXPORT_SYMBOL(__stack_chk_fail); +diff -urNp linux-2.6.35.4/kernel/pid.c linux-2.6.35.4/kernel/pid.c +--- linux-2.6.35.4/kernel/pid.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/pid.c 2010-09-17 20:12:37.000000000 -0400 +@@ -33,6 +33,7 @@ + #include <linux/rculist.h> + #include <linux/bootmem.h> + #include <linux/hash.h> ++#include <linux/security.h> + #include <linux/pid_namespace.h> + #include <linux/init_task.h> + #include <linux/syscalls.h> +@@ -45,7 +46,7 @@ struct pid init_struct_pid = INIT_STRUCT + + int pid_max = PID_MAX_DEFAULT; + +-#define RESERVED_PIDS 300 ++#define RESERVED_PIDS 500 + + int pid_max_min = RESERVED_PIDS + 1; + int pid_max_max = PID_MAX_LIMIT; +@@ -382,7 +383,14 @@ EXPORT_SYMBOL(pid_task); + */ + struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns) + { +- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ struct task_struct *task; ++ ++ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID); ++ ++ if (gr_pid_is_chrooted(task)) ++ return NULL; ++ ++ return task; + } + + struct task_struct *find_task_by_vpid(pid_t vnr) 
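
The kernel/module.c hunks above replace the single module_core/module_init allocation with paired mappings — module_core_rw/module_init_rw for data, module_core_rx/module_init_rx for code — so no module memory is ever writable and executable at the same time, with pax_open_kernel()/pax_close_kernel() bracketing the few writes (relocations, section copies, symbol tables) that must touch the RX half. A rough userspace sketch of the same W^X discipline, using plain POSIX mmap/mprotect rather than the patch's module_alloc/module_alloc_exec kernel allocators (illustrative only, not part of the patch):

/* w_xor_x.c - toy analogue of the RW/RX module split (illustrative only).
 * Build: cc -Wall -o w_xor_x w_xor_x.c
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	/* Data half: stays read+write, never executable (module_core_rw). */
	unsigned char *rw = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	/* Code half: writable only while it is being filled in,
	 * like module_core_rx during the section copy. */
	unsigned char *rx = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (rw == MAP_FAILED || rx == MAP_FAILED)
		return 1;

	memset(rw, 0, pagesz);    /* module data */
	memset(rx, 0xc3, pagesz); /* stand-in "code": x86 ret opcodes */

	/* Analogue of pax_close_kernel(): drop write, add exec. From here
	 * on the code half is R-X and the data half is RW-, never both. */
	if (mprotect(rx, pagesz, PROT_READ | PROT_EXEC) != 0)
		return 1;

	printf("data %p RW-, code %p R-X\n", (void *)rw, (void *)rx);
	return 0;
}

The reworked error-unwind labels in load_module (free_core_rx, free_init_rw, and so on) follow from the same split: each of the four mappings is now allocated, and therefore freed, independently.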
+diff -urNp linux-2.6.35.4/kernel/posix-cpu-timers.c linux-2.6.35.4/kernel/posix-cpu-timers.c +--- linux-2.6.35.4/kernel/posix-cpu-timers.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/posix-cpu-timers.c 2010-09-17 20:12:37.000000000 -0400 +@@ -6,6 +6,7 @@ + #include <linux/posix-timers.h> + #include <linux/errno.h> + #include <linux/math64.h> ++#include <linux/security.h> + #include <asm/uaccess.h> + #include <linux/kernel_stat.h> + #include <trace/events/timer.h> +@@ -972,6 +973,7 @@ static void check_thread_timers(struct t + unsigned long hard = + ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max); + ++ gr_learn_resource(tsk, RLIMIT_RTTIME, tsk->rt.timeout * (USEC_PER_SEC/HZ), 1); + if (hard != RLIM_INFINITY && + tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) { + /* +@@ -1138,6 +1140,7 @@ static void check_process_timers(struct + unsigned long hard = + ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max); + cputime_t x; ++ gr_learn_resource(tsk, RLIMIT_CPU, psecs, 0); + if (psecs >= hard) { + /* + * At the hard limit, we just die. +diff -urNp linux-2.6.35.4/kernel/power/hibernate.c linux-2.6.35.4/kernel/power/hibernate.c +--- linux-2.6.35.4/kernel/power/hibernate.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/power/hibernate.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,14 +50,14 @@ enum { + + static int hibernation_mode = HIBERNATION_SHUTDOWN; + +-static struct platform_hibernation_ops *hibernation_ops; ++static const struct platform_hibernation_ops *hibernation_ops; + + /** + * hibernation_set_ops - set the global hibernate operations + * @ops: the hibernation operations to use in subsequent hibernation transitions + */ + +-void hibernation_set_ops(struct platform_hibernation_ops *ops) ++void hibernation_set_ops(const struct platform_hibernation_ops *ops) + { + if (ops && !(ops->begin && ops->end && ops->pre_snapshot + && ops->prepare && ops->finish && ops->enter && ops->pre_restore +diff -urNp linux-2.6.35.4/kernel/power/poweroff.c linux-2.6.35.4/kernel/power/poweroff.c +--- linux-2.6.35.4/kernel/power/poweroff.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/power/poweroff.c 2010-09-17 20:12:09.000000000 -0400 +@@ -37,7 +37,7 @@ static struct sysrq_key_op sysrq_powerof + .enable_mask = SYSRQ_ENABLE_BOOT, + }; + +-static int pm_sysrq_init(void) ++static int __init pm_sysrq_init(void) + { + register_sysrq_key('o', &sysrq_poweroff_op); + return 0; +diff -urNp linux-2.6.35.4/kernel/power/process.c linux-2.6.35.4/kernel/power/process.c +--- linux-2.6.35.4/kernel/power/process.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/power/process.c 2010-09-17 20:12:09.000000000 -0400 +@@ -38,12 +38,15 @@ static int try_to_freeze_tasks(bool sig_ + struct timeval start, end; + u64 elapsed_csecs64; + unsigned int elapsed_csecs; ++ bool timedout = false; + + do_gettimeofday(&start); + + end_time = jiffies + TIMEOUT; + while (true) { + todo = 0; ++ if (time_after(jiffies, end_time)) ++ timedout = true; + read_lock(&tasklist_lock); + do_each_thread(g, p) { + if (frozen(p) || !freezeable(p)) +@@ -58,12 +61,16 @@ static int try_to_freeze_tasks(bool sig_ + * It is "frozen enough". If the task does wake + * up, it will immediately call try_to_freeze. 
+ */ +- if (!task_is_stopped_or_traced(p) && +- !freezer_should_skip(p)) ++ if (!task_is_stopped_or_traced(p) && !freezer_should_skip(p)) { + todo++; ++ if (timedout) { ++ printk(KERN_ERR "Task refusing to freeze:\n"); ++ sched_show_task(p); ++ } ++ } + } while_each_thread(g, p); + read_unlock(&tasklist_lock); +- if (!todo || time_after(jiffies, end_time)) ++ if (!todo || timedout) + break; + + /* +diff -urNp linux-2.6.35.4/kernel/power/suspend.c linux-2.6.35.4/kernel/power/suspend.c +--- linux-2.6.35.4/kernel/power/suspend.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/power/suspend.c 2010-09-17 20:12:09.000000000 -0400 +@@ -30,13 +30,13 @@ const char *const pm_states[PM_SUSPEND_M + [PM_SUSPEND_MEM] = "mem", + }; + +-static struct platform_suspend_ops *suspend_ops; ++static const struct platform_suspend_ops *suspend_ops; + + /** + * suspend_set_ops - Set the global suspend method table. + * @ops: Pointer to ops structure. + */ +-void suspend_set_ops(struct platform_suspend_ops *ops) ++void suspend_set_ops(const struct platform_suspend_ops *ops) + { + mutex_lock(&pm_mutex); + suspend_ops = ops; +diff -urNp linux-2.6.35.4/kernel/printk.c linux-2.6.35.4/kernel/printk.c +--- linux-2.6.35.4/kernel/printk.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/printk.c 2010-09-17 20:12:37.000000000 -0400 +@@ -266,6 +266,11 @@ int do_syslog(int type, char __user *buf + char c; + int error = 0; + ++#ifdef CONFIG_GRKERNSEC_DMESG ++ if (grsec_enable_dmesg && !capable(CAP_SYS_ADMIN)) ++ return -EPERM; ++#endif ++ + error = security_syslog(type, from_file); + if (error) + return error; +diff -urNp linux-2.6.35.4/kernel/ptrace.c linux-2.6.35.4/kernel/ptrace.c +--- linux-2.6.35.4/kernel/ptrace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/ptrace.c 2010-09-17 20:12:37.000000000 -0400 +@@ -140,7 +140,7 @@ int __ptrace_may_access(struct task_stru + cred->gid != tcred->egid || + cred->gid != tcred->sgid || + cred->gid != tcred->gid) && +- !capable(CAP_SYS_PTRACE)) { ++ !capable_nolog(CAP_SYS_PTRACE)) { + rcu_read_unlock(); + return -EPERM; + } +@@ -148,7 +148,7 @@ int __ptrace_may_access(struct task_stru + smp_rmb(); + if (task->mm) + dumpable = get_dumpable(task->mm); +- if (!dumpable && !capable(CAP_SYS_PTRACE)) ++ if (!dumpable && !capable_nolog(CAP_SYS_PTRACE)) + return -EPERM; + + return security_ptrace_access_check(task, mode); +@@ -198,7 +198,7 @@ int ptrace_attach(struct task_struct *ta + goto unlock_tasklist; + + task->ptrace = PT_PTRACED; +- if (capable(CAP_SYS_PTRACE)) ++ if (capable_nolog(CAP_SYS_PTRACE)) + task->ptrace |= PT_PTRACE_CAP; + + __ptrace_link(task, current); +@@ -361,7 +361,7 @@ int ptrace_readdata(struct task_struct * + break; + return -EIO; + } +- if (copy_to_user(dst, buf, retval)) ++ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval)) + return -EFAULT; + copied += retval; + src += retval; +@@ -572,18 +572,18 @@ int ptrace_request(struct task_struct *c + ret = ptrace_setoptions(child, data); + break; + case PTRACE_GETEVENTMSG: +- ret = put_user(child->ptrace_message, (unsigned long __user *) data); ++ ret = put_user(child->ptrace_message, (__force unsigned long __user *) data); + break; + + case PTRACE_GETSIGINFO: + ret = ptrace_getsiginfo(child, &siginfo); + if (!ret) +- ret = copy_siginfo_to_user((siginfo_t __user *) data, ++ ret = copy_siginfo_to_user((__force siginfo_t __user *) data, + &siginfo); + break; + + case PTRACE_SETSIGINFO: +- if (copy_from_user(&siginfo, (siginfo_t __user *) data, ++ if 
(copy_from_user(&siginfo, (__force siginfo_t __user *) data, + sizeof siginfo)) + ret = -EFAULT; + else +@@ -703,14 +703,21 @@ SYSCALL_DEFINE4(ptrace, long, request, l + goto out; + } + ++ if (gr_handle_ptrace(child, request)) { ++ ret = -EPERM; ++ goto out_put_task_struct; ++ } ++ + if (request == PTRACE_ATTACH) { + ret = ptrace_attach(child); + /* + * Some architectures need to do book-keeping after + * a ptrace attach. + */ +- if (!ret) ++ if (!ret) { + arch_ptrace_attach(child); ++ gr_audit_ptrace(child); ++ } + goto out_put_task_struct; + } + +@@ -734,7 +741,7 @@ int generic_ptrace_peekdata(struct task_ + copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0); + if (copied != sizeof(tmp)) + return -EIO; +- return put_user(tmp, (unsigned long __user *)data); ++ return put_user(tmp, (__force unsigned long __user *)data); + } + + int generic_ptrace_pokedata(struct task_struct *tsk, long addr, long data) +diff -urNp linux-2.6.35.4/kernel/rcutree.c linux-2.6.35.4/kernel/rcutree.c +--- linux-2.6.35.4/kernel/rcutree.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/rcutree.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1356,7 +1356,7 @@ __rcu_process_callbacks(struct rcu_state + /* + * Do softirq processing for the current CPU. + */ +-static void rcu_process_callbacks(struct softirq_action *unused) ++static void rcu_process_callbacks(void) + { + /* + * Memory references from any prior RCU read-side critical sections +diff -urNp linux-2.6.35.4/kernel/resource.c linux-2.6.35.4/kernel/resource.c +--- linux-2.6.35.4/kernel/resource.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/resource.c 2010-09-17 20:12:37.000000000 -0400 +@@ -133,8 +133,18 @@ static const struct file_operations proc + + static int __init ioresources_init(void) + { ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++#ifdef CONFIG_GRKERNSEC_PROC_USER ++ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations); ++#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP) ++ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations); ++ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations); ++#endif ++#else + proc_create("ioports", 0, NULL, &proc_ioports_operations); + proc_create("iomem", 0, NULL, &proc_iomem_operations); ++#endif + return 0; + } + __initcall(ioresources_init); +diff -urNp linux-2.6.35.4/kernel/sched.c linux-2.6.35.4/kernel/sched.c +--- linux-2.6.35.4/kernel/sched.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/sched.c 2010-09-17 20:12:37.000000000 -0400 +@@ -4266,6 +4266,8 @@ int can_nice(const struct task_struct *p + /* convert nice value [19,-20] to rlimit style value [1,40] */ + int nice_rlim = 20 - nice; + ++ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1); ++ + return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) || + capable(CAP_SYS_NICE)); + } +@@ -4299,7 +4301,8 @@ SYSCALL_DEFINE1(nice, int, increment) + if (nice > 19) + nice = 19; + +- if (increment < 0 && !can_nice(current, nice)) ++ if (increment < 0 && (!can_nice(current, nice) || ++ gr_handle_chroot_nice())) + return -EPERM; + + retval = security_task_setnice(current, nice); +@@ -4446,6 +4449,7 @@ recheck: + rlim_rtprio = task_rlimit(p, RLIMIT_RTPRIO); + unlock_task_sighand(p, &flags); + ++ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1); + /* can't set/change the rt policy */ + if (policy != p->policy && !rlim_rtprio) + return -EPERM; +diff -urNp linux-2.6.35.4/kernel/sched_fair.c 
linux-2.6.35.4/kernel/sched_fair.c +--- linux-2.6.35.4/kernel/sched_fair.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/sched_fair.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3390,7 +3390,7 @@ out: + * In CONFIG_NO_HZ case, the idle load balance owner will do the + * rebalancing for all the cpus for whom scheduler ticks are stopped. + */ +-static void run_rebalance_domains(struct softirq_action *h) ++static void run_rebalance_domains(void) + { + int this_cpu = smp_processor_id(); + struct rq *this_rq = cpu_rq(this_cpu); +diff -urNp linux-2.6.35.4/kernel/signal.c linux-2.6.35.4/kernel/signal.c +--- linux-2.6.35.4/kernel/signal.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/signal.c 2010-09-17 20:20:18.000000000 -0400 +@@ -45,12 +45,12 @@ static struct kmem_cache *sigqueue_cache + + int print_fatal_signals __read_mostly; + +-static void __user *sig_handler(struct task_struct *t, int sig) ++static __sighandler_t sig_handler(struct task_struct *t, int sig) + { + return t->sighand->action[sig - 1].sa.sa_handler; + } + +-static int sig_handler_ignored(void __user *handler, int sig) ++static int sig_handler_ignored(__sighandler_t handler, int sig) + { + /* Is it explicitly or implicitly ignored? */ + return handler == SIG_IGN || +@@ -60,7 +60,7 @@ static int sig_handler_ignored(void __us + static int sig_task_ignored(struct task_struct *t, int sig, + int from_ancestor_ns) + { +- void __user *handler; ++ __sighandler_t handler; + + handler = sig_handler(t, sig); + +@@ -243,6 +243,9 @@ __sigqueue_alloc(int sig, struct task_st + atomic_inc(&user->sigpending); + rcu_read_unlock(); + ++ if (!override_rlimit) ++ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1); ++ + if (override_rlimit || + atomic_read(&user->sigpending) <= + task_rlimit(t, RLIMIT_SIGPENDING)) { +@@ -367,7 +370,7 @@ flush_signal_handlers(struct task_struct + + int unhandled_signal(struct task_struct *tsk, int sig) + { +- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler; ++ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler; + if (is_global_init(tsk)) + return 1; + if (handler != SIG_IGN && handler != SIG_DFL) +@@ -678,6 +681,9 @@ static int check_kill_permission(int sig + } + } + ++ if (gr_handle_signal(t, sig)) ++ return -EPERM; ++ + return security_task_kill(t, info, sig, 0); + } + +@@ -1025,7 +1031,7 @@ __group_send_sig_info(int sig, struct si + return send_signal(sig, info, p, 1); + } + +-static int ++int + specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t) + { + return send_signal(sig, info, t, 0); +@@ -1079,6 +1085,9 @@ force_sig_info(int sig, struct siginfo * + ret = specific_send_sig_info(sig, info, t); + spin_unlock_irqrestore(&t->sighand->siglock, flags); + ++ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t); ++ gr_handle_crash(t, sig); ++ + return ret; + } + +@@ -1136,8 +1145,11 @@ int group_send_sig_info(int sig, struct + ret = check_kill_permission(sig, info, p); + rcu_read_unlock(); + +- if (!ret && sig) ++ if (!ret && sig) { + ret = do_send_sig_info(sig, info, p, true); ++ if (!ret) ++ gr_log_signal(sig, !is_si_special(info) ? 
info->si_addr : NULL, p); ++ } + + return ret; + } +diff -urNp linux-2.6.35.4/kernel/smp.c linux-2.6.35.4/kernel/smp.c +--- linux-2.6.35.4/kernel/smp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/smp.c 2010-09-17 20:12:09.000000000 -0400 +@@ -499,22 +499,22 @@ int smp_call_function(void (*func)(void + } + EXPORT_SYMBOL(smp_call_function); + +-void ipi_call_lock(void) ++void ipi_call_lock(void) __acquires(call_function.lock) + { + raw_spin_lock(&call_function.lock); + } + +-void ipi_call_unlock(void) ++void ipi_call_unlock(void) __releases(call_function.lock) + { + raw_spin_unlock(&call_function.lock); + } + +-void ipi_call_lock_irq(void) ++void ipi_call_lock_irq(void) __acquires(call_function.lock) + { + raw_spin_lock_irq(&call_function.lock); + } + +-void ipi_call_unlock_irq(void) ++void ipi_call_unlock_irq(void) __releases(call_function.lock) + { + raw_spin_unlock_irq(&call_function.lock); + } +diff -urNp linux-2.6.35.4/kernel/softirq.c linux-2.6.35.4/kernel/softirq.c +--- linux-2.6.35.4/kernel/softirq.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/softirq.c 2010-09-17 20:12:09.000000000 -0400 +@@ -56,7 +56,7 @@ static struct softirq_action softirq_vec + + static DEFINE_PER_CPU(struct task_struct *, ksoftirqd); + +-char *softirq_to_name[NR_SOFTIRQS] = { ++const char * const softirq_to_name[NR_SOFTIRQS] = { + "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL", + "TASKLET", "SCHED", "HRTIMER", "RCU" + }; +@@ -190,7 +190,7 @@ EXPORT_SYMBOL(local_bh_enable_ip); + + asmlinkage void __do_softirq(void) + { +- struct softirq_action *h; ++ const struct softirq_action *h; + __u32 pending; + int max_restart = MAX_SOFTIRQ_RESTART; + int cpu; +@@ -216,7 +216,7 @@ restart: + kstat_incr_softirqs_this_cpu(h - softirq_vec); + + trace_softirq_entry(h, softirq_vec); +- h->action(h); ++ h->action(); + trace_softirq_exit(h, softirq_vec); + if (unlikely(prev_count != preempt_count())) { + printk(KERN_ERR "huh, entered softirq %td %s %p" +@@ -340,7 +340,7 @@ void raise_softirq(unsigned int nr) + local_irq_restore(flags); + } + +-void open_softirq(int nr, void (*action)(struct softirq_action *)) ++void open_softirq(int nr, void (*action)(void)) + { + softirq_vec[nr].action = action; + } +@@ -396,7 +396,7 @@ void __tasklet_hi_schedule_first(struct + + EXPORT_SYMBOL(__tasklet_hi_schedule_first); + +-static void tasklet_action(struct softirq_action *a) ++static void tasklet_action(void) + { + struct tasklet_struct *list; + +@@ -431,7 +431,7 @@ static void tasklet_action(struct softir + } + } + +-static void tasklet_hi_action(struct softirq_action *a) ++static void tasklet_hi_action(void) + { + struct tasklet_struct *list; + +diff -urNp linux-2.6.35.4/kernel/sys.c linux-2.6.35.4/kernel/sys.c +--- linux-2.6.35.4/kernel/sys.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/sys.c 2010-09-17 20:28:33.000000000 -0400 +@@ -134,6 +134,12 @@ static int set_one_prio(struct task_stru + error = -EACCES; + goto out; + } ++ ++ if (gr_handle_chroot_setpriority(p, niceval)) { ++ error = -EACCES; ++ goto out; ++ } ++ + no_nice = security_task_setnice(p, niceval); + if (no_nice) { + error = no_nice; +@@ -511,6 +517,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, g + goto error; + } + ++ if (gr_check_group_change(new->gid, new->egid, -1)) ++ goto error; ++ + if (rgid != (gid_t) -1 || + (egid != (gid_t) -1 && egid != old->gid)) + new->sgid = new->egid; +@@ -540,6 +549,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid) + old = current_cred(); + + retval = -EPERM; ++ ++ if 
(gr_check_group_change(gid, gid, gid)) ++ goto error; ++ + if (capable(CAP_SETGID)) + new->gid = new->egid = new->sgid = new->fsgid = gid; + else if (gid == old->gid || gid == old->sgid) +@@ -620,6 +633,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, u + goto error; + } + ++ if (gr_check_user_change(new->uid, new->euid, -1)) ++ goto error; ++ + if (new->uid != old->uid) { + retval = set_user(new); + if (retval < 0) +@@ -664,6 +680,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid) + old = current_cred(); + + retval = -EPERM; ++ ++ if (gr_check_crash_uid(uid)) ++ goto error; ++ if (gr_check_user_change(uid, uid, uid)) ++ goto error; ++ + if (capable(CAP_SETUID)) { + new->suid = new->uid = uid; + if (uid != old->uid) { +@@ -718,6 +740,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, + goto error; + } + ++ if (gr_check_user_change(ruid, euid, -1)) ++ goto error; ++ + if (ruid != (uid_t) -1) { + new->uid = ruid; + if (ruid != old->uid) { +@@ -782,6 +807,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, + goto error; + } + ++ if (gr_check_group_change(rgid, egid, -1)) ++ goto error; ++ + if (rgid != (gid_t) -1) + new->gid = rgid; + if (egid != (gid_t) -1) +@@ -828,6 +856,9 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) + old = current_cred(); + old_fsuid = old->fsuid; + ++ if (gr_check_user_change(-1, -1, uid)) ++ goto error; ++ + if (uid == old->uid || uid == old->euid || + uid == old->suid || uid == old->fsuid || + capable(CAP_SETUID)) { +@@ -838,6 +869,7 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid) + } + } + ++error: + abort_creds(new); + return old_fsuid; + +@@ -864,12 +896,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid) + if (gid == old->gid || gid == old->egid || + gid == old->sgid || gid == old->fsgid || + capable(CAP_SETGID)) { ++ if (gr_check_group_change(-1, -1, gid)) ++ goto error; ++ + if (gid != old_fsgid) { + new->fsgid = gid; + goto change_okay; + } + } + ++error: + abort_creds(new); + return old_fsgid; + +@@ -1491,7 +1527,7 @@ SYSCALL_DEFINE5(prctl, int, option, unsi + error = get_dumpable(me->mm); + break; + case PR_SET_DUMPABLE: +- if (arg2 < 0 || arg2 > 1) { ++ if (arg2 > 1) { + error = -EINVAL; + break; + } +diff -urNp linux-2.6.35.4/kernel/sysctl.c linux-2.6.35.4/kernel/sysctl.c +--- linux-2.6.35.4/kernel/sysctl.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/sysctl.c 2010-09-17 20:18:09.000000000 -0400 +@@ -78,6 +78,13 @@ + + + #if defined(CONFIG_SYSCTL) ++#include <linux/grsecurity.h> ++#include <linux/grinternal.h> ++ ++extern __u32 gr_handle_sysctl(const ctl_table *table, const int op); ++extern int gr_handle_sysctl_mod(const char *dirname, const char *name, ++ const int op); ++extern int gr_handle_chroot_sysctl(const int op); + + /* External variables not in a header file. 
*/ + extern int sysctl_overcommit_memory; +@@ -185,6 +192,7 @@ static int sysrq_sysctl_handler(ctl_tabl + } + + #endif ++extern struct ctl_table grsecurity_table[]; + + static struct ctl_table root_table[]; + static struct ctl_table_root sysctl_table_root; +@@ -217,6 +225,20 @@ extern struct ctl_table epoll_table[]; + int sysctl_legacy_va_layout; + #endif + ++#ifdef CONFIG_PAX_SOFTMODE ++static ctl_table pax_table[] = { ++ { ++ .procname = "softmode", ++ .data = &pax_softmode, ++ .maxlen = sizeof(unsigned int), ++ .mode = 0600, ++ .proc_handler = &proc_dointvec, ++ }, ++ ++ { } ++}; ++#endif ++ + /* The default sysctl tables: */ + + static struct ctl_table root_table[] = { +@@ -269,6 +291,22 @@ static int max_extfrag_threshold = 1000; + #endif + + static struct ctl_table kern_table[] = { ++#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS) ++ { ++ .procname = "grsecurity", ++ .mode = 0500, ++ .child = grsecurity_table, ++ }, ++#endif ++ ++#ifdef CONFIG_PAX_SOFTMODE ++ { ++ .procname = "pax", ++ .mode = 0500, ++ .child = pax_table, ++ }, ++#endif ++ + { + .procname = "sched_child_runs_first", + .data = &sysctl_sched_child_runs_first, +@@ -1171,6 +1209,13 @@ static struct ctl_table vm_table[] = { + .proc_handler = proc_dointvec_minmax, + .extra1 = &zero, + }, ++ { ++ .procname = "heap_stack_gap", ++ .data = &sysctl_heap_stack_gap, ++ .maxlen = sizeof(sysctl_heap_stack_gap), ++ .mode = 0644, ++ .proc_handler = proc_doulongvec_minmax, ++ }, + #else + { + .procname = "nr_trim_pages", +@@ -1686,6 +1731,16 @@ int sysctl_perm(struct ctl_table_root *r + int error; + int mode; + ++ if (table->parent != NULL && table->parent->procname != NULL && ++ table->procname != NULL && ++ gr_handle_sysctl_mod(table->parent->procname, table->procname, op)) ++ return -EACCES; ++ if (gr_handle_chroot_sysctl(op)) ++ return -EACCES; ++ error = gr_handle_sysctl(table, op); ++ if (error) ++ return error; ++ + error = security_sysctl(table, op & (MAY_READ | MAY_WRITE | MAY_EXEC)); + if (error) + return error; +@@ -2201,6 +2256,8 @@ static int proc_put_long(void __user **b + len = strlen(tmp); + if (len > *size) + len = *size; ++ if (len > sizeof(tmp)) ++ len = sizeof(tmp); + if (copy_to_user(*buf, tmp, len)) + return -EFAULT; + *size -= len; +diff -urNp linux-2.6.35.4/kernel/taskstats.c linux-2.6.35.4/kernel/taskstats.c +--- linux-2.6.35.4/kernel/taskstats.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/taskstats.c 2010-09-17 20:12:37.000000000 -0400 +@@ -27,9 +27,12 @@ + #include <linux/cgroup.h> + #include <linux/fs.h> + #include <linux/file.h> ++#include <linux/grsecurity.h> + #include <net/genetlink.h> + #include <asm/atomic.h> + ++extern int gr_is_taskstats_denied(int pid); ++ + /* + * Maximum length of a cpumask that can be specified in + * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute +@@ -432,6 +435,9 @@ static int taskstats_user_cmd(struct sk_ + size_t size; + cpumask_var_t mask; + ++ if (gr_is_taskstats_denied(current->pid)) ++ return -EACCES; ++ + if (!alloc_cpumask_var(&mask, GFP_KERNEL)) + return -ENOMEM; + +diff -urNp linux-2.6.35.4/kernel/time/tick-broadcast.c linux-2.6.35.4/kernel/time/tick-broadcast.c +--- linux-2.6.35.4/kernel/time/tick-broadcast.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/time/tick-broadcast.c 2010-09-17 20:12:09.000000000 -0400 +@@ -116,7 +116,7 @@ int tick_device_uses_broadcast(struct cl + * then clear the broadcast bit. 
+ */ + if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) { +- int cpu = smp_processor_id(); ++ cpu = smp_processor_id(); + + cpumask_clear_cpu(cpu, tick_get_broadcast_mask()); + tick_broadcast_clear_oneshot(cpu); +diff -urNp linux-2.6.35.4/kernel/time/timer_list.c linux-2.6.35.4/kernel/time/timer_list.c +--- linux-2.6.35.4/kernel/time/timer_list.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/time/timer_list.c 2010-09-17 20:12:37.000000000 -0400 +@@ -38,12 +38,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, + + static void print_name_offset(struct seq_file *m, void *sym) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name((unsigned long)sym, symname) < 0) + SEQ_printf(m, "<%p>", sym); + else + SEQ_printf(m, "%s", symname); ++#endif + } + + static void +@@ -112,7 +116,11 @@ next_one: + static void + print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ SEQ_printf(m, " .base: %p\n", NULL); ++#else + SEQ_printf(m, " .base: %p\n", base); ++#endif + SEQ_printf(m, " .index: %d\n", + base->index); + SEQ_printf(m, " .resolution: %Lu nsecs\n", +@@ -293,7 +301,11 @@ static int __init init_timer_list_procfs + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops); ++#else + pe = proc_create("timer_list", 0444, NULL, &timer_list_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff -urNp linux-2.6.35.4/kernel/time/timer_stats.c linux-2.6.35.4/kernel/time/timer_stats.c +--- linux-2.6.35.4/kernel/time/timer_stats.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/time/timer_stats.c 2010-09-17 20:12:37.000000000 -0400 +@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *time + + static void print_name_offset(struct seq_file *m, unsigned long addr) + { ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(m, "<%p>", NULL); ++#else + char symname[KSYM_NAME_LEN]; + + if (lookup_symbol_name(addr, symname) < 0) + seq_printf(m, "<%p>", (void *)addr); + else + seq_printf(m, "%s", symname); ++#endif + } + + static int tstats_show(struct seq_file *m, void *v) +@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(voi + { + struct proc_dir_entry *pe; + ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops); ++#else + pe = proc_create("timer_stats", 0644, NULL, &tstats_fops); ++#endif + if (!pe) + return -ENOMEM; + return 0; +diff -urNp linux-2.6.35.4/kernel/time.c linux-2.6.35.4/kernel/time.c +--- linux-2.6.35.4/kernel/time.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/time.c 2010-09-17 20:12:37.000000000 -0400 +@@ -93,6 +93,9 @@ SYSCALL_DEFINE1(stime, time_t __user *, + return err; + + do_settimeofday(&tv); ++ ++ gr_log_timechange(); ++ + return 0; + } + +@@ -200,6 +203,8 @@ SYSCALL_DEFINE2(settimeofday, struct tim + return -EFAULT; + } + ++ gr_log_timechange(); ++ + return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? 
&new_tz : NULL); + } + +@@ -238,7 +243,7 @@ EXPORT_SYMBOL(current_fs_time); + * Avoid unnecessary multiplications/divisions in the + * two most common HZ cases: + */ +-unsigned int inline jiffies_to_msecs(const unsigned long j) ++inline unsigned int jiffies_to_msecs(const unsigned long j) + { + #if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) + return (MSEC_PER_SEC / HZ) * j; +@@ -254,7 +259,7 @@ unsigned int inline jiffies_to_msecs(con + } + EXPORT_SYMBOL(jiffies_to_msecs); + +-unsigned int inline jiffies_to_usecs(const unsigned long j) ++inline unsigned int jiffies_to_usecs(const unsigned long j) + { + #if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ) + return (USEC_PER_SEC / HZ) * j; +diff -urNp linux-2.6.35.4/kernel/timer.c linux-2.6.35.4/kernel/timer.c +--- linux-2.6.35.4/kernel/timer.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/timer.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1272,7 +1272,7 @@ void update_process_times(int user_tick) + /* + * This function runs timers and the timer-tq in bottom half context. + */ +-static void run_timer_softirq(struct softirq_action *h) ++static void run_timer_softirq(void) + { + struct tvec_base *base = __get_cpu_var(tvec_bases); + +diff -urNp linux-2.6.35.4/kernel/trace/ftrace.c linux-2.6.35.4/kernel/trace/ftrace.c +--- linux-2.6.35.4/kernel/trace/ftrace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/trace/ftrace.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1101,13 +1101,18 @@ ftrace_code_disable(struct module *mod, + + ip = rec->ip; + ++ ret = ftrace_arch_code_modify_prepare(); ++ FTRACE_WARN_ON(ret); ++ if (ret) ++ return 0; ++ + ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR); ++ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process()); + if (ret) { + ftrace_bug(ret, ip); + rec->flags |= FTRACE_FL_FAILED; +- return 0; + } +- return 1; ++ return ret ? 0 : 1; + } + + /* +diff -urNp linux-2.6.35.4/kernel/trace/ring_buffer.c linux-2.6.35.4/kernel/trace/ring_buffer.c +--- linux-2.6.35.4/kernel/trace/ring_buffer.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/trace/ring_buffer.c 2010-09-17 20:12:09.000000000 -0400 +@@ -635,7 +635,7 @@ static struct list_head *rb_list_head(st + * the reader page). But if the next page is a header page, + * its flags will be non zero. 
+ */ +-static int inline ++static inline int + rb_is_head_page(struct ring_buffer_per_cpu *cpu_buffer, + struct buffer_page *page, struct list_head *list) + { +diff -urNp linux-2.6.35.4/kernel/trace/trace.c linux-2.6.35.4/kernel/trace/trace.c +--- linux-2.6.35.4/kernel/trace/trace.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/trace/trace.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3965,10 +3965,9 @@ static const struct file_operations trac + }; + #endif + +-static struct dentry *d_tracer; +- + struct dentry *tracing_init_dentry(void) + { ++ static struct dentry *d_tracer; + static int once; + + if (d_tracer) +@@ -3988,10 +3987,9 @@ struct dentry *tracing_init_dentry(void) + return d_tracer; + } + +-static struct dentry *d_percpu; +- + struct dentry *tracing_dentry_percpu(void) + { ++ static struct dentry *d_percpu; + static int once; + struct dentry *d_tracer; + +diff -urNp linux-2.6.35.4/kernel/trace/trace_output.c linux-2.6.35.4/kernel/trace/trace_output.c +--- linux-2.6.35.4/kernel/trace/trace_output.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/trace/trace_output.c 2010-09-17 20:12:09.000000000 -0400 +@@ -281,7 +281,7 @@ int trace_seq_path(struct trace_seq *s, + + p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len); + if (!IS_ERR(p)) { +- p = mangle_path(s->buffer + s->len, p, "\n"); ++ p = mangle_path(s->buffer + s->len, p, "\n\\"); + if (p) { + s->len = p - s->buffer; + return 1; +diff -urNp linux-2.6.35.4/kernel/trace/trace_stack.c linux-2.6.35.4/kernel/trace/trace_stack.c +--- linux-2.6.35.4/kernel/trace/trace_stack.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/kernel/trace/trace_stack.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,7 +50,7 @@ static inline void check_stack(void) + return; + + /* we do not handle interrupt stacks yet */ +- if (!object_is_on_stack(&this_size)) ++ if (!object_starts_on_stack(&this_size)) + return; + + local_irq_save(flags); +diff -urNp linux-2.6.35.4/lib/bug.c linux-2.6.35.4/lib/bug.c +--- linux-2.6.35.4/lib/bug.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/bug.c 2010-09-17 20:12:09.000000000 -0400 +@@ -135,6 +135,8 @@ enum bug_trap_type report_bug(unsigned l + return BUG_TRAP_TYPE_NONE; + + bug = find_bug(bugaddr); ++ if (!bug) ++ return BUG_TRAP_TYPE_NONE; + + printk(KERN_EMERG "------------[ cut here ]------------\n"); + +diff -urNp linux-2.6.35.4/lib/debugobjects.c linux-2.6.35.4/lib/debugobjects.c +--- linux-2.6.35.4/lib/debugobjects.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/debugobjects.c 2010-09-17 20:12:09.000000000 -0400 +@@ -281,7 +281,7 @@ static void debug_object_is_on_stack(voi + if (limit > 4) + return; + +- is_on_stack = object_is_on_stack(addr); ++ is_on_stack = object_starts_on_stack(addr); + if (is_on_stack == onstack) + return; + +diff -urNp linux-2.6.35.4/lib/dma-debug.c linux-2.6.35.4/lib/dma-debug.c +--- linux-2.6.35.4/lib/dma-debug.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/dma-debug.c 2010-09-17 20:12:09.000000000 -0400 +@@ -861,7 +861,7 @@ out: + + static void check_for_stack(struct device *dev, void *addr) + { +- if (object_is_on_stack(addr)) ++ if (object_starts_on_stack(addr)) + err_printk(dev, NULL, "DMA-API: device driver maps memory from" + "stack [addr=%p]\n", addr); + } +diff -urNp linux-2.6.35.4/lib/inflate.c linux-2.6.35.4/lib/inflate.c +--- linux-2.6.35.4/lib/inflate.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/inflate.c 2010-09-17 20:12:09.000000000 -0400 +@@ -267,7 +267,7 @@ static void 
free(void *where) + malloc_ptr = free_mem_ptr; + } + #else +-#define malloc(a) kmalloc(a, GFP_KERNEL) ++#define malloc(a) kmalloc((a), GFP_KERNEL) + #define free(a) kfree(a) + #endif + +diff -urNp linux-2.6.35.4/lib/Kconfig.debug linux-2.6.35.4/lib/Kconfig.debug +--- linux-2.6.35.4/lib/Kconfig.debug 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/Kconfig.debug 2010-09-17 20:12:37.000000000 -0400 +@@ -970,7 +970,7 @@ config LATENCYTOP + select STACKTRACE + select SCHEDSTATS + select SCHED_DEBUG +- depends on HAVE_LATENCYTOP_SUPPORT ++ depends on HAVE_LATENCYTOP_SUPPORT && !GRKERNSEC_HIDESYM + help + Enable this option if you want to use the LatencyTOP tool + to find out which userspace is blocking on what kernel operations. +diff -urNp linux-2.6.35.4/lib/parser.c linux-2.6.35.4/lib/parser.c +--- linux-2.6.35.4/lib/parser.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/parser.c 2010-09-17 20:12:09.000000000 -0400 +@@ -129,7 +129,7 @@ static int match_number(substring_t *s, + char *buf; + int ret; + +- buf = kmalloc(s->to - s->from + 1, GFP_KERNEL); ++ buf = kmalloc((s->to - s->from) + 1, GFP_KERNEL); + if (!buf) + return -ENOMEM; + memcpy(buf, s->from, s->to - s->from); +diff -urNp linux-2.6.35.4/lib/radix-tree.c linux-2.6.35.4/lib/radix-tree.c +--- linux-2.6.35.4/lib/radix-tree.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/lib/radix-tree.c 2010-09-17 20:12:09.000000000 -0400 +@@ -80,7 +80,7 @@ struct radix_tree_preload { + int nr; + struct radix_tree_node *nodes[RADIX_TREE_MAX_PATH]; + }; +-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, }; ++static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads); + + static inline gfp_t root_gfp_mask(struct radix_tree_root *root) + { +diff -urNp linux-2.6.35.4/localversion-grsec linux-2.6.35.4/localversion-grsec +--- linux-2.6.35.4/localversion-grsec 1969-12-31 19:00:00.000000000 -0500 ++++ linux-2.6.35.4/localversion-grsec 2010-09-17 20:12:37.000000000 -0400 +@@ -0,0 +1 @@ ++-grsec +diff -urNp linux-2.6.35.4/Makefile linux-2.6.35.4/Makefile +--- linux-2.6.35.4/Makefile 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/Makefile 2010-09-17 20:12:37.000000000 -0400 +@@ -230,8 +230,8 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" + + HOSTCC = gcc + HOSTCXX = g++ +-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer +-HOSTCXXFLAGS = -O2 ++HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks ++HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks + + # Decide whether to build built-in, modular, or both. + # Normally, just do built-in. 
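
The HOSTCFLAGS/HOSTCXXFLAGS hunk above adds -fno-delete-null-pointer-checks because, at -O2, GCC may treat a pointer as provably non-NULL once it has been dereferenced and silently delete a later NULL test. A minimal demonstration of the optimization the flag disables (hypothetical example, not from the patch; compare the generated assembly with and without the flag):

/* nullcheck.c - compare:
 *   cc -O2 -S nullcheck.c
 *   cc -O2 -fno-delete-null-pointer-checks -S nullcheck.c
 */
struct device { int flags; };

int get_flags(struct device *dev)
{
	int flags = dev->flags;	/* dereference happens first (the bug) */
	if (!dev)		/* without the flag, -O2 may delete this test */
		return -1;
	return flags;
}

In kernel context this pattern is what made the 2009 tun_chr_poll bug exploitable: with page zero mappable, the early dereference succeeds and the optimized-out check never fires.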
+@@ -650,7 +650,7 @@ export mod_strip_cmd + + + ifeq ($(KBUILD_EXTMOD),) +-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ ++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/ + + vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \ + $(core-y) $(core-m) $(drivers-y) $(drivers-m) \ +diff -urNp linux-2.6.35.4/mm/bootmem.c linux-2.6.35.4/mm/bootmem.c +--- linux-2.6.35.4/mm/bootmem.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/bootmem.c 2010-09-17 20:12:09.000000000 -0400 +@@ -200,19 +200,30 @@ static void __init __free_pages_memory(u + unsigned long __init free_all_memory_core_early(int nodeid) + { + int i; +- u64 start, end; ++ u64 start, end, startrange, endrange; + unsigned long count = 0; +- struct range *range = NULL; ++ struct range *range = NULL, rangerange = { 0, 0 }; + int nr_range; + + nr_range = get_free_all_memory_range(&range, nodeid); ++ startrange = __pa(range) >> PAGE_SHIFT; ++ endrange = (__pa(range + nr_range) - 1) >> PAGE_SHIFT; + + for (i = 0; i < nr_range; i++) { + start = range[i].start; + end = range[i].end; ++ if (start <= endrange && startrange < end) { ++ BUG_ON(rangerange.start | rangerange.end); ++ rangerange = range[i]; ++ continue; ++ } + count += end - start; + __free_pages_memory(start, end); + } ++ start = rangerange.start; ++ end = rangerange.end; ++ count += end - start; ++ __free_pages_memory(start, end); + + return count; + } +diff -urNp linux-2.6.35.4/mm/filemap.c linux-2.6.35.4/mm/filemap.c +--- linux-2.6.35.4/mm/filemap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/filemap.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1640,7 +1640,7 @@ int generic_file_mmap(struct file * file + struct address_space *mapping = file->f_mapping; + + if (!mapping->a_ops->readpage) +- return -ENOEXEC; ++ return -ENODEV; + file_accessed(file); + vma->vm_ops = &generic_file_vm_ops; + vma->vm_flags |= VM_CAN_NONLINEAR; +@@ -2036,6 +2036,7 @@ inline int generic_write_checks(struct f + *pos = i_size_read(inode); + + if (limit != RLIM_INFINITY) { ++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0); + if (*pos >= limit) { + send_sig(SIGXFSZ, current, 0); + return -EFBIG; +diff -urNp linux-2.6.35.4/mm/fremap.c linux-2.6.35.4/mm/fremap.c +--- linux-2.6.35.4/mm/fremap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/fremap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -153,6 +153,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsign + retry: + vma = find_vma(mm, start); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC)) ++ goto out; ++#endif ++ + /* + * Make sure the vma is shared, that it supports prefaulting, + * and that the remapped range is valid and fully within +@@ -221,7 +226,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsign + /* + * drop PG_Mlocked flag for over-mapped range + */ +- unsigned int saved_flags = vma->vm_flags; ++ unsigned long saved_flags = vma->vm_flags; + munlock_vma_pages_range(vma, start, start + size); + vma->vm_flags = saved_flags; + } +diff -urNp linux-2.6.35.4/mm/highmem.c linux-2.6.35.4/mm/highmem.c +--- linux-2.6.35.4/mm/highmem.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/highmem.c 2010-09-17 20:12:09.000000000 -0400 +@@ -116,9 +116,10 @@ static void flush_all_zero_pkmaps(void) + * So no dangers, even with speculative execution. 
+ */ + page = pte_page(pkmap_page_table[i]); ++ pax_open_kernel(); + pte_clear(&init_mm, (unsigned long)page_address(page), + &pkmap_page_table[i]); +- ++ pax_close_kernel(); + set_page_address(page, NULL); + need_flush = 1; + } +@@ -177,9 +178,11 @@ start: + } + } + vaddr = PKMAP_ADDR(last_pkmap_nr); ++ ++ pax_open_kernel(); + set_pte_at(&init_mm, vaddr, + &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot)); +- ++ pax_close_kernel(); + pkmap_count[last_pkmap_nr] = 1; + set_page_address(page, (void *)vaddr); + +diff -urNp linux-2.6.35.4/mm/hugetlb.c linux-2.6.35.4/mm/hugetlb.c +--- linux-2.6.35.4/mm/hugetlb.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/hugetlb.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2272,6 +2272,26 @@ static int unmap_ref_private(struct mm_s + return 1; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ struct vm_area_struct *vma_m; ++ unsigned long address_m; ++ pte_t *ptep_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK); ++ get_page(page_m); ++ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0)); ++} ++#endif ++ + static int hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, pte_t pte, + struct page *pagecache_page) +@@ -2352,6 +2372,11 @@ retry_avoidcopy: + huge_ptep_clear_flush(vma, address, ptep); + set_huge_pte_at(mm, address, ptep, + make_huge_pte(vma, new_page, 1)); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, new_page); ++#endif ++ + /* Make the old page be freed below */ + new_page = old_page; + } +@@ -2483,6 +2508,10 @@ retry: + && (vma->vm_flags & VM_SHARED))); + set_huge_pte_at(mm, address, ptep, new_pte); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_huge_pte(vma, address, page); ++#endif ++ + if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) { + /* Optimization, do the COW without a second fault */ + ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page); +@@ -2511,6 +2540,28 @@ int hugetlb_fault(struct mm_struct *mm, + static DEFINE_MUTEX(hugetlb_instantiation_mutex); + struct hstate *h = hstate_vma(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ h = hstate_vma(vma); ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ if (!huge_pte_alloc(mm, address_m, huge_page_size(h))) ++ return VM_FAULT_OOM; ++ address_m &= HPAGE_MASK; ++ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL); ++ } ++#endif ++ + ptep = huge_pte_alloc(mm, address, huge_page_size(h)); + if (!ptep) + return VM_FAULT_OOM; +diff -urNp linux-2.6.35.4/mm/Kconfig linux-2.6.35.4/mm/Kconfig +--- linux-2.6.35.4/mm/Kconfig 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/Kconfig 2010-09-17 20:12:37.000000000 -0400 +@@ -240,7 +240,7 @@ config KSM + config DEFAULT_MMAP_MIN_ADDR + int "Low address space to protect from user allocation" + depends on MMU +- default 4096 ++ default 65536 + help + This is the portion of low virtual memory which should be protected + from userspace allocation. 
Keeping a user from writing to low pages +diff -urNp linux-2.6.35.4/mm/maccess.c linux-2.6.35.4/mm/maccess.c +--- linux-2.6.35.4/mm/maccess.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/maccess.c 2010-09-17 20:12:09.000000000 -0400 +@@ -15,10 +15,10 @@ + * happens, handle that and return -EFAULT. + */ + +-long __weak probe_kernel_read(void *dst, void *src, size_t size) ++long __weak probe_kernel_read(void *dst, const void *src, size_t size) + __attribute__((alias("__probe_kernel_read"))); + +-long __probe_kernel_read(void *dst, void *src, size_t size) ++long __probe_kernel_read(void *dst, const void *src, size_t size) + { + long ret; + mm_segment_t old_fs = get_fs(); +@@ -43,10 +43,10 @@ EXPORT_SYMBOL_GPL(probe_kernel_read); + * Safely write to address @dst from the buffer at @src. If a kernel fault + * happens, handle that and return -EFAULT. + */ +-long __weak probe_kernel_write(void *dst, void *src, size_t size) ++long __weak probe_kernel_write(void *dst, const void *src, size_t size) + __attribute__((alias("__probe_kernel_write"))); + +-long __probe_kernel_write(void *dst, void *src, size_t size) ++long __probe_kernel_write(void *dst, const void *src, size_t size) + { + long ret; + mm_segment_t old_fs = get_fs(); +diff -urNp linux-2.6.35.4/mm/madvise.c linux-2.6.35.4/mm/madvise.c +--- linux-2.6.35.4/mm/madvise.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/madvise.c 2010-09-17 20:12:09.000000000 -0400 +@@ -45,6 +45,10 @@ static long madvise_behavior(struct vm_a + pgoff_t pgoff; + unsigned long new_flags = vma->vm_flags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + switch (behavior) { + case MADV_NORMAL: + new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ; +@@ -104,6 +108,13 @@ success: + /* + * vm_flags is protected by the mmap_sem held in write mode. 
+ */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) ++ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT); ++#endif ++ + vma->vm_flags = new_flags; + + out: +@@ -162,6 +173,11 @@ static long madvise_dontneed(struct vm_a + struct vm_area_struct ** prev, + unsigned long start, unsigned long end) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + *prev = vma; + if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP)) + return -EINVAL; +@@ -174,6 +190,21 @@ static long madvise_dontneed(struct vm_a + zap_page_range(vma, start, end - start, &details); + } else + zap_page_range(vma, start, end - start, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ if (unlikely(vma->vm_flags & VM_NONLINEAR)) { ++ struct zap_details details = { ++ .nonlinear_vma = vma_m, ++ .last_index = ULONG_MAX, ++ }; ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details); ++ } else ++ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL); ++ } ++#endif ++ + return 0; + } + +@@ -366,6 +397,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, + if (end < start) + goto out; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ goto out; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ goto out; ++ + error = 0; + if (end == start) + goto out; +diff -urNp linux-2.6.35.4/mm/memory.c linux-2.6.35.4/mm/memory.c +--- linux-2.6.35.4/mm/memory.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/memory.c 2010-09-17 20:12:09.000000000 -0400 +@@ -259,8 +259,12 @@ static inline void free_pmd_range(struct + return; + + pmd = pmd_offset(pud, start); ++ ++#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD) + pud_clear(pud); + pmd_free_tlb(tlb, pmd, start); ++#endif ++ + } + + static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd, +@@ -292,8 +296,12 @@ static inline void free_pud_range(struct + return; + + pud = pud_offset(pgd, start); ++ ++#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD) + pgd_clear(pgd); + pud_free_tlb(tlb, pud, start); ++#endif ++ + } + + /* +@@ -1363,10 +1371,10 @@ int __get_user_pages(struct task_struct + (VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE); + i = 0; + +- do { ++ while (nr_pages) { + struct vm_area_struct *vma; + +- vma = find_extend_vma(mm, start); ++ vma = find_vma(mm, start); + if (!vma && in_gate_area(tsk, start)) { + unsigned long pg = start & PAGE_MASK; + struct vm_area_struct *gate_vma = get_gate_vma(tsk); +@@ -1418,7 +1426,7 @@ int __get_user_pages(struct task_struct + continue; + } + +- if (!vma || ++ if (!vma || start < vma->vm_start || + (vma->vm_flags & (VM_IO | VM_PFNMAP)) || + !(vm_flags & vma->vm_flags)) + return i ? 
: -EFAULT; +@@ -1493,7 +1501,7 @@ int __get_user_pages(struct task_struct + start += PAGE_SIZE; + nr_pages--; + } while (nr_pages && start < vma->vm_end); +- } while (nr_pages); ++ } + return i; + } + +@@ -2089,6 +2097,186 @@ static inline void cow_user_page(struct + copy_user_highpage(dst, src, va, vma); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ spinlock_t *ptl; ++ pte_t *pte, entry; ++ ++ pte = pte_offset_map_lock(mm, pmd, address, &ptl); ++ entry = *pte; ++ if (!pte_present(entry)) { ++ if (!pte_none(entry)) { ++ BUG_ON(pte_file(entry)); ++ free_swap_and_cache(pte_to_swp_entry(entry)); ++ pte_clear_not_present_full(mm, address, pte, 0); ++ } ++ } else { ++ struct page *page; ++ ++ flush_cache_page(vma, address, pte_pfn(entry)); ++ entry = ptep_clear_flush(vma, address, pte); ++ BUG_ON(pte_dirty(entry)); ++ page = vm_normal_page(vma, address, entry); ++ if (page) { ++ update_hiwater_rss(mm); ++ if (PageAnon(page)) ++ dec_mm_counter_fast(mm, MM_ANONPAGES); ++ else ++ dec_mm_counter_fast(mm, MM_FILEPAGES); ++ page_remove_rmap(page); ++ page_cache_release(page); ++ } ++ } ++ pte_unmap_unlock(pte, ptl); ++} ++ ++/* PaX: if vma is mirrored, synchronize the mirror's PTE ++ * ++ * the ptl of the lower mapped page is held on entry and is not released on exit ++ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc) ++ */ ++static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || !PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(!PageLocked(page_m)); ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map_nested(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ page_cache_get(page_m); ++ page_add_anon_rmap(page_m, vma_m, address_m); ++ inc_mm_counter_fast(mm, MM_ANONPAGES); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap_nested(pte_m); ++ unlock_page(page_m); ++} ++ ++void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ BUG_ON(!page_m || PageAnon(page_m)); ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map_nested(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot); ++ 
page_cache_get(page_m); ++ page_add_file_rmap(page_m); ++ inc_mm_counter_fast(mm, MM_FILEPAGES); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++ update_mmu_cache(vma_m, address_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap_nested(pte_m); ++} ++ ++static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl) ++{ ++ struct mm_struct *mm = vma->vm_mm; ++ unsigned long address_m; ++ spinlock_t *ptl_m; ++ struct vm_area_struct *vma_m; ++ pmd_t *pmd_m; ++ pte_t *pte_m, entry_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++ if (!vma_m) ++ return; ++ ++ BUG_ON(address >= SEGMEXEC_TASK_SIZE); ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m); ++ pte_m = pte_offset_map_nested(pmd_m, address_m); ++ ptl_m = pte_lockptr(mm, pmd_m); ++ if (ptl != ptl_m) { ++ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING); ++ if (!pte_none(*pte_m)) ++ goto out; ++ } ++ ++ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot); ++ set_pte_at(mm, address_m, pte_m, entry_m); ++out: ++ if (ptl != ptl_m) ++ spin_unlock(ptl_m); ++ pte_unmap_nested(pte_m); ++} ++ ++static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl) ++{ ++ struct page *page_m; ++ pte_t entry; ++ ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC)) ++ goto out; ++ ++ entry = *pte; ++ page_m = vm_normal_page(vma, address, entry); ++ if (!page_m) ++ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl); ++ else if (PageAnon(page_m)) { ++ if (pax_find_mirror_vma(vma)) { ++ pte_unmap_unlock(pte, ptl); ++ lock_page(page_m); ++ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl); ++ if (pte_same(entry, *pte)) ++ pax_mirror_anon_pte(vma, address, page_m, ptl); ++ else ++ unlock_page(page_m); ++ } ++ } else ++ pax_mirror_file_pte(vma, address, page_m, ptl); ++ ++out: ++ pte_unmap_unlock(pte, ptl); ++} ++#endif ++ + /* + * This routine handles present pages, when users try to write + * to a shared page. It is done by copying the page to a new address +@@ -2275,6 +2463,12 @@ gotten: + */ + page_table = pte_offset_map_lock(mm, pmd, address, &ptl); + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(new_page)); ++#endif ++ + if (old_page) { + if (!PageAnon(old_page)) { + dec_mm_counter_fast(mm, MM_FILEPAGES); +@@ -2326,6 +2520,10 @@ gotten: + page_remove_rmap(old_page); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, new_page, ptl); ++#endif ++ + /* Free the old page.. 
*/ + new_page = old_page; + ret |= VM_FAULT_WRITE; +@@ -2734,6 +2932,11 @@ static int do_swap_page(struct mm_struct + swap_free(entry); + if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page)) + try_to_free_swap(page); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma)) ++#endif ++ + unlock_page(page); + + if (flags & FAULT_FLAG_WRITE) { +@@ -2745,6 +2948,11 @@ static int do_swap_page(struct mm_struct + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + out: +@@ -2760,33 +2968,6 @@ out_release: + } + + /* +- * This is like a special single-page "expand_downwards()", +- * except we must first make sure that 'address-PAGE_SIZE' +- * doesn't hit another vma. +- * +- * The "find_vma()" will do the right thing even if we wrap +- */ +-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address) +-{ +- address &= PAGE_MASK; +- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) { +- struct vm_area_struct *prev = vma->vm_prev; +- +- /* +- * Is there a mapping abutting this one below? +- * +- * That's only ok if it's the same stack mapping +- * that has gotten split.. +- */ +- if (prev && prev->vm_end == address) +- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM; +- +- expand_stack(vma, address - PAGE_SIZE); +- } +- return 0; +-} +- +-/* + * We enter with non-exclusive mmap_sem (to exclude vma changes, + * but allow concurrent faults), and pte mapped but not yet locked. + * We return with mmap_sem still held, but pte unmapped and unlocked. +@@ -2795,27 +2976,23 @@ static int do_anonymous_page(struct mm_s + unsigned long address, pte_t *page_table, pmd_t *pmd, + unsigned int flags) + { +- struct page *page; ++ struct page *page = NULL; + spinlock_t *ptl; + pte_t entry; + +- pte_unmap(page_table); +- +- /* Check if we need to add a guard page to the stack */ +- if (check_stack_guard_page(vma, address) < 0) +- return VM_FAULT_SIGBUS; +- +- /* Use the zero-page for reads */ + if (!(flags & FAULT_FLAG_WRITE)) { + entry = pte_mkspecial(pfn_pte(my_zero_pfn(address), + vma->vm_page_prot)); +- page_table = pte_offset_map_lock(mm, pmd, address, &ptl); ++ ptl = pte_lockptr(mm, pmd); ++ spin_lock(ptl); + if (!pte_none(*page_table)) + goto unlock; + goto setpte; + } + + /* Allocate our own private page. */ ++ pte_unmap(page_table); ++ + if (unlikely(anon_vma_prepare(vma))) + goto oom; + page = alloc_zeroed_user_highpage_movable(vma, address); +@@ -2834,6 +3011,11 @@ static int do_anonymous_page(struct mm_s + if (!pte_none(*page_table)) + goto release; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + inc_mm_counter_fast(mm, MM_ANONPAGES); + page_add_new_anon_rmap(page, vma, address); + setpte: +@@ -2841,6 +3023,12 @@ setpte: + + /* No need to invalidate - it was non-present before */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (page) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++#endif ++ + unlock: + pte_unmap_unlock(page_table, ptl); + return 0; +@@ -2983,6 +3171,12 @@ static int __do_fault(struct mm_struct * + */ + /* Only go through if we didn't race with anybody else... 
*/ + if (likely(pte_same(*page_table, orig_pte))) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon && pax_find_mirror_vma(vma)) ++ BUG_ON(!trylock_page(page)); ++#endif ++ + flush_icache_page(vma, page); + entry = mk_pte(page, vma->vm_page_prot); + if (flags & FAULT_FLAG_WRITE) +@@ -3002,6 +3196,14 @@ static int __do_fault(struct mm_struct * + + /* no need to invalidate: a not-present page won't be cached */ + update_mmu_cache(vma, address, page_table); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (anon) ++ pax_mirror_anon_pte(vma, address, page, ptl); ++ else ++ pax_mirror_file_pte(vma, address, page, ptl); ++#endif ++ + } else { + if (charged) + mem_cgroup_uncharge_page(page); +@@ -3149,6 +3351,12 @@ static inline int handle_pte_fault(struc + if (flags & FAULT_FLAG_WRITE) + flush_tlb_page(vma, address); + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ pax_mirror_pte(vma, address, pte, pmd, ptl); ++ return 0; ++#endif ++ + unlock: + pte_unmap_unlock(pte, ptl); + return 0; +@@ -3165,6 +3373,10 @@ int handle_mm_fault(struct mm_struct *mm + pmd_t *pmd; + pte_t *pte; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + __set_current_state(TASK_RUNNING); + + count_vm_event(PGFAULT); +@@ -3175,6 +3387,34 @@ int handle_mm_fault(struct mm_struct *mm + if (unlikely(is_vm_hugetlb_page(vma))) + return hugetlb_fault(mm, vma, address, flags); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ unsigned long address_m; ++ pgd_t *pgd_m; ++ pud_t *pud_m; ++ pmd_t *pmd_m; ++ ++ if (vma->vm_start > vma_m->vm_start) { ++ address_m = address; ++ address -= SEGMEXEC_TASK_SIZE; ++ vma = vma_m; ++ } else ++ address_m = address + SEGMEXEC_TASK_SIZE; ++ ++ pgd_m = pgd_offset(mm, address_m); ++ pud_m = pud_alloc(mm, pgd_m, address_m); ++ if (!pud_m) ++ return VM_FAULT_OOM; ++ pmd_m = pmd_alloc(mm, pud_m, address_m); ++ if (!pmd_m) ++ return VM_FAULT_OOM; ++ if (!pmd_present(*pmd_m) && __pte_alloc(mm, pmd_m, address_m)) ++ return VM_FAULT_OOM; ++ pax_unmap_mirror_pte(vma_m, address_m, pmd_m); ++ } ++#endif ++ + pgd = pgd_offset(mm, address); + pud = pud_alloc(mm, pgd, address); + if (!pud) +@@ -3272,7 +3512,7 @@ static int __init gate_vma_init(void) + gate_vma.vm_start = FIXADDR_USER_START; + gate_vma.vm_end = FIXADDR_USER_END; + gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC; +- gate_vma.vm_page_prot = __P101; ++ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags); + /* + * Make sure the vDSO gets into every core dump. + * Dumping its contents makes post-mortem fully interpretable later +diff -urNp linux-2.6.35.4/mm/memory-failure.c linux-2.6.35.4/mm/memory-failure.c +--- linux-2.6.35.4/mm/memory-failure.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/memory-failure.c 2010-09-17 20:12:09.000000000 -0400 +@@ -51,7 +51,7 @@ int sysctl_memory_failure_early_kill __r + + int sysctl_memory_failure_recovery __read_mostly = 1; + +-atomic_long_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); ++atomic_long_unchecked_t mce_bad_pages __read_mostly = ATOMIC_LONG_INIT(0); + + #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE) + +@@ -939,7 +939,7 @@ int __memory_failure(unsigned long pfn, + return 0; + } + +- atomic_long_add(1, &mce_bad_pages); ++ atomic_long_add_unchecked(1, &mce_bad_pages); + + /* + * We need/can do nothing about count=0 pages. 
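The memory-failure.c conversions above follow a pattern used throughout this patch: PaX's REFCOUNT feature instruments the regular atomic operations with overflow traps, so counters that are pure statistics (such as mce_bad_pages) and may legitimately grow without being reference counts are moved to *_unchecked twins that keep plain, wrap-allowed arithmetic. A minimal sketch of the idea — assuming GCC atomic builtins, and not reproducing the kernel's real per-arch definitions:

    typedef struct {
            volatile long counter;
    } atomic_long_unchecked_t;

    /* deliberately no overflow detection: wrapping is acceptable for statistics */
    static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *v)
    {
            __sync_fetch_and_add(&v->counter, i);
    }

    static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *v)
    {
            __sync_fetch_and_sub(&v->counter, 1L);
    }

The checked variants, by contrast, trap when an addition overflows, which is what catches refcount-overflow bugs in the instrumented kernel.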
+@@ -1003,7 +1003,7 @@ int __memory_failure(unsigned long pfn, + } + if (hwpoison_filter(p)) { + if (TestClearPageHWPoison(p)) +- atomic_long_dec(&mce_bad_pages); ++ atomic_long_dec_unchecked(&mce_bad_pages); + unlock_page(p); + put_page(p); + return 0; +@@ -1096,7 +1096,7 @@ int unpoison_memory(unsigned long pfn) + + if (!get_page_unless_zero(page)) { + if (TestClearPageHWPoison(p)) +- atomic_long_dec(&mce_bad_pages); ++ atomic_long_dec_unchecked(&mce_bad_pages); + pr_debug("MCE: Software-unpoisoned free page %#lx\n", pfn); + return 0; + } +@@ -1110,7 +1110,7 @@ int unpoison_memory(unsigned long pfn) + */ + if (TestClearPageHWPoison(p)) { + pr_debug("MCE: Software-unpoisoned page %#lx\n", pfn); +- atomic_long_dec(&mce_bad_pages); ++ atomic_long_dec_unchecked(&mce_bad_pages); + freeit = 1; + } + unlock_page(page); +@@ -1291,7 +1291,7 @@ int soft_offline_page(struct page *page, + return ret; + + done: +- atomic_long_add(1, &mce_bad_pages); ++ atomic_long_add_unchecked(1, &mce_bad_pages); + SetPageHWPoison(page); + /* keep elevated page count for bad page */ + return ret; +diff -urNp linux-2.6.35.4/mm/mempolicy.c linux-2.6.35.4/mm/mempolicy.c +--- linux-2.6.35.4/mm/mempolicy.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/mempolicy.c 2010-09-17 20:12:37.000000000 -0400 +@@ -642,6 +642,10 @@ static int mbind_range(struct mm_struct + unsigned long vmstart; + unsigned long vmend; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++#endif ++ + vma = find_vma_prev(mm, start, &prev); + if (!vma || vma->vm_start > start) + return -EFAULT; +@@ -672,6 +676,16 @@ static int mbind_range(struct mm_struct + err = policy_vma(vma, new_pol); + if (err) + goto out; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++ if (vma_m) { ++ err = policy_vma(vma_m, new_pol); ++ if (err) ++ goto out; ++ } ++#endif ++ + } + + out: +@@ -1098,6 +1112,17 @@ static long do_mbind(unsigned long start + + if (end < start) + return -EINVAL; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (end == start) + return 0; + +@@ -1303,6 +1328,14 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi + if (!mm) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + /* + * Check if this process has the right to modify the specified + * process. 
The right exists if the process has administrative +@@ -1312,8 +1345,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pi + rcu_read_lock(); + tcred = __task_cred(task); + if (cred->euid != tcred->suid && cred->euid != tcred->uid && +- cred->uid != tcred->suid && cred->uid != tcred->uid && +- !capable(CAP_SYS_NICE)) { ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +@@ -2564,7 +2596,7 @@ int show_numa_map(struct seq_file *m, vo + + if (file) { + seq_printf(m, " file="); +- seq_path(m, &file->f_path, "\n\t= "); ++ seq_path(m, &file->f_path, "\n\t\\= "); + } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) { + seq_printf(m, " heap"); + } else if (vma->vm_start <= mm->start_stack && +diff -urNp linux-2.6.35.4/mm/migrate.c linux-2.6.35.4/mm/migrate.c +--- linux-2.6.35.4/mm/migrate.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/migrate.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1102,6 +1102,14 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, + if (!mm) + return -EINVAL; + ++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP ++ if (mm != current->mm && ++ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) { ++ err = -EPERM; ++ goto out; ++ } ++#endif ++ + /* + * Check if this process has the right to modify the specified + * process. The right exists if the process has administrative +@@ -1111,8 +1119,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, + rcu_read_lock(); + tcred = __task_cred(task); + if (cred->euid != tcred->suid && cred->euid != tcred->uid && +- cred->uid != tcred->suid && cred->uid != tcred->uid && +- !capable(CAP_SYS_NICE)) { ++ cred->uid != tcred->suid && !capable(CAP_SYS_NICE)) { + rcu_read_unlock(); + err = -EPERM; + goto out; +diff -urNp linux-2.6.35.4/mm/mlock.c linux-2.6.35.4/mm/mlock.c +--- linux-2.6.35.4/mm/mlock.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/mlock.c 2010-09-17 20:12:37.000000000 -0400 +@@ -13,6 +13,7 @@ + #include <linux/pagemap.h> + #include <linux/mempolicy.h> + #include <linux/syscalls.h> ++#include <linux/security.h> + #include <linux/sched.h> + #include <linux/module.h> + #include <linux/rmap.h> +@@ -135,19 +136,6 @@ void munlock_vma_page(struct page *page) + } + } + +-/* Is the vma a continuation of the stack vma above it? */ +-static inline int vma_stack_continue(struct vm_area_struct *vma, unsigned long addr) +-{ +- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN); +-} +- +-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr) +-{ +- return (vma->vm_flags & VM_GROWSDOWN) && +- (vma->vm_start == addr) && +- !vma_stack_continue(vma->vm_prev, addr); +-} +- + /** + * __mlock_vma_pages_range() - mlock a range of pages in the vma. 
+ * @vma: target vma +@@ -180,12 +168,6 @@ static long __mlock_vma_pages_range(stru + if (vma->vm_flags & VM_WRITE) + gup_flags |= FOLL_WRITE; + +- /* We don't try to access the guard page of a stack vma */ +- if (stack_guard_page(vma, start)) { +- addr += PAGE_SIZE; +- nr_pages--; +- } +- + while (nr_pages > 0) { + int i; + +@@ -451,6 +433,9 @@ static int do_mlock(unsigned long start, + return -EINVAL; + if (end == start) + return 0; ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + vma = find_vma_prev(current->mm, start, &prev); + if (!vma || vma->vm_start > start) + return -ENOMEM; +@@ -461,6 +446,11 @@ static int do_mlock(unsigned long start, + for (nstart = start ; ; ) { + unsigned int newflags; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif ++ + /* Here we know that vma->vm_start <= nstart < vma->vm_end. */ + + newflags = vma->vm_flags | VM_LOCKED; +@@ -510,6 +500,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, st + lock_limit >>= PAGE_SHIFT; + + /* check against resource limits */ ++ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1); + if ((locked <= lock_limit) || capable(CAP_IPC_LOCK)) + error = do_mlock(start, len, 1); + up_write(¤t->mm->mmap_sem); +@@ -531,17 +522,23 @@ SYSCALL_DEFINE2(munlock, unsigned long, + static int do_mlockall(int flags) + { + struct vm_area_struct * vma, * prev = NULL; +- unsigned int def_flags = 0; + + if (flags & MCL_FUTURE) +- def_flags = VM_LOCKED; +- current->mm->def_flags = def_flags; ++ current->mm->def_flags |= VM_LOCKED; ++ else ++ current->mm->def_flags &= ~VM_LOCKED; + if (flags == MCL_FUTURE) + goto out; + + for (vma = current->mm->mmap; vma ; vma = prev->vm_next) { +- unsigned int newflags; ++ unsigned long newflags; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) ++ break; ++#endif + ++ BUG_ON(vma->vm_end > TASK_SIZE); + newflags = vma->vm_flags | VM_LOCKED; + if (!(flags & MCL_CURRENT)) + newflags &= ~VM_LOCKED; +@@ -573,6 +570,7 @@ SYSCALL_DEFINE1(mlockall, int, flags) + lock_limit >>= PAGE_SHIFT; + + ret = -ENOMEM; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1); + if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) || + capable(CAP_IPC_LOCK)) + ret = do_mlockall(flags); +diff -urNp linux-2.6.35.4/mm/mmap.c linux-2.6.35.4/mm/mmap.c +--- linux-2.6.35.4/mm/mmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/mmap.c 2010-09-17 20:12:37.000000000 -0400 +@@ -44,6 +44,16 @@ + #define arch_rebalance_pgtables(addr, len) (addr) + #endif + ++static inline void verify_mm_writelocked(struct mm_struct *mm) ++{ ++#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX) ++ if (unlikely(down_read_trylock(&mm->mmap_sem))) { ++ up_read(&mm->mmap_sem); ++ BUG(); ++ } ++#endif ++} ++ + static void unmap_region(struct mm_struct *mm, + struct vm_area_struct *vma, struct vm_area_struct *prev, + unsigned long start, unsigned long end); +@@ -69,22 +79,32 @@ static void unmap_region(struct mm_struc + * x: (no) no x: (no) yes x: (no) yes x: (yes) yes + * + */ +-pgprot_t protection_map[16] = { ++pgprot_t protection_map[16] __read_only = { + __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111, + __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111 + }; + + pgprot_t vm_get_page_prot(unsigned long vm_flags) + { +- return __pgprot(pgprot_val(protection_map[vm_flags & ++ pgprot_t prot = 
__pgprot(pgprot_val(protection_map[vm_flags & + (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) | + pgprot_val(arch_vm_get_page_prot(vm_flags))); ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if (!(__supported_pte_mask & _PAGE_NX) && ++ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC && ++ (vm_flags & (VM_READ | VM_WRITE))) ++ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot))))); ++#endif ++ ++ return prot; + } + EXPORT_SYMBOL(vm_get_page_prot); + + int sysctl_overcommit_memory = OVERCOMMIT_GUESS; /* heuristic overcommit */ + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT; ++unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024; + struct percpu_counter vm_committed_as; + + /* +@@ -230,6 +250,7 @@ static struct vm_area_struct *remove_vma + struct vm_area_struct *next = vma->vm_next; + + might_sleep(); ++ BUG_ON(vma->vm_mirror); + if (vma->vm_ops && vma->vm_ops->close) + vma->vm_ops->close(vma); + if (vma->vm_file) { +@@ -266,6 +287,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk) + * not page aligned -Ram Gupta + */ + rlim = rlimit(RLIMIT_DATA); ++ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1); + if (rlim < RLIM_INFINITY && (brk - mm->start_brk) + + (mm->end_data - mm->start_data) > rlim) + goto out; +@@ -695,6 +717,12 @@ static int + can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { + if (vma->vm_pgoff == vm_pgoff) +@@ -714,6 +742,12 @@ static int + can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags, + struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE) ++ return 0; ++#endif ++ + if (is_mergeable_vma(vma, file, vm_flags) && + is_mergeable_anon_vma(anon_vma, vma->anon_vma)) { + pgoff_t vm_pglen; +@@ -756,13 +790,20 @@ can_vma_merge_after(struct vm_area_struc + struct vm_area_struct *vma_merge(struct mm_struct *mm, + struct vm_area_struct *prev, unsigned long addr, + unsigned long end, unsigned long vm_flags, +- struct anon_vma *anon_vma, struct file *file, ++ struct anon_vma *anon_vma, struct file *file, + pgoff_t pgoff, struct mempolicy *policy) + { + pgoff_t pglen = (end - addr) >> PAGE_SHIFT; + struct vm_area_struct *area, *next; + int err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE; ++ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL; ++ ++ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end); ++#endif ++ + /* + * We later require that vma->vm_flags == vm_flags, + * so this tests vma->vm_flags & VM_SPECIAL, too. +@@ -778,6 +819,15 @@ struct vm_area_struct *vma_merge(struct + if (next && next->vm_end == end) /* cases 6, 7, 8 */ + next = next->vm_next; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (prev) ++ prev_m = pax_find_mirror_vma(prev); ++ if (area) ++ area_m = pax_find_mirror_vma(area); ++ if (next) ++ next_m = pax_find_mirror_vma(next); ++#endif ++ + /* + * Can it merge with the predecessor? 
+ */ +@@ -797,9 +847,24 @@ struct vm_area_struct *vma_merge(struct + /* cases 1, 6 */ + err = vma_adjust(prev, prev->vm_start, + next->vm_end, prev->vm_pgoff, NULL); +- } else /* cases 2, 5, 7 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ next_m->vm_end, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 2, 5, 7 */ + err = vma_adjust(prev, prev->vm_start, + end, prev->vm_pgoff, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ end_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } + if (err) + return NULL; + return prev; +@@ -812,12 +877,27 @@ struct vm_area_struct *vma_merge(struct + mpol_equal(policy, vma_policy(next)) && + can_vma_merge_before(next, vm_flags, + anon_vma, file, pgoff+pglen)) { +- if (prev && addr < prev->vm_end) /* case 4 */ ++ if (prev && addr < prev->vm_end) { /* case 4 */ + err = vma_adjust(prev, prev->vm_start, + addr, prev->vm_pgoff, NULL); +- else /* cases 3, 8 */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && prev_m) ++ err = vma_adjust(prev_m, prev_m->vm_start, ++ addr_m, prev_m->vm_pgoff, NULL); ++#endif ++ ++ } else { /* cases 3, 8 */ + err = vma_adjust(area, addr, next->vm_end, + next->vm_pgoff - pglen, NULL); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && area_m) ++ err = vma_adjust(area_m, addr_m, next_m->vm_end, ++ next_m->vm_pgoff - pglen, NULL); ++#endif ++ ++ } + if (err) + return NULL; + return area; +@@ -932,14 +1012,11 @@ none: + void vm_stat_account(struct mm_struct *mm, unsigned long flags, + struct file *file, long pages) + { +- const unsigned long stack_flags +- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN); +- + if (file) { + mm->shared_vm += pages; + if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC) + mm->exec_vm += pages; +- } else if (flags & stack_flags) ++ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN)) + mm->stack_vm += pages; + if (flags & (VM_RESERVED|VM_IO)) + mm->reserved_vm += pages; +@@ -966,7 +1043,7 @@ unsigned long do_mmap_pgoff(struct file + * (the exception is when the underlying filesystem is noexec + * mounted, in which case we dont add PROT_EXEC.) + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC))) + prot |= PROT_EXEC; + +@@ -992,7 +1069,7 @@ unsigned long do_mmap_pgoff(struct file + /* Obtain the address to map to. we verify (or select) it and ensure + * that it represents a valid section of the address space. + */ +- addr = get_unmapped_area(file, addr, len, pgoff, flags); ++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? 
MAP_EXECUTABLE : 0)); + if (addr & ~PAGE_MASK) + return addr; + +@@ -1003,6 +1080,28 @@ unsigned long do_mmap_pgoff(struct file + vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) | + mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) ++ ++#ifdef CONFIG_PAX_EMUPLT ++ vm_flags &= ~VM_EXEC; ++#else ++ return -EPERM; ++#endif ++ ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file) ++ vm_flags &= ~VM_PAGEEXEC; ++#endif ++ + if (flags & MAP_LOCKED) + if (!can_do_mlock()) + return -EPERM; +@@ -1014,6 +1113,7 @@ unsigned long do_mmap_pgoff(struct file + locked += mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > lock_limit && !capable(CAP_IPC_LOCK)) + return -EAGAIN; + } +@@ -1084,6 +1184,9 @@ unsigned long do_mmap_pgoff(struct file + if (error) + return error; + ++ if (!gr_acl_handle_mmap(file, prot)) ++ return -EACCES; ++ + return mmap_region(file, addr, len, flags, vm_flags, pgoff); + } + EXPORT_SYMBOL(do_mmap_pgoff); +@@ -1160,10 +1263,10 @@ SYSCALL_DEFINE1(old_mmap, struct mmap_ar + */ + int vma_wants_writenotify(struct vm_area_struct *vma) + { +- unsigned int vm_flags = vma->vm_flags; ++ unsigned long vm_flags = vma->vm_flags; + + /* If it was private or non-writable, the write bit is already clear */ +- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED))) ++ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED)) + return 0; + + /* The backer wishes to know when pages are first written to? */ +@@ -1212,14 +1315,24 @@ unsigned long mmap_region(struct file *f + unsigned long charged = 0; + struct inode *inode = file ? file->f_path.dentry->d_inode : NULL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + /* Clear old maps */ + error = -ENOMEM; +-munmap_back: + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + if (vma && vma->vm_start < addr + len) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); ++ BUG_ON(vma && vma->vm_start < addr + len); + } + + /* Check against address space limit. 
*/ +@@ -1268,6 +1381,16 @@ munmap_back: + goto unacct_error; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto free_vma; ++ } ++ } ++#endif ++ + vma->vm_mm = mm; + vma->vm_start = addr; + vma->vm_end = addr + len; +@@ -1291,6 +1414,19 @@ munmap_back: + error = file->f_op->mmap(file, vma); + if (error) + goto unmap_and_free_vma; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m && (vm_flags & VM_EXECUTABLE)) ++ added_exe_file_vma(mm); ++#endif ++ ++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32) ++ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) { ++ vma->vm_flags |= VM_PAGEEXEC; ++ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); ++ } ++#endif ++ + if (vm_flags & VM_EXECUTABLE) + added_exe_file_vma(mm); + +@@ -1326,6 +1462,11 @@ munmap_back: + vma_link(mm, vma, prev, rb_link, rb_parent); + file = vma->vm_file; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ BUG_ON(pax_mirror_vma(vma_m, vma)); ++#endif ++ + /* Once vma denies write, undo our temporary denial count */ + if (correct_wcount) + atomic_inc(&inode->i_writecount); +@@ -1334,6 +1475,7 @@ out: + + mm->total_vm += len >> PAGE_SHIFT; + vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT); ++ track_exec_limit(mm, addr, addr + len, vm_flags); + if (vm_flags & VM_LOCKED) { + if (!mlock_vma_pages_range(vma, addr, addr + len)) + mm->locked_vm += (len >> PAGE_SHIFT); +@@ -1351,6 +1493,12 @@ unmap_and_free_vma: + unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); + charged = 0; + free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ kmem_cache_free(vm_area_cachep, vma_m); ++#endif ++ + kmem_cache_free(vm_area_cachep, vma); + unacct_error: + if (charged) +@@ -1358,6 +1506,33 @@ unacct_error: + return error; + } + ++bool check_heap_stack_gap(struct vm_area_struct *vma, unsigned long addr, unsigned long len) ++{ ++ if (!vma) { ++#ifdef CONFIG_STACK_GROWSUP ++ if (addr > sysctl_heap_stack_gap) ++ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap); ++ else ++ vma = find_vma(current->mm, 0); ++ if (vma && (vma->vm_flags & VM_GROWSUP)) ++ return false; ++#endif ++ return true; ++ } ++ ++ if (addr + len > vma->vm_start) ++ return false; ++ ++ if (vma->vm_flags & VM_GROWSDOWN) ++ return sysctl_heap_stack_gap <= vma->vm_start - addr - len; ++#ifdef CONFIG_STACK_GROWSUP ++ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) ++ return addr - vma->vm_prev->vm_end <= sysctl_heap_stack_gap; ++#endif ++ ++ return true; ++} ++ + /* Get an address range which is currently unmapped. + * For shmat() with addr=0. 
+ * +@@ -1384,18 +1559,23 @@ arch_get_unmapped_area(struct file *filp + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + if (len > mm->cached_hole_size) { +- start_addr = addr = mm->free_area_cache; ++ start_addr = addr = mm->free_area_cache; + } else { +- start_addr = addr = TASK_UNMAPPED_BASE; +- mm->cached_hole_size = 0; ++ start_addr = addr = mm->mmap_base; ++ mm->cached_hole_size = 0; + } + + full_search: +@@ -1406,34 +1586,40 @@ full_search: + * Start a new search - just in case we missed + * some holes. + */ +- if (start_addr != TASK_UNMAPPED_BASE) { +- addr = TASK_UNMAPPED_BASE; +- start_addr = addr; ++ if (start_addr != mm->mmap_base) { ++ start_addr = addr = mm->mmap_base; + mm->cached_hole_size = 0; + goto full_search; + } + return -ENOMEM; + } +- if (!vma || addr + len <= vma->vm_start) { +- /* +- * Remember the place where we stopped the search: +- */ +- mm->free_area_cache = addr + len; +- return addr; +- } ++ if (check_heap_stack_gap(vma, addr, len)) ++ break; + if (addr + mm->cached_hole_size < vma->vm_start) + mm->cached_hole_size = vma->vm_start - addr; + addr = vma->vm_end; + } ++ ++ /* ++ * Remember the place where we stopped the search: ++ */ ++ mm->free_area_cache = addr + len; ++ return addr; + } + #endif + + void arch_unmap_area(struct mm_struct *mm, unsigned long addr) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) ++ return; ++#endif ++ + /* + * Is this a new hole at the lowest possible address? 
+ */ +- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache) { ++ if (addr >= mm->mmap_base && addr < mm->free_area_cache) { + mm->free_area_cache = addr; + mm->cached_hole_size = ~0UL; + } +@@ -1451,7 +1637,7 @@ arch_get_unmapped_area_topdown(struct fi + { + struct vm_area_struct *vma; + struct mm_struct *mm = current->mm; +- unsigned long addr = addr0; ++ unsigned long base = mm->mmap_base, addr = addr0; + + /* requested length too big for entire address space */ + if (len > TASK_SIZE) +@@ -1460,13 +1646,18 @@ arch_get_unmapped_area_topdown(struct fi + if (flags & MAP_FIXED) + return addr; + ++#ifdef CONFIG_PAX_RANDMMAP ++ if (!(mm->pax_flags & MF_PAX_RANDMMAP)) ++#endif ++ + /* requesting a specific address */ + if (addr) { + addr = PAGE_ALIGN(addr); +- vma = find_vma(mm, addr); +- if (TASK_SIZE - len >= addr && +- (!vma || addr + len <= vma->vm_start)) +- return addr; ++ if (TASK_SIZE - len >= addr) { ++ vma = find_vma(mm, addr); ++ if (check_heap_stack_gap(vma, addr, len)) ++ return addr; ++ } + } + + /* check if free_area_cache is useful for us */ +@@ -1481,7 +1672,7 @@ arch_get_unmapped_area_topdown(struct fi + /* make sure it can fit in the remaining address space */ + if (addr > len) { + vma = find_vma(mm, addr-len); +- if (!vma || addr <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr - len, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr-len); + } +@@ -1498,7 +1689,7 @@ arch_get_unmapped_area_topdown(struct fi + * return with success: + */ + vma = find_vma(mm, addr); +- if (!vma || addr+len <= vma->vm_start) ++ if (check_heap_stack_gap(vma, addr, len)) + /* remember the address as a hint for next time */ + return (mm->free_area_cache = addr); + +@@ -1517,13 +1708,21 @@ bottomup: + * can happen with large stack limits and large mmap() + * allocations. + */ ++ mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ ++ mm->free_area_cache = mm->mmap_base; + mm->cached_hole_size = ~0UL; +- mm->free_area_cache = TASK_UNMAPPED_BASE; + addr = arch_get_unmapped_area(filp, addr0, len, pgoff, flags); + /* + * Restore the topdown base: + */ +- mm->free_area_cache = mm->mmap_base; ++ mm->mmap_base = base; ++ mm->free_area_cache = base; + mm->cached_hole_size = ~0UL; + + return addr; +@@ -1532,6 +1731,12 @@ bottomup: + + void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr) ++ return; ++#endif ++ + /* + * Is this a new hole at the highest possible address? + */ +@@ -1539,8 +1744,10 @@ void arch_unmap_area_topdown(struct mm_s + mm->free_area_cache = addr; + + /* dont allow allocations above current base */ +- if (mm->free_area_cache > mm->mmap_base) ++ if (mm->free_area_cache > mm->mmap_base) { + mm->free_area_cache = mm->mmap_base; ++ mm->cached_hole_size = ~0UL; ++ } + } + + unsigned long +@@ -1648,6 +1855,34 @@ out: + return prev ? 
prev->vm_next : vma; + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *vma_m; ++ ++ BUG_ON(!vma || vma->vm_start >= vma->vm_end); ++ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) { ++ BUG_ON(vma->vm_mirror); ++ return NULL; ++ } ++ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end); ++ vma_m = vma->vm_mirror; ++ BUG_ON(!vma_m || vma_m->vm_mirror != vma); ++ BUG_ON(vma->vm_file != vma_m->vm_file); ++ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start); ++ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff); ++ if (vma->anon_vma != vma_m->anon_vma) { ++ struct anon_vma_chain *avc, *avc_m; ++ ++ avc = list_entry(vma->anon_vma_chain.prev, struct anon_vma_chain, same_vma); ++ avc_m = list_entry(vma_m->anon_vma_chain.prev, struct anon_vma_chain, same_vma); ++ BUG_ON(avc->anon_vma != avc_m->anon_vma); ++ } ++ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED)); ++ return vma_m; ++} ++#endif ++ + /* + * Verify that the stack growth is acceptable and + * update accounting. This is shared with both the +@@ -1664,6 +1899,7 @@ static int acct_stack_growth(struct vm_a + return -ENOMEM; + + /* Stack limit test */ ++ gr_learn_resource(current, RLIMIT_STACK, size, 1); + if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur)) + return -ENOMEM; + +@@ -1674,6 +1910,7 @@ static int acct_stack_growth(struct vm_a + locked = mm->locked_vm + grow; + limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur); + limit >>= PAGE_SHIFT; ++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1); + if (locked > limit && !capable(CAP_IPC_LOCK)) + return -ENOMEM; + } +@@ -1709,35 +1946,42 @@ static + #endif + int expand_upwards(struct vm_area_struct *vma, unsigned long address) + { +- int error; ++ int error, locknext; + + if (!(vma->vm_flags & VM_GROWSUP)) + return -EFAULT; + ++ /* Also guard against wrapping around to address 0. */ ++ if (address < PAGE_ALIGN(address+1)) ++ address = PAGE_ALIGN(address+1); ++ else ++ return -ENOMEM; ++ + /* + * We must make sure the anon_vma is allocated + * so that the anon_vma locking is not a noop. + */ + if (unlikely(anon_vma_prepare(vma))) + return -ENOMEM; ++ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN); ++ if (locknext && anon_vma_prepare(vma->vm_next)) ++ return -ENOMEM; + anon_vma_lock(vma); ++ if (locknext) ++ anon_vma_lock(vma->vm_next); + + /* + * vma->vm_start/vm_end cannot change under us because the caller + * is required to hold the mmap_sem in read mode. We need the +- * anon_vma lock to serialize against concurrent expand_stacks. +- * Also guard against wrapping around to address 0. ++ * anon_vma locks to serialize against concurrent expand_stacks ++ * and expand_upwards. 
+ */ +- if (address < PAGE_ALIGN(address+4)) +- address = PAGE_ALIGN(address+4); +- else { +- anon_vma_unlock(vma); +- return -ENOMEM; +- } + error = 0; + + /* Somebody else might have raced and expanded it already */ +- if (address > vma->vm_end) { ++ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) { + unsigned long size, grow; + + size = address - vma->vm_start; +@@ -1747,6 +1991,8 @@ int expand_upwards(struct vm_area_struct + if (!error) + vma->vm_end = address; + } ++ if (locknext) ++ anon_vma_unlock(vma->vm_next); + anon_vma_unlock(vma); + return error; + } +@@ -1758,7 +2004,8 @@ int expand_upwards(struct vm_area_struct + static int expand_downwards(struct vm_area_struct *vma, + unsigned long address) + { +- int error; ++ int error, lockprev = 0; ++ struct vm_area_struct *prev; + + /* + * We must make sure the anon_vma is allocated +@@ -1772,6 +2019,15 @@ static int expand_downwards(struct vm_ar + if (error) + return error; + ++ prev = vma->vm_prev; ++#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64) ++ lockprev = prev && (prev->vm_flags & VM_GROWSUP); ++#endif ++ if (lockprev && anon_vma_prepare(prev)) ++ return -ENOMEM; ++ if (lockprev) ++ anon_vma_lock(prev); ++ + anon_vma_lock(vma); + + /* +@@ -1781,9 +2037,17 @@ static int expand_downwards(struct vm_ar + */ + + /* Somebody else might have raced and expanded it already */ +- if (address < vma->vm_start) { ++ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap) ++ error = -ENOMEM; ++ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) { + unsigned long size, grow; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m; ++ ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + size = vma->vm_end - address; + grow = (vma->vm_start - address) >> PAGE_SHIFT; + +@@ -1791,9 +2055,20 @@ static int expand_downwards(struct vm_ar + if (!error) { + vma->vm_start = address; + vma->vm_pgoff -= grow; ++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ vma_m->vm_start -= grow << PAGE_SHIFT; ++ vma_m->vm_pgoff -= grow; ++ } ++#endif ++ + } + } + anon_vma_unlock(vma); ++ if (lockprev) ++ anon_vma_unlock(prev); + return error; + } + +@@ -1867,6 +2142,13 @@ static void remove_vma_list(struct mm_st + do { + long nrpages = vma_pages(vma); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) { ++ vma = remove_vma(vma); ++ continue; ++ } ++#endif ++ + mm->total_vm -= nrpages; + vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages); + vma = remove_vma(vma); +@@ -1912,6 +2194,16 @@ detach_vmas_to_be_unmapped(struct mm_str + insertion_point = (prev ? 
&prev->vm_next : &mm->mmap); + vma->vm_prev = NULL; + do { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma->vm_mirror) { ++ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma); ++ vma->vm_mirror->vm_mirror = NULL; ++ vma->vm_mirror->vm_flags &= ~VM_EXEC; ++ vma->vm_mirror = NULL; ++ } ++#endif ++ + rb_erase(&vma->vm_rb, &mm->mm_rb); + mm->map_count--; + tail_vma = vma; +@@ -1940,14 +2232,33 @@ static int __split_vma(struct mm_struct + struct vm_area_struct *new; + int err = -ENOMEM; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m, *new_m = NULL; ++ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (is_vm_hugetlb_page(vma) && (addr & + ~(huge_page_mask(hstate_vma(vma))))) + return -EINVAL; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ vma_m = pax_find_mirror_vma(vma); ++#endif ++ + new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); + if (!new) + goto out_err; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL); ++ if (!new_m) { ++ kmem_cache_free(vm_area_cachep, new); ++ goto out_err; ++ } ++ } ++#endif ++ + /* most fields are the same, copy all, and then fixup */ + *new = *vma; + +@@ -1960,6 +2271,22 @@ static int __split_vma(struct mm_struct + new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT); + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) { ++ *new_m = *vma_m; ++ INIT_LIST_HEAD(&new_m->anon_vma_chain); ++ new_m->vm_mirror = new; ++ new->vm_mirror = new_m; ++ ++ if (new_below) ++ new_m->vm_end = addr_m; ++ else { ++ new_m->vm_start = addr_m; ++ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT); ++ } ++ } ++#endif ++ + pol = mpol_dup(vma_policy(vma)); + if (IS_ERR(pol)) { + err = PTR_ERR(pol); +@@ -1985,6 +2312,42 @@ static int __split_vma(struct mm_struct + else + err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (!err && vma_m) { ++ if (anon_vma_clone(new_m, vma_m)) ++ goto out_free_mpol; ++ ++ mpol_get(pol); ++ vma_set_policy(new_m, pol); ++ ++ if (new_m->vm_file) { ++ get_file(new_m->vm_file); ++ if (vma_m->vm_flags & VM_EXECUTABLE) ++ added_exe_file_vma(mm); ++ } ++ ++ if (new_m->vm_ops && new_m->vm_ops->open) ++ new_m->vm_ops->open(new_m); ++ ++ if (new_below) ++ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff + ++ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m); ++ else ++ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m); ++ ++ if (err) { ++ if (new_m->vm_ops && new_m->vm_ops->close) ++ new_m->vm_ops->close(new_m); ++ if (new_m->vm_file) { ++ if (vma_m->vm_flags & VM_EXECUTABLE) ++ removed_exe_file_vma(mm); ++ fput(new_m->vm_file); ++ } ++ mpol_put(pol); ++ } ++ } ++#endif ++ + /* Success. 
*/ + if (!err) + return 0; +@@ -2000,6 +2363,15 @@ static int __split_vma(struct mm_struct + out_free_mpol: + mpol_put(pol); + out_free_vma: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (new_m) { ++ unlink_anon_vmas(new_m); ++ kmem_cache_free(vm_area_cachep, new_m); ++ } ++#endif ++ ++ unlink_anon_vmas(new); + kmem_cache_free(vm_area_cachep, new); + out_err: + return err; +@@ -2012,6 +2384,15 @@ static int __split_vma(struct mm_struct + int split_vma(struct mm_struct *mm, struct vm_area_struct *vma, + unsigned long addr, int new_below) + { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) { ++ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE); ++ if (mm->map_count >= sysctl_max_map_count-1) ++ return -ENOMEM; ++ } else ++#endif ++ + if (mm->map_count >= sysctl_max_map_count) + return -ENOMEM; + +@@ -2023,11 +2404,30 @@ int split_vma(struct mm_struct *mm, stru + * work. This now handles partial unmappings. + * Jeremy Fitzhardinge <jeremy@goop.org> + */ ++#ifdef CONFIG_PAX_SEGMEXEC ++int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++{ ++ int ret = __do_munmap(mm, start, len); ++ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC)) ++ return ret; ++ ++ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len); ++} ++ ++int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#else + int do_munmap(struct mm_struct *mm, unsigned long start, size_t len) ++#endif + { + unsigned long end; + struct vm_area_struct *vma, *prev, *last; + ++ /* ++ * mm->mmap_sem is required to protect against another thread ++ * changing the mappings in case we sleep. ++ */ ++ verify_mm_writelocked(mm); ++ + if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start) + return -EINVAL; + +@@ -2101,6 +2501,8 @@ int do_munmap(struct mm_struct *mm, unsi + /* Fix up all other VM information */ + remove_vma_list(mm, vma); + ++ track_exec_limit(mm, start, end, 0UL); ++ + return 0; + } + +@@ -2113,22 +2515,18 @@ SYSCALL_DEFINE2(munmap, unsigned long, a + + profile_munmap(addr); + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)) ++ return -EINVAL; ++#endif ++ + down_write(&mm->mmap_sem); + ret = do_munmap(mm, addr, len); + up_write(&mm->mmap_sem); + return ret; + } + +-static inline void verify_mm_writelocked(struct mm_struct *mm) +-{ +-#ifdef CONFIG_DEBUG_VM +- if (unlikely(down_read_trylock(&mm->mmap_sem))) { +- WARN_ON(1); +- up_read(&mm->mmap_sem); +- } +-#endif +-} +- + /* + * this is really a simplified "do_mmap". it only handles + * anonymous maps. eventually we may be able to do some +@@ -2142,6 +2540,7 @@ unsigned long do_brk(unsigned long addr, + struct rb_node ** rb_link, * rb_parent; + pgoff_t pgoff = addr >> PAGE_SHIFT; + int error; ++ unsigned long charged; + + len = PAGE_ALIGN(len); + if (!len) +@@ -2153,16 +2552,30 @@ unsigned long do_brk(unsigned long addr, + + flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags; + ++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) ++ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) { ++ flags &= ~VM_EXEC; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) ++ flags &= ~VM_MAYEXEC; ++#endif ++ ++ } ++#endif ++ + error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED); + if (error & ~PAGE_MASK) + return error; + ++ charged = len >> PAGE_SHIFT; ++ + /* + * mlock MCL_FUTURE? 
+ */ + if (mm->def_flags & VM_LOCKED) { + unsigned long locked, lock_limit; +- locked = len >> PAGE_SHIFT; ++ locked = charged; + locked += mm->locked_vm; + lock_limit = rlimit(RLIMIT_MEMLOCK); + lock_limit >>= PAGE_SHIFT; +@@ -2179,22 +2592,22 @@ unsigned long do_brk(unsigned long addr, + /* + * Clear old maps. this also does some error checking for us + */ +- munmap_back: + vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); + if (vma && vma->vm_start < addr + len) { + if (do_munmap(mm, addr, len)) + return -ENOMEM; +- goto munmap_back; ++ vma = find_vma_prepare(mm, addr, &prev, &rb_link, &rb_parent); ++ BUG_ON(vma && vma->vm_start < addr + len); + } + + /* Check against address space limits *after* clearing old maps... */ +- if (!may_expand_vm(mm, len >> PAGE_SHIFT)) ++ if (!may_expand_vm(mm, charged)) + return -ENOMEM; + + if (mm->map_count > sysctl_max_map_count) + return -ENOMEM; + +- if (security_vm_enough_memory(len >> PAGE_SHIFT)) ++ if (security_vm_enough_memory(charged)) + return -ENOMEM; + + /* Can we just expand an old private anonymous mapping? */ +@@ -2208,7 +2621,7 @@ unsigned long do_brk(unsigned long addr, + */ + vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); + if (!vma) { +- vm_unacct_memory(len >> PAGE_SHIFT); ++ vm_unacct_memory(charged); + return -ENOMEM; + } + +@@ -2221,11 +2634,12 @@ unsigned long do_brk(unsigned long addr, + vma->vm_page_prot = vm_get_page_prot(flags); + vma_link(mm, vma, prev, rb_link, rb_parent); + out: +- mm->total_vm += len >> PAGE_SHIFT; ++ mm->total_vm += charged; + if (flags & VM_LOCKED) { + if (!mlock_vma_pages_range(vma, addr, addr + len)) +- mm->locked_vm += (len >> PAGE_SHIFT); ++ mm->locked_vm += charged; + } ++ track_exec_limit(mm, addr, addr + len, flags); + return addr; + } + +@@ -2272,8 +2686,10 @@ void exit_mmap(struct mm_struct *mm) + * Walk the list again, actually closing and freeing it, + * with preemption enabled, without holding any MM locks. + */ +- while (vma) ++ while (vma) { ++ vma->vm_mirror = NULL; + vma = remove_vma(vma); ++ } + + BUG_ON(mm->nr_ptes > (FIRST_USER_ADDRESS+PMD_SIZE-1)>>PMD_SHIFT); + } +@@ -2287,6 +2703,10 @@ int insert_vm_struct(struct mm_struct * + struct vm_area_struct * __vma, * prev; + struct rb_node ** rb_link, * rb_parent; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++#endif ++ + /* + * The vm_pgoff of a purely anonymous vma should be irrelevant + * until its first write fault, when page's anon_vma and index +@@ -2309,7 +2729,22 @@ int insert_vm_struct(struct mm_struct * + if ((vma->vm_flags & VM_ACCOUNT) && + security_vm_enough_memory_mm(mm, vma_pages(vma))) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) ++ return -ENOMEM; ++ } ++#endif ++ + vma_link(mm, vma, prev, rb_link, rb_parent); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (vma_m) ++ BUG_ON(pax_mirror_vma(vma_m, vma)); ++#endif ++ + return 0; + } + +@@ -2327,6 +2762,8 @@ struct vm_area_struct *copy_vma(struct v + struct rb_node **rb_link, *rb_parent; + struct mempolicy *pol; + ++ BUG_ON(vma->vm_mirror); ++ + /* + * If anonymous vma has not yet been faulted, update new pgoff + * to match new location, to increase its chance of merging. 
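
The expand_upwards() and expand_downwards() hunks above refuse a stack or heap expansion whenever it would leave less than sysctl_heap_stack_gap bytes before an adjacent readable, writable, or executable mapping, returning -ENOMEM instead. Below is a minimal user-space C sketch of that policy; struct vma, the VM_* values, and gap_ok_upwards() are illustrative stand-ins for the kernel definitions, not the patch's own code.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel types; only the fields the
 * gap policy inspects are modelled.  Not the patch's own definitions. */
#define VM_READ  0x1UL
#define VM_WRITE 0x2UL
#define VM_EXEC  0x4UL

struct vma {
	uintptr_t vm_start;
	uintptr_t vm_end;
	unsigned long vm_flags;
	struct vma *vm_next;
};

/* Stands in for sysctl_heap_stack_gap (expressed in bytes here). */
static unsigned long heap_stack_gap = 64 * 1024;

/* Model of the check the expand_upwards() hunk adds: refuse to grow a
 * mapping up to 'address' if that would leave less than heap_stack_gap
 * before the next accessible (readable, writable or executable) mapping. */
static bool gap_ok_upwards(const struct vma *vma, uintptr_t address)
{
	const struct vma *next = vma->vm_next;

	if (next && (next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) &&
	    next->vm_start - address < heap_stack_gap)
		return false;
	return true;
}

int main(void)
{
	struct vma stack = { 0x30000, 0x40000, VM_READ | VM_WRITE, NULL };
	struct vma heap  = { 0x10000, 0x20000, VM_READ | VM_WRITE, &stack };

	/* Growing to 0x28000 would leave only 32 KiB before the stack: refused. */
	printf("grow to 0x28000: %s\n",
	       gap_ok_upwards(&heap, 0x28000) ? "allowed" : "refused");
	/* Growing to 0x20000 leaves the full 64 KiB gap: allowed. */
	printf("grow to 0x20000: %s\n",
	       gap_ok_upwards(&heap, 0x20000) ? "allowed" : "refused");
	return 0;
}

The same predicate appears mirrored in expand_downwards() against prev->vm_end, and again in the mprotect_fixup() hunk further below, so a gap cannot be closed either by growing a mapping or by making a neighbouring one accessible.
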
+@@ -2376,6 +2813,39 @@ struct vm_area_struct *copy_vma(struct v + kmem_cache_free(vm_area_cachep, new_vma); + return NULL; + } ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma) ++{ ++ struct vm_area_struct *prev_m; ++ struct rb_node **rb_link_m, *rb_parent_m; ++ struct mempolicy *pol_m; ++ ++ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)); ++ BUG_ON(vma->vm_mirror || vma_m->vm_mirror); ++ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m))); ++ *vma_m = *vma; ++ INIT_LIST_HEAD(&vma_m->anon_vma_chain); ++ if (anon_vma_clone(vma_m, vma)) ++ return -ENOMEM; ++ pol_m = vma_policy(vma_m); ++ mpol_get(pol_m); ++ vma_set_policy(vma_m, pol_m); ++ vma_m->vm_start += SEGMEXEC_TASK_SIZE; ++ vma_m->vm_end += SEGMEXEC_TASK_SIZE; ++ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED); ++ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags); ++ if (vma_m->vm_file) ++ get_file(vma_m->vm_file); ++ if (vma_m->vm_ops && vma_m->vm_ops->open) ++ vma_m->vm_ops->open(vma_m); ++ find_vma_prepare(vma->vm_mm, vma_m->vm_start, &prev_m, &rb_link_m, &rb_parent_m); ++ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m); ++ vma_m->vm_mirror = vma; ++ vma->vm_mirror = vma_m; ++ return 0; ++} ++#endif + + /* + * Return true if the calling process may expand its vm space by the passed +@@ -2387,7 +2857,7 @@ int may_expand_vm(struct mm_struct *mm, + unsigned long lim; + + lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT; +- ++ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1); + if (cur + npages > lim) + return 0; + return 1; +@@ -2457,6 +2927,17 @@ int install_special_mapping(struct mm_st + vma->vm_start = addr; + vma->vm_end = addr + len; + ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->pax_flags & MF_PAX_MPROTECT) { ++ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) ++ return -EPERM; ++ if (!(vm_flags & VM_EXEC)) ++ vm_flags &= ~VM_MAYEXEC; ++ else ++ vm_flags &= ~VM_MAYWRITE; ++ } ++#endif ++ + vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND; + vma->vm_page_prot = vm_get_page_prot(vma->vm_flags); + +diff -urNp linux-2.6.35.4/mm/mprotect.c linux-2.6.35.4/mm/mprotect.c +--- linux-2.6.35.4/mm/mprotect.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/mprotect.c 2010-09-17 20:12:37.000000000 -0400 +@@ -23,10 +23,16 @@ + #include <linux/mmu_notifier.h> + #include <linux/migrate.h> + #include <linux/perf_event.h> ++ ++#ifdef CONFIG_PAX_MPROTECT ++#include <linux/elf.h> ++#endif ++ + #include <asm/uaccess.h> + #include <asm/pgtable.h> + #include <asm/cacheflush.h> + #include <asm/tlbflush.h> ++#include <asm/mmu_context.h> + + #ifndef pgprot_modify + static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot) +@@ -131,6 +137,48 @@ static void change_protection(struct vm_ + flush_tlb_range(vma, start, end); + } + ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++/* called while holding the mmap semaphor for writing except stack expansion */ ++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) ++{ ++ unsigned long oldlimit, newlimit = 0UL; ++ ++ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX)) ++ return; ++ ++ spin_lock(&mm->page_table_lock); ++ oldlimit = mm->context.user_cs_limit; ++ if ((prot & VM_EXEC) && oldlimit < end) ++ /* USER_CS limit moved up */ ++ newlimit = end; ++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end) ++ /* USER_CS limit moved down */ ++ 
newlimit = start; ++ ++ if (newlimit) { ++ mm->context.user_cs_limit = newlimit; ++ ++#ifdef CONFIG_SMP ++ wmb(); ++ cpus_clear(mm->context.cpu_user_cs_mask); ++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask); ++#endif ++ ++ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id()); ++ } ++ spin_unlock(&mm->page_table_lock); ++ if (newlimit == end) { ++ struct vm_area_struct *vma = find_vma(mm, oldlimit); ++ ++ for (; vma && vma->vm_start < end; vma = vma->vm_next) ++ if (is_vm_hugetlb_page(vma)) ++ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot); ++ else ++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma)); ++ } ++} ++#endif ++ + int + mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev, + unsigned long start, unsigned long end, unsigned long newflags) +@@ -143,11 +191,29 @@ mprotect_fixup(struct vm_area_struct *vm + int error; + int dirty_accountable = 0; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = NULL; ++ unsigned long start_m, end_m; ++ ++ start_m = start + SEGMEXEC_TASK_SIZE; ++ end_m = end + SEGMEXEC_TASK_SIZE; ++#endif ++ + if (newflags == oldflags) { + *pprev = vma; + return 0; + } + ++ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) { ++ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next; ++ ++ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end) ++ return -ENOMEM; ++ ++ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end) ++ return -ENOMEM; ++ } ++ + /* + * If we make a private mapping writable we increase our commit; + * but (without finer accounting) cannot reduce our commit if we +@@ -164,6 +230,42 @@ mprotect_fixup(struct vm_area_struct *vm + } + } + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) { ++ if (start != vma->vm_start) { ++ error = split_vma(mm, vma, start, 1); ++ if (error) ++ goto fail; ++ BUG_ON(!*pprev || (*pprev)->vm_next == vma); ++ *pprev = (*pprev)->vm_next; ++ } ++ ++ if (end != vma->vm_end) { ++ error = split_vma(mm, vma, end, 0); ++ if (error) ++ goto fail; ++ } ++ ++ if (pax_find_mirror_vma(vma)) { ++ error = __do_munmap(mm, start_m, end_m - start_m); ++ if (error) ++ goto fail; ++ } else { ++ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL); ++ if (!vma_m) { ++ error = -ENOMEM; ++ goto fail; ++ } ++ vma->vm_flags = newflags; ++ error = pax_mirror_vma(vma_m, vma); ++ if (error) { ++ vma->vm_flags = oldflags; ++ goto fail; ++ } ++ } ++ } ++#endif ++ + /* + * First try to merge with previous and/or next vma. + */ +@@ -194,9 +296,21 @@ success: + * vm_flags and vm_page_prot are protected by the mmap_sem + * held in write mode. 
+ */ ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ)) ++ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ; ++#endif ++ + vma->vm_flags = newflags; ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (mm->binfmt && mm->binfmt->handle_mprotect) ++ mm->binfmt->handle_mprotect(vma, newflags); ++#endif ++ + vma->vm_page_prot = pgprot_modify(vma->vm_page_prot, +- vm_get_page_prot(newflags)); ++ vm_get_page_prot(vma->vm_flags)); + + if (vma_wants_writenotify(vma)) { + vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED); +@@ -237,6 +351,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, + end = start + len; + if (end <= start) + return -ENOMEM; ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) { ++ if (end > SEGMEXEC_TASK_SIZE) ++ return -EINVAL; ++ } else ++#endif ++ ++ if (end > TASK_SIZE) ++ return -EINVAL; ++ + if (!arch_validate_prot(prot)) + return -EINVAL; + +@@ -244,7 +369,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, + /* + * Does the application expect PROT_READ to imply PROT_EXEC: + */ +- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC)) ++ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC)) + prot |= PROT_EXEC; + + vm_flags = calc_vm_prot_bits(prot); +@@ -276,6 +401,16 @@ SYSCALL_DEFINE3(mprotect, unsigned long, + if (start > vma->vm_start) + prev = vma; + ++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) { ++ error = -EACCES; ++ goto out; ++ } ++ ++#ifdef CONFIG_PAX_MPROTECT ++ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect) ++ current->mm->binfmt->handle_mprotect(vma, vm_flags); ++#endif ++ + for (nstart = start ; ; ) { + unsigned long newflags; + +@@ -300,6 +435,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, + if (error) + goto out; + perf_event_mmap(vma); ++ ++ track_exec_limit(current->mm, nstart, tmp, vm_flags); ++ + nstart = tmp; + + if (nstart < prev->vm_end) +diff -urNp linux-2.6.35.4/mm/mremap.c linux-2.6.35.4/mm/mremap.c +--- linux-2.6.35.4/mm/mremap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/mremap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -113,6 +113,12 @@ static void move_ptes(struct vm_area_str + continue; + pte = ptep_clear_flush(vma, old_addr, old_pte); + pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr); ++ ++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT ++ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC) ++ pte = pte_exprotect(pte); ++#endif ++ + set_pte_at(mm, new_addr, new_pte, pte); + } + +@@ -272,6 +278,11 @@ static struct vm_area_struct *vma_to_res + if (is_vm_hugetlb_page(vma)) + goto Einval; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (pax_find_mirror_vma(vma)) ++ goto Einval; ++#endif ++ + /* We can't remap across vm area boundaries */ + if (old_len > vma->vm_end - addr) + goto Efault; +@@ -321,20 +332,23 @@ static unsigned long mremap_to(unsigned + unsigned long ret = -EINVAL; + unsigned long charged = 0; + unsigned long map_flags; ++ unsigned long pax_task_size = TASK_SIZE; + + if (new_addr & ~PAGE_MASK) + goto out; + +- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len) ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len) + goto out; + + /* Check if the location we're moving into overlaps the + * old location at all, and fail if it does. 
+ */ +- if ((new_addr <= addr) && (new_addr+new_len) > addr) +- goto out; +- +- if ((addr <= new_addr) && (addr+old_len) > new_addr) ++ if (addr + old_len > new_addr && new_addr + new_len > addr) + goto out; + + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); +@@ -406,6 +420,7 @@ unsigned long do_mremap(unsigned long ad + struct vm_area_struct *vma; + unsigned long ret = -EINVAL; + unsigned long charged = 0; ++ unsigned long pax_task_size = TASK_SIZE; + + if (flags & ~(MREMAP_FIXED | MREMAP_MAYMOVE)) + goto out; +@@ -424,6 +439,15 @@ unsigned long do_mremap(unsigned long ad + if (!new_len) + goto out; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (mm->pax_flags & MF_PAX_SEGMEXEC) ++ pax_task_size = SEGMEXEC_TASK_SIZE; ++#endif ++ ++ if (new_len > pax_task_size || addr > pax_task_size-new_len || ++ old_len > pax_task_size || addr > pax_task_size-old_len) ++ goto out; ++ + if (flags & MREMAP_FIXED) { + if (flags & MREMAP_MAYMOVE) + ret = mremap_to(addr, old_len, new_addr, new_len); +@@ -473,6 +497,7 @@ unsigned long do_mremap(unsigned long ad + addr + new_len); + } + ret = addr; ++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags); + goto out; + } + } +@@ -499,7 +524,13 @@ unsigned long do_mremap(unsigned long ad + ret = security_file_mmap(NULL, 0, 0, 0, new_addr, 1); + if (ret) + goto out; ++ ++ map_flags = vma->vm_flags; + ret = move_vma(vma, addr, old_len, new_len, new_addr); ++ if (!(ret & ~PAGE_MASK)) { ++ track_exec_limit(current->mm, addr, addr + old_len, 0UL); ++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags); ++ } + } + out: + if (ret & ~PAGE_MASK) +diff -urNp linux-2.6.35.4/mm/nommu.c linux-2.6.35.4/mm/nommu.c +--- linux-2.6.35.4/mm/nommu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/nommu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -67,7 +67,6 @@ int sysctl_overcommit_memory = OVERCOMMI + int sysctl_overcommit_ratio = 50; /* default is 50% */ + int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT; + int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS; +-int heap_stack_gap = 0; + + atomic_long_t mmap_pages_allocated; + +@@ -762,15 +761,6 @@ struct vm_area_struct *find_vma(struct m + EXPORT_SYMBOL(find_vma); + + /* +- * find a VMA +- * - we don't extend stack VMAs under NOMMU conditions +- */ +-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr) +-{ +- return find_vma(mm, addr); +-} +- +-/* + * expand a stack to a given address + * - not supported under NOMMU conditions + */ +@@ -1491,6 +1481,7 @@ int split_vma(struct mm_struct *mm, stru + + /* most fields are the same, copy all, and then fixup */ + *new = *vma; ++ INIT_LIST_HEAD(&new->anon_vma_chain); + *region = *vma->vm_region; + new->vm_region = region; + +diff -urNp linux-2.6.35.4/mm/page_alloc.c linux-2.6.35.4/mm/page_alloc.c +--- linux-2.6.35.4/mm/page_alloc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/page_alloc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -641,6 +641,10 @@ static bool free_pages_prepare(struct pa + int i; + int bad = 0; + ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ unsigned long index = 1UL << order; ++#endif ++ + trace_mm_page_free_direct(page, order); + kmemcheck_free_shadow(page, order); + +@@ -659,6 +663,12 @@ static bool free_pages_prepare(struct pa + debug_check_no_obj_freed(page_address(page), + PAGE_SIZE << order); + } ++ ++#ifdef CONFIG_PAX_MEMORY_SANITIZE ++ for (; index; --index) ++ sanitize_highpage(page + index - 1); ++#endif ++ + arch_free_page(page, order); + kernel_map_pages(page, 1 << order, 
0); + +@@ -773,8 +783,10 @@ static int prep_new_page(struct page *pa + arch_alloc_page(page, order); + kernel_map_pages(page, 1 << order, 1); + ++#ifndef CONFIG_PAX_MEMORY_SANITIZE + if (gfp_flags & __GFP_ZERO) + prep_zero_page(page, order, gfp_flags); ++#endif + + if (order && (gfp_flags & __GFP_COMP)) + prep_compound_page(page, order); +@@ -3973,7 +3985,7 @@ static void __init setup_usemap(struct p + zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize); + } + #else +-static void inline setup_usemap(struct pglist_data *pgdat, ++static inline void setup_usemap(struct pglist_data *pgdat, + struct zone *zone, unsigned long zonesize) {} + #endif /* CONFIG_SPARSEMEM */ + +diff -urNp linux-2.6.35.4/mm/percpu.c linux-2.6.35.4/mm/percpu.c +--- linux-2.6.35.4/mm/percpu.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/percpu.c 2010-09-17 20:12:09.000000000 -0400 +@@ -115,7 +115,7 @@ static unsigned int pcpu_first_unit_cpu + static unsigned int pcpu_last_unit_cpu __read_mostly; + + /* the address of the first chunk which starts with the kernel static area */ +-void *pcpu_base_addr __read_mostly; ++void *pcpu_base_addr __read_only; + EXPORT_SYMBOL_GPL(pcpu_base_addr); + + static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */ +diff -urNp linux-2.6.35.4/mm/rmap.c linux-2.6.35.4/mm/rmap.c +--- linux-2.6.35.4/mm/rmap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/rmap.c 2010-09-17 20:12:09.000000000 -0400 +@@ -116,6 +116,10 @@ int anon_vma_prepare(struct vm_area_stru + struct anon_vma *anon_vma = vma->anon_vma; + struct anon_vma_chain *avc; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct anon_vma_chain *avc_m = NULL; ++#endif ++ + might_sleep(); + if (unlikely(!anon_vma)) { + struct mm_struct *mm = vma->vm_mm; +@@ -125,6 +129,12 @@ int anon_vma_prepare(struct vm_area_stru + if (!avc) + goto out_enomem; + ++#ifdef CONFIG_PAX_SEGMEXEC ++ avc_m = anon_vma_chain_alloc(); ++ if (!avc_m) ++ goto out_enomem_free_avc; ++#endif ++ + anon_vma = find_mergeable_anon_vma(vma); + allocated = NULL; + if (!anon_vma) { +@@ -138,6 +148,21 @@ int anon_vma_prepare(struct vm_area_stru + /* page_table_lock to protect against threads */ + spin_lock(&mm->page_table_lock); + if (likely(!vma->anon_vma)) { ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma); ++ ++ if (vma_m) { ++ BUG_ON(vma_m->anon_vma); ++ vma_m->anon_vma = anon_vma; ++ avc_m->anon_vma = anon_vma; ++ avc_m->vma = vma; ++ list_add(&avc_m->same_vma, &vma_m->anon_vma_chain); ++ list_add(&avc_m->same_anon_vma, &anon_vma->head); ++ avc_m = NULL; ++ } ++#endif ++ + vma->anon_vma = anon_vma; + avc->anon_vma = anon_vma; + avc->vma = vma; +@@ -151,12 +176,24 @@ int anon_vma_prepare(struct vm_area_stru + + if (unlikely(allocated)) + anon_vma_free(allocated); ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (unlikely(avc_m)) ++ anon_vma_chain_free(avc_m); ++#endif ++ + if (unlikely(avc)) + anon_vma_chain_free(avc); + } + return 0; + + out_enomem_free_avc: ++ ++#ifdef CONFIG_PAX_SEGMEXEC ++ if (avc_m) ++ anon_vma_chain_free(avc_m); ++#endif ++ + anon_vma_chain_free(avc); + out_enomem: + return -ENOMEM; +@@ -179,7 +216,7 @@ static void anon_vma_chain_link(struct v + * Attach the anon_vmas from src to dst. + * Returns 0 on success, -ENOMEM on failure. 
+ */ +-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src) ++int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src) + { + struct anon_vma_chain *avc, *pavc; + +@@ -201,7 +238,7 @@ int anon_vma_clone(struct vm_area_struct + * the corresponding VMA in the parent process is attached to. + * Returns 0 on success, non-zero on failure. + */ +-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma) ++int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma) + { + struct anon_vma_chain *avc; + struct anon_vma *anon_vma; +diff -urNp linux-2.6.35.4/mm/shmem.c linux-2.6.35.4/mm/shmem.c +--- linux-2.6.35.4/mm/shmem.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/shmem.c 2010-09-17 20:12:37.000000000 -0400 +@@ -30,7 +30,7 @@ + #include <linux/module.h> + #include <linux/swap.h> + +-static struct vfsmount *shm_mnt; ++struct vfsmount *shm_mnt; + + #ifdef CONFIG_SHMEM + /* +diff -urNp linux-2.6.35.4/mm/slab.c linux-2.6.35.4/mm/slab.c +--- linux-2.6.35.4/mm/slab.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/slab.c 2010-09-17 20:12:37.000000000 -0400 +@@ -285,7 +285,7 @@ struct kmem_list3 { + * Need this for bootstrapping a per node allocator. + */ + #define NUM_INIT_LISTS (3 * MAX_NUMNODES) +-struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS]; ++struct kmem_list3 initkmem_list3[NUM_INIT_LISTS]; + #define CACHE_CACHE 0 + #define SIZE_AC MAX_NUMNODES + #define SIZE_L3 (2 * MAX_NUMNODES) +@@ -535,7 +535,7 @@ static inline void *index_to_obj(struct + * reciprocal_divide(offset, cache->reciprocal_buffer_size) + */ + static inline unsigned int obj_to_index(const struct kmem_cache *cache, +- const struct slab *slab, void *obj) ++ const struct slab *slab, const void *obj) + { + u32 offset = (obj - slab->s_mem); + return reciprocal_divide(offset, cache->reciprocal_buffer_size); +@@ -561,14 +561,14 @@ struct cache_names { + static struct cache_names __initdata cache_names[] = { + #define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" }, + #include <linux/kmalloc_sizes.h> +- {NULL,} ++ {NULL, NULL} + #undef CACHE + }; + + static struct arraycache_init initarray_cache __initdata = +- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; ++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} }; + static struct arraycache_init initarray_generic = +- { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} }; ++ { {0, BOOT_CPUCACHE_ENTRIES, 1, 0}, {NULL} }; + + /* internal cache of cache description objs */ + static struct kmem_cache cache_cache = { +@@ -4558,15 +4558,66 @@ static const struct file_operations proc + + static int __init slab_proc_init(void) + { +- proc_create("slabinfo",S_IWUSR|S_IRUGO,NULL,&proc_slabinfo_operations); ++ mode_t gr_mode = S_IRUGO; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ ++ proc_create("slabinfo",S_IWUSR|gr_mode,NULL,&proc_slabinfo_operations); + #ifdef CONFIG_DEBUG_SLAB_LEAK +- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations); ++ proc_create("slab_allocators", gr_mode, NULL, &proc_slabstats_operations); + #endif + return 0; + } + module_init(slab_proc_init); + #endif + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct kmem_cache *cachep; ++ struct slab *slabp; ++ struct page *page; ++ unsigned int objnr; ++ unsigned long offset; ++ ++ if (!n) ++ return; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ page = virt_to_head_page(ptr); 
++ ++ if (!PageSlab(page)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ cachep = page_get_cache(page); ++ slabp = page_get_slab(page); ++ objnr = obj_to_index(cachep, slabp, ptr); ++ BUG_ON(objnr >= cachep->num); ++ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep); ++ if (offset <= obj_size(cachep) && n <= obj_size(cachep) - offset) ++ return; ++ ++report: ++ if (to) ++ pax_report_leak_to_user(ptr, n); ++ else ++ pax_report_overflow_from_user(ptr, n); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + /** + * ksize - get the actual amount of memory allocated for a given object + * @objp: Pointer to the object +diff -urNp linux-2.6.35.4/mm/slob.c linux-2.6.35.4/mm/slob.c +--- linux-2.6.35.4/mm/slob.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/slob.c 2010-09-17 20:12:09.000000000 -0400 +@@ -29,7 +29,7 @@ + * If kmalloc is asked for objects of PAGE_SIZE or larger, it calls + * alloc_pages() directly, allocating compound pages so the page order + * does not have to be separately tracked, and also stores the exact +- * allocation size in page->private so that it can be used to accurately ++ * allocation size in slob_page->size so that it can be used to accurately + * provide ksize(). These objects are detected in kfree() because slob_page() + * is false for them. + * +@@ -58,6 +58,7 @@ + */ + + #include <linux/kernel.h> ++#include <linux/sched.h> + #include <linux/slab.h> + #include <linux/mm.h> + #include <linux/swap.h> /* struct reclaim_state */ +@@ -100,7 +101,8 @@ struct slob_page { + unsigned long flags; /* mandatory */ + atomic_t _count; /* mandatory */ + slobidx_t units; /* free units left in page */ +- unsigned long pad[2]; ++ unsigned long pad[1]; ++ unsigned long size; /* size when >=PAGE_SIZE */ + slob_t *free; /* first free slob_t in page */ + struct list_head list; /* linked list of free pages */ + }; +@@ -133,7 +135,7 @@ static LIST_HEAD(free_slob_large); + */ + static inline int is_slob_page(struct slob_page *sp) + { +- return PageSlab((struct page *)sp); ++ return PageSlab((struct page *)sp) && !sp->size; + } + + static inline void set_slob_page(struct slob_page *sp) +@@ -148,7 +150,7 @@ static inline void clear_slob_page(struc + + static inline struct slob_page *slob_page(const void *addr) + { +- return (struct slob_page *)virt_to_page(addr); ++ return (struct slob_page *)virt_to_head_page(addr); + } + + /* +@@ -208,7 +210,7 @@ static void set_slob(slob_t *s, slobidx_ + /* + * Return the size of a slob block. + */ +-static slobidx_t slob_units(slob_t *s) ++static slobidx_t slob_units(const slob_t *s) + { + if (s->units > 0) + return s->units; +@@ -218,7 +220,7 @@ static slobidx_t slob_units(slob_t *s) + /* + * Return the next free slob block pointer after this one. + */ +-static slob_t *slob_next(slob_t *s) ++static slob_t *slob_next(const slob_t *s) + { + slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK); + slobidx_t next; +@@ -233,7 +235,7 @@ static slob_t *slob_next(slob_t *s) + /* + * Returns true if s is the last free block in its page. 
+ */ +-static int slob_last(slob_t *s) ++static int slob_last(const slob_t *s) + { + return !((unsigned long)slob_next(s) & ~PAGE_MASK); + } +@@ -252,6 +254,7 @@ static void *slob_new_pages(gfp_t gfp, i + if (!page) + return NULL; + ++ set_slob_page(page); + return page_address(page); + } + +@@ -368,11 +371,11 @@ static void *slob_alloc(size_t size, gfp + if (!b) + return NULL; + sp = slob_page(b); +- set_slob_page(sp); + + spin_lock_irqsave(&slob_lock, flags); + sp->units = SLOB_UNITS(PAGE_SIZE); + sp->free = b; ++ sp->size = 0; + INIT_LIST_HEAD(&sp->list); + set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE)); + set_slob_page_free(sp, slob_list); +@@ -467,10 +470,9 @@ out: + * End of slob allocator proper. Begin kmem_cache_alloc and kmalloc frontend. + */ + +-void *__kmalloc_node(size_t size, gfp_t gfp, int node) ++static void *__kmalloc_node_align(size_t size, gfp_t gfp, int node, int align) + { +- unsigned int *m; +- int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ slob_t *m; + void *ret; + + lockdep_trace_alloc(gfp); +@@ -483,7 +485,10 @@ void *__kmalloc_node(size_t size, gfp_t + + if (!m) + return NULL; +- *m = size; ++ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT); ++ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT); ++ m[0].units = size; ++ m[1].units = align; + ret = (void *)m + align; + + trace_kmalloc_node(_RET_IP_, ret, +@@ -493,9 +498,9 @@ void *__kmalloc_node(size_t size, gfp_t + + ret = slob_new_pages(gfp | __GFP_COMP, get_order(size), node); + if (ret) { +- struct page *page; +- page = virt_to_page(ret); +- page->private = size; ++ struct slob_page *sp; ++ sp = slob_page(ret); ++ sp->size = size; + } + + trace_kmalloc_node(_RET_IP_, ret, +@@ -505,6 +510,13 @@ void *__kmalloc_node(size_t size, gfp_t + kmemleak_alloc(ret, size, 1, gfp); + return ret; + } ++ ++void *__kmalloc_node(size_t size, gfp_t gfp, int node) ++{ ++ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); ++ ++ return __kmalloc_node_align(size, gfp, node, align); ++} + EXPORT_SYMBOL(__kmalloc_node); + + void kfree(const void *block) +@@ -520,13 +532,84 @@ void kfree(const void *block) + sp = slob_page(block); + if (is_slob_page(sp)) { + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- slob_free(m, *m + align); +- } else ++ slob_t *m = (slob_t *)(block - align); ++ slob_free(m, m[0].units + align); ++ } else { ++ clear_slob_page(sp); ++ free_slob_page(sp); ++ sp->size = 0; + put_page(&sp->page); ++ } + } + EXPORT_SYMBOL(kfree); + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct slob_page *sp; ++ const slob_t *free; ++ const void *base; ++ ++ if (!n) ++ return; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ sp = slob_page(ptr); ++ if (!PageSlab((struct page*)sp)) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ if (sp->size) { ++ base = page_address(&sp->page); ++ if (base <= ptr && n <= sp->size - (ptr - base)) ++ return; ++ goto report; ++ } ++ ++ /* some tricky double walking to find the chunk */ ++ base = (void *)((unsigned long)ptr & PAGE_MASK); ++ free = sp->free; ++ ++ while (!slob_last(free) && (void *)free <= ptr) { ++ base = free + slob_units(free); ++ free = slob_next(free); ++ } ++ ++ while (base < (void *)free) { ++ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units; ++ int size = SLOB_UNIT * SLOB_UNITS(m + align); ++ int 
offset; ++ ++ if (ptr < base + align) ++ goto report; ++ ++ offset = ptr - base - align; ++ if (offset < m) { ++ if (n <= m - offset) ++ return; ++ goto report; ++ } ++ base += size; ++ } ++ ++report: ++ if (to) ++ pax_report_leak_to_user(ptr, n); ++ else ++ pax_report_overflow_from_user(ptr, n); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + /* can't use ksize for kmem_cache_alloc memory, only kmalloc */ + size_t ksize(const void *block) + { +@@ -539,10 +622,10 @@ size_t ksize(const void *block) + sp = slob_page(block); + if (is_slob_page(sp)) { + int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN); +- unsigned int *m = (unsigned int *)(block - align); +- return SLOB_UNITS(*m) * SLOB_UNIT; ++ slob_t *m = (slob_t *)(block - align); ++ return SLOB_UNITS(m[0].units) * SLOB_UNIT; + } else +- return sp->page.private; ++ return sp->size; + } + EXPORT_SYMBOL(ksize); + +@@ -597,17 +680,25 @@ void *kmem_cache_alloc_node(struct kmem_ + { + void *b; + ++#ifdef CONFIG_PAX_USERCOPY ++ b = __kmalloc_node_align(c->size, flags, node, c->align); ++#else + if (c->size < PAGE_SIZE) { + b = slob_alloc(c->size, flags, c->align, node); + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + SLOB_UNITS(c->size) * SLOB_UNIT, + flags, node); + } else { ++ struct slob_page *sp; ++ + b = slob_new_pages(flags, get_order(c->size), node); ++ sp = slob_page(b); ++ sp->size = c->size; + trace_kmem_cache_alloc_node(_RET_IP_, b, c->size, + PAGE_SIZE << get_order(c->size), + flags, node); + } ++#endif + + if (c->ctor) + c->ctor(b); +@@ -619,10 +710,16 @@ EXPORT_SYMBOL(kmem_cache_alloc_node); + + static void __kmem_cache_free(void *b, int size) + { +- if (size < PAGE_SIZE) ++ struct slob_page *sp = slob_page(b); ++ ++ if (is_slob_page(sp)) + slob_free(b, size); +- else ++ else { ++ clear_slob_page(sp); ++ free_slob_page(sp); ++ sp->size = 0; + slob_free_pages(b, get_order(size)); ++ } + } + + static void kmem_rcu_free(struct rcu_head *head) +@@ -635,15 +732,24 @@ static void kmem_rcu_free(struct rcu_hea + + void kmem_cache_free(struct kmem_cache *c, void *b) + { ++ int size = c->size; ++ ++#ifdef CONFIG_PAX_USERCOPY ++ if (size + c->align < PAGE_SIZE) { ++ size += c->align; ++ b -= c->align; ++ } ++#endif ++ + kmemleak_free_recursive(b, c->flags); + if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) { + struct slob_rcu *slob_rcu; +- slob_rcu = b + (c->size - sizeof(struct slob_rcu)); ++ slob_rcu = b + (size - sizeof(struct slob_rcu)); + INIT_RCU_HEAD(&slob_rcu->head); +- slob_rcu->size = c->size; ++ slob_rcu->size = size; + call_rcu(&slob_rcu->head, kmem_rcu_free); + } else { +- __kmem_cache_free(b, c->size); ++ __kmem_cache_free(b, size); + } + + trace_kmem_cache_free(_RET_IP_, b); +diff -urNp linux-2.6.35.4/mm/slub.c linux-2.6.35.4/mm/slub.c +--- linux-2.6.35.4/mm/slub.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/slub.c 2010-09-17 20:12:37.000000000 -0400 +@@ -1873,6 +1873,8 @@ void kmem_cache_free(struct kmem_cache * + + page = virt_to_head_page(x); + ++ BUG_ON(!PageSlab(page)); ++ + slab_free(s, page, x, _RET_IP_); + + trace_kmem_cache_free(_RET_IP_, x); +@@ -1917,7 +1919,7 @@ static int slub_min_objects; + * Merge control. If this is set then no merging of slab caches will occur. + * (Could be removed. This was introduced to pacify the merge skeptics.) + */ +-static int slub_nomerge; ++static int slub_nomerge = 1; + + /* + * Calculate the order of allocation given an slab object size. 
+@@ -2344,7 +2346,7 @@ static int kmem_cache_open(struct kmem_c + * list to avoid pounding the page allocator excessively. + */ + set_min_partial(s, ilog2(s->size)); +- s->refcount = 1; ++ atomic_set(&s->refcount, 1); + #ifdef CONFIG_NUMA + s->remote_node_defrag_ratio = 1000; + #endif +@@ -2487,8 +2489,7 @@ static inline int kmem_cache_close(struc + void kmem_cache_destroy(struct kmem_cache *s) + { + down_write(&slub_lock); +- s->refcount--; +- if (!s->refcount) { ++ if (atomic_dec_and_test(&s->refcount)) { + list_del(&s->list); + up_write(&slub_lock); + if (kmem_cache_close(s)) { +@@ -2780,6 +2781,46 @@ void *__kmalloc_node(size_t size, gfp_t + EXPORT_SYMBOL(__kmalloc_node); + #endif + ++void check_object_size(const void *ptr, unsigned long n, bool to) ++{ ++ ++#ifdef CONFIG_PAX_USERCOPY ++ struct page *page; ++ struct kmem_cache *s; ++ unsigned long offset; ++ ++ if (!n) ++ return; ++ ++ if (ZERO_OR_NULL_PTR(ptr)) ++ goto report; ++ ++ if (!virt_addr_valid(ptr)) ++ return; ++ ++ page = get_object_page(ptr); ++ ++ if (!page) { ++ if (object_is_on_stack(ptr, n) == -1) ++ goto report; ++ return; ++ } ++ ++ s = page->slab; ++ offset = (ptr - page_address(page)) % s->size; ++ if (offset <= s->objsize && n <= s->objsize - offset) ++ return; ++ ++report: ++ if (to) ++ pax_report_leak_to_user(ptr, n); ++ else ++ pax_report_overflow_from_user(ptr, n); ++#endif ++ ++} ++EXPORT_SYMBOL(check_object_size); ++ + size_t ksize(const void *object) + { + struct page *page; +@@ -3049,7 +3090,7 @@ void __init kmem_cache_init(void) + */ + create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node", + sizeof(struct kmem_cache_node), GFP_NOWAIT); +- kmalloc_caches[0].refcount = -1; ++ atomic_set(&kmalloc_caches[0].refcount, -1); + caches++; + + hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI); +@@ -3158,7 +3199,7 @@ static int slab_unmergeable(struct kmem_ + /* + * We may have set a slab to be unmergeable during bootstrap. + */ +- if (s->refcount < 0) ++ if (atomic_read(&s->refcount) < 0) + return 1; + + return 0; +@@ -3216,7 +3257,7 @@ struct kmem_cache *kmem_cache_create(con + down_write(&slub_lock); + s = find_mergeable(size, align, flags, name, ctor); + if (s) { +- s->refcount++; ++ atomic_inc(&s->refcount); + /* + * Adjust the object sizes so that we clear + * the complete object on kzalloc. 
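
The slub.c hunks above convert kmem_cache's refcount from a plain int to an atomic_t, so kmem_cache_destroy() can collapse the separate decrement and zero test into a single atomic_dec_and_test(). The C11 sketch below reconstructs that pattern in user space with stdatomic.h; struct cache and cache_put() are illustrative names, not kernel API.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the cache refcount the hunks convert from a
 * plain int to an atomic_t; cache_put() is not kernel API. */
struct cache {
	atomic_int refcount;
};

/* Equivalent of atomic_dec_and_test(): one atomic read-modify-write,
 * returning true for exactly the caller that took the count to zero. */
static bool cache_put(struct cache *c)
{
	return atomic_fetch_sub_explicit(&c->refcount, 1,
					 memory_order_acq_rel) == 1;
}

int main(void)
{
	struct cache c;

	atomic_init(&c.refcount, 2);	/* e.g. creator plus one alias */

	if (cache_put(&c))		/* 2 -> 1: not the last put */
		puts("first put destroys");
	if (cache_put(&c))		/* 1 -> 0: destroy exactly once */
		puts("second put destroys");
	return 0;
}

With the old two-step "s->refcount--; if (!s->refcount)" sequence, a decrement and the later test could in principle interleave with another updater; the single read-modify-write guarantees exactly one caller observes the transition to zero and performs the teardown.
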
+@@ -3227,7 +3268,7 @@ struct kmem_cache *kmem_cache_create(con + + if (sysfs_slab_alias(s, name)) { + down_write(&slub_lock); +- s->refcount--; ++ atomic_dec(&s->refcount); + up_write(&slub_lock); + goto err; + } +@@ -3953,7 +3994,7 @@ SLAB_ATTR_RO(ctor); + + static ssize_t aliases_show(struct kmem_cache *s, char *buf) + { +- return sprintf(buf, "%d\n", s->refcount - 1); ++ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1); + } + SLAB_ATTR_RO(aliases); + +@@ -4674,7 +4715,13 @@ static const struct file_operations proc + + static int __init slab_proc_init(void) + { +- proc_create("slabinfo", S_IRUGO, NULL, &proc_slabinfo_operations); ++ mode_t gr_mode = S_IRUGO; ++ ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ ++ proc_create("slabinfo", gr_mode, NULL, &proc_slabinfo_operations); + return 0; + } + module_init(slab_proc_init); +diff -urNp linux-2.6.35.4/mm/util.c linux-2.6.35.4/mm/util.c +--- linux-2.6.35.4/mm/util.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/util.c 2010-09-17 20:12:09.000000000 -0400 +@@ -245,6 +245,12 @@ EXPORT_SYMBOL(strndup_user); + void arch_pick_mmap_layout(struct mm_struct *mm) + { + mm->mmap_base = TASK_UNMAPPED_BASE; ++ ++#ifdef CONFIG_PAX_RANDMMAP ++ if (mm->pax_flags & MF_PAX_RANDMMAP) ++ mm->mmap_base += mm->delta_mmap; ++#endif ++ + mm->get_unmapped_area = arch_get_unmapped_area; + mm->unmap_area = arch_unmap_area; + } +diff -urNp linux-2.6.35.4/mm/vmalloc.c linux-2.6.35.4/mm/vmalloc.c +--- linux-2.6.35.4/mm/vmalloc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/vmalloc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -40,8 +40,19 @@ static void vunmap_pte_range(pmd_t *pmd, + + pte = pte_offset_kernel(pmd, addr); + do { +- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); +- WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) { ++ BUG_ON(!pte_exec(*pte)); ++ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC)); ++ continue; ++ } ++#endif ++ ++ { ++ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte); ++ WARN_ON(!pte_none(ptent) && !pte_present(ptent)); ++ } + } while (pte++, addr += PAGE_SIZE, addr != end); + } + +@@ -92,6 +103,7 @@ static int vmap_pte_range(pmd_t *pmd, un + unsigned long end, pgprot_t prot, struct page **pages, int *nr) + { + pte_t *pte; ++ int ret = -ENOMEM; + + /* + * nr is a running index into the array which helps higher level +@@ -101,17 +113,30 @@ static int vmap_pte_range(pmd_t *pmd, un + pte = pte_alloc_kernel(pmd, addr); + if (!pte) + return -ENOMEM; ++ ++ pax_open_kernel(); + do { + struct page *page = pages[*nr]; + +- if (WARN_ON(!pte_none(*pte))) +- return -EBUSY; +- if (WARN_ON(!page)) +- return -ENOMEM; ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (pgprot_val(prot) & _PAGE_NX) ++#endif ++ ++ if (WARN_ON(!pte_none(*pte))) { ++ ret = -EBUSY; ++ goto out; ++ } ++ if (WARN_ON(!page)) { ++ ret = -ENOMEM; ++ goto out; ++ } + set_pte_at(&init_mm, addr, pte, mk_pte(page, prot)); + (*nr)++; + } while (pte++, addr += PAGE_SIZE, addr != end); +- return 0; ++ ret = 0; ++out: ++ pax_close_kernel(); ++ return ret; + } + + static int vmap_pmd_range(pud_t *pud, unsigned long addr, +@@ -192,11 +217,20 @@ int is_vmalloc_or_module_addr(const void + * and fall back on vmalloc() if that fails. 
Others + * just put it in the vmalloc space. + */ +-#if defined(CONFIG_MODULES) && defined(MODULES_VADDR) ++#ifdef CONFIG_MODULES ++#ifdef MODULES_VADDR + unsigned long addr = (unsigned long)x; + if (addr >= MODULES_VADDR && addr < MODULES_END) + return 1; + #endif ++ ++#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC) ++ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END) ++ return 1; ++#endif ++ ++#endif ++ + return is_vmalloc_addr(x); + } + +@@ -217,8 +251,14 @@ struct page *vmalloc_to_page(const void + + if (!pgd_none(*pgd)) { + pud_t *pud = pud_offset(pgd, addr); ++#ifdef CONFIG_X86 ++ if (!pud_large(*pud)) ++#endif + if (!pud_none(*pud)) { + pmd_t *pmd = pmd_offset(pud, addr); ++#ifdef CONFIG_X86 ++ if (!pmd_large(*pmd)) ++#endif + if (!pmd_none(*pmd)) { + pte_t *ptep, pte; + +@@ -292,13 +332,13 @@ static void __insert_vmap_area(struct vm + struct rb_node *tmp; + + while (*p) { +- struct vmap_area *tmp; ++ struct vmap_area *varea; + + parent = *p; +- tmp = rb_entry(parent, struct vmap_area, rb_node); +- if (va->va_start < tmp->va_end) ++ varea = rb_entry(parent, struct vmap_area, rb_node); ++ if (va->va_start < varea->va_end) + p = &(*p)->rb_left; +- else if (va->va_end > tmp->va_start) ++ else if (va->va_end > varea->va_start) + p = &(*p)->rb_right; + else + BUG(); +@@ -1224,6 +1264,16 @@ static struct vm_struct *__get_vm_area_n + struct vm_struct *area; + + BUG_ON(in_interrupt()); ++ ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (flags & VM_KERNEXEC) { ++ if (start != VMALLOC_START || end != VMALLOC_END) ++ return NULL; ++ start = (unsigned long)MODULES_EXEC_VADDR; ++ end = (unsigned long)MODULES_EXEC_END; ++ } ++#endif ++ + if (flags & VM_IOREMAP) { + int bit = fls(size); + +@@ -1449,6 +1499,11 @@ void *vmap(struct page **pages, unsigned + if (count > totalram_pages) + return NULL; + ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ flags |= VM_KERNEXEC; ++#endif ++ + area = get_vm_area_caller((count << PAGE_SHIFT), flags, + __builtin_return_address(0)); + if (!area) +@@ -1558,6 +1613,13 @@ static void *__vmalloc_node(unsigned lon + if (!size || (size >> PAGE_SHIFT) > totalram_pages) + return NULL; + ++#if defined(CONFIG_MODULES) && defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC) ++ if (!(pgprot_val(prot) & _PAGE_NX)) ++ area = __get_vm_area_node(size, align, VM_ALLOC | VM_KERNEXEC, VMALLOC_START, VMALLOC_END, ++ node, gfp_mask, caller); ++ else ++#endif ++ + area = __get_vm_area_node(size, align, VM_ALLOC, VMALLOC_START, + VMALLOC_END, node, gfp_mask, caller); + +@@ -1576,6 +1638,7 @@ static void *__vmalloc_node(unsigned lon + return addr; + } + ++#undef __vmalloc + void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot) + { + return __vmalloc_node(size, 1, gfp_mask, prot, -1, +@@ -1592,6 +1655,7 @@ EXPORT_SYMBOL(__vmalloc); + * For tight control over page level allocator and protection flags + * use __vmalloc() instead. + */ ++#undef vmalloc + void *vmalloc(unsigned long size) + { + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, +@@ -1606,6 +1670,7 @@ EXPORT_SYMBOL(vmalloc); + * The resulting memory area is zeroed so it can be mapped to userspace + * without leaking data. 
+ */ ++#undef vmalloc_user + void *vmalloc_user(unsigned long size) + { + struct vm_struct *area; +@@ -1633,6 +1698,7 @@ EXPORT_SYMBOL(vmalloc_user); + * For tight control over page level allocator and protection flags + * use __vmalloc() instead. + */ ++#undef vmalloc_node + void *vmalloc_node(unsigned long size, int node) + { + return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL, +@@ -1655,10 +1721,10 @@ EXPORT_SYMBOL(vmalloc_node); + * For tight control over page level allocator and protection flags + * use __vmalloc() instead. + */ +- ++#undef vmalloc_exec + void *vmalloc_exec(unsigned long size) + { +- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC, ++ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC, + -1, __builtin_return_address(0)); + } + +@@ -1677,6 +1743,7 @@ void *vmalloc_exec(unsigned long size) + * Allocate enough 32bit PA addressable pages to cover @size from the + * page level allocator and map them into contiguous kernel virtual space. + */ ++#undef vmalloc_32 + void *vmalloc_32(unsigned long size) + { + return __vmalloc_node(size, 1, GFP_VMALLOC32, PAGE_KERNEL, +@@ -1691,6 +1758,7 @@ EXPORT_SYMBOL(vmalloc_32); + * The resulting memory area is 32bit addressable and zeroed so it can be + * mapped to userspace without leaking data. + */ ++#undef vmalloc_32_user + void *vmalloc_32_user(unsigned long size) + { + struct vm_struct *area; +diff -urNp linux-2.6.35.4/mm/vmstat.c linux-2.6.35.4/mm/vmstat.c +--- linux-2.6.35.4/mm/vmstat.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/mm/vmstat.c 2010-09-17 20:12:37.000000000 -0400 +@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu) + * + * vm_stat contains the global counters + */ +-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; ++atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS]; + EXPORT_SYMBOL(vm_stat); + + #ifdef CONFIG_SMP +@@ -315,7 +315,7 @@ void refresh_cpu_vm_stats(int cpu) + v = p->vm_stat_diff[i]; + p->vm_stat_diff[i] = 0; + local_irq_restore(flags); +- atomic_long_add(v, &zone->vm_stat[i]); ++ atomic_long_add_unchecked(v, &zone->vm_stat[i]); + global_diff[i] += v; + #ifdef CONFIG_NUMA + /* 3 seconds idle till flush */ +@@ -353,7 +353,7 @@ void refresh_cpu_vm_stats(int cpu) + + for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++) + if (global_diff[i]) +- atomic_long_add(global_diff[i], &vm_stat[i]); ++ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]); + } + + #endif +@@ -1038,10 +1038,16 @@ static int __init setup_vmstat(void) + start_cpu_timer(cpu); + #endif + #ifdef CONFIG_PROC_FS +- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations); +- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops); +- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations); +- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations); ++ { ++ mode_t gr_mode = S_IRUGO; ++#ifdef CONFIG_GRKERNSEC_PROC_ADD ++ gr_mode = S_IRUSR; ++#endif ++ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations); ++ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops); ++ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations); ++ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations); ++ } + #endif + return 0; + } +diff -urNp linux-2.6.35.4/net/8021q/vlan.c linux-2.6.35.4/net/8021q/vlan.c +--- linux-2.6.35.4/net/8021q/vlan.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/8021q/vlan.c 2010-09-17 20:12:09.000000000 -0400 +@@ -618,8 +618,7 @@ 
static int vlan_ioctl_handler(struct net + err = -EPERM; + if (!capable(CAP_NET_ADMIN)) + break; +- if ((args.u.name_type >= 0) && +- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) { ++ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) { + struct vlan_net *vn; + + vn = net_generic(net, vlan_net_id); +diff -urNp linux-2.6.35.4/net/atm/atm_misc.c linux-2.6.35.4/net/atm/atm_misc.c +--- linux-2.6.35.4/net/atm/atm_misc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/atm/atm_misc.c 2010-09-17 20:12:09.000000000 -0400 +@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int + if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf) + return 1; + atm_return(vcc, truesize); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return 0; + } + EXPORT_SYMBOL(atm_charge); +@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct + } + } + atm_return(vcc, guess); +- atomic_inc(&vcc->stats->rx_drop); ++ atomic_inc_unchecked(&vcc->stats->rx_drop); + return NULL; + } + EXPORT_SYMBOL(atm_alloc_charge); +@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal); + + void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats); + + void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i) + __SONET_ITEMS + #undef __HANDLE_ITEM + } +diff -urNp linux-2.6.35.4/net/atm/proc.c linux-2.6.35.4/net/atm/proc.c +--- linux-2.6.35.4/net/atm/proc.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/atm/proc.c 2010-09-17 20:12:37.000000000 -0400 +@@ -44,9 +44,9 @@ static void add_stats(struct seq_file *s + const struct k_atm_aal_stats *stats) + { + seq_printf(seq, "%s ( %d %d %d %d %d )", aal, +- atomic_read(&stats->tx), atomic_read(&stats->tx_err), +- atomic_read(&stats->rx), atomic_read(&stats->rx_err), +- atomic_read(&stats->rx_drop)); ++ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err), ++ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err), ++ atomic_read_unchecked(&stats->rx_drop)); + } + + static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev) +@@ -190,7 +190,12 @@ static void vcc_info(struct seq_file *se + { + struct sock *sk = sk_atm(vcc); + ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ seq_printf(seq, "%p ", NULL); ++#else + seq_printf(seq, "%p ", vcc); ++#endif ++ + if (!vcc->dev) + seq_printf(seq, "Unassigned "); + else +diff -urNp linux-2.6.35.4/net/atm/resources.c linux-2.6.35.4/net/atm/resources.c +--- linux-2.6.35.4/net/atm/resources.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/atm/resources.c 2010-09-17 20:12:09.000000000 -0400 +@@ -159,7 +159,7 @@ EXPORT_SYMBOL(atm_dev_deregister); + static void copy_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i) ++#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i) + __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +@@ -167,7 +167,7 @@ static void copy_aal_stats(struct k_atm_ + static void subtract_aal_stats(struct k_atm_aal_stats *from, + struct atm_aal_stats *to) + { +-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i) ++#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i) 
+ __AAL_STAT_ITEMS + #undef __HANDLE_ITEM + } +diff -urNp linux-2.6.35.4/net/bridge/br_stp_if.c linux-2.6.35.4/net/bridge/br_stp_if.c +--- linux-2.6.35.4/net/bridge/br_stp_if.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/bridge/br_stp_if.c 2010-09-17 20:12:09.000000000 -0400 +@@ -145,7 +145,7 @@ static void br_stp_stop(struct net_bridg + char *envp[] = { NULL }; + + if (br->stp_enabled == BR_USER_STP) { +- r = call_usermodehelper(BR_STP_PROG, argv, envp, 1); ++ r = call_usermodehelper(BR_STP_PROG, argv, envp, UMH_WAIT_PROC); + br_info(br, "userspace STP stopped, return code %d\n", r); + + /* To start timers on any ports left in blocking */ +diff -urNp linux-2.6.35.4/net/bridge/netfilter/ebtables.c linux-2.6.35.4/net/bridge/netfilter/ebtables.c +--- linux-2.6.35.4/net/bridge/netfilter/ebtables.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/bridge/netfilter/ebtables.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1501,7 +1501,7 @@ static int do_ebt_get_ctl(struct sock *s + tmp.valid_hooks = t->table->valid_hooks; + } + mutex_unlock(&ebt_mutex); +- if (copy_to_user(user, &tmp, *len) != 0){ ++ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){ + BUGPRINT("c2u Didn't work\n"); + ret = -EFAULT; + break; +diff -urNp linux-2.6.35.4/net/core/dev.c linux-2.6.35.4/net/core/dev.c +--- linux-2.6.35.4/net/core/dev.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/core/dev.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2541,7 +2541,7 @@ int netif_rx_ni(struct sk_buff *skb) + } + EXPORT_SYMBOL(netif_rx_ni); + +-static void net_tx_action(struct softirq_action *h) ++static void net_tx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + +@@ -3474,7 +3474,7 @@ void netif_napi_del(struct napi_struct * + } + EXPORT_SYMBOL(netif_napi_del); + +-static void net_rx_action(struct softirq_action *h) ++static void net_rx_action(void) + { + struct softnet_data *sd = &__get_cpu_var(softnet_data); + unsigned long time_limit = jiffies + 2; +diff -urNp linux-2.6.35.4/net/core/net-sysfs.c linux-2.6.35.4/net/core/net-sysfs.c +--- linux-2.6.35.4/net/core/net-sysfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/core/net-sysfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -511,7 +511,7 @@ static ssize_t rx_queue_attr_store(struc + return attribute->store(queue, attribute, buf, count); + } + +-static struct sysfs_ops rx_queue_sysfs_ops = { ++static const struct sysfs_ops rx_queue_sysfs_ops = { + .show = rx_queue_attr_show, + .store = rx_queue_attr_store, + }; +diff -urNp linux-2.6.35.4/net/core/sock.c linux-2.6.35.4/net/core/sock.c +--- linux-2.6.35.4/net/core/sock.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/core/sock.c 2010-09-17 20:12:09.000000000 -0400 +@@ -915,7 +915,7 @@ int sock_getsockopt(struct socket *sock, + return -ENOTCONN; + if (lv < len) + return -EINVAL; +- if (copy_to_user(optval, address, len)) ++ if (len > sizeof(address) || copy_to_user(optval, address, len)) + return -EFAULT; + goto lenout; + } +@@ -948,7 +948,7 @@ int sock_getsockopt(struct socket *sock, + + if (len > lv) + len = lv; +- if (copy_to_user(optval, &v, len)) ++ if (len > sizeof(v) || copy_to_user(optval, &v, len)) + return -EFAULT; + lenout: + if (put_user(len, optlen)) +diff -urNp linux-2.6.35.4/net/dccp/ccids/ccid3.c linux-2.6.35.4/net/dccp/ccids/ccid3.c +--- linux-2.6.35.4/net/dccp/ccids/ccid3.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/dccp/ccids/ccid3.c 2010-09-17 20:12:09.000000000 -0400 +@@ -41,7 +41,7 @@ + static int 
ccid3_debug; + #define ccid3_pr_debug(format, a...) DCCP_PR_DEBUG(ccid3_debug, format, ##a) + #else +-#define ccid3_pr_debug(format, a...) ++#define ccid3_pr_debug(format, a...) do {} while (0) + #endif + + /* +diff -urNp linux-2.6.35.4/net/dccp/dccp.h linux-2.6.35.4/net/dccp/dccp.h +--- linux-2.6.35.4/net/dccp/dccp.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/dccp/dccp.h 2010-09-17 20:12:09.000000000 -0400 +@@ -44,9 +44,9 @@ extern int dccp_debug; + #define dccp_pr_debug_cat(format, a...) DCCP_PRINTK(dccp_debug, format, ##a) + #define dccp_debug(fmt, a...) dccp_pr_debug_cat(KERN_DEBUG fmt, ##a) + #else +-#define dccp_pr_debug(format, a...) +-#define dccp_pr_debug_cat(format, a...) +-#define dccp_debug(format, a...) ++#define dccp_pr_debug(format, a...) do {} while (0) ++#define dccp_pr_debug_cat(format, a...) do {} while (0) ++#define dccp_debug(format, a...) do {} while (0) + #endif + + extern struct inet_hashinfo dccp_hashinfo; +diff -urNp linux-2.6.35.4/net/decnet/sysctl_net_decnet.c linux-2.6.35.4/net/decnet/sysctl_net_decnet.c +--- linux-2.6.35.4/net/decnet/sysctl_net_decnet.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/decnet/sysctl_net_decnet.c 2010-09-17 20:12:37.000000000 -0400 +@@ -173,7 +173,7 @@ static int dn_node_address_handler(ctl_t + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, addr, len)) ++ if (len > sizeof(addr) || copy_to_user(buffer, addr, len)) + return -EFAULT; + + *lenp = len; +@@ -236,7 +236,7 @@ static int dn_def_dev_handler(ctl_table + + if (len > *lenp) len = *lenp; + +- if (copy_to_user(buffer, devname, len)) ++ if (len > sizeof(devname) || copy_to_user(buffer, devname, len)) + return -EFAULT; + + *lenp = len; +diff -urNp linux-2.6.35.4/net/ipv4/inet_hashtables.c linux-2.6.35.4/net/ipv4/inet_hashtables.c +--- linux-2.6.35.4/net/ipv4/inet_hashtables.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/inet_hashtables.c 2010-09-17 20:12:37.000000000 -0400 +@@ -18,11 +18,14 @@ + #include <linux/sched.h> + #include <linux/slab.h> + #include <linux/wait.h> ++#include <linux/security.h> + + #include <net/inet_connection_sock.h> + #include <net/inet_hashtables.h> + #include <net/ip.h> + ++extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet); ++ + /* + * Allocate and initialize a new local port bind bucket. + * The bindhash mutex for snum's hash chain must be held here. 
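The guard added throughout these hunks (sock_getsockopt() and ebtables above, the DECnet sysctl handlers here, and tcp_probe, packet_getsockopt() and sctp_getsockopt_peer_addrs() further down) always has the same shape: reject any user-controlled length that exceeds the kernel-side object before copy_to_user() runs, so an oversized request returns -EFAULT instead of leaking adjacent kernel memory. A compilable userspace sketch of the idiom, with memcpy() standing in for copy_to_user() and every name hypothetical:

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* memcpy() stand-in for copy_to_user(); returns 0 on success. */
static int copy_out(void *dst, const void *src, size_t len)
{
	memcpy(dst, src, len);
	return 0;
}

/*
 * Hypothetical getter following the hardened pattern: 'len' is fully
 * user-controlled, 'devname' is a fixed-size kernel-side buffer.
 */
static int get_dev_name(char *user_buf, size_t len)
{
	char devname[16] = "eth0";

	/* the added check: never copy more than the source object holds */
	if (len > sizeof(devname) || copy_out(user_buf, devname, len))
		return -EFAULT;
	return (int)len;
}

int main(void)
{
	char buf[64];

	printf("small request: %d\n", get_dev_name(buf, 5));  /* copies 5 */
	printf("oversized:     %d\n", get_dev_name(buf, 64)); /* rejected */
	return 0;
}

The size test deliberately sits first in the ||, so short-circuit evaluation guarantees the copy never even starts with a bad length.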
+@@ -508,6 +511,8 @@ ok: + twrefcnt += inet_twsk_bind_unhash(tw, hinfo); + spin_unlock(&head->lock); + ++ gr_update_task_in_ip_table(current, inet_sk(sk)); ++ + if (tw) { + inet_twsk_deschedule(tw, death_row); + while (twrefcnt) { +diff -urNp linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c +--- linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/netfilter/nf_nat_snmp_basic.c 2010-09-17 20:12:09.000000000 -0400 +@@ -398,7 +398,7 @@ static unsigned char asn1_octets_decode( + + *len = 0; + +- *octets = kmalloc(eoc - ctx->pointer, GFP_ATOMIC); ++ *octets = kmalloc((eoc - ctx->pointer), GFP_ATOMIC); + if (*octets == NULL) { + if (net_ratelimit()) + pr_notice("OOM in bsalg (%d)\n", __LINE__); +diff -urNp linux-2.6.35.4/net/ipv4/tcp_ipv4.c linux-2.6.35.4/net/ipv4/tcp_ipv4.c +--- linux-2.6.35.4/net/ipv4/tcp_ipv4.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/tcp_ipv4.c 2010-09-17 20:12:37.000000000 -0400 +@@ -85,6 +85,9 @@ + int sysctl_tcp_tw_reuse __read_mostly; + int sysctl_tcp_low_latency __read_mostly; + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif + + #ifdef CONFIG_TCP_MD5SIG + static struct tcp_md5sig_key *tcp_v4_md5_do_lookup(struct sock *sk, +@@ -1593,6 +1596,9 @@ int tcp_v4_do_rcv(struct sock *sk, struc + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v4_send_reset(rsk, skb); + discard: + kfree_skb(skb); +@@ -1654,12 +1660,19 @@ int tcp_v4_rcv(struct sk_buff *skb) + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; +- ++ } + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) { + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); +@@ -1709,6 +1722,10 @@ no_tcp_socket: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v4_send_reset(NULL, skb); + } + +@@ -2316,7 +2333,11 @@ static void get_openreq4(struct sock *sk + 0, /* non standard timer */ + 0, /* open_requests have no inode */ + atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + req, ++#endif + len); + } + +@@ -2366,7 +2387,12 @@ static void get_tcp4_sock(struct sock *s + sock_i_uid(sk), + icsk->icsk_probes_out, + sock_i_ino(sk), +- atomic_read(&sk->sk_refcnt), sk, ++ atomic_read(&sk->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sk, ++#endif + jiffies_to_clock_t(icsk->icsk_rto), + jiffies_to_clock_t(icsk->icsk_ack.ato), + (icsk->icsk_ack.quick << 1) | icsk->icsk_ack.pingpong, +@@ -2394,7 +2420,13 @@ static void get_timewait4_sock(struct in + " %02X %08X:%08X %02X:%08lX %08X %5d %8d %d %d %p%n", + i, src, srcp, dest, destp, tw->tw_substate, 0, 0, + 3, jiffies_to_clock_t(ttd), 0, 0, 0, 0, +- atomic_read(&tw->tw_refcnt), tw, len); ++ atomic_read(&tw->tw_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ tw, ++#endif ++ len); + } + + #define TMPSZ 150 +diff -urNp linux-2.6.35.4/net/ipv4/tcp_minisocks.c linux-2.6.35.4/net/ipv4/tcp_minisocks.c +--- 
linux-2.6.35.4/net/ipv4/tcp_minisocks.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/tcp_minisocks.c 2010-09-17 20:12:37.000000000 -0400 +@@ -27,6 +27,10 @@ + #include <net/inet_common.h> + #include <net/xfrm.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int sysctl_tcp_syncookies __read_mostly = 1; + EXPORT_SYMBOL(sysctl_tcp_syncookies); + +@@ -700,6 +704,10 @@ listen_overflow: + + embryonic_reset: + NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_EMBRYONICRSTS); ++ ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + if (!(flg & TCP_FLAG_RST)) + req->rsk_ops->send_reset(sk, skb); + +diff -urNp linux-2.6.35.4/net/ipv4/tcp_probe.c linux-2.6.35.4/net/ipv4/tcp_probe.c +--- linux-2.6.35.4/net/ipv4/tcp_probe.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/tcp_probe.c 2010-09-17 20:12:37.000000000 -0400 +@@ -202,7 +202,7 @@ static ssize_t tcpprobe_read(struct file + if (cnt + width >= len) + break; + +- if (copy_to_user(buf + cnt, tbuf, width)) ++ if (width > sizeof(tbuf) || copy_to_user(buf + cnt, tbuf, width)) + return -EFAULT; + cnt += width; + } +diff -urNp linux-2.6.35.4/net/ipv4/tcp_timer.c linux-2.6.35.4/net/ipv4/tcp_timer.c +--- linux-2.6.35.4/net/ipv4/tcp_timer.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/tcp_timer.c 2010-09-17 20:12:37.000000000 -0400 +@@ -22,6 +22,10 @@ + #include <linux/gfp.h> + #include <net/tcp.h> + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_lastack_retries; ++#endif ++ + int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES; + int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES; + int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME; +@@ -195,6 +199,13 @@ static int tcp_write_timeout(struct sock + } + } + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if ((sk->sk_state == TCP_LAST_ACK) && ++ (grsec_lastack_retries > 0) && ++ (grsec_lastack_retries < retry_until)) ++ retry_until = grsec_lastack_retries; ++#endif ++ + if (retransmits_timed_out(sk, retry_until)) { + /* Has it gone just too far? */ + tcp_write_err(sk); +diff -urNp linux-2.6.35.4/net/ipv4/udp.c linux-2.6.35.4/net/ipv4/udp.c +--- linux-2.6.35.4/net/ipv4/udp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv4/udp.c 2010-09-17 20:12:37.000000000 -0400 +@@ -86,6 +86,7 @@ + #include <linux/types.h> + #include <linux/fcntl.h> + #include <linux/module.h> ++#include <linux/security.h> + #include <linux/socket.h> + #include <linux/sockios.h> + #include <linux/igmp.h> +@@ -107,6 +108,10 @@ + #include <net/xfrm.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + struct udp_table udp_table __read_mostly; + EXPORT_SYMBOL(udp_table); + +@@ -564,6 +569,9 @@ found: + return s; + } + ++extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb); ++extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr); ++ + /* + * This routine is called by the ICMP module when it gets some + * sort of error condition. 
If err < 0 then the socket should +@@ -832,9 +840,18 @@ int udp_sendmsg(struct kiocb *iocb, stru + dport = usin->sin_port; + if (dport == 0) + return -EINVAL; ++ ++ err = gr_search_udp_sendmsg(sk, usin); ++ if (err) ++ return err; + } else { + if (sk->sk_state != TCP_ESTABLISHED) + return -EDESTADDRREQ; ++ ++ err = gr_search_udp_sendmsg(sk, NULL); ++ if (err) ++ return err; ++ + daddr = inet->inet_daddr; + dport = inet->inet_dport; + /* Open fast path for connected socket. +@@ -1141,6 +1158,10 @@ try_again: + if (!skb) + goto out; + ++ err = gr_search_udp_recvmsg(sk, skb); ++ if (err) ++ goto out_free; ++ + ulen = skb->len - sizeof(struct udphdr); + if (len > ulen) + len = ulen; +@@ -1582,6 +1603,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, + goto csum_error; + + UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE); ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0); + + /* +@@ -2007,7 +2031,12 @@ static void udp4_format_sock(struct sock + sk_wmem_alloc_get(sp), + sk_rmem_alloc_get(sp), + 0, 0L, 0, sock_i_uid(sp), 0, sock_i_ino(sp), +- atomic_read(&sp->sk_refcnt), sp, ++ atomic_read(&sp->sk_refcnt), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else ++ sp, ++#endif + atomic_read(&sp->sk_drops), len); + } + +diff -urNp linux-2.6.35.4/net/ipv6/exthdrs.c linux-2.6.35.4/net/ipv6/exthdrs.c +--- linux-2.6.35.4/net/ipv6/exthdrs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv6/exthdrs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -636,7 +636,7 @@ static struct tlvtype_proc tlvprochopopt + .type = IPV6_TLV_JUMBO, + .func = ipv6_hop_jumbo, + }, +- { -1, } ++ { -1, NULL } + }; + + int ipv6_parse_hopopts(struct sk_buff *skb) +diff -urNp linux-2.6.35.4/net/ipv6/raw.c linux-2.6.35.4/net/ipv6/raw.c +--- linux-2.6.35.4/net/ipv6/raw.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv6/raw.c 2010-09-17 20:12:09.000000000 -0400 +@@ -601,7 +601,7 @@ out: + return err; + } + +-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length, ++static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length, + struct flowi *fl, struct rt6_info *rt, + unsigned int flags) + { +diff -urNp linux-2.6.35.4/net/ipv6/tcp_ipv6.c linux-2.6.35.4/net/ipv6/tcp_ipv6.c +--- linux-2.6.35.4/net/ipv6/tcp_ipv6.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv6/tcp_ipv6.c 2010-09-17 20:23:25.000000000 -0400 +@@ -92,6 +92,10 @@ static struct tcp_md5sig_key *tcp_v6_md5 + } + #endif + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + static void tcp_v6_hash(struct sock *sk) + { + if (sk->sk_state != TCP_CLOSE) { +@@ -1641,6 +1645,9 @@ static int tcp_v6_do_rcv(struct sock *sk + return 0; + + reset: ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole) ++#endif + tcp_v6_send_reset(sk, skb); + discard: + if (opt_skb) +@@ -1720,12 +1727,20 @@ static int tcp_v6_rcv(struct sk_buff *sk + TCP_SKB_CB(skb)->sacked = 0; + + sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest); +- if (!sk) ++ if (!sk) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 1; ++#endif + goto no_tcp_socket; ++ } + + process: +- if (sk->sk_state == TCP_TIME_WAIT) ++ if (sk->sk_state == TCP_TIME_WAIT) { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ ret = 2; ++#endif + goto do_time_wait; ++ } + + if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) { + NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP); +@@ -1773,6 +1788,10 @@ 
no_tcp_socket: + bad_packet: + TCP_INC_STATS_BH(net, TCP_MIB_INERRS); + } else { ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (ret == 1 && ++ (skb->dev->flags & IFF_LOOPBACK))) ++#endif + tcp_v6_send_reset(NULL, skb); + } + +diff -urNp linux-2.6.35.4/net/ipv6/udp.c linux-2.6.35.4/net/ipv6/udp.c +--- linux-2.6.35.4/net/ipv6/udp.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/ipv6/udp.c 2010-09-17 20:12:37.000000000 -0400 +@@ -50,6 +50,10 @@ + #include <linux/seq_file.h> + #include "udp_impl.h" + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++extern int grsec_enable_blackhole; ++#endif ++ + int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2) + { + const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr; +@@ -756,6 +760,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, + UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, + proto == IPPROTO_UDPLITE); + ++#ifdef CONFIG_GRKERNSEC_BLACKHOLE ++ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK)) ++#endif + icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0); + + kfree_skb(skb); +diff -urNp linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c +--- linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/irda/ircomm/ircomm_tty.c 2010-09-17 20:12:09.000000000 -0400 +@@ -281,16 +281,16 @@ static int ircomm_tty_block_til_ready(st + add_wait_queue(&self->open_wait, &wait); + + IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count ); ++ __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) ); + + /* As far as I can see, we protect open_count - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); + if (!tty_hung_up_p(filp)) { + extra_count = 1; +- self->open_count--; ++ atomic_dec(&self->open_count); + } + spin_unlock_irqrestore(&self->spinlock, flags); +- self->blocked_open++; ++ atomic_inc(&self->blocked_open); + + while (1) { + if (tty->termios->c_cflag & CBAUD) { +@@ -330,7 +330,7 @@ static int ircomm_tty_block_til_ready(st + } + + IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count ); ++ __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count) ); + + schedule(); + } +@@ -341,13 +341,13 @@ static int ircomm_tty_block_til_ready(st + if (extra_count) { + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); +- self->open_count++; ++ atomic_inc(&self->open_count); + spin_unlock_irqrestore(&self->spinlock, flags); + } +- self->blocked_open--; ++ atomic_dec(&self->blocked_open); + + IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n", +- __FILE__,__LINE__, tty->driver->name, self->open_count); ++ __FILE__,__LINE__, tty->driver->name, atomic_read(&self->open_count)); + + if (!retval) + self->flags |= ASYNC_NORMAL_ACTIVE; +@@ -416,14 +416,14 @@ static int ircomm_tty_open(struct tty_st + } + /* ++ is not atomic, so this should be protected - Jean II */ + spin_lock_irqsave(&self->spinlock, flags); +- self->open_count++; ++ atomic_inc(&self->open_count); + + tty->driver_data = self; + self->tty = tty; + spin_unlock_irqrestore(&self->spinlock, flags); + + IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name, +- self->line, self->open_count); ++ self->line, atomic_read(&self->open_count)); + + /* Not really used by us, but lets do it anyway */ 
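/*
 * Illustrative aside, not part of the patch: the ircomm_tty hunks here
 * convert 'open_count' and 'blocked_open' from plain ints guarded by
 * self->spinlock into atomic_t counters, so increments, decrements and
 * reads no longer need the lock.  Below is a compilable userspace
 * analogue using C11 atomics; all names are hypothetical stand-ins for
 * the driver's fields, and atomic_fetch_sub() - 1 plays the role of
 * the kernel's atomic_dec_return().
 */
#include <stdatomic.h>
#include <stdio.h>

struct tty_port_sketch {
	atomic_int open_count;		/* was: int open_count + spinlock */
};

static void port_open(struct tty_port_sketch *p)
{
	atomic_fetch_add(&p->open_count, 1);	/* self->open_count++ */
}

static int port_close(struct tty_port_sketch *p)
{
	/* value after the decrement, as atomic_dec_return() yields */
	return atomic_fetch_sub(&p->open_count, 1) - 1;
}

int main(void)
{
	struct tty_port_sketch p = { 0 };

	port_open(&p);
	port_open(&p);
	printf("open_count after close: %d\n", port_close(&p)); /* 1 */
	return 0;
}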
+ self->tty->low_latency = (self->flags & ASYNC_LOW_LATENCY) ? 1 : 0; +@@ -509,7 +509,7 @@ static void ircomm_tty_close(struct tty_ + return; + } + +- if ((tty->count == 1) && (self->open_count != 1)) { ++ if ((tty->count == 1) && (atomic_read(&self->open_count) != 1)) { + /* + * Uh, oh. tty->count is 1, which means that the tty + * structure will be freed. state->count should always +@@ -519,16 +519,16 @@ static void ircomm_tty_close(struct tty_ + */ + IRDA_DEBUG(0, "%s(), bad serial port count; " + "tty->count is 1, state->count is %d\n", __func__ , +- self->open_count); +- self->open_count = 1; ++ atomic_read(&self->open_count)); ++ atomic_set(&self->open_count, 1); + } + +- if (--self->open_count < 0) { ++ if (atomic_dec_return(&self->open_count) < 0) { + IRDA_ERROR("%s(), bad serial port count for ttys%d: %d\n", +- __func__, self->line, self->open_count); +- self->open_count = 0; ++ __func__, self->line, atomic_read(&self->open_count)); ++ atomic_set(&self->open_count, 0); + } +- if (self->open_count) { ++ if (atomic_read(&self->open_count)) { + spin_unlock_irqrestore(&self->spinlock, flags); + + IRDA_DEBUG(0, "%s(), open count > 0\n", __func__ ); +@@ -560,7 +560,7 @@ static void ircomm_tty_close(struct tty_ + tty->closing = 0; + self->tty = NULL; + +- if (self->blocked_open) { ++ if (atomic_read(&self->blocked_open)) { + if (self->close_delay) + schedule_timeout_interruptible(self->close_delay); + wake_up_interruptible(&self->open_wait); +@@ -1012,7 +1012,7 @@ static void ircomm_tty_hangup(struct tty + spin_lock_irqsave(&self->spinlock, flags); + self->flags &= ~ASYNC_NORMAL_ACTIVE; + self->tty = NULL; +- self->open_count = 0; ++ atomic_set(&self->open_count, 0); + spin_unlock_irqrestore(&self->spinlock, flags); + + wake_up_interruptible(&self->open_wait); +@@ -1364,7 +1364,7 @@ static void ircomm_tty_line_info(struct + seq_putc(m, '\n'); + + seq_printf(m, "Role: %s\n", self->client ? 
"client" : "server"); +- seq_printf(m, "Open count: %d\n", self->open_count); ++ seq_printf(m, "Open count: %d\n", atomic_read(&self->open_count)); + seq_printf(m, "Max data size: %d\n", self->max_data_size); + seq_printf(m, "Max header size: %d\n", self->max_header_size); + +diff -urNp linux-2.6.35.4/net/key/af_key.c linux-2.6.35.4/net/key/af_key.c +--- linux-2.6.35.4/net/key/af_key.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/key/af_key.c 2010-09-17 20:12:37.000000000 -0400 +@@ -3644,7 +3644,11 @@ static int pfkey_seq_show(struct seq_fil + seq_printf(f ,"sk RefCnt Rmem Wmem User Inode\n"); + else + seq_printf(f ,"%p %-6d %-6u %-6u %-6u %-6lu\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + atomic_read(&s->sk_refcnt), + sk_rmem_alloc_get(s), + sk_wmem_alloc_get(s), +diff -urNp linux-2.6.35.4/net/mac80211/ieee80211_i.h linux-2.6.35.4/net/mac80211/ieee80211_i.h +--- linux-2.6.35.4/net/mac80211/ieee80211_i.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/ieee80211_i.h 2010-09-17 20:12:09.000000000 -0400 +@@ -649,7 +649,7 @@ struct ieee80211_local { + /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */ + spinlock_t queue_stop_reason_lock; + +- int open_count; ++ atomic_t open_count; + int monitors, cooked_mntrs; + /* number of interfaces with corresponding FIF_ flags */ + int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll; +diff -urNp linux-2.6.35.4/net/mac80211/iface.c linux-2.6.35.4/net/mac80211/iface.c +--- linux-2.6.35.4/net/mac80211/iface.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/iface.c 2010-09-17 20:12:09.000000000 -0400 +@@ -183,7 +183,7 @@ static int ieee80211_open(struct net_dev + break; + } + +- if (local->open_count == 0) { ++ if (atomic_read(&local->open_count) == 0) { + res = drv_start(local); + if (res) + goto err_del_bss; +@@ -215,7 +215,7 @@ static int ieee80211_open(struct net_dev + * Validate the MAC address for this device. 
+ */ + if (!is_valid_ether_addr(dev->dev_addr)) { +- if (!local->open_count) ++ if (!atomic_read(&local->open_count)) + drv_stop(local); + return -EADDRNOTAVAIL; + } +@@ -308,7 +308,7 @@ static int ieee80211_open(struct net_dev + + hw_reconf_flags |= __ieee80211_recalc_idle(local); + +- local->open_count++; ++ atomic_inc(&local->open_count); + if (hw_reconf_flags) { + ieee80211_hw_config(local, hw_reconf_flags); + /* +@@ -336,7 +336,7 @@ static int ieee80211_open(struct net_dev + err_del_interface: + drv_remove_interface(local, &sdata->vif); + err_stop: +- if (!local->open_count) ++ if (!atomic_read(&local->open_count)) + drv_stop(local); + err_del_bss: + sdata->bss = NULL; +@@ -439,7 +439,7 @@ static int ieee80211_stop(struct net_dev + WARN_ON(!list_empty(&sdata->u.ap.vlans)); + } + +- local->open_count--; ++ atomic_dec(&local->open_count); + + switch (sdata->vif.type) { + case NL80211_IFTYPE_AP_VLAN: +@@ -542,7 +542,7 @@ static int ieee80211_stop(struct net_dev + + ieee80211_recalc_ps(local, -1); + +- if (local->open_count == 0) { ++ if (atomic_read(&local->open_count) == 0) { + ieee80211_clear_tx_pending(local); + ieee80211_stop_device(local); + +diff -urNp linux-2.6.35.4/net/mac80211/main.c linux-2.6.35.4/net/mac80211/main.c +--- linux-2.6.35.4/net/mac80211/main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/main.c 2010-09-17 20:12:09.000000000 -0400 +@@ -148,7 +148,7 @@ int ieee80211_hw_config(struct ieee80211 + local->hw.conf.power_level = power; + } + +- if (changed && local->open_count) { ++ if (changed && atomic_read(&local->open_count)) { + ret = drv_config(local, changed); + /* + * Goal: +diff -urNp linux-2.6.35.4/net/mac80211/pm.c linux-2.6.35.4/net/mac80211/pm.c +--- linux-2.6.35.4/net/mac80211/pm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/pm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -101,7 +101,7 @@ int __ieee80211_suspend(struct ieee80211 + } + + /* stop hardware - this must stop RX */ +- if (local->open_count) ++ if (atomic_read(&local->open_count)) + ieee80211_stop_device(local); + + local->suspended = true; +diff -urNp linux-2.6.35.4/net/mac80211/rate.c linux-2.6.35.4/net/mac80211/rate.c +--- linux-2.6.35.4/net/mac80211/rate.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/rate.c 2010-09-17 20:12:09.000000000 -0400 +@@ -355,7 +355,7 @@ int ieee80211_init_rate_ctrl_alg(struct + + ASSERT_RTNL(); + +- if (local->open_count) ++ if (atomic_read(&local->open_count)) + return -EBUSY; + + if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) { +diff -urNp linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c +--- linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/rc80211_pid_debugfs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -192,7 +192,7 @@ static ssize_t rate_control_pid_events_r + + spin_unlock_irqrestore(&events->lock, status); + +- if (copy_to_user(buf, pb, p)) ++ if (p > sizeof(pb) || copy_to_user(buf, pb, p)) + return -EFAULT; + + return p; +diff -urNp linux-2.6.35.4/net/mac80211/tx.c linux-2.6.35.4/net/mac80211/tx.c +--- linux-2.6.35.4/net/mac80211/tx.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/tx.c 2010-09-17 20:12:09.000000000 -0400 +@@ -173,7 +173,7 @@ static __le16 ieee80211_duration(struct + return cpu_to_le16(dur); + } + +-static int inline is_ieee80211_device(struct ieee80211_local *local, ++static inline int is_ieee80211_device(struct ieee80211_local 
*local, + struct net_device *dev) + { + return local == wdev_priv(dev->ieee80211_ptr); +diff -urNp linux-2.6.35.4/net/mac80211/util.c linux-2.6.35.4/net/mac80211/util.c +--- linux-2.6.35.4/net/mac80211/util.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/mac80211/util.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1097,7 +1097,7 @@ int ieee80211_reconfig(struct ieee80211_ + local->resuming = true; + + /* restart hardware */ +- if (local->open_count) { ++ if (atomic_read(&local->open_count)) { + /* + * Upon resume hardware can sometimes be goofy due to + * various platform / driver / bus issues, so restarting +diff -urNp linux-2.6.35.4/net/netlink/af_netlink.c linux-2.6.35.4/net/netlink/af_netlink.c +--- linux-2.6.35.4/net/netlink/af_netlink.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/netlink/af_netlink.c 2010-09-17 20:12:37.000000000 -0400 +@@ -2001,13 +2001,21 @@ static int netlink_seq_show(struct seq_f + struct netlink_sock *nlk = nlk_sk(s); + + seq_printf(seq, "%p %-3d %-6d %08x %-8d %-8d %p %-8d %-8d %-8lu\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + s->sk_protocol, + nlk->pid, + nlk->groups ? (u32)nlk->groups[0] : 0, + sk_rmem_alloc_get(s), + sk_wmem_alloc_get(s), ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + nlk->cb, ++#endif + atomic_read(&s->sk_refcnt), + atomic_read(&s->sk_drops), + sock_i_ino(s) +diff -urNp linux-2.6.35.4/net/packet/af_packet.c linux-2.6.35.4/net/packet/af_packet.c +--- linux-2.6.35.4/net/packet/af_packet.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/packet/af_packet.c 2010-09-17 20:12:37.000000000 -0400 +@@ -2093,7 +2093,7 @@ static int packet_getsockopt(struct sock + case PACKET_HDRLEN: + if (len > sizeof(int)) + len = sizeof(int); +- if (copy_from_user(&val, optval, len)) ++ if (len > sizeof(val) || copy_from_user(&val, optval, len)) + return -EFAULT; + switch (val) { + case TPACKET_V1: +@@ -2125,7 +2125,7 @@ static int packet_getsockopt(struct sock + + if (put_user(len, optlen)) + return -EFAULT; +- if (copy_to_user(optval, data, len)) ++ if (len > sizeof(st) || copy_to_user(optval, data, len)) + return -EFAULT; + return 0; + } +@@ -2604,7 +2604,11 @@ static int packet_seq_show(struct seq_fi + + seq_printf(seq, + "%p %-6d %-4d %04x %-5d %1d %-6u %-6u %-6lu\n", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + atomic_read(&s->sk_refcnt), + s->sk_type, + ntohs(po->num), +diff -urNp linux-2.6.35.4/net/sctp/socket.c linux-2.6.35.4/net/sctp/socket.c +--- linux-2.6.35.4/net/sctp/socket.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/sctp/socket.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1483,7 +1483,7 @@ SCTP_STATIC int sctp_sendmsg(struct kioc + struct sctp_sndrcvinfo *sinfo; + struct sctp_initmsg *sinit; + sctp_assoc_t associd = 0; +- sctp_cmsgs_t cmsgs = { NULL }; ++ sctp_cmsgs_t cmsgs = { NULL, NULL }; + int err; + sctp_scope_t scope; + long timeo; +@@ -4387,7 +4387,7 @@ static int sctp_getsockopt_peer_addrs(st + addrlen = sctp_get_af_specific(temp.sa.sa_family)->sockaddr_len; + if (space_left < addrlen) + return -ENOMEM; +- if (copy_to_user(to, &temp, addrlen)) ++ if (addrlen > sizeof(temp) || copy_to_user(to, &temp, addrlen)) + return -EFAULT; + to += addrlen; + cnt++; +diff -urNp linux-2.6.35.4/net/socket.c linux-2.6.35.4/net/socket.c +--- linux-2.6.35.4/net/socket.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/socket.c 2010-09-17 20:12:37.000000000 -0400 +@@ -88,6 +88,7 @@ + #include <linux/nsproxy.h> + #include <linux/magic.h> + 
#include <linux/slab.h> ++#include <linux/in.h> + + #include <asm/uaccess.h> + #include <asm/unistd.h> +@@ -105,6 +106,8 @@ + #include <linux/sockios.h> + #include <linux/atalk.h> + ++#include <linux/grsock.h> ++ + static int sock_no_open(struct inode *irrelevant, struct file *dontcare); + static ssize_t sock_aio_read(struct kiocb *iocb, const struct iovec *iov, + unsigned long nr_segs, loff_t pos); +@@ -322,7 +325,7 @@ static int sockfs_get_sb(struct file_sys + mnt); + } + +-static struct vfsmount *sock_mnt __read_mostly; ++struct vfsmount *sock_mnt __read_mostly; + + static struct file_system_type sock_fs_type = { + .name = "sockfs", +@@ -1291,6 +1294,16 @@ SYSCALL_DEFINE3(socket, int, family, int + if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK)) + flags = (flags & ~SOCK_NONBLOCK) | O_NONBLOCK; + ++ if(!gr_search_socket(family, type, protocol)) { ++ retval = -EACCES; ++ goto out; ++ } ++ ++ if (gr_handle_sock_all(family, type, protocol)) { ++ retval = -EACCES; ++ goto out; ++ } ++ + retval = sock_create(family, type, protocol, &sock); + if (retval < 0) + goto out; +@@ -1403,6 +1416,14 @@ SYSCALL_DEFINE3(bind, int, fd, struct so + if (sock) { + err = move_addr_to_kernel(umyaddr, addrlen, (struct sockaddr *)&address); + if (err >= 0) { ++ if (gr_handle_sock_server((struct sockaddr *)&address)) { ++ err = -EACCES; ++ goto error; ++ } ++ err = gr_search_bind(sock, (struct sockaddr_in *)&address); ++ if (err) ++ goto error; ++ + err = security_socket_bind(sock, + (struct sockaddr *)&address, + addrlen); +@@ -1411,6 +1432,7 @@ SYSCALL_DEFINE3(bind, int, fd, struct so + (struct sockaddr *) + &address, addrlen); + } ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1434,10 +1456,20 @@ SYSCALL_DEFINE2(listen, int, fd, int, ba + if ((unsigned)backlog > somaxconn) + backlog = somaxconn; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ goto error; ++ } ++ ++ err = gr_search_listen(sock); ++ if (err) ++ goto error; ++ + err = security_socket_listen(sock, backlog); + if (!err) + err = sock->ops->listen(sock, backlog); + ++error: + fput_light(sock->file, fput_needed); + } + return err; +@@ -1480,6 +1512,18 @@ SYSCALL_DEFINE4(accept4, int, fd, struct + newsock->type = sock->type; + newsock->ops = sock->ops; + ++ if (gr_handle_sock_server_other(sock->sk)) { ++ err = -EPERM; ++ sock_release(newsock); ++ goto out_put; ++ } ++ ++ err = gr_search_accept(sock); ++ if (err) { ++ sock_release(newsock); ++ goto out_put; ++ } ++ + /* + * We don't need try_module_get here, as the listening socket (sock) + * has the protocol module (sock->ops->owner) held. 
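Every hook added to net/socket.c follows one calling convention: the gr_* policy check runs first, before the stock security_socket_*() LSM hook and the protocol operation, and a denial short-circuits the syscall with -EACCES or -EPERM. A compilable sketch of that ordering for listen(); the gr_* names match the patch, but the types and bodies here are stubs invented for illustration:

#include <errno.h>
#include <stdio.h>

/* Stubbed policy hooks; the real ones live in grsecurity itself. */
static int gr_handle_sock_server_other(int sk)	{ return 0; } /* 0 = allow */
static int gr_search_listen(int sock)		{ return 0; } /* 0 = allow */

/* Stubbed LSM hook and protocol operation. */
static int security_socket_listen(int sock, int backlog) { return 0; }
static int proto_listen(int sock, int backlog)		  { return 0; }

static int sys_listen_sketch(int sock, int backlog)
{
	int err;

	/* grsecurity policy gets the first veto ... */
	if (gr_handle_sock_server_other(sock))
		return -EPERM;
	err = gr_search_listen(sock);
	if (err)
		return err;

	/* ... then the LSM hook, then the protocol itself. */
	err = security_socket_listen(sock, backlog);
	if (!err)
		err = proto_listen(sock, backlog);
	return err;
}

int main(void)
{
	printf("listen -> %d\n", sys_listen_sketch(3, 128));
	return 0;
}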
+@@ -1518,6 +1562,8 @@ SYSCALL_DEFINE4(accept4, int, fd, struct + fd_install(newfd, newfile); + err = newfd; + ++ gr_attach_curr_ip(newsock->sk); ++ + out_put: + fput_light(sock->file, fput_needed); + out: +@@ -1550,6 +1596,7 @@ SYSCALL_DEFINE3(connect, int, fd, struct + int, addrlen) + { + struct socket *sock; ++ struct sockaddr *sck; + struct sockaddr_storage address; + int err, fput_needed; + +@@ -1560,6 +1607,17 @@ SYSCALL_DEFINE3(connect, int, fd, struct + if (err < 0) + goto out_put; + ++ sck = (struct sockaddr *)&address; ++ ++ if (gr_handle_sock_client(sck)) { ++ err = -EACCES; ++ goto out_put; ++ } ++ ++ err = gr_search_connect(sock, (struct sockaddr_in *)sck); ++ if (err) ++ goto out_put; ++ + err = + security_socket_connect(sock, (struct sockaddr *)&address, addrlen); + if (err) +diff -urNp linux-2.6.35.4/net/sunrpc/sched.c linux-2.6.35.4/net/sunrpc/sched.c +--- linux-2.6.35.4/net/sunrpc/sched.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/sunrpc/sched.c 2010-09-17 20:12:09.000000000 -0400 +@@ -234,9 +234,9 @@ static int rpc_wait_bit_killable(void *w + #ifdef RPC_DEBUG + static void rpc_task_set_debuginfo(struct rpc_task *task) + { +- static atomic_t rpc_pid; ++ static atomic_unchecked_t rpc_pid; + +- task->tk_pid = atomic_inc_return(&rpc_pid); ++ task->tk_pid = atomic_inc_return_unchecked(&rpc_pid); + } + #else + static inline void rpc_task_set_debuginfo(struct rpc_task *task) +diff -urNp linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c +--- linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/sunrpc/xprtrdma/svc_rdma.c 2010-09-17 20:12:37.000000000 -0400 +@@ -106,7 +106,7 @@ static int read_reset_stat(ctl_table *ta + len -= *ppos; + if (len > *lenp) + len = *lenp; +- if (len && copy_to_user(buffer, str_buf, len)) ++ if (len > sizeof(str_buf) || (len && copy_to_user(buffer, str_buf, len))) + return -EFAULT; + *lenp = len; + *ppos += len; +diff -urNp linux-2.6.35.4/net/sysctl_net.c linux-2.6.35.4/net/sysctl_net.c +--- linux-2.6.35.4/net/sysctl_net.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/sysctl_net.c 2010-09-17 20:12:37.000000000 -0400 +@@ -46,7 +46,7 @@ static int net_ctl_permissions(struct ct + struct ctl_table *table) + { + /* Allow network administrator to have same access as root. 
*/ +- if (capable(CAP_NET_ADMIN)) { ++ if (capable_nolog(CAP_NET_ADMIN)) { + int mode = (table->mode >> 6) & 7; + return (mode << 6) | (mode << 3) | mode; + } +diff -urNp linux-2.6.35.4/net/tipc/socket.c linux-2.6.35.4/net/tipc/socket.c +--- linux-2.6.35.4/net/tipc/socket.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/tipc/socket.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1451,8 +1451,9 @@ static int connect(struct socket *sock, + } else { + if (res == 0) + res = -ETIMEDOUT; +- else +- ; /* leave "res" unchanged */ ++ else { ++ /* leave "res" unchanged */ ++ } + sock->state = SS_DISCONNECTING; + } + +diff -urNp linux-2.6.35.4/net/unix/af_unix.c linux-2.6.35.4/net/unix/af_unix.c +--- linux-2.6.35.4/net/unix/af_unix.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/unix/af_unix.c 2010-09-17 20:12:37.000000000 -0400 +@@ -736,6 +736,12 @@ static struct sock *unix_find_other(stru + err = -ECONNREFUSED; + if (!S_ISSOCK(inode->i_mode)) + goto put_fail; ++ ++ if (!gr_acl_handle_unix(path.dentry, path.mnt)) { ++ err = -EACCES; ++ goto put_fail; ++ } ++ + u = unix_find_socket_byinode(net, inode); + if (!u) + goto put_fail; +@@ -756,6 +762,13 @@ static struct sock *unix_find_other(stru + if (u) { + struct dentry *dentry; + dentry = unix_sk(u)->dentry; ++ ++ if (!gr_handle_chroot_unix(u->sk_peercred.pid)) { ++ err = -EPERM; ++ sock_put(u); ++ goto fail; ++ } ++ + if (dentry) + touch_atime(unix_sk(u)->mnt, dentry); + } else +@@ -841,11 +854,18 @@ static int unix_bind(struct socket *sock + err = security_path_mknod(&nd.path, dentry, mode, 0); + if (err) + goto out_mknod_drop_write; ++ if (!gr_acl_handle_mknod(dentry, nd.path.dentry, nd.path.mnt, mode)) { ++ err = -EACCES; ++ goto out_mknod_drop_write; ++ } + err = vfs_mknod(nd.path.dentry->d_inode, dentry, mode, 0); + out_mknod_drop_write: + mnt_drop_write(nd.path.mnt); + if (err) + goto out_mknod_dput; ++ ++ gr_handle_create(dentry, nd.path.mnt); ++ + mutex_unlock(&nd.path.dentry->d_inode->i_mutex); + dput(nd.path.dentry); + nd.path.dentry = dentry; +@@ -863,6 +883,10 @@ out_mknod_drop_write: + goto out_unlock; + } + ++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX ++ sk->sk_peercred.pid = current->pid; ++#endif ++ + list = &unix_socket_table[addr->hash]; + } else { + list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)]; +@@ -2161,7 +2185,11 @@ static int unix_seq_show(struct seq_file + unix_state_lock(s); + + seq_printf(seq, "%p: %08X %08X %08X %04X %02X %5lu", ++#ifdef CONFIG_GRKERNSEC_HIDESYM ++ NULL, ++#else + s, ++#endif + atomic_read(&s->sk_refcnt), + 0, + s->sk_state == TCP_LISTEN ? __SO_ACCEPTCON : 0, +diff -urNp linux-2.6.35.4/net/wireless/reg.c linux-2.6.35.4/net/wireless/reg.c +--- linux-2.6.35.4/net/wireless/reg.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/wireless/reg.c 2010-09-17 20:12:09.000000000 -0400 +@@ -50,7 +50,7 @@ + printk(KERN_DEBUG format , ## args); \ + } while (0) + #else +-#define REG_DBG_PRINT(args...) ++#define REG_DBG_PRINT(args...) 
do {} while (0) + #endif + + /* Receipt of information from last regulatory request */ +diff -urNp linux-2.6.35.4/net/wireless/wext-core.c linux-2.6.35.4/net/wireless/wext-core.c +--- linux-2.6.35.4/net/wireless/wext-core.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/wireless/wext-core.c 2010-09-17 20:12:09.000000000 -0400 +@@ -744,8 +744,7 @@ static int ioctl_standard_iw_point(struc + */ + + /* Support for very large requests */ +- if ((descr->flags & IW_DESCR_FLAG_NOMAX) && +- (user_length > descr->max_tokens)) { ++ if (user_length > descr->max_tokens) { + /* Allow userspace to GET more than max so + * we can support any size GET requests. + * There is still a limit : -ENOMEM. +diff -urNp linux-2.6.35.4/net/xfrm/xfrm_policy.c linux-2.6.35.4/net/xfrm/xfrm_policy.c +--- linux-2.6.35.4/net/xfrm/xfrm_policy.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/net/xfrm/xfrm_policy.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1502,7 +1502,7 @@ free_dst: + goto out; + } + +-static int inline ++static inline int + xfrm_dst_alloc_copy(void **target, void *src, int size) + { + if (!*target) { +@@ -1514,7 +1514,7 @@ xfrm_dst_alloc_copy(void **target, void + return 0; + } + +-static int inline ++static inline int + xfrm_dst_update_parent(struct dst_entry *dst, struct xfrm_selector *sel) + { + #ifdef CONFIG_XFRM_SUB_POLICY +@@ -1526,7 +1526,7 @@ xfrm_dst_update_parent(struct dst_entry + #endif + } + +-static int inline ++static inline int + xfrm_dst_update_origin(struct dst_entry *dst, struct flowi *fl) + { + #ifdef CONFIG_XFRM_SUB_POLICY +diff -urNp linux-2.6.35.4/scripts/basic/fixdep.c linux-2.6.35.4/scripts/basic/fixdep.c +--- linux-2.6.35.4/scripts/basic/fixdep.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/basic/fixdep.c 2010-09-17 20:12:09.000000000 -0400 +@@ -222,9 +222,9 @@ static void use_config(char *m, int slen + + static void parse_config_file(char *map, size_t len) + { +- int *end = (int *) (map + len); ++ unsigned int *end = (unsigned int *) (map + len); + /* start at +1, so that p can never be < map */ +- int *m = (int *) map + 1; ++ unsigned int *m = (unsigned int *) map + 1; + char *p, *q; + + for (; m < end; m++) { +@@ -371,7 +371,7 @@ static void print_deps(void) + static void traps(void) + { + static char test[] __attribute__((aligned(sizeof(int)))) = "CONF"; +- int *p = (int *)test; ++ unsigned int *p = (unsigned int *)test; + + if (*p != INT_CONF) { + fprintf(stderr, "fixdep: sizeof(int) != 4 or wrong endianess? 
%#x\n", +diff -urNp linux-2.6.35.4/scripts/kallsyms.c linux-2.6.35.4/scripts/kallsyms.c +--- linux-2.6.35.4/scripts/kallsyms.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/kallsyms.c 2010-09-17 20:12:09.000000000 -0400 +@@ -43,10 +43,10 @@ struct text_range { + + static unsigned long long _text; + static struct text_range text_ranges[] = { +- { "_stext", "_etext" }, +- { "_sinittext", "_einittext" }, +- { "_stext_l1", "_etext_l1" }, /* Blackfin on-chip L1 inst SRAM */ +- { "_stext_l2", "_etext_l2" }, /* Blackfin on-chip L2 SRAM */ ++ { "_stext", "_etext", 0, 0 }, ++ { "_sinittext", "_einittext", 0, 0 }, ++ { "_stext_l1", "_etext_l1", 0, 0 }, /* Blackfin on-chip L1 inst SRAM */ ++ { "_stext_l2", "_etext_l2", 0, 0 }, /* Blackfin on-chip L2 SRAM */ + }; + #define text_range_text (&text_ranges[0]) + #define text_range_inittext (&text_ranges[1]) +diff -urNp linux-2.6.35.4/scripts/mod/file2alias.c linux-2.6.35.4/scripts/mod/file2alias.c +--- linux-2.6.35.4/scripts/mod/file2alias.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/mod/file2alias.c 2010-09-17 20:12:09.000000000 -0400 +@@ -72,7 +72,7 @@ static void device_id_check(const char * + unsigned long size, unsigned long id_size, + void *symval) + { +- int i; ++ unsigned int i; + + if (size % id_size || size < id_size) { + if (cross_build != 0) +@@ -102,7 +102,7 @@ static void device_id_check(const char * + /* USB is special because the bcdDevice can be matched against a numeric range */ + /* Looks like "usb:vNpNdNdcNdscNdpNicNiscNipN" */ + static void do_usb_entry(struct usb_device_id *id, +- unsigned int bcdDevice_initial, int bcdDevice_initial_digits, ++ unsigned int bcdDevice_initial, unsigned int bcdDevice_initial_digits, + unsigned char range_lo, unsigned char range_hi, + unsigned char max, struct module *mod) + { +@@ -437,7 +437,7 @@ static void do_pnp_device_entry(void *sy + for (i = 0; i < count; i++) { + const char *id = (char *)devs[i].id; + char acpi_id[sizeof(devs[0].id)]; +- int j; ++ unsigned int j; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS(\"pnp:d%s*\");\n", id); +@@ -467,7 +467,7 @@ static void do_pnp_card_entries(void *sy + + for (j = 0; j < PNP_MAX_DEVICES; j++) { + const char *id = (char *)card->devs[j].id; +- int i2, j2; ++ unsigned int i2, j2; + int dup = 0; + + if (!id[0]) +@@ -493,7 +493,7 @@ static void do_pnp_card_entries(void *sy + /* add an individual alias for every device entry */ + if (!dup) { + char acpi_id[sizeof(card->devs[0].id)]; +- int k; ++ unsigned int k; + + buf_printf(&mod->dev_table_buf, + "MODULE_ALIAS(\"pnp:d%s*\");\n", id); +@@ -768,7 +768,7 @@ static void dmi_ascii_filter(char *d, co + static int do_dmi_entry(const char *filename, struct dmi_system_id *id, + char *alias) + { +- int i, j; ++ unsigned int i, j; + + sprintf(alias, "dmi*"); + +diff -urNp linux-2.6.35.4/scripts/mod/modpost.c linux-2.6.35.4/scripts/mod/modpost.c +--- linux-2.6.35.4/scripts/mod/modpost.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/mod/modpost.c 2010-09-17 20:12:09.000000000 -0400 +@@ -846,6 +846,7 @@ enum mismatch { + ANY_INIT_TO_ANY_EXIT, + ANY_EXIT_TO_ANY_INIT, + EXPORT_TO_INIT_EXIT, ++ DATA_TO_TEXT + }; + + struct sectioncheck { +@@ -954,6 +955,12 @@ const struct sectioncheck sectioncheck[] + .tosec = { INIT_SECTIONS, EXIT_SECTIONS, NULL }, + .mismatch = EXPORT_TO_INIT_EXIT, + .symbol_white_list = { DEFAULT_SYMBOL_WHITE_LIST, NULL }, ++}, ++/* Do not reference code from writable data */ ++{ ++ .fromsec = { DATA_SECTIONS, NULL }, ++ .tosec = { 
TEXT_SECTIONS, NULL }, ++ .mismatch = DATA_TO_TEXT + } + }; + +@@ -1060,10 +1067,10 @@ static Elf_Sym *find_elf_symbol(struct e + continue; + if (ELF_ST_TYPE(sym->st_info) == STT_SECTION) + continue; +- if (sym->st_value == addr) +- return sym; + /* Find a symbol nearby - addr are maybe negative */ + d = sym->st_value - addr; ++ if (d == 0) ++ return sym; + if (d < 0) + d = addr - sym->st_value; + if (d < distance) { +@@ -1306,6 +1313,14 @@ static void report_sec_mismatch(const ch + "or drop the export.\n", + tosym, sec2annotation(tosec), sec2annotation(tosec), tosym); + break; ++ case DATA_TO_TEXT: ++/* ++ fprintf(stderr, ++ "The variable %s references\n" ++ "the %s %s%s%s\n", ++ fromsym, to, sec2annotation(tosec), tosym, to_p); ++*/ ++ break; + } + fprintf(stderr, "\n"); + } +@@ -1629,7 +1644,7 @@ void __attribute__((format(printf, 2, 3) + va_end(ap); + } + +-void buf_write(struct buffer *buf, const char *s, int len) ++void buf_write(struct buffer *buf, const char *s, unsigned int len) + { + if (buf->size - buf->pos < len) { + buf->size += len + SZ; +@@ -1841,7 +1856,7 @@ static void write_if_changed(struct buff + if (fstat(fileno(file), &st) < 0) + goto close_write; + +- if (st.st_size != b->pos) ++ if (st.st_size != (off_t)b->pos) + goto close_write; + + tmp = NOFAIL(malloc(b->pos)); +diff -urNp linux-2.6.35.4/scripts/mod/modpost.h linux-2.6.35.4/scripts/mod/modpost.h +--- linux-2.6.35.4/scripts/mod/modpost.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/mod/modpost.h 2010-09-17 20:12:09.000000000 -0400 +@@ -92,15 +92,15 @@ void *do_nofail(void *ptr, const char *e + + struct buffer { + char *p; +- int pos; +- int size; ++ unsigned int pos; ++ unsigned int size; + }; + + void __attribute__((format(printf, 2, 3))) + buf_printf(struct buffer *buf, const char *fmt, ...); + + void +-buf_write(struct buffer *buf, const char *s, int len); ++buf_write(struct buffer *buf, const char *s, unsigned int len); + + struct module { + struct module *next; +diff -urNp linux-2.6.35.4/scripts/mod/sumversion.c linux-2.6.35.4/scripts/mod/sumversion.c +--- linux-2.6.35.4/scripts/mod/sumversion.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/mod/sumversion.c 2010-09-17 20:12:09.000000000 -0400 +@@ -455,7 +455,7 @@ static void write_version(const char *fi + goto out; + } + +- if (write(fd, sum, strlen(sum)+1) != strlen(sum)+1) { ++ if (write(fd, sum, strlen(sum)+1) != (ssize_t)strlen(sum)+1) { + warn("writing sum in %s failed: %s\n", + filename, strerror(errno)); + goto out; +diff -urNp linux-2.6.35.4/scripts/pnmtologo.c linux-2.6.35.4/scripts/pnmtologo.c +--- linux-2.6.35.4/scripts/pnmtologo.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/scripts/pnmtologo.c 2010-09-17 20:12:09.000000000 -0400 +@@ -237,14 +237,14 @@ static void write_header(void) + fprintf(out, " * Linux logo %s\n", logoname); + fputs(" */\n\n", out); + fputs("#include <linux/linux_logo.h>\n\n", out); +- fprintf(out, "static unsigned char %s_data[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_data[] = {\n", + logoname); + } + + static void write_footer(void) + { + fputs("\n};\n\n", out); +- fprintf(out, "const struct linux_logo %s __initconst = {\n", logoname); ++ fprintf(out, "const struct linux_logo %s = {\n", logoname); + fprintf(out, "\t.type\t\t= %s,\n", logo_types[logo_type]); + fprintf(out, "\t.width\t\t= %d,\n", logo_width); + fprintf(out, "\t.height\t\t= %d,\n", logo_height); +@@ -374,7 +374,7 @@ static void write_logo_clut224(void) + fputs("\n};\n\n", out); + + /* write 
logo clut */ +- fprintf(out, "static unsigned char %s_clut[] __initdata = {\n", ++ fprintf(out, "static unsigned char %s_clut[] = {\n", + logoname); + write_hex_cnt = 0; + for (i = 0; i < logo_clutsize; i++) { +diff -urNp linux-2.6.35.4/security/commoncap.c linux-2.6.35.4/security/commoncap.c +--- linux-2.6.35.4/security/commoncap.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/commoncap.c 2010-09-17 20:12:37.000000000 -0400 +@@ -28,6 +28,7 @@ + #include <linux/prctl.h> + #include <linux/securebits.h> + #include <linux/syslog.h> ++#include <net/sock.h> + + /* + * If a non-root user executes a setuid-root binary in +@@ -51,9 +52,11 @@ static void warn_setuid_and_fcaps_mixed( + } + } + ++extern kernel_cap_t gr_cap_rtnetlink(struct sock *sk); ++ + int cap_netlink_send(struct sock *sk, struct sk_buff *skb) + { +- NETLINK_CB(skb).eff_cap = current_cap(); ++ NETLINK_CB(skb).eff_cap = gr_cap_rtnetlink(sk); + return 0; + } + +diff -urNp linux-2.6.35.4/security/integrity/ima/ima_api.c linux-2.6.35.4/security/integrity/ima/ima_api.c +--- linux-2.6.35.4/security/integrity/ima/ima_api.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/integrity/ima/ima_api.c 2010-09-17 20:12:09.000000000 -0400 +@@ -75,7 +75,7 @@ void ima_add_violation(struct inode *ino + int result; + + /* can overflow, only indicator */ +- atomic_long_inc(&ima_htable.violations); ++ atomic_long_inc_unchecked(&ima_htable.violations); + + entry = kmalloc(sizeof(*entry), GFP_KERNEL); + if (!entry) { +diff -urNp linux-2.6.35.4/security/integrity/ima/ima_fs.c linux-2.6.35.4/security/integrity/ima/ima_fs.c +--- linux-2.6.35.4/security/integrity/ima/ima_fs.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/integrity/ima/ima_fs.c 2010-09-17 20:12:09.000000000 -0400 +@@ -28,12 +28,12 @@ + static int valid_policy = 1; + #define TMPBUFLEN 12 + static ssize_t ima_show_htable_value(char __user *buf, size_t count, +- loff_t *ppos, atomic_long_t *val) ++ loff_t *ppos, atomic_long_unchecked_t *val) + { + char tmpbuf[TMPBUFLEN]; + ssize_t len; + +- len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read(val)); ++ len = scnprintf(tmpbuf, TMPBUFLEN, "%li\n", atomic_long_read_unchecked(val)); + return simple_read_from_buffer(buf, count, ppos, tmpbuf, len); + } + +diff -urNp linux-2.6.35.4/security/integrity/ima/ima.h linux-2.6.35.4/security/integrity/ima/ima.h +--- linux-2.6.35.4/security/integrity/ima/ima.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/integrity/ima/ima.h 2010-09-17 20:12:09.000000000 -0400 +@@ -83,8 +83,8 @@ void ima_add_violation(struct inode *ino + extern spinlock_t ima_queue_lock; + + struct ima_h_table { +- atomic_long_t len; /* number of stored measurements in the list */ +- atomic_long_t violations; ++ atomic_long_unchecked_t len; /* number of stored measurements in the list */ ++ atomic_long_unchecked_t violations; + struct hlist_head queue[IMA_MEASURE_HTABLE_SIZE]; + }; + extern struct ima_h_table ima_htable; +diff -urNp linux-2.6.35.4/security/integrity/ima/ima_queue.c linux-2.6.35.4/security/integrity/ima/ima_queue.c +--- linux-2.6.35.4/security/integrity/ima/ima_queue.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/integrity/ima/ima_queue.c 2010-09-17 20:12:09.000000000 -0400 +@@ -79,7 +79,7 @@ static int ima_add_digest_entry(struct i + INIT_LIST_HEAD(&qe->later); + list_add_tail_rcu(&qe->later, &ima_measurements); + +- atomic_long_inc(&ima_htable.len); ++ atomic_long_inc_unchecked(&ima_htable.len); + key = 
ima_hash_key(entry->digest); + hlist_add_head_rcu(&qe->hnext, &ima_htable.queue[key]); + return 0; +diff -urNp linux-2.6.35.4/security/Kconfig linux-2.6.35.4/security/Kconfig +--- linux-2.6.35.4/security/Kconfig 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/Kconfig 2010-09-17 20:12:37.000000000 -0400 +@@ -4,6 +4,505 @@ + + menu "Security options" + ++source grsecurity/Kconfig ++ ++menu "PaX" ++ ++ config PAX_PER_CPU_PGD ++ bool ++ ++ config TASK_SIZE_MAX_SHIFT ++ int ++ depends on X86_64 ++ default 47 if !PAX_PER_CPU_PGD ++ default 42 if PAX_PER_CPU_PGD ++ ++ config PAX_ENABLE_PAE ++ bool ++ default y if (X86_32 && (MPENTIUM4 || MK8 || MPSC || MCORE2 || MATOM)) ++ ++config PAX ++ bool "Enable various PaX features" ++ depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) ++ help ++ This allows you to enable various PaX features. PaX adds ++ intrusion prevention mechanisms to the kernel that reduce ++ the risks posed by exploitable memory corruption bugs. ++ ++menu "PaX Control" ++ depends on PAX ++ ++config PAX_SOFTMODE ++ bool 'Support soft mode' ++ select PAX_PT_PAX_FLAGS ++ help ++ Enabling this option will allow you to run PaX in soft mode, that ++ is, PaX features will not be enforced by default, only on executables ++ marked explicitly. You must also enable PT_PAX_FLAGS support as it ++ is the only way to mark executables for soft mode use. ++ ++ Soft mode can be activated by using the "pax_softmode=1" kernel command ++ line option on boot. Furthermore you can control various PaX features ++ at runtime via the entries in /proc/sys/kernel/pax. ++ ++config PAX_EI_PAX ++ bool 'Use legacy ELF header marking' ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'chpax' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ an otherwise reserved part of the ELF header. This marking has ++ numerous drawbacks (no support for soft-mode, toolchain does not ++ know about the non-standard use of the ELF header) therefore it ++ has been deprecated in favour of PT_PAX_FLAGS support. ++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF ++ program header then you MUST enable this option otherwise they ++ will not get any protection. ++ ++ Note that if you enable PT_PAX_FLAGS marking support as well, ++ the PT_PAX_FLAG marks will override the legacy EI_PAX marks. ++ ++config PAX_PT_PAX_FLAGS ++ bool 'Use ELF program header marking' ++ help ++ Enabling this option will allow you to control PaX features on ++ a per executable basis via the 'paxctl' utility available at ++ http://pax.grsecurity.net/. The control flags will be read from ++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking ++ has the benefits of supporting both soft mode and being fully ++ integrated into the toolchain (the binutils patch is available ++ from http://pax.grsecurity.net). ++ ++ If you have applications not marked by the PT_PAX_FLAGS ELF ++ program header then you MUST enable the EI_PAX marking support ++ otherwise they will not get any protection. ++ ++ Note that if you enable the legacy EI_PAX marking support as well, ++ the EI_PAX marks will be overridden by the PT_PAX_FLAGS marks. ++ ++choice ++ prompt 'MAC system integration' ++ default PAX_HAVE_ACL_FLAGS ++ help ++ Mandatory Access Control systems have the option of controlling ++ PaX flags on a per executable basis, choose the method supported ++ by your particular system. 
++ ++ - "none": if your MAC system does not interact with PaX, ++ - "direct": if your MAC system defines pax_set_initial_flags() itself, ++ - "hook": if your MAC system uses the pax_set_initial_flags_func callback. ++ ++ NOTE: this option is for developers/integrators only. ++ ++ config PAX_NO_ACL_FLAGS ++ bool 'none' ++ ++ config PAX_HAVE_ACL_FLAGS ++ bool 'direct' ++ ++ config PAX_HOOK_ACL_FLAGS ++ bool 'hook' ++endchoice ++ ++endmenu ++ ++menu "Non-executable pages" ++ depends on PAX ++ ++config PAX_NOEXEC ++ bool "Enforce non-executable pages" ++ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || (ARM && (CPU_V6 || CPU_V7)) || IA64 || MIPS || PARISC || PPC || S390 || SPARC || X86) ++ help ++ By design some architectures do not allow for protecting memory ++ pages against execution or even if they do, Linux does not make ++ use of this feature. In practice this means that if a page is ++ readable (such as the stack or heap) it is also executable. ++ ++ There is a well known exploit technique that makes use of this ++ fact and a common programming mistake where an attacker can ++ introduce code of his choice somewhere in the attacked program's ++ memory (typically the stack or the heap) and then execute it. ++ ++ If the attacked program was running with different (typically ++ higher) privileges than that of the attacker, then he can elevate ++ his own privilege level (e.g. get a root shell, write to files for ++ which he does not have write access to, etc). ++ ++ Enabling this option will let you choose from various features ++ that prevent the injection and execution of 'foreign' code in ++ a program. ++ ++ This will also break programs that rely on the old behaviour and ++ expect that dynamically allocated memory via the malloc() family ++ of functions is executable (which it is not). Notable examples ++ are the XFree86 4.x server, the java runtime and wine. ++ ++config PAX_PAGEEXEC ++ bool "Paging based non-executable pages" ++ depends on PAX_NOEXEC && (!X86_32 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUMM || MCORE2 || MATOM || MPENTIUM4 || MPSC || MK7 || MK8 || MWINCHIPC6 || MWINCHIP2 || MWINCHIP3D || MVIAC3_2 || MVIAC7) ++ select S390_SWITCH_AMODE if S390 ++ select S390_EXEC_PROTECT if S390 ++ help ++ This implementation is based on the paging feature of the CPU. ++ On i386 without hardware non-executable bit support there is a ++ variable but usually low performance impact, however on Intel's ++ P4 core based CPUs it is very high so you should not enable this ++ for kernels meant to be used on such CPUs. ++ ++ On alpha, avr32, ia64, parisc, sparc, sparc64, x86_64 and i386 ++ with hardware non-executable bit support there is no performance ++ impact, on ppc the impact is negligible. ++ ++ Note that several architectures require various emulations due to ++ badly designed userland ABIs, this will cause a performance impact ++ but will disappear as soon as userland is fixed. For example, ppc ++ userland MUST have been built with secure-plt by a recent toolchain. ++ ++config PAX_SEGMEXEC ++ bool "Segmentation based non-executable pages" ++ depends on PAX_NOEXEC && X86_32 ++ help ++ This implementation is based on the segmentation feature of the ++ CPU and has a very small performance impact, however applications ++ will be limited to a 1.5 GB address space instead of the normal ++ 3 GB. 
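The non-executable page enforcement above, together with the mprotect() restriction that follows, can be probed from userland with nothing but POSIX calls: map an anonymous page read-write, then ask for PROT_EXEC after the fact. On a kernel enforcing PAX_MPROTECT for the binary the second call is expected to fail with EACCES, since the page was not created executable; a stock kernel grants it. A minimal probe (the PaX behaviour is the assumption under test, not something this sketch guarantees):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* Anonymous RW mapping: not executable at creation time. */
	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* Under PAX_MPROTECT this transition should be refused. */
	if (mprotect(p, 4096, PROT_READ | PROT_EXEC) == -1)
		printf("PROT_EXEC denied: %s (MPROTECT likely active)\n",
		       strerror(errno));
	else
		printf("PROT_EXEC granted: no mprotect() restriction\n");

	munmap(p, 4096);
	return 0;
}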
++ ++config PAX_EMUTRAMP ++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || X86) ++ default y if PARISC ++ help ++ There are some programs and libraries that for one reason or ++ another attempt to execute special small code snippets from ++ non-executable memory pages. Most notable examples are the ++ signal handler return code generated by the kernel itself and ++ the GCC trampolines. ++ ++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then ++ such programs will no longer work under your kernel. ++ ++ As a remedy you can say Y here and use the 'chpax' or 'paxctl' ++ utilities to enable trampoline emulation for the affected programs ++ yet still have the protection provided by the non-executable pages. ++ ++ On parisc you MUST enable this option and EMUSIGRT as well, otherwise ++ your system will not even boot. ++ ++ Alternatively you can say N here and use the 'chpax' or 'paxctl' ++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC ++ for the affected files. ++ ++ NOTE: enabling this feature *may* open up a loophole in the ++ protection provided by non-executable pages that an attacker ++ could abuse. Therefore the best solution is to not have any ++ files on your system that would require this option. This can ++ be achieved by not using libc5 (which relies on the kernel ++ signal handler return code) and not using or rewriting programs ++ that make use of the nested function implementation of GCC. ++ Skilled users can just fix GCC itself so that it implements ++ nested function calls in a way that does not interfere with PaX. ++ ++config PAX_EMUSIGRT ++ bool "Automatically emulate sigreturn trampolines" ++ depends on PAX_EMUTRAMP && PARISC ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate signal return trampolines executing on the stack ++ that would otherwise lead to task termination. ++ ++ This solution is intended as a temporary one for users with ++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17, ++ Modula-3 runtime, etc) or executables linked to such, basically ++ everything that does not specify its own SA_RESTORER function in ++ normal executable memory like glibc 2.1+ does. ++ ++ On parisc you MUST enable this option, otherwise your system will ++ not even boot. ++ ++ NOTE: this feature cannot be disabled on a per executable basis ++ and since it *does* open up a loophole in the protection provided ++ by non-executable pages, the best solution is to not have any ++ files on your system that would require this option. ++ ++config PAX_MPROTECT ++ bool "Restrict mprotect()" ++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) ++ help ++ Enabling this option will prevent programs from ++ - changing the executable status of memory pages that were ++ not originally created as executable, ++ - making read-only executable pages writable again, ++ - creating executable pages from anonymous memory, ++ - making read-only-after-relocations (RELRO) data pages writable again. ++ ++ You should say Y here to complete the protection provided by ++ the enforcement of non-executable pages. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_ELFRELOCS ++ bool "Allow ELF text relocations (read help)" ++ depends on PAX_MPROTECT ++ default n ++ help ++ Non-executable pages and mprotect() restrictions are effective ++ in preventing the introduction of new executable code into an ++ attacked task's address space. 
There remain only two venues ++ for this kind of attack: if the attacker can execute already ++ existing code in the attacked task then he can either have it ++ create and mmap() a file containing his code or have it mmap() ++ an already existing ELF library that does not have position ++ independent code in it and use mprotect() on it to make it ++ writable and copy his code there. While protecting against ++ the former approach is beyond PaX, the latter can be prevented ++ by having only PIC ELF libraries on one's system (which do not ++ need to relocate their code). If you are sure this is your case, ++ as is the case with all modern Linux distributions, then leave ++ this option disabled. You should say 'n' here. ++ ++config PAX_ETEXECRELOCS ++ bool "Allow ELF ET_EXEC text relocations" ++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC) ++ select PAX_ELFRELOCS ++ default y ++ help ++ On some architectures there are incorrectly created applications ++ that require text relocations and would not work without enabling ++ this option. If you are an alpha, ia64 or parisc user, you should ++ enable this option and disable it once you have made sure that ++ none of your applications need it. ++ ++config PAX_EMUPLT ++ bool "Automatically emulate ELF PLT" ++ depends on PAX_MPROTECT && (ALPHA || PARISC || SPARC) ++ default y ++ help ++ Enabling this option will have the kernel automatically detect ++ and emulate the Procedure Linkage Table entries in ELF files. ++ On some architectures such entries are in writable memory, and ++ become non-executable leading to task termination. Therefore ++ it is mandatory that you enable this option on alpha, parisc, ++ sparc and sparc64, otherwise your system would not even boot. ++ ++ NOTE: this feature *does* open up a loophole in the protection ++ provided by the non-executable pages, therefore the proper ++ solution is to modify the toolchain to produce a PLT that does ++ not need to be writable. ++ ++config PAX_DLRESOLVE ++ bool 'Emulate old glibc resolver stub' ++ depends on PAX_EMUPLT && SPARC ++ default n ++ help ++ This option is needed if userland has an old glibc (before 2.4) ++ that puts a 'save' instruction into the runtime generated resolver ++ stub that needs special emulation. ++ ++config PAX_KERNEXEC ++ bool "Enforce non-executable kernel pages" ++ depends on PAX_NOEXEC && (PPC || X86) && (!X86_32 || X86_WP_WORKS_OK) && !XEN ++ select PAX_PER_CPU_PGD if X86_64 || (X86_32 && X86_PAE) ++ help ++ This is the kernel land equivalent of PAGEEXEC and MPROTECT, ++ that is, enabling this option will make it harder to inject ++ and execute 'foreign' code in kernel memory itself. ++ ++config PAX_KERNEXEC_MODULE_TEXT ++ int "Minimum amount of memory reserved for module code" ++ default "4" ++ depends on PAX_KERNEXEC && X86_32 && MODULES ++ help ++ Due to implementation details the kernel must reserve a fixed ++ amount of memory for module code at compile time that cannot be ++ changed at runtime. Here you can specify the minimum amount ++ in MB that will be reserved. Due to the same implementation ++ details this size will always be rounded up to the next 2/4 MB ++ boundary (depends on PAE) so the actually available memory for ++ module code will usually be more than this minimum. ++ ++ The default 4 MB should be enough for most users but if you have ++ an excessive number of modules (e.g., most distribution configs ++ compile many drivers as modules) or use huge modules such as ++ nvidia's kernel driver, you will need to adjust this amount. 
++ A good rule of thumb is to look at your currently loaded kernel ++ modules and add up their sizes. ++ ++endmenu ++ ++menu "Address Space Layout Randomization" ++ depends on PAX ++ ++config PAX_ASLR ++ bool "Address Space Layout Randomization" ++ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS ++ help ++ Many if not most exploit techniques rely on the knowledge of ++ certain addresses in the attacked program. The following options ++ will allow the kernel to apply a certain amount of randomization ++ to specific parts of the program thereby forcing an attacker to ++ guess them in most cases. Any failed guess will most likely crash ++ the attacked program which allows the kernel to detect such attempts ++ and react on them. PaX itself provides no reaction mechanisms, ++ instead it is strongly encouraged that you make use of Nergal's ++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's ++ (http://www.grsecurity.net/) built-in crash detection features or ++ develop one yourself. ++ ++ By saying Y here you can choose to randomize the following areas: ++ - top of the task's kernel stack ++ - top of the task's userland stack ++ - base address for mmap() requests that do not specify one ++ (this includes all libraries) ++ - base address of the main executable ++ ++ It is strongly recommended to say Y here as address space layout ++ randomization has negligible impact on performance yet it provides ++ a very effective protection. ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control ++ this feature on a per file basis. ++ ++config PAX_RANDKSTACK ++ bool "Randomize kernel stack base" ++ depends on PAX_ASLR && X86_TSC && X86_32 ++ help ++ By saying Y here the kernel will randomize every task's kernel ++ stack on every system call. This will not only force an attacker ++ to guess it but also prevent him from making use of possible ++ leaked information about it. ++ ++ Since the kernel stack is a rather scarce resource, randomization ++ may cause unexpected stack overflows, therefore you should very ++ carefully test your system. Note that once enabled in the kernel ++ configuration, this feature cannot be disabled on a per file basis. ++ ++config PAX_RANDUSTACK ++ bool "Randomize user stack base" ++ depends on PAX_ASLR ++ help ++ By saying Y here the kernel will randomize every task's userland ++ stack. The randomization is done in two steps where the second ++ one may apply a big amount of shift to the top of the stack and ++ cause problems for programs that want to use lots of memory (more ++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is). ++ For this reason the second step can be controlled by 'chpax' or ++ 'paxctl' on a per file basis. ++ ++config PAX_RANDMMAP ++ bool "Randomize mmap() base" ++ depends on PAX_ASLR ++ help ++ By saying Y here the kernel will use a randomized base address for ++ mmap() requests that do not specify one themselves. As a result ++ all dynamically loaded libraries will appear at random addresses ++ and therefore be harder to exploit by a technique where an attacker ++ attempts to execute library code for his purposes (e.g. spawn a ++ shell from an exploited program that is running at an elevated ++ privilege level). ++ ++ Furthermore, if a program is relinked as a dynamic ELF file, its ++ base address will be randomized as well, completing the full ++ randomization of the address space layout. Attacking such programs ++ becomes a guess game. 
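
The randomization is easy to observe from userland. A small probe (illustrative only, not part of the patchset) prints the address of a fresh mmap() and of a stack slot; with PAX_RANDMMAP/PAX_RANDUSTACK active both values change from run to run, while identical values across runs indicate no randomization:

    #include <stdio.h>
    #include <sys/mman.h>

    int main(void)
    {
        int stack_slot;
        void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        printf("mmap base:  %p\n", p);                    /* moves with RANDMMAP  */
        printf("stack slot: %p\n", (void *)&stack_slot);  /* moves with RANDUSTACK */
        return 0;
    }

On a binary relinked as a dynamic ELF file, the address of main() itself moves between runs as well.
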
You can find an example of doing this at ++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at ++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz . ++ ++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this ++ feature on a per file basis. ++ ++endmenu ++ ++menu "Miscellaneous hardening features" ++ ++config PAX_MEMORY_SANITIZE ++ bool "Sanitize all freed memory" ++ help ++ By saying Y here the kernel will erase memory pages as soon as they ++ are freed. This in turn reduces the lifetime of data stored in the ++ pages, making it less likely that sensitive information such as ++ passwords, cryptographic secrets, etc stay in memory for too long. ++ ++ This is especially useful for programs whose runtime is short, long ++ lived processes and the kernel itself benefit from this as long as ++ they operate on whole memory pages and ensure timely freeing of pages ++ that may hold sensitive information. ++ ++ The tradeoff is performance impact, on a single CPU system kernel ++ compilation sees a 3% slowdown, other systems and workloads may vary ++ and you are advised to test this feature on your expected workload ++ before deploying it. ++ ++ Note that this feature does not protect data stored in live pages, ++ e.g., process memory swapped to disk may stay there for a long time. ++ ++config PAX_MEMORY_UDEREF ++ bool "Prevent invalid userland pointer dereference" ++ depends on X86 && !UML_X86 && !XEN ++ select PAX_PER_CPU_PGD if X86_64 ++ help ++ By saying Y here the kernel will be prevented from dereferencing ++ userland pointers in contexts where the kernel expects only kernel ++ pointers. This is both a useful runtime debugging feature and a ++ security measure that prevents exploiting a class of kernel bugs. ++ ++ The tradeoff is that some virtualization solutions may experience ++ a huge slowdown and therefore you should not enable this feature ++ for kernels meant to run in such environments. Whether a given VM ++ solution is affected or not is best determined by simply trying it ++ out, the performance impact will be obvious right on boot as this ++ mechanism engages from very early on. A good rule of thumb is that ++ VMs running on CPUs without hardware virtualization support (i.e., ++ the majority of IA-32 CPUs) will likely experience the slowdown. ++ ++config PAX_REFCOUNT ++ bool "Prevent various kernel object reference counter overflows" ++ depends on GRKERNSEC && (X86 || SPARC64) ++ help ++ By saying Y here the kernel will detect and prevent overflowing ++ various (but not all) kinds of object reference counters. Such ++ overflows can normally occur due to bugs only and are often, if ++ not always, exploitable. ++ ++ The tradeoff is that data structures protected by an overflowed ++ refcount will never be freed and therefore will leak memory. Note ++ that this leak also happens even without this protection but in ++ that case the overflow can eventually trigger the freeing of the ++ data structure while it is still being used elsewhere, resulting ++ in the exploitable situation that this feature prevents. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. 
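
To see why an overflowed reference counter is exploitable rather than merely leaky, consider this deliberately compressed userland model (hypothetical; real kernel refcounts live in kernel objects, not a local variable):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
        unsigned int refcount = UINT_MAX; /* attacker piled up references */
        refcount++;                       /* overflow: counter wraps to 0 */
        refcount++;                       /* one legitimate get -> reads 1 */
        if (--refcount == 0)              /* its matching put "frees" it   */
            puts("object freed while ~4 billion references are still live");
        return 0;
    }

PAX_REFCOUNT saturates the counter before the wrap, so the premature free never fires; the object leaks instead, which is the tradeoff the help text describes.
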
++ ++config PAX_USERCOPY ++ bool "Bounds check heap object copies between kernel and userland" ++ depends on X86 || PPC || SPARC ++ depends on GRKERNSEC && (SLAB || SLUB || SLOB) ++ help ++ By saying Y here the kernel will enforce the size of heap objects ++ when they are copied in either direction between the kernel and ++ userland, even if only a part of the heap object is copied. ++ ++ Specifically, this checking prevents information leaking from the ++ kernel heap during kernel to userland copies (if the kernel heap ++ object is otherwise fully initialized) and prevents kernel heap ++ overflows during userland to kernel copies. ++ ++ Note that the current implementation provides the strictest checks ++ for the SLUB allocator. ++ ++ If frame pointers are enabled on x86, this option will also ++ restrict copies into and out of the kernel stack to local variables ++ within a single frame. ++ ++ Since this has a negligible performance impact, you should enable ++ this feature. ++ ++endmenu ++ ++endmenu ++ + config KEYS + bool "Enable access key retention support" + help +@@ -124,7 +623,7 @@ config INTEL_TXT + config LSM_MMAP_MIN_ADDR + int "Low address space for LSM to protect from user allocation" + depends on SECURITY && SECURITY_SELINUX +- default 65536 ++ default 32768 + help + This is the portion of low virtual memory which should be protected + from userspace allocation. Keeping a user from writing to low pages +diff -urNp linux-2.6.35.4/security/min_addr.c linux-2.6.35.4/security/min_addr.c +--- linux-2.6.35.4/security/min_addr.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/min_addr.c 2010-09-17 20:12:37.000000000 -0400 +@@ -14,6 +14,7 @@ unsigned long dac_mmap_min_addr = CONFIG + */ + static void update_mmap_min_addr(void) + { ++#ifndef SPARC + #ifdef CONFIG_LSM_MMAP_MIN_ADDR + if (dac_mmap_min_addr > CONFIG_LSM_MMAP_MIN_ADDR) + mmap_min_addr = dac_mmap_min_addr; +@@ -22,6 +23,7 @@ static void update_mmap_min_addr(void) + #else + mmap_min_addr = dac_mmap_min_addr; + #endif ++#endif + } + + /* +diff -urNp linux-2.6.35.4/security/security.c linux-2.6.35.4/security/security.c +--- linux-2.6.35.4/security/security.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/security.c 2010-09-17 20:12:37.000000000 -0400 +@@ -25,8 +25,8 @@ static __initdata char chosen_lsm[SECURI + /* things that live in capability.c */ + extern void __init security_fixup_ops(struct security_operations *ops); + +-static struct security_operations *security_ops; +-static struct security_operations default_security_ops = { ++static struct security_operations *security_ops __read_only; ++static struct security_operations default_security_ops __read_only = { + .name = "default", + }; + +@@ -67,7 +67,9 @@ int __init security_init(void) + + void reset_security_ops(void) + { ++ pax_open_kernel(); + security_ops = &default_security_ops; ++ pax_close_kernel(); + } + + /* Save user chosen LSM */ +diff -urNp linux-2.6.35.4/security/selinux/hooks.c linux-2.6.35.4/security/selinux/hooks.c +--- linux-2.6.35.4/security/selinux/hooks.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/selinux/hooks.c 2010-09-17 20:12:37.000000000 -0400 +@@ -93,7 +93,6 @@ + #define NUM_SEL_MNT_OPTS 5 + + extern int selinux_nlmsg_lookup(u16 sclass, u16 nlmsg_type, u32 *perm); +-extern struct security_operations *security_ops; + + /* SECMARK reference count */ + atomic_t selinux_secmark_refcount = ATOMIC_INIT(0); +@@ -5428,7 +5427,7 @@ static int selinux_key_getsecurity(struc + + #endif + +-static 
struct security_operations selinux_ops = { ++static struct security_operations selinux_ops __read_only = { + .name = "selinux", + + .ptrace_access_check = selinux_ptrace_access_check, +diff -urNp linux-2.6.35.4/security/smack/smack_lsm.c linux-2.6.35.4/security/smack/smack_lsm.c +--- linux-2.6.35.4/security/smack/smack_lsm.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/smack/smack_lsm.c 2010-09-17 20:12:09.000000000 -0400 +@@ -3064,7 +3064,7 @@ static int smack_inode_getsecctx(struct + return 0; + } + +-struct security_operations smack_ops = { ++struct security_operations smack_ops __read_only = { + .name = "smack", + + .ptrace_access_check = smack_ptrace_access_check, +diff -urNp linux-2.6.35.4/security/tomoyo/tomoyo.c linux-2.6.35.4/security/tomoyo/tomoyo.c +--- linux-2.6.35.4/security/tomoyo/tomoyo.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/security/tomoyo/tomoyo.c 2010-09-17 20:12:09.000000000 -0400 +@@ -235,7 +235,7 @@ static int tomoyo_sb_pivotroot(struct pa + * tomoyo_security_ops is a "struct security_operations" which is used for + * registering TOMOYO. + */ +-static struct security_operations tomoyo_security_ops = { ++static struct security_operations tomoyo_security_ops __read_only = { + .name = "tomoyo", + .cred_alloc_blank = tomoyo_cred_alloc_blank, + .cred_prepare = tomoyo_cred_prepare, +diff -urNp linux-2.6.35.4/sound/aoa/codecs/onyx.c linux-2.6.35.4/sound/aoa/codecs/onyx.c +--- linux-2.6.35.4/sound/aoa/codecs/onyx.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/aoa/codecs/onyx.c 2010-09-17 20:12:09.000000000 -0400 +@@ -54,7 +54,7 @@ struct onyx { + spdif_locked:1, + analog_locked:1, + original_mute:2; +- int open_count; ++ atomic_t open_count; + struct codec_info *codec_info; + + /* mutex serializes concurrent access to the device +@@ -753,7 +753,7 @@ static int onyx_open(struct codec_info_i + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count++; ++ atomic_inc(&onyx->open_count); + mutex_unlock(&onyx->mutex); + + return 0; +@@ -765,8 +765,7 @@ static int onyx_close(struct codec_info_ + struct onyx *onyx = cii->codec_data; + + mutex_lock(&onyx->mutex); +- onyx->open_count--; +- if (!onyx->open_count) ++ if (atomic_dec_and_test(&onyx->open_count)) + onyx->spdif_locked = onyx->analog_locked = 0; + mutex_unlock(&onyx->mutex); + +diff -urNp linux-2.6.35.4/sound/core/oss/pcm_oss.c linux-2.6.35.4/sound/core/oss/pcm_oss.c +--- linux-2.6.35.4/sound/core/oss/pcm_oss.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/core/oss/pcm_oss.c 2010-09-17 20:12:09.000000000 -0400 +@@ -2966,8 +2966,8 @@ static void snd_pcm_oss_proc_done(struct + } + } + #else /* !CONFIG_SND_VERBOSE_PROCFS */ +-#define snd_pcm_oss_proc_init(pcm) +-#define snd_pcm_oss_proc_done(pcm) ++#define snd_pcm_oss_proc_init(pcm) do {} while (0) ++#define snd_pcm_oss_proc_done(pcm) do {} while (0) + #endif /* CONFIG_SND_VERBOSE_PROCFS */ + + /* +diff -urNp linux-2.6.35.4/sound/core/seq/seq_lock.h linux-2.6.35.4/sound/core/seq/seq_lock.h +--- linux-2.6.35.4/sound/core/seq/seq_lock.h 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/core/seq/seq_lock.h 2010-09-17 20:12:09.000000000 -0400 +@@ -23,10 +23,10 @@ void snd_use_lock_sync_helper(snd_use_lo + #else /* SMP || CONFIG_SND_DEBUG */ + + typedef spinlock_t snd_use_lock_t; /* dummy */ +-#define snd_use_lock_init(lockp) /**/ +-#define snd_use_lock_use(lockp) /**/ +-#define snd_use_lock_free(lockp) /**/ +-#define snd_use_lock_sync(lockp) /**/ ++#define 
snd_use_lock_init(lockp) do {} while (0) ++#define snd_use_lock_use(lockp) do {} while (0) ++#define snd_use_lock_free(lockp) do {} while (0) ++#define snd_use_lock_sync(lockp) do {} while (0) + + #endif /* SMP || CONFIG_SND_DEBUG */ + +diff -urNp linux-2.6.35.4/sound/drivers/mts64.c linux-2.6.35.4/sound/drivers/mts64.c +--- linux-2.6.35.4/sound/drivers/mts64.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/drivers/mts64.c 2010-09-17 20:12:09.000000000 -0400 +@@ -66,7 +66,7 @@ struct mts64 { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ atomic_t open_count; + int current_midi_output_port; + int current_midi_input_port; + u8 mode[MTS64_NUM_INPUT_PORTS]; +@@ -696,7 +696,7 @@ static int snd_mts64_rawmidi_open(struct + { + struct mts64 *mts = substream->rmidi->private_data; + +- if (mts->open_count == 0) { ++ if (atomic_read(&mts->open_count) == 0) { + /* We don't need a spinlock here, because this is just called + if the device has not been opened before. + So there aren't any IRQs from the device */ +@@ -704,7 +704,7 @@ static int snd_mts64_rawmidi_open(struct + + msleep(50); + } +- ++(mts->open_count); ++ atomic_inc(&mts->open_count); + + return 0; + } +@@ -714,8 +714,7 @@ static int snd_mts64_rawmidi_close(struc + struct mts64 *mts = substream->rmidi->private_data; + unsigned long flags; + +- --(mts->open_count); +- if (mts->open_count == 0) { ++ if (atomic_dec_return(&mts->open_count) == 0) { + /* We need the spinlock_irqsave here because we can still + have IRQs at this point */ + spin_lock_irqsave(&mts->lock, flags); +@@ -724,8 +723,8 @@ static int snd_mts64_rawmidi_close(struc + + msleep(500); + +- } else if (mts->open_count < 0) +- mts->open_count = 0; ++ } else if (atomic_read(&mts->open_count) < 0) ++ atomic_set(&mts->open_count, 0); + + return 0; + } +diff -urNp linux-2.6.35.4/sound/drivers/portman2x4.c linux-2.6.35.4/sound/drivers/portman2x4.c +--- linux-2.6.35.4/sound/drivers/portman2x4.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/drivers/portman2x4.c 2010-09-17 20:12:09.000000000 -0400 +@@ -84,7 +84,7 @@ struct portman { + struct pardevice *pardev; + int pardev_claimed; + +- int open_count; ++ atomic_t open_count; + int mode[PORTMAN_NUM_INPUT_PORTS]; + struct snd_rawmidi_substream *midi_input[PORTMAN_NUM_INPUT_PORTS]; + }; +diff -urNp linux-2.6.35.4/sound/oss/sb_audio.c linux-2.6.35.4/sound/oss/sb_audio.c +--- linux-2.6.35.4/sound/oss/sb_audio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/oss/sb_audio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -901,7 +901,7 @@ sb16_copy_from_user(int dev, + buf16 = (signed short *)(localbuf + localoffs); + while (c) + { +- locallen = (c >= LBUFCOPYSIZE ? LBUFCOPYSIZE : c); ++ locallen = ((unsigned)c >= LBUFCOPYSIZE ? 
LBUFCOPYSIZE : c); + if (copy_from_user(lbuf8, + userbuf+useroffs + p, + locallen)) +diff -urNp linux-2.6.35.4/sound/pci/ac97/ac97_codec.c linux-2.6.35.4/sound/pci/ac97/ac97_codec.c +--- linux-2.6.35.4/sound/pci/ac97/ac97_codec.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/pci/ac97/ac97_codec.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1962,7 +1962,7 @@ static int snd_ac97_dev_disconnect(struc + } + + /* build_ops to do nothing */ +-static struct snd_ac97_build_ops null_build_ops; ++static const struct snd_ac97_build_ops null_build_ops; + + #ifdef CONFIG_SND_AC97_POWER_SAVE + static void do_update_power(struct work_struct *work) +diff -urNp linux-2.6.35.4/sound/pci/ac97/ac97_patch.c linux-2.6.35.4/sound/pci/ac97/ac97_patch.c +--- linux-2.6.35.4/sound/pci/ac97/ac97_patch.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/pci/ac97/ac97_patch.c 2010-09-17 20:12:09.000000000 -0400 +@@ -371,7 +371,7 @@ static int patch_yamaha_ymf743_build_spd + return 0; + } + +-static struct snd_ac97_build_ops patch_yamaha_ymf743_ops = { ++static const struct snd_ac97_build_ops patch_yamaha_ymf743_ops = { + .build_spdif = patch_yamaha_ymf743_build_spdif, + .build_3d = patch_yamaha_ymf7x3_3d, + }; +@@ -455,7 +455,7 @@ static int patch_yamaha_ymf753_post_spdi + return 0; + } + +-static struct snd_ac97_build_ops patch_yamaha_ymf753_ops = { ++static const struct snd_ac97_build_ops patch_yamaha_ymf753_ops = { + .build_3d = patch_yamaha_ymf7x3_3d, + .build_post_spdif = patch_yamaha_ymf753_post_spdif + }; +@@ -502,7 +502,7 @@ static int patch_wolfson_wm9703_specific + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9703_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9703_ops = { + .build_specific = patch_wolfson_wm9703_specific, + }; + +@@ -533,7 +533,7 @@ static int patch_wolfson_wm9704_specific + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9704_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9704_ops = { + .build_specific = patch_wolfson_wm9704_specific, + }; + +@@ -677,7 +677,7 @@ static int patch_wolfson_wm9711_specific + return 0; + } + +-static struct snd_ac97_build_ops patch_wolfson_wm9711_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9711_ops = { + .build_specific = patch_wolfson_wm9711_specific, + }; + +@@ -871,7 +871,7 @@ static void patch_wolfson_wm9713_resume + } + #endif + +-static struct snd_ac97_build_ops patch_wolfson_wm9713_ops = { ++static const struct snd_ac97_build_ops patch_wolfson_wm9713_ops = { + .build_specific = patch_wolfson_wm9713_specific, + .build_3d = patch_wolfson_wm9713_3d, + #ifdef CONFIG_PM +@@ -976,7 +976,7 @@ static int patch_sigmatel_stac97xx_speci + return 0; + } + +-static struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = { ++static const struct snd_ac97_build_ops patch_sigmatel_stac9700_ops = { + .build_3d = patch_sigmatel_stac9700_3d, + .build_specific = patch_sigmatel_stac97xx_specific + }; +@@ -1023,7 +1023,7 @@ static int patch_sigmatel_stac9708_speci + return patch_sigmatel_stac97xx_specific(ac97); + } + +-static struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = { ++static const struct snd_ac97_build_ops patch_sigmatel_stac9708_ops = { + .build_3d = patch_sigmatel_stac9708_3d, + .build_specific = patch_sigmatel_stac9708_specific + }; +@@ -1252,7 +1252,7 @@ static int patch_sigmatel_stac9758_speci + return 0; + } + +-static struct snd_ac97_build_ops patch_sigmatel_stac9758_ops = { ++static const struct snd_ac97_build_ops 
patch_sigmatel_stac9758_ops = { + .build_3d = patch_sigmatel_stac9700_3d, + .build_specific = patch_sigmatel_stac9758_specific + }; +@@ -1327,7 +1327,7 @@ static int patch_cirrus_build_spdif(stru + return 0; + } + +-static struct snd_ac97_build_ops patch_cirrus_ops = { ++static const struct snd_ac97_build_ops patch_cirrus_ops = { + .build_spdif = patch_cirrus_build_spdif + }; + +@@ -1384,7 +1384,7 @@ static int patch_conexant_build_spdif(st + return 0; + } + +-static struct snd_ac97_build_ops patch_conexant_ops = { ++static const struct snd_ac97_build_ops patch_conexant_ops = { + .build_spdif = patch_conexant_build_spdif + }; + +@@ -1486,7 +1486,7 @@ static const struct snd_ac97_res_table a + { AC97_VIDEO, 0x9f1f }, + { AC97_AUX, 0x9f1f }, + { AC97_PCM, 0x9f1f }, +- { } /* terminator */ ++ { 0, 0 } /* terminator */ + }; + + static int patch_ad1819(struct snd_ac97 * ac97) +@@ -1560,7 +1560,7 @@ static void patch_ad1881_chained(struct + } + } + +-static struct snd_ac97_build_ops patch_ad1881_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1881_build_ops = { + #ifdef CONFIG_PM + .resume = ad18xx_resume + #endif +@@ -1647,7 +1647,7 @@ static int patch_ad1885_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_ad1885_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1885_build_ops = { + .build_specific = &patch_ad1885_specific, + #ifdef CONFIG_PM + .resume = ad18xx_resume +@@ -1674,7 +1674,7 @@ static int patch_ad1886_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_ad1886_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1886_build_ops = { + .build_specific = &patch_ad1886_specific, + #ifdef CONFIG_PM + .resume = ad18xx_resume +@@ -1881,7 +1881,7 @@ static int patch_ad1981a_specific(struct + ARRAY_SIZE(snd_ac97_ad1981x_jack_sense)); + } + +-static struct snd_ac97_build_ops patch_ad1981a_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1981a_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1981a_specific, + #ifdef CONFIG_PM +@@ -1936,7 +1936,7 @@ static int patch_ad1981b_specific(struct + ARRAY_SIZE(snd_ac97_ad1981x_jack_sense)); + } + +-static struct snd_ac97_build_ops patch_ad1981b_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1981b_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1981b_specific, + #ifdef CONFIG_PM +@@ -2075,7 +2075,7 @@ static int patch_ad1888_specific(struct + return patch_build_controls(ac97, snd_ac97_ad1888_controls, ARRAY_SIZE(snd_ac97_ad1888_controls)); + } + +-static struct snd_ac97_build_ops patch_ad1888_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1888_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1888_specific, + #ifdef CONFIG_PM +@@ -2124,7 +2124,7 @@ static int patch_ad1980_specific(struct + return patch_build_controls(ac97, &snd_ac97_ad198x_2cmic, 1); + } + +-static struct snd_ac97_build_ops patch_ad1980_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1980_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1980_specific, + #ifdef CONFIG_PM +@@ -2239,7 +2239,7 @@ static int patch_ad1985_specific(struct + ARRAY_SIZE(snd_ac97_ad1985_controls)); + } + +-static struct snd_ac97_build_ops patch_ad1985_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1985_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = 
patch_ad1985_specific, + #ifdef CONFIG_PM +@@ -2531,7 +2531,7 @@ static int patch_ad1986_specific(struct + ARRAY_SIZE(snd_ac97_ad1985_controls)); + } + +-static struct snd_ac97_build_ops patch_ad1986_build_ops = { ++static const struct snd_ac97_build_ops patch_ad1986_build_ops = { + .build_post_spdif = patch_ad198x_post_spdif, + .build_specific = patch_ad1986_specific, + #ifdef CONFIG_PM +@@ -2636,7 +2636,7 @@ static int patch_alc650_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_alc650_ops = { ++static const struct snd_ac97_build_ops patch_alc650_ops = { + .build_specific = patch_alc650_specific, + .update_jacks = alc650_update_jacks + }; +@@ -2788,7 +2788,7 @@ static int patch_alc655_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_alc655_ops = { ++static const struct snd_ac97_build_ops patch_alc655_ops = { + .build_specific = patch_alc655_specific, + .update_jacks = alc655_update_jacks + }; +@@ -2900,7 +2900,7 @@ static int patch_alc850_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_alc850_ops = { ++static const struct snd_ac97_build_ops patch_alc850_ops = { + .build_specific = patch_alc850_specific, + .update_jacks = alc850_update_jacks + }; +@@ -2962,7 +2962,7 @@ static int patch_cm9738_specific(struct + return patch_build_controls(ac97, snd_ac97_cm9738_controls, ARRAY_SIZE(snd_ac97_cm9738_controls)); + } + +-static struct snd_ac97_build_ops patch_cm9738_ops = { ++static const struct snd_ac97_build_ops patch_cm9738_ops = { + .build_specific = patch_cm9738_specific, + .update_jacks = cm9738_update_jacks + }; +@@ -3053,7 +3053,7 @@ static int patch_cm9739_post_spdif(struc + return patch_build_controls(ac97, snd_ac97_cm9739_controls_spdif, ARRAY_SIZE(snd_ac97_cm9739_controls_spdif)); + } + +-static struct snd_ac97_build_ops patch_cm9739_ops = { ++static const struct snd_ac97_build_ops patch_cm9739_ops = { + .build_specific = patch_cm9739_specific, + .build_post_spdif = patch_cm9739_post_spdif, + .update_jacks = cm9739_update_jacks +@@ -3227,7 +3227,7 @@ static int patch_cm9761_specific(struct + return patch_build_controls(ac97, snd_ac97_cm9761_controls, ARRAY_SIZE(snd_ac97_cm9761_controls)); + } + +-static struct snd_ac97_build_ops patch_cm9761_ops = { ++static const struct snd_ac97_build_ops patch_cm9761_ops = { + .build_specific = patch_cm9761_specific, + .build_post_spdif = patch_cm9761_post_spdif, + .update_jacks = cm9761_update_jacks +@@ -3323,7 +3323,7 @@ static int patch_cm9780_specific(struct + return patch_build_controls(ac97, cm9780_controls, ARRAY_SIZE(cm9780_controls)); + } + +-static struct snd_ac97_build_ops patch_cm9780_ops = { ++static const struct snd_ac97_build_ops patch_cm9780_ops = { + .build_specific = patch_cm9780_specific, + .build_post_spdif = patch_cm9761_post_spdif /* identical with CM9761 */ + }; +@@ -3443,7 +3443,7 @@ static int patch_vt1616_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_vt1616_ops = { ++static const struct snd_ac97_build_ops patch_vt1616_ops = { + .build_specific = patch_vt1616_specific + }; + +@@ -3797,7 +3797,7 @@ static int patch_it2646_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_it2646_ops = { ++static const struct snd_ac97_build_ops patch_it2646_ops = { + .build_specific = patch_it2646_specific, + .update_jacks = it2646_update_jacks + }; +@@ -3831,7 +3831,7 @@ static int patch_si3036_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_si3036_ops = { ++static const struct 
snd_ac97_build_ops patch_si3036_ops = { + .build_specific = patch_si3036_specific, + }; + +@@ -3864,7 +3864,7 @@ static struct snd_ac97_res_table lm4550_ + { AC97_AUX, 0x1f1f }, + { AC97_PCM, 0x1f1f }, + { AC97_REC_GAIN, 0x0f0f }, +- { } /* terminator */ ++ { 0, 0 } /* terminator */ + }; + + static int patch_lm4550(struct snd_ac97 *ac97) +@@ -3898,7 +3898,7 @@ static int patch_ucb1400_specific(struct + return 0; + } + +-static struct snd_ac97_build_ops patch_ucb1400_ops = { ++static const struct snd_ac97_build_ops patch_ucb1400_ops = { + .build_specific = patch_ucb1400_specific, + }; + +diff -urNp linux-2.6.35.4/sound/pci/ens1370.c linux-2.6.35.4/sound/pci/ens1370.c +--- linux-2.6.35.4/sound/pci/ens1370.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/pci/ens1370.c 2010-09-17 20:12:09.000000000 -0400 +@@ -452,7 +452,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_audio + { PCI_VDEVICE(ENSONIQ, 0x5880), 0, }, /* ES1373 - CT5880 */ + { PCI_VDEVICE(ECTIVA, 0x8938), 0, }, /* Ectiva EV1938 */ + #endif +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, snd_audiopci_ids); +diff -urNp linux-2.6.35.4/sound/pci/hda/patch_hdmi.c linux-2.6.35.4/sound/pci/hda/patch_hdmi.c +--- linux-2.6.35.4/sound/pci/hda/patch_hdmi.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/pci/hda/patch_hdmi.c 2010-09-17 20:12:09.000000000 -0400 +@@ -670,10 +670,10 @@ static void hdmi_non_intrinsic_event(str + cp_ready); + + /* TODO */ +- if (cp_state) +- ; +- if (cp_ready) +- ; ++ if (cp_state) { ++ } ++ if (cp_ready) { ++ } + } + + +diff -urNp linux-2.6.35.4/sound/pci/intel8x0.c linux-2.6.35.4/sound/pci/intel8x0.c +--- linux-2.6.35.4/sound/pci/intel8x0.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/pci/intel8x0.c 2010-09-17 20:12:09.000000000 -0400 +@@ -444,7 +444,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_intel + { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */ + { PCI_VDEVICE(AMD, 0x7445), DEVICE_INTEL }, /* AMD768 */ + { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, snd_intel8x0_ids); +@@ -2135,7 +2135,7 @@ static struct ac97_quirk ac97_quirks[] _ + .type = AC97_TUNE_HP_ONLY + }, + #endif +- { } /* terminator */ ++ { 0, 0, 0, 0, NULL, 0 } /* terminator */ + }; + + static int __devinit snd_intel8x0_mixer(struct intel8x0 *chip, int ac97_clock, +diff -urNp linux-2.6.35.4/sound/pci/intel8x0m.c linux-2.6.35.4/sound/pci/intel8x0m.c +--- linux-2.6.35.4/sound/pci/intel8x0m.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/sound/pci/intel8x0m.c 2010-09-17 20:12:09.000000000 -0400 +@@ -239,7 +239,7 @@ static DEFINE_PCI_DEVICE_TABLE(snd_intel + { PCI_VDEVICE(AMD, 0x746d), DEVICE_INTEL }, /* AMD8111 */ + { PCI_VDEVICE(AL, 0x5455), DEVICE_ALI }, /* Ali5455 */ + #endif +- { 0, } ++ { 0, 0, 0, 0, 0, 0, 0 } + }; + + MODULE_DEVICE_TABLE(pci, snd_intel8x0m_ids); +@@ -1264,7 +1264,7 @@ static struct shortname_table { + { 0x5455, "ALi M5455" }, + { 0x746d, "AMD AMD8111" }, + #endif +- { 0 }, ++ { 0, NULL }, + }; + + static int __devinit snd_intel8x0m_probe(struct pci_dev *pci, +diff -urNp linux-2.6.35.4/usr/gen_init_cpio.c linux-2.6.35.4/usr/gen_init_cpio.c +--- linux-2.6.35.4/usr/gen_init_cpio.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/usr/gen_init_cpio.c 2010-09-17 20:12:09.000000000 -0400 +@@ -299,7 +299,7 @@ static int cpio_mkfile(const char *name, + int retval; + int rc = -1; + int namesize; +- int i; ++ unsigned int i; + + mode |= S_IFREG; + +@@ -386,9 +386,10 @@ static 
char *cpio_replace_env(char *new_ + *env_var = *expanded = '\0'; + strncat(env_var, start + 2, end - start - 2); + strncat(expanded, new_location, start - new_location); +- strncat(expanded, getenv(env_var), PATH_MAX); +- strncat(expanded, end + 1, PATH_MAX); ++ strncat(expanded, getenv(env_var), PATH_MAX - strlen(expanded)); ++ strncat(expanded, end + 1, PATH_MAX - strlen(expanded)); + strncpy(new_location, expanded, PATH_MAX); ++ new_location[PATH_MAX] = 0; + } else + break; + } +diff -urNp linux-2.6.35.4/virt/kvm/kvm_main.c linux-2.6.35.4/virt/kvm/kvm_main.c +--- linux-2.6.35.4/virt/kvm/kvm_main.c 2010-08-26 19:47:12.000000000 -0400 ++++ linux-2.6.35.4/virt/kvm/kvm_main.c 2010-09-17 20:12:09.000000000 -0400 +@@ -1284,6 +1284,7 @@ static int kvm_vcpu_release(struct inode + return 0; + } + ++/* cannot be const */ + static struct file_operations kvm_vcpu_fops = { + .release = kvm_vcpu_release, + .unlocked_ioctl = kvm_vcpu_ioctl, +@@ -1738,6 +1739,7 @@ static int kvm_vm_mmap(struct file *file + return 0; + } + ++/* cannot be const */ + static struct file_operations kvm_vm_fops = { + .release = kvm_vm_release, + .unlocked_ioctl = kvm_vm_ioctl, +@@ -1835,6 +1837,7 @@ out: + return r; + } + ++/* cannot be const */ + static struct file_operations kvm_chardev_ops = { + .unlocked_ioctl = kvm_dev_ioctl, + .compat_ioctl = kvm_dev_ioctl, +@@ -1844,6 +1847,9 @@ static struct miscdevice kvm_dev = { + KVM_MINOR, + "kvm", + &kvm_chardev_ops, ++ {NULL, NULL}, ++ NULL, ++ NULL + }; + + static void hardware_enable(void *junk) +@@ -2178,7 +2184,7 @@ static void kvm_sched_out(struct preempt + kvm_arch_vcpu_put(vcpu); + } + +-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align, ++int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align, + struct module *module) + { + int r; diff --git a/2.6.35/4421_grsec-remove-localversion-grsec.patch b/2.6.35/4421_grsec-remove-localversion-grsec.patch new file mode 100644 index 0000000..31cf878 --- /dev/null +++ b/2.6.35/4421_grsec-remove-localversion-grsec.patch @@ -0,0 +1,9 @@ +From: Kerin Millar <kerframil@gmail.com> + +Remove grsecurity's localversion-grsec file as it is inconsistent with +Gentoo's kernel practices and naming scheme. + +--- a/localversion-grsec 2008-02-24 14:26:59.000000000 +0000 ++++ b/localversion-grsec 1970-01-01 01:00:00.000000000 +0100 +@@ -1 +0,0 @@ +--grsec diff --git a/2.6.35/4422_grsec-mute-warnings.patch b/2.6.35/4422_grsec-mute-warnings.patch new file mode 100644 index 0000000..1e72e63 --- /dev/null +++ b/2.6.35/4422_grsec-mute-warnings.patch @@ -0,0 +1,35 @@ +From: Jory A. Pratt <anarchy@gentoo.org> +Updated patch for kernel 2.6.32 + +The credits/description from the original version of this patch remain accurate +and are included below. + +--- +From: Gordon Malm <gengor@gentoo.org> + +Updated patch for kernel series 2.6.24. + +The credits/description from the original version of this patch remain accurate +and are included below. + +--- +From: Alexander Gabert <gaberta@fh-trier.de> + +This patch removes the warnings introduced by grsec patch 2.1.9 and later. +It removes the -W options added by the patch and restores the original +warning flags of vanilla kernel versions. 
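
For reference, the practical difference is the -W flag (gcc's older spelling of -Wextra). A tiny sketch (hypothetical file warn.c): compiled with gcc -Wall -c warn.c it is silent, while gcc -Wall -W -c warn.c flags both commented lines, since -Wunused-parameter and (for C) -Wsign-compare are enabled by -W rather than -Wall:

    #include <stddef.h>

    int within_limit(int n, int unused_flag)   /* -W: unused parameter */
    {
        size_t limit = 16;
        return n < limit;                      /* -W: signed/unsigned comparison */
    }
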
+ +Acked-by: Christian Heim <phreak@gentoo.org> +--- + +--- a/Makefile 2009-07-29 05:34:01.695857499 +0100 ++++ b/Makefile 2009-07-29 05:58:15.098857201 +0100 +@@ -230,7 +230,7 @@ + + HOSTCC = gcc + HOSTCXX = g++ +-HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks ++HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks + HOSTCXXFLAGS = -O2 -fno-delete-null-pointer-checks + + # Decide whether to build built-in, modular, or both. diff --git a/2.6.35/4423_grsec-remove-protected-paths.patch b/2.6.35/4423_grsec-remove-protected-paths.patch new file mode 100644 index 0000000..9c0fd88 --- /dev/null +++ b/2.6.35/4423_grsec-remove-protected-paths.patch @@ -0,0 +1,20 @@ +From: Anthony G. Basile <basile@opensource.dyc.edu> + +We don't want to allow GRSEC's Makefile to change permissions on +paths in the filesystem. + +--- a/grsecurity/Makefile 2010-05-21 06:52:24.000000000 -0400 ++++ b/grsecurity/Makefile 2010-05-21 06:54:54.000000000 -0400 +@@ -22,8 +22,8 @@ + ifdef CONFIG_GRKERNSEC_HIDESYM + extra-y := grsec_hidesym.o + $(obj)/grsec_hidesym.o: +- @-chmod -f 500 /boot +- @-chmod -f 500 /lib/modules +- @-chmod -f 700 . +- @echo ' grsec: protected kernel image paths' ++ # @-chmod -f 500 /boot ++ # @-chmod -f 500 /lib/modules ++ # @-chmod -f 700 . ++ # @echo ' grsec: protected kernel image paths' + endif diff --git a/2.6.35/4425_grsec-pax-without-grsec.patch b/2.6.35/4425_grsec-pax-without-grsec.patch new file mode 100644 index 0000000..4e81b4b --- /dev/null +++ b/2.6.35/4425_grsec-pax-without-grsec.patch @@ -0,0 +1,92 @@ +From: Jory Pratt <anarchy@gentoo.org> +Updated patch for kernel 2.6.32 + +The credits/description from the original version of this patch remain accurate +and are included below. +-- +From: Gordon Malm <gengor@gentoo.org> + +Allow PaX options to be selected without first selecting CONFIG_GRKERNSEC. + +This patch has been updated to keep current with newer kernel versions. +The original version of this patch contained no credits/description. 
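
The hunks that follow all apply one pattern, sketched here in compilable miniature (CONFIG_GRKERNSEC and curr_ip are stand-ins for the kernel's configuration symbol and task_struct->signal->curr_ip). Note the dangling else: when the option is unset, the preprocessor removes the whole if/else and only the unconditional report remains.

    #include <stdio.h>

    #define CONFIG_GRKERNSEC 1                /* toggle to see both shapes */
    static unsigned int curr_ip = 1;          /* stand-in: remote origin known */

    static void pax_report_violation(void)
    {
    #ifdef CONFIG_GRKERNSEC
        if (curr_ip)
            printf("PAX: From <remote ip>: attempt blocked\n");
        else
    #endif
            printf("PAX: attempt blocked\n");
    }

    int main(void)
    {
        pax_report_violation();
        return 0;
    }
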
+ +--- a/arch/x86/mm/fault.c ++++ b/arch/x86/mm/fault.c +@@ -653,10 +653,12 @@ + + #ifdef CONFIG_PAX_KERNEXEC + if (init_mm.start_code <= address && address < init_mm.end_code) { ++#ifdef CONFIG_GRKERNSEC + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); + else ++#endif + printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", + current->comm, task_pid_nr(current), current_uid(), current_euid()); + } +--- a/fs/exec.c ++++ b/fs/exec.c +@@ -1762,9 +1762,11 @@ + } + up_read(&mm->mmap_sem); + } ++#ifdef CONFIG_GRKERNSEC + if (tsk->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset); + else ++#endif + printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset); + printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, " + "PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk), +@@ -1779,10 +1781,12 @@ + #ifdef CONFIG_PAX_REFCOUNT + void pax_report_refcount_overflow(struct pt_regs *regs) + { ++#ifdef CONFIG_GRKERNSEC + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", + ¤t->signal->curr_ip, current->comm, task_pid_nr(current), current_uid(), current_euid()); + else ++#endif + printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", + current->comm, task_pid_nr(current), current_uid(), current_euid()); + print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs)); +@@ -1850,10 +1854,12 @@ + + void pax_report_leak_to_user(const void *ptr, unsigned long len) + { ++#ifdef CONFIG_GRKERNSEC + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: kernel memory leak attempt detected from %p (%lu bytes)\n", + ¤t->signal->curr_ip, ptr, len); + else ++#endif + printk(KERN_ERR "PAX: kernel memory leak attempt detected from %p (%lu bytes)\n", ptr, len); + dump_stack(); + do_group_exit(SIGKILL); +@@ -1861,10 +1867,12 @@ + + void pax_report_overflow_from_user(const void *ptr, unsigned long len) + { ++#ifdef CONFIG_GRKERNSEC + if (current->signal->curr_ip) + printk(KERN_ERR "PAX: From %pI4: kernel memory overflow attempt detected to %p (%lu bytes)\n", + ¤t->signal->curr_ip, ptr, len); + else ++#endif + printk(KERN_ERR "PAX: kernel memory overflow attempt detected to %p (%lu bytes)\n", ptr, len); + dump_stack(); + do_group_exit(SIGKILL); +--- a/security/Kconfig ++++ b/security/Kconfig +@@ -23,7 +23,7 @@ + + config PAX + bool "Enable various PaX features" +- depends on GRKERNSEC && (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) ++ depends on (ALPHA || ARM || AVR32 || IA64 || MIPS || PARISC || PPC || SPARC || X86) + help + This allows you to enable various PaX features. PaX adds + intrusion prevention mechanisms to the kernel that reduce diff --git a/2.6.35/4430_grsec-kconfig-default-gids.patch b/2.6.35/4430_grsec-kconfig-default-gids.patch new file mode 100644 index 0000000..7ba8aa2 --- /dev/null +++ b/2.6.35/4430_grsec-kconfig-default-gids.patch @@ -0,0 +1,76 @@ +From: Kerin Millar <kerframil@gmail.com> + +grsecurity contains a number of options which allow certain protections +to be applied to or exempted from members of a given group. 
However, the +default GIDs specified in the upstream patch are entirely arbitrary and +there is no telling which (if any) groups the GIDs will correlate with +on an end-user's system. Because some users don't pay a great deal of +attention to the finer points of kernel configuration, it is probably +wise to specify some reasonable defaults so as to stop careless users +from shooting themselves in the foot. + +--- a/grsecurity/Kconfig ++++ b/grsecurity/Kconfig +@@ -402,7 +402,7 @@ + config GRKERNSEC_PROC_GID + int "GID for special group" + depends on GRKERNSEC_PROC_USERGROUP +- default 1001 ++ default 10 + + config GRKERNSEC_PROC_ADD + bool "Additional restrictions" +@@ -611,7 +611,7 @@ + config GRKERNSEC_AUDIT_GID + int "GID for auditing" + depends on GRKERNSEC_AUDIT_GROUP +- default 1007 ++ default 100 + + config GRKERNSEC_EXECLOG + bool "Exec logging" +@@ -785,7 +785,7 @@ + config GRKERNSEC_TPE_GID + int "GID for untrusted users" + depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT +- default 1005 ++ default 100 + help + Setting this GID determines what group TPE restrictions will be + *enabled* for. If the sysctl option is enabled, a sysctl option +@@ -794,7 +794,7 @@ + config GRKERNSEC_TPE_GID + int "GID for trusted users" + depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT +- default 1005 ++ default 10 + help + Setting this GID determines what group TPE restrictions will be + *disabled* for. If the sysctl option is enabled, a sysctl option +@@ -865,7 +865,7 @@ + config GRKERNSEC_SOCKET_ALL_GID + int "GID to deny all sockets for" + depends on GRKERNSEC_SOCKET_ALL +- default 1004 ++ default 65534 + help + Here you can choose the GID to disable socket access for. Remember to + add the users you want socket access disabled for to the GID +@@ -886,7 +886,7 @@ + config GRKERNSEC_SOCKET_CLIENT_GID + int "GID to deny client sockets for" + depends on GRKERNSEC_SOCKET_CLIENT +- default 1003 ++ default 65534 + help + Here you can choose the GID to disable client socket access for. + Remember to add the users you want client socket access disabled for to +@@ -904,7 +904,7 @@ + config GRKERNSEC_SOCKET_SERVER_GID + int "GID to deny server sockets for" + depends on GRKERNSEC_SOCKET_SERVER +- default 1002 ++ default 65534 + help + Here you can choose the GID to disable server socket access for. + Remember to add the users you want server socket access disabled for to diff --git a/2.6.35/4435_grsec-kconfig-gentoo.patch b/2.6.35/4435_grsec-kconfig-gentoo.patch new file mode 100644 index 0000000..c9fbc5f --- /dev/null +++ b/2.6.35/4435_grsec-kconfig-gentoo.patch @@ -0,0 +1,444 @@ +From: Gordon Malm <gengor@gentoo.org> +From: Jory A. Pratt <anarchy@gentoo.org> +From: Kerin Millar <kerframil@gmail.com> + +Add Hardened Gentoo [server/workstation] predefined grsecurity +levels. They're designed to provide a comparitively high level of +security while remaining generally suitable for as great a majority +of the userbase as possible (particularly new users). + +Make Hardened Gentoo [workstation] predefined grsecurity level the +default. The Hardened Gentoo [server] level is more restrictive +and conflicts with some software and thus would be less suitable. 
+ +The original version of this patch was conceived and created by: +Ned Ludd <solar@gentoo.org> + +--- a/grsecurity/Kconfig 2009-07-31 02:34:44.661115764 +0100 ++++ b/grsecurity/Kconfig 2009-08-01 02:04:02.047475888 +0100 +@@ -18,7 +18,7 @@ + choice + prompt "Security Level" + depends on GRKERNSEC +- default GRKERNSEC_CUSTOM ++ default GRKERNSEC_HARDENED_WORKSTATION_NO_RBAC + + config GRKERNSEC_LOW + bool "Low" +@@ -191,6 +191,416 @@ + - Ptrace restrictions + - Restricted vm86 mode + ++config GRKERNSEC_HARDENED_SERVER ++ bool "Hardened Gentoo [server]" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_EXECVE ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SHMAT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_FCHDIR ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_CHROOT_CAPS ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_CHROOT_FINDTASK ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_HIDESYM ++ select GRKERNSEC_BRUTE ++ select GRKERNSEC_PROC_USERGROUP ++ select GRKERNSEC_KMEM ++ select GRKERNSEC_RESLOG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_PROC_ADD ++ select GRKERNSEC_CHROOT_CHMOD ++ select GRKERNSEC_CHROOT_NICE ++ select GRKERNSEC_AUDIT_MOUNT ++ select GRKERNSEC_MODHARDEN if (MODULES) ++ select GRKERNSEC_VM86 if (X86_32) ++ select GRKERNSEC_IO if (X86) ++ select GRKERNSEC_PROC_IPADDR ++ select GRKERNSEC_SYSCTL ++ select GRKERNSEC_SYSCTL_ON ++ select PAX ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_NOEXEC ++ select PAX_MPROTECT ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ select PAX_KERNEXEC if (X86 && (!X86_32 || X86_WP_WORKS_OK)) ++ select PAX_MEMORY_UDEREF if (X86_32) ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64) ++ select PAX_SEGMEXEC if (X86_32) ++ select PAX_PAGEEXEC ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64) ++ select PAX_EMUTRAMP if (PARISC) ++ select PAX_EMUSIGRT if (PARISC) ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || PPC32 || PPC64 || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB)) ++ select PAX_MEMORY_SANITIZE ++ help ++ If you say Y here, a configuration will be used that is endorsed by ++ the Hardened Gentoo project. Therefore, many of the protections ++ made available by grsecurity and PaX will be enabled. ++ ++ Hardened Gentoo's pre-defined security levels are designed to provide ++ a high level of security while minimizing incompatibilities with the ++ majority of available software. For further information, please ++ view <http://www.grsecurity.net> and <http://pax.grsecurity.net> as ++ well as the Hardened Gentoo Primer at ++ <http://www.gentoo.org/proj/en/hardened/primer.xml>. ++ ++ This Hardened Gentoo [server] level is identical to the ++ Hardened Gentoo [workstation] level, but with the GRKERNSEC_IO, ++ PAX_KERNEXEC and PAX_NOELFRELOCS security features enabled. ++ Accordingly, this is the preferred security level if the system will ++ not be utilizing software incompatible with the aforementioned ++ grsecurity/PaX features. 
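
The paxctl mechanics mentioned in the next paragraph hinge on a PaX-specific ELF program header. As a rough sketch of what such a tool looks for (assumptions: 64-bit ELF, minimal error handling; PT_PAX_FLAGS is the PaX-defined phdr type and is not present in every elf.h):

    #include <elf.h>
    #include <stdio.h>

    #ifndef PT_PAX_FLAGS
    #define PT_PAX_FLAGS 0x65041580           /* PaX-specific phdr type */
    #endif

    int main(int argc, char **argv)
    {
        if (argc != 2)
            return 1;
        FILE *f = fopen(argv[1], "rb");
        Elf64_Ehdr eh;
        if (!f || fread(&eh, sizeof eh, 1, f) != 1)
            return 1;
        fseek(f, (long)eh.e_phoff, SEEK_SET);
        for (int i = 0; i < eh.e_phnum; i++) {
            Elf64_Phdr ph;
            if (fread(&ph, sizeof ph, 1, f) != 1)
                break;
            if (ph.p_type == PT_PAX_FLAGS)
                printf("PT_PAX_FLAGS present, p_flags=0x%x\n",
                       (unsigned)ph.p_flags);
        }
        fclose(f);
        return 0;
    }
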
++ ++ You may wish to emerge paxctl, a utility which allows you to toggle ++ PaX features on problematic binaries on an individual basis. Note that ++ this only works for ELF binaries that contain a PT_PAX_FLAGS header. ++ Translated, this means that if you wish to toggle PaX features on ++ binaries provided by applications that are distributed only in binary ++ format (rather than being built locally from sources), you will need to ++ run paxctl -C on the binaries beforehand so as to inject the missing ++ headers. ++ ++ When this level is selected, some options cannot be changed. However, ++ you may opt to fully customize the options that are selected by ++ choosing "Custom" in the Security Level menu. You may find it helpful ++ to inherit the options selected by the "Hardened Gentoo [server]" ++ security level as a starting point for further configuration. To ++ accomplish this, select this security level then exit the menuconfig ++ interface, saving changes when prompted. Then, run make menuconfig ++ again and select the "Custom" level. ++ ++ Note that this security level probably should not be used if the ++ target system is a 32bit x86 virtualized guest. If you intend to run ++ the kernel in a 32bit x86 virtualized guest you will likely need to ++ disable the PAX_MEMORY_UDEREF option in order to avoid an unacceptable ++ impact on performance. ++ ++config GRKERNSEC_HARDENED_SERVER_NO_RBAC ++ bool "Hardened Gentoo [server no rbac]" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_EXECVE ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SHMAT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_FCHDIR ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_CHROOT_CAPS ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_CHROOT_FINDTASK ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_HIDESYM ++ select GRKERNSEC_BRUTE ++ select GRKERNSEC_PROC_USERGROUP ++ select GRKERNSEC_KMEM ++ select GRKERNSEC_RESLOG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_PROC_ADD ++ select GRKERNSEC_CHROOT_CHMOD ++ select GRKERNSEC_CHROOT_NICE ++ select GRKERNSEC_AUDIT_MOUNT ++ select GRKERNSEC_MODHARDEN if (MODULES) ++ select GRKERNSEC_VM86 if (X86_32) ++ select GRKERNSEC_IO if (X86) ++ select GRKERNSEC_PROC_IPADDR ++ select GRKERNSEC_SYSCTL ++ select GRKERNSEC_SYSCTL_ON ++ select GRKERNSEC_NO_RBAC ++ select PAX ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_NOEXEC ++ select PAX_MPROTECT ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_NO_ACL_FLAGS ++ select PAX_KERNEXEC if (X86 && (!X86_32 || X86_WP_WORKS_OK)) ++ select PAX_MEMORY_UDEREF if (X86_32) ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64) ++ select PAX_SEGMEXEC if (X86_32) ++ select PAX_PAGEEXEC ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64) ++ select PAX_EMUTRAMP if (PARISC) ++ select PAX_EMUSIGRT if (PARISC) ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || PPC32 || PPC64 || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB)) ++ select PAX_MEMORY_SANITIZE ++ help ++ If you say Y here, a configuration will be used that is endorsed by ++ the Hardened Gentoo project. 
Therefore, many of the protections ++ made available by grsecurity and PaX will be enabled. ++ ++ Hardened Gentoo's pre-defined security levels are designed to provide ++ a high level of security while minimizing incompatibilities with the ++ majority of available software. For further information, please ++ view <http://www.grsecurity.net> and <http://pax.grsecurity.net> as ++ well as the Hardened Gentoo Primer at ++ <http://www.gentoo.org/proj/en/hardened/primer.xml>. ++ ++ This Hardened Gentoo [server] level is identical to the ++ Hardened Gentoo [workstation] level, but with the GRKERNSEC_IO, ++ PAX_KERNEXEC and PAX_NOELFRELOCS security features enabled. ++ Accordingly, this is the preferred security level if the system will ++ not be utilizing software incompatible with the aforementioned ++ grsecurity/PaX features. ++ ++ You may wish to emerge paxctl, a utility which allows you to toggle ++ PaX features on problematic binaries on an individual basis. Note that ++ this only works for ELF binaries that contain a PT_PAX_FLAGS header. ++ Translated, this means that if you wish to toggle PaX features on ++ binaries provided by applications that are distributed only in binary ++ format (rather than being built locally from sources), you will need to ++ run paxctl -C on the binaries beforehand so as to inject the missing ++ headers. ++ ++ When this level is selected, some options cannot be changed. However, ++ you may opt to fully customize the options that are selected by ++ choosing "Custom" in the Security Level menu. You may find it helpful ++ to inherit the options selected by the "Hardened Gentoo [server]" ++ security level as a starting point for further configuration. To ++ accomplish this, select this security level then exit the menuconfig ++ interface, saving changes when prompted. Then, run make menuconfig ++ again and select the "Custom" level. ++ ++ Note that this security level probably should not be used if the ++ target system is a 32bit x86 virtualized guest. If you intend to run ++ the kernel in a 32bit x86 virtualized guest you will likely need to ++ disable the PAX_MEMORY_UDEREF option in order to avoid an unacceptable ++ impact on performance. 
++ ++config GRKERNSEC_HARDENED_WORKSTATION ++ bool "Hardened Gentoo [workstation]" ++ select GRKERNSEC_LINK ++ select GRKERNSEC_FIFO ++ select GRKERNSEC_EXECVE ++ select GRKERNSEC_DMESG ++ select GRKERNSEC_FORKFAIL ++ select GRKERNSEC_TIME ++ select GRKERNSEC_SIGNAL ++ select GRKERNSEC_CHROOT ++ select GRKERNSEC_CHROOT_SHMAT ++ select GRKERNSEC_CHROOT_UNIX ++ select GRKERNSEC_CHROOT_MOUNT ++ select GRKERNSEC_CHROOT_FCHDIR ++ select GRKERNSEC_CHROOT_PIVOT ++ select GRKERNSEC_CHROOT_DOUBLE ++ select GRKERNSEC_CHROOT_CHDIR ++ select GRKERNSEC_CHROOT_MKNOD ++ select GRKERNSEC_CHROOT_CAPS ++ select GRKERNSEC_CHROOT_SYSCTL ++ select GRKERNSEC_CHROOT_FINDTASK ++ select GRKERNSEC_PROC ++ select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR) ++ select GRKERNSEC_HIDESYM ++ select GRKERNSEC_BRUTE ++ select GRKERNSEC_PROC_USERGROUP ++ select GRKERNSEC_KMEM ++ select GRKERNSEC_RESLOG ++ select GRKERNSEC_RANDNET ++ select GRKERNSEC_CHROOT_CHMOD ++ select GRKERNSEC_CHROOT_NICE ++ select GRKERNSEC_AUDIT_MOUNT ++ select GRKERNSEC_MODHARDEN if (MODULES) ++ select GRKERNSEC_VM86 if (X86_32) ++ select GRKERNSEC_PROC_IPADDR ++ select GRKERNSEC_SYSCTL ++ select GRKERNSEC_SYSCTL_ON ++ select PAX ++ select PAX_RANDUSTACK ++ select PAX_ASLR ++ select PAX_RANDMMAP ++ select PAX_NOEXEC ++ select PAX_MPROTECT ++ select PAX_EI_PAX ++ select PAX_PT_PAX_FLAGS ++ select PAX_HAVE_ACL_FLAGS ++ # select PAX_KERNEXEC if (X86 && (!X86_32 || X86_WP_WORKS_OK)) ++ select PAX_MEMORY_UDEREF if (X86_32) ++ select PAX_RANDKSTACK if (X86_TSC && !X86_64) ++ select PAX_SEGMEXEC if (X86_32) ++ select PAX_PAGEEXEC ++ select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64) ++ select PAX_EMUTRAMP if (PARISC) ++ select PAX_EMUSIGRT if (PARISC) ++ select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC) ++ select PAX_REFCOUNT if (X86 || SPARC64) ++ select PAX_USERCOPY if ((X86 || PPC32 || PPC64 || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB)) ++ select PAX_MEMORY_SANITIZE ++ help ++ If you say Y here, a configuration will be used that is endorsed by ++ the Hardened Gentoo project. Therefore, many of the protections ++ made available by grsecurity and PaX will be enabled. ++ ++ Hardened Gentoo's pre-defined security levels are designed to provide ++ a high level of security while minimizing incompatibilities with the ++ majority of available software. For further information, please ++ view <http://www.grsecurity.net> and <http://pax.grsecurity.net> as ++ well as the Hardened Gentoo Primer at ++ <http://www.gentoo.org/proj/en/hardened/primer.xml>. ++ ++ This Hardened Gentoo [workstation] level is designed for machines ++ which are intended to run software not compatible with the ++ GRKERNSEC_IO, PAX_KERNEXEC and PAX_NOELFRELOCS features of grsecurity. ++ Accordingly, this security level is suitable for use with the X server ++ "Xorg" and/or any system that will act as host OS to the virtualization ++ softwares vmware-server or virtualbox. ++ ++ You may wish to emerge paxctl, a utility which allows you to toggle ++ PaX features on problematic binaries on an individual basis. Note that ++ this only works for ELF binaries that contain a PT_PAX_FLAGS header. ++ Translated, this means that if you wish to toggle PaX features on ++ binaries provided by applications that are distributed only in binary ++ format (rather than being built locally from sources), you will need to ++ run paxctl -C on the binaries beforehand so as to inject the missing ++ headers. ++ ++ When this level is selected, some options cannot be changed. 
++	  you may opt to fully customize the options that are selected by
++	  choosing "Custom" in the Security Level menu. You may find it helpful
++	  to inherit the options selected by the "Hardened Gentoo [workstation]"
++	  security level as a starting point for further configuration. To
++	  accomplish this, select this security level, then exit the menuconfig
++	  interface, saving changes when prompted. Then run make menuconfig
++	  again and select the "Custom" level.
++
++	  Note that this security level probably should not be used if the
++	  target system is a 32-bit x86 virtualized guest. If you intend to run
++	  the kernel in a 32-bit x86 virtualized guest, you will likely need to
++	  disable the PAX_MEMORY_UDEREF option in order to avoid an unacceptable
++	  impact on performance.
++
++config GRKERNSEC_HARDENED_WORKSTATION_NO_RBAC
++	bool "Hardened Gentoo [workstation no rbac]"
++	select GRKERNSEC_LINK
++	select GRKERNSEC_FIFO
++	select GRKERNSEC_EXECVE
++	select GRKERNSEC_DMESG
++	select GRKERNSEC_FORKFAIL
++	select GRKERNSEC_TIME
++	select GRKERNSEC_SIGNAL
++	select GRKERNSEC_CHROOT
++	select GRKERNSEC_CHROOT_SHMAT
++	select GRKERNSEC_CHROOT_UNIX
++	select GRKERNSEC_CHROOT_MOUNT
++	select GRKERNSEC_CHROOT_FCHDIR
++	select GRKERNSEC_CHROOT_PIVOT
++	select GRKERNSEC_CHROOT_DOUBLE
++	select GRKERNSEC_CHROOT_CHDIR
++	select GRKERNSEC_CHROOT_MKNOD
++	select GRKERNSEC_CHROOT_CAPS
++	select GRKERNSEC_CHROOT_SYSCTL
++	select GRKERNSEC_CHROOT_FINDTASK
++	select GRKERNSEC_PROC
++	select GRKERNSEC_PROC_MEMMAP if (PAX_NOEXEC || PAX_ASLR)
++	select GRKERNSEC_HIDESYM
++	select GRKERNSEC_BRUTE
++	select GRKERNSEC_PROC_USERGROUP
++	select GRKERNSEC_KMEM
++	select GRKERNSEC_RESLOG
++	select GRKERNSEC_RANDNET
++	select GRKERNSEC_CHROOT_CHMOD
++	select GRKERNSEC_CHROOT_NICE
++	select GRKERNSEC_AUDIT_MOUNT
++	select GRKERNSEC_MODHARDEN if (MODULES)
++	select GRKERNSEC_VM86 if (X86_32)
++	select GRKERNSEC_PROC_IPADDR
++	select GRKERNSEC_SYSCTL
++	select GRKERNSEC_SYSCTL_ON
++	select GRKERNSEC_NO_RBAC
++	select PAX
++	select PAX_RANDUSTACK
++	select PAX_ASLR
++	select PAX_RANDMMAP
++	select PAX_NOEXEC
++	select PAX_MPROTECT
++	select PAX_EI_PAX
++	select PAX_PT_PAX_FLAGS
++	select PAX_NO_ACL_FLAGS
++	# select PAX_KERNEXEC if (X86 && (!X86_32 || X86_WP_WORKS_OK))
++	select PAX_MEMORY_UDEREF if (X86_32)
++	select PAX_RANDKSTACK if (X86_TSC && !X86_64)
++	select PAX_SEGMEXEC if (X86_32)
++	select PAX_PAGEEXEC
++	select PAX_EMUPLT if (ALPHA || PARISC || SPARC32 || SPARC64)
++	select PAX_EMUTRAMP if (PARISC)
++	select PAX_EMUSIGRT if (PARISC)
++	select PAX_ETEXECRELOCS if (ALPHA || IA64 || PARISC)
++	select PAX_REFCOUNT if (X86 || SPARC64)
++	select PAX_USERCOPY if ((X86 || PPC32 || PPC64 || SPARC32 || SPARC64) && (SLAB || SLUB || SLOB))
++	select PAX_MEMORY_SANITIZE
++	help
++	  If you say Y here, a configuration will be used that is endorsed by
++	  the Hardened Gentoo project. Therefore, many of the protections
++	  made available by grsecurity and PaX will be enabled.
++
++	  Hardened Gentoo's pre-defined security levels are designed to provide
++	  a high level of security while minimizing incompatibilities with the
++	  majority of available software. For further information, please
++	  view <http://www.grsecurity.net> and <http://pax.grsecurity.net> as
++	  well as the Hardened Gentoo Primer at
++	  <http://www.gentoo.org/proj/en/hardened/primer.xml>.
++
++	  This Hardened Gentoo [workstation no rbac] level is identical to the
++	  Hardened Gentoo [workstation] level, except that support for
++	  grsecurity's RBAC system is disabled. Like that level, it is designed
++	  for machines intended to run software that is not compatible with the
++	  GRKERNSEC_IO, PAX_KERNEXEC and PAX_NOELFRELOCS features of
++	  grsecurity, and is therefore suitable for use with the X server
++	  "Xorg" and/or any system that will act as a host OS for the
++	  virtualization software vmware-server or virtualbox.
++
++	  You may wish to emerge paxctl, a utility which allows you to toggle
++	  PaX features on problematic binaries on an individual basis. Note that
++	  this only works for ELF binaries that contain a PT_PAX_FLAGS header.
++	  In other words, if you wish to toggle PaX features on binaries
++	  provided by applications that are distributed only in binary format
++	  (rather than being built locally from sources), you will need to run
++	  paxctl -C on the binaries beforehand to inject the missing headers.
++
++	  When this level is selected, some options cannot be changed. However,
++	  you may opt to fully customize the options that are selected by
++	  choosing "Custom" in the Security Level menu. You may find it helpful
++	  to inherit the options selected by the "Hardened Gentoo
++	  [workstation no rbac]" security level as a starting point for further
++	  configuration. To accomplish this, select this security level, then
++	  exit the menuconfig interface, saving changes when prompted. Then run
++	  make menuconfig again and select the "Custom" level.
++
++	  Note that this security level probably should not be used if the
++	  target system is a 32-bit x86 virtualized guest. If you intend to run
++	  the kernel in a 32-bit x86 virtualized guest, you will likely need to
++	  disable the PAX_MEMORY_UDEREF option in order to avoid an unacceptable
++	  impact on performance.
++
+ config GRKERNSEC_CUSTOM
+ 	bool "Custom"
+ 	help
diff --git a/2.6.35/4440_selinux-avc_audit-log-curr_ip.patch b/2.6.35/4440_selinux-avc_audit-log-curr_ip.patch
new file mode 100644
index 0000000..64d6cf3
--- /dev/null
+++ b/2.6.35/4440_selinux-avc_audit-log-curr_ip.patch
@@ -0,0 +1,65 @@
+From: Gordon Malm <gengor@gentoo.org>
+
+This is a reworked version of the original
+*_selinux-avc_audit-log-curr_ip.patch carried in earlier releases of
+hardened-sources.
+
+Dropping the patch, or simply fixing the #ifdef of the original patch,
+could break automated logging setups, so this route was necessary.
+
+Suggestions for improving the help text are welcome.
+
+The original patch's description is still accurate and is included below.
+
+---
+Provides support for a new field, ipaddr, within the SELinux
+AVC audit log, relying on task_struct->curr_ip (IPv4 only) as
+provided by the grsecurity patch, which must be applied first.
+
+Signed-off-by: Lorenzo Hernandez Garcia-Hierro <lorenzo@gnu.org>
+---
+
+--- a/grsecurity/Kconfig
++++ b/grsecurity/Kconfig
+@@ -1371,6 +1371,27 @@
+ menu "Logging Options"
+ depends on GRKERNSEC
+
++config GRKERNSEC_SELINUX_AVC_LOG_IPADDR
++	def_bool n
++	prompt "Add source IP address to SELinux AVC log messages"
++	depends on GRKERNSEC && SECURITY_SELINUX
++	help
++	  If you say Y here, a new field "ipaddr=" will be added to many SELinux
++	  AVC log messages. The value of this field in any given message
++	  represents the source IP address of the remote machine/user that
++	  created the offending process.
++
++	  This information is sourced from task_struct->curr_ip, provided by
++	  grsecurity's GRKERNSEC top-level configuration option. One limitation
++	  is that only IPv4 is supported.
++
++	  In many instances SELinux AVC log messages already record a superior
++	  level of information that also includes the source port and the
++	  destination IP/port. Additionally, SELinux's AVC logging code
++	  supports IPv6.
++
++	  However, grsecurity's task_struct->curr_ip will sometimes (and
++	  perhaps often) provide the offender's IP address where stock SELinux
++	  logging fails to do so.
++
+ config GRKERNSEC_FLOODTIME
+ 	int "Seconds in between log messages (minimum)"
+ 	default 10
+--- a/security/selinux/avc.c
++++ b/security/selinux/avc.c
+@@ -143,6 +143,11 @@ static void avc_dump_query(struct audit_
+ 	char *scontext;
+ 	u32 scontext_len;
+
++#ifdef CONFIG_GRKERNSEC_SELINUX_AVC_LOG_IPADDR
++	if (current->signal->curr_ip)
++		audit_log_format(ab, "ipaddr=%u.%u.%u.%u ", NIPQUAD(current->signal->curr_ip));
++#endif
++
+ 	rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+ 	if (rc)
+ 		audit_log_format(ab, "ssid=%d", ssid);
diff --git a/2.6.35/4445_disable-compat_vdso.patch b/2.6.35/4445_disable-compat_vdso.patch
new file mode 100644
index 0000000..8781034
--- /dev/null
+++ b/2.6.35/4445_disable-compat_vdso.patch
@@ -0,0 +1,46 @@
+No need to wrap vdso calls, as Gentoo does not use any version of
+glibc <= 2.3.3.
+---
+From: Gordon Malm <gengor@gentoo.org>
+From: Kerin Millar <kerframil@gmail.com>
+From: Jory A. Pratt <anarchy@gentoo.org>
+
+COMPAT_VDSO is inappropriate for any modern Hardened Gentoo system. It
+conflicts with various parts of PaX, crashing the system if enabled
+while PaX's NOEXEC or UDEREF features are active. Moreover, it prevents
+a number of important PaX options from appearing in the configuration
+menu, including all PaX NOEXEC implementations. Unfortunately, the
+reason for the disappearance of these PaX configuration options is
+often far from obvious to inexperienced users.
+
+Therefore, we disable the COMPAT_VDSO menu entry entirely. However,
+COMPAT_VDSO operation could still be requested via the bootparam and
+sysctl interfaces, so we must also disable the ability to select
+COMPAT_VDSO operation at boot or runtime. Here we patch the kernel so
+that selecting COMPAT_VDSO operation at boot/runtime has no effect if
+conflicting PaX options are enabled, leaving VDSO_ENABLED operation
+intact.
+
+Closes bug: http://bugs.gentoo.org/show_bug.cgi?id=210138
+
+diff -urp a/arch/x86/Kconfig b/arch/x86/Kconfig
+--- a/arch/x86/Kconfig	2009-07-31 01:36:57.323857684 +0100
++++ b/arch/x86/Kconfig	2009-07-31 01:51:39.395749681 +0100
+@@ -1645,17 +1645,8 @@
+
+ config COMPAT_VDSO
+ 	def_bool n
+-	prompt "Compat VDSO support"
+ 	depends on X86_32 || IA32_EMULATION
+ 	depends on !PAX_NOEXEC && !PAX_MEMORY_UDEREF
+-	---help---
+-	  Map the 32-bit VDSO to the predictable old-style address too.
+-
+-	  Say N here if you are running a sufficiently recent glibc
+-	  version (2.3.3 or later), to remove the high-mapped
+-	  VDSO mapping and to exclusively use the randomized VDSO.
+-
+-	  If unsure, say Y.
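A hedged sketch of the boot-time interface mentioned in the description above
(the vdso= parameter and its 0/1/2 values are taken from the
Documentation/kernel-parameters.txt of this kernel generation; the matching
sysctl path varies between versions, so it is omitted here):

    # On 32-bit x86, the compat (fixed-address) VDSO is normally requested with
    #     vdso=2    on the kernel command line
    # (vdso=1 selects the randomized VDSO, vdso=0 disables the VDSO mapping).
    # Per the patch description, with 4445_disable-compat_vdso.patch applied
    # and PaX NOEXEC or UDEREF compiled in, a vdso=2 request has no effect:
    # normal VDSO_ENABLED operation remains in place, so the PaX protections
    # cannot be switched off from the bootloader.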
+
+ config CMDLINE_BOOL
+ 	bool "Built-in kernel command line"
diff --git a/2.6.35/4450_check_ssp_fix.patch b/2.6.35/4450_check_ssp_fix.patch
new file mode 100644
index 0000000..b22bc77
--- /dev/null
+++ b/2.6.35/4450_check_ssp_fix.patch
@@ -0,0 +1,17 @@
+2010-03-31 Magnus Granberg <zorry@gentoo.org>
+
+	#312335
+	arch/x86/Makefile: Add KBUILD_CPPFLAGS to the SSP test command
+	line; otherwise the test file is built with -fPIE (a simplified
+	sketch of the test script follows below).
+
+--- a/arch/x86/Makefile	2010-03-31 16:39:32.000000000 +0200
++++ b/arch/x86/Makefile	2010-03-31 16:36:53.000000000 +0200
+@@ -74,7 +74,7 @@
+
+ ifdef CONFIG_CC_STACKPROTECTOR
+ 	cc_has_sp := $(srctree)/scripts/gcc-x86_$(BITS)-has-stack-protector.sh
+-	ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(biarch)),y)
++	ifeq ($(shell $(CONFIG_SHELL) $(cc_has_sp) $(CC) $(KBUILD_CPPFLAGS) $(biarch)),y)
+ 	stackp-y := -fstack-protector
+ 	KBUILD_CFLAGS += $(stackp-y)
+ 	else
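For context, the gcc-x86_$(BITS)-has-stack-protector.sh helper invoked above
is a small capability probe that prints y or n. The following is a simplified
sketch of the 64-bit variant, not a verbatim copy (the real script ships in
scripts/ and differs slightly between kernel versions); it illustrates why the
probe's command line matters: with a hardened, PIE-by-default gcc, the
kernel's KBUILD_CPPFLAGS must be passed along, or the probe may be compiled
with the compiler's default -fPIE and misreport stack-protector support:

    #!/bin/sh
    # Compile a throwaway function with -fstack-protector and check whether
    # the generated assembly references the stack canary through the %gs
    # segment; "$@" is the compiler plus whatever flags the Makefile passes.
    echo "int foo(void) { char X[200]; return 3; }" | \
        "$@" -S -x c -O0 -mcmodel=kernel -fstack-protector - -o - 2>/dev/null | \
        grep -q "%gs" && echo y || echo n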