Diffstat (limited to 'src/kernel')
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/0000_README                            |    94
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/1000_grsecurity-2.0.1-2.6.7.patch      | 22165
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/1010_grsec_no_depend_pax.patch         |    12
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/1300_linux-2.6.4-selinux-hooks.patch   |   137
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/1305_linux-2.6.7-selinux-ipaddr.patch  |    14
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/1310_linux-2.6.5-extra_sec_ops.patch   |    63
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/2010_tcp-stealth-2.6.7.patch           |   184
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/3000_netdev-random-core-2.6.7.patch    |   300
-rw-r--r--  src/kernel/hardened-patches/hardened-patches-2.6-7.6/3005_netdev-random-drivers-2.6.7.patch |  2362
9 files changed, 25331 insertions, 0 deletions
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/0000_README b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/0000_README
new file mode 100644
index 0000000000..30ea648724
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/0000_README
@@ -0,0 +1,94 @@
+README
+-------------------------------------------------------------------------------
+This patchset forms the 2.6 series of hardened-sources.
+It includes both SELinux and GRSecurity, as well as enhancements to each.
+Also included are additional hardening features useful in either system
+(note that, as of this release, GRSecurity and SELinux can also be used
+in tandem).
+
+
+
+Patchset Numbering Scheme
+-------------------------------------------------------------------------------
+1XXX Base patches
+ 2XX GRSecurity extras
+ 3XX SELinux extras
+2XXX Universal hardening features
+3XXX Netdevrand
+
+Individual Patch Descriptions:
+-------------------------------------------------------------------------------
+Patch: 1000_grsecurity-2.0.1-2.6.7.patch
+from: Brad Spengler, http://grsecurity.net
+desc: GRSecurity for 2.6.7, prerelease pulled 01-Jul-2004 16:35
+
+Patch: 1010_grsec_no_depend_pax.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: remove idiotic dependency on GRSecurity for PaX
+
+Patch: 1300_linux-2.6.4-selinux-hooks.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: PaX hooks for SELinux
+
+Patch: 1305_linux-2.6.7-selinux-ipaddr.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: Support for SELinux to log the source IP address of an abuse
+
+Patch: 1310_linux-2.6.5-extra_sec_ops.patch
+from: Joshua Brindle <method@gentoo.org>
+desc: Adds additional secondary ops to SELinux
+
+Patch: 2010_tcp-stealth-2.6.7.patch
+from: Updated for 2.6.7 by Michal Purzynski <albeiro@zeus.polsl.gliwice.pl>
+desc: Stealth TCP features
+
+Patch: 3000_netdev-random-core-2.6.7.patch
+from: Michal Purzynski <albeiro@zeus.polsl.gliwice.pl>
+desc: Core functionality for netdev random
+
+Patch: 3005_netdev-random-drivers-2.6.7.patch
+from: Michal Purzynski <albeiro@zeus.polsl.gliwice.pl>
+desc: Allow network drivers to contribute to system entropy (see sketch below)
+
+Included From genpatches-base:
+--------------------------------------------------------------------
+
+Patch: 1100_ip_tables.patch
+from: http://thread.gmane.org/gmane.comp.security.bugtraq/12272
+Desc: Fix iptables DoS
+
+Patch: 1105_CAN-2004-0497.patch
+From: Chris Wright
+Desc: fix CAN-2004-0497
+
+Patch: 1110_proc.patch
+From: Chris Wright
+Desc: another proc security fix.
+
+Patch: 1310_k8_cardbus_io.patch
+from:
+desc: Patch to prevent eMachines amd64 laptops from hanging when unplugging
+      the power cord or closing the lid
+
+Patch: 1315_alpha-sysctl-uac.patch
+from:
+desc: enable control of the unaligned access control (UAC) policy via sysctl
+
+Patch: 1320_x86_64-2.6.7-2.patch
+from: ftp://ftp.x86-64.org/pub/linux/v2.6/x86_64-2.6.7-2.bz2
+desc: x86-64 patch for 2.6.7, version 2
+
+Patch: 1325_iptables-headers.patch
+from: http://bugs.gentoo.org/show_bug.cgi?id=55501
+desc: fixes bug 55501; already included in the mainline tree. To be
+      removed from this package once 2.6.8 is out.
+
+Patch: 2115_fa311-mac-address-fix.patch
+from: -mm broken-out
+desc: fix for the Netgear FA311 MAC address. Without this fix the MAC is
+      byte-swapped and has an incorrect vendor ID (and therefore a broken
+      card ID too)
+
+Patch: 2700_ppc-pegasos-2.6.6.patch
+from: http://bugs.gentoo.org/show_bug.cgi?id=54684
+desc: Allow Pegasos PCI hardware to work properly.
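
As background for the netdev-random entries above, here is a minimal,
illustrative sketch of the stock 2.6-era interface for feeding interrupt
timings into the kernel entropy pool: a driver passes SA_SAMPLE_RANDOM to
request_irq().  The driver and function names below are hypothetical, and the
fragment is not an excerpt of the 3000/3005 patches; it only shows the kind of
per-driver change that opting a NIC into the entropy pool involves.

/* Hypothetical NIC driver fragment, 2.6.x-style request_irq() signature. */
#include <linux/interrupt.h>    /* request_irq(), irqreturn_t, IRQ_HANDLED */
#include <linux/signal.h>       /* SA_SHIRQ, SA_SAMPLE_RANDOM */
#include <linux/netdevice.h>    /* struct net_device */

static irqreturn_t example_nic_interrupt(int irq, void *dev_id,
                                         struct pt_regs *regs)
{
        /* ... acknowledge the NIC and service the rx/tx rings here ... */
        return IRQ_HANDLED;
}

static int example_nic_open(struct net_device *dev)
{
        /*
         * SA_SAMPLE_RANDOM asks the IRQ core to mix the timing of these
         * interrupts into the /dev/random entropy pool; without it the
         * NIC's interrupts contribute nothing to system entropy.
         */
        return request_irq(dev->irq, example_nic_interrupt,
                           SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
}

This mirrors the split suggested by the descriptions above: 3000 carries the
core support and 3005 carries the corresponding per-driver changes.
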
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1000_grsecurity-2.0.1-2.6.7.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1000_grsecurity-2.0.1-2.6.7.patch
new file mode 100644
index 0000000000..3197640698
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1000_grsecurity-2.0.1-2.6.7.patch
@@ -0,0 +1,22165 @@
+diff -urN linux-2.6.7/Makefile linux-2.6.7/Makefile
+--- linux-2.6.7/Makefile 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/Makefile 2004-06-25 23:36:20 -0400
+@@ -1,7 +1,7 @@
+ VERSION = 2
+ PATCHLEVEL = 6
+ SUBLEVEL = 7
+-EXTRAVERSION =
++EXTRAVERSION = -grsec
+ NAME=Zonked Quokka
+
+ # *DOCUMENTATION*
+@@ -484,7 +484,7 @@
+
+
+ ifeq ($(KBUILD_EXTMOD),)
+-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/
++core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ grsecurity/
+
+ vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
+ $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
+diff -urN linux-2.6.7/arch/alpha/kernel/osf_sys.c linux-2.6.7/arch/alpha/kernel/osf_sys.c
+--- linux-2.6.7/arch/alpha/kernel/osf_sys.c 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/arch/alpha/kernel/osf_sys.c 2004-06-25 17:41:53 -0400
+@@ -37,6 +37,7 @@
+ #include <linux/namei.h>
+ #include <linux/uio.h>
+ #include <linux/vfs.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/fpu.h>
+ #include <asm/io.h>
+@@ -179,6 +180,11 @@
+ struct file *file = NULL;
+ unsigned long ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ #if 0
+ if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
+ printk("%s: unimplemented OSF mmap flags %04lx\n",
+@@ -189,6 +195,13 @@
+ if (!file)
+ goto out;
+ }
++
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ ret = -EACCES;
++ goto out;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap(file, addr, len, prot, flags, off);
+@@ -1274,6 +1287,10 @@
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+@@ -1281,8 +1298,16 @@
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++
++ addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ addr += current->mm->delta_mmap;
++#endif
++
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
++
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+diff -urN linux-2.6.7/arch/alpha/kernel/ptrace.c linux-2.6.7/arch/alpha/kernel/ptrace.c
+--- linux-2.6.7/arch/alpha/kernel/ptrace.c 2004-06-16 01:19:53 -0400
++++ linux-2.6.7/arch/alpha/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/user.h>
+ #include <linux/slab.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -288,6 +289,9 @@
+ if (!child)
+ goto out_notsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out;
+diff -urN linux-2.6.7/arch/alpha/mm/fault.c linux-2.6.7/arch/alpha/mm/fault.c
+--- linux-2.6.7/arch/alpha/mm/fault.c 2004-06-16 01:19:03 -0400
++++ linux-2.6.7/arch/alpha/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -56,6 +57,142 @@
+ __reload_thread(pcb);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->pc >= current->mm->start_code &&
++ regs->pc < current->mm->end_code)
++ {
++ if (regs->r26 == regs->pc)
++ return 1;
++
++ regs->pc += current->mm->delta_exec;
++ return 4;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long*)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U)== 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -133,8 +270,34 @@
+ good_area:
+ si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->pc, (void*)rdusp());
++ do_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -urN linux-2.6.7/arch/i386/Kconfig linux-2.6.7/arch/i386/Kconfig
+--- linux-2.6.7/arch/i386/Kconfig 2004-06-16 01:18:59 -0400
++++ linux-2.6.7/arch/i386/Kconfig 2004-06-25 17:41:53 -0400
+@@ -396,7 +396,7 @@
+
+ config X86_ALIGNMENT_16
+ bool
+- depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
++ depends on MWINCHIP3D || MWINCHIP2 || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2
+ default y
+
+ config X86_GOOD_APIC
+diff -urN linux-2.6.7/arch/i386/kernel/apm.c linux-2.6.7/arch/i386/kernel/apm.c
+--- linux-2.6.7/arch/i386/kernel/apm.c 2004-06-16 01:18:56 -0400
++++ linux-2.6.7/arch/i386/kernel/apm.c 2004-06-25 17:41:53 -0400
+@@ -597,19 +597,40 @@
+ int cpu;
+ struct desc_struct save_desc_40;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr3;
++#endif
++
+ cpus = apm_save_cpus();
+
+ cpu = get_cpu();
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
+ cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
+
++#ifndef CONFIG_PAX_KERNEXEC
+ local_save_flags(flags);
+ APM_DO_CLI;
++#endif
++
+ APM_DO_SAVE_SEGS;
+ apm_bios_call_asm(func, ebx_in, ecx_in, eax, ebx, ecx, edx, esi);
+ APM_DO_RESTORE_SEGS;
++
++#ifndef CONFIG_PAX_KERNEXEC
+ local_irq_restore(flags);
++#endif
++
+ cpu_gdt_table[cpu][0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+ apm_restore_cpus(cpus);
+
+@@ -639,20 +660,40 @@
+ int cpu;
+ struct desc_struct save_desc_40;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr3;
++#endif
+
+ cpus = apm_save_cpus();
+
+ cpu = get_cpu();
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
+ cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
+
++#ifndef CONFIG_PAX_KERNEXEC
+ local_save_flags(flags);
+ APM_DO_CLI;
++#endif
++
+ APM_DO_SAVE_SEGS;
+ error = apm_bios_call_simple_asm(func, ebx_in, ecx_in, eax);
+ APM_DO_RESTORE_SEGS;
++
++#ifndef CONFIG_PAX_KERNEXEC
+ local_irq_restore(flags);
++#endif
++
+ cpu_gdt_table[smp_processor_id()][0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+ apm_restore_cpus(cpus);
+ return error;
+diff -urN linux-2.6.7/arch/i386/kernel/cpu/common.c linux-2.6.7/arch/i386/kernel/cpu/common.c
+--- linux-2.6.7/arch/i386/kernel/cpu/common.c 2004-06-16 01:18:56 -0400
++++ linux-2.6.7/arch/i386/kernel/cpu/common.c 2004-06-25 17:41:53 -0400
+@@ -358,6 +358,10 @@
+ if (this_cpu->c_init)
+ this_cpu->c_init(c);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_NOVSYSCALL)
++ clear_bit(X86_FEATURE_SEP, c->x86_capability);
++#endif
++
+ /* Disable the PN if appropriate */
+ squash_the_stupid_serial_number(c);
+
+@@ -554,7 +558,7 @@
+ set_tss_desc(cpu,t);
+ cpu_gdt_table[cpu][GDT_ENTRY_TSS].b &= 0xfffffdff;
+ load_TR_desc();
+- load_LDT(&init_mm.context);
++ _load_LDT(&init_mm.context);
+
+ /* Set up doublefault TSS pointer in the GDT */
+ __set_tss_desc(cpu, GDT_ENTRY_DOUBLEFAULT_TSS, &doublefault_tss);
+diff -urN linux-2.6.7/arch/i386/kernel/entry.S linux-2.6.7/arch/i386/kernel/entry.S
+--- linux-2.6.7/arch/i386/kernel/entry.S 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/arch/i386/kernel/entry.S 2004-06-25 17:41:53 -0400
+@@ -266,6 +266,11 @@
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx
+ jne syscall_exit_work
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ /* if something modifies registers it must also disable sysexit */
+ movl EIP(%esp), %edx
+ movl OLDESP(%esp), %ecx
+@@ -293,6 +298,11 @@
+ movl TI_flags(%ebp), %ecx
+ testw $_TIF_ALLWORK_MASK, %cx # current->work
+ jne syscall_exit_work
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ call pax_randomize_kstack
++#endif
++
+ restore_all:
+ RESTORE_ALL
+
+@@ -600,7 +610,7 @@
+ pushl $do_spurious_interrupt_bug
+ jmp error_code
+
+-.data
++.section .rodata,"a",@progbits
+ ENTRY(sys_call_table)
+ .long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
+ .long sys_exit
+diff -urN linux-2.6.7/arch/i386/kernel/head.S linux-2.6.7/arch/i386/kernel/head.S
+--- linux-2.6.7/arch/i386/kernel/head.S 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/arch/i386/kernel/head.S 2004-06-25 17:41:53 -0400
+@@ -48,6 +48,12 @@
+
+
+ /*
++ * Real beginning of normal "text" segment
++ */
++ENTRY(stext)
++ENTRY(_stext)
++
++/*
+ * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
+ * %esi points to the real-mode code as a 32-bit pointer.
+ * CS and DS must be 4 GB flat segments, but we don't depend on
+@@ -92,9 +98,9 @@
+
+ movl $(pg0 - __PAGE_OFFSET), %edi
+ movl $(swapper_pg_dir - __PAGE_OFFSET), %edx
+- movl $0x007, %eax /* 0x007 = PRESENT+RW+USER */
++ movl $0x067, %eax /* 0x067 = DIRTY+ACCESSED+PRESENT+RW+USER */
+ 10:
+- leal 0x007(%edi),%ecx /* Create PDE entry */
++ leal 0x067(%edi),%ecx /* Create PDE entry */
+ movl %ecx,(%edx) /* Store identity PDE entry */
+ movl %ecx,page_pde_offset(%edx) /* Store kernel PDE entry */
+ addl $4,%edx
+@@ -104,8 +110,8 @@
+ addl $0x1000,%eax
+ loop 11b
+ /* End condition: we must map up to and including INIT_MAP_BEYOND_END */
+- /* bytes beyond the end of our own page tables; the +0x007 is the attribute bits */
+- leal (INIT_MAP_BEYOND_END+0x007)(%edi),%ebp
++ /* bytes beyond the end of our own page tables; the +0x067 is the attribute bits */
++ leal (INIT_MAP_BEYOND_END+0x067)(%edi),%ebp
+ cmpl %ebp,%eax
+ jb 10b
+ movl %edi,(init_pg_tables_end - __PAGE_OFFSET)
+@@ -164,7 +170,7 @@
+ movl %cr0,%eax
+ orl $0x80000000,%eax
+ movl %eax,%cr0 /* ..and set paging (PG) bit */
+- ljmp $__BOOT_CS,$1f /* Clear prefetch and normalize %eip */
++ ljmp $__BOOT_CS,$1f + __KERNEL_TEXT_OFFSET /* Clear prefetch and normalize %eip */
+ 1:
+ /* Set up the stack pointer */
+ lss stack_start,%esp
+@@ -379,31 +385,39 @@
+ iret
+
+ /*
+- * Real beginning of normal "text" segment
++ * This starts the data section.
+ */
+-ENTRY(stext)
+-ENTRY(_stext)
++.data
++ready: .byte 0
+
+-/*
+- * BSS section
+- */
+-.section ".bss.page_aligned","w"
++.section .data.swapper_pg_dir,"a",@progbits
+ ENTRY(swapper_pg_dir)
+ .fill 1024,4,0
++
++#ifdef CONFIG_PAX_KERNEXEC
++ENTRY(kernexec_pg_dir)
++ .fill 1024,4,0
++#endif
++
++.section .rodata.empty_zero_page,"a",@progbits
+ ENTRY(empty_zero_page)
+ .fill 4096,1,0
+
+ /*
+- * This starts the data section.
+- */
+-.data
++ * The IDT has to be page-aligned to simplify the Pentium
++ * F0 0F bug workaround.. We have a special link segment
++ * for this.
++ */
++.section .rodata.idt,"a",@progbits
++ENTRY(idt_table)
++ .fill 256,8,0
+
++.section .rodata,"a",@progbits
+ ENTRY(stack_start)
+ .long init_thread_union+THREAD_SIZE
+ .long __BOOT_DS
+
+-ready: .byte 0
+-
++/* This is the default interrupt "handler" :-) */
+ int_msg:
+ .asciz "Unknown interrupt or fault at EIP %p %p %p\n"
+
+@@ -445,8 +459,8 @@
+ .align L1_CACHE_BYTES
+ ENTRY(boot_gdt_table)
+ .fill GDT_ENTRY_BOOT_CS,8,0
+- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
++ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
++ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
+
+ /*
+ * The Global Descriptor Table contains 28 quadwords, per-CPU.
+@@ -457,7 +471,13 @@
+ .quad 0x0000000000000000 /* 0x0b reserved */
+ .quad 0x0000000000000000 /* 0x13 reserved */
+ .quad 0x0000000000000000 /* 0x1b reserved */
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++ .quad 0x00cf9b000000ffff /* 0x20 kernel 4GB code at 0x00000000 */
++#else
+ .quad 0x0000000000000000 /* 0x20 unused */
++#endif
++
+ .quad 0x0000000000000000 /* 0x28 unused */
+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
+@@ -466,27 +486,32 @@
+ .quad 0x0000000000000000 /* 0x53 reserved */
+ .quad 0x0000000000000000 /* 0x5b reserved */
+
+- .quad 0x00cf9a000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
+- .quad 0x00cffa000000ffff /* 0x73 user 4GB code at 0x00000000 */
+- .quad 0x00cff2000000ffff /* 0x7b user 4GB data at 0x00000000 */
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0xc0cf9b400000ffff /* 0x60 kernel 4GB code at 0xc0400000 */
++#else
++ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
++#endif
++
++ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
+
+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
+
+ /* Segments used for calling PnP BIOS */
+- .quad 0x00c09a0000000000 /* 0x90 32-bit code */
+- .quad 0x00809a0000000000 /* 0x98 16-bit code */
+- .quad 0x0080920000000000 /* 0xa0 16-bit data */
+- .quad 0x0080920000000000 /* 0xa8 16-bit data */
+- .quad 0x0080920000000000 /* 0xb0 16-bit data */
++ .quad 0x00c09b0000000000 /* 0x90 32-bit code */
++ .quad 0x00809b0000000000 /* 0x98 16-bit code */
++ .quad 0x0080930000000000 /* 0xa0 16-bit data */
++ .quad 0x0080930000000000 /* 0xa8 16-bit data */
++ .quad 0x0080930000000000 /* 0xb0 16-bit data */
+ /*
+ * The APM segments have byte granularity and their bases
+ * and limits are set at run time.
+ */
+- .quad 0x00409a0000000000 /* 0xb8 APM CS code */
+- .quad 0x00009a0000000000 /* 0xc0 APM CS 16 code (16 bit) */
+- .quad 0x0040920000000000 /* 0xc8 APM DS data */
++ .quad 0x00409b0000000000 /* 0xb8 APM CS code */
++ .quad 0x00009b0000000000 /* 0xc0 APM CS 16 code (16 bit) */
++ .quad 0x0040930000000000 /* 0xc8 APM DS data */
+
+ .quad 0x0000000000000000 /* 0xd0 - unused */
+ .quad 0x0000000000000000 /* 0xd8 - unused */
+diff -urN linux-2.6.7/arch/i386/kernel/ioport.c linux-2.6.7/arch/i386/kernel/ioport.c
+--- linux-2.6.7/arch/i386/kernel/ioport.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/arch/i386/kernel/ioport.c 2004-06-25 14:07:21 -0400
+@@ -15,6 +15,7 @@
+ #include <linux/stddef.h>
+ #include <linux/slab.h>
+ #include <linux/thread_info.h>
++#include <linux/grsecurity.h>
+
+ /* Set EXTENT bits starting at BASE in BITMAP to value TURN_ON. */
+ static void set_bitmap(unsigned long *bitmap, unsigned int base, unsigned int extent, int new_value)
+@@ -62,9 +63,16 @@
+
+ if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
+ return -EINVAL;
++#ifdef CONFIG_GRKERNSEC_IO
++ if (turn_on) {
++ gr_handle_ioperm();
++#else
+ if (turn_on && !capable(CAP_SYS_RAWIO))
++#endif
+ return -EPERM;
+-
++#ifdef CONFIG_GRKERNSEC_IO
++ }
++#endif
+ /*
+ * If it's the first ioperm() call in this thread's lifetime, set the
+ * IO bitmap up. ioperm() is much less timing critical than clone(),
+@@ -115,8 +123,13 @@
+ return -EINVAL;
+ /* Trying to gain more privileges? */
+ if (level > old) {
++#ifdef CONFIG_GRKERNSEC_IO
++ gr_handle_iopl();
++ return -EPERM;
++#else
+ if (!capable(CAP_SYS_RAWIO))
+ return -EPERM;
++#endif
+ }
+ regs->eflags = (regs->eflags &~ 0x3000UL) | (level << 12);
+ /* Make sure we return the long way (not sysenter) */
+diff -urN linux-2.6.7/arch/i386/kernel/ldt.c linux-2.6.7/arch/i386/kernel/ldt.c
+--- linux-2.6.7/arch/i386/kernel/ldt.c 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/arch/i386/kernel/ldt.c 2004-06-25 17:41:53 -0400
+@@ -102,6 +102,19 @@
+ retval = copy_ldt(&mm->context, &old_mm->context);
+ up(&old_mm->context.sem);
+ }
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (!mm->context.user_cs_limit) {
++ mm->context.user_cs_base = 0UL;
++ mm->context.user_cs_limit = ~0UL;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpus_clear(mm->context.cpu_user_cs_mask);
++#endif
++
++ }
++#endif
++
+ return retval;
+ }
+
+@@ -154,7 +167,7 @@
+ {
+ int err;
+ unsigned long size;
+- void *address;
++ const void *address;
+
+ err = 0;
+ address = &default_ldt[0];
+@@ -211,6 +224,13 @@
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && (ldt_info.contents & 2)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ entry_1 = LDT_entry_a(&ldt_info);
+ entry_2 = LDT_entry_b(&ldt_info);
+ if (oldmode)
+diff -urN linux-2.6.7/arch/i386/kernel/process.c linux-2.6.7/arch/i386/kernel/process.c
+--- linux-2.6.7/arch/i386/kernel/process.c 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/arch/i386/kernel/process.c 2004-06-25 17:41:53 -0400
+@@ -352,7 +352,7 @@
+ struct task_struct *tsk;
+ int err;
+
+- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
++ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info - sizeof(unsigned long))) - 1;
+ struct_cpy(childregs, regs);
+ childregs->eax = 0;
+ childregs->esp = esp;
+@@ -454,9 +454,8 @@
+ int dump_task_regs(struct task_struct *tsk, elf_gregset_t *regs)
+ {
+ struct pt_regs ptregs;
+-
+- ptregs = *(struct pt_regs *)
+- ((unsigned long)tsk->thread_info+THREAD_SIZE - sizeof(ptregs));
++
++ ptregs = *(struct pt_regs *)(tsk->thread.esp0 - sizeof(ptregs));
+ ptregs.xcs &= 0xffff;
+ ptregs.xds &= 0xffff;
+ ptregs.xes &= 0xffff;
+@@ -509,10 +508,18 @@
+ int cpu = smp_processor_id();
+ struct tss_struct *tss = init_tss + cpu;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++#endif
++
+ /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
+
+ __unlazy_fpu(prev_p);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ */
+@@ -523,6 +530,10 @@
+ */
+ load_TLS(next, cpu);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ /*
+ * Save away %fs and %gs. No need to save %es and %ds, as
+ * those are always kernel segments while inside the kernel.
+@@ -688,6 +699,10 @@
+ struct desc_struct *desc;
+ int cpu, idx;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++#endif
++
+ if (copy_from_user(&info, u_info, sizeof(info)))
+ return -EFAULT;
+ idx = info.entry_number;
+@@ -721,8 +736,17 @@
+ desc->a = LDT_entry_a(&info);
+ desc->b = LDT_entry_b(&info);
+ }
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel(flags, cr3);
++#endif
++
+ load_TLS(t, cpu);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
+ put_cpu();
+
+ return 0;
+@@ -776,3 +800,29 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_RANDKSTACK
++asmlinkage void pax_randomize_kstack(void)
++{
++ struct tss_struct *tss = init_tss + smp_processor_id();
++ unsigned long time;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (!pax_aslr)
++ return;
++#endif
++
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 1;
++#else
++ time &= 0x1FUL;
++ time <<= 2;
++#endif
++
++ tss->esp0 ^= time;
++ current->thread.esp0 = tss->esp0;
++}
++#endif
+diff -urN linux-2.6.7/arch/i386/kernel/ptrace.c linux-2.6.7/arch/i386/kernel/ptrace.c
+--- linux-2.6.7/arch/i386/kernel/ptrace.c 2004-06-16 01:19:03 -0400
++++ linux-2.6.7/arch/i386/kernel/ptrace.c 2004-06-25 14:08:27 -0400
+@@ -15,6 +15,7 @@
+ #include <linux/user.h>
+ #include <linux/security.h>
+ #include <linux/audit.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -264,6 +265,9 @@
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+@@ -342,6 +346,17 @@
+ if(addr == (long) &dummy->u_debugreg[5]) break;
+ if(addr < (long) &dummy->u_debugreg[4] &&
+ ((unsigned long) data) >= TASK_SIZE-3) break;
++
++#ifdef CONFIG_GRKERNSEC
++ if(addr >= (long) &dummy->u_debugreg[0] &&
++ addr <= (long) &dummy->u_debugreg[3]){
++ long reg = (addr - (long) &dummy->u_debugreg[0]) >> 2;
++ long type = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 4*reg)) & 3;
++ long align = (child->thread.debugreg[7] >> (DR_CONTROL_SHIFT + 2 + 4*reg)) & 3;
++ if((type & 1) && (data & align))
++ break;
++ }
++#endif
+
+ if(addr == (long) &dummy->u_debugreg[7]) {
+ data &= ~DR_CONTROL_RESERVED;
+diff -urN linux-2.6.7/arch/i386/kernel/reboot.c linux-2.6.7/arch/i386/kernel/reboot.c
+--- linux-2.6.7/arch/i386/kernel/reboot.c 2004-06-16 01:19:03 -0400
++++ linux-2.6.7/arch/i386/kernel/reboot.c 2004-06-25 17:41:53 -0400
+@@ -74,18 +74,18 @@
+ doesn't work with at least one type of 486 motherboard. It is easy
+ to stop this code working; hence the copious comments. */
+
+-static unsigned long long
++static const unsigned long long
+ real_mode_gdt_entries [3] =
+ {
+ 0x0000000000000000ULL, /* Null descriptor */
+- 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
+- 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
++ 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
++ 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
+ };
+
+ static struct
+ {
+ unsigned short size __attribute__ ((packed));
+- unsigned long long * base __attribute__ ((packed));
++ const unsigned long long * base __attribute__ ((packed));
+ }
+ real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
+ real_mode_idt = { 0x3ff, 0 },
+diff -urN linux-2.6.7/arch/i386/kernel/setup.c linux-2.6.7/arch/i386/kernel/setup.c
+--- linux-2.6.7/arch/i386/kernel/setup.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/arch/i386/kernel/setup.c 2004-06-25 17:41:53 -0400
+@@ -1137,7 +1137,7 @@
+
+ code_resource.start = virt_to_phys(_text);
+ code_resource.end = virt_to_phys(_etext)-1;
+- data_resource.start = virt_to_phys(_etext);
++ data_resource.start = virt_to_phys(_data);
+ data_resource.end = virt_to_phys(_edata)-1;
+
+ parse_cmdline_early(cmdline_p);
+@@ -1197,6 +1197,15 @@
+ #endif
+ }
+
++#ifdef CONFIG_PAX_SOFTMODE
++static int __init setup_pax_softmode(char *str)
++{
++ get_option (&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ #include "setup_arch_post.h"
+ /*
+ * Local Variables:
+diff -urN linux-2.6.7/arch/i386/kernel/signal.c linux-2.6.7/arch/i386/kernel/signal.c
+--- linux-2.6.7/arch/i386/kernel/signal.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/arch/i386/kernel/signal.c 2004-06-25 17:41:53 -0400
+@@ -367,7 +367,17 @@
+ if (err)
+ goto give_sigsegv;
+
++#ifdef CONFIG_PAX_NOVSYSCALL
++ restorer = frame->retcode;
++#else
+ restorer = &__kernel_sigreturn;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC)
++ restorer -= SEGMEXEC_TASK_SIZE;
++#endif
++#endif
++
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+
+@@ -450,7 +460,18 @@
+ goto give_sigsegv;
+
+ /* Set up to return from userspace. */
++
++#ifdef CONFIG_PAX_NOVSYSCALL
++ restorer = frame->retcode;
++#else
+ restorer = &__kernel_rt_sigreturn;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC)
++ restorer -= SEGMEXEC_TASK_SIZE;
++#endif
++#endif
++
+ if (ka->sa.sa_flags & SA_RESTORER)
+ restorer = ka->sa.sa_restorer;
+ err |= __put_user(restorer, &frame->pretcode);
+diff -urN linux-2.6.7/arch/i386/kernel/sys_i386.c linux-2.6.7/arch/i386/kernel/sys_i386.c
+--- linux-2.6.7/arch/i386/kernel/sys_i386.c 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/arch/i386/kernel/sys_i386.c 2004-06-25 17:41:53 -0400
+@@ -19,6 +19,7 @@
+ #include <linux/mman.h>
+ #include <linux/file.h>
+ #include <linux/utsname.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -49,6 +50,11 @@
+ int error = -EBADF;
+ struct file * file = NULL;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+@@ -56,6 +62,12 @@
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ error = -EACCES;
++ goto out;
++ }
++
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+@@ -106,6 +118,77 @@
+ return err;
+ }
+
++unsigned long
++arch_get_unmapped_area(struct file *filp, unsigned long addr,
++ unsigned long len, unsigned long pgoff, unsigned long flags)
++{
++ struct mm_struct *mm = current->mm;
++ struct vm_area_struct *vma;
++ unsigned long start_addr, start_mmap, task_unmapped_base, task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC)
++ task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
++ if (len > task_size)
++ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
++ if (addr) {
++ addr = PAGE_ALIGN(addr);
++ vma = find_vma(mm, addr);
++ if (task_size - len >= addr &&
++ (!vma || addr + len <= vma->vm_start))
++ return addr;
++ }
++ start_addr = addr = mm->free_area_cache;
++ start_mmap = PAGE_ALIGN(task_size/3);
++ task_unmapped_base = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP) {
++ start_mmap += mm->delta_mmap;
++ task_unmapped_base += mm->delta_mmap;
++ }
++#endif
++
++ if (!(flags & MAP_EXECUTABLE) && start_addr < start_mmap)
++ start_addr = addr = start_mmap;
++ else if ((flags & MAP_EXECUTABLE) && start_addr >= start_mmap)
++ start_addr = addr = task_unmapped_base;
++
++full_search:
++ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
++ /* At this point: (!vma || addr < vma->vm_end). */
++ if (task_size - len < addr) {
++ /*
++ * Start a new search - just in case we missed
++ * some holes.
++ */
++ if (start_addr != task_unmapped_base) {
++ start_addr = addr = task_unmapped_base;
++ goto full_search;
++ }
++ return -ENOMEM;
++ }
++ if (!vma || (addr + len <= vma->vm_start && (addr + len <= mm->start_brk || start_mmap <= addr))) {
++ /*
++ * Remember the place where we stopped the search:
++ */
++ mm->free_area_cache = addr + len;
++ return addr;
++ }
++ if (addr < start_mmap && addr + len > mm->start_brk) {
++ addr = start_mmap;
++ goto full_search;
++ } else
++ addr = vma->vm_end;
++ }
++}
+
+ struct sel_arg_struct {
+ unsigned long n;
+diff -urN linux-2.6.7/arch/i386/kernel/sysenter.c linux-2.6.7/arch/i386/kernel/sysenter.c
+--- linux-2.6.7/arch/i386/kernel/sysenter.c 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/arch/i386/kernel/sysenter.c 2004-06-25 17:41:53 -0400
+@@ -41,6 +41,7 @@
+ extern const char vsyscall_int80_start, vsyscall_int80_end;
+ extern const char vsyscall_sysenter_start, vsyscall_sysenter_end;
+
++#ifndef CONFIG_PAX_NOVSYSCALL
+ static int __init sysenter_setup(void)
+ {
+ unsigned long page = get_zeroed_page(GFP_ATOMIC);
+@@ -63,3 +64,4 @@
+ }
+
+ __initcall(sysenter_setup);
++#endif
+diff -urN linux-2.6.7/arch/i386/kernel/trampoline.S linux-2.6.7/arch/i386/kernel/trampoline.S
+--- linux-2.6.7/arch/i386/kernel/trampoline.S 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/arch/i386/kernel/trampoline.S 2004-06-25 17:41:53 -0400
+@@ -58,7 +58,7 @@
+ inc %ax # protected mode (PE) bit
+ lmsw %ax # into protected mode
+ # flush prefetch and jump to startup_32_smp in arch/i386/kernel/head.S
+- ljmpl $__BOOT_CS, $(startup_32_smp-__PAGE_OFFSET)
++ ljmpl $__BOOT_CS, $(startup_32_smp+__KERNEL_TEXT_OFFSET-__PAGE_OFFSET)
+
+ # These need to be in the same 64K segment as the above;
+ # hence we don't use the boot_gdt_descr defined in head.S
+diff -urN linux-2.6.7/arch/i386/kernel/traps.c linux-2.6.7/arch/i386/kernel/traps.c
+--- linux-2.6.7/arch/i386/kernel/traps.c 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/arch/i386/kernel/traps.c 2004-06-25 17:41:53 -0400
+@@ -26,6 +26,7 @@
+ #include <linux/kallsyms.h>
+ #include <linux/ptrace.h>
+ #include <linux/version.h>
++#include <linux/binfmts.h>
+
+ #ifdef CONFIG_EISA
+ #include <linux/ioport.h>
+@@ -59,18 +60,13 @@
+ asmlinkage void lcall7(void);
+ asmlinkage void lcall27(void);
+
+-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++const struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 } };
+
+ /* Do we ignore FPU interrupts ? */
+ char ignore_fpu_irq = 0;
+
+-/*
+- * The IDT has to be page-aligned to simplify the Pentium
+- * F0 0F bug workaround.. We have a special link segment
+- * for this.
+- */
+-struct desc_struct idt_table[256] __attribute__((__section__(".data.idt"))) = { {0, 0}, };
++extern struct desc_struct idt_table[256];
+
+ asmlinkage void divide_error(void);
+ asmlinkage void debug(void);
+@@ -108,13 +104,15 @@
+ unsigned long ebp)
+ {
+ unsigned long addr;
++ int i = kstack_depth_to_print;
+
+- while (valid_stack_ptr(task, (void *)ebp)) {
++ while (i && valid_stack_ptr(task, (void *)ebp)) {
+ addr = *(unsigned long *)(ebp + 4);
+ printk(" [<%08lx>] ", addr);
+ print_symbol("%s", addr);
+ printk("\n");
+ ebp = *(unsigned long *)ebp;
++ --i;
+ }
+ }
+ #else
+@@ -240,14 +238,23 @@
+ show_stack(NULL, (unsigned long*)esp);
+
+ printk("Code: ");
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if(regs->eip < PAGE_OFFSET)
+ goto bad;
++#endif
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if(__get_user(c, &((unsigned char*)regs->eip)[i+__KERNEL_TEXT_OFFSET])) {
++#else
+ if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
+ bad:
++#endif
++
+ printk(" Bad EIP value.");
+ break;
+ }
+@@ -270,8 +277,13 @@
+
+ eip = regs->eip;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ eip += __KERNEL_TEXT_OFFSET;
++#else
+ if (eip < PAGE_OFFSET)
+ goto no_bug;
++#endif
++
+ if (__get_user(ud2, (unsigned short *)eip))
+ goto no_bug;
+ if (ud2 != 0x0b0f)
+@@ -279,7 +291,13 @@
+ if (__get_user(line, (unsigned short *)(eip + 2)))
+ goto bug;
+ if (__get_user(file, (char **)(eip + 4)) ||
++
++#ifdef CONFIG_PAX_KERNEXEC
++ __get_user(c, file + __KERNEL_TEXT_OFFSET))
++#else
+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++#endif
++
+ file = "<bad filename>";
+
+ printk("------------[ cut here ]------------\n");
+@@ -441,6 +459,22 @@
+ if (!(regs->xcs & 3))
+ goto gp_in_kernel;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC)) {
++ struct mm_struct *mm = current->mm;
++ unsigned long limit;
++
++ down_write(&mm->mmap_sem);
++ limit = mm->context.user_cs_limit;
++ if (limit < TASK_SIZE) {
++ track_exec_limit(mm, limit, TASK_SIZE, PROT_EXEC);
++ up_write(&mm->mmap_sem);
++ return;
++ }
++ up_write(&mm->mmap_sem);
++ }
++#endif
++
+ current->thread.error_code = error_code;
+ current->thread.trap_no = 13;
+ force_sig(SIGSEGV, current);
+@@ -452,8 +486,16 @@
+ return;
+
+ gp_in_kernel:
+- if (!fixup_exception(regs))
++ if (!fixup_exception(regs)) {
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if ((regs->xcs & 0xFFFF) == __KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
++ }
+ }
+
+ static void mem_parity_error(unsigned char reason, struct pt_regs * regs)
+@@ -886,7 +928,7 @@
+ _set_gate(idt_table+n,15,3,addr,__KERNEL_CS);
+ }
+
+-static void __init set_call_gate(void *a, void *addr)
++static void __init set_call_gate(const void *a, void *addr)
+ {
+ _set_gate(a,12,3,addr,__KERNEL_CS);
+ }
+diff -urN linux-2.6.7/arch/i386/kernel/vmlinux.lds.S linux-2.6.7/arch/i386/kernel/vmlinux.lds.S
+--- linux-2.6.7/arch/i386/kernel/vmlinux.lds.S 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/arch/i386/kernel/vmlinux.lds.S 2004-06-25 17:41:53 -0400
+@@ -2,7 +2,12 @@
+ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+ */
+
++#include <linux/config.h>
++
+ #include <asm-generic/vmlinux.lds.h>
++#include <asm-i386/page.h>
++#include <asm-i386/segment.h>
++
+ #include <asm/thread_info.h>
+
+ OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+@@ -11,26 +16,16 @@
+ jiffies = jiffies_64;
+ SECTIONS
+ {
+- . = 0xC0000000 + 0x100000;
+- /* read-only */
+- _text = .; /* Text and read-only data */
+- .text : {
+- *(.text)
+- SCHED_TEXT
+- *(.fixup)
+- *(.gnu.warning)
+- } = 0x9090
+-
+- _etext = .; /* End of text section */
+-
+- . = ALIGN(16); /* Exception table */
+- __start___ex_table = .;
+- __ex_table : { *(__ex_table) }
+- __stop___ex_table = .;
+-
+- RODATA
++ . = __PAGE_OFFSET + 0x100000;
++ .text.startup : {
++ BYTE(0xEA) /* jmp far */
++ LONG(startup_32 + __KERNEL_TEXT_OFFSET - __PAGE_OFFSET)
++ SHORT(__BOOT_CS)
++ }
+
+ /* writeable */
++ . = ALIGN(32);
++ _data = .;
+ .data : { /* Data */
+ *(.data)
+ CONSTRUCTORS
+@@ -42,25 +37,29 @@
+ . = ALIGN(4096);
+ __nosave_end = .;
+
+- . = ALIGN(4096);
+- .data.page_aligned : { *(.data.idt) }
+-
+ . = ALIGN(32);
+ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+
+- _edata = .; /* End of data section */
+-
+ . = ALIGN(THREAD_SIZE); /* init_task */
+ .data.init_task : { *(.data.init_task) }
+
++ . = ALIGN(4096);
++ .data.page_aligned : { *(.data.swapper_pg_dir) }
++
++ _edata = .; /* End of data section */
++
++ __bss_start = .; /* BSS */
++ .bss : {
++ *(.bss.page_aligned)
++ *(.bss)
++ LONG(0)
++ }
++ . = ALIGN(4);
++ __bss_stop = .;
++
+ /* will be freed after init */
+ . = ALIGN(4096); /* Init code and data */
+ __init_begin = .;
+- .init.text : {
+- _sinittext = .;
+- *(.init.text)
+- _einittext = .;
+- }
+ .init.data : { *(.init.data) }
+ . = ALIGN(16);
+ __setup_start = .;
+@@ -89,9 +88,13 @@
+ .altinstructions : { *(.altinstructions) }
+ __alt_instructions_end = .;
+ .altinstr_replacement : { *(.altinstr_replacement) }
++
++#ifndef CONFIG_PAX_KERNEXEC
+ /* .exit.text is discard at runtime, not link time, to deal with references
+ from .altinstructions and .eh_frame */
+ .exit.text : { *(.exit.text) }
++#endif
++
+ .exit.data : { *(.exit.data) }
+ . = ALIGN(4096);
+ __initramfs_start = .;
+@@ -101,17 +104,67 @@
+ __per_cpu_start = .;
+ .data.percpu : { *(.data.percpu) }
+ __per_cpu_end = .;
++
++ /* read-only */
++
++#ifdef CONFIG_PAX_KERNEXEC
++ __init_text_start = .;
++ .init.text (. - __KERNEL_TEXT_OFFSET) : AT (__init_text_start) {
++ _sinittext = .;
++ *(.init.text)
++ _einittext = .;
++ *(.exit.text)
++ . = ALIGN(4*1024*1024) - 1;
++ BYTE(0)
++ }
+ . = ALIGN(4096);
+- __init_end = .;
++ __init_end = . + __KERNEL_TEXT_OFFSET;
+ /* freed after init ends here */
+-
+- __bss_start = .; /* BSS */
+- .bss : {
+- *(.bss.page_aligned)
+- *(.bss)
++
++/*
++ * PaX: this must be kept in synch with the KERNEL_CS base
++ * in the GDTs in arch/i386/kernel/head.S
++ */
++ _text = .; /* Text and read-only data */
++ .text : AT (. + __KERNEL_TEXT_OFFSET) {
++#else
++ .init.text : {
++ _sinittext = .;
++ *(.init.text)
++ _einittext = .;
+ }
+- . = ALIGN(4);
+- __bss_stop = .;
++ . = ALIGN(4096);
++ __init_end = .;
++ /* freed after init ends here */
++
++ _text = .; /* Text and read-only data */
++ .text : {
++#endif
++
++ *(.text)
++ SCHED_TEXT
++ *(.fixup)
++ *(.gnu.warning)
++ } = 0x9090
++
++ _etext = .; /* End of text section */
++ . += __KERNEL_TEXT_OFFSET;
++ . = ALIGN(16); /* Exception table */
++ __start___ex_table = .;
++ __ex_table : { *(__ex_table) }
++ __stop___ex_table = .;
++
++ . = ALIGN(4096);
++ .rodata.page_aligned : {
++ *(.rodata.empty_zero_page)
++ *(.rodata.idt)
++ }
++
++ RODATA
++
++#ifdef CONFIG_PAX_KERNEXEC
++ . = ALIGN(4*1024*1024);
++#endif
+
+ _end = . ;
+
+diff -urN linux-2.6.7/arch/i386/mm/fault.c linux-2.6.7/arch/i386/mm/fault.c
+--- linux-2.6.7/arch/i386/mm/fault.c 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/arch/i386/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -21,6 +21,9 @@
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/highmem.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -199,6 +202,27 @@
+
+ asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_EMUTRAMP) || defined(CONFIG_PAX_RANDEXEC)
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/* PaX: called with the page_table_lock spinlock held */
++static inline pte_t * pax_get_pte(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd || !pgd_present(*pgd))
++ return 0;
++ pmd = pmd_offset(pgd, address);
++ if (!pmd || !pmd_present(*pmd))
++ return 0;
++ return pte_offset_map(pmd, address);
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+@@ -219,6 +243,11 @@
+ int write;
+ siginfo_t info;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ pte_t *pte;
++ unsigned char pte_mask1, pte_mask2;
++#endif
++
+ /* get the address */
+ __asm__("movl %%cr2,%0":"=r" (address));
+
+@@ -262,6 +291,91 @@
+ if (in_atomic() || !mm)
+ goto bad_area_nosemaphore;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (unlikely(!(tsk->flags & PF_PAX_PAGEEXEC) || (error_code & 5) != 5))
++ goto not_pax_fault;
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely((error_code == 5) && (regs->eip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
++ do_exit(SIGKILL);
++ }
++
++ spin_lock(&mm->page_table_lock);
++ pte = pax_get_pte(mm, address);
++ if (unlikely(!pte || !(pte_val(*pte) & _PAGE_PRESENT) || pte_exec(*pte))) {
++ pte_unmap(pte);
++ spin_unlock(&mm->page_table_lock);
++ goto not_pax_fault;
++ }
++
++ if (unlikely((error_code == 7) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ pte_unmap(pte);
++ spin_unlock(&mm->page_table_lock);
++ goto not_pax_fault;
++ }
++
++ pte_mask1 = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1));
++
++#ifdef CONFIG_SMP
++ if (likely(cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)) && address >= get_limit(regs->xcs))
++ pte_mask2 = 0;
++ else
++ pte_mask2 = _PAGE_USER;
++#else
++ pte_mask2 = (address >= get_limit(regs->xcs)) ? 0 : _PAGE_USER;
++#endif
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++ "orb %2,%1\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg %0\n"
++#endif
++ "testb $0,%0\n"
++ "xorb %3,%1\n"
++ :
++ : "m" (*(char*)address), "m" (*(char*)pte), "q" (pte_mask1), "q" (pte_mask2)
++ : "memory", "cc");
++ pte_unmap(pte);
++ spin_unlock(&mm->page_table_lock);
++ return;
++
++not_pax_fault:
++#endif
++
+ down_read(&mm->mmap_sem);
+
+ vma = find_vma(mm, address);
+@@ -358,6 +472,34 @@
+ if (is_prefetch(regs, address))
+ return;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++
++#if defined(CONFIG_PAX_EMUTRAMP) || defined(CONFIG_PAX_RANDEXEC)
++ if ((error_code == 4) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ }
++#endif
++
++ if (address >= SEGMEXEC_TASK_SIZE) {
++ pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+@@ -408,6 +550,13 @@
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++
++#ifdef CONFIG_PAX_KERNEXEC
++ else if (init_mm.start_code + __KERNEL_TEXT_OFFSET <= address && address < init_mm.end_code + __KERNEL_TEXT_OFFSET)
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
++ tsk->comm, tsk->pid, tsk->uid, tsk->euid);
++#endif
++
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+@@ -509,3 +658,249 @@
+ return;
+ }
+ }
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_EMUTRAMP) || defined(CONFIG_PAX_RANDEXEC)
++/*
++ * PaX: decide what to do with offenders (regs->eip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when gcc trampoline was detected
++ * 3 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ static const unsigned char trans[8] = {6, 1, 2, 0, 13, 5, 3, 4};
++#endif
++
++#if defined(CONFIG_PAX_RANDEXEC) || defined(CONFIG_PAX_EMUTRAMP)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ unsigned long esp_4;
++
++ if (regs->eip >= current->mm->start_code &&
++ regs->eip < current->mm->end_code)
++ {
++ err = get_user(esp_4, (unsigned long*)(regs->esp-4UL));
++ if (err || esp_4 == regs->eip)
++ return 1;
++
++ regs->eip += current->mm->delta_exec;
++ return 3;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov1, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(mov2, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(jmp, (unsigned short *)(regs->eip + 10));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-2));
++ if (err)
++ break;
++
++ if ((mov1 & 0xF8) == 0xB8 &&
++ (mov2 & 0xF8) == 0xB8 &&
++ (mov1 & 0x07) != (mov2 & 0x07) &&
++ (jmp & 0xF8FF) == 0xE0FF &&
++ (mov2 & 0x07) == ((jmp>>8) & 0x07) &&
++ (call & 0xF8FF) == 0xD0FF &&
++ regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
++ {
++ ((unsigned long *)regs)[trans[mov1 & 0x07]] = addr1;
++ ((unsigned long *)regs)[trans[mov2 & 0x07]] = addr2;
++ regs->eip = addr2;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-2));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ (call & 0xF8FF) == 0xD0FF &&
++ regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
++ {
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #3 */
++ unsigned char mov, jmp;
++ char offset;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-3));
++ err |= get_user(offset, (char *)(ret-1));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x55FF)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->ebp + (unsigned long)(long)offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #4 */
++ unsigned char mov, jmp, sib;
++ char offset;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-4));
++ err |= get_user(sib, (unsigned char *)(ret-2));
++ err |= get_user(offset, (char *)(ret-1));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x54FF &&
++ sib == 0x24)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->esp + 4 + (unsigned long)(long)offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #5 */
++ unsigned char mov, jmp, sib;
++ unsigned long addr1, addr2, ret, offset;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-7));
++ err |= get_user(sib, (unsigned char *)(ret-5));
++ err |= get_user(offset, (unsigned long *)(ret-4));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x94FF &&
++ sib == 0x24)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->esp + 4 + offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1; /* PaX in action */
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%02x ", c);
++ }
++ printk("\n");
++
++ printk(KERN_ERR "PAX: bytes at SP: ");
++ for (i = 0; i < 20; i++) {
++ unsigned long c;
++ if (get_user(c, (unsigned long*)sp+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08lx ", c);
++ }
++ printk("\n");
++}
++#endif
+diff -urN linux-2.6.7/arch/i386/mm/init.c linux-2.6.7/arch/i386/mm/init.c
+--- linux-2.6.7/arch/i386/mm/init.c 2004-06-16 01:19:44 -0400
++++ linux-2.6.7/arch/i386/mm/init.c 2004-06-25 17:41:53 -0400
+@@ -40,6 +40,7 @@
+ #include <asm/tlb.h>
+ #include <asm/tlbflush.h>
+ #include <asm/sections.h>
++#include <asm/desc.h>
+
+ DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
+ unsigned long highstart_pfn, highend_pfn;
+@@ -415,6 +416,10 @@
+ #endif
+ __flush_tlb_all();
+
++#ifdef CONFIG_PAX_KERNEXEC
++ memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir));
++#endif
++
+ kmap_init();
+ zone_sizes_init();
+ }
+@@ -509,7 +514,7 @@
+ set_highmem_pages_init(bad_ppro);
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_data;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ kclist_add(&kcore_mem, __va(0), max_low_pfn << PAGE_SHIFT);
+@@ -608,6 +613,42 @@
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (__init_end - __init_begin) >> 10);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ /* PaX: limit KERNEL_CS to actual size */
++ {
++ unsigned long limit;
++ int cpu;
++ pgd_t *pgd;
++ pmd_t *pmd;
++
++ limit = (unsigned long)&_etext >> PAGE_SHIFT;
++ for (cpu = 0; cpu < NR_CPUS; cpu++) {
++ cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].a = (cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
++ cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].b = (cpu_gdt_table[cpu][GDT_ENTRY_KERNEL_CS].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);
++
++#ifdef CONFIG_PCI_BIOS
++ printk(KERN_INFO "PAX: warning, PCI BIOS might still be in use, keeping flat KERNEL_CS.\n");
++#endif
++
++ }
++
++ /* PaX: make KERNEL_CS read-only */
++ for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
++ pgd = pgd_offset_k(addr);
++ pmd = pmd_offset(pgd, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_GLOBAL));
++ }
++ memcpy(kernexec_pg_dir, swapper_pg_dir, sizeof(kernexec_pg_dir));
++ for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
++ pgd = pgd_offset_k(addr);
++ pmd = pmd_offset(pgd, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++ flush_tlb_all();
++ }
++#endif
++
+ }
+
+ #ifdef CONFIG_BLK_DEV_INITRD
+diff -urN linux-2.6.7/arch/i386/pci/pcbios.c linux-2.6.7/arch/i386/pci/pcbios.c
+--- linux-2.6.7/arch/i386/pci/pcbios.c 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/arch/i386/pci/pcbios.c 2004-06-25 17:41:53 -0400
+@@ -6,7 +6,7 @@
+ #include <linux/init.h>
+ #include "pci.h"
+ #include "pci-functions.h"
+-
++#include <asm/desc.h>
+
+ /* BIOS32 signature: "_32_" */
+ #define BIOS32_SIGNATURE (('_' << 0) + ('3' << 8) + ('2' << 16) + ('_' << 24))
+@@ -33,6 +33,12 @@
+ * and the PCI BIOS specification.
+ */
+
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++#define __FLAT_KERNEL_CS 0x20
++#else
++#define __FLAT_KERNEL_CS __KERNEL_CS
++#endif
++
+ union bios32 {
+ struct {
+ unsigned long signature; /* _32_ */
+@@ -55,7 +61,7 @@
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect = { 0, __FLAT_KERNEL_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+@@ -96,7 +102,9 @@
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect = { 0, __FLAT_KERNEL_CS };
++
++#undef __FLAT_KERNEL_CS
+
+ static int pci_bios_present;
+
+diff -urN linux-2.6.7/arch/ia64/ia32/binfmt_elf32.c linux-2.6.7/arch/ia64/ia32/binfmt_elf32.c
+--- linux-2.6.7/arch/ia64/ia32/binfmt_elf32.c 2004-06-16 01:20:03 -0400
++++ linux-2.6.7/arch/ia64/ia32/binfmt_elf32.c 2004-06-25 17:41:53 -0400
+@@ -41,6 +41,17 @@
+ #undef SET_PERSONALITY
+ #define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#endif
++
+ /* Ugly but avoids duplication */
+ #include "../../../fs/binfmt_elf.c"
+
+diff -urN linux-2.6.7/arch/ia64/ia32/ia32priv.h linux-2.6.7/arch/ia64/ia32/ia32priv.h
+--- linux-2.6.7/arch/ia64/ia32/ia32priv.h 2004-06-16 01:20:19 -0400
++++ linux-2.6.7/arch/ia64/ia32/ia32priv.h 2004-06-25 17:41:53 -0400
+@@ -295,7 +295,14 @@
+ #define ELF_ARCH EM_386
+
+ #define IA32_PAGE_OFFSET 0xc0000000
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
+
+ /*
+ * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
+diff -urN linux-2.6.7/arch/ia64/ia32/sys_ia32.c linux-2.6.7/arch/ia64/ia32/sys_ia32.c
+--- linux-2.6.7/arch/ia64/ia32/sys_ia32.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/arch/ia64/ia32/sys_ia32.c 2004-06-25 17:41:53 -0400
+@@ -433,6 +433,11 @@
+
+ flags = a.flags;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(a.fd);
+@@ -454,6 +459,11 @@
+ struct file *file = NULL;
+ unsigned long retval;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -urN linux-2.6.7/arch/ia64/kernel/ptrace.c linux-2.6.7/arch/ia64/kernel/ptrace.c
+--- linux-2.6.7/arch/ia64/kernel/ptrace.c 2004-06-16 01:20:25 -0400
++++ linux-2.6.7/arch/ia64/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -17,6 +17,7 @@
+ #include <linux/smp_lock.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -1314,6 +1315,9 @@
+ if (pid == 1) /* no messing around with init! */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.7/arch/ia64/kernel/sys_ia64.c linux-2.6.7/arch/ia64/kernel/sys_ia64.c
+--- linux-2.6.7/arch/ia64/kernel/sys_ia64.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/arch/ia64/kernel/sys_ia64.c 2004-06-25 17:41:53 -0400
+@@ -18,6 +18,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/highuid.h>
+ #include <linux/hugetlb.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/shmparam.h>
+ #include <asm/uaccess.h>
+@@ -27,7 +28,7 @@
+ unsigned long pgoff, unsigned long flags)
+ {
+ long map_shared = (flags & MAP_SHARED);
+- unsigned long start_addr, align_mask = PAGE_SIZE - 1;
++ unsigned long start_addr, align_mask = PAGE_SIZE - 1, task_unmapped_base = TASK_UNMAPPED_BASE;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+
+@@ -38,6 +39,15 @@
+ if (REGION_NUMBER(addr) == REGION_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ task_unmapped_base += mm->delta_mmap;
++ if ((current->flags & PF_PAX_RANDMMAP) && addr && filp)
++ addr = mm->free_area_cache;
++ else
++#endif
++
+ if (!addr)
+ addr = mm->free_area_cache;
+
+@@ -56,9 +66,9 @@
+ for (vma = find_vma(mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (TASK_SIZE - len < addr || RGN_MAP_LIMIT - len < REGION_OFFSET(addr)) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
++ if (start_addr != task_unmapped_base) {
+ /* Start a new search --- just in case we missed some holes. */
+- addr = TASK_UNMAPPED_BASE;
++ addr = task_unmapped_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+@@ -185,6 +195,11 @@
+ unsigned long roff;
+ struct file *file = 0;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+@@ -222,6 +237,11 @@
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ addr = -EACCES;
++ goto out;
++ }
++
+ down_write(&current->mm->mmap_sem);
+ addr = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.7/arch/ia64/mm/fault.c linux-2.6.7/arch/ia64/mm/fault.c
+--- linux-2.6.7/arch/ia64/mm/fault.c 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/arch/ia64/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -9,6 +9,7 @@
+ #include <linux/mm.h>
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
++#include <linux/binfmts.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+@@ -70,6 +71,54 @@
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->cr_iip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_RANDEXEC
++ int err;
++
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->cr_iip >= current->mm->start_code &&
++ regs->cr_iip < current->mm->end_code)
++ {
++#if 0
++ /* PaX: this needs fixing */
++ if (regs->b0 == regs->cr_iip)
++ return 1;
++#endif
++ regs->cr_iip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -125,9 +174,31 @@
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
+ | (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->cr_iip, (void*)regs->r12);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urN linux-2.6.7/arch/mips/kernel/binfmt_elfn32.c linux-2.6.7/arch/mips/kernel/binfmt_elfn32.c
+--- linux-2.6.7/arch/mips/kernel/binfmt_elfn32.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/arch/mips/kernel/binfmt_elfn32.c 2004-06-25 17:41:53 -0400
+@@ -50,6 +50,17 @@
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/config.h>
+diff -urN linux-2.6.7/arch/mips/kernel/binfmt_elfo32.c linux-2.6.7/arch/mips/kernel/binfmt_elfo32.c
+--- linux-2.6.7/arch/mips/kernel/binfmt_elfo32.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/arch/mips/kernel/binfmt_elfo32.c 2004-06-25 17:41:53 -0400
+@@ -52,6 +52,17 @@
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/config.h>
+diff -urN linux-2.6.7/arch/mips/kernel/syscall.c linux-2.6.7/arch/mips/kernel/syscall.c
+--- linux-2.6.7/arch/mips/kernel/syscall.c 2004-06-16 01:19:44 -0400
++++ linux-2.6.7/arch/mips/kernel/syscall.c 2004-06-25 17:41:53 -0400
+@@ -86,6 +86,11 @@
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -96,6 +101,13 @@
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ addr = TASK_UNMAPPED_BASE;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+diff -urN linux-2.6.7/arch/mips/mm/fault.c linux-2.6.7/arch/mips/mm/fault.c
+--- linux-2.6.7/arch/mips/mm/fault.c 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/arch/mips/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -27,6 +27,24 @@
+ #include <asm/uaccess.h>
+ #include <asm/ptrace.h>
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -urN linux-2.6.7/arch/parisc/kernel/ptrace.c linux-2.6.7/arch/parisc/kernel/ptrace.c
+--- linux-2.6.7/arch/parisc/kernel/ptrace.c 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/arch/parisc/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -17,6 +17,7 @@
+ #include <linux/personality.h>
+ #include <linux/security.h>
+ #include <linux/compat.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgtable.h>
+@@ -114,6 +115,9 @@
+ if (pid == 1) /* no messing around with init! */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.7/arch/parisc/kernel/sys_parisc.c linux-2.6.7/arch/parisc/kernel/sys_parisc.c
+--- linux-2.6.7/arch/parisc/kernel/sys_parisc.c 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/arch/parisc/kernel/sys_parisc.c 2004-06-25 17:41:53 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/shm.h>
+ #include <linux/smp_lock.h>
+ #include <linux/syscalls.h>
++#include <linux/grsecurity.h>
+
+ int sys_pipe(int *fildes)
+ {
+@@ -104,6 +105,13 @@
+ {
+ if (len > TASK_SIZE)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -123,12 +131,23 @@
+ {
+ struct file * file = NULL;
+ unsigned long error = -EBADF;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ return -EACCES;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.7/arch/parisc/kernel/sys_parisc32.c linux-2.6.7/arch/parisc/kernel/sys_parisc32.c
+--- linux-2.6.7/arch/parisc/kernel/sys_parisc32.c 2004-06-16 01:19:11 -0400
++++ linux-2.6.7/arch/parisc/kernel/sys_parisc32.c 2004-06-25 14:07:21 -0400
+@@ -48,6 +48,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/swap.h>
+ #include <linux/syscalls.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/types.h>
+ #include <asm/uaccess.h>
+diff -urN linux-2.6.7/arch/parisc/kernel/traps.c linux-2.6.7/arch/parisc/kernel/traps.c
+--- linux-2.6.7/arch/parisc/kernel/traps.c 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/arch/parisc/kernel/traps.c 2004-06-25 17:41:53 -0400
+@@ -656,9 +656,7 @@
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -urN linux-2.6.7/arch/parisc/mm/fault.c linux-2.6.7/arch/parisc/mm/fault.c
+--- linux-2.6.7/arch/parisc/mm/fault.c 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/arch/parisc/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -16,6 +16,8 @@
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/unistd.h>
++#include <linux/binfmts.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -54,7 +56,7 @@
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -140,6 +142,139 @@
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUTRAMP)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (instruction_pointer(regs) >= current->mm->start_code &&
++ instruction_pointer(regs) < current->mm->end_code)
++ {
++#if 0
++ /* PaX: this needs fixing */
++ if ((regs->gr[2] & ~3UL) == instruction_pointer(regs))
++ return 1;
++#endif
++ regs->iaoq[0] += current->mm->delta_exec;
++ if ((regs->iaoq[1] & ~3UL) >= current->mm->start_code &&
++ (regs->iaoq[1] & ~3UL) < current->mm->end_code)
++ regs->iaoq[1] += current->mm->delta_exec;
++ return 4;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int*)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int*)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int*)addr);
++ err |= get_user(bv, (unsigned int*)(addr+4));
++ err |= get_user(ldw2, (unsigned int*)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int*)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int*)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void do_page_fault(struct pt_regs *regs, unsigned long code,
+ unsigned long address)
+ {
+@@ -165,8 +300,38 @@
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)instruction_pointer(regs), (void*)regs->gr[30]);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -urN linux-2.6.7/arch/ppc/kernel/ptrace.c linux-2.6.7/arch/ppc/kernel/ptrace.c
+--- linux-2.6.7/arch/ppc/kernel/ptrace.c 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/arch/ppc/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -26,6 +26,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/user.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/page.h>
+@@ -202,6 +203,9 @@
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.7/arch/ppc/kernel/syscalls.c linux-2.6.7/arch/ppc/kernel/syscalls.c
+--- linux-2.6.7/arch/ppc/kernel/syscalls.c 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/arch/ppc/kernel/syscalls.c 2004-06-25 17:41:53 -0400
+@@ -36,6 +36,7 @@
+ #include <linux/utsname.h>
+ #include <linux/file.h>
+ #include <linux/unistd.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -165,12 +166,23 @@
+ struct file * file = NULL;
+ int ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (!(file = fget(fd)))
+ goto out;
+ }
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ ret = -EACCES;
++ goto out;
++ }
++
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
+ up_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.7/arch/ppc/mm/fault.c linux-2.6.7/arch/ppc/mm/fault.c
+--- linux-2.6.7/arch/ppc/mm/fault.c 2004-06-16 01:19:03 -0400
++++ linux-2.6.7/arch/ppc/mm/fault.c 2004-06-30 13:03:30 -0400
+@@ -28,6 +28,11 @@
+ #include <linux/interrupt.h>
+ #include <linux/highmem.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
++#include <linux/unistd.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -51,6 +56,363 @@
+ unsigned long pte_errors; /* updated by do_page_fault() */
+ unsigned int probingmem;
+
++#ifdef CONFIG_PAX_EMUSIGRT
++void pax_syscall_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_syscall = 0UL;
++}
++
++static struct page* pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return NOPAGE_OOM;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x44000002U; /* sc */
++ __flush_dcache_icache(kaddr);
++ kunmap(page);
++ if (type)
++ *type = VM_FAULT_MAJOR;
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_syscall_close,
++ nopage: pax_syscall_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ memset(vma, 0, sizeof(*vma));
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched GOT trampoline was detected
++ * 3 when patched PLT trampoline was detected
++ * 4 when unpatched PLT trampoline was detected
++ * 5 when legitimate ET_EXEC was detected
++ * 6 when sigreturn trampoline was detected
++ * 7 when rt_sigreturn trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->nip >= current->mm->start_code &&
++ regs->nip < current->mm->end_code)
++ {
++ if (regs->link == regs->nip)
++ return 1;
++
++ regs->nip += current->mm->delta_exec;
++ return 5;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched GOT emulation */
++ unsigned int blrl;
++
++ err = get_user(blrl, (unsigned int*)regs->nip);
++
++ if (!err && blrl == 0x4E800021U) {
++ unsigned long temp = regs->nip;
++
++ regs->nip = regs->link & 0xFFFFFFFCUL;
++ regs->link = temp + 4UL;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int b;
++
++ err = get_user(b, (unsigned int *)regs->nip);
++
++ if (!err && (b & 0xFC000003U) == 0x48000000U) {
++ regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
++ return 3;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation #1 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(rlwinm, (unsigned int*)addr);
++ err |= get_user(add, (unsigned int*)(addr+4));
++ err |= get_user(li2, (unsigned int*)(addr+8));
++ err |= get_user(addis2, (unsigned int*)(addr+12));
++ err |= get_user(mtctr, (unsigned int*)(addr+16));
++ err |= get_user(li3, (unsigned int*)(addr+20));
++ err |= get_user(addis3, (unsigned int*)(addr+24));
++ err |= get_user(bctr, (unsigned int*)(addr+28));
++
++ if (err)
++ break;
++
++ if (rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++
++#if 0
++ do { /* PaX: unpatched PLT emulation #2 */
++ unsigned int lis, lwzu, b, bctr;
++
++ err = get_user(lis, (unsigned int *)regs->nip);
++ err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
++ err |= get_user(b, (unsigned int *)(regs->nip+8));
++ err |= get_user(bctr, (unsigned int *)(regs->nip+12));
++
++ if (err)
++ break;
++
++ if ((lis & 0xFFFF0000U) == 0x39600000U &&
++ (lwzu & 0xU) == 0xU &&
++ (b & 0xFC000003U) == 0x48000000U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int*)addr);
++ err |= get_user(addi, (unsigned int*)(addr+4));
++ err |= get_user(rlwinm, (unsigned int*)(addr+8));
++ err |= get_user(add, (unsigned int*)(addr+12));
++ err |= get_user(li2, (unsigned int*)(addr+16));
++ err |= get_user(addis2, (unsigned int*)(addr+20));
++ err |= get_user(mtctr, (unsigned int*)(addr+24));
++ err |= get_user(li3, (unsigned int*)(addr+28));
++ err |= get_user(addis3, (unsigned int*)(addr+32));
++ err |= get_user(bctr, (unsigned int*)(addr+36));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (addi & 0xFFFF0000U) == 0x396B0000U &&
++ rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] =
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: unpatched PLT emulation #3 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int addis, lwz, mtctr, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int*)addr);
++ err |= get_user(lwz, (unsigned int*)(addr+4));
++ err |= get_user(mtctr, (unsigned int*)(addr+8));
++ err |= get_user(bctr, (unsigned int*)(addr+12));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (lwz & 0xFFFF0000U) == 0x816B0000U &&
++ mtctr == 0x7D6903A6U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int r11;
++
++ addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++
++ err = get_user(r11, (unsigned int*)addr);
++ if (err)
++ break;
++
++ regs->gpr[PT_R11] = r11;
++ regs->ctr = r11;
++ regs->nip = r11;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ do { /* PaX: sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38000000U + __NR_sigreturn && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned long call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_syscall);
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->gpr[PT_R0] = __NR_sigreturn;
++ regs->nip = call_syscall;
++ return 6;
++ }
++ } while (0);
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38000000U + __NR_rt_sigreturn && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned long call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto rt_emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto rt_emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_syscall);
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++rt_emulate:
++ regs->gpr[PT_R0] = __NR_rt_sigreturn;
++ regs->nip = call_syscall;
++ return 7;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -111,7 +473,7 @@
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (TRAP(regs) == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & 0x02000000;
+ #endif /* CONFIG_4xx */
+@@ -205,15 +567,14 @@
+ } else if (TRAP(regs) == 0x400) {
+ pte_t *ptep;
+
+-#if 0
++#if 1
+ /* It would be nice to actually enforce the VM execute
+ permission on CPUs which can do so, but far too
+ much stuff in userspace doesn't get the permissions
+ right, so we let any page be executed for now. */
+ if (! (vma->vm_flags & VM_EXEC))
+ goto bad_area;
+-#endif
+-
++#else
+ /* Since 4xx supports per-page execute permission,
+ * we lazily flush dcache to icache. */
+ ptep = NULL;
+@@ -233,6 +594,7 @@
+ if (ptep != NULL)
+ pte_unmap(ptep);
+ #endif
++#endif
+ /* a read */
+ } else {
+ /* protection fault */
+@@ -278,6 +640,38 @@
+
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC) {
++ if ((TRAP(regs) == 0x400) && (regs->nip == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ case 4:
++ return 0;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 5:
++ return 0;
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ case 6:
++ case 7:
++ return 0;
++#endif
++
++ }
++
++ pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[1]);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = code;
+diff -urN linux-2.6.7/arch/ppc64/kernel/syscalls.c linux-2.6.7/arch/ppc64/kernel/syscalls.c
+--- linux-2.6.7/arch/ppc64/kernel/syscalls.c 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/arch/ppc64/kernel/syscalls.c 2004-06-25 17:41:53 -0400
+@@ -177,6 +177,11 @@
+ struct file * file = NULL;
+ unsigned long ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (!(file = fget(fd)))
+ goto out;
+diff -urN linux-2.6.7/arch/ppc64/mm/fault.c linux-2.6.7/arch/ppc64/mm/fault.c
+--- linux-2.6.7/arch/ppc64/mm/fault.c 2004-06-16 01:19:44 -0400
++++ linux-2.6.7/arch/ppc64/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -29,6 +29,7 @@
+ #include <linux/interrupt.h>
+ #include <linux/smp_lock.h>
+ #include <linux/module.h>
++#include <linux/binfmts.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -75,6 +76,54 @@
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#if defined(CONFIG_PAX_EMUPLT) || defined(CONFIG_PAX_EMUSIGRT)
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->nip >= current->mm->start_code &&
++ regs->nip < current->mm->end_code)
++ {
++ if (regs->link == regs->nip)
++ return 1;
++
++ regs->nip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * The error_code parameter is
+ * - DSISR for a non-SLB data access fault,
+@@ -202,6 +251,25 @@
+
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC) {
++ if ((regs->trap == 0x400) && (regs->nip == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++
++ pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[1]);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = code;
+diff -urN linux-2.6.7/arch/sparc/kernel/ptrace.c linux-2.6.7/arch/sparc/kernel/ptrace.c
+--- linux-2.6.7/arch/sparc/kernel/ptrace.c 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/arch/sparc/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -18,6 +18,7 @@
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/system.h>
+@@ -322,6 +323,11 @@
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, request)) {
++ pt_error_return(regs, EPERM);
++ goto out_tsk;
++ }
++
+ if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+ || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+ if (ptrace_attach(child)) {
+diff -urN linux-2.6.7/arch/sparc/kernel/sys_sparc.c linux-2.6.7/arch/sparc/kernel/sys_sparc.c
+--- linux-2.6.7/arch/sparc/kernel/sys_sparc.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/arch/sparc/kernel/sys_sparc.c 2004-06-25 17:41:53 -0400
+@@ -21,6 +21,7 @@
+ #include <linux/utsname.h>
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -55,6 +56,13 @@
+ return -ENOMEM;
+ if (ARCH_SUN4C_SUN4 && len > 0x20000000)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -228,6 +236,11 @@
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+@@ -246,6 +259,12 @@
+ if (len > TASK_SIZE - PAGE_SIZE || addr + len > TASK_SIZE - PAGE_SIZE)
+ goto out_putf;
+
++ if (gr_handle_mmap(file, prot)) {
++ fput(file);
++ retval = -EACCES;
++ goto out;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+
+ down_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.7/arch/sparc/kernel/sys_sunos.c linux-2.6.7/arch/sparc/kernel/sys_sunos.c
+--- linux-2.6.7/arch/sparc/kernel/sys_sunos.c 2004-06-16 01:19:17 -0400
++++ linux-2.6.7/arch/sparc/kernel/sys_sunos.c 2004-06-25 17:41:53 -0400
+@@ -71,6 +71,11 @@
+ struct file * file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+diff -urN linux-2.6.7/arch/sparc/mm/fault.c linux-2.6.7/arch/sparc/mm/fault.c
+--- linux-2.6.7/arch/sparc/mm/fault.c 2004-06-16 01:19:11 -0400
++++ linux-2.6.7/arch/sparc/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -21,6 +21,10 @@
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
+ #include <linux/module.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/segment.h>
+@@ -220,6 +224,269 @@
+ return safe_compute_effective_address(regs, insn);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_emuplt_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return NOPAGE_OOM;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(page);
++ kunmap(page);
++ if (type)
++ *type = VM_FAULT_MAJOR;
++
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_emuplt_close,
++ nopage: pax_emuplt_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ memset(vma, 0, sizeof(*vma));
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->pc >= current->mm->start_code &&
++ regs->pc < current->mm->end_code)
++ {
++ if (regs->u_regs[UREG_RETPC] + 8UL == regs->pc)
++ return 1;
++
++ regs->pc += current->mm->delta_exec;
++ if (regs->npc >= current->mm->start_code &&
++ regs->npc < current->mm->end_code)
++ regs->npc += current->mm->delta_exec;
++ return 4;
++ }
++ if (regs->pc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->pc < current->mm->end_code + current->mm->delta_exec)
++ {
++ regs->pc -= current->mm->delta_exec;
++ if (regs->npc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->npc < current->mm->end_code + current->mm->delta_exec)
++ regs->npc -= current->mm->delta_exec;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int*)regs->pc);
++ err |= get_user(sethi2, (unsigned int*)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int*)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + 4 + (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->pc);
++ err |= get_user(jmpl, (unsigned int*)(regs->pc+4));
++ err |= get_user(nop, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->pc);
++ err |= get_user(ba, (unsigned int*)(regs->pc+4));
++ err |= get_user(nop, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int*)addr);
++ err |= get_user(call, (unsigned int*)(addr+4));
++ err |= get_user(nop, (unsigned int*)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_dl_resolve);
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int*)(regs->pc-4));
++ err |= get_user(call, (unsigned int*)regs->pc);
++ err |= get_user(nop, (unsigned int*)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+ {
+@@ -283,6 +550,29 @@
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->pc, (void*)regs->u_regs[UREG_FP]);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
+diff -urN linux-2.6.7/arch/sparc/mm/init.c linux-2.6.7/arch/sparc/mm/init.c
+--- linux-2.6.7/arch/sparc/mm/init.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/arch/sparc/mm/init.c 2004-06-25 17:41:53 -0400
+@@ -337,17 +337,17 @@
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
+diff -urN linux-2.6.7/arch/sparc/mm/srmmu.c linux-2.6.7/arch/sparc/mm/srmmu.c
+--- linux-2.6.7/arch/sparc/mm/srmmu.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/arch/sparc/mm/srmmu.c 2004-06-25 17:41:53 -0400
+@@ -2149,6 +2149,13 @@
+ BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ BTFIXUPSET_INT(page_shared_noexec, pgprot_val(SRMMU_PAGE_SHARED_NOEXEC));
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+ pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
+diff -urN linux-2.6.7/arch/sparc64/kernel/itlb_base.S linux-2.6.7/arch/sparc64/kernel/itlb_base.S
+--- linux-2.6.7/arch/sparc64/kernel/itlb_base.S 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/arch/sparc64/kernel/itlb_base.S 2004-06-25 17:41:53 -0400
+@@ -41,7 +41,9 @@
+ CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
+ ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE
+ 1: brgez,pn %g5, 3f ! Not valid, branch out
+- nop ! Delay-slot
++ and %g5, _PAGE_EXEC, %g4
++ brz,pn %g4, 3f ! Not executable, branch out
++ nop ! Delay-slot
+ 2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
+ retry ! Trap return
+ 3: rdpr %pstate, %g4 ! Move into alternate globals
+@@ -74,8 +76,6 @@
+ nop
+ nop
+ nop
+- nop
+- nop
+ CREATE_VPTE_NOP
+
+ #undef CREATE_VPTE_OFFSET1
+diff -urN linux-2.6.7/arch/sparc64/kernel/ptrace.c linux-2.6.7/arch/sparc64/kernel/ptrace.c
+--- linux-2.6.7/arch/sparc64/kernel/ptrace.c 2004-06-16 01:20:18 -0400
++++ linux-2.6.7/arch/sparc64/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -19,6 +19,7 @@
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/asi.h>
+ #include <asm/pgtable.h>
+@@ -173,6 +174,11 @@
+ goto out;
+ }
+
++ if (gr_handle_ptrace(child, (long)request)) {
++ pt_error_return(regs, EPERM);
++ goto out_tsk;
++ }
++
+ if ((current->personality == PER_SUNOS && request == PTRACE_SUNATTACH)
+ || (current->personality != PER_SUNOS && request == PTRACE_ATTACH)) {
+ if (ptrace_attach(child)) {
+diff -urN linux-2.6.7/arch/sparc64/kernel/sys_sparc.c linux-2.6.7/arch/sparc64/kernel/sys_sparc.c
+--- linux-2.6.7/arch/sparc64/kernel/sys_sparc.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/arch/sparc64/kernel/sys_sparc.c 2004-06-25 17:41:53 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/ipc.h>
+ #include <linux/personality.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/ipc.h>
+@@ -49,7 +50,7 @@
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct * vma;
+- unsigned long task_size = TASK_SIZE;
++ unsigned long task_size = TASK_SIZE, task_unmapped_base = TASK_UNMAPPED_BASE;
+ unsigned long start_addr;
+ int do_color_align;
+
+@@ -72,6 +73,12 @@
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ task_unmapped_base += mm->delta_mmap;
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -101,8 +108,8 @@
+ vma = find_vma(mm, PAGE_OFFSET);
+ }
+ if (task_size < addr) {
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != task_unmapped_base) {
++ start_addr = addr = task_unmapped_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+@@ -319,11 +326,22 @@
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
++
++ if (gr_handle_mmap(file, prot)) {
++ retval = -EACCES;
++ goto out_putf;
++ }
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ len = PAGE_ALIGN(len);
+ retval = -EINVAL;
+diff -urN linux-2.6.7/arch/sparc64/kernel/sys_sparc32.c linux-2.6.7/arch/sparc64/kernel/sys_sparc32.c
+--- linux-2.6.7/arch/sparc64/kernel/sys_sparc32.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/arch/sparc64/kernel/sys_sparc32.c 2004-06-25 14:21:03 -0400
+@@ -54,6 +54,7 @@
+ #include <linux/netfilter_ipv4/ip_tables.h>
+ #include <linux/ptrace.h>
+ #include <linux/highuid.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/types.h>
+ #include <asm/ipc.h>
+diff -urN linux-2.6.7/arch/sparc64/kernel/sys_sunos32.c linux-2.6.7/arch/sparc64/kernel/sys_sunos32.c
+--- linux-2.6.7/arch/sparc64/kernel/sys_sunos32.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/arch/sparc64/kernel/sys_sunos32.c 2004-06-25 17:41:53 -0400
+@@ -66,6 +66,11 @@
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+diff -urN linux-2.6.7/arch/sparc64/mm/fault.c linux-2.6.7/arch/sparc64/mm/fault.c
+--- linux-2.6.7/arch/sparc64/mm/fault.c 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/arch/sparc64/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -18,6 +18,10 @@
+ #include <linux/smp_lock.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
++#include <linux/binfmts.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -310,6 +314,361 @@
+ unhandled_fault (address, current, regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_EMUPLT
++static void pax_emuplt_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int *type)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return NOPAGE_OOM;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(page);
++ kunmap(page);
++ if (type)
++ *type = VM_FAULT_MAJOR;
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_emuplt_close,
++ nopage: pax_emuplt_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ memset(vma, 0, sizeof(*vma));
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->tpc >= current->mm->start_code &&
++ regs->tpc < current->mm->end_code)
++ {
++ if (regs->u_regs[UREG_RETPC] + 8UL == regs->tpc)
++ return 1;
++
++ regs->tpc += current->mm->delta_exec;
++ if (regs->tnpc >= current->mm->start_code &&
++ regs->tnpc < current->mm->end_code)
++ regs->tnpc += current->mm->delta_exec;
++ return 4;
++ }
++ if (regs->tpc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->tpc < current->mm->end_code + current->mm->delta_exec)
++ {
++ regs->tpc -= current->mm->delta_exec;
++ if (regs->tnpc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->tnpc < current->mm->end_code + current->mm->delta_exec)
++ regs->tnpc -= current->mm->delta_exec;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int*)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + 4 + (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->tpc);
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int mov1, call, mov2;
++
++ err = get_user(mov1, (unsigned int*)regs->tpc);
++ err |= get_user(call, (unsigned int*)(regs->tpc+4));
++ err |= get_user(mov2, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if (mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(or1, (unsigned int*)(regs->tpc+8));
++ err |= get_user(or2, (unsigned int*)(regs->tpc+12));
++ err |= get_user(sllx, (unsigned int*)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020 &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(sllx, (unsigned int*)(regs->tpc+8));
++ err |= get_user(or, (unsigned int*)(regs->tpc+12));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+16));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+20));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020 &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->tpc);
++ err |= get_user(ba, (unsigned int*)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ err = get_user(save, (unsigned int*)addr);
++ err |= get_user(call, (unsigned int*)(addr+4));
++ err |= get_user(nop, (unsigned int*)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_dl_resolve);
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int*)(regs->tpc-4));
++ err |= get_user(call, (unsigned int*)regs->tpc);
++ err |= get_user(nop, (unsigned int*)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -347,8 +706,10 @@
+ goto intr_or_no_mm;
+
+ if (test_thread_flag(TIF_32BIT)) {
+- if (!(regs->tstate & TSTATE_PRIV))
++ if (!(regs->tstate & TSTATE_PRIV)) {
+ regs->tpc &= 0xffffffff;
++ regs->tnpc &= 0xffffffff;
++ }
+ address &= 0xffffffff;
+ }
+
+@@ -357,6 +718,34 @@
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((current->flags & PF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ goto fault_done;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ goto fault_done;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
+diff -urN linux-2.6.7/arch/sparc64/solaris/misc.c linux-2.6.7/arch/sparc64/solaris/misc.c
+--- linux-2.6.7/arch/sparc64/solaris/misc.c 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/arch/sparc64/solaris/misc.c 2004-06-25 17:41:53 -0400
+@@ -56,6 +56,11 @@
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ /* Do we need it here? */
+ set_personality(PER_SVR4);
+ if (flags & MAP_NORESERVE) {
+diff -urN linux-2.6.7/arch/x86_64/ia32/ia32_binfmt.c linux-2.6.7/arch/x86_64/ia32/ia32_binfmt.c
+--- linux-2.6.7/arch/x86_64/ia32/ia32_binfmt.c 2004-06-16 01:18:55 -0400
++++ linux-2.6.7/arch/x86_64/ia32/ia32_binfmt.c 2004-06-25 17:41:53 -0400
+@@ -185,6 +185,17 @@
+ //#include <asm/ia32.h>
+ #include <linux/elf.h>
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (test_thread_flag(TIF_IA32) ? 0x08048000UL : 0x400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#endif
++
+ typedef struct user_i387_ia32_struct elf_fpregset_t;
+ typedef struct user32_fxsr_struct elf_fpxregset_t;
+
+@@ -361,7 +372,13 @@
+ else if (executable_stack == EXSTACK_DISABLE_X)
+ mpnt->vm_flags = vm_stack_flags32 & ~VM_EXEC;
+ else
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ mpnt->vm_flags = VM_STACK_FLAGS;
++#else
+ mpnt->vm_flags = vm_stack_flags32;
++#endif
++
+ mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
+ PAGE_COPY_EXEC : PAGE_COPY;
+ insert_vm_struct(mm, mpnt);
+diff -urN linux-2.6.7/arch/x86_64/ia32/sys_ia32.c linux-2.6.7/arch/x86_64/ia32/sys_ia32.c
+--- linux-2.6.7/arch/x86_64/ia32/sys_ia32.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/arch/x86_64/ia32/sys_ia32.c 2004-06-25 17:41:53 -0400
+@@ -212,6 +212,11 @@
+ if (a.offset & ~PAGE_MASK)
+ return -EINVAL;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (a.flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(a.flags & MAP_ANONYMOUS)) {
+ file = fget(a.fd);
+ if (!file)
+@@ -1037,6 +1042,11 @@
+ unsigned long error;
+ struct file * file = NULL;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -urN linux-2.6.7/arch/x86_64/kernel/ptrace.c linux-2.6.7/arch/x86_64/kernel/ptrace.c
+--- linux-2.6.7/arch/x86_64/kernel/ptrace.c 2004-06-16 01:19:09 -0400
++++ linux-2.6.7/arch/x86_64/kernel/ptrace.c 2004-06-25 14:07:21 -0400
+@@ -213,6 +213,9 @@
+ if (pid == 1) /* you may not mess with init */
+ goto out_tsk;
+
++ if (gr_handle_ptrace(child, request))
++ goto out_tsk;
++
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out_tsk;
+diff -urN linux-2.6.7/arch/x86_64/kernel/setup64.c linux-2.6.7/arch/x86_64/kernel/setup64.c
+--- linux-2.6.7/arch/x86_64/kernel/setup64.c 2004-06-16 01:19:03 -0400
++++ linux-2.6.7/arch/x86_64/kernel/setup64.c 2004-06-25 17:41:53 -0400
+@@ -43,8 +43,15 @@
+
+ unsigned long __supported_pte_mask = ~0UL;
+ static int do_not_nx __initdata = 0;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++unsigned long vm_stack_flags = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS;
++unsigned long vm_stack_flags32 = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS;
++#else
+ unsigned long vm_stack_flags = __VM_STACK_FLAGS;
+ unsigned long vm_stack_flags32 = __VM_STACK_FLAGS;
++#endif
++
+ unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
+ unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
+ unsigned long vm_force_exec32 = PROT_EXEC;
+diff -urN linux-2.6.7/arch/x86_64/kernel/sys_x86_64.c linux-2.6.7/arch/x86_64/kernel/sys_x86_64.c
+--- linux-2.6.7/arch/x86_64/kernel/sys_x86_64.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/arch/x86_64/kernel/sys_x86_64.c 2004-06-25 17:41:53 -0400
+@@ -48,6 +48,11 @@
+ if (off & ~PAGE_MASK)
+ goto out;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ goto out;
++#endif
++
+ error = -EBADF;
+ file = NULL;
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+@@ -102,6 +107,15 @@
+
+ find_start_end(flags, &begin, &end);
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp)) {
++ if (begin == 0x40000000)
++ begin += current->mm->delta_mmap & 0x0FFFFFFFU;
++ else
++ begin += current->mm->delta_mmap;
++ }
++#endif
++
+ if (len > end)
+ return -ENOMEM;
+
+diff -urN linux-2.6.7/arch/x86_64/mm/fault.c linux-2.6.7/arch/x86_64/mm/fault.c
+--- linux-2.6.7/arch/x86_64/mm/fault.c 2004-06-16 01:18:52 -0400
++++ linux-2.6.7/arch/x86_64/mm/fault.c 2004-06-25 17:41:53 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/vt_kern.h> /* For unblank_screen() */
+ #include <linux/compiler.h>
+ #include <linux/module.h>
++#include <linux/binfmts.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -218,6 +219,63 @@
+ (tsk->sighand->action[sig-1].sa.sa_handler == SIG_DFL);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->rip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_RANDEXEC
++ int err;
++
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->rip >= current->mm->start_code &&
++ regs->rip < current->mm->end_code)
++ {
++ if (test_thread_flag(TIF_IA32)) {
++ unsigned int esp_4;
++
++ err = get_user(esp_4, (unsigned int*)(regs->rsp-4UL));
++ if (err || esp_4 == regs->rip)
++ return 1;
++ } else {
++ unsigned long esp_8;
++
++ err = get_user(esp_8, (unsigned long*)(regs->rsp-8UL));
++ if (err || esp_8 == regs->rip)
++ return 1;
++ }
++
++ regs->rip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc, void *sp)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned char*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%02x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int page_fault_trace;
+ int exception_trace = 1;
+
+@@ -303,6 +361,23 @@
+ * we can handle it..
+ */
+ good_area:
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && (error_code & 16) && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->rip, (void*)regs->rsp);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+diff -urN linux-2.6.7/drivers/char/keyboard.c linux-2.6.7/drivers/char/keyboard.c
+--- linux-2.6.7/drivers/char/keyboard.c 2004-06-16 01:20:27 -0400
++++ linux-2.6.7/drivers/char/keyboard.c 2004-06-25 14:07:21 -0400
+@@ -606,6 +606,16 @@
+ kbd->kbdmode == VC_MEDIUMRAW) &&
+ value != KVAL(K_SAK))
+ return; /* SAK is allowed even in raw mode */
++
++#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
++ {
++ void *func = fn_handler[value];
++ if (func == fn_show_state || func == fn_show_ptregs ||
++ func == fn_show_mem)
++ return;
++ }
++#endif
++
+ fn_handler[value](vc, regs);
+ }
+
+diff -urN linux-2.6.7/drivers/char/mem.c linux-2.6.7/drivers/char/mem.c
+--- linux-2.6.7/drivers/char/mem.c 2004-06-16 01:19:23 -0400
++++ linux-2.6.7/drivers/char/mem.c 2004-06-30 12:55:14 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/devfs_fs_kernel.h>
+ #include <linux/ptrace.h>
+ #include <linux/device.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -39,6 +40,10 @@
+ extern void tapechar_init(void);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern struct file_operations grsec_fops;
++#endif
++
+ /*
+ * Architectures vary in how they handle caching for addresses
+ * outside of main memory.
+@@ -191,6 +196,12 @@
+
+ if (!valid_phys_addr_range(p, &count))
+ return -EFAULT;
++
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_mem_write();
++ return -EPERM;
++#endif
++
+ return do_write_mem(__va(p), p, buf, count, ppos);
+ }
+
+@@ -205,6 +216,11 @@
+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ if (gr_handle_mem_mmap(offset, vma))
++ return -EPERM;
++#endif
++
+ /* Don't try to swap out physical pages.. */
+ vma->vm_flags |= VM_RESERVED;
+
+@@ -298,6 +314,11 @@
+ ssize_t written;
+ char * kbuf; /* k-addr because vwrite() takes vmlist_lock rwlock */
+
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_kmem_write();
++ return -EPERM;
++#endif
++
+ if (p < (unsigned long) high_memory) {
+
+ wrote = count;
+@@ -424,7 +445,23 @@
+ count = size;
+
+ zap_page_range(vma, addr, count, NULL);
+- zeromap_page_range(vma, addr, count, PAGE_COPY);
++ zeromap_page_range(vma, addr, count, vma->vm_page_prot);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ unsigned long addr_m;
++ struct vm_area_struct * vma_m;
++
++ addr_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(mm, addr_m);
++ if (vma_m && vma_m->vm_start == addr_m && (vma_m->vm_flags & VM_MIRROR)) {
++ addr_m = addr + vma->vm_mirror;
++ zap_page_range(vma_m, addr_m, count, NULL);
++ } else
++ printk(KERN_ERR "PAX: VMMIRROR: read_zero bug, %08lx, %08lx\n",
++ addr, vma->vm_start);
++ }
++#endif
+
+ size -= count;
+ buf += count;
+@@ -573,6 +610,16 @@
+
+ static int open_port(struct inode * inode, struct file * filp)
+ {
++#ifdef CONFIG_GRKERNSEC_KMEM
++ gr_handle_open_port();
++ return -EPERM;
++#endif
++
++ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
++}
++
++static int open_mem(struct inode * inode, struct file * filp)
++{
+ return capable(CAP_SYS_RAWIO) ? 0 : -EPERM;
+ }
+
+@@ -581,7 +628,6 @@
+ #define full_lseek null_lseek
+ #define write_zero write_null
+ #define read_full read_zero
+-#define open_mem open_port
+ #define open_kmem open_mem
+
+ static struct file_operations mem_fops = {
+@@ -682,6 +728,11 @@
+ case 11:
+ filp->f_op = &kmsg_fops;
+ break;
++#ifdef CONFIG_GRKERNSEC
++ case 12:
++ filp->f_op = &grsec_fops;
++ break;
++#endif
+ default:
+ return -ENXIO;
+ }
+@@ -711,6 +762,9 @@
+ {8, "random", S_IRUGO | S_IWUSR, &random_fops},
+ {9, "urandom", S_IRUGO | S_IWUSR, &urandom_fops},
+ {11,"kmsg", S_IRUGO | S_IWUSR, &kmsg_fops},
++#ifdef CONFIG_GRKERNSEC
++ {12,"grsec", S_IRUSR | S_IWUGO, &grsec_fops},
++#endif
+ };
+
+ static struct class_simple *mem_class;
+diff -urN linux-2.6.7/drivers/char/random.c linux-2.6.7/drivers/char/random.c
+--- linux-2.6.7/drivers/char/random.c 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/drivers/char/random.c 2004-06-25 17:41:53 -0400
+@@ -263,9 +263,15 @@
+ /*
+ * Configuration information
+ */
++#ifdef CONFIG_GRKERNSEC_RANDNET
++#define DEFAULT_POOL_SIZE 1024
++#define SECONDARY_POOL_SIZE 256
++#define BATCH_ENTROPY_SIZE 512
++#else
+ #define DEFAULT_POOL_SIZE 512
+ #define SECONDARY_POOL_SIZE 128
+ #define BATCH_ENTROPY_SIZE 256
++#endif
+ #define USE_SHA
+
+ /*
+@@ -2379,6 +2385,29 @@
+ return halfMD4Transform(hash, keyptr->secret);
+ }
+
++#ifdef CONFIG_GRKERNSEC
++/* the following function is provided by PaX under the GPL */
++unsigned long get_random_long(void)
++{
++ static time_t rekey_time;
++ static __u32 secret[12];
++ time_t t;
++
++ /*
++ * Pick a random secret every REKEY_INTERVAL seconds
++ */
++ t = get_seconds();
++ if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) {
++ rekey_time = t;
++ get_random_bytes(secret, sizeof(secret));
++ }
++
++ secret[1] = halfMD4Transform(secret+8, secret);
++ secret[0] = halfMD4Transform(secret+8, secret);
++ return *(unsigned long *)secret;
++}
++#endif
++
+ #ifdef CONFIG_SYN_COOKIES
+ /*
+ * Secure SYN cookie computation. This is the algorithm worked out by
+@@ -2478,3 +2507,25 @@
+ return (cookie - tmp[17]) & COOKIEMASK; /* Leaving the data behind */
+ }
+ #endif
++
++#ifdef CONFIG_PAX_ASLR
++unsigned long pax_get_random_long(void)
++{
++ static time_t rekey_time;
++ static __u32 secret[12];
++ time_t t;
++
++ /*
++ * Pick a random secret every REKEY_INTERVAL seconds.
++ */
++ t = get_seconds();
++ if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) {
++ rekey_time = t;
++ get_random_bytes(secret, sizeof(secret));
++ }
++
++ secret[1] = halfMD4Transform(secret+8, secret);
++ secret[0] = halfMD4Transform(secret+8, secret);
++ return *(unsigned long *)secret;
++}
++#endif
+diff -urN linux-2.6.7/drivers/char/vt_ioctl.c linux-2.6.7/drivers/char/vt_ioctl.c
+--- linux-2.6.7/drivers/char/vt_ioctl.c 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/drivers/char/vt_ioctl.c 2004-06-25 14:07:21 -0400
+@@ -96,6 +96,12 @@
+ case KDSKBENT:
+ if (!perm)
+ return -EPERM;
++
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG))
++ return -EPERM;
++#endif
++
+ if (!i && v == K_NOSUCHMAP) {
+ /* disallocate map */
+ key_map = key_maps[s];
+@@ -233,6 +239,13 @@
+ goto reterr;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ if (!capable(CAP_SYS_TTY_CONFIG)) {
++ ret = -EPERM;
++ goto reterr;
++ }
++#endif
++
+ q = func_table[i];
+ first_free = funcbufptr + (funcbufsize - funcbufleft);
+ for (j = i+1; j < MAX_NR_FUNC && !func_table[j]; j++)
+diff -urN linux-2.6.7/drivers/pci/proc.c linux-2.6.7/drivers/pci/proc.c
+--- linux-2.6.7/drivers/pci/proc.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/drivers/pci/proc.c 2004-06-25 14:07:21 -0400
+@@ -565,7 +565,15 @@
+
+ static void legacy_proc_init(void)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ struct proc_dir_entry * entry = create_proc_entry("pci", S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ struct proc_dir_entry * entry = create_proc_entry("pci", S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ struct proc_dir_entry * entry = create_proc_entry("pci", 0, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &proc_pci_operations;
+ }
+@@ -594,7 +602,15 @@
+ {
+ struct proc_dir_entry *entry;
+ struct pci_dev *dev = NULL;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR, proc_bus);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_bus_pci_dir = proc_mkdir_mode("pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, proc_bus);
++#endif
++#else
+ proc_bus_pci_dir = proc_mkdir("pci", proc_bus);
++#endif
+ entry = create_proc_entry("devices", 0, proc_bus_pci_dir);
+ if (entry)
+ entry->proc_fops = &proc_bus_pci_dev_operations;
+diff -urN linux-2.6.7/drivers/pnp/pnpbios/bioscalls.c linux-2.6.7/drivers/pnp/pnpbios/bioscalls.c
+--- linux-2.6.7/drivers/pnp/pnpbios/bioscalls.c 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/drivers/pnp/pnpbios/bioscalls.c 2004-06-25 17:41:53 -0400
+@@ -79,7 +79,7 @@
+ set_limit(cpu_gdt_table[cpu][(selname) >> 3], size); \
+ } while(0)
+
+-static struct desc_struct bad_bios_desc = { 0, 0x00409200 };
++static struct desc_struct bad_bios_desc = { 0, 0x00409300 };
+
+ /*
+ * At some point we want to use this stack frame pointer to unwind
+@@ -107,6 +107,10 @@
+ struct desc_struct save_desc_40;
+ int cpu;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long cr3;
++#endif
++
+ /*
+ * PnP BIOSes are generally not terribly re-entrant.
+ * Also, don't rely on them to save everything correctly.
+@@ -115,12 +119,17 @@
+ return PNP_FUNCTION_NOT_SUPPORTED;
+
+ cpu = get_cpu();
+- save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
+- cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
+
+ /* On some boxes IRQ's during PnP BIOS calls are deadly. */
+ spin_lock_irqsave(&pnp_bios_lock, flags);
+
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_open_kernel_noirq(cr3);
++#endif
++
++ save_desc_40 = cpu_gdt_table[cpu][0x40 / 8];
++ cpu_gdt_table[cpu][0x40 / 8] = bad_bios_desc;
++
+ /* The lock prevents us bouncing CPU here */
+ if (ts1_size)
+ Q2_SET_SEL(smp_processor_id(), PNP_TS1, ts1_base, ts1_size);
+@@ -156,9 +165,14 @@
+ "i" (0)
+ : "memory"
+ );
+- spin_unlock_irqrestore(&pnp_bios_lock, flags);
+
+ cpu_gdt_table[cpu][0x40 / 8] = save_desc_40;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel_noirq(cr3);
++#endif
++
++ spin_unlock_irqrestore(&pnp_bios_lock, flags);
+ put_cpu();
+
+ /* If we get here and this is set then the PnP BIOS faulted on us. */
+diff -urN linux-2.6.7/drivers/scsi/scsi_devinfo.c linux-2.6.7/drivers/scsi/scsi_devinfo.c
+--- linux-2.6.7/drivers/scsi/scsi_devinfo.c 2004-06-16 01:19:11 -0400
++++ linux-2.6.7/drivers/scsi/scsi_devinfo.c 2004-06-25 17:41:53 -0400
+@@ -27,7 +27,7 @@
+ static const char spaces[] = " "; /* 16 of them */
+ static unsigned scsi_default_dev_flags;
+ static LIST_HEAD(scsi_dev_info_list);
+-static __init char scsi_dev_flags[256];
++static __initdata char scsi_dev_flags[256];
+
+ /*
+ * scsi_static_device_list: deprecated list of devices that require
+diff -urN linux-2.6.7/drivers/video/vesafb.c linux-2.6.7/drivers/video/vesafb.c
+--- linux-2.6.7/drivers/video/vesafb.c 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/drivers/video/vesafb.c 2004-06-25 17:41:53 -0400
+@@ -250,7 +250,7 @@
+ if (vesafb_fix.smem_len > 16 * 1024 * 1024)
+ vesafb_fix.smem_len = 16 * 1024 * 1024;
+
+-#ifndef __i386__
++#if !defined(__i386__) || defined(CONFIG_PAX_KERNEXEC)
+ screen_info.vesapm_seg = 0;
+ #endif
+
+diff -urN linux-2.6.7/fs/Kconfig linux-2.6.7/fs/Kconfig
+--- linux-2.6.7/fs/Kconfig 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/fs/Kconfig 2004-06-25 14:07:21 -0400
+@@ -815,6 +815,7 @@
+
+ config PROC_KCORE
+ bool
++ depends on !GRKERNSEC_PROC_ADD
+ default y if !ARM
+
+ config SYSFS
+diff -urN linux-2.6.7/fs/binfmt_aout.c linux-2.6.7/fs/binfmt_aout.c
+--- linux-2.6.7/fs/binfmt_aout.c 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/fs/binfmt_aout.c 2004-06-25 17:41:53 -0400
+@@ -24,6 +24,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/personality.h>
+ #include <linux/init.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -118,10 +119,12 @@
+ /* If the size of the dump file exceeds the rlimit, then see what would happen
+ if we wrote the stack, but not the data area. */
+ #ifdef __sparc__
++ gr_learn_resource(current, RLIMIT_CORE, dump.u_dsize+dump.u_ssize, 1);
+ if ((dump.u_dsize+dump.u_ssize) >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_dsize = 0;
+ #else
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_dsize+dump.u_ssize+1) * PAGE_SIZE >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_dsize = 0;
+@@ -129,10 +132,12 @@
+
+ /* Make sure we have enough room to write the stack and data areas. */
+ #ifdef __sparc__
++ gr_learn_resource(current, RLIMIT_CORE, dump.u_ssize, 1);
+ if ((dump.u_ssize) >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_ssize = 0;
+ #else
++ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize+1) * PAGE_SIZE, 1);
+ if ((dump.u_ssize+1) * PAGE_SIZE >
+ current->rlim[RLIMIT_CORE].rlim_cur)
+ dump.u_ssize = 0;
+@@ -281,6 +286,8 @@
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
+ if (rlim >= RLIM_INFINITY)
+ rlim = ~0;
++
++ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
+ if (ex.a_data + ex.a_bss > rlim)
+ return -ENOMEM;
+
+@@ -309,10 +316,33 @@
+ (current->mm->start_brk = N_BSSADDR(ex));
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ current->mm->free_area_cache += current->mm->delta_mmap;
++#endif
++
+ current->mm->rss = 0;
+ current->mm->mmap = NULL;
+ compute_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->flags |= PF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->flags |= PF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ #ifdef __sparc__
+ if (N_MAGIC(ex) == NMAGIC) {
+ loff_t pos = fd_offset;
+@@ -399,7 +429,7 @@
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.7/fs/binfmt_elf.c linux-2.6.7/fs/binfmt_elf.c
+--- linux-2.6.7/fs/binfmt_elf.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/fs/binfmt_elf.c 2004-06-25 17:44:19 -0400
+@@ -37,11 +37,17 @@
+ #include <linux/pagemap.h>
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
++#include <linux/grsecurity.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/pgalloc.h>
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#include <asm/desc.h>
++#endif
++
+ #include <linux/elf.h>
+
+ static int load_elf_binary(struct linux_binprm * bprm, struct pt_regs * regs);
+@@ -85,14 +91,22 @@
+
+ static int set_brk(unsigned long start, unsigned long end)
+ {
++ current->mm->start_brk = current->mm->brk = end;
+ start = ELF_PAGEALIGN(start);
+ end = ELF_PAGEALIGN(end);
+ if (end > start) {
+ unsigned long addr = do_brk(start, end - start);
+ if (BAD_ADDR(addr))
+ return addr;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC)
++ addr = __do_mmap_pgoff(NULL, ELF_PAGEALIGN(start + current->mm->delta_exec), 0UL, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, start);
++ if (BAD_ADDR(addr))
++ return addr;
++#endif
++
+ }
+- current->mm->start_brk = current->mm->brk = end;
+ return 0;
+ }
+
+@@ -303,6 +317,7 @@
+ unsigned long last_bss = 0, elf_bss = 0;
+ unsigned long error = ~0UL;
+ int retval, i, size;
++ unsigned long task_size = TASK_SIZE;
+
+ /* First of all, some simple consistency checks */
+ if (interp_elf_ex->e_type != ET_EXEC &&
+@@ -336,6 +351,11 @@
+ if (retval < 0)
+ goto out_close;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC)
++ task_size = SEGMEXEC_TASK_SIZE;
++#endif
++
+ eppnt = elf_phdata;
+ for (i=0; i<interp_elf_ex->e_phnum; i++, eppnt++) {
+ if (eppnt->p_type == PT_LOAD) {
+@@ -367,8 +387,8 @@
+ * <= p_memsize so it is only necessary to check p_memsz.
+ */
+ k = load_addr + eppnt->p_vaddr;
+- if (k > TASK_SIZE || eppnt->p_filesz > eppnt->p_memsz ||
+- eppnt->p_memsz > TASK_SIZE || TASK_SIZE - eppnt->p_memsz < k) {
++ if (k > task_size || eppnt->p_filesz > eppnt->p_memsz ||
++ eppnt->p_memsz > task_size || task_size - eppnt->p_memsz < k) {
+ error = -ENOMEM;
+ goto out_close;
+ }
+@@ -458,6 +478,227 @@
+ return elf_entry;
+ }
+
++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC)
++ pax_flags |= PF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_DEFAULT_PAGEEXEC
++ if (pax_flags & PF_PAX_PAGEEXEC)
++ pax_flags &= ~PF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_DEFAULT_SEGMEXEC
++ if (pax_flags & PF_PAX_SEGMEXEC)
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (elf_phdata->p_flags & PF_RANDMMAP)
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (elf_phdata->p_flags & PF_RANDEXEC)
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
++ pax_flags |= PF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_DEFAULT_PAGEEXEC
++ if (pax_flags & PF_PAX_PAGEEXEC)
++ pax_flags &= ~PF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_DEFAULT_SEGMEXEC
++ if (pax_flags & PF_PAX_SEGMEXEC)
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_phdata->p_flags & PF_NORANDEXEC))
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++static int pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
++ pax_flags |= PF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_DEFAULT_PAGEEXEC
++ if (pax_flags & PF_PAX_PAGEEXEC)
++ pax_flags &= ~PF_PAX_SEGMEXEC;
++#endif
++
++#ifdef CONFIG_PAX_DEFAULT_SEGMEXEC
++ if (pax_flags & PF_PAX_SEGMEXEC)
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if ((elf_ex->e_ident[EI_PAX] & EF_PAX_RANDEXEC) && (elf_ex->e_type == ET_EXEC) && (pax_flags & PF_PAX_MPROTECT))
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++static int pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++ pax_flags = pax_parse_ei_pax(elf_ex);
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)) ||
++ ((elf_phdata[i].p_flags & PF_RANDEXEC) && ((elf_phdata[i].p_flags & PF_NORANDEXEC) || elf_ex->e_type == ET_DYN || !(elf_phdata[i].p_flags & PF_MPROTECT))) ||
++ (!(elf_phdata[i].p_flags & PF_NORANDEXEC) && (elf_ex->e_type == ET_DYN || (elf_phdata[i].p_flags & PF_NOMPROTECT))))
++ return -EINVAL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->flags |= pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -491,7 +732,13 @@
+ char passed_fileno[6];
+ struct files_struct *files;
+ int executable_stack = EXSTACK_DEFAULT;
+-
++ unsigned long task_size = TASK_SIZE;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ unsigned long load_addr_random = 0UL;
++ unsigned long load_bias_random = 0UL;
++#endif
++
+ /* Get the exec-header */
+ elf_ex = *((struct elfhdr *) bprm->buf);
+
+@@ -615,6 +862,7 @@
+ elf_ppnt++;
+ }
+
++#if 0
+ elf_ppnt = elf_phdata;
+ for (i = 0; i < elf_ex.e_phnum; i++, elf_ppnt++)
+ if (elf_ppnt->p_type == PT_GNU_STACK) {
+@@ -623,6 +871,7 @@
+ else
+ executable_stack = EXSTACK_DISABLE_X;
+ }
++#endif
+
+ /* Some simple consistency checks for the interpreter */
+ if (elf_interpreter) {
+@@ -689,8 +938,64 @@
+ current->mm->end_data = 0;
+ current->mm->end_code = 0;
+ current->mm->mmap = NULL;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ current->mm->call_syscall = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_exec = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
+ current->flags &= ~PF_FORKNOEXEC;
+
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++ if (0 > pax_parse_elf_flags(&elf_ex, elf_phdata)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_flags_func)
++ (pax_set_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if (current->flags & PF_PAX_PAGEEXEC)
++ current->mm->context.user_cs_limit = PAGE_SIZE;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ int cpu = get_cpu();
++
++ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
++ current->mm->context.user_cs_limit = -SEGMEXEC_TASK_SIZE;
++ set_user_cs(current->mm, cpu);
++ put_cpu();
++ task_size = SEGMEXEC_TASK_SIZE;
++ }
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->flags & PF_PAX_RANDMMAP) {
++#define pax_delta_mask(delta, lsb, len) (((delta) & ((1UL << (len)) - 1)) << (lsb))
++
++ current->mm->delta_mmap = pax_delta_mask(pax_get_random_long(), PAX_DELTA_MMAP_LSB(current), PAX_DELTA_MMAP_LEN(current));
++ current->mm->delta_exec = pax_delta_mask(pax_get_random_long(), PAX_DELTA_EXEC_LSB(current), PAX_DELTA_EXEC_LEN(current));
++ current->mm->delta_stack = pax_delta_mask(pax_get_random_long(), PAX_DELTA_STACK_LSB(current), PAX_DELTA_STACK_LEN(current));
++ }
++#endif
++
+ /* Do this immediately, since STACK_TOP as used in setup_arg_pages
+ may depend on the personality. */
+ SET_PERSONALITY(elf_ex, ibcs2_interpreter);
+@@ -699,6 +1004,12 @@
+ change some of these later */
+ current->mm->rss = 0;
+ current->mm->free_area_cache = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ current->mm->free_area_cache += current->mm->delta_mmap;
++#endif
++
+ retval = setup_arg_pages(bprm, executable_stack);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+@@ -753,12 +1064,92 @@
+ /* Try and get dynamic programs out of the way of the default mmap
+ base, as well as whatever program they might try to exec. This
+ is because the brk will follow the loader, and is not movable. */
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE(current) - vaddr);
++ elf_flags |= MAP_FIXED;
++ } else
++#endif
++
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if (current->flags & PF_PAX_RANDMMAP)
++ load_bias += ELF_PAGESTART(current->mm->delta_exec);
++#endif
++
+ }
+
+- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+- if (BAD_ADDR(error))
+- continue;
++#ifdef CONFIG_PAX_RANDEXEC
++ if ((current->flags & PF_PAX_RANDEXEC) && (elf_ex.e_type == ET_EXEC)) {
++ error = -ENOMEM;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot & ~PROT_EXEC, elf_flags);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ unsigned long addr, len;
++
++ addr = ELF_PAGESTART(load_bias + vaddr);
++ len = elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr);
++ if (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)
++ continue;
++ down_write(&current->mm->mmap_sem);
++ error = __do_mmap_pgoff(bprm->file, addr, len, elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT);
++ up_write(&current->mm->mmap_sem);
++ }
++#endif
++
++ if (BAD_ADDR(error))
++ continue;
++
++ /* PaX: mirror at a randomized base */
++ down_write(&current->mm->mmap_sem);
++
++ if (!load_addr_set) {
++ load_addr_random = get_unmapped_area(bprm->file, 0UL, elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT, MAP_PRIVATE);
++ if (BAD_ADDR(load_addr_random)) {
++ up_write(&current->mm->mmap_sem);
++ continue;
++ }
++ load_bias_random = load_addr_random - vaddr;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (elf_prot & PROT_EXEC) {
++ load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), elf_ppnt->p_memsz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), PROT_NONE, MAP_PRIVATE | MAP_FIXED, 0UL);
++ if (!BAD_ADDR(load_addr_random)) {
++ load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr + SEGMEXEC_TASK_SIZE), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++ if (!BAD_ADDR(load_addr_random))
++ load_addr_random -= SEGMEXEC_TASK_SIZE;
++ }
++ } else
++ load_addr_random = __do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ if (BAD_ADDR(load_addr_random))
++ continue;
++ } else
++#endif
++
++ {
++ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
++ if (BAD_ADDR(error))
++ continue;
++ }
+
+ if (!load_addr_set) {
+ load_addr_set = 1;
+@@ -769,6 +1160,11 @@
+ load_addr += load_bias;
+ reloc_func_desc = load_bias;
+ }
++
++#ifdef CONFIG_PAX_RANDEXEC
++ current->mm->delta_exec = load_addr_random - load_addr;
++#endif
++
+ }
+ k = elf_ppnt->p_vaddr;
+ if (k < start_code) start_code = k;
+@@ -779,9 +1175,9 @@
+ * allowed task size. Note that p_filesz must always be
+ * <= p_memsz so it is only necessary to check p_memsz.
+ */
+- if (k > TASK_SIZE || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
+- elf_ppnt->p_memsz > TASK_SIZE ||
+- TASK_SIZE - elf_ppnt->p_memsz < k) {
++ if (k > task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
++ elf_ppnt->p_memsz > task_size ||
++ task_size - elf_ppnt->p_memsz < k) {
+ /* set_brk can never work. Avoid overflows. */
+ send_sig(SIGKILL, current, 0);
+ goto out_free_dentry;
+@@ -808,6 +1204,16 @@
+ start_data += load_bias;
+ end_data += load_bias;
+
++#ifdef CONFIG_PAX_RANDMMAP
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ elf_brk += PAGE_SIZE + pax_delta_mask(pax_get_random_long(), 4, PAGE_SHIFT);
++#undef pax_delta_mask
++#endif
++
+ /* Calling set_brk effectively mmaps the pages that we need
+ * for the bss and break sections. We must do this before
+ * mapping in the interpreter, to make sure it doesn't wind
+@@ -1100,8 +1506,11 @@
+ #undef DUMP_SEEK
+
+ #define DUMP_WRITE(addr, nr) \
++ do { \
++ gr_learn_resource(current, RLIMIT_CORE, size + (nr), 1); \
+ if ((size += (nr)) > limit || !dump_write(file, (addr), (nr))) \
+- goto end_coredump;
++ goto end_coredump; \
++ } while (0);
+ #define DUMP_SEEK(off) \
+ if (!dump_seek(file, (off))) \
+ goto end_coredump;
+diff -urN linux-2.6.7/fs/binfmt_flat.c linux-2.6.7/fs/binfmt_flat.c
+--- linux-2.6.7/fs/binfmt_flat.c 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/fs/binfmt_flat.c 2004-06-25 17:41:53 -0400
+@@ -541,7 +541,9 @@
+ realdatastart = (unsigned long) -ENOMEM;
+ printk("Unable to allocate RAM for process data, errno %d\n",
+ (int)-datapos);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
++ up_write(&current->mm->mmap_sem);
+ return realdatastart;
+ }
+ datapos = realdatastart + MAX_SHARED_LIBS * sizeof(unsigned long);
+@@ -562,8 +564,10 @@
+ }
+ if (result >= (unsigned long)-4096) {
+ printk("Unable to read data+bss, errno %d\n", (int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len);
+ do_munmap(current->mm, realdatastart, data_len + extra);
++ up_write(&current->mm->mmap_sem);
+ return result;
+ }
+
+@@ -625,8 +629,10 @@
+ }
+ if (result >= (unsigned long)-4096) {
+ printk("Unable to read code+data+bss, errno %d\n",(int)-result);
++ down_write(&current->mm->mmap_sem);
+ do_munmap(current->mm, textpos, text_len + data_len + extra +
+ MAX_SHARED_LIBS * sizeof(unsigned long));
++ up_write(&current->mm->mmap_sem);
+ return result;
+ }
+ }
+diff -urN linux-2.6.7/fs/binfmt_misc.c linux-2.6.7/fs/binfmt_misc.c
+--- linux-2.6.7/fs/binfmt_misc.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/fs/binfmt_misc.c 2004-06-25 17:41:53 -0400
+@@ -108,9 +108,11 @@
+ int retval;
+
+ retval = -ENOEXEC;
+- if (!enabled)
++ if (!enabled || bprm->misc)
+ goto _ret;
+
++ bprm->misc++;
++
+ /* to keep locking time low, we copy the interpreter string */
+ read_lock(&entries_lock);
+ fmt = check_file(bprm);
+diff -urN linux-2.6.7/fs/buffer.c linux-2.6.7/fs/buffer.c
+--- linux-2.6.7/fs/buffer.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/fs/buffer.c 2004-06-25 14:07:21 -0400
+@@ -37,6 +37,7 @@
+ #include <linux/bio.h>
+ #include <linux/notifier.h>
+ #include <linux/cpu.h>
++#include <linux/grsecurity.h>
+ #include <asm/bitops.h>
+
+ static void invalidate_bh_lrus(void);
+@@ -2231,6 +2232,9 @@
+ int err;
+
+ err = -EFBIG;
++
++ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long) size, 1);
++
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
+ if (limit != RLIM_INFINITY && size > (loff_t)limit) {
+ send_sig(SIGXFSZ, current, 0);
+diff -urN linux-2.6.7/fs/compat.c linux-2.6.7/fs/compat.c
+--- linux-2.6.7/fs/compat.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/fs/compat.c 2004-06-28 10:44:33 -0400
+@@ -40,6 +40,7 @@
+ #include <linux/nfsd/nfsd.h>
+ #include <linux/nfsd/syscall.h>
+ #include <linux/personality.h>
++#include <linux/grsecurity.h>
+
+ #include <net/sock.h> /* siocdevprivate_ioctl */
+
+@@ -1117,6 +1118,11 @@
+ struct file *file;
+ int retval;
+ int i;
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+
+ sched_balance_exec();
+
+@@ -1126,6 +1132,20 @@
+ if (IS_ERR(file))
+ return retval;
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ allow_write_access(file);
++ fput(file);
++ return -EAGAIN;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ allow_write_access(file);
++ fput(file);
++ return -EACCES;
++ }
++
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+@@ -1174,15 +1194,49 @@
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
++
+ retval = search_binary_handler(&bprm,regs);
+ if (retval >= 0) {
+ free_arg_pages(&bprm);
+
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
++
+ /* execve success */
+ security_bprm_free(&bprm);
+ return retval;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ /* Something went wrong, return the inode and free the argument pages*/
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+diff -urN linux-2.6.7/fs/dcache.c linux-2.6.7/fs/dcache.c
+--- linux-2.6.7/fs/dcache.c 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/fs/dcache.c 2004-06-25 14:07:21 -0400
+@@ -1275,7 +1275,7 @@
+ *
+ * "buflen" should be positive. Caller holds the dcache_lock.
+ */
+-static char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
++char * __d_path( struct dentry *dentry, struct vfsmount *vfsmnt,
+ struct dentry *root, struct vfsmount *rootmnt,
+ char *buffer, int buflen)
+ {
+diff -urN linux-2.6.7/fs/exec.c linux-2.6.7/fs/exec.c
+--- linux-2.6.7/fs/exec.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/fs/exec.c 2004-08-03 17:51:18 -0400
+@@ -46,6 +46,8 @@
+ #include <linux/security.h>
+ #include <linux/syscalls.h>
+ #include <linux/rmap.h>
++#include <linux/grsecurity.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+@@ -62,6 +64,20 @@
+ static struct linux_binfmt *formats;
+ static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
+
++#ifdef CONFIG_PAX_SOFTMODE
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++unsigned int pax_aslr=1;
++#endif
++
++unsigned int pax_softmode;
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_flags_func)(struct linux_binprm * bprm);
++EXPORT_SYMBOL(pax_set_flags_func);
++#endif
++
+ int register_binfmt(struct linux_binfmt * fmt)
+ {
+ struct linux_binfmt ** tmp = &formats;
+@@ -306,6 +322,10 @@
+ if (unlikely(anon_vma_prepare(vma)))
+ goto out_sig;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page_count(page) == 1)
++#endif
++
+ flush_dcache_page(page);
+ pgd = pgd_offset(mm, address);
+
+@@ -321,6 +341,11 @@
+ goto out;
+ }
+ mm->rss++;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page_count(page) == 1)
++#endif
++
+ lru_cache_add_active(page);
+ set_pte(pte, pte_mkdirty(pte_mkwrite(mk_pte(
+ page, vma->vm_page_prot))));
+@@ -345,6 +370,10 @@
+ int i;
+ long arg_size;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *mpnt_m = NULL;
++#endif
++
+ #ifdef CONFIG_STACK_GROWSUP
+ /* Move the argument and environment strings to the bottom of the
+ * stack space.
+@@ -404,8 +433,24 @@
+ if (!mpnt)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && (VM_STACK_FLAGS & VM_MAYEXEC)) {
++ mpnt_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ if (!mpnt_m) {
++ kmem_cache_free(vm_area_cachep, mpnt);
++ return -ENOMEM;
++ }
++ }
++#endif
++
+ if (security_vm_enough_memory(arg_size >> PAGE_SHIFT)) {
+ kmem_cache_free(vm_area_cachep, mpnt);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mpnt_m)
++ kmem_cache_free(vm_area_cachep, mpnt_m);
++#endif
++
+ return -ENOMEM;
+ }
+
+@@ -431,9 +476,36 @@
+ mpnt->vm_flags = VM_STACK_FLAGS & ~VM_EXEC;
+ else
+ mpnt->vm_flags = VM_STACK_FLAGS;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC))
++ mpnt->vm_page_prot = protection_map[(mpnt->vm_flags | VM_EXEC) & 0x7];
++ else
++#endif
++
+ mpnt->vm_page_prot = protection_map[mpnt->vm_flags & 0x7];
+ insert_vm_struct(mm, mpnt);
+ mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mpnt_m) {
++ *mpnt_m = *mpnt;
++ if (!(mpnt->vm_flags & VM_EXEC)) {
++ mpnt_m->vm_flags &= ~(VM_READ | VM_WRITE | VM_EXEC);
++ mpnt_m->vm_page_prot = PAGE_NONE;
++ }
++ mpnt_m->vm_start += SEGMEXEC_TASK_SIZE;
++ mpnt_m->vm_end += SEGMEXEC_TASK_SIZE;
++ mpnt_m->vm_flags |= VM_MIRROR;
++ mpnt->vm_flags |= VM_MIRROR;
++ mpnt_m->vm_mirror = mpnt->vm_start - mpnt_m->vm_start;
++ mpnt->vm_mirror = mpnt_m->vm_start - mpnt->vm_start;
++ insert_vm_struct(mm, mpnt_m);
++ mpnt_m->vm_pgoff = mpnt->vm_pgoff;
++ mm->total_vm += (mpnt_m->vm_end - mpnt_m->vm_start) >> PAGE_SHIFT;
++ }
++#endif
++
+ }
+
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+@@ -441,6 +513,14 @@
+ if (page) {
+ bprm->page[i] = NULL;
+ install_arg_page(mpnt, page, stack_base);
++
++#if defined(CONFIG_PAX_SEGMEXEC) && defined(CONFIG_PAX_MPROTECT)
++ if (mpnt_m) {
++ page_cache_get(page);
++ install_arg_page(mpnt_m, page, stack_base + SEGMEXEC_TASK_SIZE);
++ }
++#endif
++
+ }
+ stack_base += PAGE_SIZE;
+ }
+@@ -836,6 +916,30 @@
+ }
+ current->comm[i] = '\0';
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ current->flags &= ~PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ current->flags &= ~PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ current->flags &= ~PF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->flags &= ~PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ current->flags &= ~PF_PAX_RANDEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ current->flags &= ~PF_PAX_SEGMEXEC;
++#endif
++
+ flush_thread();
+
+ if (bprm->e_uid != current->euid || bprm->e_gid != current->egid ||
+@@ -1073,6 +1177,11 @@
+ struct file *file;
+ int retval;
+ int i;
++#ifdef CONFIG_GRKERNSEC
++ struct file *old_exec_file;
++ struct acl_subject_label *old_acl;
++ struct rlimit old_rlim[RLIM_NLIMITS];
++#endif
+
+ file = open_exec(filename);
+
+@@ -1082,13 +1191,39 @@
+
+ sched_balance_exec();
+
++ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&current->user->processes), 1);
++
++ if (gr_handle_nproc()) {
++ allow_write_access(file);
++ fput(file);
++ return -EAGAIN;
++ }
++
++ if (!gr_acl_handle_execve(file->f_dentry, file->f_vfsmnt)) {
++ allow_write_access(file);
++ fput(file);
++ return -EACCES;
++ }
++
++
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+ bprm.file = file;
+ bprm.filename = filename;
+ bprm.interp = filename;
+ bprm.sh_bang = 0;
++ bprm.misc = 0;
+ bprm.loader = 0;
+ bprm.exec = 0;
+ bprm.security = NULL;
+@@ -1117,11 +1252,26 @@
+ if (retval < 0)
+ goto out;
+
++ if (!gr_tpe_allow(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_check_crash_exec(file)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = copy_strings_kernel(1, &bprm.filename, &bprm);
+ if (retval < 0)
+ goto out;
+
+ bprm.exec = bprm.p;
++
++ gr_log_chroot_exec(file->f_dentry, file->f_vfsmnt);
++
++ gr_handle_exec_args(&bprm, argv);
++
+ retval = copy_strings(bprm.envc, envp, &bprm);
+ if (retval < 0)
+ goto out;
+@@ -1130,8 +1280,24 @@
+ if (retval < 0)
+ goto out;
+
++#ifdef CONFIG_GRKERNSEC
++ old_acl = current->acl;
++ memcpy(old_rlim, current->rlim, sizeof(old_rlim));
++ old_exec_file = current->exec_file;
++ get_file(file);
++ current->exec_file = file;
++#endif
++
++ retval = gr_set_proc_label(file->f_dentry, file->f_vfsmnt);
++ if (retval < 0)
++ goto out_fail;
++
+ retval = search_binary_handler(&bprm,regs);
+ if (retval >= 0) {
++#ifdef CONFIG_GRKERNSEC
++ if (old_exec_file)
++ fput(old_exec_file);
++#endif
+ free_arg_pages(&bprm);
+
+ /* execve success */
+@@ -1139,6 +1305,14 @@
+ return retval;
+ }
+
++out_fail:
++#ifdef CONFIG_GRKERNSEC
++ current->acl = old_acl;
++ memcpy(current->rlim, old_rlim, sizeof(old_rlim));
++ fput(current->exec_file);
++ current->exec_file = old_exec_file;
++#endif
++
+ out:
+ /* Something went wrong, return the inode and free the argument pages*/
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+@@ -1296,6 +1470,138 @@
+ *out_ptr = 0;
+ }
+
++int pax_check_flags(unsigned long * flags)
++{
++ int retval = 0;
++
++#if !defined(__i386__) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & PF_PAX_SEGMEXEC)
++ {
++ *flags &= ~PF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & PF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & PF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_RANDEXEC)
++
++#ifdef CONFIG_PAX_RANDEXEC
++ && !(*flags & PF_PAX_MPROTECT)
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_RANDEXEC;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char* buffer_exec = (char*)__get_free_page(GFP_ATOMIC);
++ char* buffer_fault = (char*)__get_free_page(GFP_ATOMIC);
++ char* path_exec=NULL;
++ char* path_fault=NULL;
++ unsigned long start=0UL, end=0UL, offset=0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct* vma, * vma_exec=NULL, * vma_fault=NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec) {
++ path_exec = d_path(vma_exec->vm_file->f_dentry, vma_exec->vm_file->f_vfsmnt, buffer_exec, PAGE_SIZE);
++ if (IS_ERR(path_exec))
++ path_exec = "<path too long>";
++ }
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file) {
++ path_fault = d_path(vma_fault->vm_file->f_dentry, vma_fault->vm_file->f_vfsmnt, buffer_fault, PAGE_SIZE);
++ if (IS_ERR(path_fault))
++ path_fault = "<path too long>";
++ } else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ if (current->curr_ip && gr_acl_is_enabled())
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: (%.64s:%c:%.950s) terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", NIPQUAD(tsk->curr_ip), tsk->role->rolename, gr_roletype_to_char(),
++ tsk->acl->filename, path_exec, tsk->comm, tsk->pid,
++ tsk->uid, tsk->euid, pc, sp);
++ else if (current->curr_ip)
++ printk(KERN_ERR "PAX: From %u.%u.%u.%u: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", NIPQUAD(tsk->curr_ip), path_exec, tsk->comm, tsk->pid,
++ tsk->uid, tsk->euid, pc, sp);
++ else if (gr_acl_is_enabled())
++ printk(KERN_ERR "PAX: (%.64s:%c:%.950s) terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", tsk->role->rolename, gr_roletype_to_char(),
++ tsk->acl->filename, path_exec, tsk->comm, tsk->pid,
++ tsk->uid, tsk->euid, pc, sp);
++ else
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid,
++ tsk->uid, tsk->euid, pc, sp);
++ if (buffer_exec) free_page((unsigned long)buffer_exec);
++ if (buffer_fault) free_page((unsigned long)buffer_fault);
++ pax_report_insns(pc, sp);
++ do_coredump(SIGKILL, SIGKILL, regs);
++}
++#endif
++
+ static void zap_threads (struct mm_struct *mm)
+ {
+ struct task_struct *g, *p;
+@@ -1365,6 +1671,10 @@
+ current->signal->group_exit_code = exit_code;
+ coredump_wait(mm);
+
++ if (signr == SIGKILL || signr == SIGILL)
++ gr_handle_brute_attach(current);
++
++ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
+ if (current->rlim[RLIMIT_CORE].rlim_cur < binfmt->min_coredump)
+ goto fail_unlock;
+
+@@ -1384,7 +1694,7 @@
+ goto close_fail;
+ if (!file->f_op->write)
+ goto close_fail;
+- if (do_truncate(file->f_dentry, 0) != 0)
++ if (do_truncate(file->f_dentry, 0, file->f_vfsmnt) != 0)
+ goto close_fail;
+
+ retval = binfmt->core_dump(signr, regs, file);
+diff -urN linux-2.6.7/fs/fcntl.c linux-2.6.7/fs/fcntl.c
+--- linux-2.6.7/fs/fcntl.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/fs/fcntl.c 2004-06-25 14:07:21 -0400
+@@ -14,6 +14,7 @@
+ #include <linux/module.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/poll.h>
+ #include <asm/siginfo.h>
+@@ -86,6 +87,9 @@
+ int error;
+
+ error = -EINVAL;
++
++ gr_learn_resource(current, RLIMIT_NOFILE, orig_start, 0);
++
+ if (orig_start >= current->rlim[RLIMIT_NOFILE].rlim_cur)
+ goto out;
+
+@@ -105,6 +109,9 @@
+ }
+
+ error = -EMFILE;
++
++ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
++
+ if (newfd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
+ goto out;
+
+@@ -154,6 +161,8 @@
+ struct file * file, *tofree;
+ struct files_struct * files = current->files;
+
++ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
++
+ spin_lock(&files->file_lock);
+ if (!(file = fcheck(oldfd)))
+ goto out_unlock;
+@@ -493,13 +502,15 @@
+ if (pid > 0) {
+ p = find_task_by_pid(pid);
+ if (p) {
+- send_sigio_to_task(p, fown, fd, band);
++ if (!gr_check_protected_task(p))
++ send_sigio_to_task(p, fown, fd, band);
+ }
+ } else {
+ struct list_head *l;
+ struct pid *pidptr;
+ for_each_task_pid(-pid, PIDTYPE_PGID, p, l, pidptr) {
+- send_sigio_to_task(p, fown, fd, band);
++ if (!gr_check_protected_task(p) && !gr_pid_is_chrooted(p))
++ send_sigio_to_task(p, fown, fd, band);
+ }
+ }
+ read_unlock(&tasklist_lock);
+diff -urN linux-2.6.7/fs/namei.c linux-2.6.7/fs/namei.c
+--- linux-2.6.7/fs/namei.c 2004-06-16 01:19:12 -0400
++++ linux-2.6.7/fs/namei.c 2004-06-25 17:29:01 -0400
+@@ -27,6 +27,7 @@
+ #include <linux/security.h>
+ #include <linux/mount.h>
+ #include <linux/audit.h>
++#include <linux/grsecurity.h>
+ #include <asm/namei.h>
+ #include <asm/uaccess.h>
+
+@@ -413,6 +414,13 @@
+ err = security_inode_follow_link(dentry, nd);
+ if (err)
+ goto loop;
++
++ if (gr_handle_follow_link(dentry->d_parent->d_inode,
++ dentry->d_inode, dentry, nd->mnt)) {
++ err = -EACCES;
++ goto loop;
++ }
++
+ current->link_count++;
+ current->total_link_count++;
+ touch_atime(nd->mnt, dentry);
+@@ -764,6 +772,10 @@
+ break;
+ }
+ return_base:
++ if (!gr_acl_handle_hidden_file(nd->dentry, nd->mnt)) {
++ path_release(nd);
++ return -ENOENT;
++ }
+ return 0;
+ out_dput:
+ dput(next.dentry);
+@@ -1225,7 +1237,7 @@
+ if (!error) {
+ DQUOT_INIT(inode);
+
+- error = do_truncate(dentry, 0);
++ error = do_truncate(dentry, 0, nd->mnt);
+ }
+ put_write_access(inode);
+ if (error)
+@@ -1276,6 +1288,17 @@
+ error = path_lookup(pathname, lookup_flags(flag)|LOOKUP_OPEN, nd);
+ if (error)
+ return error;
++
++ if (gr_handle_rawio(nd->dentry->d_inode)) {
++ error = -EPERM;
++ goto exit;
++ }
++
++ if (!gr_acl_handle_open(nd->dentry, nd->mnt, flag)) {
++ error = -EACCES;
++ goto exit;
++ }
++
+ goto ok;
+ }
+
+@@ -1309,9 +1332,19 @@
+
+ /* Negative dentry, just create the file */
+ if (!dentry->d_inode) {
++ if (!gr_acl_handle_creat(dentry, nd->dentry, nd->mnt, flag, mode)) {
++ error = -EACCES;
++ up(&dir->d_inode->i_sem);
++ goto exit_dput;
++ }
++
+ if (!IS_POSIXACL(dir->d_inode))
+ mode &= ~current->fs->umask;
+ error = vfs_create(dir->d_inode, dentry, mode, nd);
++
++ if (!error)
++ gr_handle_create(dentry, nd->mnt);
++
+ up(&dir->d_inode->i_sem);
+ dput(nd->dentry);
+ nd->dentry = dentry;
+@@ -1326,6 +1359,25 @@
+ /*
+ * It already exists.
+ */
++
++ if (gr_handle_rawio(dentry->d_inode)) {
++ error = -EPERM;
++ up(&dir->d_inode->i_sem);
++ goto exit_dput;
++ }
++
++ if (!gr_acl_handle_open(dentry, nd->mnt, flag)) {
++ up(&dir->d_inode->i_sem);
++ error = -EACCES;
++ goto exit_dput;
++ }
++
++ if (gr_handle_fifo(dentry, nd->mnt, dir, flag, acc_mode)) {
++ up(&dir->d_inode->i_sem);
++ error = -EACCES;
++ goto exit_dput;
++ }
++
+ up(&dir->d_inode->i_sem);
+
+ error = -EEXIST;
+@@ -1379,6 +1431,13 @@
+ error = security_inode_follow_link(dentry, nd);
+ if (error)
+ goto exit_dput;
++
++ if (gr_handle_follow_link(dentry->d_parent->d_inode, dentry->d_inode,
++ dentry, nd->mnt)) {
++ error = -EACCES;
++ goto exit_dput;
++ }
++
+ touch_atime(nd->mnt, dentry);
+ error = dentry->d_inode->i_op->follow_link(dentry, nd);
+ dput(dentry);
+@@ -1486,6 +1545,22 @@
+ if (!IS_POSIXACL(nd.dentry->d_inode))
+ mode &= ~current->fs->umask;
+ if (!IS_ERR(dentry)) {
++ if (gr_handle_chroot_mknod(dentry, nd.mnt, mode)) {
++ error = -EPERM;
++ dput(dentry);
++ up(&nd.dentry->d_inode->i_sem);
++ path_release(&nd);
++ goto out;
++ }
++
++ if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
++ error = -EACCES;
++ dput(dentry);
++ up(&nd.dentry->d_inode->i_sem);
++ path_release(&nd);
++ goto out;
++ }
++
+ switch (mode & S_IFMT) {
+ case 0: case S_IFREG:
+ error = vfs_create(nd.dentry->d_inode,dentry,mode,&nd);
+@@ -1503,6 +1578,10 @@
+ default:
+ error = -EINVAL;
+ }
++
++ if (!error)
++ gr_handle_create(dentry, nd.mnt);
++
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1554,9 +1633,19 @@
+ dentry = lookup_create(&nd, 1);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
++ error = 0;
+ if (!IS_POSIXACL(nd.dentry->d_inode))
+ mode &= ~current->fs->umask;
+- error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
++
++ if (!gr_acl_handle_mkdir(dentry, nd.dentry, nd.mnt))
++ error = -EACCES;
++
++ if (!error)
++ error = vfs_mkdir(nd.dentry->d_inode, dentry, mode);
++
++ if (!error)
++ gr_handle_create(dentry, nd.mnt);
++
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1640,6 +1729,8 @@
+ char * name;
+ struct dentry *dentry;
+ struct nameidata nd;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ name = getname(pathname);
+ if(IS_ERR(name))
+@@ -1664,7 +1755,21 @@
+ dentry = lookup_hash(&nd.last, nd.dentry);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+- error = vfs_rmdir(nd.dentry->d_inode, dentry);
++ error = 0;
++ if (dentry->d_inode) {
++ if (dentry->d_inode->i_nlink <= 1) {
++ saved_ino = dentry->d_inode->i_ino;
++ saved_dev = dentry->d_inode->i_sb->s_dev;
++ }
++
++ if (!gr_acl_handle_rmdir(dentry, nd.mnt))
++ error = -EACCES;
++ }
++
++ if (!error)
++ error = vfs_rmdir(nd.dentry->d_inode, dentry);
++ if (!error && (saved_dev || saved_ino))
++ gr_handle_delete(saved_ino, saved_dev);
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1718,6 +1823,8 @@
+ struct dentry *dentry;
+ struct nameidata nd;
+ struct inode *inode = NULL;
++ ino_t saved_ino = 0;
++ dev_t saved_dev = 0;
+
+ name = getname(pathname);
+ if(IS_ERR(name))
+@@ -1733,13 +1840,26 @@
+ dentry = lookup_hash(&nd.last, nd.dentry);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
++ error = 0;
+ /* Why not before? Because we want correct error value */
+ if (nd.last.name[nd.last.len])
+ goto slashes;
+ inode = dentry->d_inode;
+- if (inode)
++ if (inode) {
++ if (inode->i_nlink <= 1) {
++ saved_ino = inode->i_ino;
++ saved_dev = inode->i_sb->s_dev;
++ }
++
++ if (!gr_acl_handle_unlink(dentry, nd.mnt))
++ error = -EACCES;
++
+ atomic_inc(&inode->i_count);
+- error = vfs_unlink(nd.dentry->d_inode, dentry);
++ }
++ if (!error)
++ error = vfs_unlink(nd.dentry->d_inode, dentry);
++ if (!error && (saved_ino || saved_dev))
++ gr_handle_delete(saved_ino, saved_dev);
+ exit2:
+ dput(dentry);
+ }
+@@ -1803,7 +1923,15 @@
+ dentry = lookup_create(&nd, 0);
+ error = PTR_ERR(dentry);
+ if (!IS_ERR(dentry)) {
+- error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
++ error = 0;
++ if (!gr_acl_handle_symlink(dentry, nd.dentry, nd.mnt, from))
++ error = -EACCES;
++
++ if (!error)
++ error = vfs_symlink(nd.dentry->d_inode, dentry, from, S_IALLUGO);
++
++ if (!error)
++ gr_handle_create(dentry, nd.mnt);
+ dput(dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -1887,7 +2015,20 @@
+ new_dentry = lookup_create(&nd, 0);
+ error = PTR_ERR(new_dentry);
+ if (!IS_ERR(new_dentry)) {
+- error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
++ error = 0;
++ if (gr_handle_hardlink(old_nd.dentry, old_nd.mnt,
++ old_nd.dentry->d_inode,
++ old_nd.dentry->d_inode->i_mode, to))
++ error = -EPERM;
++ if (!gr_acl_handle_link(new_dentry, nd.dentry, nd.mnt,
++ old_nd.dentry, old_nd.mnt, to))
++ error = -EACCES;
++ if (!error)
++ error = vfs_link(old_nd.dentry, nd.dentry->d_inode, new_dentry);
++
++ if (!error)
++ gr_handle_create(new_dentry, nd.mnt);
++
+ dput(new_dentry);
+ }
+ up(&nd.dentry->d_inode->i_sem);
+@@ -2109,8 +2250,16 @@
+ if (new_dentry == trap)
+ goto exit5;
+
+- error = vfs_rename(old_dir->d_inode, old_dentry,
++ error = gr_acl_handle_rename(new_dentry, newnd.dentry, newnd.mnt,
++ old_dentry, old_dir->d_inode, oldnd.mnt,
++ newname);
++
++ if (!error)
++ error = vfs_rename(old_dir->d_inode, old_dentry,
+ new_dir->d_inode, new_dentry);
++ if (!error)
++ gr_handle_rename(old_dir->d_inode, newnd.dentry->d_inode, old_dentry,
++ new_dentry, oldnd.mnt, new_dentry->d_inode ? 1 : 0);
+ exit5:
+ dput(new_dentry);
+ exit4:
+diff -urN linux-2.6.7/fs/namespace.c linux-2.6.7/fs/namespace.c
+--- linux-2.6.7/fs/namespace.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/fs/namespace.c 2004-06-25 14:07:21 -0400
+@@ -21,6 +21,8 @@
+ #include <linux/namei.h>
+ #include <linux/security.h>
+ #include <linux/mount.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
+@@ -402,6 +404,8 @@
+ lock_kernel();
+ retval = do_remount_sb(sb, MS_RDONLY, 0, 0);
+ unlock_kernel();
++
++ gr_log_remount(mnt->mnt_devname, retval);
+ }
+ up_write(&sb->s_umount);
+ return retval;
+@@ -430,6 +434,9 @@
+ if (retval)
+ security_sb_umount_busy(mnt);
+ up_write(&current->namespace->sem);
++
++ gr_log_unmount(mnt->mnt_devname, retval);
++
+ return retval;
+ }
+
+@@ -852,6 +859,11 @@
+ if (retval)
+ goto dput_out;
+
++ if (gr_handle_chroot_mount(nd.dentry, nd.mnt, dev_name)) {
++ retval = -EPERM;
++ goto dput_out;
++ }
++
+ if (flags & MS_REMOUNT)
+ retval = do_remount(&nd, flags & ~MS_REMOUNT, mnt_flags,
+ data_page);
+@@ -864,6 +876,9 @@
+ dev_name, data_page);
+ dput_out:
+ path_release(&nd);
++
++ gr_log_mount(dev_name, dir_name, retval);
++
+ return retval;
+ }
+
+@@ -1086,6 +1101,9 @@
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
++ if (gr_handle_chroot_pivot())
++ return -EPERM;
++
+ lock_kernel();
+
+ error = __user_walk(new_root, LOOKUP_FOLLOW|LOOKUP_DIRECTORY, &new_nd);
+diff -urN linux-2.6.7/fs/open.c linux-2.6.7/fs/open.c
+--- linux-2.6.7/fs/open.c 2004-06-16 01:18:56 -0400
++++ linux-2.6.7/fs/open.c 2004-06-25 14:07:21 -0400
+@@ -22,6 +22,7 @@
+ #include <asm/uaccess.h>
+ #include <linux/fs.h>
+ #include <linux/pagemap.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/unistd.h>
+
+@@ -191,7 +192,7 @@
+ return error;
+ }
+
+-int do_truncate(struct dentry *dentry, loff_t length)
++int do_truncate(struct dentry *dentry, loff_t length, struct vfsmount *mnt)
+ {
+ int err;
+ struct iattr newattrs;
+@@ -200,6 +201,9 @@
+ if (length < 0)
+ return -EINVAL;
+
++ if (!gr_acl_handle_truncate(dentry, mnt))
++ return -EACCES;
++
+ newattrs.ia_size = length;
+ newattrs.ia_valid = ATTR_SIZE | ATTR_CTIME;
+ down(&dentry->d_inode->i_sem);
+@@ -260,7 +264,7 @@
+ error = locks_verify_truncate(inode, NULL, length);
+ if (!error) {
+ DQUOT_INIT(inode);
+- error = do_truncate(nd.dentry, length);
++ error = do_truncate(nd.dentry, length, nd.mnt);
+ }
+ put_write_access(inode);
+
+@@ -312,7 +316,7 @@
+
+ error = locks_verify_truncate(inode, file, length);
+ if (!error)
+- error = do_truncate(dentry, length);
++ error = do_truncate(dentry, length, file->f_vfsmnt);
+ out_putf:
+ fput(file);
+ out:
+@@ -391,6 +395,11 @@
+ (error = permission(inode,MAY_WRITE,&nd)) != 0)
+ goto dput_and_out;
+ }
++ if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) {
++ error = -EACCES;
++ goto dput_and_out;
++ }
++
+ down(&inode->i_sem);
+ error = notify_change(nd.dentry, &newattrs);
+ up(&inode->i_sem);
+@@ -444,6 +453,12 @@
+ (error = permission(inode,MAY_WRITE,&nd)) != 0)
+ goto dput_and_out;
+ }
++
++ if (!gr_acl_handle_utime(nd.dentry, nd.mnt)) {
++ error = -EACCES;
++ goto dput_and_out;
++ }
++
+ down(&inode->i_sem);
+ error = notify_change(nd.dentry, &newattrs);
+ up(&inode->i_sem);
+@@ -505,6 +520,10 @@
+ if(!res && (mode & S_IWOTH) && IS_RDONLY(nd.dentry->d_inode)
+ && !special_file(nd.dentry->d_inode->i_mode))
+ res = -EROFS;
++
++ if (!res && !gr_acl_handle_access(nd.dentry, nd.mnt, mode))
++ res = -EACCES;
++
+ path_release(&nd);
+ }
+
+@@ -528,6 +547,8 @@
+ if (error)
+ goto dput_and_out;
+
++ gr_log_chdir(nd.dentry, nd.mnt);
++
+ set_fs_pwd(current->fs, nd.mnt, nd.dentry);
+
+ dput_and_out:
+@@ -558,6 +579,13 @@
+ goto out_putf;
+
+ error = permission(inode, MAY_EXEC, NULL);
++
++ if (!error && !gr_chroot_fchdir(dentry, mnt))
++ error = -EPERM;
++
++ if (!error)
++ gr_log_chdir(dentry, mnt);
++
+ if (!error)
+ set_fs_pwd(current->fs, mnt, dentry);
+ out_putf:
+@@ -583,8 +611,16 @@
+ if (!capable(CAP_SYS_CHROOT))
+ goto dput_and_out;
+
++ if (gr_handle_chroot_chroot(nd.dentry, nd.mnt))
++ goto dput_and_out;
++
+ set_fs_root(current->fs, nd.mnt, nd.dentry);
+ set_fs_altroot();
++
++ gr_handle_chroot_caps(current);
++
++ gr_handle_chroot_chdir(nd.dentry, nd.mnt);
++
+ error = 0;
+ dput_and_out:
+ path_release(&nd);
+@@ -613,9 +649,22 @@
+ err = -EPERM;
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ goto out_putf;
++
++ if (!gr_acl_handle_fchmod(dentry, file->f_vfsmnt, mode)) {
++ err = -EACCES;
++ goto out_putf;
++ }
++
+ down(&inode->i_sem);
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(dentry, file->f_vfsmnt, mode)) {
++ err = -EPERM;
++ up(&inode->i_sem);
++ goto out_putf;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ err = notify_change(dentry, &newattrs);
+@@ -647,9 +696,21 @@
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ goto dput_and_out;
+
++ if (!gr_acl_handle_chmod(nd.dentry, nd.mnt, mode)) {
++ error = -EACCES;
++ goto dput_and_out;
++ }
++
+ down(&inode->i_sem);
+ if (mode == (mode_t) -1)
+ mode = inode->i_mode;
++
++ if (gr_handle_chroot_chmod(nd.dentry, nd.mnt, mode)) {
++ error = -EACCES;
++ up(&inode->i_sem);
++ goto dput_and_out;
++ }
++
+ newattrs.ia_mode = (mode & S_IALLUGO) | (inode->i_mode & ~S_IALLUGO);
+ newattrs.ia_valid = ATTR_MODE | ATTR_CTIME;
+ error = notify_change(nd.dentry, &newattrs);
+@@ -661,7 +722,7 @@
+ return error;
+ }
+
+-static int chown_common(struct dentry * dentry, uid_t user, gid_t group)
++static int chown_common(struct dentry * dentry, uid_t user, gid_t group, struct vfsmount *mnt)
+ {
+ struct inode * inode;
+ int error;
+@@ -678,6 +739,12 @@
+ error = -EPERM;
+ if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
+ goto out;
++
++ if (!gr_acl_handle_chown(dentry, mnt)) {
++ error = -EACCES;
++ goto out;
++ }
++
+ newattrs.ia_valid = ATTR_CTIME;
+ if (user != (uid_t) -1) {
+ newattrs.ia_valid |= ATTR_UID;
+@@ -703,7 +770,7 @@
+
+ error = user_path_walk(filename, &nd);
+ if (!error) {
+- error = chown_common(nd.dentry, user, group);
++ error = chown_common(nd.dentry, user, group, nd.mnt);
+ path_release(&nd);
+ }
+ return error;
+@@ -716,7 +783,7 @@
+
+ error = user_path_walk_link(filename, &nd);
+ if (!error) {
+- error = chown_common(nd.dentry, user, group);
++ error = chown_common(nd.dentry, user, group, nd.mnt);
+ path_release(&nd);
+ }
+ return error;
+@@ -730,7 +797,8 @@
+
+ file = fget(fd);
+ if (file) {
+- error = chown_common(file->f_dentry, user, group);
++ error = chown_common(file->f_dentry, user,
++ group, file->f_vfsmnt);
+ fput(file);
+ }
+ return error;
+@@ -852,6 +920,7 @@
+ * N.B. For clone tasks sharing a files structure, this test
+ * will limit the total number of files that can be opened.
+ */
++ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
+ if (fd >= current->rlim[RLIMIT_NOFILE].rlim_cur)
+ goto out;
+
+diff -urN linux-2.6.7/fs/proc/array.c linux-2.6.7/fs/proc/array.c
+--- linux-2.6.7/fs/proc/array.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/fs/proc/array.c 2004-06-25 17:41:53 -0400
+@@ -274,6 +274,19 @@
+ cap_t(p->cap_effective));
+ }
+
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++static inline char *task_pax(struct task_struct *p, char *buffer)
++{
++ return buffer + sprintf(buffer, "PaX:\t%c%c%c%c%c%c\n",
++ p->flags & PF_PAX_PAGEEXEC ? 'P' : 'p',
++ p->flags & PF_PAX_EMUTRAMP ? 'E' : 'e',
++ p->flags & PF_PAX_MPROTECT ? 'M' : 'm',
++ p->flags & PF_PAX_RANDMMAP ? 'R' : 'r',
++ p->flags & PF_PAX_RANDEXEC ? 'X' : 'x',
++ p->flags & PF_PAX_SEGMEXEC ? 'S' : 's');
++}
++#endif
++
+ extern char *task_mem(struct mm_struct *, char *);
+ int proc_pid_status(struct task_struct *task, char * buffer)
+ {
+@@ -292,9 +305,20 @@
+ #if defined(CONFIG_ARCH_S390)
+ buffer = task_show_regs(task, buffer);
+ #endif
++
++#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
++ buffer = task_pax(task, buffer);
++#endif
++
+ return buffer - orig;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS (task->flags & PF_PAX_RANDMMAP || \
++ task->flags & PF_PAX_SEGMEXEC || \
++ task->flags & PF_PAX_RANDEXEC)
++#endif
++
+ extern unsigned long task_vsize(struct mm_struct *);
+ int proc_pid_stat(struct task_struct *task, char * buffer)
+ {
+@@ -326,6 +350,19 @@
+
+ wchan = get_wchan(task);
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ if (PAX_RAND_FLAGS) {
++ eip = 0;
++ esp = 0;
++ wchan = 0;
++ }
++#endif
++#ifdef CONFIG_GRKERNSEC_HIDESYM
++ wchan = 0;
++ eip =0;
++ esp =0;
++#endif
++
+ sigemptyset(&sigign);
+ sigemptyset(&sigcatch);
+ read_lock(&tasklist_lock);
+@@ -385,9 +422,15 @@
+ vsize,
+ mm ? mm->rss : 0, /* you might want to shift this left 3 */
+ task->rlim[RLIMIT_RSS].rlim_cur,
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS ? 0 : (mm ? mm->start_code : 0),
++ PAX_RAND_FLAGS ? 0 : (mm ? mm->end_code : 0),
++ PAX_RAND_FLAGS ? 0 : (mm ? mm->start_stack : 0),
++#else
+ mm ? mm->start_code : 0,
+ mm ? mm->end_code : 0,
+ mm ? mm->start_stack : 0,
++#endif
+ esp,
+ eip,
+ /* The signal information here is obsolete.
+@@ -427,3 +470,14 @@
+ return sprintf(buffer,"%d %d %d %d %d %d %d\n",
+ size, resident, shared, text, lib, data, 0);
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct *task, char * buffer)
++{
++ int len;
++
++ len = sprintf(buffer, "%u.%u.%u.%u\n", NIPQUAD(task->curr_ip));
++ return len;
++}
++#endif
++
+diff -urN linux-2.6.7/fs/proc/base.c linux-2.6.7/fs/proc/base.c
+--- linux-2.6.7/fs/proc/base.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/fs/proc/base.c 2004-06-25 14:07:21 -0400
+@@ -32,6 +32,7 @@
+ #include <linux/mount.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/grsecurity.h>
+
+ /*
+ * For hysterical raisins we keep the same inumbers as in the old procfs.
+@@ -67,6 +68,9 @@
+ PROC_TGID_ATTR_EXEC,
+ PROC_TGID_ATTR_FSCREATE,
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ PROC_TGID_IPADDR,
++#endif
+ PROC_TGID_FD_DIR,
+ PROC_TID_INO,
+ PROC_TID_STATUS,
+@@ -117,6 +121,9 @@
+ E(PROC_TGID_ROOT, "root", S_IFLNK|S_IRWXUGO),
+ E(PROC_TGID_EXE, "exe", S_IFLNK|S_IRWXUGO),
+ E(PROC_TGID_MOUNTS, "mounts", S_IFREG|S_IRUGO),
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ E(PROC_TGID_IPADDR, "ipaddr", S_IFREG|S_IRUSR),
++#endif
+ #ifdef CONFIG_SECURITY
+ E(PROC_TGID_ATTR, "attr", S_IFDIR|S_IRUGO|S_IXUGO),
+ #endif
+@@ -181,6 +188,9 @@
+ int proc_pid_status(struct task_struct*,char*);
+ int proc_pid_statm(struct task_struct*,char*);
+ int proc_pid_cpu(struct task_struct*,char*);
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++int proc_pid_ipaddr(struct task_struct*,char*);
++#endif
+
+ static int proc_fd_link(struct inode *inode, struct dentry **dentry, struct vfsmount **mnt)
+ {
+@@ -277,7 +287,7 @@
+ (task == current || \
+ (task->parent == current && \
+ (task->ptrace & PT_PTRACED) && task->state == TASK_STOPPED && \
+- security_ptrace(current,task) == 0))
++ security_ptrace(current,task) == 0 && !gr_handle_proc_ptrace(task)))
+
+ static int may_ptrace_attach(struct task_struct *task)
+ {
+@@ -292,13 +302,15 @@
+ (current->uid != task->uid) ||
+ (current->gid != task->egid) ||
+ (current->gid != task->sgid) ||
+- (current->gid != task->gid)) && !capable(CAP_SYS_PTRACE))
++ (current->gid != task->gid)) && !capable_nolog(CAP_SYS_PTRACE))
+ goto out;
+ rmb();
+- if (!task->mm->dumpable && !capable(CAP_SYS_PTRACE))
++ if (!task->mm->dumpable && !capable_nolog(CAP_SYS_PTRACE))
+ goto out;
+ if (security_ptrace(current, task))
+ goto out;
++ if (gr_handle_proc_ptrace(task))
++ goto out;
+
+ retval = 1;
+ out:
+@@ -445,9 +457,22 @@
+
+ static int proc_permission(struct inode *inode, int mask, struct nameidata *nd)
+ {
++ int ret;
++ struct task_struct *task;
++
+ if (vfs_permission(inode, mask) != 0)
+ return -EACCES;
+- return proc_check_root(inode);
++ ret = proc_check_root(inode);
++
++ if (ret)
++ return ret;
++
++ task = proc_task(inode);
++
++ if (!task)
++ return 0;
++
++ return gr_acl_handle_procpidmem(task);
+ }
+
+ extern struct seq_operations proc_pid_maps_op;
+@@ -954,6 +979,9 @@
+ inode->i_uid = task->euid;
+ inode->i_gid = task->egid;
+ }
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#endif
+ security_task_to_inode(task, inode);
+
+ out:
+@@ -982,7 +1010,9 @@
+ if (pid_alive(task)) {
+ if (proc_type(inode) == PROC_TGID_INO || proc_type(inode) == PROC_TID_INO || task_dumpable(task)) {
+ inode->i_uid = task->euid;
++#ifndef CONFIG_GRKERNSEC_PROC_USERGROUP
+ inode->i_gid = task->egid;
++#endif
+ } else {
+ inode->i_uid = 0;
+ inode->i_gid = 0;
+@@ -1318,6 +1348,12 @@
+ inode->i_fop = &proc_info_file_operations;
+ ei->op.proc_read = proc_pid_status;
+ break;
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ case PROC_TGID_IPADDR:
++ inode->i_fop = &proc_info_file_operations;
++ ei->op.proc_read = proc_pid_ipaddr;
++ break;
++#endif
+ case PROC_TID_STAT:
+ case PROC_TGID_STAT:
+ inode->i_fop = &proc_info_file_operations;
+@@ -1567,6 +1603,22 @@
+ if (!task)
+ goto out;
+
++ if (gr_check_hidden_task(task)) {
++ put_task_struct(task);
++ goto out;
++ }
++
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (current->uid && (task->uid != current->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ ) {
++ put_task_struct(task);
++ goto out;
++ }
++#endif
++
+ inode = proc_pid_make_inode(dir->i_sb, task, PROC_TGID_INO);
+
+
+@@ -1574,7 +1626,15 @@
+ put_task_struct(task);
+ goto out;
+ }
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR|S_IRGRP|S_IXGRP;
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
++#endif
+ inode->i_op = &proc_tgid_base_inode_operations;
+ inode->i_fop = &proc_tgid_base_operations;
+ inode->i_nlink = 3;
+@@ -1658,6 +1718,9 @@
+ static int get_tgid_list(int index, unsigned long version, unsigned int *tgids)
+ {
+ struct task_struct *p;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ struct task_struct *tmp = current;
++#endif
+ int nr_tgids = 0;
+
+ index--;
+@@ -1678,6 +1741,18 @@
+ int tgid = p->pid;
+ if (!pid_alive(p))
+ continue;
++ if (gr_pid_is_chrooted(p))
++ continue;
++ if (gr_check_hidden_task(p))
++ continue;
++#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
++ if (tmp->uid && (p->uid != tmp->uid)
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ && !in_group_p(CONFIG_GRKERNSEC_PROC_GID)
++#endif
++ )
++ continue;
++#endif
+ if (--index >= 0)
+ continue;
+ tgids[nr_tgids] = tgid;
+diff -urN linux-2.6.7/fs/proc/inode.c linux-2.6.7/fs/proc/inode.c
+--- linux-2.6.7/fs/proc/inode.c 2004-06-16 01:20:03 -0400
++++ linux-2.6.7/fs/proc/inode.c 2004-06-25 14:07:21 -0400
+@@ -209,7 +209,11 @@
+ if (de->mode) {
+ inode->i_mode = de->mode;
+ inode->i_uid = de->uid;
++#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
++ inode->i_gid = CONFIG_GRKERNSEC_PROC_GID;
++#else
+ inode->i_gid = de->gid;
++#endif
+ }
+ if (de->size)
+ inode->i_size = de->size;
+diff -urN linux-2.6.7/fs/proc/proc_misc.c linux-2.6.7/fs/proc/proc_misc.c
+--- linux-2.6.7/fs/proc/proc_misc.c 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/fs/proc/proc_misc.c 2004-06-25 14:07:21 -0400
+@@ -654,6 +654,8 @@
+ void __init proc_misc_init(void)
+ {
+ struct proc_dir_entry *entry;
++ int gr_mode = 0;
++
+ static struct {
+ char *name;
+ int (*read_proc)(char*,char**,off_t,int,int*,void*);
+@@ -668,9 +670,13 @@
+ #ifdef CONFIG_STRAM_PROC
+ {"stram", stram_read_proc},
+ #endif
++#ifndef CONFIG_GRKERNSEC_PROC_ADD
+ {"devices", devices_read_proc},
++#endif
+ {"filesystems", filesystems_read_proc},
++#ifndef CONFIG_GRKERNSEC_PROC_ADD
+ {"cmdline", cmdline_read_proc},
++#endif
+ #ifdef CONFIG_SGI_DS1286
+ {"rtc", ds1286_read_proc},
+ #endif
+@@ -681,24 +687,39 @@
+ for (p = simple_ones; p->name; p++)
+ create_proc_read_entry(p->name, 0, NULL, p->read_proc, NULL);
+
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ gr_mode = S_IRUSR;
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ gr_mode = S_IRUSR | S_IRGRP;
++#endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ create_proc_read_entry("devices", gr_mode, NULL, &devices_read_proc, NULL);
++ create_proc_read_entry("cmdline", gr_mode, NULL, &cmdline_read_proc, NULL);
++#endif
++
+ proc_symlink("mounts", NULL, "self/mounts");
+
+ /* And now for trickier ones */
+ entry = create_proc_entry("kmsg", S_IRUSR, &proc_root);
+ if (entry)
+ entry->proc_fops = &proc_kmsg_operations;
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++ create_seq_entry("cpuinfo", gr_mode, &proc_cpuinfo_operations);
++ create_seq_entry("slabinfo",gr_mode,&proc_slabinfo_operations);
++#else
+ create_seq_entry("cpuinfo", 0, &proc_cpuinfo_operations);
++ create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
++#endif
+ create_seq_entry("partitions", 0, &proc_partitions_operations);
+ create_seq_entry("stat", 0, &proc_stat_operations);
+ create_seq_entry("interrupts", 0, &proc_interrupts_operations);
+- create_seq_entry("slabinfo",S_IWUSR|S_IRUGO,&proc_slabinfo_operations);
+ create_seq_entry("buddyinfo",S_IRUGO, &fragmentation_file_operations);
+ create_seq_entry("vmstat",S_IRUGO, &proc_vmstat_file_operations);
+ create_seq_entry("diskstats", 0, &proc_diskstats_operations);
+ #ifdef CONFIG_MODULES
+- create_seq_entry("modules", 0, &proc_modules_operations);
++ create_seq_entry("modules", gr_mode, &proc_modules_operations);
+ #endif
+-#ifdef CONFIG_PROC_KCORE
++#if defined(CONFIG_PROC_KCORE)
+ proc_root_kcore = create_proc_entry("kcore", S_IRUSR, NULL);
+ if (proc_root_kcore) {
+ proc_root_kcore->proc_fops = &proc_kcore_operations;
+diff -urN linux-2.6.7/fs/proc/root.c linux-2.6.7/fs/proc/root.c
+--- linux-2.6.7/fs/proc/root.c 2004-06-16 01:19:44 -0400
++++ linux-2.6.7/fs/proc/root.c 2004-06-25 14:07:21 -0400
+@@ -52,13 +52,26 @@
+ return;
+ }
+ proc_misc_init();
++
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR, 0);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_net = proc_mkdir_mode("net", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, 0);
++#else
+ proc_net = proc_mkdir("net", 0);
++#endif
+ #ifdef CONFIG_SYSVIPC
+ proc_mkdir("sysvipc", 0);
+ #endif
+ #ifdef CONFIG_SYSCTL
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_sys_root = proc_mkdir_mode("sys", S_IRUSR | S_IXUSR, 0);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_sys_root = proc_mkdir_mode("sys", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, 0);
++#else
+ proc_sys_root = proc_mkdir("sys", 0);
+ #endif
++#endif
+ #if defined(CONFIG_BINFMT_MISC) || defined(CONFIG_BINFMT_MISC_MODULE)
+ proc_mkdir("sys/fs", 0);
+ proc_mkdir("sys/fs/binfmt_misc", 0);
+@@ -74,7 +87,15 @@
+ #ifdef CONFIG_PROC_DEVICETREE
+ proc_device_tree_init();
+ #endif
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, 0);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ proc_bus = proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, 0);
++#endif
++#else
+ proc_bus = proc_mkdir("bus", 0);
++#endif
+ }
+
+ static struct dentry *proc_root_lookup(struct inode * dir, struct dentry * dentry, struct nameidata *nd)
+diff -urN linux-2.6.7/fs/proc/task_mmu.c linux-2.6.7/fs/proc/task_mmu.c
+--- linux-2.6.7/fs/proc/task_mmu.c 2004-06-16 01:18:59 -0400
++++ linux-2.6.7/fs/proc/task_mmu.c 2004-06-25 17:41:53 -0400
+@@ -34,12 +34,23 @@
+ "VmData:\t%8lu kB\n"
+ "VmStk:\t%8lu kB\n"
+ "VmExe:\t%8lu kB\n"
+- "VmLib:\t%8lu kB\n",
+- mm->total_vm << (PAGE_SHIFT-10),
++ "VmLib:\t%8lu kB\n"
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
++#endif
++
++ ,mm->total_vm << (PAGE_SHIFT-10),
+ mm->locked_vm << (PAGE_SHIFT-10),
+ mm->rss << (PAGE_SHIFT-10),
+ data - stack, stack,
+- exec - lib, lib);
++ exec - lib, lib
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ , mm->context.user_cs_base, mm->context.user_cs_limit
++#endif
++
++);
+ up_read(&mm->mmap_sem);
+ return buffer;
+ }
+@@ -76,8 +87,17 @@
+ return size;
+ }
+
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++#define PAX_RAND_FLAGS (task->flags & PF_PAX_RANDMMAP || \
++ task->flags & PF_PAX_SEGMEXEC || \
++ task->flags & PF_PAX_RANDEXEC)
++#endif
++
+ static int show_map(struct seq_file *m, void *v)
+ {
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ struct task_struct *task = m->private;
++#endif
+ struct vm_area_struct *map = v;
+ struct file *file = map->vm_file;
+ int flags = map->vm_flags;
+@@ -92,8 +112,14 @@
+ }
+
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
++#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
++ PAX_RAND_FLAGS ? 0UL : map->vm_start,
++ PAX_RAND_FLAGS ? 0UL : map->vm_end,
++#else
+ map->vm_start,
+ map->vm_end,
++#endif
++
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
+diff -urN linux-2.6.7/fs/readdir.c linux-2.6.7/fs/readdir.c
+--- linux-2.6.7/fs/readdir.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/fs/readdir.c 2004-06-25 17:29:52 -0400
+@@ -15,6 +15,8 @@
+ #include <linux/dirent.h>
+ #include <linux/security.h>
+ #include <linux/unistd.h>
++#include <linux/namei.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+
+@@ -65,6 +67,7 @@
+ struct readdir_callback {
+ struct old_linux_dirent __user * dirent;
+ int result;
++ struct nameidata nd;
+ };
+
+ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -75,6 +78,10 @@
+
+ if (buf->result)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
++ return 0;
++
+ buf->result++;
+ dirent = buf->dirent;
+ if (!access_ok(VERIFY_WRITE, dirent,
+@@ -107,6 +114,9 @@
+ buf.result = 0;
+ buf.dirent = dirent;
+
++ buf.nd.dentry = file->f_dentry;
++ buf.nd.mnt = file->f_vfsmnt;
++
+ error = vfs_readdir(file, fillonedir, &buf);
+ if (error >= 0)
+ error = buf.result;
+@@ -134,6 +144,7 @@
+ struct linux_dirent __user * previous;
+ int count;
+ int error;
++ struct nameidata nd;
+ };
+
+ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -146,6 +157,10 @@
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -193,6 +208,9 @@
+ buf.count = count;
+ buf.error = 0;
+
++ buf.nd.dentry = file->f_dentry;
++ buf.nd.mnt = file->f_vfsmnt;
++
+ error = vfs_readdir(file, filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+@@ -218,6 +236,7 @@
+ struct linux_dirent64 __user * previous;
+ int count;
+ int error;
++ struct nameidata nd;
+ };
+
+ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
+@@ -230,6 +249,10 @@
+ buf->error = -EINVAL; /* only used if we fail.. */
+ if (reclen > buf->count)
+ return -EINVAL;
++
++ if (!gr_acl_handle_filldir(buf->nd.dentry, buf->nd.mnt, ino))
++ return 0;
++
+ dirent = buf->previous;
+ if (dirent) {
+ if (__put_user(offset, &dirent->d_off))
+@@ -279,6 +302,9 @@
+ buf.count = count;
+ buf.error = 0;
+
++ buf.nd.mnt = file->f_vfsmnt;
++ buf.nd.dentry = file->f_dentry;
++
+ error = vfs_readdir(file, filldir64, &buf);
+ if (error < 0)
+ goto out_putf;
+diff -urN linux-2.6.7/grsecurity/Kconfig linux-2.6.7/grsecurity/Kconfig
+--- linux-2.6.7/grsecurity/Kconfig 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/Kconfig 2004-08-03 17:49:27 -0400
+@@ -0,0 +1,877 @@
++#
++# grsecurity configuration
++#
++
++menu "Grsecurity"
++
++config GRKERNSEC
++ bool "Grsecurity"
++ select CRYPTO
++ select CRYPTO_SHA256
++ help
++ If you say Y here, you will be able to configure many features
++ that will enhance the security of your system. It is highly
++ recommended that you say Y here and read through the help
++ for each option so that you fully understand the features and
++ can evaluate their usefulness for your machine.
++
++choice
++ prompt "Security Level"
++ depends GRKERNSEC
++ default GRKERNSEC_CUSTOM
++
++config GRKERNSEC_LOW
++ bool "Low"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_RANDPID
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_RANDISN
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDID
++ select GRKERNSEC_CHROOT_CHDIR
++ help
++ If you choose this option, several of the grsecurity options will
++ be enabled that will give you greater protection against a number
++ of attacks, while assuring that none of your software will have any
++ conflicts with the additional security measures. If you run a lot
++ of unusual software, or you are having problems with the higher
++ security levels, you should say Y here. With this option, the
++ following features are enabled:
++
++ - Linking Restrictions
++ - FIFO Restrictions
++ - Randomized PIDs
++ - Enforcing RLIMIT_NPROC on execve
++ - Restricted dmesg
++ - Randomized IP IDs
++ - Enforced chdir("/") on chroot
++
++config GRKERNSEC_MEDIUM
++ bool "Medium"
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_HAVE_ACL_FLAGS
++ select GRKERNSEC_PROC_MEMMAP
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_RANDPID
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDID
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_RANDISN
++ select GRKERNSEC_RANDSRC
++ select GRKERNSEC_RANDRPC
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_USERGROUP
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++
++ help
++ If you say Y here, several features in addition to those included
++ in the low additional security level will be enabled. These
++ features provide even more security to your system, though in rare
++ cases they may be incompatible with very old or poorly written
++ software. If you enable this option, make sure that your auth
++ service (identd) is running as gid 1001. With this option,
++ the following features (in addition to those provided in the
++ low additional security level) will be enabled:
++
++ - Randomized TCP Source Ports
++ - Failed Fork Logging
++ - Time Change Logging
++ - Signal Logging
++ - Deny Mounts in chroot
++ - Deny Double chrooting
++ - Deny Sysctl Writes in chroot
++ - Deny Mknod in chroot
++ - Deny Access to Abstract AF_UNIX Sockets out of chroot
++ - Deny pivot_root in chroot
++ - Denied Writes of /dev/kmem, /dev/mem, and /dev/port
++ - /proc restrictions with special GID set to 10 (usually wheel)
++ - Address Space Layout Randomization (ASLR)
++
++config GRKERNSEC_HIGH
++ bool "High"
++ select GRKERNSEC_LINK
++ select GRKERNSEC_FIFO
++ select GRKERNSEC_RANDPID
++ select GRKERNSEC_EXECVE
++ select GRKERNSEC_DMESG
++ select GRKERNSEC_RANDID
++ select GRKERNSEC_RANDSRC
++ select GRKERNSEC_RANDRPC
++ select GRKERNSEC_FORKFAIL
++ select GRKERNSEC_TIME
++ select GRKERNSEC_SIGNAL
++ select GRKERNSEC_CHROOT_SHMAT
++ select GRKERNSEC_CHROOT_UNIX
++ select GRKERNSEC_CHROOT_MOUNT
++ select GRKERNSEC_CHROOT_FCHDIR
++ select GRKERNSEC_CHROOT_PIVOT
++ select GRKERNSEC_CHROOT_DOUBLE
++ select GRKERNSEC_CHROOT_CHDIR
++ select GRKERNSEC_CHROOT_MKNOD
++ select GRKERNSEC_CHROOT_CAPS
++ select GRKERNSEC_CHROOT_SYSCTL
++ select GRKERNSEC_CHROOT_FINDTASK
++ select GRKERNSEC_PROC
++ select GRKERNSEC_PROC_MEMMAP
++ select GRKERNSEC_HIDESYM
++ select GRKERNSEC_BRUTE
++ select GRKERNSEC_PROC_USERGROUP
++ select GRKERNSEC_KMEM
++ select GRKERNSEC_RESLOG
++ select GRKERNSEC_RANDNET
++ select GRKERNSEC_RANDISN
++ select GRKERNSEC_PROC_ADD
++ select GRKERNSEC_CHROOT_CHMOD
++ select GRKERNSEC_CHROOT_NICE
++ select GRKERNSEC_AUDIT_MOUNT
++ select PAX_RANDUSTACK
++ select PAX_ASLR
++ select PAX_RANDMMAP
++ select PAX_NOEXEC
++ select PAX_MPROTECT
++ select PAX_EI_PAX
++ select PAX_PT_PAX_FLAGS
++ select PAX_HAVE_ACL_FLAGS
++ select PAX_KERNEXEC
++ select PAX_RANDKSTACK
++ select PAX_RANDEXEC
++ select PAX_SEGMEXEC
++ select PAX_EMUTRAMP
++ select PAX_NOVSYSCALL
++ help
++ If you say Y here, many of the features of grsecurity will be
++ enabled, which will protect you against many kinds of attacks
++ against your system. The heightened security comes at a cost
++ of an increased chance of incompatibilities with rare software
++ on your machine. Since this security level enables PaX, you should
++ view <http://pax.grsecurity.net> and read about the PaX
++ project. While you are there, download chpax and run it on
++ binaries that cause problems with PaX. Also remember that
++ since the /proc restrictions are enabled, you must run your
++ identd as gid 1001. This security level enables the following
++ features in addition to those listed in the low and medium
++ security levels:
++
++ - Additional /proc Restrictions
++ - Chmod Restrictions in chroot
++ - No Signals, Ptrace, or Viewing of Processes Outside of chroot
++ - Capability Restrictions in chroot
++ - Deny fchdir out of chroot
++ - Priority Restrictions in chroot
++ - Segmentation-based Implementation of PaX
++ - Mprotect Restrictions
++ - Removal of Addresses from /proc/<pid>/[maps|stat]
++ - Kernel Stack Randomization
++ - Mount/Unmount/Remount Logging
++ - Kernel Symbol Hiding
++
++config GRKERNSEC_CUSTOM
++ bool "Custom"
++ help
++ If you say Y here, you will be able to configure every grsecurity
++ option, which allows you to enable many more features that aren't
++ covered in the basic security levels. These additional features
++ include TPE, socket restrictions, and the sysctl system for
++ grsecurity. It is advised that you read through the help for
++ each option to determine its usefulness in your situation.
++
++endchoice
++
++menu "Address Space Protection"
++depends on GRKERNSEC
++
++config GRKERNSEC_KMEM
++ bool "Deny writing to /dev/kmem, /dev/mem, and /dev/port"
++ help
++ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
++ be written to via mmap or otherwise to modify the running kernel.
++ /dev/port will also not be allowed to be opened. If you have module
++ support disabled, enabling this will close up four ways that are
++ currently used to insert malicious code into the running kernel.
++ Even with all these features enabled, we still highly recommend that
++ you use the ACL system, as it is still possible for an attacker to
++ modify the running kernel through privileged I/O granted by ioperm/iopl.
++ If you are not using XFree86, you may be able to stop this additional
++ case by enabling the 'Disable privileged I/O' option. Though nothing
++ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
++ but only to video memory, which is the only writing we allow in this
++ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
++ not be allowed to mprotect it with PROT_WRITE later.
++ Enabling this feature could make certain apps like VMWare stop working,
++ as they need to write to other locations in /dev/mem.
++ It is highly recommended that you say Y here if you meet all the
++ conditions above.
++
++config GRKERNSEC_IO
++ bool "Disable privileged I/O"
++ depends on X86
++ select RTC
++ help
++ If you say Y here, all ioperm and iopl calls will return an error.
++ Ioperm and iopl can be used to modify the running kernel.
++ Unfortunately, some programs need this access to operate properly,
++ the most notable of which are XFree86 and hwclock. hwclock can be
++ remedied by having RTC support in the kernel, so CONFIG_RTC is
++ enabled if this option is enabled, to ensure that hwclock operates
++ correctly. XFree86 still will not operate correctly with this option
++ enabled, so DO NOT CHOOSE Y IF YOU USE XFree86. If you use XFree86
++ and you still want to protect your kernel against modification,
++ use the ACL system.
++
++config GRKERNSEC_PROC_MEMMAP
++ bool "Remove addresses from /proc/<pid>/[maps|stat]"
++ help
++ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
++ give no information about the addresses of the task's mappings if
++ PaX features that rely on random addresses are enabled on the task.
++ If you use PaX it is greatly recommended that you say Y here as it
++ closes up a hole that makes the full ASLR useless for suid
++ binaries.
++
++config GRKERNSEC_BRUTE
++ bool "Deter exploit bruteforcing"
++ help
++ If you say Y here, attempts to bruteforce exploits against forking
++ daemons such as apache or sshd will be deterred. When a child of a
++ forking daemon is killed by PaX or crashes due to an illegal
++ instruction, the parent process will be delayed 30 seconds upon every
++ subsequent fork until the administrator is able to assess the
++ situation and restart the daemon. It is recommended that you also
++ enable signal logging in the auditing section so that logs are
++ generated when a process performs an illegal instruction.
++
++config GRKERNSEC_HIDESYM
++ bool "Hide kernel symbols"
++ help
++ If you say Y here, getting information on loaded modules, and
++ displaying all kernel symbols through a syscall will be restricted
++ to users with CAP_SYS_MODULE. This option is only effective
++ provided the following conditions are met:
++ 1) The kernel using grsecurity is not precompiled by some distribution
++ 2) You are using the ACL system and hiding other files such as your
++ kernel image and System.map
++ 3) You have the additional /proc restrictions enabled, which removes
++ /proc/kcore
++ If the above conditions are met, this option will aid to provide a
++ useful protection against local and remote kernel exploitation of
++ overflows and arbitrary read/write vulnerabilities.
++
++endmenu
++menu "Role Based Access Control Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_ACL_HIDEKERN
++ bool "Hide kernel processes"
++ help
++ If you say Y here, when the RBAC system is enabled via gradm -E,
++ an additional ACL will be passed to the kernel that hides all kernel
++ processes. These processes will only be viewable by the authenticated
++ admin, or processes that have viewing access set.
++
++config GRKERNSEC_ACL_MAXTRIES
++ int "Maximum tries before password lockout"
++ default 3
++ help
++ This option enforces the maximum number of times a user can attempt
++ to authorize themselves with the grsecurity ACL system before being
++ denied the ability to attempt authorization again for a specified time.
++ The lower the number, the harder it will be to brute-force a password.
++
++config GRKERNSEC_ACL_TIMEOUT
++ int "Time to wait after max password tries, in seconds"
++ default 30
++ help
++ This option specifies the time the user must wait after attempting to
++ authorize to the ACL system with the maximum number of invalid
++ passwords. The higher the number, the harder it will be to brute-force
++ a password.
++
++endmenu
++menu "Filesystem Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_PROC
++ bool "Proc restrictions"
++ help
++ If you say Y here, the permissions of the /proc filesystem
++ will be altered to enhance system security and privacy. Depending
++ upon the options you choose, you can either restrict users to see
++ only the processes they themselves run, or choose a group that can
++ view all processes and files normally restricted to root if you choose
++ the "allow special group" option. NOTE: If you're running identd as
++ a non-root user, you will have to run it as the group you specify here.
++
++config GRKERNSEC_PROC_USER
++ bool "Restrict /proc to user only"
++ depends on GRKERNSEC_PROC
++ help
++ If you say Y here, non-root users will only be able to view their own
++ processes, and will be restricted from viewing network-related information
++ and kernel symbol and module information.
++
++config GRKERNSEC_PROC_USERGROUP
++ bool "Allow special group"
++ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
++ help
++ If you say Y here, you will be able to select a group that will be
++ able to view all processes, network-related information, and
++ kernel and symbol information. This option is useful if you want
++ to run identd as a non-root user.
++
++config GRKERNSEC_PROC_GID
++ int "GID for special group"
++ depends on GRKERNSEC_PROC_USERGROUP
++ default 1001
++
++config GRKERNSEC_PROC_ADD
++ bool "Additional restrictions"
++ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
++ help
++ If you say Y here, additional restrictions will be placed on
++ /proc that keep normal users from viewing cpu and device information.
++
++config GRKERNSEC_LINK
++ bool "Linking restrictions"
++ help
++ If you say Y here, /tmp race exploits will be prevented, since users
++ will no longer be able to follow symlinks owned by other users in
++ world-writable +t directories (i.e. /tmp), unless the owner of the
++ symlink is the owner of the directory. Users will also not be
++ able to hardlink to files they do not own. If the sysctl option is
++ enabled, a sysctl option with name "linking_restrictions" is created.
++
++config GRKERNSEC_FIFO
++ bool "FIFO restrictions"
++ help
++ If you say Y here, users will not be able to write to FIFOs they don't
++ own in world-writable +t directories (i.e. /tmp), unless the owner of
++ the FIFO is the same as the owner of the directory it's held in. If the sysctl
++ option is enabled, a sysctl option with name "fifo_restrictions" is
++ created.
++
++config GRKERNSEC_CHROOT
++ bool "Chroot jail restrictions"
++ help
++ If you say Y here, you will be able to choose several options that will
++ make breaking out of a chrooted jail much more difficult. If you
++ encounter no software incompatibilities with the following options, it
++ is recommended that you enable each one.
++
++config GRKERNSEC_CHROOT_MOUNT
++ bool "Deny mounts"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ mount or remount filesystems. If the sysctl option is enabled, a
++ sysctl option with name "chroot_deny_mount" is created.
++
++config GRKERNSEC_CHROOT_DOUBLE
++ bool "Deny double-chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chroot
++ again outside the chroot. This is a widely used method of breaking
++ out of a chroot jail and should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name
++ "chroot_deny_chroot" is created.
++
++config GRKERNSEC_CHROOT_PIVOT
++ bool "Deny pivot_root in chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to use
++ a function called pivot_root() that was introduced in Linux 2.3.41. It
++ works similarly to chroot in that it changes the root filesystem. This
++ function could be misused in a chrooted process to attempt to break out
++ of the chroot, and therefore should not be allowed. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_pivot" is
++ created.
++
++config GRKERNSEC_CHROOT_CHDIR
++ bool "Enforce chdir(\"/\") on all chroots"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the current working directory of all newly-chrooted
++ applications will be set to the root directory of the chroot.
++ The man page on chroot(2) states:
++ Note that this call does not change the current working
++ directory, so that `.' can be outside the tree rooted at
++ `/'. In particular, the super-user can escape from a
++ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
++
++ It is recommended that you say Y here, since it's not known to break
++ any software. If the sysctl option is enabled, a sysctl option with
++ name "chroot_enforce_chdir" is created.
++
++config GRKERNSEC_CHROOT_CHMOD
++ bool "Deny (f)chmod +s"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to chmod
++ or fchmod files to make them have suid or sgid bits. This protects
++ against another published method of breaking a chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_deny_chmod" is
++ created.
++
++config GRKERNSEC_CHROOT_FCHDIR
++ bool "Deny fchdir out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, a well-known method of breaking chroots by fchdir'ing
++ to a file descriptor of the chrooting process that points to a directory
++ outside the filesystem will be stopped. If the sysctl option
++ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
++
++config GRKERNSEC_CHROOT_MKNOD
++ bool "Deny mknod"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be allowed to
++ mknod. The problem with using mknod inside a chroot is that it
++ would allow an attacker to create a device entry that is the same
++ as one on the physical root of your system, which could range from
++ anything from the console device to a device for your harddrive (which
++ they could then use to wipe the drive or steal data). It is recommended
++ that you say Y here, unless you run into software incompatibilities.
++ If the sysctl option is enabled, a sysctl option with name
++ "chroot_deny_mknod" is created.
++
++config GRKERNSEC_CHROOT_SHMAT
++ bool "Deny shmat() out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to attach
++ to shared memory segments that were created outside of the chroot jail.
++ It is recommended that you say Y here. If the sysctl option is enabled,
++ a sysctl option with name "chroot_deny_shmat" is created.
++
++config GRKERNSEC_CHROOT_UNIX
++ bool "Deny access to abstract AF_UNIX sockets out of chroot"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ connect to abstract (meaning not belonging to a filesystem) Unix
++ domain sockets that were bound outside of a chroot. It is recommended
++ that you say Y here. If the sysctl option is enabled, a sysctl option
++ with name "chroot_deny_unix" is created.
++
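
For reference, an "abstract" AF_UNIX socket is one whose name lives in the kernel's abstract namespace (the first byte of sun_path is NUL) rather than on the filesystem, which is why an ordinary chroot does not hide it from a jailed process. A minimal user-space sketch of binding one (the socket name below is only an example):

	#include <sys/socket.h>
	#include <sys/un.h>
	#include <string.h>
	#include <unistd.h>
	#include <stdio.h>

	int main(void)
	{
		int fd = socket(AF_UNIX, SOCK_STREAM, 0);
		struct sockaddr_un sa;

		if (fd < 0) {
			perror("socket");
			return 1;
		}

		memset(&sa, 0, sizeof(sa));
		sa.sun_family = AF_UNIX;
		/* a leading NUL byte selects the abstract namespace */
		strncpy(sa.sun_path + 1, "demo-abstract-socket", sizeof(sa.sun_path) - 2);

		if (bind(fd, (struct sockaddr *) &sa, sizeof(sa)) != 0)
			perror("bind");
		else
			puts("bound an abstract AF_UNIX socket (no filesystem entry)");

		close(fd);
		return 0;
	}
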
++config GRKERNSEC_CHROOT_FINDTASK
++ bool "Protect outside processes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to
++ kill, send signals with fcntl, ptrace, capget, setpgid, getpgid,
++ getsid, or view any process outside of the chroot. If the sysctl
++ option is enabled, a sysctl option with name "chroot_findtask" is
++ created.
++
++config GRKERNSEC_CHROOT_NICE
++ bool "Restrict priority changes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, processes inside a chroot will not be able to raise
++ the priority of processes in the chroot, or alter the priority of
++ processes outside the chroot. This provides more security than simply
++ removing CAP_SYS_NICE from the process' capability set. If the
++ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
++ is created.
++
++config GRKERNSEC_CHROOT_SYSCTL
++ bool "Deny sysctl writes"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, an attacker in a chroot will not be able to
++ write to sysctl entries, either by sysctl(2) or through a /proc
++ interface. It is strongly recommended that you say Y here. If the
++ sysctl option is enabled, a sysctl option with name
++ "chroot_deny_sysctl" is created.
++
++config GRKERNSEC_CHROOT_CAPS
++ bool "Capability restrictions"
++ depends on GRKERNSEC_CHROOT
++ help
++ If you say Y here, the capabilities on all root processes within a
++ chroot jail will be lowered to stop module insertion, raw i/o,
++ system and net admin tasks, rebooting the system, modifying immutable
++ files, modifying IPC owned by another, and changing the system time.
++ This is left as an option because it can break some apps. Disable this
++ if your chrooted apps are having problems performing those kinds of
++ tasks. If the sysctl option is enabled, a sysctl option with
++ name "chroot_caps" is created.
++
++endmenu
++menu "Kernel Auditing"
++depends on GRKERNSEC
++
++config GRKERNSEC_AUDIT_GROUP
++ bool "Single group for auditing"
++ help
++ If you say Y here, the exec, chdir, (un)mount, and ipc logging features
++ will only operate on a group you specify. This option is recommended
++ if you only want to watch certain users instead of having a large
++ number of log entries from the entire system. If the sysctl option is enabled,
++ a sysctl option with name "audit_group" is created.
++
++config GRKERNSEC_AUDIT_GID
++ int "GID for auditing"
++ depends on GRKERNSEC_AUDIT_GROUP
++ default 1007
++
++config GRKERNSEC_EXECLOG
++ bool "Exec logging"
++ help
++ If you say Y here, all execve() calls will be logged (since the
++ other exec*() calls are frontends to execve(), all execution
++ will be logged). Useful for shell-servers that like to keep track
++ of their users. If the sysctl option is enabled, a sysctl option with
++ name "exec_logging" is created.
++ WARNING: When enabled, this option will produce a LOT of logs, especially
++ on an active system.
++
++config GRKERNSEC_RESLOG
++ bool "Resource logging"
++ help
++ If you say Y here, all attempts to overstep resource limits will
++ be logged with the resource name, the requested size, and the current
++ limit. It is highly recommended that you say Y here.
++
++config GRKERNSEC_CHROOT_EXECLOG
++ bool "Log execs within chroot"
++ help
++ If you say Y here, all executions inside a chroot jail will be logged
++ to syslog. This can cause a large number of log entries if certain
++ applications (e.g. djb's daemontools) are installed on the system, and
++ is therefore left as an option. If the sysctl option is enabled, a
++ sysctl option with name "chroot_execlog" is created.
++
++config GRKERNSEC_AUDIT_CHDIR
++ bool "Chdir logging"
++ help
++ If you say Y here, all chdir() calls will be logged. If the sysctl
++ option is enabled, a sysctl option with name "audit_chdir" is created.
++
++config GRKERNSEC_AUDIT_MOUNT
++ bool "(Un)Mount logging"
++ help
++ If you say Y here, all mounts and unmounts will be logged. If the
++ sysctl option is enabled, a sysctl option with name "audit_mount" is
++ created.
++
++config GRKERNSEC_AUDIT_IPC
++ bool "IPC logging"
++ help
++ If you say Y here, creation and removal of message queues, semaphores,
++ and shared memory will be logged. If the sysctl option is enabled, a
++ sysctl option with name "audit_ipc" is created.
++
++config GRKERNSEC_SIGNAL
++ bool "Signal logging"
++ help
++ If you say Y here, certain important signals will be logged, such as
++ SIGSEGV, which will inform you when an error occurred in a program,
++ which in some cases could indicate a possible exploit attempt.
++ If the sysctl option is enabled, a sysctl option with name
++ "signal_logging" is created.
++
++config GRKERNSEC_FORKFAIL
++ bool "Fork failure logging"
++ help
++ If you say Y here, all failed fork() attempts will be logged.
++ This could suggest a fork bomb, or someone attempting to overstep
++ their process limit. If the sysctl option is enabled, a sysctl option
++ with name "forkfail_logging" is created.
++
++config GRKERNSEC_TIME
++ bool "Time change logging"
++ help
++ If you say Y here, any changes of the system clock will be logged.
++ If the sysctl option is enabled, a sysctl option with name
++ "timechange_logging" is created.
++
++config GRKERNSEC_PROC_IPADDR
++ bool "/proc/<pid>/ipaddr support"
++ help
++ If you say Y here, a new entry will be added to each /proc/<pid>
++ directory that contains the IP address of the person using the task.
++ The IP is carried across local TCP and AF_UNIX stream sockets.
++ This information can be useful for IDS/IPSes to perform remote response
++ to a local attack. The entry is readable by only the owner of the
++ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
++ the RBAC system), and thus does not create privacy concerns.
++
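
A small sketch of how an IDS/IPS helper could consume the new entry; it assumes the file holds a printable address string, while the exact format is whatever the patch writes there:

	#include <stdio.h>

	int main(int argc, char **argv)
	{
		char path[64], buf[64];
		FILE *f;

		if (argc != 2) {
			fprintf(stderr, "usage: %s <pid>\n", argv[0]);
			return 1;
		}

		snprintf(path, sizeof(path), "/proc/%s/ipaddr", argv[1]);
		f = fopen(path, "r");
		if (!f) {
			perror(path);
			return 1;
		}
		if (fgets(buf, sizeof(buf), f))
			printf("origin of pid %s: %s", argv[1], buf);
		fclose(f);
		return 0;
	}
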
++config GRKERNSEC_AUDIT_TEXTREL
++ bool 'ELF text relocations logging (READ HELP)'
++ depends on PAX_MPROTECT
++ help
++ If you say Y here, text relocations will be logged with the filename
++ of the offending library or binary. The purpose of the feature is
++ to help Linux distribution developers get rid of libraries and
++ binaries that need text relocations which hinder the future progress
++ of PaX. Only Linux distribution developers should say Y here, and
++ never on a production machine, as this option creates an information
++ leak that could aid an attacker in defeating the randomization of
++ a single memory region. If the sysctl option is enabled, a sysctl
++ option with name "audit_textrel" is created.
++
++endmenu
++
++menu "Executable Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_EXECVE
++ bool "Enforce RLIMIT_NPROC on execs"
++ help
++ If you say Y here, users with a resource limit on processes will
++ have the value checked during execve() calls. The current system
++ only checks the system limit during fork() calls. If the sysctl option
++ is enabled, a sysctl option with name "execve_limiting" is created.
++
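
The limit in question is the ordinary RLIMIT_NPROC resource limit. The sketch below only shows how that limit is set from user space; the comments describe the extra check this option adds, and the exact error the patched execve() returns is not specified here:

	#include <sys/resource.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		struct rlimit rl;

		getrlimit(RLIMIT_NPROC, &rl);
		printf("RLIMIT_NPROC soft=%lu hard=%lu\n",
		       (unsigned long) rl.rlim_cur, (unsigned long) rl.rlim_max);

		/* lower the soft limit for this process and its children */
		rl.rlim_cur = 10;
		if (setrlimit(RLIMIT_NPROC, &rl) != 0)
			perror("setrlimit");

		/* a stock kernel only enforces the limit at fork() time; with
		   GRKERNSEC_EXECVE an execve() made while the user already has
		   10 or more processes is refused as well */
		execlp("true", "true", (char *) NULL);
		perror("execlp");
		return 1;
	}
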
++config GRKERNSEC_DMESG
++ bool "Dmesg(8) restriction"
++ help
++ If you say Y here, non-root users will not be able to use dmesg(8)
++ to view up to the last 4kb of messages in the kernel's log buffer.
++ If the sysctl option is enabled, a sysctl option with name "dmesg" is
++ created.
++
++config GRKERNSEC_RANDPID
++ bool "Randomized PIDs"
++ help
++ If you say Y here, all PIDs created on the system will be
++ pseudo-randomly generated. Combined with the /proc restrictions,
++ this is extremely effective at keeping an attacker from guessing the
++ PIDs of daemons, etc. PIDs are also used in some cases as part
++ of a naming system for temporary files, so this option keeps
++ those filenames from being predicted as well. We also use code
++ to make sure that PID numbers aren't reused too soon. If the sysctl
++ option is enabled, a sysctl option with name "rand_pids" is created.
++
++config GRKERNSEC_TPE
++ bool "Trusted Path Execution (TPE)"
++ help
++ If you say Y here, you will be able to choose a gid to add to the
++ supplementary groups of users you want to mark as "untrusted."
++ These users will not be able to execute any files that are not in
++ root-owned directories writable only by root. If the sysctl option
++ is enabled, a sysctl option with name "tpe" is created.
++
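
The directory rule that TPE applies can be stated in a few lines. The sketch below is a user-space illustration of that rule only, with a hypothetical dir_is_trusted() helper; the real check runs in the kernel at exec time and also consults the TPE GID chosen below:

	#include <sys/stat.h>
	#include <stdio.h>

	/* TPE rule from the help text: an untrusted user may only execute
	 * files living in root-owned directories writable only by root. */
	static int dir_is_trusted(const char *dir)
	{
		struct stat st;

		if (stat(dir, &st) != 0)
			return 0;
		return st.st_uid == 0 && !(st.st_mode & (S_IWGRP | S_IWOTH));
	}

	int main(void)
	{
		printf("/usr/bin trusted: %d\n", dir_is_trusted("/usr/bin"));
		printf("/tmp     trusted: %d\n", dir_is_trusted("/tmp"));
		return 0;
	}
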
++config GRKERNSEC_TPE_ALL
++ bool "Partially restrict non-root users"
++ depends on GRKERNSEC_TPE
++ help
++ If you say Y here, all non-root users other than the ones in the
++ group specified in the main TPE option will only be allowed to
++ execute files in directories they own that are not group or
++ world-writable, or in directories owned by root and writable only by
++ root. If the sysctl option is enabled, a sysctl option with name
++ "tpe_restrict_all" is created.
++
++config GRKERNSEC_TPE_GID
++ int "GID for untrusted users"
++ depends on GRKERNSEC_TPE
++ default 1005
++ help
++ Here you can choose the GID to enable trusted path protection for.
++ Remember to add the users you want protection enabled for to the GID
++ specified here. If the sysctl option is enabled, whatever you choose
++ here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "tpe_gid" is created.
++
++endmenu
++menu "Network Protections"
++depends on GRKERNSEC
++
++config GRKERNSEC_RANDNET
++ bool "Larger entropy pools"
++ help
++ If you say Y here, the entropy pools used for many features of Linux
++ and grsecurity will be doubled in size. Since several grsecurity
++ features use additional randomness, it is recommended that you say Y
++ here. Saying Y here has a similar effect as modifying
++ /proc/sys/kernel/random/poolsize.
++
++config GRKERNSEC_RANDISN
++ bool "Truly random TCP ISN selection"
++ help
++ If you say Y here, Linux's default selection of TCP Initial Sequence
++ Numbers (ISNs) will be replaced with that of OpenBSD. Linux uses
++ an MD4 hash based on the connection plus a time value to create the
++ ISN, while OpenBSD's selection is random. If the sysctl option is
++ enabled, a sysctl option with name "rand_isns" is created.
++
++config GRKERNSEC_RANDID
++ bool "Randomized IP IDs"
++ help
++ If you say Y here, the IP ID field of all outgoing packets
++ will be randomized. This hinders OS fingerprinting and
++ keeps your machine from being used as a bounce for an untraceable
++ portscan. IDs are used for fragmented packets; fragments belonging
++ to the same packet have the same ID. By default Linux only
++ increments the ID value on each packet sent to an individual host.
++ We use a port of the OpenBSD random IP ID code to achieve the
++ randomness, while keeping the possibility of ID duplicates to
++ near none. If the sysctl option is enabled, a sysctl option with name
++ "rand_ip_ids" is created.
++
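
For intuition only, here is a naive way of picking IDs at random while avoiding recent duplicates. This is not the OpenBSD generator the patch actually ports, just an illustration of the requirement described above:

	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <time.h>

	#define RECENT 32		/* remember the last few IDs handed out */

	static uint16_t recent[RECENT];
	static unsigned int recent_pos;

	static int recently_used(uint16_t id)
	{
		unsigned int i;

		for (i = 0; i < RECENT; i++)
			if (recent[i] == id)
				return 1;
		return 0;
	}

	static uint16_t random_ip_id(void)
	{
		uint16_t id;

		do {
			id = (uint16_t) (rand() & 0xffff);
		} while (id == 0 || recently_used(id));

		recent[recent_pos] = id;
		recent_pos = (recent_pos + 1) % RECENT;
		return id;
	}

	int main(void)
	{
		int i;

		srand((unsigned) time(NULL));
		for (i = 0; i < 5; i++)
			printf("IP ID: %u\n", random_ip_id());
		return 0;
	}
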
++config GRKERNSEC_RANDSRC
++ bool "Randomized TCP source ports"
++ default n if GRKERNSEC_LOW || GRKERNSEC_MID
++ default y if GRKERNSEC_HIGH
++ help
++ If you say Y here, situations where a source port is generated on the
++ fly for the TCP protocol (i.e. with connect()) will be altered so that
++ the source port is generated at random instead of by a simple incrementing
++ algorithm. If the sysctl option is enabled, a sysctl option with name
++ "rand_tcp_src_ports" is created.
++
++config GRKERNSEC_RANDRPC
++ bool "Randomized RPC XIDs"
++ help
++ If you say Y here, the method of determining XIDs for RPC requests will
++ be randomized, instead of using Linux's default behavior of simply
++ incrementing the XID. If you want your RPC connections to be more
++ secure, say Y here. If the sysctl option is enabled, a sysctl option
++ with name "rand_rpc" is created.
++
++config GRKERNSEC_SOCKET
++ bool "Socket restrictions"
++ help
++ If you say Y here, you will be able to choose from several options.
++ If you assign a GID on your system and add it to the supplementary
++ groups of users you want to restrict socket access to, this patch
++ will apply up to three restrictions, based on the option(s) you choose.
++
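
The mechanism behind all three socket options is a supplementary group membership test made when a socket is created. The in-kernel check is the patch's own; the sketch below is only the user-space equivalent of that test, and 1004 is just the Kconfig default shown below for the "deny all sockets" group:

	#include <limits.h>
	#include <stdio.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int in_supplementary_group(gid_t gid)
	{
		gid_t groups[NGROUPS_MAX];
		int i, n = getgroups(NGROUPS_MAX, groups);

		for (i = 0; i < n; i++)
			if (groups[i] == gid)
				return 1;
		return 0;
	}

	int main(void)
	{
		printf("in socket-deny group: %d\n", in_supplementary_group(1004));
		return 0;
	}
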
++config GRKERNSEC_SOCKET_ALL
++ bool "Deny any sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine or run server
++ applications from your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_all" is created.
++
++config GRKERNSEC_SOCKET_ALL_GID
++ int "GID to deny all sockets for"
++ depends on GRKERNSEC_SOCKET_ALL
++ default 1004
++ help
++ Here you can choose the GID to disable socket access for. Remember to
++ add the users you want socket access disabled for to the GID
++ specified here. If the sysctl option is enabled, whatever you choose
++ here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "socket_all_gid" is created.
++
++config GRKERNSEC_SOCKET_CLIENT
++ bool "Deny client sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to connect to other hosts from your machine, but will be
++ able to run servers. If this option is enabled, all users in the group
++ you specify will have to use passive mode when initiating ftp transfers
++ from the shell on your machine. If the sysctl option is enabled, a
++ sysctl option with name "socket_client" is created.
++
++config GRKERNSEC_SOCKET_CLIENT_GID
++ int "GID to deny client sockets for"
++ depends on GRKERNSEC_SOCKET_CLIENT
++ default 1003
++ help
++ Here you can choose the GID to disable client socket access for.
++ Remember to add the users you want client socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, whatever you
++ choose here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "socket_client_gid" is created.
++
++config GRKERNSEC_SOCKET_SERVER
++ bool "Deny server sockets to group"
++ depends on GRKERNSEC_SOCKET
++ help
++ If you say Y here, you will be able to choose a GID whose users will
++ be unable to run server applications from your machine. If the sysctl
++ option is enabled, a sysctl option with name "socket_server" is created.
++
++config GRKERNSEC_SOCKET_SERVER_GID
++ int "GID to deny server sockets for"
++ depends on GRKERNSEC_SOCKET_SERVER
++ default 1002
++ help
++ Here you can choose the GID to disable server socket access for.
++ Remember to add the users you want server socket access disabled for to
++ the GID specified here. If the sysctl option is enabled, whatever you
++ choose here won't matter. You'll have to specify the GID in your bootup
++ script by echoing the GID to the proper /proc entry. View the help
++ on the sysctl option for more information. If the sysctl option is
++ enabled, a sysctl option with name "socket_server_gid" is created.
++
++endmenu
++menu "Sysctl support"
++depends on GRKERNSEC && SYSCTL
++
++config GRKERNSEC_SYSCTL
++ bool "Sysctl support"
++ help
++ If you say Y here, you will be able to change the options that
++ grsecurity runs with at bootup, without having to recompile your
++ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
++ to enable (1) or disable (0) various features. All the sysctl entries
++ are mutable until the "grsec_lock" entry is set to a non-zero value.
++ All features are disabled by default. Please note that this option could
++ reduce the effectiveness of the added security of this patch if an ACL
++ system is not put in place. Your init scripts should be read-only, and
++ root should not be able to load modules or perform raw I/O
++ operations. All options should be set at startup, and the grsec_lock
++ entry should be set to a non-zero value after all the options are set.
++ *THIS IS EXTREMELY IMPORTANT*
++
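
What an init script, or a small helper like the one sketched here, is expected to do: write 1 to the entries under /proc/sys/kernel/grsecurity for the features you want, then set grsec_lock last. The two feature names used below are only examples taken from the help texts above, and the sketch assumes grsec_lock sits in the same /proc directory as the feature entries:

	#include <stdio.h>

	static int write_grsec_sysctl(const char *name, const char *value)
	{
		char path[256];
		FILE *f;

		snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
		f = fopen(path, "w");
		if (!f) {
			perror(path);
			return -1;
		}
		fputs(value, f);
		fclose(f);
		return 0;
	}

	int main(void)
	{
		/* enable a couple of the features documented above ... */
		write_grsec_sysctl("chroot_deny_chmod", "1");
		write_grsec_sysctl("audit_mount", "1");
		/* ... and only then lock the settings until the next reboot */
		return write_grsec_sysctl("grsec_lock", "1") ? 1 : 0;
	}
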
++endmenu
++menu "Logging Options"
++depends on GRKERNSEC
++
++config GRKERNSEC_FLOODTIME
++ int "Seconds in between log messages (minimum)"
++ default 10
++ help
++ This option allows you to enforce a minimum number of seconds between
++ grsecurity log messages. The default should be suitable for most
++ people; however, if you choose to change it, choose a value small enough
++ to allow informative logs to be produced, but large enough to
++ prevent flooding.
++
++config GRKERNSEC_FLOODBURST
++ int "Number of messages in a burst (maximum)"
++ default 4
++ help
++ This option allows you to choose the maximum number of messages allowed
++ within the flood time interval you chose in a separate option. The
++ default should be suitable for most people; however, if you find that
++ many of your logs are being interpreted as flooding, you may want to
++ raise this value.
++
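
Taken together, the two options describe a simple rate limit: at most FLOODBURST messages per FLOODTIME seconds. A placeholder sketch of that behaviour, not the patch's implementation, using the defaults above:

	#include <stdio.h>
	#include <time.h>

	#define FLOODTIME 10	/* default minimum interval, in seconds */
	#define FLOODBURST 4	/* default maximum messages per interval */

	static int log_allowed(void)
	{
		static time_t window_start;
		static int count;
		time_t now = time(NULL);

		if (now - window_start >= FLOODTIME) {
			window_start = now;
			count = 0;
		}
		return count++ < FLOODBURST;
	}

	int main(void)
	{
		int i;

		for (i = 0; i < 6; i++)
			printf("message %d allowed: %d\n", i, log_allowed());
		return 0;
	}
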
++endmenu
++
++endmenu
+diff -urN linux-2.6.7/grsecurity/Makefile linux-2.6.7/grsecurity/Makefile
+--- linux-2.6.7/grsecurity/Makefile 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/Makefile 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,21 @@
++# grsecurity's ACL system was originally written in 2001 by Michael Dalton.
++# During 2001, 2002, and 2003 it was completely redesigned by
++# Brad Spengler.
++#
++# All code in this directory and various hooks inserted throughout the kernel
++# are copyright Brad Spengler, and released under the GPL, unless otherwise
++# noted (as in obsd_rand.c)
++
++obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
++ grsec_mount.o grsec_rand.o grsec_sig.o grsec_sock.o grsec_sysctl.o \
++ grsec_time.o grsec_tpe.o grsec_ipc.o grsec_link.o grsec_textrel.o
++
++obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_ip.o gracl_segv.o obsd_rand.o \
++ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
++ gracl_learn.o
++obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
++
++ifndef CONFIG_GRKERNSEC
++obj-y += grsec_disabled.o
++endif
++
+diff -urN linux-2.6.7/grsecurity/gracl.c linux-2.6.7/grsecurity/gracl.c
+--- linux-2.6.7/grsecurity/gracl.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl.c 2004-07-08 15:12:02 -0400
+@@ -0,0 +1,3436 @@
++/*
++ * grsecurity/gracl.c
++ * Copyright Brad Spengler 2001, 2002, 2003
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/namei.h>
++#include <linux/mount.h>
++#include <linux/tty.h>
++#include <linux/proc_fs.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/types.h>
++#include <linux/capability.h>
++#include <linux/sysctl.h>
++#include <linux/ptrace.h>
++#include <linux/gracl.h>
++#include <linux/gralloc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/percpu.h>
++
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++
++static struct acl_role_db acl_role_set;
++static struct acl_role_label *role_list_head;
++static struct name_db name_set;
++static struct name_db inodev_set;
++
++/* for keeping track of userspace pointers used for subjects, so we
++ can share references in the kernel as well
++*/
++
++static struct dentry *real_root;
++static struct vfsmount *real_root_mnt;
++
++static struct acl_subj_map_db subj_map_set;
++
++static struct acl_role_label *default_role;
++
++static u16 acl_sp_role_value;
++
++extern char *gr_shared_page[4];
++static DECLARE_MUTEX(gr_dev_sem);
++rwlock_t gr_inode_lock = RW_LOCK_UNLOCKED;
++
++struct gr_arg *gr_usermode;
++
++static unsigned long gr_status = GR_STATUS_INIT;
++
++extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
++extern void gr_clear_learn_entries(void);
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++extern void gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt);
++#endif
++
++extern char * __d_path(struct dentry *dentry, struct vfsmount *vfsmnt,
++ struct dentry *root, struct vfsmount *rootmnt,
++ char *buffer, int buflen);
++
++unsigned char *gr_system_salt;
++unsigned char *gr_system_sum;
++
++static struct sprole_pw **acl_special_roles = NULL;
++static __u16 num_sprole_pws = 0;
++
++static struct acl_role_label *kernel_role = NULL;
++
++/* The following are used to keep a place held in the hash table when we move
++ entries around. They can be replaced during insert. */
++
++static struct acl_subject_label *deleted_subject;
++static struct acl_object_label *deleted_object;
++static struct name_entry *deleted_inodev;
++
++/* for keeping track of the last and final allocated subjects, since
++ nested subject parsing is tricky
++*/
++static struct acl_subject_label *s_last = NULL;
++static struct acl_subject_label *s_final = NULL;
++
++static unsigned int gr_auth_attempts = 0;
++static unsigned long gr_auth_expires = 0UL;
++
++extern int gr_init_uidset(void);
++extern void gr_free_uidset(void);
++extern void gr_remove_uid(uid_t uid);
++extern int gr_find_uid(uid_t uid);
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++ return (gr_status & GR_READY);
++}
++
++char gr_roletype_to_char(void)
++{
++ switch (current->role->roletype &
++ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
++ GR_ROLE_SPECIAL)) {
++ case GR_ROLE_DEFAULT:
++ return 'D';
++ case GR_ROLE_USER:
++ return 'U';
++ case GR_ROLE_GROUP:
++ return 'G';
++ case GR_ROLE_SPECIAL:
++ return 'S';
++ }
++
++ return 'X';
++}
++
++__inline__ int
++gr_acl_tpe_check(void)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++ if (current->role->roletype & GR_ROLE_TPE)
++ return 1;
++ else
++ return 0;
++}
++
++int
++gr_handle_rawio(const struct inode *inode)
++{
++ if (inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO) &&
++ ((gr_status & GR_READY)
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ || (grsec_enable_chroot_caps && proc_is_chrooted(current))
++#endif
++ ))
++ return 1;
++ return 0;
++}
++
++
++static __inline__ int
++gr_streq(const char *a, const char *b, const __u16 lena, const __u16 lenb)
++{
++ int i;
++ unsigned long *l1;
++ unsigned long *l2;
++ unsigned char *c1;
++ unsigned char *c2;
++ int num_longs;
++
++ if (likely(lena != lenb))
++ return 0;
++
++ l1 = (unsigned long *)a;
++ l2 = (unsigned long *)b;
++
++ num_longs = lena / sizeof(unsigned long);
++
++ for (i = num_longs; i--; l1++, l2++) {
++ if (unlikely(*l1 != *l2))
++ return 0;
++ }
++
++ c1 = (unsigned char *) l1;
++ c2 = (unsigned char *) l2;
++
++ i = lena - (num_longs * sizeof(unsigned long));
++
++ for (; i--; c1++, c2++) {
++ if (unlikely(*c1 != *c2))
++ return 0;
++ }
++
++ return 1;
++}
++
++static char *
++__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct dentry *root;
++ struct vfsmount *rootmnt;
++
++ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
++ read_lock(&child_reaper->fs->lock);
++ root = dget(child_reaper->fs->root);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ read_unlock(&child_reaper->fs->lock);
++
++ res = __d_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
++ if (unlikely(IS_ERR(res)))
++ res = strcpy(buf, "<path too long>");
++ dput(root);
++ mntput(rootmnt);
++ return res;
++}
++
++char *
++gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++static char *
++d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
++ char *buf, int buflen)
++{
++ char *res;
++ struct dentry *root;
++ struct vfsmount *rootmnt;
++
++ /* we can't use real_root, real_root_mnt, because they belong only to the RBAC system */
++ read_lock(&child_reaper->fs->lock);
++ root = dget(child_reaper->fs->root);
++ rootmnt = mntget(child_reaper->fs->rootmnt);
++ read_unlock(&child_reaper->fs->lock);
++
++ spin_lock(&dcache_lock);
++ res = __d_path((struct dentry *)dentry, (struct vfsmount *)vfsmnt, root, rootmnt, buf, buflen);
++ spin_unlock(&dcache_lock);
++ if (unlikely(IS_ERR(res)))
++ res = strcpy(buf, "<path too long>");
++ dput(root);
++ mntput(rootmnt);
++ return res;
++}
++
++char *
++gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++char *
++gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
++ PAGE_SIZE);
++}
++
++__inline__ __u32
++to_gr_audit(const __u32 reqmode)
++{
++ __u32 retmode = 0;
++
++ retmode |= (reqmode & GR_READ) ? GR_AUDIT_READ : 0;
++ retmode |= (reqmode & GR_WRITE) ? GR_AUDIT_WRITE | GR_AUDIT_APPEND : 0;
++ retmode |= (reqmode & GR_APPEND) ? GR_AUDIT_APPEND : 0;
++ retmode |= (reqmode & GR_EXEC) ? GR_AUDIT_EXEC : 0;
++ retmode |= (reqmode & GR_INHERIT) ? GR_AUDIT_INHERIT : 0;
++ retmode |= (reqmode & GR_FIND) ? GR_AUDIT_FIND : 0;
++ retmode |= (reqmode & GR_SETID) ? GR_AUDIT_SETID : 0;
++ retmode |= (reqmode & GR_CREATE) ? GR_AUDIT_CREATE : 0;
++ retmode |= (reqmode & GR_DELETE) ? GR_AUDIT_DELETE : 0;
++
++ return retmode;
++}
++
++__inline__ struct acl_subject_label *
++lookup_subject_map(const struct acl_subject_label *userp)
++{
++ unsigned long index = shash(userp, subj_map_set.s_size);
++ struct subject_map *match;
++ __u8 i = 0;
++
++ match = subj_map_set.s_hash[index];
++
++ while (match && match->user != userp) {
++ index = (index + (1 << i)) % subj_map_set.s_size;
++ match = subj_map_set.s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match)
++ return match->kernel;
++ else
++ return NULL;
++}
++
++static void
++insert_subj_map_entry(struct subject_map *subjmap)
++{
++ unsigned long index = shash(subjmap->user, subj_map_set.s_size);
++ struct subject_map **curr;
++ __u8 i = 0;
++
++ curr = &subj_map_set.s_hash[index];
++
++ while (*curr) {
++ index = (index + (1 << i)) % subj_map_set.s_size;
++ curr = &subj_map_set.s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = subjmap;
++
++ return;
++}
++
++__inline__ struct acl_role_label *
++lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
++ const gid_t gid)
++{
++ unsigned long index = rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
++ struct acl_role_label *match;
++ struct role_allowed_ip *ipp;
++ int x;
++ __u8 i = 0;
++
++ match = acl_role_set.r_hash[index];
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == uid)
++ goto found;
++ }
++ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
++ break;
++ index = (index + (1 << i)) % acl_role_set.r_size;
++ match = acl_role_set.r_hash[index];
++ i = (i + 1) % 32;
++ }
++found:
++ if (match == NULL) {
++ try_group:
++ index = rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
++ match = acl_role_set.r_hash[index];
++ i = 0;
++
++ while (match) {
++ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
++ for (x = 0; x < match->domain_child_num; x++) {
++ if (match->domain_children[x] == gid)
++ goto found2;
++ }
++ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
++ break;
++ index = (index + (1 << i)) % acl_role_set.r_size;
++ match = acl_role_set.r_hash[index];
++ i = (i + 1) % 32;
++ }
++found2:
++ if (match == NULL)
++ match = default_role;
++ if (match->allowed_ips == NULL)
++ return match;
++ else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(task->curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ match = default_role;
++ }
++ } else if (match->allowed_ips == NULL) {
++ return match;
++ } else {
++ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
++ if (likely
++ ((ntohl(task->curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask)))
++ return match;
++ }
++ goto try_group;
++ }
++
++ return match;
++}
++
++__inline__ struct acl_subject_label *
++lookup_acl_subj_label(const ino_t ino, const dev_t dev,
++ const struct acl_role_label *role)
++{
++ unsigned long subj_size = role->subj_hash_size;
++ struct acl_subject_label **s_hash = role->subj_hash;
++ unsigned long index = fhash(ino, dev, subj_size);
++ struct acl_subject_label *match;
++ __u8 i = 0;
++
++ match = s_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % subj_size;
++ match = s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_subject) && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static __inline__ struct acl_object_label *
++lookup_acl_obj_label(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned long obj_size = subj->obj_hash_size;
++ struct acl_object_label **o_hash = subj->obj_hash;
++ unsigned long index = fhash(ino, dev, obj_size);
++ struct acl_object_label *match;
++ __u8 i = 0;
++
++ match = o_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % obj_size;
++ match = o_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_object) && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static __inline__ struct acl_object_label *
++lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
++ const struct acl_subject_label *subj)
++{
++ unsigned long obj_size = subj->obj_hash_size;
++ struct acl_object_label **o_hash = subj->obj_hash;
++ unsigned long index = fhash(ino, dev, obj_size);
++ struct acl_object_label *match;
++ __u8 i = 0;
++
++ match = o_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ !(match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % obj_size;
++ match = o_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_object) && (match->mode & GR_DELETED))
++ return match;
++
++ i = 0;
++ index = fhash(ino, dev, obj_size);
++ match = o_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev ||
++ (match->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % obj_size;
++ match = o_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_object) && !(match->mode & GR_DELETED))
++ return match;
++ else
++ return NULL;
++}
++
++static __inline__ struct name_entry *
++lookup_name_entry(const char *name)
++{
++ __u16 len = strlen(name);
++ unsigned long index = nhash(name, len, name_set.n_size);
++ struct name_entry *match;
++ __u8 i = 0;
++
++ match = name_set.n_hash[index];
++
++ while (match && !gr_streq(match->name, name, match->len, len)) {
++ index = (index + (1 << i)) % name_set.n_size;
++ match = name_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ return match;
++}
++
++static __inline__ struct name_entry *
++lookup_inodev_entry(const ino_t ino, const dev_t dev)
++{
++ unsigned long index = fhash(ino, dev, inodev_set.n_size);
++ struct name_entry *match;
++ __u8 i = 0;
++
++ match = inodev_set.n_hash[index];
++
++ while (match && (match->inode != ino || match->device != dev)) {
++ index = (index + (1 << i)) % inodev_set.n_size;
++ match = inodev_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (match && (match != deleted_inodev))
++ return match;
++ else
++ return NULL;
++}
++
++static void
++insert_inodev_entry(struct name_entry *nentry)
++{
++ unsigned long index = fhash(nentry->inode, nentry->device,
++ inodev_set.n_size);
++ struct name_entry **curr;
++ __u8 i = 0;
++
++ curr = &inodev_set.n_hash[index];
++
++ while (*curr && *curr != deleted_inodev) {
++ index = (index + (1 << i)) % inodev_set.n_size;
++ curr = &inodev_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = nentry;
++
++ return;
++}
++
++static void
++__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
++{
++ unsigned long index =
++ rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
++ struct acl_role_label **curr;
++ __u8 i = 0;
++
++ curr = &acl_role_set.r_hash[index];
++
++ while (*curr) {
++ index = (index + (1 << i)) % acl_role_set.r_size;
++ curr = &acl_role_set.r_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = role;
++
++ return;
++}
++
++static void
++insert_acl_role_label(struct acl_role_label *role)
++{
++ int i;
++
++ if (role->roletype & GR_ROLE_DOMAIN) {
++ for (i = 0; i < role->domain_child_num; i++)
++ __insert_acl_role_label(role, role->domain_children[i]);
++ } else
++ __insert_acl_role_label(role, role->uidgid);
++}
++
++static int
++insert_name_entry(char *name, const ino_t inode, const dev_t device)
++{
++ struct name_entry **curr;
++ __u8 i = 0;
++ __u16 len = strlen(name);
++ unsigned long index = nhash(name, len, name_set.n_size);
++
++ curr = &name_set.n_hash[index];
++
++ while (*curr && !gr_streq((*curr)->name, name, (*curr)->len, len)) {
++ index = (index + (1 << i)) % name_set.n_size;
++ curr = &name_set.n_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (!(*curr)) {
++ struct name_entry *nentry =
++ acl_alloc(sizeof (struct name_entry));
++ if (!nentry)
++ return 0;
++ nentry->name = name;
++ nentry->inode = inode;
++ nentry->device = device;
++ nentry->len = len;
++ *curr = nentry;
++ /* insert us into the table searchable by inode/dev */
++ insert_inodev_entry(nentry);
++ }
++
++ return 1;
++}
++
++static void
++insert_acl_obj_label(struct acl_object_label *obj,
++ struct acl_subject_label *subj)
++{
++ unsigned long index =
++ fhash(obj->inode, obj->device, subj->obj_hash_size);
++ struct acl_object_label **curr;
++ __u8 i = 0;
++
++ curr = &subj->obj_hash[index];
++
++ while (*curr && *curr != deleted_object) {
++ index = (index + (1 << i)) % subj->obj_hash_size;
++ curr = &subj->obj_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = obj;
++
++ return;
++}
++
++static void
++insert_acl_subj_label(struct acl_subject_label *obj,
++ struct acl_role_label *role)
++{
++ unsigned long subj_size = role->subj_hash_size;
++ struct acl_subject_label **s_hash = role->subj_hash;
++ unsigned long index = fhash(obj->inode, obj->device, subj_size);
++ struct acl_subject_label **curr;
++ __u8 i = 0;
++
++ curr = &s_hash[index];
++
++ while (*curr && *curr != deleted_subject) {
++ index = (index + (1 << i)) % subj_size;
++ curr = &s_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ *curr = obj;
++
++ return;
++}
++
++static void **
++create_table(__u32 * len)
++{
++ unsigned long table_sizes[] = {
++ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
++ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
++ 4194301, 8388593, 16777213, 33554393, 67108859, 134217689,
++ 268435399, 536870909, 1073741789, 2147483647
++ };
++ void *newtable = NULL;
++ unsigned int pwr = 0;
++
++ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
++ table_sizes[pwr] <= (2 * (*len)))
++ pwr++;
++
++ if (table_sizes[pwr] <= (2 * (*len)))
++ return newtable;
++
++ if ((table_sizes[pwr] * sizeof (void *)) <= PAGE_SIZE)
++ newtable =
++ kmalloc(table_sizes[pwr] * sizeof (void *), GFP_KERNEL);
++ else
++ newtable = vmalloc(table_sizes[pwr] * sizeof (void *));
++
++ *len = table_sizes[pwr];
++
++ return newtable;
++}
++
++static int
++init_variables(const struct gr_arg *arg)
++{
++ unsigned long stacksize;
++
++ subj_map_set.s_size = arg->role_db.num_subjects;
++ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
++ name_set.n_size = arg->role_db.num_objects;
++ inodev_set.n_size = arg->role_db.num_objects;
++
++ if (!gr_init_uidset())
++ return 1;
++
++ /* set up the stack that holds allocation info */
++
++ stacksize = arg->role_db.num_pointers + 5;
++
++ if (!acl_alloc_stack_init(stacksize))
++ return 1;
++
++ /* create our empty, fake deleted acls */
++ deleted_subject =
++ (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label));
++ deleted_object =
++ (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ deleted_inodev =
++ (struct name_entry *) acl_alloc(sizeof (struct name_entry));
++
++ if (!deleted_subject || !deleted_object || !deleted_inodev)
++ return 1;
++
++ memset(deleted_subject, 0, sizeof (struct acl_subject_label));
++ memset(deleted_object, 0, sizeof (struct acl_object_label));
++ memset(deleted_inodev, 0, sizeof (struct name_entry));
++
++ /* grab reference for the real root dentry and vfsmount */
++ read_lock(&child_reaper->fs->lock);
++ real_root_mnt = mntget(child_reaper->fs->rootmnt);
++ real_root = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++
++
++ /* We only want 50% full tables for now */
++
++ subj_map_set.s_hash =
++ (struct subject_map **) create_table(&subj_map_set.s_size);
++ acl_role_set.r_hash =
++ (struct acl_role_label **) create_table(&acl_role_set.r_size);
++ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size);
++ inodev_set.n_hash =
++ (struct name_entry **) create_table(&inodev_set.n_size);
++
++ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
++ !name_set.n_hash || !inodev_set.n_hash)
++ return 1;
++
++ memset(subj_map_set.s_hash, 0,
++ sizeof(struct subject_map *) * subj_map_set.s_size);
++ memset(acl_role_set.r_hash, 0,
++ sizeof (struct acl_role_label *) * acl_role_set.r_size);
++ memset(name_set.n_hash, 0,
++ sizeof (struct name_entry *) * name_set.n_size);
++ memset(inodev_set.n_hash, 0,
++ sizeof (struct name_entry *) * inodev_set.n_size);
++
++ return 0;
++}
++
++/* free information not needed after startup
++ currently contains user->kernel pointer mappings for subjects
++*/
++
++static void
++free_init_variables(void)
++{
++ __u32 i;
++
++ if (subj_map_set.s_hash) {
++ for (i = 0; i < subj_map_set.s_size; i++) {
++ if (subj_map_set.s_hash[i]) {
++ kfree(subj_map_set.s_hash[i]);
++ subj_map_set.s_hash[i] = NULL;
++ }
++ }
++
++ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
++ PAGE_SIZE)
++ kfree(subj_map_set.s_hash);
++ else
++ vfree(subj_map_set.s_hash);
++ }
++
++ return;
++}
++
++static void
++free_variables(void)
++{
++ struct acl_subject_label *s;
++ struct acl_role_label *r;
++ struct task_struct *task, *task2;
++
++ gr_clear_learn_entries();
++
++ read_lock(&tasklist_lock);
++ for_each_process(task) {
++ task2 = task;
++ do {
++ task2->acl_sp_role = 0;
++ task2->acl_role_id = 0;
++ task2->acl = NULL;
++ task2->role = NULL;
++ } while ((task2 = next_thread(task2)) != task);
++ }
++ read_unlock(&tasklist_lock);
++
++ /* release the reference to the real root dentry and vfsmount */
++ if (real_root)
++ dput(real_root);
++ real_root = NULL;
++ if (real_root_mnt)
++ mntput(real_root_mnt);
++ real_root_mnt = NULL;
++
++ /* free all object hash tables */
++
++ if (role_list_head) {
++ for (r = role_list_head; r; r = r->next) {
++ if (!r->subj_hash)
++ break;
++ for (s = r->hash->first; s; s = s->next) {
++ if (!s->obj_hash)
++ break;
++ if ((s->obj_hash_size *
++ sizeof (struct acl_object_label *)) <=
++ PAGE_SIZE)
++ kfree(s->obj_hash);
++ else
++ vfree(s->obj_hash);
++ }
++ if ((r->subj_hash_size *
++ sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
++ kfree(r->subj_hash);
++ else
++ vfree(r->subj_hash);
++ }
++ }
++
++ acl_free_all();
++
++ if (acl_role_set.r_hash) {
++ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
++ PAGE_SIZE)
++ kfree(acl_role_set.r_hash);
++ else
++ vfree(acl_role_set.r_hash);
++ }
++ if (name_set.n_hash) {
++ if ((name_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(name_set.n_hash);
++ else
++ vfree(name_set.n_hash);
++ }
++
++ if (inodev_set.n_hash) {
++ if ((inodev_set.n_size * sizeof (struct name_entry *)) <=
++ PAGE_SIZE)
++ kfree(inodev_set.n_hash);
++ else
++ vfree(inodev_set.n_hash);
++ }
++
++ gr_free_uidset();
++
++ memset(&name_set, 0, sizeof (struct name_db));
++ memset(&inodev_set, 0, sizeof (struct name_db));
++ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
++ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
++
++ role_list_head = NULL;
++ default_role = NULL;
++
++ return;
++}
++
++static __u32
++count_user_objs(struct acl_object_label *userp)
++{
++ struct acl_object_label o_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ break;
++
++ userp = o_tmp.prev;
++ num++;
++ }
++
++ return num;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role);
++
++static int
++copy_user_glob(struct acl_object_label *obj)
++{
++ struct acl_object_label *g_tmp, **guser, *glast = NULL;
++ unsigned int len;
++ char *tmp;
++
++ if (obj->globbed == NULL)
++ return 0;
++
++ guser = &obj->globbed;
++ while (*guser) {
++ g_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label));
++ if (g_tmp == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(g_tmp, *guser,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ len = strnlen_user(g_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, g_tmp->filename, len))
++ return -EFAULT;
++
++ g_tmp->filename = tmp;
++
++ if (glast)
++ glast->next = g_tmp;
++ g_tmp->prev = glast;
++ *guser = g_tmp;
++ glast = g_tmp;
++ guser = &((*guser)->next);
++ }
++
++ return 0;
++}
++
++static int
++copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
++ struct acl_role_label *role)
++{
++ struct acl_object_label *o_tmp;
++ unsigned int len;
++ int ret;
++ char *tmp;
++
++ while (userp) {
++ if ((o_tmp = (struct acl_object_label *)
++ acl_alloc(sizeof (struct acl_object_label))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(o_tmp, userp,
++ sizeof (struct acl_object_label)))
++ return -EFAULT;
++
++ userp = o_tmp->prev;
++
++ len = strnlen_user(o_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, o_tmp->filename, len))
++ return -EFAULT;
++
++ o_tmp->filename = tmp;
++
++ insert_acl_obj_label(o_tmp, subj);
++ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
++ o_tmp->device))
++ return -ENOMEM;
++
++ ret = copy_user_glob(o_tmp);
++ if (ret)
++ return ret;
++
++ if (o_tmp->nested) {
++ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role);
++ if (IS_ERR(o_tmp->nested))
++ return PTR_ERR(o_tmp->nested);
++
++ s_final = o_tmp->nested;
++ }
++ }
++
++ return 0;
++}
++
++static __u32
++count_user_subjs(struct acl_subject_label *userp)
++{
++ struct acl_subject_label s_tmp;
++ __u32 num = 0;
++
++ while (userp) {
++ if (copy_from_user(&s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ break;
++
++ userp = s_tmp.prev;
++ /* do not count nested subjects against this count, since
++ they are not included in the hash table, but are
++ attached to objects. We have already counted
++ the subjects in userspace for the allocation
++ stack
++ */
++ if (!(s_tmp.mode & GR_NESTED))
++ num++;
++ }
++
++ return num;
++}
++
++static int
++copy_user_allowedips(struct acl_role_label *rolep)
++{
++ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
++
++ ruserip = rolep->allowed_ips;
++
++ while (ruserip) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_allowed_ip *)
++ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, ruserip,
++ sizeof (struct role_allowed_ip)))
++ return -EFAULT;
++
++ ruserip = rtmp->prev;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->allowed_ips = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!ruserip)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static int
++copy_user_transitions(struct acl_role_label *rolep)
++{
++ struct role_transition *rusertp, *rtmp = NULL, *rlast;
++ unsigned int len;
++ char *tmp;
++
++ rusertp = rolep->transitions;
++
++ while (rusertp) {
++ rlast = rtmp;
++
++ if ((rtmp = (struct role_transition *)
++ acl_alloc(sizeof (struct role_transition))) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(rtmp, rusertp,
++ sizeof (struct role_transition)))
++ return -EFAULT;
++
++ rusertp = rtmp->prev;
++
++ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN)
++ return -EINVAL;
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return -ENOMEM;
++
++ if (copy_from_user(tmp, rtmp->rolename, len))
++ return -EFAULT;
++
++ rtmp->rolename = tmp;
++
++ if (!rlast) {
++ rtmp->prev = NULL;
++ rolep->transitions = rtmp;
++ } else {
++ rlast->next = rtmp;
++ rtmp->prev = rlast;
++ }
++
++ if (!rusertp)
++ rtmp->next = NULL;
++ }
++
++ return 0;
++}
++
++static struct acl_subject_label *
++do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
++ unsigned int len;
++ char *tmp;
++ __u32 num_objs;
++ struct acl_ip_label **i_tmp, *i_utmp2;
++ struct gr_hash_struct ghash;
++ struct subject_map *subjmap;
++ unsigned long i_num;
++ int err;
++
++ s_tmp = lookup_subject_map(userp);
++
++ /* we've already copied this subject into the kernel, just return
++ the reference to it, and don't copy it over again
++ */
++ if (s_tmp)
++ return(s_tmp);
++
++
++ if ((s_tmp = (struct acl_subject_label *)
++ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
++ if (subjmap == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ subjmap->user = userp;
++ subjmap->kernel = s_tmp;
++ insert_subj_map_entry(subjmap);
++
++ if (copy_from_user(s_tmp, userp,
++ sizeof (struct acl_subject_label)))
++ return ERR_PTR(-EFAULT);
++
++ if (!s_last) {
++ s_tmp->prev = NULL;
++ role->hash->first = s_tmp;
++ } else {
++ s_last->next = s_tmp;
++ s_tmp->prev = s_last;
++ }
++
++ s_last = s_tmp;
++
++ len = strnlen_user(s_tmp->filename, PATH_MAX);
++
++ if (!len || len >= PATH_MAX)
++ return ERR_PTR(-EINVAL);
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL)
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user(tmp, s_tmp->filename, len))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->filename = tmp;
++
++ if (!strcmp(s_tmp->filename, "/"))
++ role->root_label = s_tmp;
++
++ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
++ return ERR_PTR(-EFAULT);
++
++ /* copy user and group transition tables */
++
++ if (s_tmp->user_trans_num) {
++ uid_t *uidlist;
++
++ uidlist = (uid_t *)acl_alloc(s_tmp->user_trans_num * sizeof(uid_t));
++ if (uidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->user_transitions = uidlist;
++ }
++
++ if (s_tmp->group_trans_num) {
++ gid_t *gidlist;
++
++ gidlist = (gid_t *)acl_alloc(s_tmp->group_trans_num * sizeof(gid_t));
++ if (gidlist == NULL)
++ return ERR_PTR(-ENOMEM);
++ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
++ return ERR_PTR(-EFAULT);
++
++ s_tmp->group_transitions = gidlist;
++ }
++
++ /* set up object hash table */
++ num_objs = count_user_objs(ghash.first);
++
++ s_tmp->obj_hash_size = num_objs;
++ s_tmp->obj_hash =
++ (struct acl_object_label **)
++ create_table(&(s_tmp->obj_hash_size));
++
++ if (!s_tmp->obj_hash)
++ return ERR_PTR(-ENOMEM);
++
++ memset(s_tmp->obj_hash, 0,
++ s_tmp->obj_hash_size *
++ sizeof (struct acl_object_label *));
++
++ /* copy before adding in objects, since a nested
++ acl could be found and be the final subject
++ copied
++ */
++
++ s_final = s_tmp;
++
++ /* add in objects */
++ err = copy_user_objs(ghash.first, s_tmp, role);
++
++ if (err)
++ return ERR_PTR(err);
++
++ /* set pointer for parent subject */
++ if (s_tmp->parent_subject) {
++ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role);
++
++ if (IS_ERR(s_tmp2))
++ return s_tmp2;
++
++ s_tmp->parent_subject = s_tmp2;
++ }
++
++ /* add in ip acls */
++
++ if (!s_tmp->ip_num) {
++ s_tmp->ips = NULL;
++ goto insert;
++ }
++
++ i_tmp =
++ (struct acl_ip_label **) acl_alloc(s_tmp->ip_num *
++ sizeof (struct
++ acl_ip_label *));
++
++ if (!i_tmp)
++ return ERR_PTR(-ENOMEM);
++
++ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
++ *(i_tmp + i_num) =
++ (struct acl_ip_label *)
++ acl_alloc(sizeof (struct acl_ip_label));
++ if (!*(i_tmp + i_num))
++ return ERR_PTR(-ENOMEM);
++
++ if (copy_from_user
++ (&i_utmp2, s_tmp->ips + i_num,
++ sizeof (struct acl_ip_label *)))
++ return ERR_PTR(-EFAULT);
++
++ if (copy_from_user
++ (*(i_tmp + i_num), i_utmp2,
++ sizeof (struct acl_ip_label)))
++ return ERR_PTR(-EFAULT);
++ }
++
++ s_tmp->ips = i_tmp;
++
++insert:
++ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
++ s_tmp->device))
++ return ERR_PTR(-ENOMEM);
++
++ return s_tmp;
++}
++
++static int
++copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
++{
++ struct acl_subject_label s_pre;
++ struct acl_subject_label * ret;
++ int err;
++
++ while (userp) {
++ if (copy_from_user(&s_pre, userp,
++ sizeof (struct acl_subject_label)))
++ return -EFAULT;
++
++ /* do not add nested subjects here, add
++ while parsing objects
++ */
++
++ if (s_pre.mode & GR_NESTED) {
++ userp = s_pre.prev;
++ continue;
++ }
++
++ ret = do_copy_user_subj(userp, role);
++
++ err = PTR_ERR(ret);
++ if (IS_ERR(ret))
++ return err;
++
++ insert_acl_subj_label(ret, role);
++
++ userp = s_pre.prev;
++ }
++
++ s_final->next = NULL;
++
++ return 0;
++}
++
++static int
++copy_user_acl(struct gr_arg *arg)
++{
++ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2, *r_last;
++ struct sprole_pw *sptmp;
++ struct gr_hash_struct *ghash;
++ uid_t *domainlist;
++ unsigned long r_num;
++ unsigned int len;
++ char *tmp;
++ int err = 0;
++ __u16 i;
++ __u32 num_subjs;
++
++ /* we need a default and kernel role */
++ if (arg->role_db.num_roles < 2)
++ return -EINVAL;
++
++ /* copy special role authentication info from userspace */
++
++ num_sprole_pws = arg->num_sprole_pws;
++ acl_special_roles = (struct sprole_pw **) acl_alloc(num_sprole_pws * sizeof(struct sprole_pw *));
++
++ if (!acl_special_roles) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
++ if (!sptmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(sptmp, arg->sprole_pws + i,
++ sizeof (struct sprole_pw))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ len =
++ strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= GR_SPROLE_LEN) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(tmp, sptmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Copying special role %s\n", tmp);
++#endif
++ sptmp->rolename = tmp;
++ acl_special_roles[i] = sptmp;
++ }
++
++ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
++
++ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
++ r_last = r_tmp;
++
++ r_tmp = acl_alloc(sizeof (struct acl_role_label));
++
++ if (!r_tmp) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ if (copy_from_user(&r_utmp2, r_utmp + r_num,
++ sizeof (struct acl_role_label *))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ if (copy_from_user(r_tmp, r_utmp2,
++ sizeof (struct acl_role_label))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ if (!r_last) {
++ r_tmp->prev = NULL;
++ role_list_head = r_tmp;
++ } else {
++ r_last->next = r_tmp;
++ r_tmp->prev = r_last;
++ }
++
++ if (r_num == (arg->role_db.num_roles - 1))
++ r_tmp->next = NULL;
++
++ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
++
++ if (!len || len >= PATH_MAX) {
++ err = -EINVAL;
++ goto cleanup;
++ }
++
++ if ((tmp = (char *) acl_alloc(len)) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(tmp, r_tmp->rolename, len)) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ r_tmp->rolename = tmp;
++
++ if (!strcmp(r_tmp->rolename, "default")
++ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
++ default_role = r_tmp;
++ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
++ kernel_role = r_tmp;
++ }
++
++ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++
++ r_tmp->hash = ghash;
++
++ num_subjs = count_user_subjs(r_tmp->hash->first);
++
++ r_tmp->subj_hash_size = num_subjs;
++ r_tmp->subj_hash =
++ (struct acl_subject_label **)
++ create_table(&(r_tmp->subj_hash_size));
++
++ if (!r_tmp->subj_hash) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++
++ err = copy_user_allowedips(r_tmp);
++ if (err)
++ goto cleanup;
++
++ /* copy domain info */
++ if (r_tmp->domain_children != NULL) {
++ domainlist = acl_alloc(r_tmp->domain_child_num * sizeof(uid_t));
++ if (domainlist == NULL) {
++ err = -ENOMEM;
++ goto cleanup;
++ }
++ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t))) {
++ err = -EFAULT;
++ goto cleanup;
++ }
++ r_tmp->domain_children = domainlist;
++ }
++
++ err = copy_user_transitions(r_tmp);
++ if (err)
++ goto cleanup;
++
++ memset(r_tmp->subj_hash, 0,
++ r_tmp->subj_hash_size *
++ sizeof (struct acl_subject_label *));
++
++ s_last = NULL;
++
++ err = copy_user_subjs(r_tmp->hash->first, r_tmp);
++
++ if (err)
++ goto cleanup;
++
++ insert_acl_role_label(r_tmp);
++ }
++
++ goto return_err;
++ cleanup:
++ free_variables();
++ return_err:
++ return err;
++
++}
++
++static int
++gracl_init(struct gr_arg *args)
++{
++ int error = 0;
++
++ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
++ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
++
++ if (init_variables(args)) {
++ security_alert_good(GR_INITF_ACL_MSG, GR_VERSION);
++ error = -ENOMEM;
++ free_variables();
++ goto out;
++ }
++
++ error = copy_user_acl(args);
++ free_init_variables();
++ if (error) {
++ free_variables();
++ goto out;
++ }
++
++ if ((error = gr_set_acls(0))) {
++ free_variables();
++ goto out;
++ }
++
++ gr_status |= GR_READY;
++ out:
++ return error;
++}
++
++/* derived from glibc fnmatch() 0: match, 1: no match*/
++
++static int
++glob_match(const char *pattern, const char *string)
++{
++ const char *p = pattern, *n = string;
++ char c;
++
++ while ((c = *p++) != '\0') {
++ switch (c) {
++ case '?':
++ if (*n == '\0')
++ return 1;
++ else if (*n == '/')
++ return 1;
++ break;
++ case '\\':
++ if (*n != c)
++ return 1;
++ break;
++ case '*':
++ for (c = *p++; c == '?' || c == '*'; c = *p++, ++n)
++ if ((*n == '/') || (c == '?' && *n == '\0'))
++ return 1;
++ if (c == '\0')
++ return 0;
++ {
++ char c1 = c;
++ for (--p; *n != '\0'; ++n)
++ if (((c == '[') || (*n == c1)) && !glob_match(p, n))
++ return 0;
++ return 1;
++ }
++ case '[':
++ {
++ int not;
++
++ if (*n == '\0')
++ return 1;
++ not = (*p == '!' || *p == '^');
++ if (not)
++ ++p;
++
++ c = *p++;
++ for (;;) {
++ char cstart = c, cend = c;
++
++ if (c == '\0')
++ return 1;
++ c = *p++;
++
++ if (c == '/')
++ return 1;
++
++ if (c == '-' && *p != ']') {
++ cend = *p++;
++ if (cend == '\0')
++ return 1;
++ c = *p++;
++ }
++
++ if (*n >= cstart && *n <= cend)
++ goto matched;
++
++ if (c == ']')
++ break;
++ }
++ if (!not)
++ return 1;
++ break;
++ matched:
++ while (c != ']') {
++ if (c == '\0')
++ return 1;
++
++ c = *p++;
++ }
++ if (not)
++ return 1;
++ }
++ break;
++ default:
++ if (c != *n)
++ return 1;
++ }
++
++ ++n;
++ }
++
++ if (*n == '\0')
++ return 0;
++
++ if (*n == '/')
++ return 0;
++
++ return 1;
++}
++
++static struct acl_object_label *
++chk_glob_label(struct acl_object_label *globbed,
++ struct dentry *dentry, struct vfsmount *mnt, char **path)
++{
++ struct acl_object_label *tmp;
++
++ if (*path == NULL)
++ *path = gr_to_filename_nolock(dentry, mnt);
++
++ tmp = globbed;
++
++ while (tmp) {
++ if (!glob_match(tmp->filename, *path))
++ return tmp;
++ tmp = tmp->next;
++ }
++
++ return NULL;
++}
++
++static __inline__ struct acl_object_label *
++full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
++ struct dentry *curr_dentry,
++ const struct acl_subject_label *subj, char **path)
++{
++ struct acl_subject_label *tmpsubj;
++ struct acl_object_label *retval;
++ struct acl_object_label *retval2;
++
++ tmpsubj = (struct acl_subject_label *) subj;
++ read_lock(&gr_inode_lock);
++ do {
++ retval = lookup_acl_obj_label(curr_dentry->d_inode->i_ino,
++ curr_dentry->d_inode->i_sb->s_dev, tmpsubj);
++ if (retval) {
++ if (retval->globbed) {
++ retval2 = chk_glob_label(retval->globbed, (struct dentry *)orig_dentry,
++ (struct vfsmount *)orig_mnt, path);
++ if (retval2)
++ retval = retval2;
++ }
++ break;
++ }
++ } while ((tmpsubj = tmpsubj->parent_subject));
++ read_unlock(&gr_inode_lock);
++
++ return retval;
++}
++
++static struct acl_object_label *
++chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_object_label *retval;
++ char *path = NULL;
++
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (dentry == real_root && mnt == real_root_mnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path);
++out:
++ spin_unlock(&dcache_lock);
++
++ return retval;
++}
++
++static struct acl_object_label *
++chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_subject_label *subj, char *path)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_object_label *retval;
++
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (dentry == real_root && mnt == real_root_mnt)
++ break;
++ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++ if (retval != NULL)
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path);
++
++ if (retval == NULL)
++ retval = full_lookup(l_dentry, l_mnt, real_root, subj, &path);
++out:
++ spin_unlock(&dcache_lock);
++
++ return retval;
++}
++
++static struct acl_subject_label *
++chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
++ const struct acl_role_label *role)
++{
++ struct dentry *dentry = (struct dentry *) l_dentry;
++ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
++ struct acl_subject_label *retval;
++
++ spin_lock(&dcache_lock);
++
++ for (;;) {
++ if (unlikely(dentry == real_root && mnt == real_root_mnt))
++ break;
++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
++ if (mnt->mnt_parent == mnt)
++ break;
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ if (unlikely(retval != NULL))
++ goto out;
++
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ if (unlikely(retval != NULL))
++ goto out;
++
++ dentry = dentry->d_parent;
++ }
++
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++
++ if (unlikely(retval == NULL)) {
++ read_lock(&gr_inode_lock);
++ retval =
++ lookup_acl_subj_label(real_root->d_inode->i_ino,
++ real_root->d_inode->i_sb->s_dev, role);
++ read_unlock(&gr_inode_lock);
++ }
++ out:
++ spin_unlock(&dcache_lock);
++
++ return retval;
++}
++
++static __inline__ void
++gr_log_learn(const struct acl_role_label *role, const uid_t uid, const gid_t gid,
++ const struct task_struct *task, const char *pathname,
++ const __u32 mode)
++{
++ security_learn(GR_LEARN_AUDIT_MSG, role->rolename, role->roletype,
++ uid, gid, task->exec_file ? gr_to_filename1(task->exec_file->f_dentry,
++ task->exec_file->f_vfsmnt) : task->acl->filename, task->acl->filename,
++ 1, 1, pathname, (unsigned long) mode, NIPQUAD(task->curr_ip));
++
++ return;
++}
++
++__u32
++gr_check_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
++{
++ struct acl_object_label *obj;
++ __u32 oldmode, newmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (GR_WRITE | GR_CREATE);
++
++ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
++ oldmode = obj->mode;
++
++ if (current->acl->mode & GR_LEARN)
++ oldmode |= (GR_WRITE | GR_CREATE);
++ newmode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ oldmode | GR_CREATE | GR_AUDIT_CREATE |
++ GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if ((newmode & oldmode) == oldmode)
++ return newmode;
++ else if (current->acl->mode & GR_LEARN) {
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(old_dentry, old_mnt), oldmode);
++ return (GR_WRITE | GR_CREATE);
++ } else if (newmode & GR_SUPPRESS)
++ return GR_SUPPRESS;
++ else
++ return 0;
++}
++
++__u32
++gr_search_file(const struct dentry * dentry, const __u32 mode,
++ const struct vfsmount * mnt)
++{
++ __u32 retval = mode;
++ struct acl_subject_label *curracl;
++ struct acl_object_label *currobj;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ curracl = current->acl;
++
++ currobj = chk_obj_label(dentry, mnt, curracl);
++ retval = currobj->mode & mode;
++
++ if (unlikely
++ ((curracl->mode & GR_LEARN) && !(mode & GR_NOPTRACE)
++ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ retval = new_mode;
++
++ if (!(mode & GR_NOLEARN))
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(dentry, mnt), new_mode);
++ }
++
++ return retval;
++}
++
++__u32
++gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
++ const struct vfsmount * mnt, const __u32 mode)
++{
++ struct name_entry *match;
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *curracl;
++ char *path;
++ __u32 retval;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return (mode & ~GR_AUDITS);
++
++ preempt_disable();
++ path = gr_to_filename(new_dentry, mnt);
++ match = lookup_name_entry(path);
++
++ if (!match)
++ goto check_parent;
++
++ curracl = current->acl;
++
++ read_lock(&gr_inode_lock);
++ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
++ read_unlock(&gr_inode_lock);
++
++ if (matchpo) {
++ if ((matchpo->mode & mode) !=
++ (mode & ~(GR_AUDITS | GR_SUPPRESS))
++ && curracl->mode & GR_LEARN) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(new_dentry, mnt), new_mode);
++
++ preempt_enable();
++ return new_mode;
++ }
++ preempt_enable();
++ return (matchpo->mode & mode);
++ }
++
++ check_parent:
++ curracl = current->acl;
++
++ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
++ retval = matchpo->mode & mode;
++
++ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
++ && (curracl->mode & GR_LEARN)) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, gr_to_filename(new_dentry, mnt), new_mode);
++ preempt_enable();
++ return new_mode;
++ }
++
++ preempt_enable();
++ return retval;
++}
++
++int
++gr_check_hidden_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ if (!(task->acl->mode & GR_FIND) && !(current->acl->mode & GR_VIEW))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_check_protected_task(const struct task_struct *task)
++{
++ if (unlikely(!(gr_status & GR_READY) || !task))
++ return 0;
++
++ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL))
++ return 1;
++
++ return 0;
++}
++
++__inline__ void
++gr_copy_label(struct task_struct *tsk)
++{
++ tsk->used_accept = 0;
++ tsk->acl_sp_role = 0;
++ tsk->acl_role_id = current->acl_role_id;
++ tsk->acl = current->acl;
++ tsk->role = current->role;
++ tsk->curr_ip = current->curr_ip;
++ if (current->exec_file)
++ get_file(current->exec_file);
++ tsk->exec_file = current->exec_file;
++ tsk->is_writable = current->is_writable;
++ if (unlikely(current->used_accept))
++ current->curr_ip = 0;
++
++ return;
++}
++
++static __inline__ void
++gr_set_proc_res(void)
++{
++ struct acl_subject_label *proc;
++ unsigned short i;
++
++ proc = current->acl;
++
++ if (proc->mode & GR_LEARN)
++ return;
++
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(proc->resmask & (1 << i)))
++ continue;
++
++ current->rlim[i].rlim_cur = proc->res[i].rlim_cur;
++ current->rlim[i].rlim_max = proc->res[i].rlim_max;
++ }
++
++ return;
++}
++
++static __inline__ void
++do_set_role_label(struct task_struct *task, const uid_t uid, const gid_t gid)
++{
++ task->role = lookup_acl_role_label(task, uid, gid);
++
++ return;
++}
++
++int
++gr_check_user_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ uid_t *uidlist;
++ int curuid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ num = current->acl->user_trans_num;
++ uidlist = current->acl->user_transitions;
++
++ if (uidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->user_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ realok = 1;
++ if (effective == curuid)
++ effectiveok = 1;
++ if (fs == curuid)
++ fsok = 1;
++ }
++ } else if (current->acl->user_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curuid = (int)uidlist[i];
++ if (real == curuid)
++ break;
++ if (effective == curuid)
++ break;
++ if (fs == curuid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ security_alert(GR_USRCHANGE_ACL_MSG,
++ realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real, DEFAULTSECARGS);
++ return 1;
++ }
++}
++
++int
++gr_check_group_change(int real, int effective, int fs)
++{
++ unsigned int i;
++ __u16 num;
++ gid_t *gidlist;
++ int curgid;
++ int realok = 0;
++ int effectiveok = 0;
++ int fsok = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ num = current->acl->group_trans_num;
++ gidlist = current->acl->group_transitions;
++
++ if (gidlist == NULL)
++ return 0;
++
++ if (real == -1)
++ realok = 1;
++ if (effective == -1)
++ effectiveok = 1;
++ if (fs == -1)
++ fsok = 1;
++
++ if (current->acl->group_trans_type & GR_ID_ALLOW) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ realok = 1;
++ if (effective == curgid)
++ effectiveok = 1;
++ if (fs == curgid)
++ fsok = 1;
++ }
++ } else if (current->acl->group_trans_type & GR_ID_DENY) {
++ for (i = 0; i < num; i++) {
++ curgid = (int)gidlist[i];
++ if (real == curgid)
++ break;
++ if (effective == curgid)
++ break;
++ if (fs == curgid)
++ break;
++ }
++ /* not in deny list */
++ if (i == num) {
++ realok = 1;
++ effectiveok = 1;
++ fsok = 1;
++ }
++ }
++
++ if (realok && effectiveok && fsok)
++ return 0;
++ else {
++ security_alert(GR_GRPCHANGE_ACL_MSG,
++ realok ? (effectiveok ? (fsok ? 0 : fs) : effective) : real, DEFAULTSECARGS);
++ return 1;
++ }
++}
++
++void
++gr_set_role_label(struct task_struct *task, const uid_t uid, const uid_t gid)
++{
++ struct acl_object_label *obj;
++ struct file *filp;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ filp = task->exec_file;
++
++ /* kernel process, we'll give them the kernel role */
++ if (unlikely(!filp)) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ return;
++ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
++ do_set_role_label(task, uid, gid);
++
++ task->acl =
++ chk_subj_label(filp->f_dentry, filp->f_vfsmnt, task->role);
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++
++ gr_set_proc_res();
++
++ return;
++}
++
++int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *newacl;
++ struct acl_object_label *obj;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ newacl = chk_subj_label(dentry, mnt, task->role);
++
++ task_lock(task);
++ if (((task->ptrace & PT_PTRACED) && !(task->acl->mode &
++ GR_OVERRIDE) && (task->acl != newacl) &&
++ !(task->role->roletype & GR_ROLE_GOD) &&
++ !gr_search_file(dentry, GR_PTRACERD, mnt)) ||
++ (atomic_read(&task->fs->count) > 1 ||
++ atomic_read(&task->files->count) > 1 ||
++ atomic_read(&task->sighand->count) > 1)) {
++ task_unlock(task);
++ security_alert(GR_PTRACE_EXEC_ACL_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EACCES;
++ }
++ obj = chk_obj_label(dentry, mnt, task->acl);
++ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
++
++ if ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT)) {
++ if (obj->nested)
++ task->acl = obj->nested;
++ else
++ task->acl = newacl;
++ task_unlock(task);
++ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT) {
++ task_unlock(task);
++ security_audit(GR_INHERIT_ACL_MSG, task->acl->filename,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ } else
++ task_unlock(task);
++
++ task->is_writable = 0;
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(dentry, mnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(dentry, mnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++ gr_set_proc_res();
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ return 0;
++}
++
++static __inline__ void
++do_handle_delete(const ino_t ino, const dev_t dev)
++{
++ struct acl_object_label *matchpo;
++ struct acl_subject_label *matchps;
++ struct acl_subject_label *i;
++ struct acl_role_label *role;
++
++ for (role = role_list_head; role; role = role->next) {
++ for (i = role->hash->first; i; i = i->next) {
++ if (unlikely((i->mode & GR_NESTED) &&
++ (i->inode == ino) &&
++ (i->device == dev)))
++ i->mode |= GR_DELETED;
++ if (unlikely((matchpo =
++ lookup_acl_obj_label(ino, dev, i)) != NULL))
++ matchpo->mode |= GR_DELETED;
++ }
++
++ if (unlikely((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL))
++ matchps->mode |= GR_DELETED;
++ }
++
++ return;
++}
++
++void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ write_lock(&gr_inode_lock);
++ if (unlikely((unsigned long)lookup_inodev_entry(ino, dev)))
++ do_handle_delete(ino, dev);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
++
++static __inline__ void
++update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_subject_label *subj)
++{
++ unsigned long index = fhash(oldinode, olddevice, subj->obj_hash_size);
++ struct acl_object_label **match;
++ struct acl_object_label *tmp;
++ __u8 i = 0;
++
++ match = &subj->obj_hash[index];
++
++ while (*match && ((*match)->inode != oldinode ||
++ (*match)->device != olddevice ||
++ !((*match)->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % subj->obj_hash_size;
++ match = &subj->obj_hash[index];
++ i = (i + 1) % 32;
++ }
++
++ if (*match && ((*match) != deleted_object)
++ && ((*match)->inode == oldinode)
++ && ((*match)->device == olddevice)
++ && ((*match)->mode & GR_DELETED)) {
++ tmp = *match;
++ tmp->inode = newinode;
++ tmp->device = newdevice;
++ tmp->mode &= ~GR_DELETED;
++
++ *match = deleted_object;
++
++ insert_acl_obj_label(tmp, subj);
++ }
++
++ return;
++}
++
++static __inline__ void
++update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice,
++ struct acl_role_label *role)
++{
++ struct acl_subject_label **s_hash = role->subj_hash;
++ unsigned long subj_size = role->subj_hash_size;
++ unsigned long index = fhash(oldinode, olddevice, subj_size);
++ struct acl_subject_label **match;
++ struct acl_subject_label *tmp;
++ __u8 i = 0;
++
++ match = &s_hash[index];
++
++ while (*match && ((*match)->inode != oldinode ||
++ (*match)->device != olddevice ||
++ !((*match)->mode & GR_DELETED))) {
++ index = (index + (1 << i)) % subj_size;
++ i = (i + 1) % 32;
++ match = &s_hash[index];
++ }
++
++ if (*match && (*match != deleted_subject)
++ && ((*match)->inode == oldinode)
++ && ((*match)->device == olddevice)
++ && ((*match)->mode & GR_DELETED)) {
++ tmp = *match;
++
++ tmp->inode = newinode;
++ tmp->device = newdevice;
++ tmp->mode &= ~GR_DELETED;
++
++ *match = deleted_subject;
++
++ insert_acl_subj_label(tmp, role);
++ }
++
++ return;
++}
++
++static __inline__ void
++update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
++ const ino_t newinode, const dev_t newdevice)
++{
++ unsigned long index = fhash(oldinode, olddevice, inodev_set.n_size);
++ struct name_entry **match;
++ struct name_entry *tmp;
++ __u8 i = 0;
++
++ match = &inodev_set.n_hash[index];
++
++ while (*match
++ && ((*match)->inode != oldinode
++ || (*match)->device != olddevice)) {
++ index = (index + (1 << i)) % inodev_set.n_size;
++ i = (i + 1) % 32;
++ match = &inodev_set.n_hash[index];
++ }
++
++ if (*match && (*match != deleted_inodev)
++ && ((*match)->inode == oldinode)
++ && ((*match)->device == olddevice)) {
++ tmp = *match;
++
++ tmp->inode = newinode;
++ tmp->device = newdevice;
++
++ *match = deleted_inodev;
++
++ insert_inodev_entry(tmp);
++ }
++
++ return;
++}
++
++static __inline__ void
++do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
++ const struct vfsmount *mnt)
++{
++ struct acl_subject_label *i;
++ struct acl_role_label *role;
++
++ for (role = role_list_head; role; role = role->next) {
++ update_acl_subj_label(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, role);
++
++ for (i = role->hash->first; i; i = i->next) {
++ if (unlikely((i->mode & GR_NESTED) &&
++ (i->inode == dentry->d_inode->i_ino) &&
++ (i->device == dentry->d_inode->i_sb->s_dev))) {
++ i->inode = dentry->d_inode->i_ino;
++ i->device = dentry->d_inode->i_sb->s_dev;
++ }
++ update_acl_obj_label(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino,
++ dentry->d_inode->i_sb->s_dev, i);
++ }
++ }
++
++ update_inodev_entry(matchn->inode, matchn->device,
++ dentry->d_inode->i_ino, dentry->d_inode->i_sb->s_dev);
++
++ return;
++}
++
++void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename(dentry, mnt));
++ preempt_enable();
++
++ if (unlikely((unsigned long)matchn)) {
++ write_lock(&gr_inode_lock);
++ do_handle_create(matchn, dentry, mnt);
++ write_unlock(&gr_inode_lock);
++ }
++
++ return;
++}
++
++void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ struct name_entry *matchn;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ preempt_disable();
++ matchn = lookup_name_entry(gr_to_filename(new_dentry, mnt));
++ preempt_enable();
++
++ /* we wouldn't have to check d_inode if it weren't for
++ NFS silly-renaming
++ */
++
++ write_lock(&gr_inode_lock);
++ if (unlikely(replace && new_dentry->d_inode)) {
++ if (unlikely(lookup_inodev_entry(new_dentry->d_inode->i_ino,
++ new_dentry->d_inode->i_sb->s_dev) &&
++ (old_dentry->d_inode->i_nlink <= 1)))
++ do_handle_delete(new_dentry->d_inode->i_ino,
++ new_dentry->d_inode->i_sb->s_dev);
++ }
++
++ if (unlikely(lookup_inodev_entry(old_dentry->d_inode->i_ino,
++ old_dentry->d_inode->i_sb->s_dev) &&
++ (old_dentry->d_inode->i_nlink <= 1)))
++ do_handle_delete(old_dentry->d_inode->i_ino,
++ old_dentry->d_inode->i_sb->s_dev);
++
++ if (unlikely((unsigned long)matchn))
++ do_handle_create(matchn, old_dentry, mnt);
++ write_unlock(&gr_inode_lock);
++
++ return;
++}
++
++static int
++lookup_special_role_auth(const char *rolename, unsigned char **salt,
++ unsigned char **sum)
++{
++ struct acl_role_label *r;
++ struct role_allowed_ip *ipp;
++ struct role_transition *trans;
++ __u16 i;
++ int found = 0;
++
++ /* check transition table */
++
++ for (trans = current->role->transitions; trans; trans = trans->next) {
++ if (!strcmp(rolename, trans->rolename)) {
++ found = 1;
++ break;
++ }
++ }
++
++ if (!found)
++ return 0;
++
++ /* handle special roles that do not require authentication
++ and check ip */
++
++ for (r = role_list_head; r; r = r->next) {
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL)) {
++ found = 0;
++ if (r->allowed_ips != NULL) {
++ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
++ if ((ntohl(current->curr_ip) & ipp->netmask) ==
++ (ntohl(ipp->addr) & ipp->netmask))
++ found = 1;
++ }
++ } else
++ found = 2;
++ if (!found)
++ return 0;
++
++ if (r->roletype & GR_ROLE_NOPW) {
++ *salt = NULL;
++ *sum = NULL;
++ return 1;
++ }
++ }
++ }
++
++ for (i = 0; i < num_sprole_pws; i++) {
++ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
++ *salt = acl_special_roles[i]->salt;
++ *sum = acl_special_roles[i]->sum;
++ return 1;
++ }
++ }
++
++ return 0;
++}
++
++static void
++assign_special_role(char *rolename)
++{
++ struct acl_object_label *obj;
++ struct acl_role_label *r;
++ struct acl_role_label *assigned = NULL;
++ struct task_struct *tsk;
++ struct file *filp;
++
++ for (r = role_list_head; r; r = r->next)
++ if (!strcmp(rolename, r->rolename) &&
++ (r->roletype & GR_ROLE_SPECIAL))
++ assigned = r;
++
++ if (!assigned)
++ return;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++
++ tsk = current->parent;
++ if (tsk == NULL) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return;
++ }
++
++ filp = tsk->exec_file;
++ if (filp == NULL) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return;
++ }
++
++ tsk->is_writable = 0;
++
++ acl_sp_role_value = (acl_sp_role_value % 65535) + 1;
++ tsk->acl_sp_role = 1;
++ tsk->acl_role_id = acl_sp_role_value;
++ tsk->role = assigned;
++ tsk->acl = chk_subj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role);
++
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, tsk->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ tsk->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, tsk->pid);
++#endif
++
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return;
++}
++
++ssize_t
++write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
++{
++ struct gr_arg_wrapper uwrap;
++ unsigned char *sprole_salt;
++ unsigned char *sprole_sum;
++ int error = sizeof (struct gr_arg_wrapper);
++ int error2 = 0;
++
++ down(&gr_dev_sem);
++
++ if (count != sizeof (struct gr_arg_wrapper)) {
++ security_alert_good(GR_DEV_ACL_MSG, (int)count,
++ (int) sizeof (struct gr_arg_wrapper));
++ error = -EINVAL;
++ goto out;
++ }
++
++ if ((gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
++ && time_before_eq(gr_auth_expires, get_seconds())) {
++ gr_auth_expires = 0;
++ gr_auth_attempts = 0;
++ }
++
++ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
++ error = -EINVAL;
++ goto out;
++ }
++
++ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
++ error = -EFAULT;
++ goto out;
++ }
++
++ if (gr_usermode->mode != SPROLE && time_after(gr_auth_expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++	/* if a non-root user is trying to do anything other than use a
++	   special role, do not attempt authentication and do not count
++	   the attempt towards the authentication lockout
++	*/
++
++ if (gr_usermode->mode != SPROLE && current->uid) {
++ error = -EPERM;
++ goto out;
++ }
++
++ /* ensure pw and special role name are null terminated */
++
++ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
++ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
++
++	/* Okay.
++	 * We now have enough of the argument structure (we have yet
++	 * to copy_from_user the tables themselves). Copy the tables
++	 * only if we need them, i.e. for loading operations. */
++
++ switch (gr_usermode->mode) {
++ case STATUS:
++ if (gr_status & GR_READY)
++ error = 1;
++ else
++ error = 2;
++ goto out;
++ case SHUTDOWN:
++ if ((gr_status & GR_READY)
++ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ gr_status &= ~GR_READY;
++ security_alert_good(GR_SHUTS_ACL_MSG, DEFAULTSECARGS);
++ free_variables();
++ memset(gr_usermode, 0, sizeof (struct gr_arg));
++ memset(gr_system_salt, 0, GR_SALT_LEN);
++ memset(gr_system_sum, 0, GR_SHA_LEN);
++ } else if (gr_status & GR_READY) {
++ security_alert(GR_SHUTF_ACL_MSG, DEFAULTSECARGS);
++ error = -EPERM;
++ } else {
++ security_alert_good(GR_SHUTI_ACL_MSG, DEFAULTSECARGS);
++ error = -EAGAIN;
++ }
++ break;
++ case ENABLE:
++ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
++ security_alert_good(GR_ENABLE_ACL_MSG, GR_VERSION);
++ else {
++ if (gr_status & GR_READY)
++ error = -EAGAIN;
++ else
++ error = error2;
++ security_alert(GR_ENABLEF_ACL_MSG, GR_VERSION,
++ DEFAULTSECARGS);
++ }
++ break;
++ case RELOAD:
++ if (!(gr_status & GR_READY)) {
++ security_alert_good(GR_RELOADI_ACL_MSG);
++ error = -EAGAIN;
++ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ lock_kernel();
++ gr_status &= ~GR_READY;
++ free_variables();
++ if (!(error2 = gracl_init(gr_usermode))) {
++ unlock_kernel();
++ security_alert_good(GR_RELOAD_ACL_MSG,
++ GR_VERSION);
++ } else {
++ unlock_kernel();
++ error = error2;
++ security_alert(GR_RELOADF_ACL_MSG, GR_VERSION,
++ DEFAULTSECARGS);
++ }
++ } else {
++ security_alert(GR_RELOADF_ACL_MSG, GR_VERSION,
++ DEFAULTSECARGS);
++ error = -EPERM;
++ }
++ break;
++ case SEGVMOD:
++ if (unlikely(!(gr_status & GR_READY))) {
++ security_alert_good(GR_SEGVMODI_ACL_MSG,
++ DEFAULTSECARGS);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
++ security_alert_good(GR_SEGVMODS_ACL_MSG,
++ DEFAULTSECARGS);
++ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
++ struct acl_subject_label *segvacl;
++ segvacl =
++ lookup_acl_subj_label(gr_usermode->segv_inode,
++ gr_usermode->segv_device,
++ current->role);
++ if (segvacl) {
++ segvacl->crashes = 0;
++ segvacl->expires = 0;
++ }
++ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
++ gr_remove_uid(gr_usermode->segv_uid);
++ }
++ } else {
++ security_alert(GR_SEGVMODF_ACL_MSG, DEFAULTSECARGS);
++ error = -EPERM;
++ }
++ break;
++ case SPROLE:
++ if (unlikely(!(gr_status & GR_READY))) {
++ security_alert_good(GR_SPROLEI_ACL_MSG, DEFAULTSECARGS);
++ error = -EAGAIN;
++ break;
++ }
++
++ if ((current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES)
++ && time_before_eq(current->role->expires, get_seconds())) {
++ current->role->expires = 0;
++ current->role->auth_attempts = 0;
++ }
++
++ if (time_after(current->role->expires, get_seconds())) {
++ error = -EBUSY;
++ goto out;
++ }
++
++ if (lookup_special_role_auth
++ (gr_usermode->sp_role, &sprole_salt, &sprole_sum)
++ && ((!sprole_salt && !sprole_sum)
++ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
++ assign_special_role(gr_usermode->sp_role);
++ security_alert_good(GR_SPROLES_ACL_MSG,
++ (current->parent) ? current->
++ parent->role->rolename : "",
++ acl_sp_role_value, DEFAULTSECARGS);
++ } else {
++ security_alert(GR_SPROLEF_ACL_MSG, gr_usermode->sp_role,
++ DEFAULTSECARGS);
++ error = -EPERM;
++ current->role->auth_attempts++;
++ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
++ current->role->expires =
++ get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++ security_alert(GR_MAXROLEPW_ACL_MSG,
++ CONFIG_GRKERNSEC_ACL_MAXTRIES,
++ gr_usermode->sp_role, DEFAULTSECARGS);
++ }
++
++ goto out;
++ }
++ break;
++ case UNSPROLE:
++ if (unlikely(!(gr_status & GR_READY))) {
++ security_alert_good(GR_UNSPROLEI_ACL_MSG, DEFAULTSECARGS);
++ error = -EAGAIN;
++ break;
++ }
++
++ if (current->role->roletype & GR_ROLE_SPECIAL) {
++ security_alert_good(GR_UNSPROLES_ACL_MSG,
++ (current->parent) ? current->
++ parent->role->rolename : "",
++ (current->parent) ? current->
++ parent->acl_role_id : 0, DEFAULTSECARGS);
++ gr_set_acls(1);
++ } else {
++ security_alert(GR_UNSPROLEF_ACL_MSG, current->role->rolename,
++ DEFAULTSECARGS);
++ error = -EPERM;
++ goto out;
++ }
++ break;
++ default:
++ security_alert(GR_INVMODE_ACL_MSG, gr_usermode->mode,
++ DEFAULTSECARGS);
++ error = -EINVAL;
++ break;
++ }
++
++ if (error != -EPERM)
++ goto out;
++
++ gr_auth_attempts++;
++
++ if (gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES) {
++ security_alert(GR_MAXPW_ACL_MSG, CONFIG_GRKERNSEC_ACL_MAXTRIES);
++ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
++ }
++
++ out:
++ up(&gr_dev_sem);
++ return error;
++}
++
++int
++gr_set_acls(const int type)
++{
++ struct acl_object_label *obj;
++ struct task_struct *task, *task2;
++ struct file *filp;
++ unsigned short i;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ for_each_process(task2) {
++ task = task2;
++ do {
++			/* check to see if we're called from the exit handler;
++			   if so, only replace ACLs that have inherited the admin
++			   ACL */
++
++ if (type && (task->role != current->role ||
++ task->acl_role_id != current->acl_role_id))
++ continue;
++
++ task->acl_role_id = 0;
++ task->acl_sp_role = 0;
++
++ if ((filp = task->exec_file)) {
++ do_set_role_label(task, task->uid, task->gid);
++
++ task->acl =
++ chk_subj_label(filp->f_dentry, filp->f_vfsmnt,
++ task->role);
++ if (task->acl) {
++ struct acl_subject_label *curr;
++ curr = task->acl;
++
++ task->is_writable = 0;
++ /* ignore additional mmap checks for processes that are writable
++ by the default ACL */
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, task->role->root_label);
++ if (unlikely(obj->mode & GR_WRITE))
++ task->is_writable = 1;
++
++#ifdef CONFIG_GRKERNSEC_ACL_DEBUG
++ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task->pid, task->role->rolename, task->acl->filename);
++#endif
++ if (!(curr->mode & GR_LEARN))
++ for (i = 0; i < RLIM_NLIMITS; i++) {
++ if (!(curr->resmask & (1 << i)))
++ continue;
++
++ task->rlim[i].rlim_cur =
++ curr->res[i].rlim_cur;
++ task->rlim[i].rlim_max =
++ curr->res[i].rlim_max;
++ }
++ } else {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ security_alert_good(GR_DEFACL_MSG, task->comm,
++ task->pid);
++ return 1;
++ }
++ } else {
++ // it's a kernel process
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
++ task->acl->mode &= ~GR_FIND;
++#endif
++ }
++ } while ((task = next_thread(task)) != task2);
++ }
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_learn_resource);
++
++void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ struct acl_subject_label *acl;
++
++ if (unlikely((gr_status & GR_READY) &&
++ task->acl && (task->acl->mode & GR_LEARN)))
++ goto skip_reslog;
++
++#ifdef CONFIG_GRKERNSEC_RESLOG
++ gr_log_resource(task, res, wanted, gt);
++#endif
++ skip_reslog:
++
++ if (unlikely(!(gr_status & GR_READY) || !wanted))
++ return;
++
++ acl = task->acl;
++
++ if (likely(!acl || !(acl->mode & GR_LEARN) ||
++ !(acl->resmask & (1 << (unsigned short) res))))
++ return;
++
++ if (wanted >= acl->res[res].rlim_cur) {
++ unsigned long res_add;
++
++ res_add = wanted;
++ switch (res) {
++ case RLIMIT_CPU:
++ res_add += GR_RLIM_CPU_BUMP;
++ break;
++ case RLIMIT_FSIZE:
++ res_add += GR_RLIM_FSIZE_BUMP;
++ break;
++ case RLIMIT_DATA:
++ res_add += GR_RLIM_DATA_BUMP;
++ break;
++ case RLIMIT_STACK:
++ res_add += GR_RLIM_STACK_BUMP;
++ break;
++ case RLIMIT_CORE:
++ res_add += GR_RLIM_CORE_BUMP;
++ break;
++ case RLIMIT_RSS:
++ res_add += GR_RLIM_RSS_BUMP;
++ break;
++ case RLIMIT_NPROC:
++ res_add += GR_RLIM_NPROC_BUMP;
++ break;
++ case RLIMIT_NOFILE:
++ res_add += GR_RLIM_NOFILE_BUMP;
++ break;
++ case RLIMIT_MEMLOCK:
++ res_add += GR_RLIM_MEMLOCK_BUMP;
++ break;
++ case RLIMIT_AS:
++ res_add += GR_RLIM_AS_BUMP;
++ break;
++ case RLIMIT_LOCKS:
++ res_add += GR_RLIM_LOCKS_BUMP;
++ break;
++ }
++
++ acl->res[res].rlim_cur = res_add;
++
++ if (wanted > acl->res[res].rlim_max)
++ acl->res[res].rlim_max = res_add;
++
++ security_learn(GR_LEARN_AUDIT_MSG, current->role->rolename,
++ current->role->roletype, acl->filename,
++ acl->res[res].rlim_cur, acl->res[res].rlim_max,
++ "", (unsigned long) res);
++ }
++
++ return;
++}
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++void
++pax_set_flags(struct linux_binprm *bprm)
++{
++ struct task_struct *task = current;
++ struct acl_subject_label *proc;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return;
++
++ proc = task->acl;
++
++ if (proc->mode & GR_PAXPAGE)
++ task->flags &= ~PF_PAX_PAGEEXEC;
++ if (proc->mode & GR_PAXSEGM)
++ task->flags &= ~PF_PAX_SEGMEXEC;
++ if (proc->mode & GR_PAXGCC)
++ task->flags |= PF_PAX_EMUTRAMP;
++ if (proc->mode & GR_PAXMPROTECT)
++ task->flags &= ~PF_PAX_MPROTECT;
++ if (proc->mode & GR_PAXRANDMMAP)
++ task->flags &= ~PF_PAX_RANDMMAP;
++ if (proc->mode & GR_PAXRANDEXEC)
++ task->flags |= PF_PAX_RANDEXEC;
++
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++extern struct proc_dir_entry *proc_sys_root;
++
++
++/* the following function is called under the BKL */
++
++__u32
++gr_handle_sysctl(const struct ctl_table *table, const void *oldval,
++ const void *newval)
++{
++ struct proc_dir_entry *tmp;
++ struct nameidata nd;
++ const char *proc_sys = "/proc/sys";
++ char *path;
++ struct acl_object_label *obj;
++ unsigned short len = 0, pos = 0, depth = 0, i;
++ __u32 err = 0;
++ __u32 mode = 0;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 1;
++
++ path = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
++
++ if (oldval)
++ mode |= GR_READ;
++ if (newval)
++ mode |= GR_WRITE;
++
++ /* convert the requested sysctl entry into a pathname */
++
++ for (tmp = table->de; tmp != proc_sys_root; tmp = tmp->parent) {
++ len += strlen(tmp->name);
++ len++;
++ depth++;
++ }
++
++ if ((len + depth + strlen(proc_sys) + 1) > PAGE_SIZE)
++ return 0; /* deny */
++
++ memset(path, 0, PAGE_SIZE);
++
++ memcpy(path, proc_sys, strlen(proc_sys));
++
++ pos += strlen(proc_sys);
++
++ for (; depth > 0; depth--) {
++ path[pos] = '/';
++ pos++;
++ for (i = 1, tmp = table->de; tmp != proc_sys_root;
++ tmp = tmp->parent) {
++ if (depth == i) {
++ memcpy(path + pos, tmp->name,
++ strlen(tmp->name));
++ pos += strlen(tmp->name);
++ }
++ i++;
++ }
++ }
++
++ err = path_lookup(path, LOOKUP_FOLLOW, &nd);
++
++ if (err)
++ goto out;
++
++ obj = chk_obj_label(nd.dentry, nd.mnt, current->acl);
++ err = obj->mode & (mode | to_gr_audit(mode) | GR_SUPPRESS);
++
++ if (unlikely((current->acl->mode & GR_LEARN) && ((err & mode) != mode))) {
++ __u32 new_mode = mode;
++
++ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
++
++ err = new_mode;
++ gr_log_learn(current->role, current->uid, current->gid,
++ current, path, new_mode);
++ } else if ((err & mode) != mode && !(err & GR_SUPPRESS)) {
++ security_alert(GR_SYSCTL_ACL_MSG, "denied", path,
++ (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "",
++ DEFAULTSECARGS);
++ err = 0;
++ } else if ((err & mode) != mode) {
++ err = 0;
++ } else if (((err & mode) == mode) && (err & GR_AUDITS)) {
++ security_audit(GR_SYSCTL_ACL_MSG, "successful",
++ path, (mode & GR_READ) ? " reading" : "",
++ (mode & GR_WRITE) ? " writing" : "",
++ DEFAULTSECARGS);
++ }
++
++ path_release(&nd);
++
++ out:
++ return err;
++}
++#endif
++
++int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ struct file *filp;
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ filp = task->exec_file;
++
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->parent;
++ }
++
++ if (!filp || (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE))) {
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++ return 1;
++ }
++
++ retmode = gr_search_file(filp->f_dentry, GR_NOPTRACE, filp->f_vfsmnt);
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++
++ if (retmode & GR_NOPTRACE)
++ return 1;
++
++ if (!(current->acl->mode & GR_OVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
++ && (current->acl != task->acl || (current->acl != current->role->root_label
++ && current->pid != task->pid)))
++ return 1;
++
++ return 0;
++}
++
++int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ struct task_struct *tmp = task;
++ struct task_struct *curtemp = current;
++ __u32 retmode;
++
++ if (unlikely(!(gr_status & GR_READY)))
++ return 0;
++
++ read_lock(&tasklist_lock);
++ while (tmp->pid > 0) {
++ if (tmp == curtemp)
++ break;
++ tmp = tmp->parent;
++ }
++ read_unlock(&tasklist_lock);
++
++ if (tmp->pid == 0 && !(current->acl->mode & GR_RELAXPTRACE)) {
++ security_alert(GR_PTRACE_ACL_MSG, task->exec_file ?
++ gr_to_filename(task->exec_file->f_dentry, task->exec_file->f_vfsmnt)
++ : "(none)", task->comm, task->pid,
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ read_lock(&grsec_exec_file_lock);
++ if (unlikely(!task->exec_file)) {
++ read_unlock(&grsec_exec_file_lock);
++ return 0;
++ }
++
++ retmode = gr_search_file(task->exec_file->f_dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_vfsmnt);
++ read_unlock(&grsec_exec_file_lock);
++
++ if (retmode & GR_NOPTRACE) {
++ security_alert(GR_PTRACE_ACL_MSG, gr_to_filename(task->exec_file->f_dentry, task->exec_file->f_vfsmnt),
++ task->comm, task->pid, DEFAULTSECARGS);
++ return 1;
++ }
++
++ if (retmode & GR_PTRACERD) {
++ switch (request) {
++ case PTRACE_POKETEXT:
++ case PTRACE_POKEDATA:
++ case PTRACE_POKEUSR:
++#if !defined(CONFIG_PPC32) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA)
++ case PTRACE_SETREGS:
++ case PTRACE_SETFPREGS:
++#endif
++#ifdef CONFIG_X86
++ case PTRACE_SETFPXREGS:
++#endif
++#ifdef CONFIG_ALTIVEC
++ case PTRACE_SETVRREGS:
++#endif
++ return 1;
++ default:
++ return 0;
++ }
++ } else if (!(current->acl->mode & GR_OVERRIDE) &&
++ !(current->role->roletype & GR_ROLE_GOD) &&
++ (current->acl != task->acl)) {
++ security_alert(GR_PTRACE_ACL_MSG,
++ gr_to_filename(task->exec_file->f_dentry, task->exec_file->f_vfsmnt),
++ task->comm, task->pid, DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 0;
++}
++
++int
++gr_handle_mmap(const struct file *filp, const unsigned long prot)
++{
++ struct acl_object_label *obj, *obj2;
++
++ if (unlikely(!(gr_status & GR_READY) ||
++ (current->acl->mode & GR_OVERRIDE) || !filp ||
++ !(prot & PROT_EXEC)))
++ return 0;
++
++ if (unlikely(current->is_writable))
++ return 0;
++
++ obj = chk_obj_label(filp->f_dentry, filp->f_vfsmnt, default_role->root_label);
++ obj2 = chk_obj_label(filp->f_dentry, filp->f_vfsmnt,
++ current->role->root_label);
++ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
++ security_alert(GR_WRITLIB_ACL_MSG,
++ gr_to_filename(filp->f_dentry, filp->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 0;
++}
++
++int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ mode =
++ gr_search_file(file->f_dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_vfsmnt);
++
++ if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) {
++ security_alert(GR_MMAP_ACL_MSG, "denied",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ security_audit(GR_MMAP_ACL_MSG, "successful",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 1;
++}
++
++int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ __u32 mode;
++
++ if (unlikely(!file || !(prot & PROT_EXEC)))
++ return 1;
++
++ mode =
++ gr_search_file(file->f_dentry,
++ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
++ file->f_vfsmnt);
++
++ if (unlikely(!gr_tpe_allow(file) || (!(mode & GR_EXEC) && !(mode & GR_SUPPRESS)))) {
++ security_alert(GR_MPROTECT_ACL_MSG, "denied",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely(!gr_tpe_allow(file) || !(mode & GR_EXEC))) {
++ return 0;
++ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
++ security_audit(GR_MPROTECT_ACL_MSG, "successful",
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 1;
++ }
++
++ return 1;
++}
++
++void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ u64 runtime64;
++ unsigned long runtime;
++ unsigned long cputime;
++ unsigned int wday, cday;
++ __u8 whr, chr;
++ __u8 wmin, cmin;
++ __u8 wsec, csec;
++ char cur_tty[64] = { 0 };
++ char parent_tty[64] = { 0 };
++
++ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
++ !(task->acl->mode & GR_PROCACCT)))
++ return;
++
++ runtime64 = get_jiffies_64() - task->start_time;
++ do_div(runtime64, HZ);
++ runtime = (unsigned long)runtime64;
++ wday = runtime / (3600 * 24);
++ runtime -= wday * (3600 * 24);
++ whr = runtime / 3600;
++ runtime -= whr * 3600;
++ wmin = runtime / 60;
++ runtime -= wmin * 60;
++ wsec = runtime;
++
++ cputime = (task->utime + task->stime) / HZ;
++ cday = cputime / (3600 * 24);
++ cputime -= cday * (3600 * 24);
++ chr = cputime / 3600;
++ cputime -= chr * 3600;
++ cmin = cputime / 60;
++ cputime -= cmin * 60;
++ csec = cputime;
++
++ security_audit(GR_ACL_PROCACCT_MSG, gr_task_fullpath(task), task->comm,
++ task->pid, NIPQUAD(task->curr_ip), tty_name(task->signal->tty,
++ cur_tty),
++ task->uid, task->euid, task->gid, task->egid, wday, whr,
++ wmin, wsec, cday, chr, cmin, csec,
++ (task->flags & PF_SIGNALED) ? "killed by signal" : "exited",
++ code, gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid,
++ NIPQUAD(task->parent->curr_ip),
++ tty_name(task->parent->signal->tty, parent_tty),
++ task->parent->uid, task->parent->euid, task->parent->gid,
++ task->parent->egid);
++
++ return;
++}
++
++EXPORT_SYMBOL(gr_set_kernel_label);
++
++void gr_set_kernel_label(struct task_struct *task)
++{
++ if (gr_status & GR_READY) {
++ task->role = kernel_role;
++ task->acl = kernel_role->root_label;
++ }
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/gracl_alloc.c linux-2.6.7/grsecurity/gracl_alloc.c
+--- linux-2.6.7/grsecurity/gracl_alloc.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_alloc.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,93 @@
++/* stack-based acl allocation tracking (c) Brad Spengler 2002,2003 */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++
++static unsigned long alloc_stack_next = 1;
++static unsigned long alloc_stack_size = 1;
++static void **alloc_stack;
++
++static __inline__ int
++alloc_pop(void)
++{
++ if (alloc_stack_next == 1)
++ return 0;
++
++ kfree(alloc_stack[alloc_stack_next - 2]);
++
++ alloc_stack_next--;
++
++ return 1;
++}
++
++static __inline__ void
++alloc_push(void *buf)
++{
++ if (alloc_stack_next >= alloc_stack_size)
++ BUG();
++
++ alloc_stack[alloc_stack_next - 1] = buf;
++
++ alloc_stack_next++;
++
++ return;
++}
++
++void *
++acl_alloc(unsigned long len)
++{
++ void *ret;
++
++ if (len > PAGE_SIZE)
++ BUG();
++
++ ret = kmalloc(len, GFP_KERNEL);
++
++ if (ret)
++ alloc_push(ret);
++
++ return ret;
++}
++
++void
++acl_free_all(void)
++{
++ if (gr_acl_is_enabled() || !alloc_stack)
++ return;
++
++ while (alloc_pop()) ;
++
++ if (alloc_stack) {
++ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
++ kfree(alloc_stack);
++ else
++ vfree(alloc_stack);
++ }
++
++ alloc_stack = NULL;
++ alloc_stack_size = 1;
++ alloc_stack_next = 1;
++
++ return;
++}
++
++int
++acl_alloc_stack_init(unsigned long size)
++{
++ if ((size * sizeof (void *)) <= PAGE_SIZE)
++ alloc_stack =
++ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
++ else
++ alloc_stack = (void **) vmalloc(size * sizeof (void *));
++
++ alloc_stack_size = size;
++
++ if (!alloc_stack)
++ return 0;
++ else
++ return 1;
++}
+diff -urN linux-2.6.7/grsecurity/gracl_cap.c linux-2.6.7/grsecurity/gracl_cap.c
+--- linux-2.6.7/grsecurity/gracl_cap.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_cap.c 2004-06-29 10:06:07 -0400
+@@ -0,0 +1,116 @@
++/* capability handling routines, (c) Brad Spengler 2002,2003 */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/capability.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static const char *captab_log[29] = {
++ "CAP_CHOWN",
++ "CAP_DAC_OVERRIDE",
++ "CAP_DAC_READ_SEARCH",
++ "CAP_FOWNER",
++ "CAP_FSETID",
++ "CAP_KILL",
++ "CAP_SETGID",
++ "CAP_SETUID",
++ "CAP_SETPCAP",
++ "CAP_LINUX_IMMUTABLE",
++ "CAP_NET_BIND_SERVICE",
++ "CAP_NET_BROADCAST",
++ "CAP_NET_ADMIN",
++ "CAP_NET_RAW",
++ "CAP_IPC_LOCK",
++ "CAP_IPC_OWNER",
++ "CAP_SYS_MODULE",
++ "CAP_SYS_RAWIO",
++ "CAP_SYS_CHROOT",
++ "CAP_SYS_PTRACE",
++ "CAP_SYS_PACCT",
++ "CAP_SYS_ADMIN",
++ "CAP_SYS_BOOT",
++ "CAP_SYS_NICE",
++ "CAP_SYS_RESOURCE",
++ "CAP_SYS_TIME",
++ "CAP_SYS_TTY_CONFIG",
++ "CAP_MKNOD",
++ "CAP_LEASE"
++};
++
++EXPORT_SYMBOL(gr_task_is_capable);
++
++int
++gr_task_is_capable(struct task_struct *task, const int cap)
++{
++ struct acl_subject_label *curracl;
++ __u32 cap_drop = 0, cap_mask = 0;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = task->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++ if (!(cap_mask & (1 << cap)) && (curracl->cap_mask & (1 << cap)))
++ cap_drop |= curracl->cap_lower & (1 << cap);
++ cap_mask |= curracl->cap_mask;
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ curracl = task->acl;
++
++ if ((curracl->mode & GR_LEARN)
++ && cap_raised(task->cap_effective, cap)) {
++ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
++ task->role->roletype, task->uid,
++ task->gid, task->exec_file ?
++ gr_to_filename(task->exec_file->f_dentry,
++ task->exec_file->f_vfsmnt) : curracl->filename,
++ curracl->filename, 0UL,
++ 0UL, "", (unsigned long) cap, NIPQUAD(task->curr_ip));
++ return 1;
++ }
++
++ if ((cap >= 0) && (cap < 29) && cap_raised(task->cap_effective, cap))
++ security_alert(GR_CAP_ACL_MSG, captab_log[cap],
++ gr_task_fullpath(task), task->comm, task->pid, task->uid, task->euid,
++ task->gid, task->egid, gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid, task->parent->uid,
++ task->parent->euid, task->parent->gid, task->parent->egid);
++
++ return 0;
++}
++
++int
++gr_is_capable_nolog(const int cap)
++{
++ struct acl_subject_label *curracl;
++ __u32 cap_drop = 0, cap_mask = 0;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ curracl = current->acl;
++
++ cap_drop = curracl->cap_lower;
++ cap_mask = curracl->cap_mask;
++
++ while ((curracl = curracl->parent_subject)) {
++ cap_drop |= curracl->cap_lower & (cap_mask & ~curracl->cap_mask);
++ cap_mask |= curracl->cap_mask;
++ }
++
++ if (!cap_raised(cap_drop, cap))
++ return 1;
++
++ return 0;
++}
++
+diff -urN linux-2.6.7/grsecurity/gracl_fs.c linux-2.6.7/grsecurity/gracl_fs.c
+--- linux-2.6.7/grsecurity/gracl_fs.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_fs.c 2004-07-27 16:58:25 -0400
+@@ -0,0 +1,468 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/stat.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++__u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return GR_FIND;
++
++ mode =
++ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
++
++ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
++ security_audit(GR_HIDDEN_ACL_MSG, "successful",
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return mode;
++ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
++ security_alert(GR_HIDDEN_ACL_MSG, "denied",
++ gr_to_filename(dentry, mnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely(!(mode & GR_FIND)))
++ return 0;
++
++ return GR_FIND;
++}
++
++__u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 reqmode = GR_FIND;
++ __u32 mode;
++
++ if (unlikely(!dentry->d_inode))
++ return reqmode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ else if (unlikely(fmode & FMODE_WRITE))
++ reqmode |= GR_WRITE;
++ if (likely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ security_audit(GR_OPEN_ACL_MSG, "successful",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" :
++ reqmode & GR_APPEND ? " appending" : "",
++ DEFAULTSECARGS);
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ security_alert(GR_OPEN_ACL_MSG, "denied",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "", DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ if (unlikely(fmode & O_APPEND))
++ reqmode |= GR_APPEND;
++ if (unlikely((fmode & FMODE_READ) && !(fmode & O_DIRECTORY)))
++ reqmode |= GR_READ;
++ if (unlikely((fmode & O_CREAT) && (imode & (S_ISUID | S_ISGID))))
++ reqmode |= GR_SETID;
++
++ mode =
++ gr_check_create(dentry, p_dentry, p_mnt,
++ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ security_audit(GR_CREATE_ACL_MSG, "successful",
++ gr_to_filename(dentry, p_mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" :
++ reqmode & GR_APPEND ? " appending" : "",
++ DEFAULTSECARGS);
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ security_alert(GR_CREATE_ACL_MSG, "denied",
++ gr_to_filename(dentry, p_mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : reqmode &
++ GR_APPEND ? " appending" : "", DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++__u32
++gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ __u32 mode, reqmode = GR_FIND;
++
++ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
++ reqmode |= GR_EXEC;
++ if (fmode & S_IWOTH)
++ reqmode |= GR_WRITE;
++ if (fmode & S_IROTH)
++ reqmode |= GR_READ;
++
++ mode =
++ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
++ mnt);
++
++ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
++ security_audit(GR_ACCESS_ACL_MSG, "successful",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "",
++ DEFAULTSECARGS);
++ return reqmode;
++ } else
++ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
++ {
++ security_alert(GR_ACCESS_ACL_MSG, "denied",
++ gr_to_filename(dentry, mnt),
++ reqmode & GR_READ ? " reading" : "",
++ reqmode & GR_WRITE ? " writing" : "",
++ reqmode & GR_EXEC ? " executing" : "",
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & reqmode) != reqmode))
++ return 0;
++
++ return reqmode;
++}
++
++#define generic_fs_handler(dentry, mnt, reqmode, fmt) \
++{ \
++ __u32 mode; \
++ \
++ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt); \
++ \
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { \
++ security_audit(fmt, "successful", \
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS); \
++ return mode; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { \
++ security_alert(fmt, "denied", gr_to_filename(dentry, mnt), \
++ DEFAULTSECARGS); \
++ return 0; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode))) \
++ return 0; \
++ \
++ return (reqmode); \
++}
++
++__u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_fchmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
++ return 1;
++
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_FCHMOD_ACL_MSG);
++ } else {
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_FCHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
++ mode_t mode)
++{
++ if (unlikely((mode != (mode_t)-1) && (mode & (S_ISUID | S_ISGID)))) {
++ generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
++ GR_CHMOD_ACL_MSG);
++ } else {
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
++ }
++}
++
++__u32
++gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
++ GR_UNIXCONNECT_ACL_MSG);
++}
++
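++/* readdir filtering: build a temporary dentry/inode pair carrying the
++   directory entry's real inode number so gr_search_file() checks the ACL
++   for the entry being listed rather than for the directory's dentry */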
++__u32
++gr_acl_handle_filldir(const struct dentry *dentry, const struct vfsmount *mnt,
++ const ino_t ino)
++{
++ if (likely((unsigned long)(dentry->d_inode))) {
++ struct dentry d = *dentry;
++ struct inode inode = *(dentry->d_inode);
++
++ inode.i_ino = ino;
++ d.d_inode = &inode;
++
++ if (unlikely(!gr_search_file(&d, GR_FIND | GR_NOLEARN, mnt)))
++ return 0;
++ }
++
++ return 1;
++}
++
++__u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
++ old_mnt);
++
++ if (unlikely(((mode & needmode) == needmode) && mode & GR_AUDITS)) {
++ security_audit(GR_LINK_ACL_MSG, "successful",
++ gr_to_filename(old_dentry, old_mnt), to,
++ DEFAULTSECARGS);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ security_alert(GR_LINK_ACL_MSG, "denied",
++ gr_to_filename(old_dentry, old_mnt), to,
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
++__u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ __u32 needmode = GR_WRITE | GR_CREATE;
++ __u32 mode;
++
++ mode =
++ gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_CREATE | GR_AUDIT_CREATE |
++ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
++
++ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
++ security_audit(GR_SYMLINK_ACL_MSG, "successful",
++ from, gr_to_filename(new_dentry, parent_mnt),
++ DEFAULTSECARGS);
++ return mode;
++ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
++ security_alert(GR_SYMLINK_ACL_MSG, "denied",
++ from, gr_to_filename(new_dentry, parent_mnt),
++ DEFAULTSECARGS);
++ return 0;
++ } else if (unlikely((mode & needmode) != needmode))
++ return 0;
++
++ return (GR_WRITE | GR_CREATE);
++}
++
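++/* same check/audit/deny pattern as generic_fs_handler, but for hooks that
++   create a new object under a parent directory, so the lookup goes through
++   gr_check_create() instead of gr_search_file() */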
++#define generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt, reqmode, fmt) \
++{ \
++ __u32 mode; \
++ \
++ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS); \
++ \
++ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) { \
++ security_audit(fmt, "successful", \
++ gr_to_filename(new_dentry, parent_mnt), \
++ DEFAULTSECARGS); \
++ return mode; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) { \
++ security_alert(fmt, "denied", \
++ gr_to_filename(new_dentry, parent_mnt), \
++ DEFAULTSECARGS); \
++ return 0; \
++ } else if (unlikely((mode & (reqmode)) != (reqmode))) \
++ return 0; \
++ \
++ return (reqmode); \
++}
++
++__u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ __u32 reqmode = GR_WRITE | GR_CREATE;
++ if (unlikely(mode & (S_ISUID | S_ISGID)))
++ reqmode |= GR_SETID;
++
++ generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ reqmode, GR_MKNOD_ACL_MSG);
++}
++
++__u32
++gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt)
++{
++ generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
++ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
++}
++
++#define RENAME_CHECK_SUCCESS(old, new) \
++ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
++ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
++
++int
++gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname)
++{
++ __u32 comp1, comp2;
++ int error = 0;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (!new_dentry->d_inode) {
++ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
++ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
++ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
++ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, old_mnt);
++ } else {
++ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
++ GR_CREATE | GR_DELETE |
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
++ GR_AUDIT_READ | GR_AUDIT_WRITE |
++ GR_SUPPRESS, parent_mnt);
++ comp2 =
++ gr_search_file(old_dentry,
++ GR_READ | GR_WRITE | GR_AUDIT_READ |
++ GR_DELETE | GR_AUDIT_DELETE |
++ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
++ }
++
++ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
++ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
++ security_audit(GR_RENAME_ACL_MSG, "successful",
++ gr_to_filename(old_dentry, old_mnt),
++ newname, DEFAULTSECARGS);
++ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
++ && !(comp2 & GR_SUPPRESS)) {
++ security_alert(GR_RENAME_ACL_MSG, "denied",
++ gr_to_filename(old_dentry, old_mnt), newname,
++ DEFAULTSECARGS);
++ error = -EACCES;
++ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
++ error = -EACCES;
++
++ return error;
++}
++
++void
++gr_acl_handle_exit(void)
++{
++ u16 id;
++ char *rolename;
++ struct file *exec_file;
++
++ if (unlikely(current->acl_sp_role && gr_acl_is_enabled())) {
++ id = current->acl_role_id;
++ rolename = current->role->rolename;
++ gr_set_acls(1);
++ security_alert_good(GR_SPROLEL_ACL_MSG,
++ rolename, id, DEFAULTSECARGS);
++ }
++
++ write_lock(&grsec_exec_file_lock);
++ exec_file = current->exec_file;
++ current->exec_file = NULL;
++ write_unlock(&grsec_exec_file_lock);
++
++ if (exec_file)
++ fput(exec_file);
++}
++
++int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ if (task->acl->mode & GR_PROTPROCFD)
++ return -EACCES;
++
++ return 0;
++}
+diff -urN linux-2.6.7/grsecurity/gracl_ip.c linux-2.6.7/grsecurity/gracl_ip.c
+--- linux-2.6.7/grsecurity/gracl_ip.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_ip.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,236 @@
++/*
++ * grsecurity/gracl_ip.c
++ * Copyright Brad Spengler 2002, 2003
++ *
++ */
++
++#include <linux/kernel.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/skbuff.h>
++#include <linux/ip.h>
++#include <linux/udp.h>
++#include <linux/smp_lock.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
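++/* modes passed to gr_search_connectbind() to select bind vs. connect rules */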
++#define GR_BIND 0x01
++#define GR_CONNECT 0x02
++
++static const char * gr_protocols[256] = {
++ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
++ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
++ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
++ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
++ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
++ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
++ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
++ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
++ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
++ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
++ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
++ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
++ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
++ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
++ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
++ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
++	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
++ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
++ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
++ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
++ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
++ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
++ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
++ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
++ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
++ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
++ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
++ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
++ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
++ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
++ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
++ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
++ };
++
++static const char * gr_socktypes[11] = {
++ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
++ "unknown:7", "unknown:8", "unknown:9", "packet"
++ };
++
++__inline__ const char *
++gr_proto_to_name(unsigned char proto)
++{
++ return gr_protocols[proto];
++}
++
++__inline__ const char *
++gr_socktype_to_name(unsigned char type)
++{
++ return gr_socktypes[type];
++}
++
++int
++gr_search_socket(const int domain, const int type, const int protocol)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ goto exit;
++
++ if ((domain < 0) || (type < 0) || (protocol < 0) || (domain != PF_INET)
++ || (domain >= NPROTO) || (type >= SOCK_MAX) || (protocol > 255))
++ goto exit; // let the kernel handle it
++
++ curr = current->acl;
++
++ if (!curr->ips)
++ goto exit;
++
++ if ((curr->ip_type & (1 << type)) &&
++ (curr->ip_proto[protocol / 32] & (1 << (protocol % 32))))
++ goto exit;
++
++ if (curr->mode & GR_LEARN) {
++		/* we don't place acls on raw sockets, and sometimes
++ dgram/ip sockets are opened for ioctl and not
++ bind/connect, so we'll fake a bind learn log */
++ if (type == SOCK_RAW || type == SOCK_PACKET) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, current->uid,
++ current->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_dentry,
++ current->exec_file->f_vfsmnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(fakeip), 0, type,
++ protocol, GR_CONNECT, NIPQUAD(current->curr_ip));
++ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
++ __u32 fakeip = 0;
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, current->uid,
++ current->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_dentry,
++ current->exec_file->f_vfsmnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(fakeip), 0, type,
++ protocol, GR_BIND, NIPQUAD(current->curr_ip));
++ }
++ /* we'll log when they use connect or bind */
++ goto exit;
++ }
++
++ security_alert(GR_SOCK_MSG, "inet", gr_socktype_to_name(type),
++ gr_proto_to_name(protocol), DEFAULTSECARGS);
++
++ return 0;
++ exit:
++ return 1;
++}
++
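++/* check a bind or connect against the subject's IP ACLs: the address, port
++   range, protocol and socket type must all match a single entry; in learn
++   mode the attempt is logged instead of denied */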
++static __inline__ int
++gr_search_connectbind(const int mode, const struct sock *sk,
++ const struct sockaddr_in *addr, const int type)
++{
++ struct acl_subject_label *curr;
++ struct acl_ip_label *ip;
++ unsigned long i;
++ __u32 ip_addr = 0;
++ __u16 ip_port = 0;
++
++ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
++ return 1;
++
++ curr = current->acl;
++
++ if (!curr->ips)
++ return 1;
++
++ ip_addr = addr->sin_addr.s_addr;
++ ip_port = ntohs(addr->sin_port);
++
++ for (i = 0; i < curr->ip_num; i++) {
++ ip = *(curr->ips + i);
++ if ((ip->mode & mode) &&
++ (ip_port >= ip->low) &&
++ (ip_port <= ip->high) &&
++ ((ntohl(ip_addr) & ip->netmask) ==
++ (ntohl(ip->addr) & ip->netmask))
++		    && (ip->proto[sk->sk_protocol / 32] & (1 << (sk->sk_protocol % 32)))
++ && (ip->type & (1 << type)))
++ return 1;
++ }
++
++ if (curr->mode & GR_LEARN) {
++ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
++ current->role->roletype, current->uid,
++ current->gid, current->exec_file ?
++ gr_to_filename(current->exec_file->f_dentry,
++ current->exec_file->f_vfsmnt) :
++ curr->filename, curr->filename,
++ NIPQUAD(ip_addr), ip_port, type,
++ sk->sk_protocol, mode, NIPQUAD(current->curr_ip));
++ return 1;
++ }
++
++ if (mode == GR_BIND)
++ security_alert(GR_BIND_ACL_MSG, NIPQUAD(ip_addr), ip_port,
++ gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol),
++ DEFAULTSECARGS);
++ else if (mode == GR_CONNECT)
++ security_alert(GR_CONNECT_ACL_MSG, NIPQUAD(ip_addr), ip_port,
++ gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol),
++ DEFAULTSECARGS);
++
++ return 0;
++}
++
++int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_CONNECT, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return gr_search_connectbind(GR_BIND, sock->sk, addr, sock->type);
++}
++
++int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ if (addr)
++ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
++ else {
++ struct sockaddr_in sin;
++ const struct inet_opt *inet = inet_sk(sk);
++
++ sin.sin_addr.s_addr = inet->daddr;
++ sin.sin_port = inet->dport;
++
++ return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
++ }
++}
++
++int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ struct sockaddr_in sin;
++
++ if (unlikely(skb->len < sizeof (struct udphdr)))
++ return 1; // skip this packet
++
++ sin.sin_addr.s_addr = skb->nh.iph->saddr;
++ sin.sin_port = skb->h.uh->source;
++
++ return gr_search_connectbind(GR_CONNECT, sk, &sin, SOCK_DGRAM);
++}
+diff -urN linux-2.6.7/grsecurity/gracl_learn.c linux-2.6.7/grsecurity/gracl_learn.c
+--- linux-2.6.7/grsecurity/gracl_learn.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_learn.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,204 @@
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/poll.h>
++#include <linux/smp_lock.h>
++#include <linux/string.h>
++#include <linux/file.h>
++#include <linux/types.h>
++#include <linux/vmalloc.h>
++#include <linux/grinternal.h>
++
++extern ssize_t write_grsec_handler(struct file * file, const char * buf,
++ size_t count, loff_t *ppos);
++extern int gr_acl_is_enabled(void);
++
++static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
++static int gr_learn_attached;
++
++/* use a 512k buffer */
++#define LEARN_BUFFER_SIZE (512 * 1024)
++
++static spinlock_t gr_learn_lock = SPIN_LOCK_UNLOCKED;
++static DECLARE_MUTEX(gr_learn_user_sem);
++
++/* we maintain two buffers: the grlearn reader context takes a semaphore
++   around the copy to userspace, while the other kernel contexts take a
++   spinlock when appending to the buffer, since they cannot sleep
++*/
++static char *learn_buffer;
++static char *learn_buffer_user;
++static int learn_buffer_len;
++static int learn_buffer_user_len;
++
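++/* blocking read for the learning daemon: wait until the kernel side has
++   buffered log data, snapshot it into the user copy buffer under the
++   spinlock, then copy it out to userspace with only the semaphore held */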
++static ssize_t
++read_learn(struct file *file, char * buf, size_t count, loff_t * ppos)
++{
++ DECLARE_WAITQUEUE(wait, current);
++ ssize_t retval = 0;
++
++ add_wait_queue(&learn_wait, &wait);
++ set_current_state(TASK_INTERRUPTIBLE);
++ do {
++ down(&gr_learn_user_sem);
++ spin_lock(&gr_learn_lock);
++ if (learn_buffer_len)
++ break;
++ spin_unlock(&gr_learn_lock);
++ up(&gr_learn_user_sem);
++ if (file->f_flags & O_NONBLOCK) {
++ retval = -EAGAIN;
++ goto out;
++ }
++ if (signal_pending(current)) {
++ retval = -ERESTARTSYS;
++ goto out;
++ }
++
++ schedule();
++ } while (1);
++
++ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
++ learn_buffer_user_len = learn_buffer_len;
++ retval = learn_buffer_len;
++ learn_buffer_len = 0;
++
++ spin_unlock(&gr_learn_lock);
++
++ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
++ retval = -EFAULT;
++
++ up(&gr_learn_user_sem);
++out:
++ set_current_state(TASK_RUNNING);
++ remove_wait_queue(&learn_wait, &wait);
++ return retval;
++}
++
++static unsigned int
++poll_learn(struct file * file, poll_table * wait)
++{
++ poll_wait(file, &learn_wait, wait);
++
++ if (learn_buffer_len)
++ return (POLLIN | POLLRDNORM);
++
++ return 0;
++}
++
++void
++gr_clear_learn_entries(void)
++{
++ char *tmp;
++
++ down(&gr_learn_user_sem);
++ if (learn_buffer != NULL) {
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++		vfree(tmp);
++ }
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ up(&gr_learn_user_sem);
++
++ return;
++}
++
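++/* append a formatted learn entry to the kernel-side buffer and wake the
++   reader; the entry is silently dropped when no reader is attached or the
++   buffer is nearly full */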
++void
++gr_add_learn_entry(const char *fmt, ...)
++{
++ va_list args;
++ unsigned int len;
++
++ if (!gr_learn_attached)
++ return;
++
++ spin_lock(&gr_learn_lock);
++
++ /* leave a gap at the end so we know when it's "full" but don't have to
++ compute the exact length of the string we're trying to append
++ */
++ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++ return;
++ }
++ if (learn_buffer == NULL) {
++ spin_unlock(&gr_learn_lock);
++ return;
++ }
++
++ va_start(args, fmt);
++ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
++ va_end(args);
++
++ learn_buffer_len += len + 1;
++
++ spin_unlock(&gr_learn_lock);
++ wake_up_interruptible(&learn_wait);
++
++ return;
++}
++
++static int
++open_learn(struct inode *inode, struct file *file)
++{
++ if (file->f_mode & FMODE_READ && gr_learn_attached)
++ return -EBUSY;
++ if (file->f_mode & FMODE_READ) {
++ down(&gr_learn_user_sem);
++ if (learn_buffer == NULL)
++ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
++ if (learn_buffer_user == NULL)
++ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
++		if (learn_buffer == NULL || learn_buffer_user == NULL) {
++			/* don't leak the semaphore on the error path */
++			up(&gr_learn_user_sem);
++			return -ENOMEM;
++		}
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 1;
++ up(&gr_learn_user_sem);
++ }
++ return 0;
++}
++
++static int
++close_learn(struct inode *inode, struct file *file)
++{
++ char *tmp;
++
++ if (file->f_mode & FMODE_READ) {
++ down(&gr_learn_user_sem);
++ if (learn_buffer != NULL) {
++ spin_lock(&gr_learn_lock);
++ tmp = learn_buffer;
++ learn_buffer = NULL;
++ spin_unlock(&gr_learn_lock);
++ vfree(tmp);
++ }
++ if (learn_buffer_user != NULL) {
++ vfree(learn_buffer_user);
++ learn_buffer_user = NULL;
++ }
++ learn_buffer_len = 0;
++ learn_buffer_user_len = 0;
++ gr_learn_attached = 0;
++ up(&gr_learn_user_sem);
++ }
++
++ return 0;
++}
++
++struct file_operations grsec_fops = {
++ read: read_learn,
++ write: write_grsec_handler,
++ open: open_learn,
++ release: close_learn,
++ poll: poll_learn,
++};
+diff -urN linux-2.6.7/grsecurity/gracl_res.c linux-2.6.7/grsecurity/gracl_res.c
+--- linux-2.6.7/grsecurity/gracl_res.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_res.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,50 @@
++/* resource handling routines (c) Brad Spengler 2002, 2003 */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/gracl.h>
++#include <linux/grinternal.h>
++
++static const char *restab_log[11] = {
++ "RLIMIT_CPU",
++ "RLIMIT_FSIZE",
++ "RLIMIT_DATA",
++ "RLIMIT_STACK",
++ "RLIMIT_CORE",
++ "RLIMIT_RSS",
++ "RLIMIT_NPROC",
++ "RLIMIT_NOFILE",
++ "RLIMIT_MEMLOCK",
++ "RLIMIT_AS",
++ "RLIMIT_LOCKS"
++};
++
++__inline__ void
++gr_log_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ if (unlikely(res == RLIMIT_NPROC &&
++ (cap_raised(task->cap_effective, CAP_SYS_ADMIN) ||
++ cap_raised(task->cap_effective, CAP_SYS_RESOURCE))))
++ return;
++
++ preempt_disable();
++
++ if (unlikely(((gt && wanted > task->rlim[res].rlim_cur) ||
++ (!gt && wanted >= task->rlim[res].rlim_cur)) &&
++ task->rlim[res].rlim_cur != RLIM_INFINITY))
++ security_alert(GR_RESOURCE_MSG, wanted, restab_log[res],
++ task->rlim[res].rlim_cur,
++ gr_task_fullpath(task), task->comm,
++ task->pid, task->uid, task->euid,
++ task->gid, task->egid,
++ gr_parent_task_fullpath(task),
++ task->parent->comm,
++ task->parent->pid, task->parent->uid,
++ task->parent->euid, task->parent->gid,
++ task->parent->egid);
++
++ preempt_enable_no_resched();
++
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/gracl_segv.c linux-2.6.7/grsecurity/gracl_segv.c
+--- linux-2.6.7/grsecurity/gracl_segv.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_segv.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,330 @@
++/*
++ * grsecurity/gracl_segv.c
++ * Copyright Brad Spengler 2002, 2003
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <asm/uaccess.h>
++#include <asm/errno.h>
++#include <asm/mman.h>
++#include <net/sock.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/smp_lock.h>
++#include <linux/slab.h>
++#include <linux/types.h>
++#include <linux/sched.h>
++#include <linux/timer.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++static struct crash_uid *uid_set;
++static unsigned short uid_used;
++static rwlock_t gr_uid_lock = RW_LOCK_UNLOCKED;
++extern rwlock_t gr_inode_lock;
++extern struct acl_subject_label *
++ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
++ struct acl_role_label *role);
++extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
++
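++/* uid_set is a sorted table of uids that triggered the crash protection;
++   each entry carries an expiry time, and gr_check_crash_uid() reports
++   whether a uid still has an unexpired entry */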
++int
++gr_init_uidset(void)
++{
++ uid_set =
++ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
++ uid_used = 0;
++
++ return uid_set ? 1 : 0;
++}
++
++void
++gr_free_uidset(void)
++{
++ if (uid_set)
++ kfree(uid_set);
++
++ return;
++}
++
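++/* binary search of the sorted uid_set table; returns the index or -1 */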
++int
++gr_find_uid(const uid_t uid)
++{
++ struct crash_uid *tmp = uid_set;
++ uid_t buid;
++ int low = 0, high = uid_used - 1, mid;
++
++ while (high >= low) {
++ mid = (low + high) >> 1;
++ buid = tmp[mid].uid;
++ if (buid == uid)
++ return mid;
++ if (buid > uid)
++ high = mid - 1;
++ if (buid < uid)
++ low = mid + 1;
++ }
++
++ return -1;
++}
++
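++/* insertion sort keeps uid_set ordered by uid so gr_find_uid() can
++   binary search it */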
++static __inline__ void
++gr_insertsort(void)
++{
++ unsigned short i, j;
++ struct crash_uid index;
++
++ for (i = 1; i < uid_used; i++) {
++ index = uid_set[i];
++ j = i;
++ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
++ uid_set[j] = uid_set[j - 1];
++ j--;
++ }
++ uid_set[j] = index;
++ }
++
++ return;
++}
++
++static __inline__ void
++gr_insert_uid(const uid_t uid, const unsigned long expires)
++{
++ int loc;
++
++ if (uid_used == GR_UIDTABLE_MAX)
++ return;
++
++ loc = gr_find_uid(uid);
++
++ if (loc >= 0) {
++ uid_set[loc].expires = expires;
++ return;
++ }
++
++ uid_set[uid_used].uid = uid;
++ uid_set[uid_used].expires = expires;
++ uid_used++;
++
++ gr_insertsort();
++
++ return;
++}
++
++void
++gr_remove_uid(const unsigned short loc)
++{
++ unsigned short i;
++
++ for (i = loc + 1; i < uid_used; i++)
++		uid_set[i - 1] = uid_set[i];
++
++ uid_used--;
++
++ return;
++}
++
++int
++gr_check_crash_uid(const uid_t uid)
++{
++ int loc;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_uid_lock);
++ loc = gr_find_uid(uid);
++ read_unlock(&gr_uid_lock);
++
++ if (loc < 0)
++ return 0;
++
++ write_lock(&gr_uid_lock);
++ if (time_before_eq(uid_set[loc].expires, get_seconds()))
++ gr_remove_uid(loc);
++ else {
++ write_unlock(&gr_uid_lock);
++ return 1;
++ }
++
++ write_unlock(&gr_uid_lock);
++ return 0;
++}
++
++static __inline__ int
++proc_is_setxid(const struct task_struct *task)
++{
++ if (task->uid != task->euid || task->uid != task->suid ||
++ task->uid != task->fsuid)
++ return 1;
++ if (task->gid != task->egid || task->gid != task->sgid ||
++ task->gid != task->fsgid)
++ return 1;
++
++ return 0;
++}
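++
++/* deliver a fatal signal even if the target blocks or ignores it, by
++   resetting the handler to SIG_DFL and unblocking the signal first */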
++static __inline__ int
++gr_fake_force_sig(int sig, struct task_struct *t)
++{
++ unsigned long int flags;
++ int ret;
++
++ spin_lock_irqsave(&t->sighand->siglock, flags);
++ if (sigismember(&t->blocked, sig) || t->sighand->action[sig-1].sa.sa_handler == SIG_IGN) {
++ t->sighand->action[sig-1].sa.sa_handler = SIG_DFL;
++ sigdelset(&t->blocked, sig);
++ recalc_sigpending_tsk(t);
++ }
++ ret = specific_send_sig_info(sig, (void*)1L, t);
++ spin_unlock_irqrestore(&t->sighand->siglock, flags);
++
++ return ret;
++}
++
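++/* crash protection for the GR_CRASH_RES resource: count fatal signals per
++   subject and, once the configured limit is reached within the expiry
++   window, either record the uid and kill its processes (setxid case) or
++   kill every task running under the same subject binary */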
++void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ struct acl_subject_label *curr;
++ struct acl_subject_label *curr2;
++ struct task_struct *tsk, *tsk2;
++
++ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
++ return;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curr = task->acl;
++
++ if (!(curr->resmask & (1 << GR_CRASH_RES)))
++ return;
++
++ if (time_before_eq(curr->expires, get_seconds())) {
++ curr->expires = 0;
++ curr->crashes = 0;
++ }
++
++ curr->crashes++;
++
++ if (!curr->expires)
++ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds())) {
++ if (task->uid && proc_is_setxid(task)) {
++ security_alert(GR_SEGVSTART_ACL_MSG,
++ gr_task_fullpath(task), task->comm,
++ task->pid, task->uid, task->euid,
++ task->gid, task->egid,
++ gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid,
++ task->parent->uid, task->parent->euid,
++ task->parent->gid, task->parent->egid,
++ task->uid,
++ curr->res[GR_CRASH_RES].rlim_max);
++ write_lock(&gr_uid_lock);
++ gr_insert_uid(task->uid, curr->expires);
++ write_unlock(&gr_uid_lock);
++ curr->expires = 0;
++ curr->crashes = 0;
++ read_lock(&tasklist_lock);
++ for_each_process(tsk) {
++ tsk2 = tsk;
++ do {
++ if (tsk2 != task && tsk2->uid == task->uid)
++ gr_fake_force_sig(SIGKILL, tsk2);
++ } while ((tsk2 = next_thread(tsk2)) != tsk);
++ }
++ read_unlock(&tasklist_lock);
++ } else {
++ security_alert(GR_SEGVNOSUID_ACL_MSG,
++ gr_task_fullpath(task), task->comm,
++ task->pid, task->uid, task->euid,
++ task->gid, task->egid,
++ gr_parent_task_fullpath(task),
++ task->parent->comm, task->parent->pid,
++ task->parent->uid, task->parent->euid,
++ task->parent->gid, task->parent->egid,
++ curr->res[GR_CRASH_RES].rlim_max);
++ read_lock(&tasklist_lock);
++ for_each_process(tsk) {
++ tsk2 = tsk;
++ do {
++ if (likely(tsk2 != task)) {
++ curr2 = tsk2->acl;
++
++ if (curr2->device == curr->device &&
++ curr2->inode == curr->inode)
++ gr_fake_force_sig(SIGKILL, tsk2);
++ }
++ } while ((tsk2 = next_thread(tsk2)) != tsk);
++ }
++ read_unlock(&tasklist_lock);
++ }
++ }
++
++ return;
++}
++
++int
++gr_check_crash_exec(const struct file *filp)
++{
++ struct acl_subject_label *curr;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return 0;
++
++ read_lock(&gr_inode_lock);
++ curr = lookup_acl_subj_label(filp->f_dentry->d_inode->i_ino,
++ filp->f_dentry->d_inode->i_sb->s_dev,
++ current->role);
++ read_unlock(&gr_inode_lock);
++
++ if (!curr || !(curr->resmask & (1 << GR_CRASH_RES)) ||
++ (!curr->crashes && !curr->expires))
++ return 0;
++
++ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
++ time_after(curr->expires, get_seconds()))
++ return 1;
++ else if (time_before_eq(curr->expires, get_seconds())) {
++ curr->crashes = 0;
++ curr->expires = 0;
++ }
++
++ return 0;
++}
++
++void
++gr_handle_alertkill(void)
++{
++ struct acl_subject_label *curracl;
++ __u32 curr_ip;
++ struct task_struct *task, *task2;
++
++ if (unlikely(!gr_acl_is_enabled()))
++ return;
++
++ curracl = current->acl;
++ curr_ip = current->curr_ip;
++
++ if ((curracl->mode & GR_KILLIPPROC) && curr_ip &&
++ (curr_ip != 0xffffffff)) {
++ read_lock(&tasklist_lock);
++ for_each_process(task) {
++ task2 = task;
++ do {
++ if (task2->curr_ip == curr_ip)
++ gr_fake_force_sig(SIGKILL, task2);
++ } while ((task2 = next_thread(task2)) != task);
++ }
++ read_unlock(&tasklist_lock);
++ } else if (curracl->mode & GR_KILLPROC)
++ gr_fake_force_sig(SIGKILL, current);
++
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/gracl_shm.c linux-2.6.7/grsecurity/gracl_shm.c
+--- linux-2.6.7/grsecurity/gracl_shm.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/gracl_shm.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,36 @@
++/* shared memory handling routines, (c) Brad Spengler 2002, 2003 */
++
++#include <linux/kernel.h>
++#include <linux/mm.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/ipc.h>
++#include <linux/gracl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ struct task_struct *task;
++
++ if (!gr_acl_is_enabled())
++ return 1;
++
++ task = find_task_by_pid(shm_cprid);
++
++ if (unlikely(!task))
++ task = find_task_by_pid(shm_lapid);
++
++ if (unlikely(task && ((task->start_time < shm_createtime) ||
++ (task->pid == shm_lapid)) &&
++ (task->acl->mode & GR_PROTSHM) &&
++ (task->acl != current->acl))) {
++ security_alert(GR_SHMAT_ACL_MSG, cuid, shm_cprid, shmid,
++ DEFAULTSECARGS);
++ return 0;
++ }
++
++ return 1;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_chdir.c linux-2.6.7/grsecurity/grsec_chdir.c
+--- linux-2.6.7/grsecurity/grsec_chdir.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_chdir.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,20 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ if ((grsec_enable_chdir && grsec_enable_group &&
++ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
++ !grsec_enable_group)) {
++ security_audit(GR_CHDIR_AUDIT_MSG, gr_to_filename(dentry, mnt),
++ DEFAULTSECARGS);
++ }
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_chroot.c linux-2.6.7/grsecurity/grsec_chroot.c
+--- linux-2.6.7/grsecurity/grsec_chroot.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_chroot.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,360 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/mount.h>
++#include <linux/types.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_chroot_unix(const pid_t pid)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ struct pid *spid = NULL;
++
++ if (unlikely(!grsec_enable_chroot_unix))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ spid = find_pid(PIDTYPE_PID, pid);
++ if (spid) {
++ struct task_struct *p;
++ p = pid_task(spid->task_list.next, PIDTYPE_PID);
++ task_lock(p);
++ if (unlikely(!have_same_root(current, p))) {
++ task_unlock(p);
++ read_unlock(&tasklist_lock);
++ security_alert(GR_UNIX_CHROOT_MSG, DEFAULTSECARGS);
++ return 0;
++ }
++ task_unlock(p);
++ }
++ read_unlock(&tasklist_lock);
++#endif
++ return 1;
++}
++
++int
++gr_handle_chroot_nice(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
++ security_alert(GR_NICE_CHROOT_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
++ && proc_is_chrooted(current)) {
++ security_alert(GR_PRIORITY_CHROOT_MSG, p->comm, p->pid,
++ DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_capset(struct task_struct *target)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (!grsec_enable_chroot_caps || !proc_is_chrooted(current))
++ return 0;
++
++ task_lock(target);
++ if (!have_same_root(current, target)) {
++ task_unlock(target);
++ security_alert(GR_CAPSET_CHROOT_MSG, target->comm, target->pid,
++ DEFAULTSECARGS);
++ return 1;
++ }
++ task_unlock(target);
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_rawio(const struct inode *inode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(current) &&
++ inode && S_ISBLK(inode->i_mode) && !capable(CAP_SYS_RAWIO))
++ return 1;
++#endif
++ return 0;
++}
++
++int
++gr_pid_is_chrooted(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ if (!grsec_enable_chroot_findtask || !current->fs ||
++ !proc_is_chrooted(current) || !p)
++ return 0;
++
++ task_lock(p);
++ if (p->fs && !have_same_root(current, p)) {
++ task_unlock(p);
++ return 1;
++ }
++ task_unlock(p);
++#endif
++ return 0;
++}
++
++EXPORT_SYMBOL(gr_pid_is_chrooted);
++
++#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
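++/* walk up from the given dentry/mount towards the root to determine
++   whether it falls under the current task's root or the real (init) root;
++   used to catch accesses that escape a chroot */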
++int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
++{
++ struct dentry *dentry = (struct dentry *)u_dentry;
++ struct vfsmount *mnt = (struct vfsmount *)u_mnt;
++ struct dentry *realroot;
++ struct vfsmount *realrootmnt;
++ struct dentry *currentroot;
++ struct vfsmount *currentmnt;
++
++ read_lock(&child_reaper->fs->lock);
++ realrootmnt = mntget(child_reaper->fs->rootmnt);
++ realroot = dget(child_reaper->fs->root);
++ read_unlock(&child_reaper->fs->lock);
++
++ read_lock(&current->fs->lock);
++ currentmnt = mntget(current->fs->rootmnt);
++ currentroot = dget(current->fs->root);
++ read_unlock(&current->fs->lock);
++
++ spin_lock(&dcache_lock);
++ for (;;) {
++ if (unlikely((dentry == realroot && mnt == realrootmnt)
++ || (dentry == currentroot && mnt == currentmnt)))
++ break;
++ if (unlikely(dentry == mnt->mnt_root || IS_ROOT(dentry))) {
++ if (mnt->mnt_parent == mnt)
++ break;
++ dentry = mnt->mnt_mountpoint;
++ mnt = mnt->mnt_parent;
++ continue;
++ }
++ dentry = dentry->d_parent;
++ }
++ spin_unlock(&dcache_lock);
++
++ dput(currentroot);
++ mntput(currentmnt);
++
++ if (dentry == realroot && mnt == realrootmnt) {
++ /* access is outside of chroot */
++ dput(realroot);
++ mntput(realrootmnt);
++ return 0;
++ }
++
++ dput(realroot);
++ mntput(realrootmnt);
++ return 1;
++}
++#endif
++
++int
++gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ if (!grsec_enable_chroot_fchdir)
++ return 1;
++
++ if (!proc_is_chrooted(current))
++ return 1;
++ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
++ security_alert(GR_CHROOT_FCHDIR_MSG,
++ gr_to_filename(u_dentry, u_mnt),
++ DEFAULTSECARGS);
++ return 0;
++ }
++#endif
++ return 1;
++}
++
++int
++gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ struct pid *pid = NULL;
++ u64 starttime64;
++ time_t starttime;
++
++ if (unlikely(!grsec_enable_chroot_shmat))
++ return 1;
++
++ if (likely(!proc_is_chrooted(current)))
++ return 1;
++
++ read_lock(&tasklist_lock);
++
++ pid = find_pid(PIDTYPE_PID, shm_cprid);
++ if (pid) {
++ struct task_struct *p;
++ p = pid_task(pid->task_list.next, PIDTYPE_PID);
++ task_lock(p);
++ starttime64 = p->start_time;
++ do_div(starttime64, HZ);
++ starttime = (time_t) starttime64;
++ if (unlikely(!have_same_root(current, p) &&
++ time_before((unsigned long)starttime, (unsigned long)shm_createtime))) {
++ task_unlock(p);
++ read_unlock(&tasklist_lock);
++ security_alert(GR_SHMAT_CHROOT_MSG, DEFAULTSECARGS);
++ return 0;
++ }
++ task_unlock(p);
++ } else {
++ pid = find_pid(PIDTYPE_PID, shm_lapid);
++ if (pid) {
++ struct task_struct *p;
++ p = pid_task(pid->task_list.next, PIDTYPE_PID);
++ task_lock(p);
++ if (unlikely(!have_same_root(current, p))) {
++ task_unlock(p);
++ read_unlock(&tasklist_lock);
++ security_alert(GR_SHMAT_CHROOT_MSG, DEFAULTSECARGS);
++ return 0;
++ }
++ task_unlock(p);
++ }
++ }
++
++ read_unlock(&tasklist_lock);
++#endif
++ return 1;
++}
++
++void
++gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
++ security_audit(GR_EXEC_CHROOT_MSG, gr_to_filename(dentry, mnt),
++ DEFAULTSECARGS);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
++ proc_is_chrooted(current)) {
++ security_alert(GR_MKNOD_CHROOT_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt, const char *dev_name)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
++ security_alert(GR_MOUNT_CHROOT_MSG, dev_name,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_pivot(void)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
++ security_alert(GR_PIVOT_CHROOT_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
++ !gr_is_outside_chroot(dentry, mnt)) {
++ security_alert(GR_CHROOT_CHROOT_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
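++/* drop the GR_CHROOT_CAPS capability set from a chrooted task's permitted,
++   inheritable and effective sets */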
++void
++gr_handle_chroot_caps(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
++ task->cap_permitted =
++ cap_drop(task->cap_permitted, GR_CHROOT_CAPS);
++ task->cap_inheritable =
++ cap_drop(task->cap_inheritable, GR_CHROOT_CAPS);
++ task->cap_effective =
++ cap_drop(task->cap_effective, GR_CHROOT_CAPS);
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_sysctl(const int op)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ if (grsec_enable_chroot_sysctl && proc_is_chrooted(current)
++ && (op & 002))
++ return -EACCES;
++#endif
++ return 0;
++}
++
++void
++gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ if (grsec_enable_chroot_chdir)
++ set_fs_pwd(current->fs, mnt, dentry);
++#endif
++ return;
++}
++
++int
++gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode)
++{
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ if (grsec_enable_chroot_chmod &&
++ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
++ proc_is_chrooted(current)) {
++ security_alert(GR_CHMOD_CHROOT_MSG,
++ gr_to_filename(dentry, mnt), DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_disabled.c linux-2.6.7/grsecurity/grsec_disabled.c
+--- linux-2.6.7/grsecurity/grsec_disabled.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_disabled.c 2004-06-25 17:25:57 -0400
+@@ -0,0 +1,413 @@
++/*
++ * when grsecurity is disabled, compile all external functions into nothing
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/config.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/kdev_t.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <linux/skbuff.h>
++#include <linux/sysctl.h>
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++__inline__ void
++pax_set_flags(struct linux_binprm *bprm)
++{
++ return;
++}
++#endif
++
++#ifdef CONFIG_SYSCTL
++__inline__ __u32
++gr_handle_sysctl(const struct ctl_table * table, __u32 mode)
++{
++ return mode;
++}
++#endif
++
++__inline__ int
++gr_acl_is_enabled(void)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_rawio(const struct inode *inode)
++{
++ return 0;
++}
++
++__inline__ void
++gr_acl_handle_psacct(struct task_struct *task, const long code)
++{
++ return;
++}
++
++__inline__ int
++gr_handle_mmap(const struct file *filp, const unsigned long prot)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_ptrace(struct task_struct *task, const long request)
++{
++ return 0;
++}
++
++__inline__ int
++gr_handle_proc_ptrace(struct task_struct *task)
++{
++ return 0;
++}
++
++__inline__ void
++gr_learn_resource(const struct task_struct *task,
++ const int res, const unsigned long wanted, const int gt)
++{
++ return;
++}
++
++__inline__ int
++gr_set_acls(const int type)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_hidden_task(const struct task_struct *tsk)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_protected_task(const struct task_struct *task)
++{
++ return 0;
++}
++
++__inline__ void
++gr_copy_label(struct task_struct *tsk)
++{
++ return;
++}
++
++__inline__ void
++gr_set_pax_flags(struct task_struct *task)
++{
++ return;
++}
++
++__inline__ int
++gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return 0;
++}
++
++__inline__ void
++gr_handle_delete(const ino_t ino, const dev_t dev)
++{
++ return;
++}
++
++__inline__ void
++gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
++{
++ return;
++}
++
++__inline__ void
++gr_handle_crash(struct task_struct *task, const int sig)
++{
++ return;
++}
++
++__inline__ int
++gr_check_crash_exec(const struct file *filp)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_crash_uid(const uid_t uid)
++{
++ return 0;
++}
++
++__inline__ void
++gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace)
++{
++ return;
++}
++
++__inline__ int
++gr_search_socket(const int family, const int type, const int protocol)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_connectbind(const int mode, const struct socket *sock,
++ const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ int
++gr_task_is_capable(struct task_struct *task, const int cap)
++{
++ return 1;
++}
++
++__inline__ int
++gr_is_capable_nolog(const int cap)
++{
++ return 1;
++}
++
++__inline__ void
++gr_handle_alertkill(void)
++{
++ return;
++}
++
++__inline__ __u32
++gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_hidden_file(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
++ const int fmode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ int
++gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
++ unsigned int *vm_flags)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_truncate(const struct dentry * dentry,
++ const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_access(const struct dentry * dentry,
++ const struct vfsmount * mnt, const int fmode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_fchmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
++ mode_t mode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ void
++grsecurity_init(void)
++{
++ return;
++}
++
++__inline__ __u32
++gr_acl_handle_mknod(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const int mode)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_mkdir(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_symlink(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt, const char *from)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_link(const struct dentry * new_dentry,
++ const struct dentry * parent_dentry,
++ const struct vfsmount * parent_mnt,
++ const struct dentry * old_dentry,
++ const struct vfsmount * old_mnt, const char *to)
++{
++ return 1;
++}
++
++__inline__ int
++gr_acl_handle_rename(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct inode *old_parent_inode,
++ const struct vfsmount *old_mnt, const char *newname)
++{
++ return 0;
++}
++
++__inline__ __u32
++gr_acl_handle_filldir(const struct dentry * dentry,
++ const struct vfsmount * mnt, const ino_t ino)
++{
++ return 1;
++}
++
++__inline__ int
++gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid, const int shmid)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
++{
++ return 1;
++}
++
++__inline__ __u32
++gr_acl_handle_creat(const struct dentry * dentry,
++ const struct dentry * p_dentry,
++ const struct vfsmount * p_mnt, const int fmode,
++ const int imode)
++{
++ return 1;
++}
++
++__inline__ void
++gr_acl_handle_exit(void)
++{
++ return;
++}
++
++__inline__ int
++gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
++{
++ return 1;
++}
++
++__inline__ void
++gr_set_role_label(const uid_t uid, const gid_t gid)
++{
++ return;
++}
++
++__inline__ int
++gr_acl_handle_procpidmem(const struct task_struct *task)
++{
++ return 0;
++}
++
++__inline__ int
++gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
++{
++ return 1;
++}
++
++__inline__ int
++gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
++{
++ return 1;
++}
++
++__inline__ void
++gr_set_kernel_label(struct task_struct *task)
++{
++ return;
++}
++
++__inline__ int
++gr_check_user_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++__inline__ int
++gr_check_group_change(int real, int effective, int fs)
++{
++ return 0;
++}
++
++
++EXPORT_SYMBOL(gr_task_is_capable);
++EXPORT_SYMBOL(gr_learn_resource);
++EXPORT_SYMBOL(gr_set_kernel_label);
++
+diff -urN linux-2.6.7/grsecurity/grsec_exec.c linux-2.6.7/grsecurity/grsec_exec.c
+--- linux-2.6.7/grsecurity/grsec_exec.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_exec.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,71 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/binfmts.h>
++#include <linux/fs.h>
++#include <linux/types.h>
++#include <linux/grdefs.h>
++#include <linux/grinternal.h>
++#include <linux/capability.h>
++
++#include <asm/uaccess.h>
++
++int
++gr_handle_nproc(void)
++{
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ if (grsec_enable_execve && current->user &&
++ (atomic_read(&current->user->processes) >
++ current->rlim[RLIMIT_NPROC].rlim_cur) &&
++ !capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE)) {
++ security_alert(GR_NPROC_MSG, DEFAULTSECARGS);
++ return -EAGAIN;
++ }
++#endif
++ return 0;
++}
++
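++/* exec logging: copy up to ~62 bytes of the argument vector from userspace
++   into a fixed buffer and emit an audit record with the binary path and
++   the (truncated) argument string */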
++void
++gr_handle_exec_args(struct linux_binprm *bprm, char **argv)
++{
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ char grarg[64] = { 0 };
++ __u8 execlen = 0;
++ unsigned int i;
++
++ if (!((grsec_enable_execlog && grsec_enable_group &&
++ in_group_p(grsec_audit_gid))
++ || (grsec_enable_execlog && !grsec_enable_group)))
++ return;
++
++ if (unlikely(!argv))
++ goto log;
++
++ for (i = 0; i < bprm->argc && execlen < 62; i++) {
++ char *p;
++ __u8 len;
++
++ if (get_user(p, argv + i))
++ goto log;
++ if (!p)
++ goto log;
++ len = strnlen_user(p, 62 - execlen);
++ if (len > 62 - execlen)
++ len = 62 - execlen;
++ else if (len > 0)
++ len--;
++ if (copy_from_user(grarg + execlen, p, len))
++ goto log;
++ execlen += len;
++ *(grarg + execlen) = ' ';
++ *(grarg + execlen + 1) = '\0';
++ execlen++;
++ }
++
++ log:
++ security_audit(GR_EXEC_AUDIT_MSG, gr_to_filename(bprm->file->f_dentry,
++ bprm->file->f_vfsmnt),
++ grarg, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_fifo.c linux-2.6.7/grsecurity/grsec_fifo.c
+--- linux-2.6.7/grsecurity/grsec_fifo.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_fifo.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,24 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag, const int acc_mode)
++{
++#ifdef CONFIG_GRKERNSEC_FIFO
++ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
++ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
++ (dentry->d_inode->i_uid != dir->d_inode->i_uid) &&
++ (current->fsuid != dentry->d_inode->i_uid)) {
++ if (!vfs_permission(dentry->d_inode, acc_mode))
++ security_alert(GR_FIFO_MSG, gr_to_filename(dentry, mnt),
++ dentry->d_inode->i_uid,
++ dentry->d_inode->i_gid, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_fork.c linux-2.6.7/grsecurity/grsec_fork.c
+--- linux-2.6.7/grsecurity/grsec_fork.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_fork.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,14 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_forkfail(const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ if (grsec_enable_forkfail)
++ security_alert(GR_FAILFORK_MSG, retval, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_init.c linux-2.6.7/grsecurity/grsec_init.c
+--- linux-2.6.7/grsecurity/grsec_init.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_init.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,225 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/smp_lock.h>
++#include <linux/gracl.h>
++#include <linux/slab.h>
++#include <linux/vmalloc.h>
++#include <linux/percpu.h>
++
++int grsec_enable_link;
++int grsec_enable_dmesg;
++int grsec_enable_fifo;
++int grsec_enable_execve;
++int grsec_enable_execlog;
++int grsec_enable_signal;
++int grsec_enable_forkfail;
++int grsec_enable_time;
++int grsec_enable_audit_textrel;
++int grsec_enable_group;
++int grsec_audit_gid;
++int grsec_enable_chdir;
++int grsec_enable_audit_ipc;
++int grsec_enable_mount;
++int grsec_enable_chroot_findtask;
++int grsec_enable_chroot_mount;
++int grsec_enable_chroot_shmat;
++int grsec_enable_chroot_fchdir;
++int grsec_enable_chroot_double;
++int grsec_enable_chroot_pivot;
++int grsec_enable_chroot_chdir;
++int grsec_enable_chroot_chmod;
++int grsec_enable_chroot_mknod;
++int grsec_enable_chroot_nice;
++int grsec_enable_chroot_execlog;
++int grsec_enable_chroot_caps;
++int grsec_enable_chroot_sysctl;
++int grsec_enable_chroot_unix;
++int grsec_enable_tpe;
++int grsec_tpe_gid;
++int grsec_enable_tpe_all;
++int grsec_enable_randpid;
++int grsec_enable_randid;
++int grsec_enable_randisn;
++int grsec_enable_randsrc;
++int grsec_enable_randrpc;
++int grsec_enable_socket_all;
++int grsec_socket_all_gid;
++int grsec_enable_socket_client;
++int grsec_socket_client_gid;
++int grsec_enable_socket_server;
++int grsec_socket_server_gid;
++int grsec_lock;
++
++spinlock_t grsec_alert_lock = SPIN_LOCK_UNLOCKED;
++unsigned long grsec_alert_wtime = 0;
++unsigned long grsec_alert_fyet = 0;
++
++spinlock_t grsec_audit_lock = SPIN_LOCK_UNLOCKED;
++
++rwlock_t grsec_exec_file_lock = RW_LOCK_UNLOCKED;
++
++char *gr_shared_page[4];
++extern struct gr_arg *gr_usermode;
++extern unsigned char *gr_system_salt;
++extern unsigned char *gr_system_sum;
++extern struct task_struct **gr_conn_table;
++extern const unsigned int gr_conn_table_size;
++
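++/* allocate the per-cpu shared pages, the IP tagging hash table and the
++   authentication buffers, then apply the compile-time defaults for the
++   grsec_enable_* flags when the sysctl interface is not configured */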
++void
++grsecurity_init(void)
++{
++ int j;
++ /* create the per-cpu shared pages */
++
++ preempt_disable();
++ for (j = 0; j < 4; j++) {
++ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(char *));
++ if (gr_shared_page[j] == NULL) {
++ panic("Unable to allocate grsecurity shared page");
++ return;
++ }
++ }
++ preempt_enable();
++
++ /* create hash tables for ip tagging */
++
++ gr_conn_table = (struct task_struct **) vmalloc(gr_conn_table_size * sizeof(struct task_struct *));
++ if (gr_conn_table == NULL) {
++ panic("Unable to allocate grsecurity IP tagging table");
++ return;
++ }
++ memset(gr_conn_table, 0, gr_conn_table_size * sizeof(struct task_struct *));
++
++ /* allocate memory for authentication structure */
++ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
++ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
++ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
++
++ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
++ panic("Unable to allocate grsecurity authentication structure");
++ return;
++ }
++
++#ifndef CONFIG_GRKERNSEC_SYSCTL
++ grsec_lock = 1;
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ grsec_enable_audit_textrel = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ grsec_enable_group = 1;
++ grsec_audit_gid = CONFIG_GRKERNSEC_AUDIT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ grsec_enable_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ grsec_enable_audit_ipc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ grsec_enable_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_LINK
++ grsec_enable_link = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ grsec_enable_dmesg = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ grsec_enable_fifo = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ grsec_enable_execve = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ grsec_enable_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ grsec_enable_signal = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ grsec_enable_forkfail = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ grsec_enable_time = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ grsec_enable_chroot_findtask = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ grsec_enable_chroot_unix = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ grsec_enable_chroot_mount = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ grsec_enable_chroot_fchdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ grsec_enable_chroot_shmat = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ grsec_enable_chroot_double = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ grsec_enable_chroot_pivot = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ grsec_enable_chroot_chdir = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ grsec_enable_chroot_chmod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ grsec_enable_chroot_mknod = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ grsec_enable_chroot_nice = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ grsec_enable_chroot_execlog = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ grsec_enable_chroot_caps = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ grsec_enable_chroot_sysctl = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ grsec_enable_tpe = 1;
++ grsec_tpe_gid = CONFIG_GRKERNSEC_TPE_GID;
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ grsec_enable_tpe_all = 1;
++#endif
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDPID
++ grsec_enable_randpid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDID
++ grsec_enable_randid = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ grsec_enable_randisn = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ grsec_enable_randsrc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++ grsec_enable_randrpc = 1;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ grsec_enable_socket_all = 1;
++ grsec_socket_all_gid = CONFIG_GRKERNSEC_SOCKET_ALL_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ grsec_enable_socket_client = 1;
++ grsec_socket_client_gid = CONFIG_GRKERNSEC_SOCKET_CLIENT_GID;
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ grsec_enable_socket_server = 1;
++ grsec_socket_server_gid = CONFIG_GRKERNSEC_SOCKET_SERVER_GID;
++#endif
++#endif
++
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_ipc.c linux-2.6.7/grsecurity/grsec_ipc.c
+--- linux-2.6.7/grsecurity/grsec_ipc.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_ipc.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,81 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/types.h>
++#include <linux/ipc.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_msgget(const int ret, const int msgflg)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
++ !grsec_enable_group)) && (ret >= 0)
++ && (msgflg & IPC_CREAT))
++ security_audit(GR_MSGQ_AUDIT_MSG, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_msgrm(const uid_t uid, const uid_t cuid)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) ||
++ (grsec_enable_audit_ipc && !grsec_enable_group))
++ security_audit(GR_MSGQR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_semget(const int err, const int semflg)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
++ !grsec_enable_group)) && (err >= 0)
++ && (semflg & IPC_CREAT))
++ security_audit(GR_SEM_AUDIT_MSG, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_semrm(const uid_t uid, const uid_t cuid)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) ||
++ (grsec_enable_audit_ipc && !grsec_enable_group))
++ security_audit(GR_SEMR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_shmget(const int err, const int shmflg, const size_t size)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if (((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) || (grsec_enable_audit_ipc &&
++ !grsec_enable_group)) && (err >= 0)
++ && (shmflg & IPC_CREAT))
++ security_audit(GR_SHM_AUDIT_MSG, size, DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_shmrm(const uid_t uid, const uid_t cuid)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ if ((grsec_enable_group && in_group_p(grsec_audit_gid) &&
++ grsec_enable_audit_ipc) ||
++ (grsec_enable_audit_ipc && !grsec_enable_group))
++ security_audit(GR_SHMR_AUDIT_MSG, uid, cuid, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_link.c linux-2.6.7/grsecurity/grsec_link.c
+--- linux-2.6.7/grsecurity/grsec_link.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_link.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,41 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/fs.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry, const struct vfsmount *mnt)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
++ (parent->i_mode & S_ISVTX) && (parent->i_uid != inode->i_uid) &&
++ (parent->i_mode & S_IWOTH) && (current->fsuid != inode->i_uid)) {
++ security_alert(GR_SYMLINK_MSG, gr_to_filename(dentry, mnt),
++ inode->i_uid, inode->i_gid, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode, const int mode, const char *to)
++{
++#ifdef CONFIG_GRKERNSEC_LINK
++ if (grsec_enable_link && current->fsuid != inode->i_uid &&
++ (!S_ISREG(mode) || (mode & S_ISUID) ||
++ ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP)) ||
++ (vfs_permission(inode, MAY_READ | MAY_WRITE))) &&
++ !capable(CAP_FOWNER) && current->uid) {
++ security_alert(GR_HARDLINK_MSG, gr_to_filename(dentry, mnt),
++ inode->i_uid, inode->i_gid, to, DEFAULTSECARGS);
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_mem.c linux-2.6.7/grsecurity/grsec_mem.c
+--- linux-2.6.7/grsecurity/grsec_mem.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_mem.c 2004-08-03 08:55:01 -0400
+@@ -0,0 +1,62 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/mman.h>
++#include <linux/grinternal.h>
++
++void
++gr_handle_ioperm(void)
++{
++ security_alert(GR_IOPERM_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_iopl(void)
++{
++ security_alert(GR_IOPL_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_mem_write(void)
++{
++ security_alert(GR_MEM_WRITE_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_kmem_write(void)
++{
++ security_alert(GR_KMEM_MSG, DEFAULTSECARGS);
++ return;
++}
++
++void
++gr_handle_open_port(void)
++{
++ security_alert(GR_PORT_OPEN_MSG, DEFAULTSECARGS);
++ return;
++}
++
++int
++gr_handle_mem_mmap(const unsigned long offset, struct vm_area_struct *vma)
++{
++ if (offset + vma->vm_end - vma->vm_start <= offset) {
++ security_alert(GR_MEM_MMAP_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ }
++
++ if (offset < __pa(high_memory) && (vma->vm_flags & VM_WRITE)
++#ifdef CONFIG_X86
++ && !(offset == 0xf0000 && ((vma->vm_end - vma->vm_start) <= 0x10000))
++ && !(offset == 0xa0000 && ((vma->vm_end - vma->vm_start) <= 0x20000))
++#endif
++ ) {
++ security_alert(GR_MEM_MMAP_MSG, DEFAULTSECARGS);
++ return -EPERM;
++ } else if (offset < __pa(high_memory))
++ vma->vm_flags &= ~VM_MAYWRITE;
++
++ return 0;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_mount.c linux-2.6.7/grsecurity/grsec_mount.c
+--- linux-2.6.7/grsecurity/grsec_mount.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_mount.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,34 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_remount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ security_audit(GR_REMOUNT_AUDIT_MSG, devname ? devname : "none", DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_unmount(const char *devname, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ security_audit(GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none", DEFAULTSECARGS);
++#endif
++ return;
++}
++
++void
++gr_log_mount(const char *from, const char *to, const int retval)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ if (grsec_enable_mount && (retval >= 0))
++ security_audit(GR_MOUNT_AUDIT_MSG, from, to, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_rand.c linux-2.6.7/grsecurity/grsec_rand.c
+--- linux-2.6.7/grsecurity/grsec_rand.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_rand.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,22 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/smp_lock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern int pid_max;
++
++int
++gr_random_pid(void)
++{
++#ifdef CONFIG_GRKERNSEC_RANDPID
++ int pid;
++
++ if (grsec_enable_randpid && current->fs->root) {
++
++ pid = 1 + (get_random_long() % pid_max);
++ return pid;
++ }
++#endif
++ return 0;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_sig.c linux-2.6.7/grsecurity/grsec_sig.c
+--- linux-2.6.7/grsecurity/grsec_sig.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_sig.c 2004-08-03 17:45:56 -0400
+@@ -0,0 +1,73 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_signal(const int sig, const struct task_struct *t)
++{
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
++ (sig == SIGABRT) || (sig == SIGBUS))) {
++ if (t->pid == current->pid) {
++ security_alert_good(GR_UNISIGLOG_MSG, sig,
++ DEFAULTSECARGS);
++ } else {
++ security_alert_good(GR_DUALSIGLOG_MSG, sig,
++ gr_task_fullpath0(t), t->comm,
++ t->pid, t->uid, t->euid, t->gid,
++ t->egid, gr_parent_task_fullpath0(t),
++ t->parent->comm,
++ t->parent->pid, t->parent->uid,
++ t->parent->euid, t->parent->gid,
++ t->parent->egid, DEFAULTSECARGS);
++ }
++ }
++#endif
++ return;
++}
++
++int
++gr_handle_signal(const struct task_struct *p, const int sig)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (current->pid > 1 && gr_check_protected_task(p)) {
++ security_alert(GR_SIG_ACL_MSG, sig, gr_task_fullpath0(p),
++ p->comm, p->pid, p->uid,
++ p->euid, p->gid, p->egid,
++ gr_parent_task_fullpath0(p), p->parent->comm,
++ p->parent->pid, p->parent->uid,
++ p->parent->euid, p->parent->gid,
++ p->parent->egid, DEFAULTSECARGS);
++ return -EPERM;
++ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
++ return -EPERM;
++ }
++#endif
++ return 0;
++}
++
++void gr_handle_brute_attach(struct task_struct *p)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ read_lock(&tasklist_lock);
++ read_lock(&grsec_exec_file_lock);
++ if (p->parent && p->parent->exec_file == p->exec_file)
++ p->parent->brute = 1;
++ read_unlock(&grsec_exec_file_lock);
++ read_unlock(&tasklist_lock);
++#endif
++ return;
++}
++
++void gr_handle_brute_check(void)
++{
++#ifdef CONFIG_GRKERNSEC_BRUTE
++ if (current->brute) {
++ set_current_state(TASK_UNINTERRUPTIBLE);
++ schedule_timeout(30 * HZ);
++ }
++#endif
++ return;
++}
++
+diff -urN linux-2.6.7/grsecurity/grsec_sock.c linux-2.6.7/grsecurity/grsec_sock.c
+--- linux-2.6.7/grsecurity/grsec_sock.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_sock.c 2004-06-29 11:04:30 -0400
+@@ -0,0 +1,259 @@
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/net.h>
++#include <linux/in.h>
++#include <linux/ip.h>
++#include <net/sock.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++#include <linux/gracl.h>
++
++#if defined(CONFIG_IP_NF_MATCH_STEALTH_MODULE)
++extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
++EXPORT_SYMBOL(udp_v4_lookup);
++#endif
++#if defined(CONFIG_GRKERNSEC_RANDID)
++EXPORT_SYMBOL(ip_randomid);
++#endif
++#if defined(CONFIG_GRKERNSEC_RANDSRC) || defined(CONFIG_GRKERNSEC_RANDRPC)
++EXPORT_SYMBOL(get_random_long);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDISN
++EXPORT_SYMBOL(ip_randomisn);
++EXPORT_SYMBOL(grsec_enable_randisn);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDID
++EXPORT_SYMBOL(grsec_enable_randid);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++EXPORT_SYMBOL(grsec_enable_randsrc);
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++EXPORT_SYMBOL(grsec_enable_randrpc);
++#endif
++
++EXPORT_SYMBOL(gr_cap_rtnetlink);
++
++extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
++
++EXPORT_SYMBOL(gr_search_udp_recvmsg);
++EXPORT_SYMBOL(gr_search_udp_sendmsg);
++
++#ifdef CONFIG_UNIX_MODULE
++EXPORT_SYMBOL(gr_acl_handle_unix);
++EXPORT_SYMBOL(gr_acl_handle_mknod);
++EXPORT_SYMBOL(gr_handle_chroot_unix);
++EXPORT_SYMBOL(gr_handle_create);
++#endif
++
++#ifdef CONFIG_GRKERNSEC
++struct task_struct **gr_conn_table;
++const unsigned int gr_conn_table_size = 65521;
++struct task_struct *deleted_conn = (struct task_struct *)~0;
++spinlock_t gr_conn_table_lock = SPIN_LOCK_UNLOCKED;
++
++extern __inline__ const char * gr_socktype_to_name(unsigned char type);
++extern __inline__ const char * gr_proto_to_name(unsigned char proto);
++
++static __inline__ int
++conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
++{
++ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
++}
++
++static __inline__ int
++conn_match(const struct task_struct *task, __u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ if (unlikely(task != deleted_conn && task->gr_saddr == saddr &&
++ task->gr_daddr == daddr && task->gr_sport == sport &&
++ task->gr_dport == dport))
++ return 1;
++ else
++ return 0;
++}
++
++void gr_add_to_task_ip_table(struct task_struct *task)
++{
++ unsigned int index;
++
++ if (unlikely(gr_conn_table == NULL))
++ return;
++
++ if (!thread_group_leader(task))
++ task = task->group_leader;
++
++ index = conn_hash(task->gr_saddr, task->gr_daddr,
++ task->gr_sport, task->gr_dport,
++ gr_conn_table_size);
++
++ spin_lock(&gr_conn_table_lock);
++
++ while (gr_conn_table[index] && gr_conn_table[index] != deleted_conn) {
++ index = (index + 1) % gr_conn_table_size;
++ }
++
++ gr_conn_table[index] = task;
++
++ spin_unlock(&gr_conn_table_lock);
++
++ return;
++}
++
++void gr_del_task_from_ip_table_nolock(struct task_struct *task)
++{
++ unsigned int index;
++
++ if (unlikely(gr_conn_table == NULL))
++ return;
++
++ if (!thread_group_leader(task))
++ task = task->group_leader;
++
++ index = conn_hash(task->gr_saddr, task->gr_daddr,
++ task->gr_sport, task->gr_dport,
++ gr_conn_table_size);
++
++ while (gr_conn_table[index] && !conn_match(gr_conn_table[index],
++ task->gr_saddr, task->gr_daddr, task->gr_sport,
++ task->gr_dport)) {
++ index = (index + 1) % gr_conn_table_size;
++ }
++
++ if (gr_conn_table[index]) {
++ if (gr_conn_table[(index + 1) % gr_conn_table_size])
++ gr_conn_table[index] = deleted_conn;
++ else
++ gr_conn_table[index] = NULL;
++ }
++
++ return;
++}
++
++struct task_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
++ __u16 sport, __u16 dport)
++{
++ unsigned int index;
++
++ if (unlikely(gr_conn_table == NULL))
++ return NULL;
++
++ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
++
++ while (gr_conn_table[index] && !conn_match(gr_conn_table[index],
++ saddr, daddr, sport, dport)) {
++ index = (index + 1) % gr_conn_table_size;
++ }
++
++ return gr_conn_table[index];
++}
++
++#endif
++
++void gr_del_task_from_ip_table(struct task_struct *task)
++{
++#ifdef CONFIG_GRKERNSEC
++ spin_lock(&gr_conn_table_lock);
++ if (!thread_group_leader(task))
++ gr_del_task_from_ip_table_nolock(task->group_leader);
++ else
++ gr_del_task_from_ip_table_nolock(task);
++ spin_unlock(&gr_conn_table_lock);
++#endif
++ return;
++}
++
++void
++gr_attach_curr_ip(const struct sock *sk)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct task_struct *p;
++ struct task_struct *set;
++ const struct inet_opt *inet = inet_sk(sk);
++
++ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
++ return;
++
++ set = current;
++ if (!thread_group_leader(set))
++ set = set->group_leader;
++
++ spin_lock(&gr_conn_table_lock);
++ p = gr_lookup_task_ip_table(inet->daddr, inet->rcv_saddr,
++ inet->dport, inet->sport);
++ if (unlikely(p != NULL)) {
++ set->curr_ip = p->curr_ip;
++ set->used_accept = 1;
++ gr_del_task_from_ip_table_nolock(p);
++ spin_unlock(&gr_conn_table_lock);
++ return;
++ }
++ spin_unlock(&gr_conn_table_lock);
++
++ set->curr_ip = inet->daddr;
++ set->used_accept = 1;
++#endif
++ return;
++}
++
++int
++gr_handle_sock_all(const int family, const int type, const int protocol)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
++ (family != AF_UNIX) && (family != AF_LOCAL)) {
++ security_alert(GR_SOCK2_MSG, family, gr_socktype_to_name(type), gr_proto_to_name(protocol),
++ DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_server(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ if (grsec_enable_socket_server &&
++ in_group_p(grsec_socket_server_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ security_alert(GR_BIND_MSG, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++int
++gr_handle_sock_client(const struct sockaddr *sck)
++{
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
++ sck && (sck->sa_family != AF_UNIX) &&
++ (sck->sa_family != AF_LOCAL)) {
++ security_alert(GR_CONNECT_MSG, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++__u32
++gr_cap_rtnetlink(void)
++{
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_acl_is_enabled())
++ return current->cap_effective;
++ else if (cap_raised(current->cap_effective, CAP_NET_ADMIN) &&
++ gr_task_is_capable(current, CAP_NET_ADMIN))
++ return current->cap_effective;
++ else
++ return 0;
++#else
++ return current->cap_effective;
++#endif
++}
+diff -urN linux-2.6.7/grsecurity/grsec_sysctl.c linux-2.6.7/grsecurity/grsec_sysctl.c
+--- linux-2.6.7/grsecurity/grsec_sysctl.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_sysctl.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,453 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/sysctl.h>
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++int
++gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
++{
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & 002)) {
++ security_alert(GR_SYSCTL_MSG, name, DEFAULTSECARGS);
++ return -EACCES;
++ }
++#endif
++ return 0;
++}
++
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++enum {GS_LINK=1, GS_FIFO, GS_EXECVE, GS_EXECLOG, GS_SIGNAL,
++GS_FORKFAIL, GS_TIME, GS_CHROOT_SHMAT, GS_CHROOT_UNIX, GS_CHROOT_MNT,
++GS_CHROOT_FCHDIR, GS_CHROOT_DBL, GS_CHROOT_PVT, GS_CHROOT_CD, GS_CHROOT_CM,
++GS_CHROOT_MK, GS_CHROOT_NI, GS_CHROOT_EXECLOG, GS_CHROOT_CAPS,
++GS_CHROOT_SYSCTL, GS_TPE, GS_TPE_GID, GS_TPE_ALL, GS_SIDCAPS,
++GS_RANDPID, GS_RANDID, GS_RANDSRC, GS_RANDISN,
++GS_SOCKET_ALL, GS_SOCKET_ALL_GID, GS_SOCKET_CLIENT,
++GS_SOCKET_CLIENT_GID, GS_SOCKET_SERVER, GS_SOCKET_SERVER_GID, GS_TTY, GS_TTYS,
++GS_PTY, GS_GROUP, GS_GID, GS_ACHDIR, GS_AMOUNT, GS_AIPC, GS_DMSG, GS_RANDRPC,
++GS_TEXTREL, GS_FINDTASK, GS_LOCK};
++
++
++ctl_table grsecurity_table[] = {
++#ifdef CONFIG_GRKERNSEC_LINK
++ {
++ .ctl_name = GS_LINK,
++ .procname = "linking_restrictions",
++ .data = &grsec_enable_link,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FIFO
++ {
++ .ctl_name = GS_FIFO,
++ .procname = "fifo_restrictions",
++ .data = &grsec_enable_fifo,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECVE
++ {
++ .ctl_name = GS_EXECVE,
++ .procname = "execve_limiting",
++ .data = &grsec_enable_execve,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_EXECLOG
++ {
++ .ctl_name = GS_EXECLOG,
++ .procname = "exec_logging",
++ .data = &grsec_enable_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SIGNAL
++ {
++ .ctl_name = GS_SIGNAL,
++ .procname = "signal_logging",
++ .data = &grsec_enable_signal,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_FORKFAIL
++ {
++ .ctl_name = GS_FORKFAIL,
++ .procname = "forkfail_logging",
++ .data = &grsec_enable_forkfail,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TIME
++ {
++ .ctl_name = GS_TIME,
++ .procname = "timechange_logging",
++ .data = &grsec_enable_time,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
++ {
++ .ctl_name = GS_CHROOT_SHMAT,
++ .procname = "chroot_deny_shmat",
++ .data = &grsec_enable_chroot_shmat,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ {
++ .ctl_name = GS_CHROOT_UNIX,
++ .procname = "chroot_deny_unix",
++ .data = &grsec_enable_chroot_unix,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
++ {
++ .ctl_name = GS_CHROOT_MNT,
++ .procname = "chroot_deny_mount",
++ .data = &grsec_enable_chroot_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
++ {
++ .ctl_name = GS_CHROOT_FCHDIR,
++ .procname = "chroot_deny_fchdir",
++ .data = &grsec_enable_chroot_fchdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
++ {
++ .ctl_name = GS_CHROOT_DBL,
++ .procname = "chroot_deny_chroot",
++ .data = &grsec_enable_chroot_double,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
++ {
++ .ctl_name = GS_CHROOT_PVT,
++ .procname = "chroot_deny_pivot",
++ .data = &grsec_enable_chroot_pivot,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
++ {
++ .ctl_name = GS_CHROOT_CD,
++ .procname = "chroot_enforce_chdir",
++ .data = &grsec_enable_chroot_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
++ {
++ .ctl_name = GS_CHROOT_CM,
++ .procname = "chroot_deny_chmod",
++ .data = &grsec_enable_chroot_chmod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
++ {
++ .ctl_name = GS_CHROOT_MK,
++ .procname = "chroot_deny_mknod",
++ .data = &grsec_enable_chroot_mknod,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
++ {
++ .ctl_name = GS_CHROOT_NI,
++ .procname = "chroot_restrict_nice",
++ .data = &grsec_enable_chroot_nice,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
++ {
++ .ctl_name = GS_CHROOT_EXECLOG,
++ .procname = "chroot_execlog",
++ .data = &grsec_enable_chroot_execlog,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
++ {
++ .ctl_name = GS_CHROOT_CAPS,
++ .procname = "chroot_caps",
++ .data = &grsec_enable_chroot_caps,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
++ {
++ .ctl_name = GS_CHROOT_SYSCTL,
++ .procname = "chroot_deny_sysctl",
++ .data = &grsec_enable_chroot_sysctl,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE
++ {
++ .ctl_name = GS_TPE,
++ .procname = "tpe",
++ .data = &grsec_enable_tpe,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_TPE_GID,
++ .procname = "tpe_gid",
++ .data = &grsec_tpe_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ {
++ .ctl_name = GS_TPE_ALL,
++ .procname = "tpe_restrict_all",
++ .data = &grsec_enable_tpe_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDPID
++ {
++ .ctl_name = GS_RANDPID,
++ .procname = "rand_pids",
++ .data = &grsec_enable_randpid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDID
++ {
++ .ctl_name = GS_RANDID,
++ .procname = "rand_ip_ids",
++ .data = &grsec_enable_randid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ {
++ .ctl_name = GS_RANDSRC,
++ .procname = "rand_tcp_src_ports",
++ .data = &grsec_enable_randsrc,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ {
++ .ctl_name = GS_RANDISN,
++ .procname = "rand_isns",
++ .data = &grsec_enable_randisn,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
++ {
++ .ctl_name = GS_SOCKET_ALL,
++ .procname = "socket_all",
++ .data = &grsec_enable_socket_all,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_SOCKET_ALL_GID,
++ .procname = "socket_all_gid",
++ .data = &grsec_socket_all_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
++ {
++ .ctl_name = GS_SOCKET_CLIENT,
++ .procname = "socket_client",
++ .data = &grsec_enable_socket_client,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_SOCKET_CLIENT_GID,
++ .procname = "socket_client_gid",
++ .data = &grsec_socket_client_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
++ {
++ .ctl_name = GS_SOCKET_SERVER,
++ .procname = "socket_server",
++ .data = &grsec_enable_socket_server,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_SOCKET_SERVER_GID,
++ .procname = "socket_server_gid",
++ .data = &grsec_socket_server_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
++ {
++ .ctl_name = GS_GROUP,
++ .procname = "audit_group",
++ .data = &grsec_enable_group,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ {
++ .ctl_name = GS_GID,
++ .procname = "audit_gid",
++ .data = &grsec_audit_gid,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
++ {
++ .ctl_name = GS_ACHDIR,
++ .procname = "audit_chdir",
++ .data = &grsec_enable_chdir,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
++ {
++ .ctl_name = GS_AMOUNT,
++ .procname = "audit_mount",
++ .data = &grsec_enable_mount,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_IPC
++ {
++ .ctl_name = GS_AIPC,
++ .procname = "audit_ipc",
++ .data = &grsec_enable_audit_ipc,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ {
++ .ctl_name = GS_TEXTREL,
++ .procname = "audit_textrel",
++ .data = &grsec_enable_audit_textrel,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_DMESG
++ {
++ .ctl_name = GS_DMSG,
++ .procname = "dmesg",
++ .data = &grsec_enable_dmesg,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++ {
++ .ctl_name = GS_RANDRPC,
++ .procname = "rand_rpc",
++ .data = &grsec_enable_randrpc,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
++ {
++ .ctl_name = GS_FINDTASK,
++ .procname = "chroot_findtask",
++ .data = &grsec_enable_chroot_findtask,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++ {
++ .ctl_name = GS_LOCK,
++ .procname = "grsec_lock",
++ .data = &grsec_lock,
++ .maxlen = sizeof(int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++ { .ctl_name = 0 }
++};
++#endif
+diff -urN linux-2.6.7/grsecurity/grsec_textrel.c linux-2.6.7/grsecurity/grsec_textrel.c
+--- linux-2.6.7/grsecurity/grsec_textrel.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_textrel.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,19 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <linux/file.h>
++#include <linux/grinternal.h>
++#include <linux/grsecurity.h>
++
++void
++gr_log_textrel(struct vm_area_struct * vma)
++{
++#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
++ if (grsec_enable_audit_textrel)
++ security_audit(GR_TEXTREL_AUDIT_MSG, vma->vm_file ?
++ gr_to_filename(vma->vm_file->f_dentry, vma->vm_file->f_vfsmnt)
++ : "<anonymous mapping>", vma->vm_start,
++ vma->vm_pgoff, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_time.c linux-2.6.7/grsecurity/grsec_time.c
+--- linux-2.6.7/grsecurity/grsec_time.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_time.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,13 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/grinternal.h>
++
++void
++gr_log_timechange(void)
++{
++#ifdef CONFIG_GRKERNSEC_TIME
++ if (grsec_enable_time)
++ security_alert_good(GR_TIME_MSG, DEFAULTSECARGS);
++#endif
++ return;
++}
+diff -urN linux-2.6.7/grsecurity/grsec_tpe.c linux-2.6.7/grsecurity/grsec_tpe.c
+--- linux-2.6.7/grsecurity/grsec_tpe.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsec_tpe.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,35 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/file.h>
++#include <linux/fs.h>
++#include <linux/grinternal.h>
++
++extern int gr_acl_tpe_check(void);
++
++int
++gr_tpe_allow(const struct file *file)
++{
++#ifdef CONFIG_GRKERNSEC
++ struct inode *inode = file->f_dentry->d_parent->d_inode;
++
++ if (current->uid && ((grsec_enable_tpe && in_group_p(grsec_tpe_gid)) || gr_acl_tpe_check()) &&
++ (inode->i_uid || (!inode->i_uid && ((inode->i_mode & S_IWGRP) ||
++ (inode->i_mode & S_IWOTH))))) {
++ security_alert(GR_EXEC_TPE_MSG,
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ }
++#ifdef CONFIG_GRKERNSEC_TPE_ALL
++ if (current->uid && grsec_enable_tpe && grsec_enable_tpe_all &&
++ ((inode->i_uid && (inode->i_uid != current->uid)) ||
++ (inode->i_mode & S_IWGRP) || (inode->i_mode & S_IWOTH))) {
++ security_alert(GR_EXEC_TPE_MSG,
++ gr_to_filename(file->f_dentry, file->f_vfsmnt),
++ DEFAULTSECARGS);
++ return 0;
++ }
++#endif
++#endif
++ return 1;
++}
+diff -urN linux-2.6.7/grsecurity/grsum.c linux-2.6.7/grsecurity/grsum.c
+--- linux-2.6.7/grsecurity/grsum.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/grsum.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,59 @@
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/mm.h>
++#include <asm/scatterlist.h>
++#include <linux/crypto.h>
++#include <linux/gracl.h>
++
++
++#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
++#error "crypto and sha256 must be built into the kernel"
++#endif
++
++int
++chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
++{
++ char *p;
++ struct crypto_tfm *tfm;
++ unsigned char temp_sum[GR_SHA_LEN];
++ struct scatterlist sg[2];
++ volatile int retval = 0;
++ volatile int dummy = 0;
++ unsigned int i;
++
++ tfm = crypto_alloc_tfm("sha256", 0);
++ if (tfm == NULL) {
++ /* should never happen, since sha256 should be built in */
++ return 1;
++ }
++
++ crypto_digest_init(tfm);
++
++ p = salt;
++ sg[0].page = virt_to_page(p);
++ sg[0].offset = ((long) p & ~PAGE_MASK);
++ sg[0].length = GR_SALT_LEN;
++
++ crypto_digest_update(tfm, sg, 1);
++
++ p = entry->pw;
++ sg[0].page = virt_to_page(p);
++ sg[0].offset = ((long) p & ~PAGE_MASK);
++ sg[0].length = strlen(entry->pw);
++
++ crypto_digest_update(tfm, sg, 1);
++
++ crypto_digest_final(tfm, temp_sum);
++
++ memset(entry->pw, 0, GR_PW_LEN);
++
++ for (i = 0; i < GR_SHA_LEN; i++)
++ if (sum[i] != temp_sum[i])
++ retval = 1;
++ else
++ dummy = 1; // waste a cycle
++
++ crypto_free_tfm(tfm);
++
++ return retval;
++}
+diff -urN linux-2.6.7/grsecurity/obsd_rand.c linux-2.6.7/grsecurity/obsd_rand.c
+--- linux-2.6.7/grsecurity/obsd_rand.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/grsecurity/obsd_rand.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,186 @@
++
++/*
++ * Copyright (c) 1996, 1997, 2000-2002 Michael Shalayeff.
++ *
++ * Version 1.89, last modified 19-Sep-99
++ *
++ * Copyright Theodore Ts'o, 1994, 1995, 1996, 1997, 1998, 1999.
++ * All rights reserved.
++ *
++ * Copyright 1998 Niels Provos <provos@citi.umich.edu>
++ * All rights reserved.
++ * Theo de Raadt <deraadt@openbsd.org> came up with the idea of using
++ * such a mathematical system to generate more random (yet non-repeating)
++ * ids to solve the resolver/named problem. But Niels designed the
++ * actual system based on the constraints.
++ *
++ * Redistribution and use in source and binary forms, with or without
++ * modification, are permitted provided that the following conditions
++ * are met:
++ * 1. Redistributions of source code must retain the above copyright
++ * notice, this list of conditions and the following disclaimer,
++ * 2. Redistributions in binary form must reproduce the above copyright
++ * notice, this list of conditions and the following disclaimer in the
++ * documentation and/or other materials provided with the distribution.
++ *
++ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
++ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
++ * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
++ * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
++ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
++ * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
++ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
++ */
++
++#include <linux/kernel.h>
++#include <linux/sched.h>
++#include <linux/time.h>
++#include <linux/timer.h>
++#include <linux/smp_lock.h>
++#include <linux/random.h>
++#include <linux/grsecurity.h>
++
++#define RU_OUT 180
++#define RU_MAX 30000
++#define RU_GEN 2
++#define RU_N 32749
++#define RU_AGEN 7
++#define RU_M 31104
++#define PFAC_N 3
++const static __u16 pfacts[PFAC_N] = { 2, 3, 2729 };
++
++static __u16 ru_x;
++static __u16 ru_seed, ru_seed2;
++static __u16 ru_a, ru_b;
++static __u16 ru_g;
++static __u16 ru_counter = 0;
++static __u16 ru_msb = 0;
++static unsigned long ru_reseed = 0;
++static __u32 tmp;
++
++#define TCP_RNDISS_ROUNDS 15
++#define TCP_RNDISS_OUT 7200
++#define TCP_RNDISS_MAX 30000
++
++static __u8 tcp_rndiss_sbox[128];
++static __u16 tcp_rndiss_msb;
++static __u16 tcp_rndiss_cnt;
++static unsigned long tcp_rndiss_reseed;
++
++static __u16 pmod(__u16, __u16, __u16);
++static void ip_initid(void);
++__u16 ip_randomid(void);
++
++static __u16
++pmod(__u16 gen, __u16 exp, __u16 mod)
++{
++ __u16 s, t, u;
++
++ s = 1;
++ t = gen;
++ u = exp;
++
++ while (u) {
++ if (u & 1)
++ s = (s * t) % mod;
++ u >>= 1;
++ t = (t * t) % mod;
++ }
++ return (s);
++}
++
++static void
++ip_initid(void)
++{
++ __u16 j, i;
++ int noprime = 1;
++
++ ru_x = ((tmp = get_random_long()) & 0xFFFF) % RU_M;
++
++ ru_seed = (tmp >> 16) & 0x7FFF;
++ ru_seed2 = get_random_long() & 0x7FFF;
++
++ ru_b = ((tmp = get_random_long()) & 0xfffe) | 1;
++ ru_a = pmod(RU_AGEN, (tmp >> 16) & 0xfffe, RU_M);
++ while (ru_b % 3 == 0)
++ ru_b += 2;
++
++ j = (tmp = get_random_long()) % RU_N;
++ tmp = tmp >> 16;
++
++ while (noprime) {
++ for (i = 0; i < PFAC_N; i++)
++ if (j % pfacts[i] == 0)
++ break;
++
++ if (i >= PFAC_N)
++ noprime = 0;
++ else
++ j = (j + 1) % RU_N;
++ }
++
++ ru_g = pmod(RU_GEN, j, RU_N);
++ ru_counter = 0;
++
++ ru_reseed = xtime.tv_sec + RU_OUT;
++ ru_msb = ru_msb == 0x8000 ? 0 : 0x8000;
++}
++
++__u16
++ip_randomid(void)
++{
++ int i, n;
++
++ if (ru_counter >= RU_MAX || time_after(get_seconds(), ru_reseed))
++ ip_initid();
++
++ if (!tmp)
++ tmp = get_random_long();
++
++ n = tmp & 0x3;
++ tmp = tmp >> 2;
++ if (ru_counter + n >= RU_MAX)
++ ip_initid();
++ for (i = 0; i <= n; i++)
++ ru_x = (ru_a * ru_x + ru_b) % RU_M;
++ ru_counter += i;
++
++ return ((ru_seed ^ pmod(ru_g, ru_seed2 ^ ru_x, RU_N)) | ru_msb);
++}
++
++__u16
++tcp_rndiss_encrypt(__u16 val)
++{
++ __u16 sum = 0, i;
++
++ for (i = 0; i < TCP_RNDISS_ROUNDS; i++) {
++ sum += 0x79b9;
++ val ^= ((__u16) tcp_rndiss_sbox[(val ^ sum) & 0x7f]) << 7;
++ val = ((val & 0xff) << 7) | (val >> 8);
++ }
++
++ return val;
++}
++
++static void
++tcp_rndiss_init(void)
++{
++ get_random_bytes(tcp_rndiss_sbox, sizeof (tcp_rndiss_sbox));
++ tcp_rndiss_reseed = get_seconds() + TCP_RNDISS_OUT;
++ tcp_rndiss_msb = tcp_rndiss_msb == 0x8000 ? 0 : 0x8000;
++ tcp_rndiss_cnt = 0;
++}
++
++__u32
++ip_randomisn(void)
++{
++ if (tcp_rndiss_cnt >= TCP_RNDISS_MAX ||
++ time_after(get_seconds(), tcp_rndiss_reseed))
++ tcp_rndiss_init();
++
++ return (((tcp_rndiss_encrypt(tcp_rndiss_cnt++) |
++ tcp_rndiss_msb) << 16) | (get_random_long() & 0x7fff));
++}
+diff -urN linux-2.6.7/include/asm-alpha/a.out.h linux-2.6.7/include/asm-alpha/a.out.h
+--- linux-2.6.7/include/asm-alpha/a.out.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/asm-alpha/a.out.h 2004-06-25 17:41:53 -0400
+@@ -98,7 +98,7 @@
+ set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000 \
+ ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
+
+-#define STACK_TOP \
++#define __STACK_TOP \
+ (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+
+ #endif
+diff -urN linux-2.6.7/include/asm-alpha/elf.h linux-2.6.7/include/asm-alpha/elf.h
+--- linux-2.6.7/include/asm-alpha/elf.h 2004-06-16 01:19:23 -0400
++++ linux-2.6.7/include/asm-alpha/elf.h 2004-06-25 17:41:53 -0400
+@@ -89,6 +89,17 @@
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -urN linux-2.6.7/include/asm-alpha/mman.h linux-2.6.7/include/asm-alpha/mman.h
+--- linux-2.6.7/include/asm-alpha/mman.h 2004-06-16 01:18:52 -0400
++++ linux-2.6.7/include/asm-alpha/mman.h 2004-06-25 17:41:53 -0400
+@@ -29,6 +29,10 @@
+ #define MAP_POPULATE 0x20000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x40000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x20000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_SYNC 2 /* synchronous memory sync */
+ #define MS_INVALIDATE 4 /* invalidate the caches */
+diff -urN linux-2.6.7/include/asm-alpha/page.h linux-2.6.7/include/asm-alpha/page.h
+--- linux-2.6.7/include/asm-alpha/page.h 2004-06-16 01:19:44 -0400
++++ linux-2.6.7/include/asm-alpha/page.h 2004-06-25 17:41:53 -0400
+@@ -106,6 +106,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _ALPHA_PAGE_H */
+diff -urN linux-2.6.7/include/asm-alpha/pgtable.h linux-2.6.7/include/asm-alpha/pgtable.h
+--- linux-2.6.7/include/asm-alpha/pgtable.h 2004-06-16 01:19:03 -0400
++++ linux-2.6.7/include/asm-alpha/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -96,6 +96,17 @@
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -urN linux-2.6.7/include/asm-i386/a.out.h linux-2.6.7/include/asm-i386/a.out.h
+--- linux-2.6.7/include/asm-i386/a.out.h 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/include/asm-i386/a.out.h 2004-06-25 17:41:53 -0400
+@@ -19,7 +19,11 @@
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP TASK_SIZE
++#ifdef CONFIG_PAX_SEGMEXEC
++#define __STACK_TOP ((current->flags & PF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE)
++#else
++#define __STACK_TOP TASK_SIZE
++#endif
+
+ #endif
+
+diff -urN linux-2.6.7/include/asm-i386/desc.h linux-2.6.7/include/asm-i386/desc.h
+--- linux-2.6.7/include/asm-i386/desc.h 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/include/asm-i386/desc.h 2004-06-25 17:41:53 -0400
+@@ -8,11 +8,71 @@
+
+ #include <linux/preempt.h>
+ #include <linux/smp.h>
++#include <linux/sched.h>
+
+ #include <asm/mmu.h>
++#include <asm/pgtable.h>
++#include <asm/tlbflush.h>
+
+ extern struct desc_struct cpu_gdt_table[NR_CPUS][GDT_ENTRIES];
+
++#define pax_open_kernel(flags, cr3) \
++do { \
++ typecheck(unsigned long,flags); \
++ typecheck(unsigned long,cr3); \
++ local_irq_save(flags); \
++ asm("movl %%cr3,%0":"=r" (cr3)); \
++ load_cr3(kernexec_pg_dir); \
++} while(0)
++
++#define pax_close_kernel(flags, cr3) \
++do { \
++ typecheck(unsigned long,flags); \
++ typecheck(unsigned long,cr3); \
++ asm("movl %0,%%cr3": :"r" (cr3)); \
++ local_irq_restore(flags); \
++} while(0)
++
++#define pax_open_kernel_noirq(cr3) \
++do { \
++ typecheck(unsigned long,cr3); \
++ asm("movl %%cr3,%0":"=r" (cr3)); \
++ load_cr3(kernexec_pg_dir); \
++} while(0)
++
++#define pax_close_kernel_noirq(cr3) \
++do { \
++ typecheck(unsigned long,cr3); \
++ asm("movl %0,%%cr3": :"r" (cr3)); \
++} while(0)
++
++static inline void set_user_cs(struct mm_struct *mm, int cpu)
++{
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long base = mm->context.user_cs_base;
++ unsigned long limit = mm->context.user_cs_limit;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++
++ pax_open_kernel(flags, cr3);
++#endif
++
++ if (limit) {
++ limit -= 1UL;
++ limit >>= 12;
++ }
++
++ cpu_gdt_table[cpu][GDT_ENTRY_DEFAULT_USER_CS].a = (limit & 0xFFFFUL) | (base << 16);
++ cpu_gdt_table[cpu][GDT_ENTRY_DEFAULT_USER_CS].b = (limit & 0xF0000UL) | 0xC0FB00UL | (base & 0xFF000000UL) | ((base >> 16) & 0xFFUL);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
++#endif
++}
++
+ struct Xgt_desc_struct {
+ unsigned short size;
+ unsigned long address __attribute__((packed));
+@@ -28,7 +88,7 @@
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+-extern struct desc_struct default_ldt[];
++extern const struct desc_struct default_ldt[];
+ extern void set_intr_gate(unsigned int irq, void * addr);
+
+ #define _set_tssldt_desc(n,addr,limit,type) \
+@@ -42,18 +102,35 @@
+ "rorl $16,%%eax" \
+ : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+-static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, void *addr)
++static inline void __set_tss_desc(unsigned int cpu, unsigned int entry, const void *addr)
+ {
+ _set_tssldt_desc(&cpu_gdt_table[cpu][entry], (int)addr, 235, 0x89);
+ }
+
+ #define set_tss_desc(cpu,addr) __set_tss_desc(cpu, GDT_ENTRY_TSS, addr)
+
+-static inline void set_ldt_desc(unsigned int cpu, void *addr, unsigned int size)
++static inline void __set_ldt_desc(unsigned int cpu, const void *addr, unsigned int size)
+ {
+ _set_tssldt_desc(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
+ }
+
++static inline void set_ldt_desc(unsigned int cpu, const void *addr, unsigned int size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long flags, cr3;
++
++ pax_open_kernel(flags, cr3);
++#endif
++
++ _set_tssldt_desc(&cpu_gdt_table[cpu][GDT_ENTRY_LDT], (int)addr, ((size << 3)-1), 0x82);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ pax_close_kernel(flags, cr3);
++#endif
++
++}
++
+ #define LDT_entry_a(info) \
+ ((((info)->base_addr & 0x0000ffff) << 16) | ((info)->limit & 0x0ffff))
+
+@@ -67,7 +144,7 @@
+ ((info)->seg_32bit << 22) | \
+ ((info)->limit_in_pages << 23) | \
+ ((info)->useable << 20) | \
+- 0x7000)
++ 0x7100)
+
+ #define LDT_empty(info) (\
+ (info)->base_addr == 0 && \
+@@ -104,7 +181,7 @@
+ */
+ static inline void load_LDT_nolock(mm_context_t *pc, int cpu)
+ {
+- void *segments = pc->ldt;
++ const void *segments = pc->ldt;
+ int count = pc->size;
+
+ if (likely(!count)) {
+@@ -123,6 +200,22 @@
+ put_cpu();
+ }
+
++static inline void _load_LDT(mm_context_t *pc)
++{
++ int cpu = get_cpu();
++ const void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (likely(!count)) {
++ segments = &default_ldt[0];
++ count = 5;
++ }
++
++ __set_ldt_desc(cpu, segments, count);
++ load_LDT_desc();
++ put_cpu();
++}
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+diff -urN linux-2.6.7/include/asm-i386/elf.h linux-2.6.7/include/asm-i386/elf.h
+--- linux-2.6.7/include/asm-i386/elf.h 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/include/asm-i386/elf.h 2004-06-25 17:41:53 -0400
+@@ -72,6 +72,19 @@
+
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000000UL
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 15
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 15
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->flags & PF_PAX_SEGMEXEC ? 15 : 16)
++#endif
++
+ /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different) */
+
+@@ -113,8 +126,11 @@
+ * Architecture-neutral AT_ values in 0-17, leave some room
+ * for more of them, start the x86-specific ones at 32.
+ */
++
++#ifndef CONFIG_PAX_NOVSYSCALL
+ #define AT_SYSINFO 32
+ #define AT_SYSINFO_EHDR 33
++#endif
+
+ #ifdef __KERNEL__
+ #define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+@@ -129,7 +145,14 @@
+
+ #define VSYSCALL_BASE (__fix_to_virt(FIX_VSYSCALL))
+ #define VSYSCALL_EHDR ((const struct elfhdr *) VSYSCALL_BASE)
++
++#ifndef CONFIG_PAX_NOVSYSCALL
++#ifdef CONFIG_PAX_SEGMEXEC
++#define VSYSCALL_ENTRY ((current->flags & PF_PAX_SEGMEXEC) ? (unsigned long) &__kernel_vsyscall - SEGMEXEC_TASK_SIZE : (unsigned long) &__kernel_vsyscall)
++#else
+ #define VSYSCALL_ENTRY ((unsigned long) &__kernel_vsyscall)
++#endif
++
+ extern void __kernel_vsyscall;
+
+ #define ARCH_DLINFO \
+@@ -185,3 +208,5 @@
+ #endif
+
+ #endif
++
++#endif
+diff -urN linux-2.6.7/include/asm-i386/mach-default/apm.h linux-2.6.7/include/asm-i386/mach-default/apm.h
+--- linux-2.6.7/include/asm-i386/mach-default/apm.h 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/include/asm-i386/mach-default/apm.h 2004-06-25 17:41:53 -0400
+@@ -36,7 +36,7 @@
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -60,7 +60,7 @@
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urN linux-2.6.7/include/asm-i386/mach-pc9800/apm.h linux-2.6.7/include/asm-i386/mach-pc9800/apm.h
+--- linux-2.6.7/include/asm-i386/mach-pc9800/apm.h 2004-06-16 01:19:17 -0400
++++ linux-2.6.7/include/asm-i386/mach-pc9800/apm.h 2004-06-25 17:41:53 -0400
+@@ -39,7 +39,7 @@
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+ "pushfl\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -64,7 +64,7 @@
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+ "pushfl\n\t"
+- "lcall *%%cs:apm_bios_entry\n\t"
++ "lcall *%%ss:apm_bios_entry\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+diff -urN linux-2.6.7/include/asm-i386/mman.h linux-2.6.7/include/asm-i386/mman.h
+--- linux-2.6.7/include/asm-i386/mman.h 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/include/asm-i386/mman.h 2004-06-25 17:41:53 -0400
+@@ -23,6 +23,10 @@
+ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define MAP_MIRROR 0x20000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-i386/mmu.h linux-2.6.7/include/asm-i386/mmu.h
+--- linux-2.6.7/include/asm-i386/mmu.h 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/include/asm-i386/mmu.h 2004-06-25 17:41:53 -0400
+@@ -12,6 +12,17 @@
+ int size;
+ struct semaphore sem;
+ void *ldt;
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ unsigned long user_cs_base;
++ unsigned long user_cs_limit;
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpumask_t cpu_user_cs_mask;
++#endif
++
++#endif
++
+ } mm_context_t;
+
+ #endif
+diff -urN linux-2.6.7/include/asm-i386/mmu_context.h linux-2.6.7/include/asm-i386/mmu_context.h
+--- linux-2.6.7/include/asm-i386/mmu_context.h 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/include/asm-i386/mmu_context.h 2004-06-25 17:41:53 -0400
+@@ -46,6 +46,13 @@
+ */
+ if (unlikely(prev->context.ldt != next->context.ldt))
+ load_LDT_nolock(&next->context, cpu);
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
++ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(next, cpu);
+ }
+ #ifdef CONFIG_SMP
+ else {
+@@ -58,6 +65,12 @@
+ */
+ load_cr3(next->pgd);
+ load_LDT_nolock(&next->context, cpu);
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ cpu_set(cpu, next->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(next, cpu);
+ }
+ }
+ #endif
+diff -urN linux-2.6.7/include/asm-i386/module.h linux-2.6.7/include/asm-i386/module.h
+--- linux-2.6.7/include/asm-i386/module.h 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/include/asm-i386/module.h 2004-06-25 14:23:23 -0400
+@@ -66,6 +66,12 @@
+ #define MODULE_STACKSIZE ""
+ #endif
+
+-#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE
++#ifdef CONFIG_GRKERNSEC
++#define MODULE_GRSEC "GRSECURITY "
++#else
++#define MODULE_GRSEC ""
++#endif
++
++#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_REGPARM MODULE_STACKSIZE MODULE_GRSEC
+
+ #endif /* _ASM_I386_MODULE_H */
+diff -urN linux-2.6.7/include/asm-i386/page.h linux-2.6.7/include/asm-i386/page.h
+--- linux-2.6.7/include/asm-i386/page.h 2004-06-16 01:18:59 -0400
++++ linux-2.6.7/include/asm-i386/page.h 2004-06-25 17:41:53 -0400
+@@ -120,6 +120,19 @@
+ #define __PAGE_OFFSET (0xC0000000UL)
+ #endif
+
++#ifdef CONFIG_PAX_KERNEXEC
++#ifdef __ASSEMBLY__
++#define __KERNEL_TEXT_OFFSET (0xC0400000)
++#else
++#define __KERNEL_TEXT_OFFSET (0xC0400000UL)
++#endif
++#else
++#ifdef __ASSEMBLY__
++#define __KERNEL_TEXT_OFFSET (0)
++#else
++#define __KERNEL_TEXT_OFFSET (0x0UL)
++#endif
++#endif
+
+ #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
+ #define VMALLOC_RESERVE ((unsigned long)__VMALLOC_RESERVE)
+@@ -139,6 +152,19 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
++#endif
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define CONFIG_ARCH_TRACK_EXEC_LIMIT 1
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _I386_PAGE_H */
+diff -urN linux-2.6.7/include/asm-i386/pgalloc.h linux-2.6.7/include/asm-i386/pgalloc.h
+--- linux-2.6.7/include/asm-i386/pgalloc.h 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/include/asm-i386/pgalloc.h 2004-06-25 17:41:53 -0400
+@@ -8,7 +8,7 @@
+ #include <linux/mm.h> /* for struct page */
+
+ #define pmd_populate_kernel(mm, pmd, pte) \
+- set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
++ set_pmd(pmd, __pmd(_KERNPG_TABLE + __pa(pte)))
+
+ static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
+ {
+diff -urN linux-2.6.7/include/asm-i386/pgtable.h linux-2.6.7/include/asm-i386/pgtable.h
+--- linux-2.6.7/include/asm-i386/pgtable.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/asm-i386/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -25,13 +25,6 @@
+ #include <linux/list.h>
+ #include <linux/spinlock.h>
+
+-/*
+- * ZERO_PAGE is a global shared page that is always zero: used
+- * for zero-mapped memory areas etc..
+- */
+-#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+-extern unsigned long empty_zero_page[1024];
+-extern pgd_t swapper_pg_dir[1024];
+ extern kmem_cache_t *pgd_cache;
+ extern kmem_cache_t *pmd_cache;
+ extern spinlock_t pgd_lock;
+@@ -43,21 +36,31 @@
+ void pgtable_cache_init(void);
+ void paging_init(void);
+
+-#endif /* !__ASSEMBLY__ */
+-
+ /*
+ * The Linux x86 paging architecture is 'compile-time dual-mode', it
+ * implements both the traditional 2-level x86 page tables and the
+ * newer 3-level PAE-mode page tables.
+ */
+-#ifndef __ASSEMBLY__
+ #ifdef CONFIG_X86_PAE
+ # include <asm/pgtable-3level.h>
+ #else
+ # include <asm/pgtable-2level.h>
+ #endif
++
++/*
++ * ZERO_PAGE is a global shared page that is always zero: used
++ * for zero-mapped memory areas etc..
++ */
++#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
++extern unsigned long empty_zero_page[1024];
++extern pgd_t swapper_pg_dir[PTRS_PER_PTE];
++
++#ifdef CONFIG_PAX_KERNEXEC
++extern pgd_t kernexec_pg_dir[PTRS_PER_PTE];
+ #endif
+
++#endif /* !__ASSEMBLY__ */
++
+ #define PMD_SIZE (1UL << PMD_SHIFT)
+ #define PMD_MASK (~(PMD_SIZE-1))
+ #define PGDIR_SIZE (1UL << PGDIR_SHIFT)
+@@ -136,6 +139,16 @@
+ #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define _PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+
+@@ -155,18 +168,18 @@
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+@@ -343,6 +356,8 @@
+
+ #endif /* !__ASSEMBLY__ */
+
++#define HAVE_ARCH_UNMAPPED_AREA
++
+ #ifndef CONFIG_DISCONTIGMEM
+ #define kern_addr_valid(addr) (1)
+ #endif /* !CONFIG_DISCONTIGMEM */
+diff -urN linux-2.6.7/include/asm-i386/processor.h linux-2.6.7/include/asm-i386/processor.h
+--- linux-2.6.7/include/asm-i386/processor.h 2004-06-16 01:18:56 -0400
++++ linux-2.6.7/include/asm-i386/processor.h 2004-06-25 17:41:53 -0400
+@@ -28,7 +28,7 @@
+ };
+
+ #define desc_empty(desc) \
+- (!((desc)->a + (desc)->b))
++ (!((desc)->a | (desc)->b))
+
+ #define desc_equal(desc1, desc2) \
+ (((desc1)->a == (desc2)->a) && ((desc1)->b == (desc2)->b))
+@@ -297,10 +297,23 @@
+ */
+ #define TASK_SIZE (PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE ((PAGE_OFFSET) / 2)
++#endif
++
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
++
++#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN((current->flags & PF_PAX_PAGEEXEC)? 0x00110000UL : (current->flags & PF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3 : TASK_SIZE/3))
++#elif defined(CONFIG_PAX_PAGEEXEC)
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN((current->flags & PF_PAX_PAGEEXEC)? 0x00110000UL : TASK_SIZE/3))
++#elif defined(CONFIG_PAX_SEGMEXEC)
++#define TASK_UNMAPPED_BASE (PAGE_ALIGN((current->flags & PF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3 : TASK_SIZE/3))
++#else
+ #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
++#endif
+
+ /*
+ * Size of io_bitmap, covering ports 0 to 0x3ff.
+@@ -494,16 +507,12 @@
+ unsigned long get_wchan(struct task_struct *p);
+
+ #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
+-#define KSTK_TOP(info) \
+-({ \
+- unsigned long *__ptr = (unsigned long *)(info); \
+- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
+-})
++#define KSTK_TOP(info) ((info)->task.thread.esp0)
+
+ #define task_pt_regs(task) \
+ ({ \
+ struct pt_regs *__regs__; \
+- __regs__ = (struct pt_regs *)KSTK_TOP((task)->thread_info); \
++ __regs__ = (struct pt_regs *)((task)->thread.esp0); \
+ __regs__ - 1; \
+ })
+
+@@ -627,7 +636,7 @@
+ extern inline void prefetch(const void *x)
+ {
+ alternative_input(ASM_NOP4,
+- "prefetchnta (%1)",
++ "prefetchnta (%2)",
+ X86_FEATURE_XMM,
+ "r" (x));
+ }
+@@ -641,7 +650,7 @@
+ extern inline void prefetchw(const void *x)
+ {
+ alternative_input(ASM_NOP4,
+- "prefetchw (%1)",
++ "prefetchw (%2)",
+ X86_FEATURE_3DNOW,
+ "r" (x));
+ }
+diff -urN linux-2.6.7/include/asm-i386/system.h linux-2.6.7/include/asm-i386/system.h
+--- linux-2.6.7/include/asm-i386/system.h 2004-06-16 01:18:38 -0400
++++ linux-2.6.7/include/asm-i386/system.h 2004-06-25 17:41:53 -0400
+@@ -5,6 +5,7 @@
+ #include <linux/kernel.h>
+ #include <asm/segment.h>
+ #include <asm/cpufeature.h>
++#include <asm/page.h>
+ #include <linux/bitops.h> /* for LOCK_PREFIX */
+
+ #ifdef __KERNEL__
+@@ -301,7 +302,7 @@
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+- " .long 661b\n" /* label */ \
++ " .long 661b + %c1\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+@@ -309,7 +310,7 @@
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+- ".previous" :: "i" (feature) : "memory")
++ ".previous" :: "i" (feature), "i" (__KERNEL_TEXT_OFFSET) : "memory")
+
+ /*
+ * Alternative inline assembly with input.
+@@ -325,7 +326,7 @@
+ asm volatile ("661:\n\t" oldinstr "\n662:\n" \
+ ".section .altinstructions,\"a\"\n" \
+ " .align 4\n" \
+- " .long 661b\n" /* label */ \
++ " .long 661b + %c1\n" /* label */ \
+ " .long 663f\n" /* new instruction */ \
+ " .byte %c0\n" /* feature bit */ \
+ " .byte 662b-661b\n" /* sourcelen */ \
+@@ -333,7 +334,7 @@
+ ".previous\n" \
+ ".section .altinstr_replacement,\"ax\"\n" \
+ "663:\n\t" newinstr "\n664:\n" /* replacement */ \
+- ".previous" :: "i" (feature), input)
++ ".previous" :: "i" (feature), "i" (__KERNEL_TEXT_OFFSET), input)
+
+ /*
+ * Force strict CPU ordering.
+diff -urN linux-2.6.7/include/asm-ia64/elf.h linux-2.6.7/include/asm-ia64/elf.h
+--- linux-2.6.7/include/asm-ia64/elf.h 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/include/asm-ia64/elf.h 2004-06-25 17:41:53 -0400
+@@ -162,6 +162,16 @@
+ typedef struct ia64_fpreg elf_fpreg_t;
+ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#endif
+
+
+ struct pt_regs; /* forward declaration... */
+diff -urN linux-2.6.7/include/asm-ia64/mman.h linux-2.6.7/include/asm-ia64/mman.h
+--- linux-2.6.7/include/asm-ia64/mman.h 2004-06-16 01:19:23 -0400
++++ linux-2.6.7/include/asm-ia64/mman.h 2004-06-25 17:41:53 -0400
+@@ -31,6 +31,10 @@
+ #define MAP_POPULATE 0x08000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x40000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-ia64/page.h linux-2.6.7/include/asm-ia64/page.h
+--- linux-2.6.7/include/asm-ia64/page.h 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/include/asm-ia64/page.h 2004-06-25 17:41:53 -0400
+@@ -187,4 +187,13 @@
+ (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
+ ? VM_EXEC : 0))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* _ASM_IA64_PAGE_H */
+diff -urN linux-2.6.7/include/asm-ia64/pgtable.h linux-2.6.7/include/asm-ia64/pgtable.h
+--- linux-2.6.7/include/asm-ia64/pgtable.h 2004-06-16 01:19:09 -0400
++++ linux-2.6.7/include/asm-ia64/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -121,6 +121,17 @@
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -urN linux-2.6.7/include/asm-ia64/ustack.h linux-2.6.7/include/asm-ia64/ustack.h
+--- linux-2.6.7/include/asm-ia64/ustack.h 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/include/asm-ia64/ustack.h 2004-06-25 17:41:53 -0400
+@@ -11,6 +11,6 @@
+ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
+ /* Make a default stack size of 2GB */
+ #define DEFAULT_USER_STACK_SIZE (1UL << 31)
+-#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
++#define __STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+
+ #endif /* _ASM_IA64_USTACK_H */
+diff -urN linux-2.6.7/include/asm-mips/a.out.h linux-2.6.7/include/asm-mips/a.out.h
+--- linux-2.6.7/include/asm-mips/a.out.h 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/include/asm-mips/a.out.h 2004-06-25 17:41:53 -0400
+@@ -36,10 +36,10 @@
+ #ifdef __KERNEL__
+
+ #ifdef CONFIG_MIPS32
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+ #endif
+ #ifdef CONFIG_MIPS64
+-#define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
++#define __STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
+ #endif
+
+ #endif
+diff -urN linux-2.6.7/include/asm-mips/elf.h linux-2.6.7/include/asm-mips/elf.h
+--- linux-2.6.7/include/asm-mips/elf.h 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/include/asm-mips/elf.h 2004-06-25 17:41:53 -0400
+@@ -273,4 +273,15 @@
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #endif /* _ASM_ELF_H */
+diff -urN linux-2.6.7/include/asm-mips/page.h linux-2.6.7/include/asm-mips/page.h
+--- linux-2.6.7/include/asm-mips/page.h 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/include/asm-mips/page.h 2004-06-25 17:41:53 -0400
+@@ -124,6 +124,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
+ #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+
+diff -urN linux-2.6.7/include/asm-parisc/a.out.h linux-2.6.7/include/asm-parisc/a.out.h
+--- linux-2.6.7/include/asm-parisc/a.out.h 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/include/asm-parisc/a.out.h 2004-06-25 17:41:53 -0400
+@@ -22,7 +22,7 @@
+ /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
+ * prumpf */
+
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+ #endif
+
+diff -urN linux-2.6.7/include/asm-parisc/elf.h linux-2.6.7/include/asm-parisc/elf.h
+--- linux-2.6.7/include/asm-parisc/elf.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/asm-parisc/elf.h 2004-06-25 17:41:53 -0400
+@@ -337,6 +337,17 @@
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -urN linux-2.6.7/include/asm-parisc/mman.h linux-2.6.7/include/asm-parisc/mman.h
+--- linux-2.6.7/include/asm-parisc/mman.h 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/include/asm-parisc/mman.h 2004-06-25 17:41:53 -0400
+@@ -23,6 +23,10 @@
+ #define MAP_POPULATE 0x10000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x20000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_SYNC 1 /* synchronous memory sync */
+ #define MS_ASYNC 2 /* sync memory asynchronously */
+ #define MS_INVALIDATE 4 /* invalidate the caches */
+diff -urN linux-2.6.7/include/asm-parisc/page.h linux-2.6.7/include/asm-parisc/page.h
+--- linux-2.6.7/include/asm-parisc/page.h 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/include/asm-parisc/page.h 2004-06-25 17:41:53 -0400
+@@ -157,6 +157,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _PARISC_PAGE_H */
+diff -urN linux-2.6.7/include/asm-parisc/pgtable.h linux-2.6.7/include/asm-parisc/pgtable.h
+--- linux-2.6.7/include/asm-parisc/pgtable.h 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/include/asm-parisc/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -189,6 +189,17 @@
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -urN linux-2.6.7/include/asm-ppc/a.out.h linux-2.6.7/include/asm-ppc/a.out.h
+--- linux-2.6.7/include/asm-ppc/a.out.h 2004-06-16 01:19:23 -0400
++++ linux-2.6.7/include/asm-ppc/a.out.h 2004-06-25 17:41:53 -0400
+@@ -2,7 +2,7 @@
+ #define __PPC_A_OUT_H__
+
+ /* grabbed from the intel stuff */
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+
+ struct exec
+diff -urN linux-2.6.7/include/asm-ppc/elf.h linux-2.6.7/include/asm-ppc/elf.h
+--- linux-2.6.7/include/asm-ppc/elf.h 2004-06-16 01:18:37 -0400
++++ linux-2.6.7/include/asm-ppc/elf.h 2004-06-25 17:41:53 -0400
+@@ -89,6 +89,17 @@
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 15
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 15
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 15
++#endif
++
+ #define USE_ELF_CORE_DUMP
+ #define ELF_EXEC_PAGESIZE 4096
+
+diff -urN linux-2.6.7/include/asm-ppc/mman.h linux-2.6.7/include/asm-ppc/mman.h
+--- linux-2.6.7/include/asm-ppc/mman.h 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/include/asm-ppc/mman.h 2004-06-25 17:41:53 -0400
+@@ -24,6 +24,10 @@
+ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0200
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-ppc/page.h linux-2.6.7/include/asm-ppc/page.h
+--- linux-2.6.7/include/asm-ppc/page.h 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/include/asm-ppc/page.h 2004-06-25 17:41:53 -0400
+@@ -163,5 +163,14 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _PPC_PAGE_H */
+diff -urN linux-2.6.7/include/asm-ppc/pgtable.h linux-2.6.7/include/asm-ppc/pgtable.h
+--- linux-2.6.7/include/asm-ppc/pgtable.h 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/include/asm-ppc/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -349,11 +349,21 @@
+
+ #define PAGE_NONE __pgprot(_PAGE_BASE)
+ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
++
++#if defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x)
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
+
+ #define PAGE_KERNEL __pgprot(_PAGE_RAM)
+ #define PAGE_KERNEL_NOCACHE __pgprot(_PAGE_IO)
+@@ -365,21 +375,21 @@
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY_X
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY_X
+-#define __P100 PAGE_READONLY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
++#define __P100 PAGE_READONLY_X
+ #define __P101 PAGE_READONLY_X
+-#define __P110 PAGE_COPY
++#define __P110 PAGE_COPY_X
+ #define __P111 PAGE_COPY_X
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY_X
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED_X
+-#define __S100 PAGE_READONLY
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
++#define __S100 PAGE_READONLY_X
+ #define __S101 PAGE_READONLY_X
+-#define __S110 PAGE_SHARED
++#define __S110 PAGE_SHARED_X
+ #define __S111 PAGE_SHARED_X
+
+ #ifndef __ASSEMBLY__
+diff -urN linux-2.6.7/include/asm-ppc64/a.out.h linux-2.6.7/include/asm-ppc64/a.out.h
+--- linux-2.6.7/include/asm-ppc64/a.out.h 2004-06-16 01:18:52 -0400
++++ linux-2.6.7/include/asm-ppc64/a.out.h 2004-06-25 17:41:53 -0400
+@@ -35,7 +35,7 @@
+ /* Give 32-bit user space a full 4G address space to live in. */
+ #define STACK_TOP_USER32 (TASK_SIZE_USER32)
+
+-#define STACK_TOP ((test_thread_flag(TIF_32BIT) || \
++#define __STACK_TOP ((test_thread_flag(TIF_32BIT) || \
+ (ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? \
+ STACK_TOP_USER32 : STACK_TOP_USER64)
+
+diff -urN linux-2.6.7/include/asm-ppc64/elf.h linux-2.6.7/include/asm-ppc64/elf.h
+--- linux-2.6.7/include/asm-ppc64/elf.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/asm-ppc64/elf.h 2004-06-25 17:41:53 -0400
+@@ -154,6 +154,17 @@
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (0x10000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((test_thread_flag(TIF_32BIT) || (ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((test_thread_flag(TIF_32BIT) || (ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((test_thread_flag(TIF_32BIT) || (ppcdebugset(PPCDBG_BINFMT_32ADDR))) ? 16 : 24)
++#endif
++
+ #ifdef __KERNEL__
+
+ /* Common routine for both 32-bit and 64-bit processes */
+diff -urN linux-2.6.7/include/asm-ppc64/mman.h linux-2.6.7/include/asm-ppc64/mman.h
+--- linux-2.6.7/include/asm-ppc64/mman.h 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/include/asm-ppc64/mman.h 2004-06-25 17:41:53 -0400
+@@ -29,6 +29,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0200
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-ppc64/page.h linux-2.6.7/include/asm-ppc64/page.h
+--- linux-2.6.7/include/asm-ppc64/page.h 2004-06-16 01:18:47 -0400
++++ linux-2.6.7/include/asm-ppc64/page.h 2004-06-25 17:41:53 -0400
+@@ -245,5 +245,14 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _PPC64_PAGE_H */
+diff -urN linux-2.6.7/include/asm-ppc64/pgtable.h linux-2.6.7/include/asm-ppc64/pgtable.h
+--- linux-2.6.7/include/asm-ppc64/pgtable.h 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/include/asm-ppc64/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -112,6 +112,17 @@
+ #define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
+ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+ #define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_BASE | _PAGE_RW | _PAGE_USER | _PAGE_GUARDED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_WRENABLE)
+ #define PAGE_KERNEL_CI __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | \
+ _PAGE_WRENABLE | _PAGE_NO_CACHE | _PAGE_GUARDED)
+@@ -123,21 +134,21 @@
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY_X
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY_X
+-#define __P100 PAGE_READONLY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
++#define __P100 PAGE_READONLY_X
+ #define __P101 PAGE_READONLY_X
+-#define __P110 PAGE_COPY
++#define __P110 PAGE_COPY_X
+ #define __P111 PAGE_COPY_X
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY_X
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED_X
+-#define __S100 PAGE_READONLY
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
++#define __S100 PAGE_READONLY_X
+ #define __S101 PAGE_READONLY_X
+-#define __S110 PAGE_SHARED
++#define __S110 PAGE_SHARED_X
+ #define __S111 PAGE_SHARED_X
+
+ #ifndef __ASSEMBLY__
+diff -urN linux-2.6.7/include/asm-sparc/a.out.h linux-2.6.7/include/asm-sparc/a.out.h
+--- linux-2.6.7/include/asm-sparc/a.out.h 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/include/asm-sparc/a.out.h 2004-06-25 17:41:53 -0400
+@@ -91,7 +91,7 @@
+
+ #include <asm/page.h>
+
+-#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
++#define __STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
+
+ #endif /* __KERNEL__ */
+
+diff -urN linux-2.6.7/include/asm-sparc/elf.h linux-2.6.7/include/asm-sparc/elf.h
+--- linux-2.6.7/include/asm-sparc/elf.h 2004-06-16 01:19:51 -0400
++++ linux-2.6.7/include/asm-sparc/elf.h 2004-06-25 17:41:53 -0400
+@@ -145,6 +145,17 @@
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -urN linux-2.6.7/include/asm-sparc/mman.h linux-2.6.7/include/asm-sparc/mman.h
+--- linux-2.6.7/include/asm-sparc/mman.h 2004-06-16 01:20:04 -0400
++++ linux-2.6.7/include/asm-sparc/mman.h 2004-06-25 17:41:53 -0400
+@@ -27,6 +27,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-sparc/page.h linux-2.6.7/include/asm-sparc/page.h
+--- linux-2.6.7/include/asm-sparc/page.h 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/include/asm-sparc/page.h 2004-06-25 17:41:53 -0400
+@@ -176,6 +176,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _SPARC_PAGE_H */
+diff -urN linux-2.6.7/include/asm-sparc/pgtable.h linux-2.6.7/include/asm-sparc/pgtable.h
+--- linux-2.6.7/include/asm-sparc/pgtable.h 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/include/asm-sparc/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -91,6 +91,13 @@
+ BTFIXUPDEF_INT(page_shared)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT SUN4C_PMD_SHIFT
+@@ -112,6 +119,16 @@
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(BTFIXUP_INT(page_shared_noexec))
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -urN linux-2.6.7/include/asm-sparc/pgtsrmmu.h linux-2.6.7/include/asm-sparc/pgtsrmmu.h
+--- linux-2.6.7/include/asm-sparc/pgtsrmmu.h 2004-06-16 01:19:10 -0400
++++ linux-2.6.7/include/asm-sparc/pgtsrmmu.h 2004-06-25 17:41:53 -0400
+@@ -114,6 +114,16 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -urN linux-2.6.7/include/asm-sparc/uaccess.h linux-2.6.7/include/asm-sparc/uaccess.h
+--- linux-2.6.7/include/asm-sparc/uaccess.h 2004-06-16 01:19:53 -0400
++++ linux-2.6.7/include/asm-sparc/uaccess.h 2004-06-25 17:41:53 -0400
+@@ -41,7 +41,7 @@
+ * No one can read/write anything from userland in the kernel space by setting
+ * large size and address near to PAGE_OFFSET - a fault will break his intentions.
+ */
+-#define __user_ok(addr,size) ((addr) < STACK_TOP)
++#define __user_ok(addr,size) ((addr) < __STACK_TOP)
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
+ #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+diff -urN linux-2.6.7/include/asm-sparc64/a.out.h linux-2.6.7/include/asm-sparc64/a.out.h
+--- linux-2.6.7/include/asm-sparc64/a.out.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/asm-sparc64/a.out.h 2004-06-25 17:41:53 -0400
+@@ -95,7 +95,7 @@
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
++#define __STACK_TOP (test_thread_flag(TIF_32BIT) ? 0xf0000000 : 0x80000000000L)
+
+ #endif
+
+diff -urN linux-2.6.7/include/asm-sparc64/elf.h linux-2.6.7/include/asm-sparc64/elf.h
+--- linux-2.6.7/include/asm-sparc64/elf.h 2004-06-16 01:19:10 -0400
++++ linux-2.6.7/include/asm-sparc64/elf.h 2004-06-25 17:41:53 -0400
+@@ -140,6 +140,16 @@
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) (PAGE_SHIFT + 1)
++#define PAX_DELTA_MMAP_LEN(tsk) (test_thread_flag(TIF_32BIT) ? 14 : 28 )
++#define PAX_DELTA_EXEC_LSB(tsk) (PAGE_SHIFT + 1)
++#define PAX_DELTA_EXEC_LEN(tsk) (test_thread_flag(TIF_32BIT) ? 14 : 28 )
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (test_thread_flag(TIF_32BIT) ? 15 : 29 )
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -urN linux-2.6.7/include/asm-sparc64/mman.h linux-2.6.7/include/asm-sparc64/mman.h
+--- linux-2.6.7/include/asm-sparc64/mman.h 2004-06-16 01:19:23 -0400
++++ linux-2.6.7/include/asm-sparc64/mman.h 2004-06-25 17:41:53 -0400
+@@ -27,6 +27,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-sparc64/page.h linux-2.6.7/include/asm-sparc64/page.h
+--- linux-2.6.7/include/asm-sparc64/page.h 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/include/asm-sparc64/page.h 2004-06-25 17:41:53 -0400
+@@ -168,6 +168,15 @@
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* !(__KERNEL__) */
+
+ #endif /* !(_SPARC64_PAGE_H) */
+diff -urN linux-2.6.7/include/asm-sparc64/pgtable.h linux-2.6.7/include/asm-sparc64/pgtable.h
+--- linux-2.6.7/include/asm-sparc64/pgtable.h 2004-06-16 01:18:52 -0400
++++ linux-2.6.7/include/asm-sparc64/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -128,7 +128,8 @@
+
+ /* Here are the SpitFire software bits we use in the TTE's. */
+ #define _PAGE_FILE _AC(0x0000000000001000,UL) /* Pagecache page */
+-#define _PAGE_MODIFIED _AC(0x0000000000000800,UL) /* Modified (dirty) */
++#define _PAGE_MODIFIED _AC(0x0000000000001000,UL) /* Modified (dirty) */
++#define _PAGE_EXEC _AC(0x0000000000000800,UL) /* Executable SW Bit */
+ #define _PAGE_ACCESSED _AC(0x0000000000000400,UL) /* Accessed (ref'd) */
+ #define _PAGE_READ _AC(0x0000000000000200,UL) /* Readable SW Bit */
+ #define _PAGE_WRITE _AC(0x0000000000000100,UL) /* Writable SW Bit */
+@@ -164,16 +165,30 @@
+
+ /* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
+ #define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS | _PAGE_WRITE)
++ __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
+
+ #define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS)
++ __ACCESS_BITS | _PAGE_EXEC)
+
+ #define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS)
++ __ACCESS_BITS | _PAGE_EXEC)
+
+ #define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)
++ __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS | \
++ _PAGE_EXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS | _PAGE_WRITE)
++# define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS)
++# define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
+
+ #define _PFN_MASK _PAGE_PADDR
+
+@@ -181,18 +196,18 @@
+ __ACCESS_BITS | _PAGE_E)
+
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+diff -urN linux-2.6.7/include/asm-x86_64/a.out.h linux-2.6.7/include/asm-x86_64/a.out.h
+--- linux-2.6.7/include/asm-x86_64/a.out.h 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/include/asm-x86_64/a.out.h 2004-06-25 17:41:53 -0400
+@@ -21,7 +21,7 @@
+
+ #ifdef __KERNEL__
+ #include <linux/thread_info.h>
+-#define STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
++#define __STACK_TOP (test_thread_flag(TIF_IA32) ? IA32_PAGE_OFFSET : TASK_SIZE)
+ #endif
+
+ #endif /* __A_OUT_GNU_H__ */
+diff -urN linux-2.6.7/include/asm-x86_64/elf.h linux-2.6.7/include/asm-x86_64/elf.h
+--- linux-2.6.7/include/asm-x86_64/elf.h 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/include/asm-x86_64/elf.h 2004-06-25 17:41:53 -0400
+@@ -89,6 +89,17 @@
+
+ #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (test_thread_flag(TIF_IA32) ? 0x08048000UL : 0x400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (test_thread_flag(TIF_IA32) ? 16 : 24)
++#endif
++
+ /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different). Assumes current is the process
+ getting dumped. */
+diff -urN linux-2.6.7/include/asm-x86_64/mman.h linux-2.6.7/include/asm-x86_64/mman.h
+--- linux-2.6.7/include/asm-x86_64/mman.h 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/include/asm-x86_64/mman.h 2004-06-25 17:41:53 -0400
+@@ -24,6 +24,10 @@
+ #define MAP_POPULATE 0x8000 /* populate (prefault) pagetables */
+ #define MAP_NONBLOCK 0x10000 /* do not block on IO */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x8000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -urN linux-2.6.7/include/asm-x86_64/page.h linux-2.6.7/include/asm-x86_64/page.h
+--- linux-2.6.7/include/asm-x86_64/page.h 2004-06-16 01:18:56 -0400
++++ linux-2.6.7/include/asm-x86_64/page.h 2004-06-25 17:41:53 -0400
+@@ -132,6 +132,16 @@
+
+ #define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define VM_DATA_DEFAULT_FLAGS __VM_DATA_DEFAULT_FLAGS
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#else
+ #define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+@@ -142,6 +152,8 @@
+ #define VM_STACK_DEFAULT_FLAGS \
+ (test_thread_flag(TIF_IA32) ? vm_stack_flags32 : vm_stack_flags)
+
++#endif
++
+ #define CONFIG_ARCH_GATE_AREA 1
+
+ #ifndef __ASSEMBLY__
+diff -urN linux-2.6.7/include/asm-x86_64/pgalloc.h linux-2.6.7/include/asm-x86_64/pgalloc.h
+--- linux-2.6.7/include/asm-x86_64/pgalloc.h 2004-06-16 01:19:29 -0400
++++ linux-2.6.7/include/asm-x86_64/pgalloc.h 2004-06-25 17:41:53 -0400
+@@ -8,7 +8,7 @@
+ #include <linux/mm.h>
+
+ #define pmd_populate_kernel(mm, pmd, pte) \
+- set_pmd(pmd, __pmd(_PAGE_TABLE | __pa(pte)))
++ set_pmd(pmd, __pmd(_KERNPG_TABLE | __pa(pte)))
+ #define pgd_populate(mm, pgd, pmd) \
+ set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pmd)))
+
+diff -urN linux-2.6.7/include/asm-x86_64/pgtable.h linux-2.6.7/include/asm-x86_64/pgtable.h
+--- linux-2.6.7/include/asm-x86_64/pgtable.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/asm-x86_64/pgtable.h 2004-06-25 17:41:53 -0400
+@@ -170,6 +170,10 @@
+ #define PAGE_COPY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_NX)
+ #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
++
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++#define PAGE_SHARED_NOEXEC PAGE_SHARED
++
+ #define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_NX)
+ #define __PAGE_KERNEL_EXECUTABLE \
+diff -urN linux-2.6.7/include/linux/a.out.h linux-2.6.7/include/linux/a.out.h
+--- linux-2.6.7/include/linux/a.out.h 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/include/linux/a.out.h 2004-06-25 17:41:53 -0400
+@@ -7,6 +7,16 @@
+
+ #include <asm/a.out.h>
+
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __DELTA_STACK (current->mm->delta_stack)
++#else
++#define __DELTA_STACK 0UL
++#endif
++
++#ifndef STACK_TOP
++#define STACK_TOP (__STACK_TOP - __DELTA_STACK)
++#endif
++
+ #endif /* __STRUCT_EXEC_OVERRIDE__ */
+
+ /* these go in the N_MACHTYPE field */
+@@ -37,6 +47,14 @@
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++#define F_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff -urN linux-2.6.7/include/linux/binfmts.h linux-2.6.7/include/linux/binfmts.h
+--- linux-2.6.7/include/linux/binfmts.h 2004-06-16 01:19:26 -0400
++++ linux-2.6.7/include/linux/binfmts.h 2004-06-25 17:41:53 -0400
+@@ -36,6 +36,7 @@
+ of the time same as filename, but could be
+ different for binfmt_{misc,script} */
+ unsigned long loader, exec;
++ int misc;
+ };
+
+ /*
+@@ -71,5 +72,8 @@
+ extern int do_coredump(long signr, int exit_code, struct pt_regs * regs);
+ extern int set_binfmt(struct linux_binfmt *new);
+
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++void pax_report_insns(void *pc, void *sp);
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_BINFMTS_H */
+diff -urN linux-2.6.7/include/linux/elf.h linux-2.6.7/include/linux/elf.h
+--- linux-2.6.7/include/linux/elf.h 2004-06-16 01:18:59 -0400
++++ linux-2.6.7/include/linux/elf.h 2004-06-25 17:41:53 -0400
+@@ -37,6 +37,16 @@
+
+ #define PT_GNU_STACK (PT_LOOS + 0x474e551)
+
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++#define EF_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ /* These constants define the different elf file types */
+ #define ET_NONE 0
+ #define ET_REL 1
+@@ -122,6 +132,8 @@
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_LOPROC 0x70000000
+ #define DT_HIPROC 0x7fffffff
+
+@@ -272,6 +284,19 @@
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1 << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1 << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1 << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1 << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1 << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1 << 9) /* Disable MPROTECT */
++#define PF_RANDEXEC (1 << 10) /* Enable RANDEXEC */
++#define PF_NORANDEXEC (1 << 11) /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1 << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1 << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1 << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1 << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -364,6 +389,8 @@
+ #define EI_OSABI 7
+ #define EI_PAD 8
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -420,6 +447,7 @@
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -427,6 +455,7 @@
+ #define elfhdr elf64_hdr
+ #define elf_phdr elf64_phdr
+ #define elf_note elf64_note
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
+diff -urN linux-2.6.7/include/linux/fs.h linux-2.6.7/include/linux/fs.h
+--- linux-2.6.7/include/linux/fs.h 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/include/linux/fs.h 2004-06-25 14:07:21 -0400
+@@ -1192,7 +1192,7 @@
+
+ /* fs/open.c */
+
+-extern int do_truncate(struct dentry *, loff_t start);
++extern int do_truncate(struct dentry *, loff_t start, struct vfsmount *);
+ extern struct file *filp_open(const char *, int, int);
+ extern struct file * dentry_open(struct dentry *, struct vfsmount *, int);
+ extern int filp_close(struct file *, fl_owner_t id);
+diff -urN linux-2.6.7/include/linux/gracl.h linux-2.6.7/include/linux/gracl.h
+--- linux-2.6.7/include/linux/gracl.h 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/include/linux/gracl.h 2004-07-05 15:04:57 -0400
+@@ -0,0 +1,250 @@
++#ifndef GR_ACL_H
++#define GR_ACL_H
++
++#include <linux/grdefs.h>
++#include <linux/resource.h>
++#include <linux/dcache.h>
++#include <asm/resource.h>
++
++/* Major status information */
++
++#define GR_VERSION "grsecurity 2.0.1"
++#define GRSECURITY_VERSION 0x201
++
++enum {
++
++ SHUTDOWN = 0,
++ ENABLE = 1,
++ SPROLE = 2,
++ RELOAD = 3,
++ SEGVMOD = 4,
++ STATUS = 5,
++ UNSPROLE = 6
++};
++
++/* Password setup definitions
++ * kernel/grhash.c */
++enum {
++ GR_PW_LEN = 128,
++ GR_SALT_LEN = 16,
++ GR_SHA_LEN = 32,
++};
++
++enum {
++ GR_SPROLE_LEN = 64,
++};
++
++/* Begin Data Structures */
++
++struct sprole_pw {
++ unsigned char *rolename;
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
++};
++
++struct name_entry {
++ ino_t inode;
++ dev_t device;
++ char *name;
++ __u16 len;
++};
++
++struct acl_role_db {
++ struct acl_role_label **r_hash;
++ __u32 r_size;
++};
++
++struct name_db {
++ struct name_entry **n_hash;
++ __u32 n_size;
++};
++
++struct crash_uid {
++ uid_t uid;
++ unsigned long expires;
++};
++
++struct gr_hash_struct {
++ void **table;
++ void **nametable;
++ void *first;
++ __u32 table_size;
++ __u32 used_size;
++ int type;
++};
++
++/* Userspace Grsecurity ACL data structures */
++struct acl_subject_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++ __u32 cap_mask;
++ __u32 cap_lower;
++
++ struct rlimit res[RLIM_NLIMITS + 1];
++ __u16 resmask;
++
++ __u8 user_trans_type;
++ __u8 group_trans_type;
++ uid_t *user_transitions;
++ gid_t *group_transitions;
++ __u16 user_trans_num;
++ __u16 group_trans_num;
++
++ __u32 ip_proto[8];
++ __u32 ip_type;
++ struct acl_ip_label **ips;
++ __u32 ip_num;
++
++ __u32 crashes;
++ unsigned long expires;
++
++ struct acl_subject_label *parent_subject;
++ struct gr_hash_struct *hash;
++ struct acl_subject_label *prev;
++ struct acl_subject_label *next;
++
++ struct acl_object_label **obj_hash;
++ __u32 obj_hash_size;
++};
++
++struct role_allowed_ip {
++ __u32 addr;
++ __u32 netmask;
++
++ struct role_allowed_ip *prev;
++ struct role_allowed_ip *next;
++};
++
++struct role_transition {
++ char *rolename;
++
++ struct role_transition *prev;
++ struct role_transition *next;
++};
++
++struct acl_role_label {
++ char *rolename;
++ uid_t uidgid;
++ __u16 roletype;
++
++ __u16 auth_attempts;
++ unsigned long expires;
++
++ struct acl_subject_label *root_label;
++ struct gr_hash_struct *hash;
++
++ struct acl_role_label *prev;
++ struct acl_role_label *next;
++
++ struct role_transition *transitions;
++ struct role_allowed_ip *allowed_ips;
++ uid_t *domain_children;
++ __u16 domain_child_num;
++
++ struct acl_subject_label **subj_hash;
++ __u32 subj_hash_size;
++};
++
++struct user_acl_role_db {
++ struct acl_role_label **r_table;
++ __u32 num_pointers; /* Number of allocations to track */
++ __u32 num_roles; /* Number of roles */
++ __u32 num_domain_children; /* Number of domain children */
++ __u32 num_subjects; /* Number of subjects */
++ __u32 num_objects; /* Number of objects */
++};
++
++struct acl_object_label {
++ char *filename;
++ ino_t inode;
++ dev_t device;
++ __u32 mode;
++
++ struct acl_subject_label *nested;
++ struct acl_object_label *globbed;
++
++ /* next two structures not used */
++
++ struct acl_object_label *prev;
++ struct acl_object_label *next;
++};
++
++struct acl_ip_label {
++ __u32 addr;
++ __u32 netmask;
++ __u16 low, high;
++ __u8 mode;
++ __u32 type;
++ __u32 proto[8];
++
++ /* next two structures not used */
++
++ struct acl_ip_label *prev;
++ struct acl_ip_label *next;
++};
++
++struct gr_arg {
++ struct user_acl_role_db role_db;
++ unsigned char pw[GR_PW_LEN];
++ unsigned char salt[GR_SALT_LEN];
++ unsigned char sum[GR_SHA_LEN];
++ unsigned char sp_role[GR_SPROLE_LEN];
++ struct sprole_pw *sprole_pws;
++ dev_t segv_device;
++ ino_t segv_inode;
++ uid_t segv_uid;
++ __u16 num_sprole_pws;
++ __u16 mode;
++};
++
++struct gr_arg_wrapper {
++ struct gr_arg *arg;
++ __u32 version;
++ __u32 size;
++};
++
++struct subject_map {
++ struct acl_subject_label *user;
++ struct acl_subject_label *kernel;
++};
++
++struct acl_subj_map_db {
++ struct subject_map **s_hash;
++ __u32 s_size;
++};
++
++/* End Data Structures Section */
++
++/* Hash functions generated by empirical testing by Brad Spengler
++ Makes good use of the low bits of the inode. Generally 0-1 times
++ in loop for successful match. 0-3 for unsuccessful match.
++ Shift/add algorithm with modulus of table size and an XOR*/
++
++static __inline__ unsigned long
++rhash(const uid_t uid, const __u16 type, const unsigned long sz)
++{
++ return (((uid << type) + (uid ^ type)) % sz);
++}
++
++static __inline__ unsigned long
++shash(const struct acl_subject_label *userp, const unsigned long sz)
++{
++ return ((const unsigned long)userp % sz);
++}
++
++static __inline__ unsigned long
++fhash(const ino_t ino, const dev_t dev, const unsigned long sz)
++{
++ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
++}
++
++static __inline__ unsigned long
++nhash(const char *name, const __u16 len, const unsigned long sz)
++{
++ return full_name_hash(name, len) % sz;
++}
++
++#endif
++
+diff -urN linux-2.6.7/include/linux/gralloc.h linux-2.6.7/include/linux/gralloc.h
+--- linux-2.6.7/include/linux/gralloc.h 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/include/linux/gralloc.h 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,8 @@
++#ifndef __GRALLOC_H
++#define __GRALLOC_H
++
++void acl_free_all(void);
++int acl_alloc_stack_init(unsigned long size);
++void *acl_alloc(unsigned long len);
++
++#endif
+diff -urN linux-2.6.7/include/linux/grdefs.h linux-2.6.7/include/linux/grdefs.h
+--- linux-2.6.7/include/linux/grdefs.h 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/include/linux/grdefs.h 2004-07-05 15:04:57 -0400
+@@ -0,0 +1,117 @@
++#ifndef GRDEFS_H
++#define GRDEFS_H
++
++/* Begin grsecurity status declarations */
++
++enum {
++ GR_READY = 0x01,
++ GR_STATUS_INIT = 0x00 // disabled state
++};
++
++/* Begin ACL declarations */
++
++/* Role flags */
++
++enum {
++ GR_ROLE_USER = 0x0001,
++ GR_ROLE_GROUP = 0x0002,
++ GR_ROLE_DEFAULT = 0x0004,
++ GR_ROLE_SPECIAL = 0x0008,
++ GR_ROLE_AUTH = 0x0010,
++ GR_ROLE_NOPW = 0x0020,
++ GR_ROLE_GOD = 0x0040,
++ GR_ROLE_LEARN = 0x0080,
++ GR_ROLE_TPE = 0x0100,
++ GR_ROLE_DOMAIN = 0x0200
++};
++
++/* ACL Subject and Object mode flags */
++enum {
++ GR_DELETED = 0x00000080
++};
++
++/* ACL Object-only mode flags */
++enum {
++ GR_READ = 0x00000001,
++ GR_APPEND = 0x00000002,
++ GR_WRITE = 0x00000004,
++ GR_EXEC = 0x00000008,
++ GR_FIND = 0x00000010,
++ GR_INHERIT = 0x00000040,
++ GR_PTRACERD = 0x00000100,
++ GR_SETID = 0x00000200,
++ GR_CREATE = 0x00000400,
++ GR_DELETE = 0x00000800,
++ GR_NOPTRACE = 0x00001000,
++ GR_AUDIT_READ = 0x00002000,
++ GR_AUDIT_APPEND = 0x00004000,
++ GR_AUDIT_WRITE = 0x00008000,
++ GR_AUDIT_EXEC = 0x00010000,
++ GR_AUDIT_FIND = 0x00020000,
++ GR_AUDIT_INHERIT= 0x00040000,
++ GR_AUDIT_SETID = 0x00080000,
++ GR_AUDIT_CREATE = 0x00100000,
++ GR_AUDIT_DELETE = 0x00200000,
++ GR_SUPPRESS = 0x00400000,
++ GR_NOLEARN = 0x00800000
++};
++
++#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
++ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
++ GR_AUDIT_CREATE | GR_AUDIT_DELETE)
++
++/* ACL subject-only mode flags */
++enum {
++ GR_KILL = 0x00000001,
++ GR_VIEW = 0x00000002,
++ GR_PROTECTED = 0x00000100,
++ GR_LEARN = 0x00000200,
++ GR_OVERRIDE = 0x00000400,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_DUMMY = 0x00000800,
++ GR_PAXPAGE = 0x00001000,
++ GR_PAXSEGM = 0x00002000,
++ GR_PAXGCC = 0x00004000,
++ GR_PAXRANDMMAP = 0x00008000,
++ GR_PAXRANDEXEC = 0x00010000,
++ GR_PAXMPROTECT = 0x00020000,
++ GR_PROTSHM = 0x00040000,
++ GR_KILLPROC = 0x00080000,
++ GR_KILLIPPROC = 0x00100000,
++ /* just a placeholder, this mode is only used in userspace */
++ GR_NOTROJAN = 0x00200000,
++ GR_PROTPROCFD = 0x00400000,
++ GR_PROCACCT = 0x00800000,
++ GR_RELAXPTRACE = 0x01000000,
++ GR_NESTED = 0x02000000
++};
++
++enum {
++ GR_ID_USER = 0x01,
++ GR_ID_GROUP = 0x02,
++};
++
++enum {
++ GR_ID_ALLOW = 0x01,
++ GR_ID_DENY = 0x02,
++};
++
++#define GR_CRASH_RES 11
++#define GR_UIDTABLE_MAX 500
++
++/* begin resource learning section */
++enum {
++ GR_RLIM_CPU_BUMP = 60,
++ GR_RLIM_FSIZE_BUMP = 50000,
++ GR_RLIM_DATA_BUMP = 10000,
++ GR_RLIM_STACK_BUMP = 1000,
++ GR_RLIM_CORE_BUMP = 10000,
++ GR_RLIM_RSS_BUMP = 500000,
++ GR_RLIM_NPROC_BUMP = 1,
++ GR_RLIM_NOFILE_BUMP = 5,
++ GR_RLIM_MEMLOCK_BUMP = 50000,
++ GR_RLIM_AS_BUMP = 500000,
++ GR_RLIM_LOCKS_BUMP = 2
++};
++
++#endif
+diff -urN linux-2.6.7/include/linux/grinternal.h linux-2.6.7/include/linux/grinternal.h
+--- linux-2.6.7/include/linux/grinternal.h 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/include/linux/grinternal.h 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,202 @@
++#ifndef __GRINTERNAL_H
++#define __GRINTERNAL_H
++
++#ifdef CONFIG_GRKERNSEC
++
++#include <linux/fs.h>
++#include <linux/gracl.h>
++#include <linux/grdefs.h>
++#include <linux/grmsg.h>
++
++extern void gr_add_learn_entry(const char *fmt, ...);
++extern __u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
++ const struct vfsmount *mnt);
++extern __u32 gr_check_create(const struct dentry *new_dentry,
++ const struct dentry *parent,
++ const struct vfsmount *mnt, const __u32 mode);
++extern int gr_check_protected_task(const struct task_struct *task);
++extern __u32 to_gr_audit(const __u32 reqmode);
++extern int gr_set_acls(const int type);
++
++extern int gr_acl_is_enabled(void);
++extern char gr_roletype_to_char(void);
++
++extern void gr_handle_alertkill(void);
++extern char *gr_to_filename(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern char *gr_to_filename1(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern char *gr_to_filename2(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern char *gr_to_filename3(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++
++extern int grsec_enable_link;
++extern int grsec_enable_fifo;
++extern int grsec_enable_execve;
++extern int grsec_enable_forkbomb;
++extern int grsec_forkbomb_gid;
++extern int grsec_forkbomb_sec;
++extern int grsec_forkbomb_max;
++extern int grsec_enable_execlog;
++extern int grsec_enable_signal;
++extern int grsec_enable_forkfail;
++extern int grsec_enable_time;
++extern int grsec_enable_chroot_shmat;
++extern int grsec_enable_chroot_findtask;
++extern int grsec_enable_chroot_mount;
++extern int grsec_enable_chroot_double;
++extern int grsec_enable_chroot_pivot;
++extern int grsec_enable_chroot_chdir;
++extern int grsec_enable_chroot_chmod;
++extern int grsec_enable_chroot_mknod;
++extern int grsec_enable_chroot_fchdir;
++extern int grsec_enable_chroot_nice;
++extern int grsec_enable_chroot_execlog;
++extern int grsec_enable_chroot_caps;
++extern int grsec_enable_chroot_sysctl;
++extern int grsec_enable_chroot_unix;
++extern int grsec_enable_tpe;
++extern int grsec_tpe_gid;
++extern int grsec_enable_tpe_all;
++extern int grsec_enable_sidcaps;
++extern int grsec_enable_randpid;
++extern int grsec_enable_socket_all;
++extern int grsec_socket_all_gid;
++extern int grsec_enable_socket_client;
++extern int grsec_socket_client_gid;
++extern int grsec_enable_socket_server;
++extern int grsec_socket_server_gid;
++extern int grsec_audit_gid;
++extern int grsec_enable_group;
++extern int grsec_enable_audit_ipc;
++extern int grsec_enable_audit_textrel;
++extern int grsec_enable_mount;
++extern int grsec_enable_chdir;
++extern int grsec_lock;
++
++extern struct task_struct *child_reaper;
++
++extern spinlock_t grsec_alert_lock;
++extern unsigned long grsec_alert_wtime;
++extern unsigned long grsec_alert_fyet;
++
++extern spinlock_t grsec_audit_lock;
++
++extern rwlock_t grsec_exec_file_lock;
++
++#define gr_task_fullpath(tsk) (tsk->exec_file ? \
++ gr_to_filename2(tsk->exec_file->f_dentry, \
++ tsk->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath(tsk) (tsk->parent->exec_file ? \
++ gr_to_filename3(tsk->parent->exec_file->f_dentry, \
++ tsk->parent->exec_file->f_vfsmnt) : "/")
++
++#define gr_task_fullpath0(tsk) (tsk->exec_file ? \
++ gr_to_filename(tsk->exec_file->f_dentry, \
++ tsk->exec_file->f_vfsmnt) : "/")
++
++#define gr_parent_task_fullpath0(tsk) (tsk->parent->exec_file ? \
++ gr_to_filename1(tsk->parent->exec_file->f_dentry, \
++ tsk->parent->exec_file->f_vfsmnt) : "/")
++
++#define proc_is_chrooted(tsk_a) ((tsk_a->pid > 1) && \
++ ((tsk_a->fs->root->d_inode->i_sb->s_dev != \
++ child_reaper->fs->root->d_inode->i_sb->s_dev) || \
++ (tsk_a->fs->root->d_inode->i_ino != \
++ child_reaper->fs->root->d_inode->i_ino)))
++
++#define have_same_root(tsk_a,tsk_b) ((tsk_a->fs->root->d_inode->i_sb->s_dev == \
++ tsk_b->fs->root->d_inode->i_sb->s_dev) && \
++ (tsk_a->fs->root->d_inode->i_ino == \
++ tsk_b->fs->root->d_inode->i_ino))
++
++#define DEFAULTSECARGS gr_task_fullpath(current), current->comm, \
++ current->pid, current->uid, \
++ current->euid, current->gid, current->egid, \
++ gr_parent_task_fullpath(current), \
++ current->parent->comm, current->parent->pid, \
++ current->parent->uid, current->parent->euid, \
++ current->parent->gid, current->parent->egid
++
++#define GR_CHROOT_CAPS ( \
++ CAP_TO_MASK(CAP_FOWNER) | \
++ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
++ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
++ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
++ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
++ CAP_TO_MASK(CAP_IPC_OWNER))
++
++#define security_alert_good(normal_msg,args...) \
++({ \
++ read_lock(&tasklist_lock); \
++ read_lock(&grsec_exec_file_lock); \
++ spin_lock(&grsec_alert_lock); \
++ \
++ if (!grsec_alert_wtime || get_seconds() - grsec_alert_wtime > CONFIG_GRKERNSEC_FLOODTIME) { \
++ grsec_alert_wtime = get_seconds(); grsec_alert_fyet = 0; \
++ if (current->curr_ip && gr_acl_is_enabled()) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) " normal_msg "\n", NIPQUAD(current->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename , ## args); \
++ else if (current->curr_ip) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else if (gr_acl_is_enabled()) \
++ printk(KERN_ALERT "grsec: (%.64s:%c:%.950s) " normal_msg "\n", current->role->rolename, gr_roletype_to_char(), current->acl->filename , ## args); \
++ else \
++ printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
++ } else if((get_seconds() - grsec_alert_wtime < CONFIG_GRKERNSEC_FLOODTIME) && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) { \
++ grsec_alert_fyet++; \
++ if (current->curr_ip && gr_acl_is_enabled()) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) " normal_msg "\n", NIPQUAD(current->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename , ## args); \
++ else if (current->curr_ip) \
++ printk(KERN_ALERT "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else if (gr_acl_is_enabled()) \
++ printk(KERN_ALERT "grsec: (%.64s:%c:%.950s) " normal_msg "\n", current->role->rolename, gr_roletype_to_char(), current->acl->filename , ## args); \
++ else \
++ printk(KERN_ALERT "grsec: " normal_msg "\n" , ## args); \
++ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) { \
++ grsec_alert_wtime = get_seconds(); grsec_alert_fyet++; \
++ printk(KERN_ALERT "grsec: more alerts, logging disabled for " \
++ "%d seconds\n", CONFIG_GRKERNSEC_FLOODTIME); \
++ } \
++ \
++ spin_unlock(&grsec_alert_lock); \
++ read_unlock(&grsec_exec_file_lock); \
++ read_unlock(&tasklist_lock); \
++})
++
++#define security_alert(normal_msg,args...) \
++({ \
++ security_alert_good(normal_msg,args); \
++ gr_handle_alertkill(); \
++})
++
++#define security_audit(normal_msg,args...) \
++({ \
++ read_lock(&tasklist_lock); \
++ read_lock(&grsec_exec_file_lock); \
++ spin_lock(&grsec_audit_lock); \
++ if (current->curr_ip && gr_acl_is_enabled()) \
++ printk(KERN_INFO "grsec: From %u.%u.%u.%u: (%.64s:%c:%.950s) " normal_msg "\n", NIPQUAD(current->curr_ip), current->role->rolename, gr_roletype_to_char(), current->acl->filename , ## args); \
++ else if (current->curr_ip) \
++ printk(KERN_INFO "grsec: From %u.%u.%u.%u: " normal_msg "\n", NIPQUAD(current->curr_ip) , ## args); \
++ else if (gr_acl_is_enabled()) \
++ printk(KERN_INFO "grsec: (%.64s:%c:%.950s) " normal_msg "\n", current->role->rolename, gr_roletype_to_char(), current->acl->filename , ## args); \
++ else \
++ printk(KERN_INFO "grsec: " normal_msg "\n" , ## args); \
++ spin_unlock(&grsec_audit_lock); \
++ read_unlock(&grsec_exec_file_lock); \
++ read_unlock(&tasklist_lock); \
++})
++
++#define security_learn(normal_msg,args...) \
++({ \
++ preempt_disable(); \
++ gr_add_learn_entry(normal_msg "\n", ## args); \
++ preempt_enable(); \
++})
++
++#endif
++
++#endif
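
The security_alert_good() macro above rate-limits grsec alerts: a new window opens whenever CONFIG_GRKERNSEC_FLOODTIME seconds have passed since the window began, up to CONFIG_GRKERNSEC_FLOODBURST further alerts are printed inside that window, and a single notice then announces that logging is suppressed until the window expires. Below is a minimal userspace sketch of that pattern only; the constant values, function name and printf() calls are illustrative stand-ins, not part of the patch.

#include <stdio.h>
#include <time.h>

#define FLOODTIME  10   /* stand-in for CONFIG_GRKERNSEC_FLOODTIME  */
#define FLOODBURST 4    /* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t window_start;   /* mirrors grsec_alert_wtime */
static unsigned int sent;     /* mirrors grsec_alert_fyet  */

/* Returns 1 if the caller should print its alert now, 0 if suppressed. */
static int alert_allowed(time_t now)
{
    if (!window_start || now - window_start > FLOODTIME) {
        window_start = now;       /* open a fresh window */
        sent = 0;
        return 1;
    }
    if (sent < FLOODBURST) {      /* still inside the burst budget */
        sent++;
        return 1;
    }
    if (sent == FLOODBURST) {     /* print the suppression notice exactly once */
        window_start = now;
        sent++;
        printf("more alerts, logging disabled for %d seconds\n", FLOODTIME);
    }
    return 0;
}

int main(void)
{
    for (int i = 0; i < 8; i++)
        if (alert_allowed(time(NULL)))
            printf("alert %d logged\n", i);
    return 0;
}

Run back to back, this logs the first alert plus FLOODBURST more, emits one suppression notice, and drops the rest until the window times out, which matches the three branches of the macro.
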
+diff -urN linux-2.6.7/include/linux/grmsg.h linux-2.6.7/include/linux/grmsg.h
+--- linux-2.6.7/include/linux/grmsg.h 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/include/linux/grmsg.h 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,108 @@
++#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d, parent %.256s[%.16s:%d] uid/euid:%d/%d gid/egid:%d/%d"
++#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%u.%u.%u.%u TTY:%.64s uid/euid:%d/%d gid/egid:%d/%d"
++#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by " DEFAULTSECMSG
++#define GR_IOPERM_MSG "denied use of ioperm() by " DEFAULTSECMSG
++#define GR_IOPL_MSG "denied use of iopl() by " DEFAULTSECMSG
++#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by " DEFAULTSECMSG
++#define GR_UNIX_CHROOT_MSG "denied connect to abstract AF_UNIX socket outside of chroot by " DEFAULTSECMSG
++#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by " DEFAULTSECMSG
++#define GR_KMEM_MSG "attempted write to /dev/kmem by " DEFAULTSECMSG
++#define GR_PORT_OPEN_MSG "attempted open of /dev/port by " DEFAULTSECMSG
++#define GR_MEM_WRITE_MSG "attempted write of /dev/mem by " DEFAULTSECMSG
++#define GR_MEM_MMAP_MSG "attempted mmap write of /dev/[k]mem by " DEFAULTSECMSG
++#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by " DEFAULTSECMSG
++#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%u.%u.%u.%u"
++#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by " DEFAULTSECMSG
++#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by " DEFAULTSECMSG
++#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by " DEFAULTSECMSG
++#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by " DEFAULTSECMSG
++#define GR_MKNOD_CHROOT_MSG "refused attempt to mknod %.950s from chroot by " DEFAULTSECMSG
++#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by " DEFAULTSECMSG
++#define GR_UNIXCONNECT_ACL_MSG "%s connect to the unix domain socket %.950s by " DEFAULTSECMSG
++#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by " DEFAULTSECMSG
++#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by " DEFAULTSECMSG
++#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by " DEFAULTSECMSG
++#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by " DEFAULTSECMSG
++#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for " DEFAULTSECMSG
++#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by " DEFAULTSECMSG
++#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by " DEFAULTSECMSG
++#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by " DEFAULTSECMSG
++#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by " DEFAULTSECMSG
++#define GR_NPROC_MSG "attempt to overstep process limit by " DEFAULTSECMSG
++#define GR_EXEC_ACL_MSG "%s execution of %.950s by " DEFAULTSECMSG
++#define GR_EXEC_TPE_MSG "denied untrusted exec of %.950s by " DEFAULTSECMSG
++#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning uid %u from login for %lu seconds"
++#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " Banning execution for %lu seconds"
++#define GR_MOUNT_CHROOT_MSG "denied attempt to mount %.30s as %.930s from chroot by " DEFAULTSECMSG
++#define GR_PIVOT_CHROOT_MSG "denied attempt to pivot_root from chroot by " DEFAULTSECMSG
++#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by " DEFAULTSECMSG
++#define GR_ATIME_ACL_MSG "%s access time change of %.950s by " DEFAULTSECMSG
++#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by " DEFAULTSECMSG
++#define GR_CHROOT_CHROOT_MSG "denied attempt to double chroot to %.950s by " DEFAULTSECMSG
++#define GR_FCHMOD_ACL_MSG "%s fchmod of %.950s by " DEFAULTSECMSG
++#define GR_CHMOD_CHROOT_MSG "denied attempt to chmod +s %.950s by " DEFAULTSECMSG
++#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by " DEFAULTSECMSG
++#define GR_CHROOT_FCHDIR_MSG "attempted fchdir outside of chroot to %.950s by " DEFAULTSECMSG
++#define GR_CHOWN_ACL_MSG "%s chown of %.950s by " DEFAULTSECMSG
++#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by " DEFAULTSECMSG
++#define GR_INITF_ACL_MSG "init_variables() failed %s"
++#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
++#define GR_DEV_ACL_MSG "/dev/grsec: being fed garbage %d bytes sent %d required"
++#define GR_SHUTS_ACL_MSG "shutdown auth success for " DEFAULTSECMSG
++#define GR_SHUTF_ACL_MSG "shutdown auth failure for " DEFAULTSECMSG
++#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for " DEFAULTSECMSG
++#define GR_SEGVMODS_ACL_MSG "segvmod auth success for " DEFAULTSECMSG
++#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for " DEFAULTSECMSG
++#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for " DEFAULTSECMSG
++#define GR_ENABLE_ACL_MSG "Loaded %s"
++#define GR_ENABLEF_ACL_MSG "Unable to load %s for " DEFAULTSECMSG " RBAC system may already be enabled."
++#define GR_RELOADI_ACL_MSG "Ignoring reload request for disabled RBAC system"
++#define GR_RELOAD_ACL_MSG "Reloaded %s"
++#define GR_RELOADF_ACL_MSG "Failed reload of %s for " DEFAULTSECMSG
++#define GR_SPROLEI_ACL_MSG "Ignoring change to special role for disabled RBAC system for " DEFAULTSECMSG
++#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by " DEFAULTSECMSG
++#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by " DEFAULTSECMSG
++#define GR_SPROLEF_ACL_MSG "special role %s failure for " DEFAULTSECMSG
++#define GR_UNSPROLEI_ACL_MSG "Ignoring unauth of special role for disabled RBAC system for " DEFAULTSECMSG
++#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by " DEFAULTSECMSG
++#define GR_UNSPROLEF_ACL_MSG "special role unauth of %s failure for " DEFAULTSECMSG
++#define GR_INVMODE_ACL_MSG "Invalid mode %d by " DEFAULTSECMSG
++#define GR_MAXPW_ACL_MSG "Maximum pw attempts reached (%d), locking password authentication"
++#define GR_MAXROLEPW_ACL_MSG "Maximum pw attempts reached (%d) trying to auth to special role %s, locking auth for role of " DEFAULTSECMSG
++#define GR_PRIORITY_CHROOT_MSG "attempted priority change of process (%.16s:%d) by " DEFAULTSECMSG
++#define GR_CAPSET_CHROOT_MSG "denied capset of (%.16s:%d) within chroot by " DEFAULTSECMSG
++#define GR_FAILFORK_MSG "failed fork with errno %d by " DEFAULTSECMSG
++#define GR_NICE_CHROOT_MSG "attempted priority change by " DEFAULTSECMSG
++#define GR_UNISIGLOG_MSG "signal %d sent to " DEFAULTSECMSG
++#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by " DEFAULTSECMSG
++#define GR_SIG_ACL_MSG "Attempted send of signal %d to protected task " DEFAULTSECMSG " by " DEFAULTSECMSG
++#define GR_SYSCTL_MSG "attempt to modify grsecurity sysctl value : %.32s by " DEFAULTSECMSG
++#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by " DEFAULTSECMSG
++#define GR_TIME_MSG "time set by " DEFAULTSECMSG
++#define GR_DEFACL_MSG "Fatal: Unable to find ACL for (%.16s:%d)"
++#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by " DEFAULTSECMSG
++#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by " DEFAULTSECMSG
++#define GR_SOCK_MSG "attempted socket(%.16s,%.16s,%.16s) by " DEFAULTSECMSG
++#define GR_SOCK2_MSG "attempted socket(%d,%.16s,%.16s) by " DEFAULTSECMSG
++#define GR_BIND_MSG "attempted bind() by " DEFAULTSECMSG
++#define GR_CONNECT_MSG "attempted connect by " DEFAULTSECMSG
++#define GR_BIND_ACL_MSG "attempted bind to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " DEFAULTSECMSG
++#define GR_CONNECT_ACL_MSG "attempted connect to %u.%u.%u.%u port %u sock type %.16s protocol %.16s by " DEFAULTSECMSG
++#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%u.%u.%u.%u\t%u\t%u\t%u\t%u\t%u.%u.%u.%u"
++#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process " DEFAULTSECMSG
++#define GR_CAP_ACL_MSG "use of %s denied for " DEFAULTSECMSG
++#define GR_USRCHANGE_ACL_MSG "change to uid %d denied for " DEFAULTSECMSG
++#define GR_GRPCHANGE_ACL_MSG "change to gid %d denied for " DEFAULTSECMSG
++#define GR_REMOUNT_AUDIT_MSG "remount of %.30s by " DEFAULTSECMSG
++#define GR_UNMOUNT_AUDIT_MSG "unmount of %.30s by " DEFAULTSECMSG
++#define GR_MOUNT_AUDIT_MSG "mount %.30s to %.64s by " DEFAULTSECMSG
++#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by " DEFAULTSECMSG
++#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.63s) by " DEFAULTSECMSG
++#define GR_MSGQ_AUDIT_MSG "message queue created by " DEFAULTSECMSG
++#define GR_MSGQR_AUDIT_MSG "message queue of uid:%d euid:%d removed by " DEFAULTSECMSG
++#define GR_SEM_AUDIT_MSG "semaphore created by " DEFAULTSECMSG
++#define GR_SEMR_AUDIT_MSG "semaphore of uid:%d euid:%d removed by " DEFAULTSECMSG
++#define GR_SHM_AUDIT_MSG "shared memory of size %d created by " DEFAULTSECMSG
++#define GR_SHMR_AUDIT_MSG "shared memory of uid:%d euid:%d removed by " DEFAULTSECMSG
++#define GR_RESOURCE_MSG "attempted resource overstep by requesting %lu for %.16s against limit %lu by " DEFAULTSECMSG
++#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by " DEFAULTSECMSG
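
Every GR_*_MSG above ends in (or embeds) DEFAULTSECMSG, so the per-event text and the common subject/parent description are merged into one printk format string at compile time by C string-literal concatenation, with DEFAULTSECARGS from grinternal.h supplying the matching argument list. A small userspace illustration of the same trick; SUBJMSG, SUBJARGS and CHDIR_MSG are made-up stand-ins, not the kernel macros.

#include <stdio.h>

#define SUBJMSG  "%s[%s:%d] uid:%d"            /* stand-in for DEFAULTSECMSG  */
#define SUBJARGS path, comm, pid, uid          /* stand-in for DEFAULTSECARGS */
#define CHDIR_MSG "chdir to %s by " SUBJMSG    /* one literal after concatenation */

int main(void)
{
    const char *path = "/usr/bin/foo", *comm = "foo";
    int pid = 1234, uid = 1000;

    /* expands to a single format string plus the shared argument list */
    printf(CHDIR_MSG "\n", "/tmp", SUBJARGS);
    return 0;
}
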
+diff -urN linux-2.6.7/include/linux/grsecurity.h linux-2.6.7/include/linux/grsecurity.h
+--- linux-2.6.7/include/linux/grsecurity.h 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/include/linux/grsecurity.h 2004-08-03 17:47:38 -0400
+@@ -0,0 +1,192 @@
++#ifndef GR_SECURITY_H
++#define GR_SECURITY_H
++#include <linux/fs.h>
++#include <linux/binfmts.h>
++#include <linux/gracl.h>
++
++extern void gr_handle_brute_attach(struct task_struct *p);
++extern void gr_handle_brute_check(void);
++
++extern char gr_roletype_to_char(void);
++
++extern int gr_check_user_change(int real, int effective, int fs);
++extern int gr_check_group_change(int real, int effective, int fs);
++
++extern void gr_add_to_task_ip_table(struct task_struct *p);
++extern void gr_del_task_from_ip_table(struct task_struct *p);
++
++extern int gr_pid_is_chrooted(struct task_struct *p);
++extern int gr_handle_chroot_nice(void);
++extern int gr_handle_chroot_sysctl(const int op);
++extern int gr_handle_chroot_capset(struct task_struct *target);
++extern int gr_handle_chroot_setpriority(struct task_struct *p,
++ const int niceval);
++extern int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
++extern int gr_handle_chroot_chroot(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_handle_chroot_caps(struct task_struct *task);
++extern void gr_handle_chroot_chdir(struct dentry *dentry, struct vfsmount *mnt);
++extern int gr_handle_chroot_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++extern int gr_handle_chroot_mknod(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int mode);
++extern int gr_handle_chroot_mount(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const char *dev_name);
++extern int gr_handle_chroot_pivot(void);
++extern int gr_handle_chroot_unix(const pid_t pid);
++
++extern int gr_handle_rawio(const struct inode *inode);
++extern int gr_handle_nproc(void);
++
++extern void gr_handle_ioperm(void);
++extern void gr_handle_iopl(void);
++
++extern int gr_tpe_allow(const struct file *file);
++
++extern int gr_random_pid(void);
++
++extern void gr_log_forkfail(const int retval);
++extern void gr_log_timechange(void);
++extern void gr_log_signal(const int sig, const struct task_struct *t);
++extern void gr_log_chdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_log_chroot_exec(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_handle_exec_args(struct linux_binprm *bprm, char **argv);
++extern void gr_log_remount(const char *devname, const int retval);
++extern void gr_log_unmount(const char *devname, const int retval);
++extern void gr_log_mount(const char *from, const char *to, const int retval);
++extern void gr_log_msgget(const int ret, const int msgflg);
++extern void gr_log_msgrm(const uid_t uid, const uid_t cuid);
++extern void gr_log_semget(const int err, const int semflg);
++extern void gr_log_semrm(const uid_t uid, const uid_t cuid);
++extern void gr_log_shmget(const int err, const int shmflg, const size_t size);
++extern void gr_log_shmrm(const uid_t uid, const uid_t cuid);
++extern void gr_log_textrel(struct vm_area_struct *vma);
++
++extern int gr_handle_follow_link(const struct inode *parent,
++ const struct inode *inode,
++ const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_handle_fifo(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ const struct dentry *dir, const int flag,
++ const int acc_mode);
++extern int gr_handle_hardlink(const struct dentry *dentry,
++ const struct vfsmount *mnt,
++ struct inode *inode,
++ const int mode, const char *to);
++
++extern int gr_task_is_capable(struct task_struct *task, const int cap);
++extern int gr_is_capable_nolog(const int cap);
++extern void gr_learn_resource(const struct task_struct *task, const int limit,
++ const unsigned long wanted, const int gt);
++extern void gr_copy_label(struct task_struct *tsk);
++extern void gr_handle_crash(struct task_struct *task, const int sig);
++extern int gr_handle_signal(const struct task_struct *p, const int sig);
++extern int gr_check_crash_uid(const uid_t uid);
++extern int gr_check_protected_task(const struct task_struct *task);
++extern int gr_acl_handle_mmap(const struct file *file,
++ const unsigned long prot);
++extern int gr_acl_handle_mprotect(const struct file *file,
++ const unsigned long prot);
++extern int gr_check_hidden_task(const struct task_struct *tsk);
++extern __u32 gr_acl_handle_truncate(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_utime(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_access(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++extern __u32 gr_acl_handle_fchmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++extern __u32 gr_acl_handle_chmod(const struct dentry *dentry,
++ const struct vfsmount *mnt, mode_t mode);
++extern __u32 gr_acl_handle_chown(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_handle_ptrace(struct task_struct *task, const long request);
++extern int gr_handle_proc_ptrace(struct task_struct *task);
++extern int gr_handle_mmap(const struct file *filp, const unsigned long prot);
++extern __u32 gr_acl_handle_execve(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern int gr_check_crash_exec(const struct file *filp);
++extern int gr_acl_is_enabled(void);
++extern void gr_set_kernel_label(struct task_struct *task);
++extern void gr_set_role_label(struct task_struct *task, const uid_t uid,
++ const gid_t gid);
++extern int gr_set_proc_label(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_open(const struct dentry *dentry,
++ const struct vfsmount *mnt, const int fmode);
++extern __u32 gr_acl_handle_creat(const struct dentry *dentry,
++ const struct dentry *p_dentry,
++ const struct vfsmount *p_mnt, const int fmode,
++ const int imode);
++extern void gr_handle_create(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const int mode);
++extern __u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt);
++extern __u32 gr_acl_handle_rmdir(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_handle_delete(const ino_t ino, const dev_t dev);
++extern __u32 gr_acl_handle_unlink(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern __u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const char *from);
++extern __u32 gr_acl_handle_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt, const char *to);
++extern int gr_acl_handle_rename(struct dentry *new_dentry,
++ struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ struct dentry *old_dentry,
++ struct inode *old_parent_inode,
++ struct vfsmount *old_mnt, const char *newname);
++extern void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
++ struct dentry *old_dentry,
++ struct dentry *new_dentry,
++ struct vfsmount *mnt, const __u8 replace);
++extern __u32 gr_check_link(const struct dentry *new_dentry,
++ const struct dentry *parent_dentry,
++ const struct vfsmount *parent_mnt,
++ const struct dentry *old_dentry,
++ const struct vfsmount *old_mnt);
++extern __u32 gr_acl_handle_filldir(const struct dentry *dentry,
++ const struct vfsmount *mnt, const ino_t ino);
++extern __u32 gr_acl_handle_unix(const struct dentry *dentry,
++ const struct vfsmount *mnt);
++extern void gr_acl_handle_exit(void);
++extern void gr_acl_handle_psacct(struct task_struct *task, const long code);
++extern int gr_acl_handle_procpidmem(const struct task_struct *task);
++extern __u32 gr_cap_rtnetlink(void);
++
++#ifdef CONFIG_GRKERNSEC
++extern void gr_handle_mem_write(void);
++extern void gr_handle_kmem_write(void);
++extern void gr_handle_open_port(void);
++extern int gr_handle_mem_mmap(const unsigned long offset,
++ struct vm_area_struct *vma);
++
++extern __u16 ip_randomid(void);
++extern __u32 ip_randomisn(void);
++extern unsigned long get_random_long(void);
++
++extern int grsec_enable_dmesg;
++extern int grsec_enable_randid;
++extern int grsec_enable_randisn;
++extern int grsec_enable_randsrc;
++extern int grsec_enable_randrpc;
++#endif
++
++#endif
+diff -urN linux-2.6.7/include/linux/mm.h linux-2.6.7/include/linux/mm.h
+--- linux-2.6.7/include/linux/mm.h 2004-06-16 01:18:56 -0400
++++ linux-2.6.7/include/linux/mm.h 2004-06-25 17:41:53 -0400
+@@ -29,6 +29,7 @@
+ #include <asm/pgtable.h>
+ #include <asm/processor.h>
+ #include <asm/atomic.h>
++#include <asm/mman.h>
+
+ #ifndef MM_VM_SIZE
+ #define MM_VM_SIZE(mm) TASK_SIZE
+@@ -100,6 +101,11 @@
+ #ifdef CONFIG_NUMA
+ struct mempolicy *vm_policy; /* NUMA policy for the VMA */
+ #endif
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ unsigned long vm_mirror; /* PaX: mirror distance */
++#endif
++
+ };
+
+ /*
+@@ -135,6 +141,18 @@
+ #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
+ #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define VM_MIRROR 0x01000000 /* vma is mirroring another */
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++#define VM_MAYNOTWRITE 0x02000000 /* vma cannot be granted VM_WRITE any more */
++#endif
++
++#ifdef __VM_STACK_FLAGS
++#define VM_STACK_DEFAULT_FLAGS (0x00000033 | __VM_STACK_FLAGS)
++#endif
++
+ #ifndef VM_STACK_DEFAULT_FLAGS /* arch can override this */
+ #define VM_STACK_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS
+ #endif
+@@ -636,6 +654,10 @@
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+
++extern unsigned long __do_mmap_pgoff(struct file *file, unsigned long addr,
++ unsigned long len, unsigned long prot,
++ unsigned long flag, unsigned long pgoff);
++
+ static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+@@ -725,5 +747,11 @@
+ int in_gate_area(struct task_struct *task, unsigned long addr);
+ #endif
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
++#else
++static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _LINUX_MM_H */
+diff -urN linux-2.6.7/include/linux/mman.h linux-2.6.7/include/linux/mman.h
+--- linux-2.6.7/include/linux/mman.h 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/include/linux/mman.h 2004-06-25 17:41:53 -0400
+@@ -56,6 +56,11 @@
+ calc_vm_flag_bits(unsigned long flags)
+ {
+ return _calc_vm_trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN ) |
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ _calc_vm_trans(flags, MAP_MIRROR, VM_MIRROR) |
++#endif
++
+ _calc_vm_trans(flags, MAP_DENYWRITE, VM_DENYWRITE ) |
+ _calc_vm_trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE) |
+ _calc_vm_trans(flags, MAP_LOCKED, VM_LOCKED );
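
The mman.h hunk routes the new userspace mmap flag (MAP_MIRROR) into a VMA flag (VM_MIRROR) through the same _calc_vm_trans() translation already used for MAP_DENYWRITE and friends. The net effect of that translation is simply "if this MAP_ bit is set, emit that VM_ bit"; the sketch below shows only that effect, with trans_flag() and the *_X constants as illustrative stand-ins (the kernel macro achieves the same mapping arithmetically rather than with a branch).

#include <assert.h>

#define MAP_MIRROR_X 0x0400        /* illustrative values, not the real constants */
#define VM_MIRROR_X  0x01000000UL

/* Map one request-flag bit onto one vma-flag bit. */
static unsigned long trans_flag(unsigned long flags, unsigned long from, unsigned long to)
{
    return (flags & from) ? to : 0;
}

int main(void)
{
    assert(trans_flag(MAP_MIRROR_X, MAP_MIRROR_X, VM_MIRROR_X) == VM_MIRROR_X);
    assert(trans_flag(0, MAP_MIRROR_X, VM_MIRROR_X) == 0);
    return 0;
}
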
+diff -urN linux-2.6.7/include/linux/proc_fs.h linux-2.6.7/include/linux/proc_fs.h
+--- linux-2.6.7/include/linux/proc_fs.h 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/include/linux/proc_fs.h 2004-06-25 14:07:21 -0400
+@@ -221,7 +221,7 @@
+
+ #endif /* CONFIG_PROC_FS */
+
+-#if !defined(CONFIG_PROC_FS)
++#if !defined(CONFIG_PROC_FS) || !defined(CONFIG_PROC_KCORE)
+ static inline void kclist_add(struct kcore_list *new, void *addr, size_t size)
+ {
+ }
+diff -urN linux-2.6.7/include/linux/random.h linux-2.6.7/include/linux/random.h
+--- linux-2.6.7/include/linux/random.h 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/include/linux/random.h 2004-06-25 17:41:53 -0400
+@@ -69,6 +69,8 @@
+
+ extern __u32 secure_ipv6_id(__u32 *daddr);
+
++extern unsigned long pax_get_random_long(void);
++
+ #ifndef MODULE
+ extern struct file_operations random_fops, urandom_fops;
+ #endif
+diff -urN linux-2.6.7/include/linux/sched.h linux-2.6.7/include/linux/sched.h
+--- linux-2.6.7/include/linux/sched.h 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/include/linux/sched.h 2004-08-03 17:47:15 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/percpu.h>
+
+ struct exec_domain;
++struct linux_binprm;
+
+ /*
+ * cloning flags:
+@@ -234,6 +235,21 @@
+ struct kioctx *ioctx_list;
+
+ struct kioctx default_kioctx;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_exec; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ extern int mmlist_nr;
+@@ -513,6 +529,23 @@
+ struct mempolicy *mempolicy;
+ short il_next; /* could be shared with used_math */
+ #endif
++
++#ifdef CONFIG_GRKERNSEC
++ /* grsecurity */
++ struct acl_subject_label *acl;
++ struct acl_role_label *role;
++ struct file *exec_file;
++ u32 curr_ip;
++ u32 gr_saddr;
++ u32 gr_daddr;
++ u16 gr_sport;
++ u16 gr_dport;
++ u16 acl_role_id;
++ u8 acl_sp_role:1;
++ u8 used_accept:1;
++ u8 is_writable:1;
++ u8 brute:1;
++#endif
+ };
+
+ static inline pid_t process_group(struct task_struct *tsk)
+@@ -550,6 +583,29 @@
+ #define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
+ #define PF_SYNCWRITE 0x00200000 /* I am doing a sync write */
+
++#define PF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define PF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define PF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define PF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++#define PF_PAX_RANDEXEC 0x10000000 /* Randomize ET_EXEC base */
++#define PF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++extern unsigned int pax_aslr;
++#endif
++
++extern unsigned int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_flags(struct linux_binprm * bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_flags_func)(struct linux_binprm * bprm);
++#endif
++
+ #ifdef CONFIG_SMP
+ #define SCHED_LOAD_SCALE 128UL /* increase resolution of load */
+
+@@ -809,14 +865,29 @@
+ : on_sig_stack(sp) ? SS_ONSTACK : 0);
+ }
+
++extern int gr_task_is_capable(struct task_struct *task, const int cap);
++extern int gr_is_capable_nolog(const int cap);
+
+ #ifdef CONFIG_SECURITY
+ /* code is in security.c */
+ extern int capable(int cap);
++static inline int capable_nolog(int cap)
++{
++ return capable(cap);
++}
+ #else
+ static inline int capable(int cap)
+ {
+- if (cap_raised(current->cap_effective, cap)) {
++ if (cap_raised(current->cap_effective, cap) && gr_task_is_capable(current, cap)) {
++ current->flags |= PF_SUPERPRIV;
++ return 1;
++ }
++ return 0;
++}
++
++static inline int capable_nolog(int cap)
++{
++ if (cap_raised(current->cap_effective, cap) && gr_is_capable_nolog(cap)) {
+ current->flags |= PF_SUPERPRIV;
+ return 1;
+ }
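
The sched.h change layers an extra policy gate into capable(): the capability bit must be raised in cap_effective and the grsecurity hook must also agree, while capable_nolog() performs the same test through a non-logging hook for callers where a denial is expected and should not raise an alert. A minimal userspace sketch of the layered check follows; every name in it (fake_task, policy_allows, PF_SUPERPRIV_X) is a stand-in, not kernel code.

#include <stdbool.h>
#include <stdio.h>

#define PF_SUPERPRIV_X 0x00000100UL   /* illustrative flag value */

struct fake_task {
    unsigned long cap_effective;
    unsigned long flags;
};

static bool cap_bit_raised(unsigned long mask, int cap)
{
    return mask & (1UL << cap);
}

/* Stand-in for the grsecurity hook; a real policy could veto here. */
static bool policy_allows(struct fake_task *t, int cap)
{
    (void)t; (void)cap;
    return true;
}

static bool fake_capable(struct fake_task *t, int cap)
{
    if (cap_bit_raised(t->cap_effective, cap) && policy_allows(t, cap)) {
        t->flags |= PF_SUPERPRIV_X;   /* remember that privilege was exercised */
        return true;
    }
    return false;
}

int main(void)
{
    struct fake_task t = { .cap_effective = 1UL << 21, .flags = 0 };  /* bit 21 = CAP_SYS_ADMIN */
    printf("capable(21) = %d, flags = %#lx\n", fake_capable(&t, 21), t.flags);
    return 0;
}
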
+diff -urN linux-2.6.7/include/linux/shm.h linux-2.6.7/include/linux/shm.h
+--- linux-2.6.7/include/linux/shm.h 2004-06-16 01:19:43 -0400
++++ linux-2.6.7/include/linux/shm.h 2004-06-25 14:07:21 -0400
+@@ -84,6 +84,10 @@
+ time_t shm_ctim;
+ pid_t shm_cprid;
+ pid_t shm_lprid;
++#ifdef CONFIG_GRKERNSEC
++ time_t shm_createtime;
++ pid_t shm_lapid;
++#endif
+ };
+
+ /* shm_mode upper byte flags */
+diff -urN linux-2.6.7/include/linux/sysctl.h linux-2.6.7/include/linux/sysctl.h
+--- linux-2.6.7/include/linux/sysctl.h 2004-06-16 01:19:35 -0400
++++ linux-2.6.7/include/linux/sysctl.h 2004-06-25 17:47:51 -0400
+@@ -133,7 +133,20 @@
+ KERN_NGROUPS_MAX=63, /* int: NGROUPS_MAX */
+ KERN_SPARC_SCONS_PWROFF=64, /* int: serial console power-off halt */
+ KERN_HZ_TIMER=65, /* int: hz timer on or off */
++ KERN_GRSECURITY=68, /* grsecurity */
++
++#ifdef CONFIG_PAX_SOFTMODE
++ KERN_PAX=69, /* PaX control */
++#endif
++
++};
++
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_ASLR=1, /* PaX: disable/enable all randomization features */
++ PAX_SOFTMODE=2 /* PaX: disable/enable soft mode */
+ };
++#endif
+
+
+ /* CTL_VM names: */
+diff -urN linux-2.6.7/include/net/ip.h linux-2.6.7/include/net/ip.h
+--- linux-2.6.7/include/net/ip.h 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/include/net/ip.h 2004-06-25 14:07:21 -0400
+@@ -33,6 +33,11 @@
+ #include <net/route.h>
+ #include <net/arp.h>
+
++#ifdef CONFIG_GRKERNSEC_RANDID
++extern int grsec_enable_randid;
++extern __u16 ip_randomid(void);
++#endif
++
+ #ifndef _SNMP_H
+ #include <net/snmp.h>
+ #endif
+@@ -188,6 +193,13 @@
+
+ static inline void ip_select_ident(struct iphdr *iph, struct dst_entry *dst, struct sock *sk)
+ {
++
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ iph->id = htons(ip_randomid());
++ else
++#endif
++
+ if (iph->frag_off & htons(IP_DF)) {
+ /* This is only to work around buggy Windows95/2000
+ * VJ compression implementations. If the ID field
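
With CONFIG_GRKERNSEC_RANDID built in and the grsec_enable_randid sysctl switched on, ip_select_ident() takes a random 16-bit value from ip_randomid() instead of falling through to the stock ID selection. A tiny sketch of that decision; pick_ip_id(), rand() and the counter fallback are illustrative only and do not reflect the real kernel code path.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

/* Choose an IP identification value: a random 16-bit ID when randomization
 * is enabled, otherwise a simple counter standing in for the stock path.  */
static uint16_t pick_ip_id(int randomize, uint16_t *counter)
{
    if (randomize)
        return (uint16_t)(rand() & 0xffff);
    return (*counter)++;
}

int main(void)
{
    uint16_t ctr = 1;
    srand(1);
    printf("random id: %u, sequential id: %u\n",
           (unsigned)pick_ip_id(1, &ctr), (unsigned)pick_ip_id(0, &ctr));
    return 0;
}
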
+diff -urN linux-2.6.7/init/Kconfig linux-2.6.7/init/Kconfig
+--- linux-2.6.7/init/Kconfig 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/init/Kconfig 2004-06-25 14:07:21 -0400
+@@ -230,6 +230,7 @@
+ config KALLSYMS
+ bool "Load all symbols for debugging/kksymoops" if EMBEDDED
+ default y
++ depends on !GRKERNSEC_HIDESYM
+ help
+ Say Y here to let the kernel print out symbolic crash information and
+ symbolic stack backtraces. This increases the size of the kernel
+diff -urN linux-2.6.7/init/do_mounts.c linux-2.6.7/init/do_mounts.c
+--- linux-2.6.7/init/do_mounts.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/init/do_mounts.c 2004-06-25 14:07:21 -0400
+@@ -291,6 +291,7 @@
+ case -EINVAL:
+ continue;
+ }
++
+ /*
+ * Allow the user to distinguish between failed sys_open
+ * and bad superblock on root device.
+diff -urN linux-2.6.7/init/main.c linux-2.6.7/init/main.c
+--- linux-2.6.7/init/main.c 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/init/main.c 2004-06-25 17:33:13 -0400
+@@ -91,6 +91,7 @@
+ extern void populate_rootfs(void);
+ extern void driver_init(void);
+ extern void prepare_namespace(void);
++extern void grsecurity_init(void);
+
+ #ifdef CONFIG_TC
+ extern void tc_init(void);
+@@ -636,6 +637,7 @@
+ execute_command = "/init";
+ else
+ prepare_namespace();
++ grsecurity_init();
+
+ /*
+ * Ok, we have completed the initial bootup, and
+diff -urN linux-2.6.7/ipc/msg.c linux-2.6.7/ipc/msg.c
+--- linux-2.6.7/ipc/msg.c 2004-06-16 01:18:38 -0400
++++ linux-2.6.7/ipc/msg.c 2004-06-25 14:07:21 -0400
+@@ -24,6 +24,7 @@
+ #include <linux/list.h>
+ #include <linux/security.h>
+ #include <linux/sched.h>
++#include <linux/grsecurity.h>
+ #include <asm/current.h>
+ #include <asm/uaccess.h>
+ #include "util.h"
+@@ -226,6 +227,9 @@
+ msg_unlock(msq);
+ }
+ up(&msg_ids.sem);
++
++ gr_log_msgget(ret, msgflg);
++
+ return ret;
+ }
+
+@@ -475,6 +479,8 @@
+ break;
+ }
+ case IPC_RMID:
++ gr_log_msgrm(ipcp->uid, ipcp->cuid);
++
+ freeque (msq, msqid);
+ break;
+ }
+diff -urN linux-2.6.7/ipc/sem.c linux-2.6.7/ipc/sem.c
+--- linux-2.6.7/ipc/sem.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/ipc/sem.c 2004-06-25 14:07:21 -0400
+@@ -71,6 +71,7 @@
+ #include <linux/time.h>
+ #include <linux/smp_lock.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include "util.h"
+
+@@ -238,6 +239,9 @@
+ }
+
+ up(&sem_ids.sem);
++
++ gr_log_semget(err, semflg);
++
+ return err;
+ }
+
+@@ -804,6 +808,8 @@
+
+ switch(cmd){
+ case IPC_RMID:
++ gr_log_semrm(ipcp->uid, ipcp->cuid);
++
+ freeary(sma, semid);
+ err = 0;
+ break;
+diff -urN linux-2.6.7/ipc/shm.c linux-2.6.7/ipc/shm.c
+--- linux-2.6.7/ipc/shm.c 2004-06-16 01:19:23 -0400
++++ linux-2.6.7/ipc/shm.c 2004-06-25 14:07:21 -0400
+@@ -26,6 +26,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/shmem_fs.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+
+ #include "util.h"
+@@ -50,6 +51,14 @@
+ static int sysvipc_shm_read_proc(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
+ #endif
+
++#ifdef CONFIG_GRKERNSEC
++extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime, const uid_t cuid,
++ const int shmid);
++extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
++ const time_t shm_createtime);
++#endif
++
+ size_t shm_ctlmax = SHMMAX;
+ size_t shm_ctlall = SHMALL;
+ int shm_ctlmni = SHMMNI;
+@@ -217,6 +226,9 @@
+ shp->shm_lprid = 0;
+ shp->shm_atim = shp->shm_dtim = 0;
+ shp->shm_ctim = get_seconds();
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_createtime = get_seconds();
++#endif
+ shp->shm_segsz = size;
+ shp->shm_nattch = 0;
+ shp->id = shm_buildid(id,shp->shm_perm.seq);
+@@ -271,6 +283,8 @@
+ }
+ up(&shm_ids.sem);
+
++ gr_log_shmget(err, shmflg, size);
++
+ return err;
+ }
+
+@@ -569,6 +583,8 @@
+ if (err)
+ goto out_unlock_up;
+
++ gr_log_shmrm(shp->shm_perm.uid, shp->shm_perm.cuid);
++
+ if (shp->shm_nattch){
+ shp->shm_flags |= SHM_DEST;
+ /* Do not find it any more */
+@@ -707,9 +723,27 @@
+ return err;
+ }
+
++#ifdef CONFIG_GRKERNSEC
++ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
++ shp->shm_perm.cuid, shmid)) {
++ shm_unlock(shp);
++ return -EACCES;
++ }
++
++ if (!gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
++ shm_unlock(shp);
++ return -EACCES;
++ }
++#endif
++
+ file = shp->shm_file;
+ size = i_size_read(file->f_dentry->d_inode);
+ shp->shm_nattch++;
++
++#ifdef CONFIG_GRKERNSEC
++ shp->shm_lapid = current->pid;
++#endif
++
+ shm_unlock(shp);
+
+ down_write(&current->mm->mmap_sem);
+diff -urN linux-2.6.7/kernel/capability.c linux-2.6.7/kernel/capability.c
+--- linux-2.6.7/kernel/capability.c 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/kernel/capability.c 2004-06-25 14:07:21 -0400
+@@ -10,6 +10,7 @@
+ #include <linux/mm.h>
+ #include <linux/module.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+
+ unsigned securebits = SECUREBITS_DEFAULT; /* systemwide security settings */
+@@ -168,6 +169,11 @@
+ } else
+ target = current;
+
++ if (gr_handle_chroot_capset(target)) {
++ ret = -ESRCH;
++ goto out;
++ }
++
+ ret = -EPERM;
+
+ if (security_capset_check(target, &effective, &inheritable, &permitted))
+diff -urN linux-2.6.7/kernel/configs.c linux-2.6.7/kernel/configs.c
+--- linux-2.6.7/kernel/configs.c 2004-06-16 01:19:12 -0400
++++ linux-2.6.7/kernel/configs.c 2004-06-25 14:07:21 -0400
+@@ -78,8 +78,16 @@
+ struct proc_dir_entry *entry;
+
+ /* create the current config file */
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR, &proc_root);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("config.gz", S_IFREG | S_IRUSR | S_IRGRP, &proc_root);
++#endif
++#else
+ entry = create_proc_entry("config.gz", S_IFREG | S_IRUGO,
+ &proc_root);
++#endif
+ if (!entry)
+ return -ENOMEM;
+
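
kernel/configs.c here, and kallsyms.c and resource.c further below, all repeat one pattern: when GRKERNSEC_PROC_ADD is enabled the /proc entry is created root-readable only (S_IRUSR) or root-plus-group-readable (S_IRUSR | S_IRGRP) instead of world-readable. A sketch of just the mode selection, with the compile-time options modelled as plain booleans and the function name made up for illustration.

#include <stdio.h>
#include <sys/stat.h>

/* Pick /proc entry permission bits the way the GRKERNSEC_PROC_* options do:
 * root-only, root plus one group, or the stock world-readable mode.        */
static mode_t proc_mode(int restrict_proc, int user_only)
{
    if (!restrict_proc)
        return S_IRUSR | S_IRGRP | S_IROTH;   /* 0444, stock behaviour */
    return user_only ? S_IRUSR                /* 0400 */
                     : S_IRUSR | S_IRGRP;     /* 0440 */
}

int main(void)
{
    printf("%o %o %o\n", (unsigned)proc_mode(0, 0),
           (unsigned)proc_mode(1, 1), (unsigned)proc_mode(1, 0));
    return 0;
}
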
+diff -urN linux-2.6.7/kernel/exit.c linux-2.6.7/kernel/exit.c
+--- linux-2.6.7/kernel/exit.c 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/kernel/exit.c 2004-06-25 17:30:50 -0400
+@@ -23,6 +23,11 @@
+ #include <linux/mount.h>
+ #include <linux/proc_fs.h>
+ #include <linux/mempolicy.h>
++#include <linux/grsecurity.h>
++
++#ifdef CONFIG_GRKERNSEC
++extern rwlock_t grsec_exec_file_lock;
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -233,6 +238,15 @@
+ {
+ write_lock_irq(&tasklist_lock);
+
++#ifdef CONFIG_GRKERNSEC
++ write_lock(&grsec_exec_file_lock);
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++ write_unlock(&grsec_exec_file_lock);
++#endif
++
+ ptrace_unlink(current);
+ /* Reparent to init */
+ REMOVE_LINKS(current);
+@@ -240,6 +254,8 @@
+ current->real_parent = child_reaper;
+ SET_LINKS(current);
+
++ gr_set_kernel_label(current);
++
+ /* Set the exit signal to SIGCHLD so we signal init on exit */
+ current->exit_signal = SIGCHLD;
+
+@@ -334,6 +350,17 @@
+ vsnprintf(current->comm, sizeof(current->comm), name, args);
+ va_end(args);
+
++#ifdef CONFIG_GRKERNSEC
++ write_lock(&grsec_exec_file_lock);
++ if (current->exec_file) {
++ fput(current->exec_file);
++ current->exec_file = NULL;
++ }
++ write_unlock(&grsec_exec_file_lock);
++#endif
++
++ gr_set_kernel_label(current);
++
+ /*
+ * If we were started as result of loading a module, close all of the
+ * user space pages. We don't need them, and if we didn't close them
+@@ -793,6 +820,11 @@
+ }
+
+ acct_process(code);
++
++ gr_acl_handle_psacct(tsk, code);
++ gr_acl_handle_exit();
++ gr_del_task_from_ip_table(tsk);
++
+ __exit_mm(tsk);
+
+ exit_sem(tsk);
+diff -urN linux-2.6.7/kernel/fork.c linux-2.6.7/kernel/fork.c
+--- linux-2.6.7/kernel/fork.c 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/kernel/fork.c 2004-08-03 17:50:30 -0400
+@@ -36,6 +36,7 @@
+ #include <linux/mount.h>
+ #include <linux/audit.h>
+ #include <linux/rmap.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgtable.h>
+ #include <asm/pgalloc.h>
+@@ -279,7 +280,7 @@
+ mm->locked_vm = 0;
+ mm->mmap = NULL;
+ mm->mmap_cache = NULL;
+- mm->free_area_cache = TASK_UNMAPPED_BASE;
++ mm->free_area_cache = oldmm->free_area_cache;
+ mm->map_count = 0;
+ mm->rss = 0;
+ cpus_clear(mm->cpu_vm_mask);
+@@ -902,6 +903,9 @@
+ goto fork_out;
+
+ retval = -EAGAIN;
++
++ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->user->processes), 0);
++
+ if (atomic_read(&p->user->processes) >=
+ p->rlim[RLIMIT_NPROC].rlim_cur) {
+ if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
+@@ -998,6 +1002,8 @@
+ if (retval)
+ goto bad_fork_cleanup_namespace;
+
++ gr_copy_label(p);
++
+ p->set_child_tid = (clone_flags & CLONE_CHILD_SETTID) ? child_tidptr : NULL;
+ /*
+ * Clear TID on mm_release()?
+@@ -1139,6 +1145,9 @@
+ free_uid(p->user);
+ bad_fork_free:
+ free_task(p);
++
++ gr_log_forkfail(retval);
++
+ goto fork_out;
+ }
+
+@@ -1175,6 +1184,8 @@
+ int trace = 0;
+ long pid;
+
++ gr_handle_brute_check();
++
+ if (unlikely(current->ptrace)) {
+ trace = fork_traceflag (clone_flags);
+ if (trace)
+diff -urN linux-2.6.7/kernel/kallsyms.c linux-2.6.7/kernel/kallsyms.c
+--- linux-2.6.7/kernel/kallsyms.c 2004-06-16 01:20:19 -0400
++++ linux-2.6.7/kernel/kallsyms.c 2004-06-25 14:07:21 -0400
+@@ -313,7 +313,15 @@
+ {
+ struct proc_dir_entry *entry;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("kallsyms", S_IFREG | S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ entry = create_proc_entry("kallsyms", 0444, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &kallsyms_operations;
+ return 0;
+diff -urN linux-2.6.7/kernel/pid.c linux-2.6.7/kernel/pid.c
+--- linux-2.6.7/kernel/pid.c 2004-06-16 01:19:36 -0400
++++ linux-2.6.7/kernel/pid.c 2004-06-25 14:07:21 -0400
+@@ -25,6 +25,7 @@
+ #include <linux/init.h>
+ #include <linux/bootmem.h>
+ #include <linux/hash.h>
++#include <linux/grsecurity.h>
+
+ #define pid_hashfn(nr) hash_long((unsigned long)nr, pidhash_shift)
+ static struct list_head *pid_hash[PIDTYPE_MAX];
+@@ -99,10 +100,12 @@
+
+ int alloc_pidmap(void)
+ {
+- int pid, offset, max_steps = PIDMAP_ENTRIES + 1;
++ int pid = 0, offset, max_steps = PIDMAP_ENTRIES + 1;
+ pidmap_t *map;
+
+- pid = last_pid + 1;
++ pid = gr_random_pid();
++ if (!pid)
++ pid = last_pid + 1;
+ if (pid >= pid_max)
+ pid = RESERVED_PIDS;
+
+@@ -225,10 +228,16 @@
+ task_t *find_task_by_pid(int nr)
+ {
+ struct pid *pid = find_pid(PIDTYPE_PID, nr);
++ struct task_struct *task = NULL;
+
+ if (!pid)
+ return NULL;
+- return pid_task(pid->task_list.next, PIDTYPE_PID);
++ task = pid_task(pid->task_list.next, PIDTYPE_PID);
++
++ if (gr_pid_is_chrooted(task))
++ return NULL;
++
++ return task;
+ }
+
+ EXPORT_SYMBOL(find_task_by_pid);
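
alloc_pidmap() now asks gr_random_pid() for a starting PID; when PID randomization is disabled the hook returns 0 and allocation falls back to the usual last_pid + 1, wrapping to RESERVED_PIDS once pid_max is reached. The userspace sketch below mirrors only that selection step; the constants, the random source, and next_pid_candidate() are stand-ins, and the real allocator still probes the PID bitmap afterwards.

#include <stdio.h>
#include <stdlib.h>

#define PID_MAX       32768
#define RESERVED_PIDS 300

static int last_pid;
static int randomize_pids = 1;

/* Returning 0 means "randomization disabled", like gr_random_pid() above. */
static int random_pid(void)
{
    if (!randomize_pids)
        return 0;
    return RESERVED_PIDS + rand() % (PID_MAX - RESERVED_PIDS);
}

static int next_pid_candidate(void)
{
    int pid = random_pid();
    if (!pid)
        pid = last_pid + 1;       /* stock sequential allocation */
    if (pid >= PID_MAX)
        pid = RESERVED_PIDS;      /* wrap, keeping low PIDs reserved */
    return pid;                   /* the real allocator then scans the bitmap */
}

int main(void)
{
    last_pid = PID_MAX - 1;
    srand(1);
    printf("candidate: %d\n", next_pid_candidate());
    return 0;
}
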
+diff -urN linux-2.6.7/kernel/printk.c linux-2.6.7/kernel/printk.c
+--- linux-2.6.7/kernel/printk.c 2004-06-16 01:20:26 -0400
++++ linux-2.6.7/kernel/printk.c 2004-06-25 14:07:21 -0400
+@@ -30,6 +30,7 @@
+ #include <linux/smp.h>
+ #include <linux/security.h>
+ #include <linux/bootmem.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+
+@@ -249,6 +250,11 @@
+ char c;
+ int error = 0;
+
++#ifdef CONFIG_GRKERNSEC_DMESG
++ if (!capable(CAP_SYS_ADMIN) && grsec_enable_dmesg)
++ return -EPERM;
++#endif
++
+ error = security_syslog(type);
+ if (error)
+ return error;
+diff -urN linux-2.6.7/kernel/resource.c linux-2.6.7/kernel/resource.c
+--- linux-2.6.7/kernel/resource.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/kernel/resource.c 2004-06-25 14:07:21 -0400
+@@ -134,10 +134,27 @@
+ {
+ struct proc_dir_entry *entry;
+
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("ioports", S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("ioports", S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ entry = create_proc_entry("ioports", 0, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &proc_ioports_operations;
++
++#ifdef CONFIG_GRKERNSEC_PROC_ADD
++#ifdef CONFIG_GRKERNSEC_PROC_USER
++ entry = create_proc_entry("iomem", S_IRUSR, NULL);
++#elif CONFIG_GRKERNSEC_PROC_USERGROUP
++ entry = create_proc_entry("iomem", S_IRUSR | S_IRGRP, NULL);
++#endif
++#else
+ entry = create_proc_entry("iomem", 0, NULL);
++#endif
+ if (entry)
+ entry->proc_fops = &proc_iomem_operations;
+ return 0;
+diff -urN linux-2.6.7/kernel/sched.c linux-2.6.7/kernel/sched.c
+--- linux-2.6.7/kernel/sched.c 2004-06-16 01:19:51 -0400
++++ linux-2.6.7/kernel/sched.c 2004-06-25 14:07:21 -0400
+@@ -40,6 +40,7 @@
+ #include <linux/cpu.h>
+ #include <linux/percpu.h>
+ #include <linux/kthread.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/unistd.h>
+
+@@ -2630,6 +2631,8 @@
+ return -EPERM;
+ if (increment < -40)
+ increment = -40;
++ if (gr_handle_chroot_nice())
++ return -EPERM;
+ }
+ if (increment > 40)
+ increment = 40;
+diff -urN linux-2.6.7/kernel/signal.c linux-2.6.7/kernel/signal.c
+--- linux-2.6.7/kernel/signal.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/kernel/signal.c 2004-07-09 09:34:17 -0400
+@@ -21,6 +21,7 @@
+ #include <linux/binfmts.h>
+ #include <linux/security.h>
+ #include <linux/ptrace.h>
++#include <linux/grsecurity.h>
+ #include <asm/param.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -613,6 +614,8 @@
+ && (current->uid ^ t->suid) && (current->uid ^ t->uid)
+ && !capable(CAP_KILL))
+ return error;
++ if (gr_handle_signal(t, sig))
++ return error;
+ return security_task_kill(t, info, sig);
+ }
+
+@@ -770,11 +773,13 @@
+ (((sig) < SIGRTMIN) && sigismember(&(sigptr)->signal, (sig)))
+
+
+-static int
++int
+ specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
+ {
+ int ret = 0;
+
++ gr_log_signal(sig, t);
++
+ if (!irqs_disabled())
+ BUG();
+ #ifdef CONFIG_SMP
+@@ -825,6 +830,8 @@
+ ret = specific_send_sig_info(sig, info, t);
+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
+
++ gr_handle_crash(t, sig);
++
+ return ret;
+ }
+
+diff -urN linux-2.6.7/kernel/sys.c linux-2.6.7/kernel/sys.c
+--- linux-2.6.7/kernel/sys.c 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/kernel/sys.c 2004-06-25 14:07:21 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/security.h>
+ #include <linux/dcookies.h>
+ #include <linux/suspend.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/io.h>
+@@ -294,6 +295,12 @@
+ error = -EACCES;
+ goto out;
+ }
++
++ if (gr_handle_chroot_setpriority(p, niceval)) {
++ error = -EACCES;
++ goto out;
++ }
++
+ no_nice = security_task_setnice(p, niceval);
+ if (no_nice) {
+ error = no_nice;
+@@ -598,6 +605,9 @@
+ if (rgid != (gid_t) -1 ||
+ (egid != (gid_t) -1 && egid != old_rgid))
+ current->sgid = new_egid;
++
++ gr_set_role_label(current, current->uid, new_rgid);
++
+ current->fsgid = new_egid;
+ current->egid = new_egid;
+ current->gid = new_rgid;
+@@ -625,6 +635,9 @@
+ current->mm->dumpable=0;
+ wmb();
+ }
++
++ gr_set_role_label(current, current->uid, gid);
++
+ current->gid = current->egid = current->sgid = current->fsgid = gid;
+ }
+ else if ((gid == current->gid) || (gid == current->sgid))
+@@ -663,6 +676,9 @@
+ current->mm->dumpable = 0;
+ wmb();
+ }
++
++ gr_set_role_label(current, new_ruid, current->gid);
++
+ current->uid = new_ruid;
+ return 0;
+ }
+@@ -763,6 +779,9 @@
+ } else if ((uid != current->uid) && (uid != new_suid))
+ return -EPERM;
+
++ if (gr_check_crash_uid(uid))
++ return -EPERM;
++
+ if (old_euid != uid)
+ {
+ current->mm->dumpable = 0;
+@@ -862,8 +881,10 @@
+ current->egid = egid;
+ }
+ current->fsgid = current->egid;
+- if (rgid != (gid_t) -1)
++ if (rgid != (gid_t) -1) {
++ gr_set_role_label(current, current->uid, rgid);
+ current->gid = rgid;
++ }
+ if (sgid != (gid_t) -1)
+ current->sgid = sgid;
+ return 0;
+diff -urN linux-2.6.7/kernel/sysctl.c linux-2.6.7/kernel/sysctl.c
+--- linux-2.6.7/kernel/sysctl.c 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/kernel/sysctl.c 2004-06-25 17:41:53 -0400
+@@ -46,6 +46,14 @@
+ #endif
+
+ #if defined(CONFIG_SYSCTL)
++#include <linux/grsecurity.h>
++#include <linux/grinternal.h>
++
++extern __u32 gr_handle_sysctl(const ctl_table *table, const void *oldval,
++ const void *newval);
++extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
++ const int op);
++extern int gr_handle_chroot_sysctl(const int op);
+
+ /* External variables not in a header file. */
+ extern int panic_timeout;
+@@ -142,6 +150,32 @@
+ #ifdef CONFIG_UNIX98_PTYS
+ extern ctl_table pty_table[];
+ #endif
++extern ctl_table grsecurity_table[];
++
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++ {
++ .ctl_name = PAX_ASLR,
++ .procname = "aslr",
++ .data = &pax_aslr,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ },
++#endif
++
++ {
++ .ctl_name = PAX_SOFTMODE,
++ .procname = "softmode",
++ .data = &pax_softmode,
++ .maxlen = sizeof(unsigned int),
++ .mode = 0600,
++ .proc_handler = &proc_dointvec,
++ }
++};
++#endif
+
+ /* /proc declarations: */
+
+@@ -636,6 +670,14 @@
+ .mode = 0444,
+ .proc_handler = &proc_dointvec,
+ },
++#ifdef CONFIG_GRKERNSEC_SYSCTL
++ {
++ .ctl_name = KERN_GRSECURITY,
++ .procname = "grsecurity",
++ .mode = 0500,
++ .child = grsecurity_table,
++ },
++#endif
+ { .ctl_name = 0 }
+ };
+
+@@ -905,6 +947,16 @@
+ .mode = 0644,
+ .proc_handler = &proc_dointvec,
+ },
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {
++ .ctl_name = KERN_PAX,
++ .procname = "pax",
++ .mode = 0500,
++ .child = pax_table,
++ },
++#endif
++
+ { .ctl_name = 0 }
+ };
+
+@@ -989,6 +1041,10 @@
+ static inline int ctl_perm(ctl_table *table, int op)
+ {
+ int error;
++ if (table->de && gr_handle_sysctl_mod(table->de->parent->name, table->de->name, op))
++ return -EACCES;
++ if (gr_handle_chroot_sysctl(op))
++ return -EACCES;
+ error = security_sysctl(table, op);
+ if (error)
+ return error;
+@@ -1025,6 +1081,10 @@
+ table = table->child;
+ goto repeat;
+ }
++
++ if (!gr_handle_sysctl(table, oldval, newval))
++ return -EACCES;
++
+ error = do_sysctl_strategy(table, name, nlen,
+ oldval, oldlenp,
+ newval, newlen, context);
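
ctl_perm() gains two early grsecurity vetoes, gr_handle_sysctl_mod() for per-entry write policy and gr_handle_chroot_sysctl() to block sysctl writes from a chroot, before the stock security_sysctl() check runs, and the table walk separately consults gr_handle_sysctl() once the target entry is found. A compact sketch of the ctl_perm() chain only; the hook names below are placeholders that return non-zero to deny.

#include <errno.h>
#include <stdio.h>

/* Placeholder policy hooks: non-zero means "deny". */
static int entry_policy_denies(const char *dir, const char *name, int op)
{
    (void)dir; (void)name; (void)op;
    return 0;
}

static int chroot_denies(int op)
{
    (void)op;
    return 0;
}

static int lsm_check(int op)
{
    (void)op;
    return 0;
}

static int ctl_perm_sketch(const char *dir, const char *name, int op)
{
    if (entry_policy_denies(dir, name, op))
        return -EACCES;            /* per-entry veto fires first        */
    if (chroot_denies(op))
        return -EACCES;            /* no sysctl writes from a chroot    */
    return lsm_check(op);          /* finally, the stock LSM decision   */
}

int main(void)
{
    printf("ctl_perm -> %d\n", ctl_perm_sketch("kernel", "grsecurity", 002));
    return 0;
}
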
+diff -urN linux-2.6.7/kernel/time.c linux-2.6.7/kernel/time.c
+--- linux-2.6.7/kernel/time.c 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/kernel/time.c 2004-06-25 14:07:21 -0400
+@@ -28,6 +28,7 @@
+ #include <linux/timex.h>
+ #include <linux/errno.h>
+ #include <linux/smp_lock.h>
++#include <linux/grsecurity.h>
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+
+@@ -82,6 +83,9 @@
+
+ tv.tv_nsec = 0;
+ do_settimeofday(&tv);
++
++ gr_log_timechange();
++
+ return 0;
+ }
+
+@@ -183,6 +187,8 @@
+ return -EFAULT;
+ }
+
++ gr_log_timechange();
++
+ return do_sys_settimeofday(tv ? &new_ts : NULL, tz ? &new_tz : NULL);
+ }
+
+diff -urN linux-2.6.7/kernel/timer.c linux-2.6.7/kernel/timer.c
+--- linux-2.6.7/kernel/timer.c 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/kernel/timer.c 2004-06-25 14:07:21 -0400
+@@ -31,6 +31,7 @@
+ #include <linux/time.h>
+ #include <linux/jiffies.h>
+ #include <linux/cpu.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/unistd.h>
+@@ -792,6 +793,9 @@
+
+ psecs = (p->utime += user);
+ psecs += (p->stime += system);
++
++ gr_learn_resource(p, RLIMIT_CPU, psecs / HZ, 1);
++
+ if (psecs / HZ > p->rlim[RLIMIT_CPU].rlim_cur) {
+ /* Send SIGXCPU every second.. */
+ if (!(psecs % HZ))
+diff -urN linux-2.6.7/mm/filemap.c linux-2.6.7/mm/filemap.c
+--- linux-2.6.7/mm/filemap.c 2004-06-16 01:19:12 -0400
++++ linux-2.6.7/mm/filemap.c 2004-06-25 17:41:53 -0400
+@@ -27,6 +27,8 @@
+ #include <linux/pagevec.h>
+ #include <linux/blkdev.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
++
+ /*
+ * This is needed for the following functions:
+ * - try_to_release_page
+@@ -1423,6 +1425,12 @@
+
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++#endif
++
+ file_accessed(file);
+ vma->vm_ops = &generic_file_vm_ops;
+ return 0;
+@@ -1721,6 +1729,7 @@
+ *pos = i_size_read(inode);
+
+ if (limit != RLIM_INFINITY) {
++ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
+ if (*pos >= limit) {
+ send_sig(SIGXFSZ, current, 0);
+ return -EFBIG;
+diff -urN linux-2.6.7/mm/madvise.c linux-2.6.7/mm/madvise.c
+--- linux-2.6.7/mm/madvise.c 2004-06-16 01:19:02 -0400
++++ linux-2.6.7/mm/madvise.c 2004-06-25 17:41:53 -0400
+@@ -13,8 +13,42 @@
+ * We can potentially split a vm area into separate
+ * areas, each area with its own behavior.
+ */
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static long __madvise_behavior(struct vm_area_struct * vma, unsigned long start,
++ unsigned long end, int behavior);
++
++static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
++ unsigned long end, int behavior)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m, * prev_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + vma->vm_mirror;
++ end_m = end + vma->vm_mirror;
++ error = __madvise_behavior(vma_m, start_m, end_m, behavior);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: madvise bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++
++ return __madvise_behavior(vma, start, end, behavior);
++}
++
++static long __madvise_behavior(struct vm_area_struct * vma, unsigned long start,
++ unsigned long end, int behavior)
++#else
+ static long madvise_behavior(struct vm_area_struct * vma, unsigned long start,
+ unsigned long end, int behavior)
++#endif
+ {
+ struct mm_struct * mm = vma->vm_mm;
+ int error;
+diff -urN linux-2.6.7/mm/memory.c linux-2.6.7/mm/memory.c
+--- linux-2.6.7/mm/memory.c 2004-06-16 01:19:22 -0400
++++ linux-2.6.7/mm/memory.c 2004-06-25 17:41:53 -0400
+@@ -46,6 +46,7 @@
+ #include <linux/rmap.h>
+ #include <linux/module.h>
+ #include <linux/init.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/pgalloc.h>
+ #include <asm/uaccess.h>
+@@ -1004,6 +1005,81 @@
+ update_mmu_cache(vma, address, entry);
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * mm->page_table_lock is held on entry and is not released on exit or inside
++ * to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static void pax_mirror_fault(struct mm_struct *mm, struct vm_area_struct * vma,
++ unsigned long address, pte_t *pte)
++{
++ unsigned long address_m;
++ struct vm_area_struct * vma_m = NULL;
++ pte_t * pte_m, entry_m;
++ struct page * page_m;
++
++ address_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(mm, address_m);
++ BUG_ON(!vma_m || vma_m->vm_start != address_m);
++
++ address_m = address + vma->vm_mirror;
++
++ {
++ pgd_t *pgd_m;
++ pmd_t *pmd_m;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pmd_m = pmd_offset(pgd_m, address_m);
++ pte_m = pte_offset_map_nested(pmd_m, address_m);
++ }
++
++ if (pte_present(*pte_m)) {
++ flush_cache_page(vma_m, address_m);
++ flush_icache_page(vma_m, pte_page(*pte_m));
++ }
++ entry_m = ptep_get_and_clear(pte_m);
++ if (pte_present(entry_m))
++ flush_tlb_page(vma_m, address_m);
++
++ if (pte_none(entry_m)) {
++ ++mm->rss;
++ } else if (pte_present(entry_m)) {
++ page_m = pte_page(entry_m);
++ if (PageReserved(page_m))
++ ++mm->rss;
++ else
++ page_remove_rmap(page_m);
++ page_cache_release(page_m);
++ } else if (!pte_file(entry_m)) {
++ free_swap_and_cache(pte_to_swp_entry(entry_m));
++ ++mm->rss;
++ } else {
++ printk(KERN_ERR "PAX: VMMIRROR: bug in mirror_fault: %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, address_m, vma_m->vm_start);
++ }
++
++ page_m = pte_page(*pte);
++ entry_m = mk_pte(page_m, vma_m->vm_page_prot);
++ if (pte_write(*pte) && (vma_m->vm_flags & VM_WRITE))
++ entry_m = pte_mkdirty(pte_mkwrite(entry_m));
++ if (!PageReserved(page_m)) {
++ page_cache_get(page_m);
++ /*
++ * we can test PG_anon without holding page_map_lock because
++ * we hold the page table lock and have a reference to page_m
++ */
++ if (PageAnon(page_m))
++ page_add_anon_rmap(page_m, vma_m, address_m);
++ else
++ page_add_file_rmap(page_m);
++ }
++ ptep_establish(vma_m, address_m, pte_m, entry_m);
++ update_mmu_cache(vma_m, address_m, entry_m);
++ pte_unmap_nested(pte_m);
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -1090,6 +1166,12 @@
+
+ /* Free the old page.. */
+ new_page = old_page;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pax_mirror_fault(mm, vma, address, page_table);
++#endif
++
+ }
+ pte_unmap(page_table);
+ page_cache_release(new_page);
+@@ -1216,6 +1298,7 @@
+
+ do_expand:
+ limit = current->rlim[RLIMIT_FSIZE].rlim_cur;
++ gr_learn_resource(current, RLIMIT_FSIZE, offset, 1);
+ if (limit != RLIM_INFINITY && offset > limit)
+ goto out_sig;
+ if (offset > inode->i_sb->s_maxbytes)
+@@ -1375,6 +1458,12 @@
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, pte);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pax_mirror_fault(mm, vma, address, page_table);
++#endif
++
+ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ out:
+@@ -1429,10 +1518,16 @@
+ }
+
+ set_pte(page_table, entry);
+- pte_unmap(page_table);
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, entry);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pax_mirror_fault(mm, vma, addr, page_table);
++#endif
++
++ pte_unmap(page_table);
+ spin_unlock(&mm->page_table_lock);
+ out:
+ return VM_FAULT_MINOR;
+@@ -1539,6 +1634,15 @@
+ page_add_anon_rmap(new_page, vma, address);
+ } else
+ page_add_file_rmap(new_page);
++
++ /* no need to invalidate: a not-present page shouldn't be cached */
++ update_mmu_cache(vma, address, entry);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR)
++ pax_mirror_fault(mm, vma, address, page_table);
++#endif
++
+ pte_unmap(page_table);
+ } else {
+ /* One of our sibling threads was faster, back out. */
+@@ -1548,8 +1652,6 @@
+ goto out;
+ }
+
+- /* no need to invalidate: a not-present page shouldn't be cached */
+- update_mmu_cache(vma, address, entry);
+ spin_unlock(&mm->page_table_lock);
+ out:
+ return ret;
+@@ -1658,6 +1760,11 @@
+ pgd_t *pgd;
+ pmd_t *pmd;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ unsigned long address_m = 0UL;
++ struct vm_area_struct * vma_m = NULL;
++#endif
++
+ __set_current_state(TASK_RUNNING);
+ pgd = pgd_offset(mm, address);
+
+@@ -1671,6 +1778,45 @@
+ * and the SMP-safe atomic PTE updates.
+ */
+ spin_lock(&mm->page_table_lock);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ pgd_t *pgd_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m;
++
++ address_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(mm, address_m);
++
++ /* PaX: sanity checks */
++ if (!vma_m) {
++ spin_unlock(&mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: fault bug, %08lx, %p, %08lx, %p\n",
++ address, vma, address_m, vma_m);
++ return VM_FAULT_SIGBUS;
++ } else if (!(vma_m->vm_flags & VM_MIRROR) ||
++ vma_m->vm_start != address_m ||
++ vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start)
++ {
++ spin_unlock(&mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: fault bug2, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return VM_FAULT_SIGBUS;
++ }
++
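++ /*
++ * Allocate the mirror's pmd/pte now, while page_table_lock is held,
++ * so that pax_mirror_fault() can later walk them without allocating
++ * anything itself.
++ */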
++ address_m = address + vma->vm_mirror;
++ pgd_m = pgd_offset(mm, address_m);
++ pmd_m = pmd_alloc(mm, pgd_m, address_m);
++ if (pmd_m)
++ pte_m = pte_alloc_map(mm, pmd_m, address_m);
++ if (!pmd_m || !pte_m) {
++ spin_unlock(&mm->page_table_lock);
++ return VM_FAULT_OOM;
++ }
++ pte_unmap(pte_m);
++ }
++#endif
++
+ pmd = pmd_alloc(mm, pgd, address);
+
+ if (pmd) {
+diff -urN linux-2.6.7/mm/mlock.c linux-2.6.7/mm/mlock.c
+--- linux-2.6.7/mm/mlock.c 2004-06-16 01:18:57 -0400
++++ linux-2.6.7/mm/mlock.c 2004-06-25 17:41:53 -0400
+@@ -7,11 +7,43 @@
+
+ #include <linux/mman.h>
+ #include <linux/mm.h>
++#include <linux/grsecurity.h>
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static int __mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags);
+
+ static int mlock_fixup(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, unsigned int newflags)
+ {
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(vma->vm_mm, start_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + vma->vm_mirror;
++ end_m = end + vma->vm_mirror;
++ error = __mlock_fixup(vma_m, start_m, end_m, newflags);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: mlock bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++ return __mlock_fixup(vma, start, end, newflags);
++}
++
++static int __mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags)
++#else
++static int mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags)
++#endif
++{
+ struct mm_struct * mm = vma->vm_mm;
+ int pages;
+ int ret = 0;
+@@ -68,6 +100,17 @@
+ return -EINVAL;
+ if (end == start)
+ return 0;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma(current->mm, start);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -118,6 +161,7 @@
+ lock_limit >>= PAGE_SHIFT;
+
+ /* check against resource limits */
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
+ if (locked <= lock_limit)
+ error = do_mlock(start, len, 1);
+ up_write(&current->mm->mmap_sem);
+@@ -154,6 +198,16 @@
+ for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
+ unsigned int newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (vma->vm_end > SEGMEXEC_TASK_SIZE)
++ break;
++ } else
++#endif
++
++ if (vma->vm_end > TASK_SIZE)
++ break;
++
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
+@@ -177,6 +231,7 @@
+ lock_limit >>= PAGE_SHIFT;
+
+ ret = -ENOMEM;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm, 1);
+ if (current->mm->total_vm <= lock_limit)
+ ret = do_mlockall(flags);
+ out:
+diff -urN linux-2.6.7/mm/mmap.c linux-2.6.7/mm/mmap.c
+--- linux-2.6.7/mm/mmap.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/mm/mmap.c 2004-06-25 22:44:53 -0400
+@@ -23,6 +23,7 @@
+ #include <linux/mount.h>
+ #include <linux/mempolicy.h>
+ #include <linux/rmap.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+@@ -137,6 +138,7 @@
+
+ /* Check against rlimit.. */
+ rlim = current->rlim[RLIMIT_DATA].rlim_cur;
++ gr_learn_resource(current, RLIMIT_DATA, brk - mm->start_data, 1);
+ if (rlim < RLIM_INFINITY && brk - mm->start_data > rlim)
+ goto out;
+
+@@ -495,7 +497,11 @@
+ * If the vma has a ->close operation then the driver probably needs to release
+ * per-vma resources, so we don't attempt to merge those.
+ */
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED | VM_MIRROR)
++#else
+ #define VM_SPECIAL (VM_IO | VM_DONTCOPY | VM_DONTEXPAND | VM_RESERVED)
++#endif
+
+ static inline int is_mergeable_vma(struct vm_area_struct *vma,
+ struct file *file, unsigned long vm_flags)
+@@ -729,6 +735,42 @@
+ unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long pgoff)
+ {
++ unsigned long ret = -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || (addr && addr > SEGMEXEC_TASK_SIZE-len)))
++ return ret;
++#endif
++
++ ret = __do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
++
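++ /*
++ * Under SEGMEXEC, private mappings that are (or may later become)
++ * executable get a mirror at +SEGMEXEC_TASK_SIZE so the code segment
++ * sees the same pages; if the mirror cannot be set up, the original
++ * mapping is torn down and the error is returned instead.
++ */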
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && ret < TASK_SIZE && ((flags & MAP_TYPE) == MAP_PRIVATE)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && (!(current->flags & PF_PAX_MPROTECT) || ((prot & PROT_EXEC) && file && !(prot & PROT_WRITE)))
++#endif
++
++ )
++ {
++ unsigned long ret_m;
++ prot = prot & PROT_EXEC ? prot : PROT_NONE;
++ ret_m = __do_mmap_pgoff(NULL, ret + SEGMEXEC_TASK_SIZE, 0UL, prot, flags | MAP_MIRROR | MAP_FIXED, ret);
++ if (ret_m >= TASK_SIZE) {
++ do_munmap(current->mm, ret, len);
++ ret = ret_m;
++ }
++ }
++#endif
++
++ return ret;
++}
++
++unsigned long __do_mmap_pgoff(struct file * file, unsigned long addr,
++ unsigned long len, unsigned long prot,
++ unsigned long flags, unsigned long pgoff)
++{
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+ struct inode *inode;
+@@ -739,6 +781,28 @@
+ int accountable = 1;
+ unsigned long charged = 0;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct * vma_m = NULL;
++
++ if (flags & MAP_MIRROR) {
++ /* PaX: sanity checks, to be removed when proved to be stable */
++ if (file || len || ((flags & MAP_TYPE) != MAP_PRIVATE))
++ return -EINVAL;
++
++ vma_m = find_vma(mm, pgoff);
++
++ if (!vma_m || is_vm_hugetlb_page(vma_m) ||
++ vma_m->vm_start != pgoff ||
++ (vma_m->vm_flags & VM_MIRROR) ||
++ (!(vma_m->vm_flags & VM_WRITE) && (prot & PROT_WRITE)))
++ return -EINVAL;
++
++ file = vma_m->vm_file;
++ pgoff = vma_m->vm_pgoff;
++ len = vma_m->vm_end - vma_m->vm_start;
++ }
++#endif
++
+ if (file) {
+ if (is_file_hugepages(file))
+ accountable = 0;
+@@ -770,7 +834,7 @@
+ /* Obtain the address to map to. we verify (or select) it and ensure
+ * that it represents a valid section of the address space.
+ */
+- addr = get_unmapped_area(file, addr, len, pgoff, flags);
++ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
+ if (addr & ~PAGE_MASK)
+ return addr;
+
+@@ -781,6 +845,30 @@
+ vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
+ mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++ if (file && (file->f_vfsmnt->mnt_flags & MNT_NOEXEC))
++ vm_flags &= ~VM_MAYEXEC;
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->flags & PF_PAX_MPROTECT) {
++ if (!file || (prot & PROT_WRITE))
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++ else
++ vm_flags &= ~VM_MAYWRITE;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (file && (flags & MAP_MIRROR) && (vm_flags & VM_EXEC))
++ vma_m->vm_flags &= ~VM_MAYWRITE;
++#endif
++
++ }
++#endif
++
++ }
++#endif
++
+ if (flags & MAP_LOCKED) {
+ if (!capable(CAP_IPC_LOCK))
+ return -EPERM;
+@@ -790,6 +878,7 @@
+ if (vm_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+@@ -837,6 +926,11 @@
+ /*
+ * Set pgoff according to addr for anon_vma.
+ */
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (!(flags & MAP_MIRROR))
++#endif
++
+ pgoff = addr >> PAGE_SHIFT;
+ break;
+ default:
+@@ -848,6 +942,9 @@
+ if (error)
+ return error;
+
++ if (!gr_acl_handle_mmap(file, prot))
++ return -EACCES;
++
+ /* Clear old maps */
+ error = -ENOMEM;
+ munmap_back:
+@@ -859,6 +956,7 @@
+ }
+
+ /* Check against address space limit. */
++ gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1);
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+@@ -905,6 +1003,13 @@
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = vm_flags;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((file || !(current->flags & PF_PAX_PAGEEXEC)) && (vm_flags & (VM_READ|VM_WRITE)))
++ vma->vm_page_prot = protection_map[(vm_flags | VM_EXEC) & 0x0f];
++ else
++#endif
++
+ vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_pgoff = pgoff;
+
+@@ -929,6 +1034,14 @@
+ goto free_vma;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (flags & MAP_MIRROR) {
++ vma_m->vm_flags |= VM_MIRROR;
++ vma_m->vm_mirror = vma->vm_start - vma_m->vm_start;
++ vma->vm_mirror = vma_m->vm_start - vma->vm_start;
++ }
++#endif
++
+ /* We set VM_ACCOUNT in a shared mapping's vm_flags, to inform
+ * shmem_zero_setup (perhaps called through /dev/zero's ->mmap)
+ * that memory reservation must be checked; but that reservation
+@@ -970,6 +1083,7 @@
+ pgoff, flags & MAP_NONBLOCK);
+ down_write(&mm->mmap_sem);
+ }
++ track_exec_limit(mm, addr, addr + len, vm_flags);
+ return addr;
+
+ unmap_and_free_vma:
+@@ -989,6 +1103,7 @@
+ }
+
+ EXPORT_SYMBOL(do_mmap_pgoff);
++EXPORT_SYMBOL(__do_mmap_pgoff);
+
+ /* Get an address range which is currently unmapped.
+ * For shmat() with addr=0.
+@@ -1008,11 +1123,17 @@
+ {
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+- unsigned long start_addr;
++ unsigned long start_addr, task_unmapped_base = TASK_UNMAPPED_BASE;
+
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ task_unmapped_base += mm->delta_mmap;
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
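++ /*
++ * With RANDMMAP active, the address hint is only honoured for
++ * anonymous mappings; file-backed mappings always go through the
++ * normal search, whose base has been shifted by delta_mmap above.
++ */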
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(mm, addr);
+@@ -1030,8 +1151,8 @@
+ * Start a new search - just in case we missed
+ * some holes.
+ */
+- if (start_addr != TASK_UNMAPPED_BASE) {
+- start_addr = addr = TASK_UNMAPPED_BASE;
++ if (start_addr != task_unmapped_base) {
++ start_addr = addr = task_unmapped_base;
+ goto full_search;
+ }
+ return -ENOMEM;
+@@ -1172,6 +1293,11 @@
+ {
+ unsigned long grow;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct * vma_m = NULL;
++ unsigned long address_m = 0UL;
++#endif
++
+ if (!(vma->vm_flags & VM_GROWSUP))
+ return -EFAULT;
+
+@@ -1198,17 +1324,76 @@
+ return -ENOMEM;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ address_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(vma->vm_mm, address_m);
++ if (!vma_m || vma_m->vm_start != address_m ||
++ !(vma_m->vm_flags & VM_MIRROR) ||
++ vma->vm_end - vma->vm_start !=
++ vma_m->vm_end - vma_m->vm_start) {
++ anon_vma_unlock(vma);
++ vm_unacct_memory(grow);
++ printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return -ENOMEM;
++ }
++
++ gr_learn_resource(current, RLIMIT_STACK, address_m - vma_m->vm_start, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT, 1);
++ if (vma_m->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT, 1);
++
++ address_m = address + vma->vm_mirror;
++ if (address_m - vma_m->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
++ ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma_m->vm_flags & VM_LOCKED) &&
++ ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
++ anon_vma_unlock(vma);
++ vm_unacct_memory(grow);
++ return -ENOMEM;
++ }
++ } else {
++#endif
++
++ gr_learn_resource(current, RLIMIT_STACK, address - vma->vm_start, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma->vm_mm->total_vm + grow) << PAGE_SHIFT, 1);
++ if (vma->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma->vm_mm->locked_vm + grow) << PAGE_SHIFT, 1);
++
+ if (address - vma->vm_start > current->rlim[RLIMIT_STACK].rlim_cur ||
+ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+- current->rlim[RLIMIT_AS].rlim_cur) {
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma->vm_flags & VM_LOCKED) &&
++ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ anon_vma_unlock(vma);
+ vm_unacct_memory(grow);
+ return -ENOMEM;
+ }
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ }
++#endif
++
+ vma->vm_end = address;
+ vma->vm_mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ vma_m->vm_end = address_m;
++ vma_m->vm_mm->total_vm += grow;
++ if (vma_m->vm_flags & VM_LOCKED)
++ vma_m->vm_mm->locked_vm += grow;
++ track_exec_limit(vma_m->vm_mm, vma_m->vm_start, vma_m->vm_end, vma_m->vm_flags);
++ }
++#endif
++
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
+ anon_vma_unlock(vma);
+ return 0;
+ }
+@@ -1237,6 +1422,11 @@
+ {
+ unsigned long grow;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct * vma_m = NULL;
++ unsigned long address_m = 0UL;
++#endif
++
+ /*
+ * We must make sure the anon_vma is allocated
+ * so that the anon_vma locking is not a noop.
+@@ -1258,19 +1448,80 @@
+ anon_vma_unlock(vma);
+ return -ENOMEM;
+ }
+-
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ address_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(vma->vm_mm, address_m);
++ if (!vma_m || vma_m->vm_start != address_m ||
++ !(vma_m->vm_flags & VM_MIRROR) ||
++ vma->vm_end - vma->vm_start !=
++ vma_m->vm_end - vma_m->vm_start ||
++ vma->anon_vma != vma_m->anon_vma) {
++ anon_vma_unlock(vma);
++ vm_unacct_memory(grow);
++ printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return -ENOMEM;
++ }
++
++ gr_learn_resource(current, RLIMIT_STACK, vma_m->vm_end - address_m, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT, 1);
++ if (vma_m->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT, 1);
++
++ address_m = address + vma->vm_mirror;
++ if (vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur ||
++ ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma_m->vm_flags & VM_LOCKED) &&
++ ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
++ anon_vma_unlock(vma);
++ vm_unacct_memory(grow);
++ return -ENOMEM;
++ }
++ } else {
++#endif
++
++ gr_learn_resource(current, RLIMIT_STACK, vma->vm_end - address, 1);
++ gr_learn_resource(current, RLIMIT_AS, (vma->vm_mm->total_vm + grow) << PAGE_SHIFT, 1);
++ if (vma->vm_flags & VM_LOCKED)
++ gr_learn_resource(current, RLIMIT_MEMLOCK, (vma->vm_mm->locked_vm + grow) << PAGE_SHIFT, 1);
++
+ if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) >
+- current->rlim[RLIMIT_AS].rlim_cur) {
++ current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma->vm_flags & VM_LOCKED) &&
++ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) >
++ current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ anon_vma_unlock(vma);
+ vm_unacct_memory(grow);
+ return -ENOMEM;
+ }
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ }
++#endif
++
+ vma->vm_start = address;
+ vma->vm_pgoff -= grow;
+ vma->vm_mm->total_vm += grow;
+ if (vma->vm_flags & VM_LOCKED)
+ vma->vm_mm->locked_vm += grow;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ vma_m->vm_start = address_m;
++ vma_m->vm_pgoff -= grow;
++ vma_m->vm_mm->total_vm += grow;
++ if (vma_m->vm_flags & VM_LOCKED)
++ vma_m->vm_mm->locked_vm += grow;
++ track_exec_limit(vma_m->vm_mm, vma_m->vm_start, vma_m->vm_end, vma_m->vm_flags);
++ }
++#endif
++
++ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
+ anon_vma_unlock(vma);
+ return 0;
+ }
+@@ -1373,15 +1624,15 @@
+ {
+ size_t len = area->vm_end - area->vm_start;
+
+- area->vm_mm->total_vm -= len >> PAGE_SHIFT;
++ mm->total_vm -= len >> PAGE_SHIFT;
+ if (area->vm_flags & VM_LOCKED)
+- area->vm_mm->locked_vm -= len >> PAGE_SHIFT;
++ mm->locked_vm -= len >> PAGE_SHIFT;
+ /*
+ * Is this a new hole at the lowest possible address?
+ */
+ if (area->vm_start >= TASK_UNMAPPED_BASE &&
+- area->vm_start < area->vm_mm->free_area_cache)
+- area->vm_mm->free_area_cache = area->vm_start;
++ area->vm_start < mm->free_area_cache)
++ mm->free_area_cache = area->vm_start;
+
+ remove_vm_struct(area);
+ }
+@@ -1435,21 +1686,73 @@
+ */
+ static void
+ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
+- struct vm_area_struct *prev, unsigned long end)
++ struct vm_area_struct *prev, unsigned long *start, unsigned long *end)
+ {
+ struct vm_area_struct **insertion_point;
+ struct vm_area_struct *tail_vma = NULL;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ unsigned long start_m;
++ struct vm_area_struct *vma_m, *head_vma = vma, *mirrors = NULL, *head_vma_m = NULL;
++#endif
++
+ insertion_point = (prev ? &prev->vm_next : &mm->mmap);
+ do {
+ rb_erase(&vma->vm_rb, &mm->mm_rb);
+ mm->map_count--;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((vma->vm_flags & VM_MIRROR) &&
++ vma->vm_start + vma->vm_mirror >= *start &&
++ vma->vm_start + vma->vm_mirror < *end)
++ {
++ mm->mmap_cache = NULL; /* Kill the cache. */
++ start_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma(mm, start_m);
++ if (vma_m && (vma_m->vm_flags & VM_MIRROR) && vma_m->vm_start == start_m) {
++ vma->vm_flags &= ~VM_MIRROR;
++ vma_m->vm_flags &= ~VM_MIRROR;
++ } else
++ printk("PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, vma->vm_start);
++ }
++#endif
++
+ tail_vma = vma;
+ vma = vma->vm_next;
+- } while (vma && vma->vm_start < end);
++ } while (vma && vma->vm_start < *end);
+ *insertion_point = vma;
+ tail_vma->vm_next = NULL;
+ mm->mmap_cache = NULL; /* Kill the cache. */
++
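++ /*
++ * Second pass: for every detached vma still marked VM_MIRROR, pull its
++ * mirror out of the rb-tree and the vma list, chain the mirrors after
++ * tail_vma so the caller unmaps them as well, and widen [start, end)
++ * to cover them.
++ */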
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ for (; head_vma; head_vma = head_vma->vm_next) {
++ struct vm_area_struct *prev_m;
++
++ if (!(head_vma->vm_flags & VM_MIRROR))
++ continue;
++
++ start_m = head_vma->vm_start + head_vma->vm_mirror;
++ vma_m = find_vma_prev(mm, start_m, &prev_m);
++ rb_erase(&vma_m->vm_rb, &mm->mm_rb);
++ mm->map_count--;
++ insertion_point = prev_m ? &prev_m->vm_next : &mm->mmap;
++ *insertion_point = vma_m->vm_next;
++ if (head_vma_m) {
++ mirrors->vm_next = vma_m;
++ mirrors = vma_m;
++ } else
++ head_vma_m = mirrors = vma_m;
++ mirrors->vm_next = NULL;
++ if (vma_m->vm_start < *start)
++ *start = vma_m->vm_start;
++ if (vma_m->vm_end > *end)
++ *end = vma_m->vm_end;
++ mm->mmap_cache = NULL; /* Kill the cache. */
++ }
++ if (head_vma_m)
++ tail_vma->vm_next = head_vma_m;
++#endif
++
+ }
+
+ /*
+@@ -1512,6 +1815,10 @@
+ unsigned long end;
+ struct vm_area_struct *mpnt, *prev, *last;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct *mpnt_m = NULL, *last_m;
++#endif
++
+ if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
+ return -EINVAL;
+
+@@ -1548,6 +1855,20 @@
+ * places tmp vma above, and higher split_vma places tmp vma below.
+ */
+ if (start > mpnt->vm_start) {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (mpnt->vm_flags & VM_MIRROR) {
++ unsigned long start_m = mpnt->vm_start + mpnt->vm_mirror;
++
++ mpnt_m = find_vma(mm, start_m);
++ if (!mpnt_m || !(mpnt_m->vm_flags & VM_MIRROR) || mpnt_m->vm_start != start_m)
++ return -EINVAL;
++ start_m = start + mpnt->vm_mirror;
++ if (split_vma(mm, mpnt_m, start_m, 0))
++ return -ENOMEM;
++ }
++#endif
++
+ if (split_vma(mm, mpnt, start, 0))
+ return -ENOMEM;
+ prev = mpnt;
+@@ -1556,6 +1877,20 @@
+ /* Does it split the last one? */
+ last = find_vma(mm, end);
+ if (last && end > last->vm_start) {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (last->vm_flags & VM_MIRROR) {
++ unsigned long end_m = last->vm_start + last->vm_mirror;
++
++ last_m = find_vma(mm, end_m);
++ if (!last_m || !(last_m->vm_flags & VM_MIRROR) || last_m->vm_start != end_m)
++ return -EINVAL;
++ end_m = end + last->vm_mirror;
++ if (split_vma(mm, last_m, end_m, 1))
++ return -ENOMEM;
++ }
++#endif
++
+ if (split_vma(mm, last, end, 1))
+ return -ENOMEM;
+ }
+@@ -1564,7 +1899,7 @@
+ /*
+ * Remove the vma's, and unmap the actual pages
+ */
+- detach_vmas_to_be_unmapped(mm, mpnt, prev, end);
++ detach_vmas_to_be_unmapped(mm, mpnt, prev, &start, &end);
+ spin_lock(&mm->page_table_lock);
+ unmap_region(mm, mpnt, prev, start, end);
+ spin_unlock(&mm->page_table_lock);
+@@ -1572,6 +1907,8 @@
+ /* Fix up all other VM information */
+ unmap_vma_list(mm, mpnt);
+
++ track_exec_limit(mm, start, end, 0UL);
++
+ return 0;
+ }
+
+@@ -1582,6 +1919,12 @@
+ int ret;
+ struct mm_struct *mm = current->mm;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+@@ -1593,7 +1936,31 @@
+ * anonymous maps. eventually we may be able to do some
+ * brk-specific accounting here.
+ */
++#if defined(CONFIG_PAX_SEGMEXEC) && defined(CONFIG_PAX_MPROTECT)
++unsigned long __do_brk(unsigned long addr, unsigned long len);
++
++unsigned long do_brk(unsigned long addr, unsigned long len)
++{
++ unsigned long ret;
++
++ ret = __do_brk(addr, len);
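++ /*
++ * With SEGMEXEC enabled but MPROTECT disabled, brk() memory can still
++ * be made executable, so mirror the newly expanded heap into the code
++ * segment; on failure the expansion is undone and the error returned.
++ */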
++ if (ret == addr && (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_MPROTECT)) == PF_PAX_SEGMEXEC) {
++ unsigned long ret_m;
++
++ ret_m = __do_mmap_pgoff(NULL, addr + SEGMEXEC_TASK_SIZE, 0UL, PROT_NONE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, addr);
++ if (ret_m > TASK_SIZE) {
++ do_munmap(current->mm, addr, len);
++ ret = ret_m;
++ }
++ }
++
++ return ret;
++}
++
++unsigned long __do_brk(unsigned long addr, unsigned long len)
++#else
+ unsigned long do_brk(unsigned long addr, unsigned long len)
++#endif
+ {
+ struct mm_struct * mm = current->mm;
+ struct vm_area_struct * vma, * prev;
+@@ -1605,6 +1972,13 @@
+ if (!len)
+ return addr;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr)
++ return -EINVAL;
++ } else
++#endif
++
+ if ((addr + len) > TASK_SIZE || (addr + len) < addr)
+ return -EINVAL;
+
+@@ -1614,6 +1988,7 @@
+ if (mm->def_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+ locked += len;
++ gr_learn_resource(current, RLIMIT_MEMLOCK, locked, 1);
+ if (locked > current->rlim[RLIMIT_MEMLOCK].rlim_cur)
+ return -EAGAIN;
+ }
+@@ -1630,6 +2005,7 @@
+ }
+
+ /* Check against address space limits *after* clearing old maps... */
++ gr_learn_resource(current, RLIMIT_AS, (mm->total_vm << PAGE_SHIFT) + len, 1);
+ if ((mm->total_vm << PAGE_SHIFT) + len
+ > current->rlim[RLIMIT_AS].rlim_cur)
+ return -ENOMEM;
+@@ -1642,6 +2018,18 @@
+
+ flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->flags & PF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ /* Can we just expand an old private anonymous mapping? */
+ if (vma_merge(mm, prev, addr, addr + len, flags,
+ NULL, NULL, pgoff, NULL))
+@@ -1662,6 +2050,13 @@
+ vma->vm_end = addr + len;
+ vma->vm_pgoff = pgoff;
+ vma->vm_flags = flags;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE)))
++ vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f];
++ else
++#endif
++
+ vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma_link(mm, vma, prev, rb_link, rb_parent);
+ out:
+@@ -1670,6 +2065,7 @@
+ mm->locked_vm += len >> PAGE_SHIFT;
+ make_pages_present(addr, addr + len);
+ }
++ track_exec_limit(mm, addr, addr + len, flags);
+ return addr;
+ }
+
+diff -urN linux-2.6.7/mm/mprotect.c linux-2.6.7/mm/mprotect.c
+--- linux-2.6.7/mm/mprotect.c 2004-06-16 01:20:27 -0400
++++ linux-2.6.7/mm/mprotect.c 2004-06-28 10:50:16 -0400
+@@ -17,12 +17,19 @@
+ #include <linux/highmem.h>
+ #include <linux/security.h>
+ #include <linux/mempolicy.h>
++#include <linux/grsecurity.h>
++
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#include <linux/fs.h>
++#endif
+
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+ #include <asm/cacheflush.h>
+ #include <asm/tlbflush.h>
++#include <asm/mmu_context.h>
+
+ static inline void
+ change_pte_range(pmd_t *pmd, unsigned long address,
+@@ -108,6 +115,90 @@
+ return;
+ }
+
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++/* called while holding the mmap semaphore for writing */
++static inline void establish_user_cs_limit(struct mm_struct *mm, unsigned long start, unsigned long end)
++{
++ struct vm_area_struct *vma = find_vma(mm, start);
++
++ for (; vma && vma->vm_start < end; vma = vma->vm_next)
++ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
++
++}
++
++void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
++{
++ if (current->flags & PF_PAX_PAGEEXEC) {
++ unsigned long oldlimit, newlimit = 0UL;
++
++ spin_lock(&mm->page_table_lock);
++ oldlimit = mm->context.user_cs_limit;
++ if ((prot & VM_EXEC) && oldlimit < end)
++ /* USER_CS limit moved up */
++ newlimit = end;
++ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
++ /* USER_CS limit moved down */
++ newlimit = start;
++
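++ /*
++ * user_cs_limit tracks the highest executable address of the mm; when
++ * it grows, the pages in the newly covered range are re-protected
++ * after the lock is dropped via establish_user_cs_limit().
++ */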
++ if (newlimit) {
++ mm->context.user_cs_limit = newlimit;
++
++#ifdef CONFIG_SMP
++ wmb();
++ cpus_clear(mm->context.cpu_user_cs_mask);
++ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
++#endif
++
++ set_user_cs(mm, smp_processor_id());
++ }
++ spin_unlock(&mm->page_table_lock);
++ if (newlimit == end)
++ establish_user_cs_limit(mm, oldlimit, end);
++ }
++}
++#endif
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static int __mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
++ unsigned long start, unsigned long end, unsigned int newflags);
++
++static int mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m, * prev_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + vma->vm_mirror;
++ vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + vma->vm_mirror;
++ end_m = end + vma->vm_mirror;
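++ /*
++ * Under SEGMEXEC the mirror only needs to be accessible while the
++ * original is executable: dropping VM_EXEC strips all access bits
++ * from the mirror, otherwise the new flags are applied to it too.
++ */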
++ if ((current->flags & PF_PAX_SEGMEXEC) && !(newflags & VM_EXEC))
++ error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, vma_m->vm_flags & ~(VM_READ | VM_WRITE | VM_EXEC));
++ else
++ error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, newflags);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: mprotect bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++
++ return __mprotect_fixup(vma, pprev, start, end, newflags);
++}
++
++static int __mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ struct mm_struct * mm = vma->vm_mm;
++ unsigned long charged = 0;
++ pgprot_t newprot;
++ pgoff_t pgoff;
++ int error;
++#else
+ static int
+ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
+ unsigned long start, unsigned long end, unsigned int newflags)
+@@ -122,6 +213,7 @@
+ *pprev = vma;
+ return 0;
+ }
++#endif
+
+ /*
+ * If we make a private mapping writable we increase our commit;
+@@ -140,6 +232,12 @@
+ }
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE)))
++ newprot = protection_map[(newflags | VM_EXEC) & 0xf];
++ else
++#endif
++
+ newprot = protection_map[newflags & 0xf];
+
+ /*
+@@ -185,6 +283,69 @@
+ return error;
+ }
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static inline void pax_handle_maywrite(struct vm_area_struct * vma, unsigned long start)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p, p_dyn;
++ elf_dyn dyn;
++ unsigned long i, j = 65536UL / sizeof(struct elf_phdr);
++
++#ifndef CONFIG_PAX_NOELFRELOCS
++ if ((vma->vm_start != start) ||
++ !vma->vm_file ||
++ !(vma->vm_flags & VM_MAYEXEC) ||
++ (vma->vm_flags & VM_MAYNOTWRITE))
++#endif
++
++ return;
++
++ if (0 > kernel_read(vma->vm_file, 0UL, (char*)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) ||
++#else
++ elf_h.e_type != ET_DYN ||
++#endif
++
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > j)
++ return;
++
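++ /*
++ * Locate the PT_DYNAMIC program header, then walk its entries: a
++ * DT_TEXTREL tag (or DF_TEXTREL in DT_FLAGS) marks an object that
++ * needs text relocations, triggering the one-time VM_MAYWRITE grant.
++ */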
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (0 > kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char*)&elf_p, sizeof(elf_p)))
++ return;
++ if (elf_p.p_type == PT_DYNAMIC) {
++ p_dyn = elf_p;
++ j = i;
++ }
++ }
++ if (elf_h.e_phnum <= j)
++ return;
++
++ i = 0UL;
++ do {
++ if (0 > kernel_read(vma->vm_file, p_dyn.p_offset + i*sizeof(dyn), (char*)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE;
++ gr_log_textrel(vma);
++ return;
++ }
++ i++;
++ } while (dyn.d_tag != DT_NULL);
++ return;
++}
++#endif
++
+ asmlinkage long
+ sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+ {
+@@ -202,6 +363,17 @@
+ end = start + len;
+ if (end < start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM))
+ return -EINVAL;
+ if (end == start)
+@@ -236,6 +408,16 @@
+ if (start > vma->vm_start)
+ prev = vma;
+
++ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
++ error = -EACCES;
++ goto out;
++ }
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE))
++ pax_handle_maywrite(vma, start);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned int newflags;
+
+@@ -253,6 +435,12 @@
+ goto out;
+ }
+
++#ifdef CONFIG_PAX_MPROTECT
++ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
++ if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE))
++ newflags &= ~VM_MAYWRITE;
++#endif
++
+ error = security_file_mprotect(vma, prot);
+ if (error)
+ goto out;
+@@ -276,6 +464,9 @@
+ goto out;
+ }
+ }
++
++ track_exec_limit(current->mm, start, end, vm_flags);
++
+ out:
+ up_write(&current->mm->mmap_sem);
+ return error;
+diff -urN linux-2.6.7/mm/mremap.c linux-2.6.7/mm/mremap.c
+--- linux-2.6.7/mm/mremap.c 2004-06-16 01:19:35 -0400
++++ linux-2.6.7/mm/mremap.c 2004-07-01 16:33:43 -0400
+@@ -129,6 +129,12 @@
+ if (dst) {
+ pte_t pte;
+ pte = ptep_clear_flush(vma, old_addr, src);
++
++#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
++ if ((current->flags & PF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_EXEC))
++ pte_exprotect(pte);
++#endif
++
+ set_pte(dst, pte);
+ } else
+ error = -ENOMEM;
+@@ -267,6 +273,18 @@
+ if (!new_len)
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (new_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-new_len ||
++ old_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-old_len)
++ goto out;
++ } else
++#endif
++
++ if (new_len > TASK_SIZE || addr > TASK_SIZE-new_len ||
++ old_len > TASK_SIZE || addr > TASK_SIZE-old_len)
++ goto out;
++
+ /* new_addr is only valid if MREMAP_FIXED is specified */
+ if (flags & MREMAP_FIXED) {
+ if (new_addr & ~PAGE_MASK)
+@@ -274,6 +292,13 @@
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (new_len > SEGMEXEC_TASK_SIZE || new_addr > SEGMEXEC_TASK_SIZE-new_len)
++ goto out;
++ } else
++#endif
++
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+
+@@ -317,6 +342,16 @@
+ ret = -EINVAL;
+ goto out;
+ }
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) &&
++ (vma->vm_flags & VM_MIRROR))
++ {
++ ret = -EINVAL;
++ goto out;
++ }
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto out;
+@@ -365,6 +400,7 @@
+ addr + new_len);
+ }
+ ret = addr;
++ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
+ goto out;
+ }
+ }
+@@ -375,8 +411,8 @@
+ */
+ ret = -ENOMEM;
+ if (flags & MREMAP_MAYMOVE) {
++ unsigned long map_flags = 0;
+ if (!(flags & MREMAP_FIXED)) {
+- unsigned long map_flags = 0;
+ if (vma->vm_flags & VM_MAYSHARE)
+ map_flags |= MAP_SHARED;
+
+@@ -386,7 +422,12 @@
+ if (new_addr & ~PAGE_MASK)
+ goto out;
+ }
++ map_flags = vma->vm_flags;
+ ret = move_vma(vma, addr, old_len, new_len, new_addr);
++ if (!(ret & ~PAGE_MASK)) {
++ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
++ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
++ }
+ }
+ out:
+ if (ret & ~PAGE_MASK)
+diff -urN linux-2.6.7/mm/rmap.c linux-2.6.7/mm/rmap.c
+--- linux-2.6.7/mm/rmap.c 2004-06-16 01:20:03 -0400
++++ linux-2.6.7/mm/rmap.c 2004-06-25 17:41:53 -0400
+@@ -83,6 +83,19 @@
+ list_add(&vma->anon_vma_node, &anon_vma->head);
+ allocated = NULL;
+ }
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct *vma_m;
++
++ vma_m = find_vma(vma->vm_mm, vma->vm_start + vma->vm_mirror);
++ BUG_ON(!vma_m || vma_m->vm_start != vma->vm_start + vma->vm_mirror);
++ BUG_ON(vma_m->anon_vma || vma->vm_pgoff != vma_m->vm_pgoff);
++ vma_m->anon_vma = anon_vma;
++ __anon_vma_link(vma_m);
++ }
++#endif
++
+ spin_unlock(&mm->page_table_lock);
+ if (unlikely(allocated))
+ anon_vma_free(allocated);
+diff -urN linux-2.6.7/net/ipv4/af_inet.c linux-2.6.7/net/ipv4/af_inet.c
+--- linux-2.6.7/net/ipv4/af_inet.c 2004-06-16 01:18:58 -0400
++++ linux-2.6.7/net/ipv4/af_inet.c 2004-06-25 14:07:21 -0400
+@@ -87,6 +87,7 @@
+ #include <linux/init.h>
+ #include <linux/poll.h>
+ #include <linux/netfilter_ipv4.h>
++#include <linux/grsecurity.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/system.h>
+@@ -387,7 +388,12 @@
+ else
+ inet->pmtudisc = IP_PMTUDISC_WANT;
+
+- inet->id = 0;
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ inet->id = htons(ip_randomid());
++ else
++#endif
++ inet->id = 0;
+
+ sock_init_data(sock, sk);
+ sk_set_owner(sk, THIS_MODULE);
+diff -urN linux-2.6.7/net/ipv4/ip_output.c linux-2.6.7/net/ipv4/ip_output.c
+--- linux-2.6.7/net/ipv4/ip_output.c 2004-06-16 01:20:22 -0400
++++ linux-2.6.7/net/ipv4/ip_output.c 2004-06-25 14:07:21 -0400
+@@ -64,6 +64,7 @@
+ #include <linux/proc_fs.h>
+ #include <linux/stat.h>
+ #include <linux/init.h>
++#include <linux/grsecurity.h>
+
+ #include <net/snmp.h>
+ #include <net/ip.h>
+@@ -1164,6 +1165,12 @@
+ iph->tos = inet->tos;
+ iph->tot_len = htons(skb->len);
+ iph->frag_off = df;
++
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ iph->id = htons(ip_randomid());
++ else
++#endif
+ if (!df) {
+ __ip_select_ident(iph, &rt->u.dst, 0);
+ } else {
+diff -urN linux-2.6.7/net/ipv4/netfilter/Kconfig linux-2.6.7/net/ipv4/netfilter/Kconfig
+--- linux-2.6.7/net/ipv4/netfilter/Kconfig 2004-06-16 01:19:52 -0400
++++ linux-2.6.7/net/ipv4/netfilter/Kconfig 2004-06-25 14:07:21 -0400
+@@ -225,6 +225,21 @@
+
+ To compile it as a module, choose M here. If unsure, say N.
+
++config IP_NF_MATCH_STEALTH
++ tristate "stealth match support"
++ depends on IP_NF_IPTABLES
++ help
++ Enabling this option will drop all SYN packets coming to unserved TCP
++ ports as well as all packets coming to unserved UDP ports. If you
++ are using your system to route any type of packets (e.g. via NAT),
++ you should put this module at the end of your ruleset, since it only
++ checks whether a port is being listened on by this machine itself;
++ it does not take into account that a packet might be destined for a
++ host on your internal network behind the NAT.
++
++ To compile it as a module, choose M here. If unsure, say N.
++
+ config IP_NF_MATCH_HELPER
+ tristate "Helper match support"
+ depends on IP_NF_CONNTRACK && IP_NF_IPTABLES
+diff -urN linux-2.6.7/net/ipv4/netfilter/Makefile linux-2.6.7/net/ipv4/netfilter/Makefile
+--- linux-2.6.7/net/ipv4/netfilter/Makefile 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/net/ipv4/netfilter/Makefile 2004-06-25 14:07:21 -0400
+@@ -65,6 +65,8 @@
+ obj-$(CONFIG_IP_NF_MATCH_CONNTRACK) += ipt_conntrack.o
+ obj-$(CONFIG_IP_NF_MATCH_TCPMSS) += ipt_tcpmss.o
+
++obj-$(CONFIG_IP_NF_MATCH_STEALTH) += ipt_stealth.o
++
+ obj-$(CONFIG_IP_NF_MATCH_PHYSDEV) += ipt_physdev.o
+
+ # targets
+diff -urN linux-2.6.7/net/ipv4/netfilter/ipt_stealth.c linux-2.6.7/net/ipv4/netfilter/ipt_stealth.c
+--- linux-2.6.7/net/ipv4/netfilter/ipt_stealth.c 1969-12-31 19:00:00 -0500
++++ linux-2.6.7/net/ipv4/netfilter/ipt_stealth.c 2004-06-25 14:07:21 -0400
+@@ -0,0 +1,112 @@
++/* Kernel module to add stealth support.
++ *
++ * Copyright (C) 2002 Brad Spengler <spender@grsecurity.net>
++ *
++ */
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/skbuff.h>
++#include <linux/net.h>
++#include <linux/sched.h>
++#include <linux/inet.h>
++#include <linux/stddef.h>
++
++#include <net/ip.h>
++#include <net/sock.h>
++#include <net/tcp.h>
++#include <net/udp.h>
++#include <net/route.h>
++#include <net/inet_common.h>
++
++#include <linux/netfilter_ipv4/ip_tables.h>
++
++MODULE_LICENSE("GPL");
++
++extern struct sock *udp_v4_lookup(u32 saddr, u16 sport, u32 daddr, u16 dport, int dif);
++
++static int
++match(const struct sk_buff *skb,
++ const struct net_device *in,
++ const struct net_device *out,
++ const void *matchinfo,
++ int offset,
++ int *hotdrop)
++{
++ struct iphdr *ip = skb->nh.iph;
++ struct tcphdr th;
++ struct udphdr uh;
++ struct sock *sk = NULL;
++
++ if (!ip || offset) return 0;
++
++ switch(ip->protocol) {
++ case IPPROTO_TCP:
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &th, sizeof(th)) < 0) {
++ *hotdrop = 1;
++ return 0;
++ }
++ if (!(th.syn && !th.ack)) return 0;
++ sk = tcp_v4_lookup_listener(ip->daddr, ntohs(th.dest), ((struct rtable*)skb->dst)->rt_iif);
++ break;
++ case IPPROTO_UDP:
++ if (skb_copy_bits(skb, skb->nh.iph->ihl*4, &uh, sizeof(uh)) < 0) {
++ *hotdrop = 1;
++ return 0;
++ }
++ sk = udp_v4_lookup(ip->saddr, uh.source, ip->daddr, uh.dest, skb->dev->ifindex);
++ break;
++ default:
++ return 0;
++ }
++
++ if (!sk) /* no listener on this port: match it */
++ return 1;
++ else {
++ sock_put(sk);
++ return 0;
++ }
++}
++
++/* Called when user tries to insert an entry of this type. */
++static int
++checkentry(const char *tablename,
++ const struct ipt_ip *ip,
++ void *matchinfo,
++ unsigned int matchsize,
++ unsigned int hook_mask)
++{
++ if (matchsize != IPT_ALIGN(0))
++ return 0;
++
++ if(((ip->proto == IPPROTO_TCP && !(ip->invflags & IPT_INV_PROTO)) ||
++ ((ip->proto == IPPROTO_UDP) && !(ip->invflags & IPT_INV_PROTO)))
++ && (hook_mask & (1 << NF_IP_LOCAL_IN)))
++ return 1;
++
++ printk("stealth: Only works on TCP and UDP for the INPUT chain.\n");
++
++ return 0;
++}
++
++
++static struct ipt_match stealth_match = {
++ .name = "stealth",
++ .match = &match,
++ .checkentry = &checkentry,
++ .destroy = NULL,
++ .me = THIS_MODULE
++};
++
++static int __init init(void)
++{
++ return ipt_register_match(&stealth_match);
++}
++
++static void __exit fini(void)
++{
++ ipt_unregister_match(&stealth_match);
++}
++
++module_init(init);
++module_exit(fini);
+diff -urN linux-2.6.7/net/ipv4/tcp_ipv4.c linux-2.6.7/net/ipv4/tcp_ipv4.c
+--- linux-2.6.7/net/ipv4/tcp_ipv4.c 2004-06-16 01:19:10 -0400
++++ linux-2.6.7/net/ipv4/tcp_ipv4.c 2004-06-28 16:43:12 -0400
+@@ -62,6 +62,7 @@
+ #include <linux/jhash.h>
+ #include <linux/init.h>
+ #include <linux/times.h>
++#include <linux/grsecurity.h>
+
+ #include <net/icmp.h>
+ #include <net/tcp.h>
+@@ -223,6 +224,10 @@
+
+ spin_lock(&tcp_portalloc_lock);
+ rover = tcp_port_rover;
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ if (grsec_enable_randsrc && (high > low))
++ rover = low + (get_random_long() % remaining);
++#endif
+ do {
+ rover++;
+ if (rover < low || rover > high)
+@@ -537,6 +542,11 @@
+
+ static inline __u32 tcp_v4_init_sequence(struct sock *sk, struct sk_buff *skb)
+ {
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ if (likely(grsec_enable_randisn))
++ return ip_randomisn();
++ else
++#endif
+ return secure_tcp_sequence_number(skb->nh.iph->daddr,
+ skb->nh.iph->saddr,
+ skb->h.th->dest,
+@@ -669,12 +679,15 @@
+ */
+ spin_lock(&tcp_portalloc_lock);
+ rover = tcp_port_rover;
+-
++#ifdef CONFIG_GRKERNSEC_RANDSRC
++ if (grsec_enable_randsrc && (high > low))
++ rover = low + (get_random_long() % remaining);
++#endif
+ do {
+ rover++;
+ if ((rover < low) || (rover > high))
+ rover = low;
+- head = &tcp_bhash[tcp_bhashfn(rover)];
++ head = &tcp_bhash[tcp_bhashfn(rover)];
+ spin_lock(&head->lock);
+
+ /* Does not bother with rcv_saddr checks,
+@@ -724,6 +737,15 @@
+ }
+ spin_unlock(&head->lock);
+
++#ifdef CONFIG_GRKERNSEC
++ gr_del_task_from_ip_table(current);
++ current->gr_saddr = inet_sk(sk)->rcv_saddr;
++ current->gr_daddr = inet_sk(sk)->daddr;
++ current->gr_sport = inet_sk(sk)->sport;
++ current->gr_dport = inet_sk(sk)->dport;
++ gr_add_to_task_ip_table(current);
++#endif
++
+ if (tw) {
+ tcp_tw_deschedule(tw);
+ tcp_tw_put(tw);
+@@ -843,13 +865,24 @@
+ tcp_v4_setup_caps(sk, &rt->u.dst);
+ tp->ext2_header_len = rt->u.dst.header_len;
+
+- if (!tp->write_seq)
++ if (!tp->write_seq) {
++#ifdef CONFIG_GRKERNSEC_RANDISN
++ if (likely(grsec_enable_randisn))
++ tp->write_seq = ip_randomisn();
++ else
++#endif
+ tp->write_seq = secure_tcp_sequence_number(inet->saddr,
+ inet->daddr,
+ inet->sport,
+ usin->sin_port);
++ }
+
+- inet->id = tp->write_seq ^ jiffies;
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ inet->id = htons(ip_randomid());
++ else
++#endif
++ inet->id = tp->write_seq ^ jiffies;
+
+ err = tcp_connect(sk);
+ rt = NULL;
+@@ -1593,7 +1626,13 @@
+ if (newinet->opt)
+ newtp->ext_header_len = newinet->opt->optlen;
+ newtp->ext2_header_len = dst->header_len;
+- newinet->id = newtp->write_seq ^ jiffies;
++
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ newinet->id = htons(ip_randomid());
++ else
++#endif
++ newinet->id = newtp->write_seq ^ jiffies;
+
+ tcp_sync_mss(newsk, dst_pmtu(dst));
+ newtp->advmss = dst_metric(dst, RTAX_ADVMSS);
+diff -urN linux-2.6.7/net/ipv4/udp.c linux-2.6.7/net/ipv4/udp.c
+--- linux-2.6.7/net/ipv4/udp.c 2004-06-16 01:18:59 -0400
++++ linux-2.6.7/net/ipv4/udp.c 2004-06-25 14:07:21 -0400
+@@ -100,6 +100,7 @@
+ #include <linux/skbuff.h>
+ #include <linux/proc_fs.h>
+ #include <linux/seq_file.h>
++#include <linux/grsecurity.h>
+ #include <net/sock.h>
+ #include <net/udp.h>
+ #include <net/icmp.h>
+@@ -108,6 +109,12 @@
+ #include <net/checksum.h>
+ #include <net/xfrm.h>
+
++extern int gr_search_udp_recvmsg(const struct sock *sk,
++ const struct sk_buff *skb);
++extern int gr_search_udp_sendmsg(const struct sock *sk,
++ const struct sockaddr_in *addr);
++
++
+ /*
+ * Snmp MIB for the UDP layer
+ */
+@@ -538,9 +545,16 @@
+ dport = usin->sin_port;
+ if (dport == 0)
+ return -EINVAL;
++
++ if (!gr_search_udp_sendmsg(sk, usin))
++ return -EPERM;
+ } else {
+ if (sk->sk_state != TCP_ESTABLISHED)
+ return -EDESTADDRREQ;
++
++ if (!gr_search_udp_sendmsg(sk, NULL))
++ return -EPERM;
++
+ daddr = inet->daddr;
+ dport = inet->dport;
+ /* Open fast path for connected socket.
+@@ -792,7 +806,12 @@
+ if (!skb)
+ goto out;
+
+- copied = skb->len - sizeof(struct udphdr);
++ if (!gr_search_udp_recvmsg(sk, skb)) {
++ err = -EPERM;
++ goto out_free;
++ }
++
++ copied = skb->len - sizeof(struct udphdr);
+ if (copied > len) {
+ copied = len;
+ msg->msg_flags |= MSG_TRUNC;
+@@ -901,7 +920,12 @@
+ inet->daddr = rt->rt_dst;
+ inet->dport = usin->sin_port;
+ sk->sk_state = TCP_ESTABLISHED;
+- inet->id = jiffies;
++#ifdef CONFIG_GRKERNSEC_RANDID
++ if (grsec_enable_randid)
++ inet->id = htons(ip_randomid());
++ else
++#endif
++ inet->id = jiffies;
+
+ sk_dst_set(sk, &rt->u.dst);
+ return(0);
+diff -urN linux-2.6.7/net/socket.c linux-2.6.7/net/socket.c
+--- linux-2.6.7/net/socket.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/net/socket.c 2004-06-25 14:07:21 -0400
+@@ -81,6 +81,7 @@
+ #include <linux/syscalls.h>
+ #include <linux/compat.h>
+ #include <linux/kmod.h>
++#include <linux/in.h>
+
+ #ifdef CONFIG_NET_RADIO
+ #include <linux/wireless.h> /* Note : will define WIRELESS_EXT */
+@@ -94,6 +95,18 @@
+ #include <net/sock.h>
+ #include <linux/netfilter.h>
+
++extern void gr_attach_curr_ip(const struct sock *sk);
++extern int gr_handle_sock_all(const int family, const int type,
++ const int protocol);
++extern int gr_handle_sock_server(const struct sockaddr *sck);
++extern int gr_handle_sock_client(const struct sockaddr *sck);
++extern int gr_search_connect(const struct socket * sock,
++ const struct sockaddr_in * addr);
++extern int gr_search_bind(const struct socket * sock,
++ const struct sockaddr_in * addr);
++extern int gr_search_socket(const int domain, const int type,
++ const int protocol);
++
+ static int sock_no_open(struct inode *irrelevant, struct file *dontcare);
+ static ssize_t sock_aio_read(struct kiocb *iocb, char __user *buf,
+ size_t size, loff_t pos);
+@@ -898,6 +911,7 @@
+ printk(KERN_DEBUG "sock_close: NULL inode\n");
+ return 0;
+ }
++
+ sock_fasync(-1, filp, 0);
+ sock_release(SOCKET_I(inode));
+ return 0;
+@@ -1127,6 +1141,16 @@
+ int retval;
+ struct socket *sock;
+
++ if(!gr_search_socket(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
++ if (gr_handle_sock_all(family, type, protocol)) {
++ retval = -EACCES;
++ goto out;
++ }
++
+ retval = sock_create(family, type, protocol, &sock);
+ if (retval < 0)
+ goto out;
+@@ -1222,11 +1246,23 @@
+ {
+ struct socket *sock;
+ char address[MAX_SOCK_ADDR];
++ struct sockaddr *sck;
+ int err;
+
+ if((sock = sockfd_lookup(fd,&err))!=NULL)
+ {
+ if((err=move_addr_to_kernel(umyaddr,addrlen,address))>=0) {
++ sck = (struct sockaddr *)address;
++ if (!gr_search_bind(sock, (struct sockaddr_in *)sck)) {
++ sockfd_put(sock);
++ return -EACCES;
++ }
++
++ if (gr_handle_sock_server(sck)) {
++ sockfd_put(sock);
++ return -EACCES;
++ }
++
+ err = security_socket_bind(sock, (struct sockaddr *)address, addrlen);
+ if (err) {
+ sockfd_put(sock);
+@@ -1329,6 +1365,7 @@
+ goto out_release;
+
+ security_socket_post_accept(sock, newsock);
++ gr_attach_curr_ip(newsock->sk);
+
+ out_put:
+ sockfd_put(sock);
+@@ -1356,6 +1393,7 @@
+ {
+ struct socket *sock;
+ char address[MAX_SOCK_ADDR];
++ struct sockaddr *sck;
+ int err;
+
+ sock = sockfd_lookup(fd, &err);
+@@ -1365,6 +1403,18 @@
+ if (err < 0)
+ goto out_put;
+
++ sck = (struct sockaddr *)address;
++
++ if (!gr_search_connect(sock, (struct sockaddr_in *)sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
++ if (gr_handle_sock_client(sck)) {
++ err = -EACCES;
++ goto out_put;
++ }
++
+ err = security_socket_connect(sock, (struct sockaddr *)address, addrlen);
+ if (err)
+ goto out_put;
+@@ -1618,6 +1668,7 @@
+ err=sock->ops->shutdown(sock, how);
+ sockfd_put(sock);
+ }
++
+ return err;
+ }
+
+diff -urN linux-2.6.7/net/sunrpc/xprt.c linux-2.6.7/net/sunrpc/xprt.c
+--- linux-2.6.7/net/sunrpc/xprt.c 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/net/sunrpc/xprt.c 2004-06-25 14:07:21 -0400
+@@ -58,6 +58,7 @@
+ #include <linux/file.h>
+ #include <linux/workqueue.h>
+ #include <linux/random.h>
++#include <linux/grsecurity.h>
+
+ #include <net/sock.h>
+ #include <net/checksum.h>
+@@ -1337,6 +1338,12 @@
+ */
+ static inline u32 xprt_alloc_xid(struct rpc_xprt *xprt)
+ {
++
++#ifdef CONFIG_GRKERNSEC_RANDRPC
++ if (grsec_enable_randrpc)
++ return (u32) get_random_long();
++#endif
++
+ return xprt->xid++;
+ }
+
+diff -urN linux-2.6.7/net/unix/af_unix.c linux-2.6.7/net/unix/af_unix.c
+--- linux-2.6.7/net/unix/af_unix.c 2004-06-16 01:19:37 -0400
++++ linux-2.6.7/net/unix/af_unix.c 2004-06-25 14:07:21 -0400
+@@ -118,6 +118,7 @@
+ #include <linux/mount.h>
+ #include <net/checksum.h>
+ #include <linux/security.h>
++#include <linux/grsecurity.h>
+
+ int sysctl_unix_max_dgram_qlen = 10;
+
+@@ -681,6 +682,11 @@
+ if (err)
+ goto put_fail;
+
++ if (!gr_acl_handle_unix(nd.dentry, nd.mnt)) {
++ err = -EACCES;
++ goto put_fail;
++ }
++
+ err = -ECONNREFUSED;
+ if (!S_ISSOCK(nd.dentry->d_inode->i_mode))
+ goto put_fail;
+@@ -704,6 +710,13 @@
+ if (u) {
+ struct dentry *dentry;
+ dentry = unix_sk(u)->dentry;
++
++ if (!gr_handle_chroot_unix(u->sk_peercred.pid)) {
++ err = -EPERM;
++ sock_put(u);
++ goto fail;
++ }
++
+ if (dentry)
+ touch_atime(unix_sk(u)->mnt, dentry);
+ } else
+@@ -803,9 +816,18 @@
+ */
+ mode = S_IFSOCK |
+ (SOCK_INODE(sock)->i_mode & ~current->fs->umask);
++
++ if (!gr_acl_handle_mknod(dentry, nd.dentry, nd.mnt, mode)) {
++ err = -EACCES;
++ goto out_mknod_dput;
++ }
++
+ err = vfs_mknod(nd.dentry->d_inode, dentry, mode, 0);
+ if (err)
+ goto out_mknod_dput;
++
++ gr_handle_create(dentry, nd.mnt);
++
+ up(&nd.dentry->d_inode->i_sem);
+ dput(nd.dentry);
+ nd.dentry = dentry;
+@@ -823,6 +845,10 @@
+ goto out_unlock;
+ }
+
++#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
++ sk->sk_peercred.pid = current->pid;
++#endif
++
+ list = &unix_socket_table[addr->hash];
+ } else {
+ list = &unix_socket_table[dentry->d_inode->i_ino & (UNIX_HASH_SIZE-1)];
+diff -urN linux-2.6.7/security/Kconfig linux-2.6.7/security/Kconfig
+--- linux-2.6.7/security/Kconfig 2004-06-16 01:19:42 -0400
++++ linux-2.6.7/security/Kconfig 2004-06-28 11:27:47 -0400
+@@ -4,6 +4,422 @@
+
+ menu "Security options"
+
++source grsecurity/Kconfig
++
++menu "PaX"
++depends on GRKERNSEC
++
++config PAX
++ bool "Enable various PaX features"
++ depends on ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86 || X86_64
++ help
++ This allows you to enable various PaX features. PaX adds
++ intrusion prevention mechanisms to the kernel that reduce
++ the risks posed by exploitable memory corruption bugs.
++
++menu "PaX Control"
++ depends on PAX
++
++config PAX_SOFTMODE
++ bool 'Support soft mode'
++ help
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS support as it
++ is the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
++config PAX_EI_PAX
++ bool 'Use legacy ELF header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'chpax' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft-mode, toolchain does not
++ know about the non-standard use of the ELF header) therefore it
++ has been deprecated in favour of PT_PAX_FLAGS support.
++
++ You should enable this option only if your toolchain does not yet
++ support the new control flag location (PT_PAX_FLAGS) or you still
++ have applications not marked by PT_PAX_FLAGS.
++
++ Note that if you enable PT_PAX_FLAGS marking support as well,
++ it will override the legacy EI_PAX marks.
++
++config PAX_PT_PAX_FLAGS
++ bool 'Use ELF program header marking'
++ help
++ Enabling this option will allow you to control PaX features on
++ a per executable basis via the 'paxctl' utility available at
++ http://pax.grsecurity.net/. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefits of supporting both soft mode and being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ it will be overridden by the PT_PAX_FLAGS marking.
++
++choice
++ prompt 'MAC system integration'
++ default PAX_NO_ACL_FLAGS
++ help
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_flags() itself,
++ - "hook": if your MAC system uses the pax_set_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++config PAX_NO_ACL_FLAGS
++ bool 'none'
++
++config PAX_HAVE_ACL_FLAGS
++ bool 'direct'
++
++config PAX_HOOK_ACL_FLAGS
++ bool 'hook'
++endchoice
++
++endmenu
++
++menu "Non-executable pages"
++ depends on PAX
++
++config PAX_NOEXEC
++ bool "Enforce non-executable pages"
++ depends on (PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS) && (ALPHA || IA64 || MIPS32 || MIPS64 || PARISC || PPC32 || PPC64 || SPARC32 || SPARC64 || X86 || X86_64)
++ help
++ By design some architectures do not allow for protecting memory
++	  pages against execution, or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than that of the attacker, then he can elevate
++	  his own privilege level (e.g. get a root shell, write to files to
++	  which he does not have write access, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++	  This will also break programs that rely on the old behaviour and
++	  expect memory dynamically allocated via the malloc() family of
++	  functions to be executable (which it is not). Notable examples
++	  are the XFree86 4.x server, the Java runtime and Wine.
++
++config PAX_PAGEEXEC
++ bool "Paging based non-executable pages"
++ depends on PAX_NOEXEC && !HIGHPTE && (!X86 || X86_64 || M586 || M586TSC || M586MMX || M686 || MPENTIUMII || MPENTIUMIII || MPENTIUM4 || MK7 || MK8)
++ select PAX_NOVSYSCALL if X86 && !X86_64
++ help
++ This implementation is based on the paging feature of the CPU.
++ On i386 and ppc there is a variable but usually low performance
++ impact on applications. On alpha, ia64, parisc, sparc, sparc64
++ and x86_64 there is no performance impact.
++
++config PAX_SEGMEXEC
++ bool "Segmentation based non-executable pages"
++ depends on PAX_NOEXEC && X86 && !X86_64
++ help
++ This implementation is based on the segmentation feature of the
++ CPU and has little performance impact, however applications will
++ be limited to a 1.5 GB address space instead of the normal 3 GB.
++
++choice
++ prompt "Default non-executable page method"
++ depends on PAX_PAGEEXEC && PAX_SEGMEXEC
++ default PAX_DEFAULT_SEGMEXEC
++ help
++ Select the default non-executable page method applied to applications
++ that do not select one themselves.
++
++config PAX_DEFAULT_PAGEEXEC
++ bool "PAGEEXEC"
++
++config PAX_DEFAULT_SEGMEXEC
++ bool "SEGMEXEC"
++endchoice
++
++config PAX_EMUTRAMP
++ bool "Emulate trampolines" if (PAX_PAGEEXEC || PAX_SEGMEXEC) && (PARISC || PPC32 || X86) && !X86_64
++ default y if PARISC || PPC32
++ help
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' or 'paxctl'
++ utilities to enable trampoline emulation for the affected programs
++ yet still have the protection provided by the non-executable pages.
++
++ On parisc and ppc you MUST enable this option and EMUSIGRT as
++ well, otherwise your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' or 'paxctl'
++ utilities to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC
++ for the affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
++config PAX_EMUSIGRT
++ bool "Automatically emulate sigreturn trampolines"
++ depends on PAX_EMUTRAMP && (PARISC || PPC32)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc and ppc you MUST enable this option, otherwise your
++ system will not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++config PAX_MPROTECT
++ bool "Restrict mprotect()"
++ depends on (PAX_PAGEEXEC || PAX_SEGMEXEC) && !PPC64
++ help
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_NOELFRELOCS
++ bool "Disallow ELF text relocations"
++ depends on PAX_MPROTECT && (IA64 || X86 || X86_64)
++ help
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two venues
++	  attacked task's address space. There remain only two avenues
++	  for this kind of attack: if the attacker can execute already
++	  existing code in the attacked task, then he can either have it
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++	  then enable this option; otherwise be careful, as you may not even
++	  be able to boot or log in to your system (for example, some PAM
++ modules are erroneously compiled as non-PIC by default).
++
++ NOTE: if you are using dynamic ELF executables (as suggested
++ when using ASLR) then you must have made sure that you linked
++ your files using the PIC version of crt1 (the et_dyn.tar.gz package
++ referenced there has already been updated to support this).
++
++config PAX_ETEXECRELOCS
++ bool "Allow ELF ET_EXEC text relocations"
++ depends on PAX_MPROTECT && (ALPHA || IA64 || PARISC)
++ default y
++ help
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++config PAX_EMUPLT
++ bool "Automatically emulate ELF PLT"
++ depends on PAX_MPROTECT && (ALPHA || PARISC || PPC32 || SPARC32 || SPARC64)
++ default y
++ help
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc, ppc,
++	  sparc and sparc64, otherwise your system will not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++config PAX_DLRESOLVE
++ bool
++ depends on PAX_EMUPLT && (SPARC32 || SPARC64)
++ default y
++
++config PAX_SYSCALL
++ bool
++ depends on PAX_PAGEEXEC && PPC32
++ default y
++
++config PAX_KERNEXEC
++ bool "Enforce non-executable kernel pages"
++ depends on PAX_NOEXEC && X86 && !X86_64 && !MODULES && !HOTPLUG_PCI_COMPAQ_NVRAM
++ help
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++endmenu
++
++menu "Address Space Layout Randomization"
++ depends on PAX
++
++config PAX_ASLR
++ bool "Address Space Layout Randomization"
++ depends on PAX_EI_PAX || PAX_PT_PAX_FLAGS || PAX_HAVE_ACL_FLAGS || PAX_HOOK_ACL_FLAGS
++ help
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++	  the attacked program, which allows the kernel to detect such attempts
++	  and react to them. PaX itself provides no reaction mechanisms,
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++	  very effective protection.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control
++ this feature on a per file basis.
++
++config PAX_RANDKSTACK
++ bool "Randomize kernel stack base"
++ depends on PAX_ASLR && X86_TSC && !X86_64
++ help
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++	  to guess it but also prevent him from making use of possibly
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++config PAX_RANDUSTACK
++ bool "Randomize user stack base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps where the second
++	  one may apply a large shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' or
++ 'paxctl' on a per file basis.
++
++config PAX_RANDMMAP
++ bool "Randomize mmap() base"
++ depends on PAX_ASLR
++ help
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++	  becomes a guessing game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples at
++ http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++config PAX_RANDEXEC
++ bool "Randomize ET_EXEC base"
++ depends on PAX_MPROTECT && PAX_RANDMMAP
++ help
++ By saying Y here the kernel will randomize the base address of normal
++ ET_EXEC ELF executables as well. This is accomplished by mapping the
++ executable in memory in a special way which also allows for detecting
++ attackers who attempt to execute its code for their purposes. Since
++ this special mapping causes performance degradation and the attack
++ detection may create false alarms as well, you should carefully test
++ your executables when this feature is enabled.
++
++ This solution is intended only as a temporary one until you relink
++ your programs as a dynamic ELF file.
++
++ NOTE: you can use the 'chpax' or 'paxctl' utilities to control this
++ feature on a per file basis.
++
++config PAX_NOVSYSCALL
++ bool "Disable the vsyscall page"
++ depends on PAX_ASLR && X86 && !X86_64
++ help
++ The Linux 2.6 kernel introduced a new feature that speeds up or
++ simplifies certain operations, such as system calls or returns
++ from signal handlers.
++
++	  Unfortunately the implementation also puts a powerful instrument
++ into the hands of exploit writers: the so-called vsyscall page exists
++ in every task at the same fixed address and it contains machine code
++ that is very useful in performing the return-to-libc style attack.
++
++ Since this exploit technique cannot in general be protected against
++ via kernel solutions, this option will allow you to disable the use
++	  of the vsyscall page and revert to the old behaviour.
++
++endmenu
++
++endmenu
++
+ config SECURITY
+ bool "Enable different security models"
+ help
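The NOEXEC and MPROTECT help texts above describe behaviour that can be observed from userland. The following probe is an editorial sketch only, not part of the patchset: it maps anonymous read-write memory and then asks for PROT_EXEC via mprotect(). On a kernel enforcing MPROTECT for the probe binary the call is expected to fail, while on a stock kernel it normally succeeds; the exact errno is kernel dependent, so the probe simply reports what it got. Build it with any C compiler and run it on both kernels to compare.

/* mprotect probe: does the kernel allow turning RW memory into RX? */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 4096;
	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	/* under PAX_MPROTECT this RW -> RX transition should be refused */
	if (mprotect(p, len, PROT_READ | PROT_EXEC) == -1)
		printf("mprotect(PROT_EXEC) denied: %s\n", strerror(errno));
	else
		printf("mprotect(PROT_EXEC) allowed\n");

	munmap(p, len);
	return 0;
}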
+diff -urN linux-2.6.7/security/commoncap.c linux-2.6.7/security/commoncap.c
+--- linux-2.6.7/security/commoncap.c 2004-06-16 01:19:13 -0400
++++ linux-2.6.7/security/commoncap.c 2004-06-25 14:33:47 -0400
+@@ -23,11 +23,12 @@
+ #include <linux/ptrace.h>
+ #include <linux/xattr.h>
+ #include <linux/hugetlb.h>
++#include <linux/grsecurity.h>
+
+ int cap_capable (struct task_struct *tsk, int cap)
+ {
+ /* Derived from include/linux/sched.h:capable. */
+- if (cap_raised (tsk->cap_effective, cap))
++ if (cap_raised (tsk->cap_effective, cap) && gr_task_is_capable(tsk, cap))
+ return 0;
+ else
+ return -EPERM;
+@@ -37,7 +38,7 @@
+ {
+ /* Derived from arch/i386/kernel/ptrace.c:sys_ptrace. */
+ if (!cap_issubset (child->cap_permitted, current->cap_permitted) &&
+- !capable (CAP_SYS_PTRACE))
++ !capable_nolog (CAP_SYS_PTRACE))
+ return -EPERM;
+ else
+ return 0;
+@@ -141,8 +142,11 @@
+ }
+ }
+
+- current->suid = current->euid = current->fsuid = bprm->e_uid;
+- current->sgid = current->egid = current->fsgid = bprm->e_gid;
++ if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid))
++ current->suid = current->euid = current->fsuid = bprm->e_uid;
++
++ if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid))
++ current->sgid = current->egid = current->fsgid = bprm->e_gid;
+
+ /* For init, we want to retain the capabilities set
+ * in the init_task struct. Thus we skip the usual
+@@ -153,6 +157,8 @@
+ cap_intersect (new_permitted, bprm->cap_effective);
+ }
+
++ gr_handle_chroot_caps(current);
++
+ /* AUD: Audit candidate if current->cap_effective is set */
+
+ current->keep_capabilities = 0;
+@@ -334,7 +340,7 @@
+ /*
+ * Leave the last 3% for root
+ */
+- if (!capable(CAP_SYS_ADMIN))
++ if (!capable_nolog(CAP_SYS_ADMIN))
+ free -= free / 32;
+
+ if (free > pages)
+@@ -345,7 +351,7 @@
+ * only call if we're about to fail.
+ */
+ n = nr_free_pages();
+- if (!capable(CAP_SYS_ADMIN))
++ if (!capable_nolog(CAP_SYS_ADMIN))
+ n -= n / 32;
+ free += n;
+
+diff -urN linux-2.6.7/security/dummy.c linux-2.6.7/security/dummy.c
+--- linux-2.6.7/security/dummy.c 2004-06-16 01:19:01 -0400
++++ linux-2.6.7/security/dummy.c 2004-06-25 14:35:43 -0400
+@@ -28,6 +28,7 @@
+ #include <linux/hugetlb.h>
+ #include <linux/ptrace.h>
+ #include <linux/file.h>
++#include <linux/grsecurity.h>
+
+ static int dummy_ptrace (struct task_struct *parent, struct task_struct *child)
+ {
+@@ -182,8 +183,11 @@
+ }
+ }
+
+- current->suid = current->euid = current->fsuid = bprm->e_uid;
+- current->sgid = current->egid = current->fsgid = bprm->e_gid;
++ if (!gr_check_user_change(-1, bprm->e_uid, bprm->e_uid))
++ current->suid = current->euid = current->fsuid = bprm->e_uid;
++
++ if (!gr_check_group_change(-1, bprm->e_gid, bprm->e_gid))
++ current->sgid = current->egid = current->fsgid = bprm->e_gid;
+ }
+
+ static int dummy_bprm_set_security (struct linux_binprm *bprm)
+diff -urN linux-2.6.7/security/security.c linux-2.6.7/security/security.c
+--- linux-2.6.7/security/security.c 2004-06-16 01:18:38 -0400
++++ linux-2.6.7/security/security.c 2004-06-25 14:07:21 -0400
+@@ -206,4 +206,5 @@
+ EXPORT_SYMBOL_GPL(mod_reg_security);
+ EXPORT_SYMBOL_GPL(mod_unreg_security);
+ EXPORT_SYMBOL(capable);
++EXPORT_SYMBOL(capable_nolog);
+ EXPORT_SYMBOL(security_ops);
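The ASLR options documented in the Kconfig hunk of this patch (RANDUSTACK, RANDMMAP, RANDEXEC) can be observed with a similar editorial sketch, again independent of the patchset itself. Run it several times: where the corresponding randomization is active for the binary, the printed addresses should differ between runs, whereas without ASLR they are typically stable.

/* address layout probe: one address per randomized area */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	int on_stack;
	/* a large allocation is normally satisfied by mmap() rather than brk */
	void *on_mmap = malloc(256 * 1024);

	printf("stack: %p\n", (void *)&on_stack);  /* userland stack (RANDUSTACK) */
	printf("mmap : %p\n", on_mmap);            /* mmap() base (RANDMMAP) */
	printf("text : %p\n", (void *)main);       /* ET_EXEC base; fixed unless
	                                              RANDEXEC or an ET_DYN relink
	                                              is used */
	free(on_mmap);
	return 0;
}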
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1010_grsec_no_depend_pax.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1010_grsec_no_depend_pax.patch
new file mode 100644
index 0000000000..da2c477144
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1010_grsec_no_depend_pax.patch
@@ -0,0 +1,12 @@
+diff -u linux-2.6.7/security/Kconfig linux-2.6.7/security/Kconfig
+--- linux-2.6.7/security/Kconfig 2004-06-28 11:27:47 -0400
++++ linux-2.6.7/security/Kconfig 2004-06-28 11:27:47 -0400
+@@ -7,7 +7,7 @@
+ source grsecurity/Kconfig
+
+ menu "PaX"
+-depends on GRKERNSEC
++
+
+ config PAX
+ bool "Enable various PaX features"
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1300_linux-2.6.4-selinux-hooks.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1300_linux-2.6.4-selinux-hooks.patch
new file mode 100644
index 0000000000..ce033764f6
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1300_linux-2.6.4-selinux-hooks.patch
@@ -0,0 +1,137 @@
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/hooks.c linux-2.4.24-hardened-r1/security/selinux/hooks.c
+--- linux-2.4.24-hardened-r1.orig/security/selinux/hooks.c 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/hooks.c 2004-02-22 23:46:53.000000000 -0600
+@@ -3190,6 +3190,68 @@
+ return size;
+ }
+
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++static void avc_pax_set_flags(struct linux_binprm * bprm)
++{
++ struct inode_security_struct *isec;
++ unsigned long flags = 0;
++ int rc;
++
++ char *scontext;
++ u32 scontext_len;
++
++ /*
++ * get the security struct from the inode of the file
++ * since the bprm security struct will just point to
++ * the user running the binary
++ */
++ struct inode *inode = bprm->file->f_dentry->d_inode;
++ isec = inode->i_security;
++
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__PAGEEXEC, &isec->avcr,NULL);
++ if (!rc) {
++ flags |= PF_PAX_PAGEEXEC;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__EMUTRAMP, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_EMUTRAMP;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__RANDEXEC, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_RANDEXEC;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__MPROTECT, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_MPROTECT;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__RANDMMAP, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_RANDMMAP;
++ }
++ rc = avc_has_perm(isec->sid, isec->sid, SECCLASS_PAX, PAX__SEGMEXEC, &isec->avcr, NULL);
++ if (!rc) {
++ flags |= PF_PAX_SEGMEXEC;
++ }
++
++ if (selinux_enforcing) {
++ /* pull all the pax flags in current */
++ current->flags &= ~(PF_PAX_PAGEEXEC | PF_PAX_EMUTRAMP | PF_PAX_MPROTECT | PF_PAX_RANDMMAP | PF_PAX_RANDEXEC | PF_PAX_SEGMEXEC);
++ /* and add ours */
++ current->flags |= flags;
++
++ if (pax_check_flags(&current->flags) < 0) {
++ security_sid_to_context(isec->sid, &scontext, &scontext_len);
++ printk(KERN_WARNING "avc: PaX flags overridden to %lx for %s (%s)\n",
++ current->flags,
++ scontext,
++ bprm->filename);
++ kfree(scontext);
++ }
++ }
++}
++#endif /* CONFIG_PAX_HOOK_ACL_FLAGS */
++
++
+ struct security_operations selinux_ops = {
+ .ptrace = selinux_ptrace,
+ .capget = selinux_capget,
+@@ -3370,6 +3432,11 @@
+ {
+ printk(KERN_INFO "SELinux: Completing initialization.\n");
+
++ #ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++ printk(KERN_INFO "SELinux: Setting PaX callback function.\n");
++ pax_set_flags_func = avc_pax_set_flags;
++ #endif
++
+ /* Set up any superblocks initialized prior to the policy load. */
+ printk(KERN_INFO "SELinux: Setting up existing superblocks.\n");
+ spin_lock(&sb_security_lock);
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/av_perm_to_string.h linux-2.4.24-hardened-r1/security/selinux/include/av_perm_to_string.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/av_perm_to_string.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/av_perm_to_string.h 2004-02-20 16:50:39.000000000 -0600
+@@ -114,6 +120,12 @@
+ { SECCLASS_PASSWD, PASSWD__PASSWD, "passwd" },
+ { SECCLASS_PASSWD, PASSWD__CHFN, "chfn" },
+ { SECCLASS_PASSWD, PASSWD__CHSH, "chsh" },
++ { SECCLASS_PAX, PAX__PAGEEXEC, "pageexec" },
++ { SECCLASS_PAX, PAX__EMUTRAMP, "emutramp" },
++ { SECCLASS_PAX, PAX__MPROTECT, "mprotect" },
++ { SECCLASS_PAX, PAX__RANDMMAP, "randmmap" },
++ { SECCLASS_PAX, PAX__RANDEXEC, "randexec" },
++ { SECCLASS_PAX, PAX__SEGMEXEC, "segmexec" },
+ };
+
+
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/av_permissions.h linux-2.4.24-hardened-r1/security/selinux/include/av_permissions.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/av_permissions.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/av_permissions.h 2004-02-20 16:50:40.000000000 -0600
+@@ -546,5 +554,12 @@
+ #define PASSWD__CHFN 0x00000002UL
+ #define PASSWD__CHSH 0x00000004UL
+
++#define PAX__PAGEEXEC 0x00000001UL
++#define PAX__EMUTRAMP 0x00000002UL
++#define PAX__MPROTECT 0x00000004UL
++#define PAX__RANDMMAP 0x00000008UL
++#define PAX__RANDEXEC 0x00000010UL
++#define PAX__SEGMEXEC 0x00000020UL
++
+
+ /* FLASK */
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/class_to_string.h linux-2.4.24-hardened-r1/security/selinux/include/class_to_string.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/class_to_string.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/class_to_string.h 2004-02-20 16:50:40.000000000 -0600
+@@ -35,5 +35,6 @@
+ "shm",
+ "ipc",
+ "passwd",
++ "pax",
+ };
+
+diff -urN linux-2.4.24-hardened-r1.orig/security/selinux/include/flask.h linux-2.4.24-hardened-r1/security/selinux/include/flask.h
+--- linux-2.4.24-hardened-r1.orig/security/selinux/include/flask.h 2004-02-22 23:03:26.000000000 -0600
++++ linux-2.4.24-hardened-r1/security/selinux/include/flask.h 2004-02-20 16:50:41.000000000 -0600
+@@ -35,6 +35,7 @@
+ #define SECCLASS_SHM 28
+ #define SECCLASS_IPC 29
+ #define SECCLASS_PASSWD 30
++#define SECCLASS_PAX 31
+
+ /*
+ * Security identifier indices for initial entities
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1305_linux-2.6.7-selinux-ipaddr.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1305_linux-2.6.7-selinux-ipaddr.patch
new file mode 100644
index 0000000000..3f989767a9
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1305_linux-2.6.7-selinux-ipaddr.patch
@@ -0,0 +1,14 @@
+--- linux-2.6.3-openpax/security/selinux/avc.c 2004-02-17 21:58:52.000000000 -0600
++++ linux-2.6.3/security/selinux/avc.c 2004-03-07 18:24:57.000000000 -0600
+@@ -143,6 +143,11 @@
+ char *scontext;
+ u32 scontext_len;
+
++#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
++ if (current->curr_ip)
++ audit_log_format(ab, "ipaddr=%u.%u.%u.%u ", NIPQUAD(current->curr_ip));
++#endif /* CONFIG_GRKERNSEC_PROC_IPADDR */
++
+ rc = security_sid_to_context(ssid, &scontext, &scontext_len);
+ if (rc)
+ printk("ssid=%d", ssid);
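The avc.c hunk above formats current->curr_ip with a "%u.%u.%u.%u" string and the kernel's NIPQUAD macro. As a rough userspace illustration of that formatting, here is a sketch with a hypothetical QUAD macro that is assumed to behave like the kernel one, i.e. it exposes the four bytes of a 32-bit address in memory (network) order:

/* dotted-quad formatting sketch */
#include <stdio.h>
#include <arpa/inet.h>

#define QUAD(addr) \
	((unsigned char *)&(addr))[0], ((unsigned char *)&(addr))[1], \
	((unsigned char *)&(addr))[2], ((unsigned char *)&(addr))[3]

int main(void)
{
	struct in_addr ip;

	if (inet_pton(AF_INET, "192.0.2.7", &ip) != 1)
		return 1;
	/* ip.s_addr is in network byte order, so the bytes print in address order */
	printf("ipaddr=%u.%u.%u.%u\n", QUAD(ip.s_addr));
	return 0;
}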
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1310_linux-2.6.5-extra_sec_ops.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1310_linux-2.6.5-extra_sec_ops.patch
new file mode 100644
index 0000000000..fee0ff623e
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/1310_linux-2.6.5-extra_sec_ops.patch
@@ -0,0 +1,63 @@
+--- linux-2.6.4/security/selinux/hooks.c 2004-04-13 00:51:48.225259424 -0500
++++ linux-2.6.5-hardened/security/selinux/hooks.c 2004-04-13 00:34:15.067464600 -0500
+@@ -1673,6 +1673,11 @@
+
+ static int selinux_bprm_check_security (struct linux_binprm *bprm)
+ {
++ int rc;
++
++ rc = secondary_ops->bprm_check_security(bprm);
++ if (rc)
++ return rc;
+ return 0;
+ }
+
+@@ -2013,6 +2018,11 @@
+
+ static int selinux_inode_unlink(struct inode *dir, struct dentry *dentry)
+ {
++ int rc;
++
++ rc = secondary_ops->inode_unlink(dir, dentry);
++ if (rc)
++ return rc;
+ return may_link(dir, dentry, MAY_UNLINK);
+ }
+
+@@ -2081,11 +2091,17 @@
+ static int selinux_inode_permission(struct inode *inode, int mask,
+ struct nameidata *nd)
+ {
++ int rc;
++
+ if (!mask) {
+ /* No permission to check. Existence test. */
+ return 0;
+ }
+
++ rc = secondary_ops->inode_permission(inode, mask, nd);
++ if (rc)
++ return rc;
++
+ return inode_has_perm(current, inode,
+ file_mask_to_av(inode->i_mode, mask), NULL, NULL);
+ }
+@@ -2358,6 +2374,7 @@
+ static int selinux_file_mmap(struct file *file, unsigned long prot, unsigned long flags)
+ {
+ u32 av;
++ int rc;
+
+ if (file) {
+ /* read access is always possible with a mapping */
+@@ -2369,6 +2386,10 @@
+
+ if (prot & PROT_EXEC)
+ av |= FILE__EXECUTE;
++
++ rc = secondary_ops->file_mmap(file, prot, flags);
++ if (rc)
++ return rc;
+
+ return file_has_perm(current, file, av);
+ }
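The hunks above make several SELinux hooks consult secondary_ops before applying their own checks, so a stacked module can veto an operation first and SELinux only adds its policy decision on top. A minimal standalone sketch of that delegation pattern follows; the names are hypothetical stand-ins for the real struct security_operations hooks:

/* toy version of the "ask the secondary module first" pattern */
#include <errno.h>
#include <stdio.h>

struct sec_ops {
	int (*inode_permission)(int mask);
};

/* pretend the stacked (secondary) module denies write access */
static int secondary_inode_permission(int mask)
{
	return (mask & 2) ? -EACCES : 0;
}

static struct sec_ops secondary = {
	.inode_permission = secondary_inode_permission,
};

static int primary_inode_permission(int mask)
{
	int rc = secondary.inode_permission(mask);	/* delegate first */

	if (rc)
		return rc;				/* secondary already denied */

	/* ...the primary module's own policy check would run here... */
	return 0;
}

int main(void)
{
	printf("read : %d\n", primary_inode_permission(4));
	printf("write: %d\n", primary_inode_permission(2));
	return 0;
}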
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/2010_tcp-stealth-2.6.7.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/2010_tcp-stealth-2.6.7.patch
new file mode 100644
index 0000000000..cd2eb529cd
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/2010_tcp-stealth-2.6.7.patch
@@ -0,0 +1,184 @@
+diff -uprN -X dontdiff linux-2.6.7/include/linux/sysctl.h linux-2.6.7-tcp-stealth/include/linux/sysctl.h
+--- linux-2.6.7/include/linux/sysctl.h 2004-06-16 07:19:35.000000000 +0200
++++ linux-2.6.7-tcp-stealth/include/linux/sysctl.h 2004-06-24 11:27:56.753248176 +0200
+@@ -338,6 +338,10 @@ enum
+ NET_TCP_BIC_LOW_WINDOW=104,
+ NET_TCP_DEFAULT_WIN_SCALE=105,
+ NET_TCP_MODERATE_RCVBUF=106,
++ NET_IPV4_IP_MASQ_UDP_DLOOSE=107,
++ NET_TCP_STACK_SYNFIN=108,
++ NET_TCP_STACK_BOGUS=109,
++ NET_TCP_STACK_ACK=110,
+ };
+
+ enum {
+diff -uprN -X dontdiff linux-2.6.7/net/ipv4/Kconfig linux-2.6.7-tcp-stealth/net/ipv4/Kconfig
+--- linux-2.6.7/net/ipv4/Kconfig 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-tcp-stealth/net/ipv4/Kconfig 2004-06-24 11:25:42.504657064 +0200
+@@ -324,6 +324,29 @@ config SYN_COOKIES
+
+ If unsure, say N.
+
++config NET_STEALTH
++	bool "IP: TCP stealth options (enabled by default)"
++ depends on INET
++ default n
++ ---help---
++ If you say Y here, note that these options are now enabled by
++ default; you can disable them by executing the commands
++
++ echo 0 >/proc/sys/net/ipv4/tcp_ignore_ack
++ echo 0 >/proc/sys/net/ipv4/tcp_ignore_bogus
++ echo 0 >/proc/sys/net/ipv4/tcp_ignore_synfin
++
++ at boot time after the /proc file system has been mounted.
++
++ If security is more important, say Y.
++
++config NET_STEALTH_LOG
++ bool 'Log all dropped packets'
++ depends on NET_STEALTH
++ ---help---
++	  This turns on a logging facility that logs all TCP packets with
++ bad flags. If you said Y to "TCP stealth options", say Y too.
++
+ config INET_AH
+ tristate "IP: AH transformation"
+ select XFRM
+diff -uprN -X dontdiff linux-2.6.7/net/ipv4/sysctl_net_ipv4.c linux-2.6.7-tcp-stealth/net/ipv4/sysctl_net_ipv4.c
+--- linux-2.6.7/net/ipv4/sysctl_net_ipv4.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-tcp-stealth/net/ipv4/sysctl_net_ipv4.c 2004-06-24 11:25:42.516655240 +0200
+@@ -48,6 +48,11 @@ extern int inet_peer_maxttl;
+ extern int inet_peer_gc_mintime;
+ extern int inet_peer_gc_maxtime;
+
++/* stealth stuff */
++extern int sysctl_tcp_ignore_synfin;
++extern int sysctl_tcp_ignore_bogus;
++extern int sysctl_tcp_ignore_ack;
++
+ #ifdef CONFIG_SYSCTL
+ static int tcp_retr1_max = 255;
+ static int ip_local_port_range_min[] = { 1, 1 };
+@@ -320,6 +325,32 @@ ctl_table ipv4_table[] = {
+ .proc_handler = &proc_dointvec
+ },
+ #endif
++#ifdef CONFIG_NET_STEALTH
++ {
++ .ctl_name = NET_TCP_STACK_SYNFIN,
++ .procname = "tcp_ignore_synfin",
++ .data = &sysctl_tcp_ignore_synfin,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .ctl_name = NET_TCP_STACK_BOGUS,
++ .procname = "tcp_ignore_bogus",
++ .data = &sysctl_tcp_ignore_bogus,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++ {
++ .ctl_name = NET_TCP_STACK_ACK,
++ .procname = "tcp_ignore_ack",
++ .data = &sysctl_tcp_ignore_ack,
++ .maxlen = sizeof(int),
++ .mode = 0644,
++ .proc_handler = &proc_dointvec
++ },
++#endif
+ {
+ .ctl_name = NET_TCP_TW_RECYCLE,
+ .procname = "tcp_tw_recycle",
+diff -uprN -X dontdiff linux-2.6.7/net/ipv4/tcp_input.c linux-2.6.7-tcp-stealth/net/ipv4/tcp_input.c
+--- linux-2.6.7/net/ipv4/tcp_input.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-tcp-stealth/net/ipv4/tcp_input.c 2004-06-24 11:25:42.538651896 +0200
+@@ -75,6 +75,11 @@
+ int sysctl_tcp_timestamps = 1;
+ int sysctl_tcp_window_scaling = 1;
+ int sysctl_tcp_sack = 1;
++#ifdef CONFIG_NET_STEALTH
++int sysctl_tcp_ignore_synfin = 1;
++int sysctl_tcp_ignore_bogus = 1;
++int sysctl_tcp_ignore_ack = 1;
++#endif
+ int sysctl_tcp_fack = 1;
+ int sysctl_tcp_reordering = TCP_FASTRETRANS_THRESH;
+ int sysctl_tcp_ecn;
+diff -uprN -X dontdiff linux-2.6.7/net/ipv4/tcp_ipv4.c linux-2.6.7-tcp-stealth/net/ipv4/tcp_ipv4.c
+--- linux-2.6.7/net/ipv4/tcp_ipv4.c 2004-06-16 07:19:10.000000000 +0200
++++ linux-2.6.7-tcp-stealth/net/ipv4/tcp_ipv4.c 2004-06-24 11:25:42.551649920 +0200
+@@ -79,6 +79,12 @@ extern int sysctl_ip_dynaddr;
+ int sysctl_tcp_tw_reuse;
+ int sysctl_tcp_low_latency;
+
++#ifdef CONFIG_NET_STEALTH
++extern int sysctl_tcp_ignore_synfin;
++extern int sysctl_tcp_ignore_bogus;
++extern int sysctl_tcp_ignore_ack;
++#endif
++
+ /* Check TCP sequence numbers in ICMP packets. */
+ #define ICMP_MIN_LENGTH 8
+
+@@ -1763,6 +1769,23 @@ int tcp_v4_rcv(struct sk_buff *skb)
+ tcp_v4_checksum_init(skb) < 0))
+ goto bad_packet;
+
++#ifdef CONFIG_NET_STEALTH
++ if(sysctl_tcp_ignore_synfin) {
++ if(th->fin && th->syn)
++ goto tcp_bad_flags;
++ }
++
++ if(sysctl_tcp_ignore_bogus) {
++ if(!(th->ack || th->syn || th->rst) || th->res1)
++ goto tcp_bad_flags;
++ }
++
++ if(sysctl_tcp_ignore_ack) {
++ if(th->fin && th->psh && th->urg)
++ goto tcp_bad_flags;
++ }
++#endif
++
+ th = skb->h.th;
+ TCP_SKB_CB(skb)->seq = ntohl(th->seq);
+ TCP_SKB_CB(skb)->end_seq = (TCP_SKB_CB(skb)->seq + th->syn + th->fin +
+@@ -1804,6 +1827,33 @@ process:
+
+ return ret;
+
++#ifdef CONFIG_NET_STEALTH_LOG
++tcp_bad_flags:
++ printk(KERN_INFO
++ "Packet log: badflag DENY %s PROTO=TCP %d.%d.%d.%d:%d "
++ "%d.%d.%d.%d:%d L=%hu:%u:%u S=0x%2.2hX I=%hu:%u:%u "
++ "T=%hu %c%c%c%c%c%c%c%c%c\n",
++ skb->dev->name, NIPQUAD(skb->nh.iph->saddr), ntohs(th->source),
++ NIPQUAD(skb->nh.iph->daddr), ntohs(th->dest),
++ ntohs(skb->nh.iph->tot_len), skb->len, skb->len - th->doff*4,
++ skb->nh.iph->tos, ntohs(skb->nh.iph->id), ntohl(th->seq),
++ ntohl(th->ack_seq), skb->nh.iph->ttl,
++ th->res1 ? '1' : '.',
++ th->ece ? 'E' : '.',
++ th->cwr ? 'C' : '.',
++ th->ack ? 'A' : '.',
++ th->syn ? 'S' : '.',
++ th->fin ? 'F' : '.',
++ th->rst ? 'R' : '.',
++ th->psh ? 'P' : '.',
++ th->urg ? 'U' : '.' );
++ goto bad_packet;
++#else
++tcp_bad_flags:
++ goto bad_packet;
++
++#endif /* CONFIG_NET_STEALTH_LOG */
++
+ no_tcp_socket:
+ if (!xfrm4_policy_check(NULL, XFRM_POLICY_IN, skb))
+ goto discard_it;
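The three stealth sysctls added above are ordinary /proc/sys entries, so besides the echo commands shown in the Kconfig help they can be inspected (or toggled by writing "0" or "1") from a program. A small read-only sketch; the files exist only on kernels built with CONFIG_NET_STEALTH, so missing entries are reported rather than treated as errors:

/* print the current values of the tcp_ignore_* knobs */
#include <stdio.h>

static const char *knobs[] = {
	"/proc/sys/net/ipv4/tcp_ignore_synfin",
	"/proc/sys/net/ipv4/tcp_ignore_bogus",
	"/proc/sys/net/ipv4/tcp_ignore_ack",
};

int main(void)
{
	for (int i = 0; i < 3; i++) {
		FILE *f = fopen(knobs[i], "r");
		int val;

		if (!f) {
			printf("%s: not available\n", knobs[i]);
			continue;
		}
		if (fscanf(f, "%d", &val) == 1)
			printf("%s = %d\n", knobs[i], val);
		fclose(f);
	}
	return 0;
}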
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/3000_netdev-random-core-2.6.7.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/3000_netdev-random-core-2.6.7.patch
new file mode 100644
index 0000000000..38f50b4136
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/3000_netdev-random-core-2.6.7.patch
@@ -0,0 +1,300 @@
+diff -ruN linux-2.6.7-hardened/drivers/net/Kconfig /home/brandon/linux-2.6.7-hardened/drivers/net/Kconfig
+--- linux-2.6.7-hardened/drivers/net/Kconfig 2004-06-30 01:11:09.536167840 -0400
++++ /home/brandon/linux-2.6.7-hardened/drivers/net/Kconfig 2004-06-29 17:47:26.000000000 -0400
+@@ -104,6 +104,22 @@
+
+ If you don't know what to use this for, you don't need it.
+
++config NET_RANDOM
++ bool "Allow Net Devices to contribute to /dev/random"
++ depends on NETDEVICES && EXPERIMENTAL
++ ---help---
++ If you say Y here, network device interrupts will contribute to the
++ kernel entropy pool. Normally, block devices and some other devices
++ feed the pool. Some systems, such as those that are headless or diskless,
++ need additional entropy sources. Some people, however, feel that network
++ devices should not contribute to /dev/random because an external attacker
++ could observe incoming packets in an attempt to learn the entropy pool's
++ state. If you say N, no network device will contribute entropy.
++
++ If you believe there is a chance of your network packets being observed
++ and you doubt the security of the entropy pool's one-way hash, do not
++ enable this. If unsure, say N.
++
+ config ETHERTAP
+ tristate "Ethertap network tap"
+ depends on NETDEVICES && EXPERIMENTAL && NETLINK_DEV
+diff -ruN linux-2.6.7-hardened/include/asm-alpha/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-alpha/signal.h
+--- linux-2.6.7-hardened/include/asm-alpha/signal.h 2004-06-30 01:11:09.537167688 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-alpha/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -121,8 +121,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x40000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 1 /* for blocking signals */
+ #define SIG_UNBLOCK 2 /* for unblocking signals */
+ #define SIG_SETMASK 3 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-arm/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-arm/signal.h
+--- linux-2.6.7-hardened/include/asm-arm/signal.h 2004-06-30 01:11:09.538167536 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-arm/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -126,8 +126,15 @@
+ #define SA_SAMPLE_RANDOM 0x10000000
+ #define SA_IRQNOMASK 0x08000000
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-cris/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-cris/signal.h
+--- linux-2.6.7-hardened/include/asm-cris/signal.h 2004-06-30 01:11:09.538167536 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-cris/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -120,8 +120,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-i386/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-i386/signal.h
+--- linux-2.6.7-hardened/include/asm-i386/signal.h 2004-06-30 01:11:09.566163280 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-i386/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -121,8 +121,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-ia64/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-ia64/signal.h
+--- linux-2.6.7-hardened/include/asm-ia64/signal.h 2004-06-30 01:11:09.585160392 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-ia64/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -126,6 +126,12 @@
+ #define SA_SHIRQ 0x04000000
+ #define SA_PERCPU_IRQ 0x02000000
+
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 0 /* for blocking signals */
+diff -ruN linux-2.6.7-hardened/include/asm-m68k/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-m68k/signal.h
+--- linux-2.6.7-hardened/include/asm-m68k/signal.h 2004-06-30 01:11:09.601157960 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-m68k/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -116,8 +116,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-mips/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-mips/signal.h
+--- linux-2.6.7-hardened/include/asm-mips/signal.h 2004-06-30 01:11:09.617155528 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-mips/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -111,6 +111,12 @@
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x02000000
+
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 1 /* for blocking signals */
+diff -ruN linux-2.6.7-hardened/include/asm-parisc/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-parisc/signal.h
+--- linux-2.6.7-hardened/include/asm-parisc/signal.h 2004-06-30 01:11:09.631153400 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-parisc/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -100,6 +100,12 @@
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
+
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 0 /* for blocking signals */
+diff -ruN linux-2.6.7-hardened/include/asm-ppc/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-ppc/signal.h
+--- linux-2.6.7-hardened/include/asm-ppc/signal.h 2004-06-29 17:47:26.000000000 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-ppc/signal.h 2004-06-30 01:07:33.000000000 -0400
+@@ -111,6 +111,13 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #define SIG_BLOCK 0 /* for blocking signals */
+diff -ruN linux-2.6.7-hardened/include/asm-ppc64/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-ppc64/signal.h
+--- linux-2.6.7-hardened/include/asm-ppc64/signal.h 2004-06-30 01:11:09.659149144 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-ppc64/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -107,8 +107,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-s390/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-s390/signal.h
+--- linux-2.6.7-hardened/include/asm-s390/signal.h 2004-06-30 01:11:09.671147320 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-s390/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -129,8 +129,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-sh/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-sh/signal.h
+--- linux-2.6.7-hardened/include/asm-sh/signal.h 2004-06-30 01:11:09.687144888 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-sh/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -120,8 +120,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
+diff -ruN linux-2.6.7-hardened/include/asm-sparc/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-sparc/signal.h
+--- linux-2.6.7-hardened/include/asm-sparc/signal.h 2004-06-30 01:11:09.720139872 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-sparc/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -176,8 +176,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_STATIC_ALLOC 0x80
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ /* Type of a signal handler. */
+ #ifdef __KERNEL__
+ typedef void (*__sighandler_t)(int, int, struct sigcontext *, char *);
+diff -ruN linux-2.6.7-hardened/include/asm-sparc64/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-sparc64/signal.h
+--- linux-2.6.7-hardened/include/asm-sparc64/signal.h 2004-06-30 01:11:09.731138200 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-sparc64/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -182,8 +182,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_STATIC_ALLOC 0x80
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ /* Type of a signal handler. */
+ #ifdef __KERNEL__
+ typedef void (*__sighandler_t)(int, struct sigcontext *);
+diff -ruN linux-2.6.7-hardened/include/asm-x86_64/signal.h /home/brandon/linux-2.6.7-hardened/include/asm-x86_64/signal.h
+--- linux-2.6.7-hardened/include/asm-x86_64/signal.h 2004-06-30 01:11:09.753134856 -0400
++++ /home/brandon/linux-2.6.7-hardened/include/asm-x86_64/signal.h 2004-06-29 17:47:26.000000000 -0400
+@@ -128,8 +128,15 @@
+ #define SA_PROBE SA_ONESHOT
+ #define SA_SAMPLE_RANDOM SA_RESTART
+ #define SA_SHIRQ 0x04000000
++
++#ifdef CONFIG_NET_RANDOM
++#define SA_NET_RANDOM SA_SAMPLE_RANDOM
++#else
++#define SA_NET_RANDOM 0
+ #endif
+
++#endif /* __KERNEL__ */
++
+ #define SIG_BLOCK 0 /* for blocking signals */
+ #define SIG_UNBLOCK 1 /* for unblocking signals */
+ #define SIG_SETMASK 2 /* for setting the signal mask */
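Whether the SA_NET_RANDOM definitions above actually feed the pool depends on CONFIG_NET_RANDOM and on the per-driver request_irq() conversion in the next patch. A rough way to see the effect is to watch the kernel's entropy estimate while generating network traffic; entropy_avail is a standard procfs entry, so this sketch runs on any 2.6 kernel, patched or not:

/* sample the entropy estimate a few times, one second apart */
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	for (int i = 0; i < 5; i++) {
		FILE *f = fopen("/proc/sys/kernel/random/entropy_avail", "r");
		int bits;

		if (f && fscanf(f, "%d", &bits) == 1)
			printf("entropy_avail: %d bits\n", bits);
		if (f)
			fclose(f);
		sleep(1);
	}
	return 0;
}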
diff --git a/src/kernel/hardened-patches/hardened-patches-2.6-7.6/3005_netdev-random-drivers-2.6.7.patch b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/3005_netdev-random-drivers-2.6.7.patch
new file mode 100644
index 0000000000..c462150bd3
--- /dev/null
+++ b/src/kernel/hardened-patches/hardened-patches-2.6-7.6/3005_netdev-random-drivers-2.6.7.patch
@@ -0,0 +1,2362 @@
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c501.c linux-2.6.7-netdev_random/drivers/net/3c501.c
+--- linux-2.6.7/drivers/net/3c501.c 2004-06-16 07:18:45.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c501.c 2004-06-24 10:16:08.089264664 +0200
+@@ -347,7 +347,7 @@ static int el_open(struct net_device *de
+ if (el_debug > 2)
+ printk(KERN_DEBUG "%s: Doing el_open()...", dev->name);
+
+- if ((retval = request_irq(dev->irq, &el_interrupt, 0, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, &el_interrupt, SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ spin_lock_irqsave(&lp->lock, flags);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c503.c linux-2.6.7-netdev_random/drivers/net/3c503.c
+--- linux-2.6.7/drivers/net/3c503.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c503.c 2004-06-24 10:16:08.094263904 +0200
+@@ -380,7 +380,7 @@ el2_open(struct net_device *dev)
+ outb_p(0x00, E33G_IDCFR);
+ if (*irqp == probe_irq_off(cookie) /* It's a good IRQ line! */
+ && ((retval = request_irq(dev->irq = *irqp,
+- ei_interrupt, 0, dev->name, dev)) == 0))
++ ei_interrupt, SA_NET_RANDOM, dev->name, dev)) == 0))
+ break;
+ }
+ } while (*++irqp);
+@@ -389,7 +389,7 @@ el2_open(struct net_device *dev)
+ return retval;
+ }
+ } else {
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ return retval;
+ }
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c505.c linux-2.6.7-netdev_random/drivers/net/3c505.c
+--- linux-2.6.7/drivers/net/3c505.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c505.c 2004-06-24 10:16:08.097263448 +0200
+@@ -905,7 +905,7 @@ static int elp_open(struct net_device *d
+ /*
+ * install our interrupt service routine
+ */
+- if ((retval = request_irq(dev->irq, &elp_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, &elp_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: could not allocate IRQ%d\n", dev->name, dev->irq);
+ return retval;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c507.c linux-2.6.7-netdev_random/drivers/net/3c507.c
+--- linux-2.6.7/drivers/net/3c507.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c507.c 2004-06-24 10:16:08.102262688 +0200
+@@ -392,7 +392,7 @@ static int __init el16_probe1(struct net
+
+ irq = inb(ioaddr + IRQ_CONFIG) & 0x0f;
+
+- irqval = request_irq(irq, &el16_interrupt, 0, dev->name, dev);
++ irqval = request_irq(irq, &el16_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (irqval) {
+ printk ("unable to get IRQ %d (irqval=%d).\n", irq, irqval);
+ retval = -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c509.c linux-2.6.7-netdev_random/drivers/net/3c509.c
+--- linux-2.6.7/drivers/net/3c509.c 2004-06-16 07:18:55.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c509.c 2004-06-24 10:16:08.105262232 +0200
+@@ -809,7 +809,7 @@ el3_open(struct net_device *dev)
+ outw(RxReset, ioaddr + EL3_CMD);
+ outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+- i = request_irq(dev->irq, &el3_interrupt, 0, dev->name, dev);
++ i = request_irq(dev->irq, &el3_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c515.c linux-2.6.7-netdev_random/drivers/net/3c515.c
+--- linux-2.6.7/drivers/net/3c515.c 2004-06-16 07:18:59.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c515.c 2004-06-24 10:16:08.176251440 +0200
+@@ -756,11 +756,11 @@ static int corkscrew_open(struct net_dev
+ /* Corkscrew: Cannot share ISA resources. */
+ if (dev->irq == 0
+ || dev->dma == 0
+- || request_irq(dev->irq, &corkscrew_interrupt, 0,
++ || request_irq(dev->irq, &corkscrew_interrupt, SA_NET_RANDOM,
+ vp->product_name, dev)) return -EAGAIN;
+ enable_dma(dev->dma);
+ set_dma_mode(dev->dma, DMA_MODE_CASCADE);
+- } else if (request_irq(dev->irq, &corkscrew_interrupt, SA_SHIRQ,
++ } else if (request_irq(dev->irq, &corkscrew_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ vp->product_name, dev)) {
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c523.c linux-2.6.7-netdev_random/drivers/net/3c523.c
+--- linux-2.6.7/drivers/net/3c523.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c523.c 2004-06-24 10:16:08.182250528 +0200
+@@ -288,7 +288,7 @@ static int elmc_open(struct net_device *
+
+ elmc_id_attn586(); /* disable interrupts */
+
+- ret = request_irq(dev->irq, &elmc_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM,
++ ret = request_irq(dev->irq, &elmc_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: couldn't get irq %d\n", dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c527.c linux-2.6.7-netdev_random/drivers/net/3c527.c
+--- linux-2.6.7/drivers/net/3c527.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c527.c 2004-06-24 10:16:08.195248552 +0200
+@@ -435,7 +435,7 @@ static int __init mc32_probe1(struct net
+ * Grab the IRQ
+ */
+
+- err = request_irq(dev->irq, &mc32_interrupt, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
++ err = request_irq(dev->irq, &mc32_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (err) {
+ release_region(dev->base_addr, MC32_IO_EXTENT);
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/3c59x.c linux-2.6.7-netdev_random/drivers/net/3c59x.c
+--- linux-2.6.7/drivers/net/3c59x.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/3c59x.c 2004-06-24 10:16:08.209246424 +0200
+@@ -1735,7 +1735,7 @@ vortex_open(struct net_device *dev)
+
+ /* Use the now-standard shared IRQ implementation. */
+ if ((retval = request_irq(dev->irq, vp->full_bus_master_rx ?
+- &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ, dev->name, dev))) {
++ &boomerang_interrupt : &vortex_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: Could not reserve IRQ %d\n", dev->name, dev->irq);
+ goto out;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/7990.c linux-2.6.7-netdev_random/drivers/net/7990.c
+--- linux-2.6.7/drivers/net/7990.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/7990.c 2004-06-24 10:16:08.215245512 +0200
+@@ -462,7 +462,7 @@ int lance_open (struct net_device *dev)
+ DECLARE_LL;
+
+ /* Install the Interrupt handler. Or we could shunt this out to specific drivers? */
+- if (request_irq(lp->irq, lance_interrupt, 0, lp->name, dev))
++ if (request_irq(lp->irq, lance_interrupt, SA_NET_RANDOM, lp->name, dev))
+ return -EAGAIN;
+
+ res = lance_reset(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/8139cp.c linux-2.6.7-netdev_random/drivers/net/8139cp.c
+--- linux-2.6.7/drivers/net/8139cp.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/8139cp.c 2004-06-24 10:16:08.230243232 +0200
+@@ -1172,7 +1172,7 @@ static int cp_open (struct net_device *d
+
+ cp_init_hw(cp);
+
+- rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ, dev->name, dev);
++ rc = request_irq(dev->irq, cp_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (rc)
+ goto err_out_hw;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/8139too.c linux-2.6.7-netdev_random/drivers/net/8139too.c
+--- linux-2.6.7/drivers/net/8139too.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/8139too.c 2004-06-24 10:16:08.244241104 +0200
+@@ -1325,7 +1325,7 @@ static int rtl8139_open (struct net_devi
+ int retval;
+ void *ioaddr = tp->mmio_addr;
+
+- retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq (dev->irq, rtl8139_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval)
+ return retval;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/82596.c linux-2.6.7-netdev_random/drivers/net/82596.c
+--- linux-2.6.7/drivers/net/82596.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/82596.c 2004-06-24 10:16:08.252239888 +0200
+@@ -1005,7 +1005,7 @@ static int i596_open(struct net_device *
+
+ DEB(DEB_OPEN,printk(KERN_DEBUG "%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+- if (request_irq(dev->irq, i596_interrupt, 0, "i82596", dev)) {
++ if (request_irq(dev->irq, i596_interrupt, SA_NET_RANDOM, "i82596", dev)) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/a2065.c linux-2.6.7-netdev_random/drivers/net/a2065.c
+--- linux-2.6.7/drivers/net/a2065.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/a2065.c 2004-06-24 10:16:08.267237608 +0200
+@@ -496,7 +496,7 @@ static int lance_open (struct net_device
+ ll->rdp = LE_C0_STOP;
+
+ /* Install the Interrupt handler */
+- ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, SA_SHIRQ,
++ ret = request_irq(IRQ_AMIGA_PORTS, lance_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret) return ret;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ac3200.c linux-2.6.7-netdev_random/drivers/net/ac3200.c
+--- linux-2.6.7/drivers/net/ac3200.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ac3200.c 2004-06-24 10:16:08.274236544 +0200
+@@ -203,7 +203,7 @@ static int __init ac_probe1(int ioaddr,
+ printk(", assigning");
+ }
+
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk (" nothing! Unable to get IRQ %d.\n", dev->irq);
+ goto out1;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/acenic.c linux-2.6.7-netdev_random/drivers/net/acenic.c
+--- linux-2.6.7/drivers/net/acenic.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/acenic.c 2004-06-24 10:16:08.291233960 +0200
+@@ -1194,7 +1194,7 @@ static int __init ace_init(struct net_de
+ goto init_error;
+ }
+
+- ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ,
++ ecode = request_irq(pdev->irq, ace_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ecode) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/amd8111e.c linux-2.6.7-netdev_random/drivers/net/amd8111e.c
+--- linux-2.6.7/drivers/net/amd8111e.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/amd8111e.c 2004-06-24 10:46:11.580092384 +0200
+@@ -1372,7 +1372,7 @@ static int amd8111e_open(struct net_devi
+ {
+ struct amd8111e_priv *lp = netdev_priv(dev);
+
+- if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ,
++ if(dev->irq ==0 || request_irq(dev->irq, amd8111e_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev))
+ return -EAGAIN;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/apne.c linux-2.6.7-netdev_random/drivers/net/apne.c
+--- linux-2.6.7/drivers/net/apne.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/apne.c 2004-06-24 10:16:08.301232440 +0200
+@@ -310,7 +310,7 @@ static int __init apne_probe1(struct net
+ dev->base_addr = ioaddr;
+
+ /* Install the Interrupt handler */
+- i = request_irq(IRQ_AMIGA_PORTS, apne_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(IRQ_AMIGA_PORTS, apne_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/appletalk/cops.c linux-2.6.7-netdev_random/drivers/net/appletalk/cops.c
+--- linux-2.6.7/drivers/net/appletalk/cops.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/appletalk/cops.c 2004-06-24 10:16:08.325228792 +0200
+@@ -326,7 +326,7 @@ static int __init cops_probe1(struct net
+
+ /* Reserve any actual interrupt. */
+ if (dev->irq) {
+- retval = request_irq(dev->irq, &cops_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, &cops_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval)
+ goto err_out;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/appletalk/ltpc.c linux-2.6.7-netdev_random/drivers/net/appletalk/ltpc.c
+--- linux-2.6.7/drivers/net/appletalk/ltpc.c 2004-06-16 07:19:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/appletalk/ltpc.c 2004-06-24 10:16:08.333227576 +0200
+@@ -1182,7 +1182,7 @@ struct net_device * __init ltpc_probe(vo
+ }
+
+ /* grab it and don't let go :-) */
+- if (irq && request_irq( irq, &ltpc_interrupt, 0, "ltpc", dev) >= 0)
++ if (irq && request_irq( irq, &ltpc_interrupt, SA_NET_RANDOM, "ltpc", dev) >= 0)
+ {
+ (void) inb_p(io+7); /* enable interrupts from board */
+ (void) inb_p(io+7); /* and reset irq line */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arcnet/arc-rimi.c linux-2.6.7-netdev_random/drivers/net/arcnet/arc-rimi.c
+--- linux-2.6.7/drivers/net/arcnet/arc-rimi.c 2004-06-16 07:18:56.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arcnet/arc-rimi.c 2004-06-24 10:16:08.341226360 +0200
+@@ -129,7 +129,7 @@ static int __init arcrimi_found(struct n
+ int err;
+
+ /* reserve the irq */
+- if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (RIM I)", dev)) {
++ if (request_irq(dev->irq, &arcnet_interrupt, SA_NET_RANDOM, "arcnet (RIM I)", dev)) {
+ release_mem_region(dev->mem_start, BUFFER_SIZE);
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arcnet/com20020.c linux-2.6.7-netdev_random/drivers/net/arcnet/com20020.c
+--- linux-2.6.7/drivers/net/arcnet/com20020.c 2004-06-16 07:19:09.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arcnet/com20020.c 2004-06-24 10:16:08.351224840 +0200
+@@ -195,7 +195,7 @@ int com20020_found(struct net_device *de
+ outb(dev->dev_addr[0], _XREG);
+
+ /* reserve the irq */
+- if (request_irq(dev->irq, &arcnet_interrupt, shared,
++ if (request_irq(dev->irq, &arcnet_interrupt, shared | SA_NET_RANDOM,
+ "arcnet (COM20020)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arcnet/com90io.c linux-2.6.7-netdev_random/drivers/net/arcnet/com90io.c
+--- linux-2.6.7/drivers/net/arcnet/com90io.c 2004-06-16 07:19:10.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arcnet/com90io.c 2004-06-24 10:16:08.358223776 +0200
+@@ -238,7 +238,7 @@ static int __init com90io_found(struct n
+ int err;
+
+ /* Reserve the irq */
+- if (request_irq(dev->irq, &arcnet_interrupt, 0, "arcnet (COM90xx-IO)", dev)) {
++ if (request_irq(dev->irq, &arcnet_interrupt, SA_NET_RANDOM, "arcnet (COM90xx-IO)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", dev->irq);
+ return -ENODEV;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arcnet/com90xx.c linux-2.6.7-netdev_random/drivers/net/arcnet/com90xx.c
+--- linux-2.6.7/drivers/net/arcnet/com90xx.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arcnet/com90xx.c 2004-06-24 10:16:08.366222560 +0200
+@@ -445,7 +445,7 @@ static int __init com90xx_found(int ioad
+ goto err_free_dev;
+
+ /* reserve the irq */
+- if (request_irq(airq, &arcnet_interrupt, 0, "arcnet (90xx)", dev)) {
++ if (request_irq(airq, &arcnet_interrupt, SA_NET_RANDOM, "arcnet (90xx)", dev)) {
+ BUGMSG(D_NORMAL, "Can't get IRQ %d!\n", airq);
+ goto err_release_mem;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ariadne.c linux-2.6.7-netdev_random/drivers/net/ariadne.c
+--- linux-2.6.7/drivers/net/ariadne.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ariadne.c 2004-06-24 10:16:08.384219824 +0200
+@@ -320,7 +320,7 @@ static int ariadne_open(struct net_devic
+
+ netif_start_queue(dev);
+
+- i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, SA_SHIRQ,
++ i = request_irq(IRQ_AMIGA_PORTS, ariadne_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (i) return i;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arm/am79c961a.c linux-2.6.7-netdev_random/drivers/net/arm/am79c961a.c
+--- linux-2.6.7/drivers/net/arm/am79c961a.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arm/am79c961a.c 2004-06-24 10:16:08.398217696 +0200
+@@ -302,7 +302,7 @@ am79c961_open(struct net_device *dev)
+
+ memset (&priv->stats, 0, sizeof (priv->stats));
+
+- ret = request_irq(dev->irq, am79c961_interrupt, 0, dev->name, dev);
++ ret = request_irq(dev->irq, am79c961_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arm/ether00.c linux-2.6.7-netdev_random/drivers/net/arm/ether00.c
+--- linux-2.6.7/drivers/net/arm/ether00.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arm/ether00.c 2004-06-24 10:16:08.414215264 +0200
+@@ -706,11 +706,11 @@ static int ether00_open(struct net_devic
+ return -EINVAL;
+
+ /* Install interrupt handlers */
+- result=request_irq(dev->irq,ether00_int,0,"ether00",dev);
++ result=request_irq(dev->irq,ether00_int,SA_NET_RANDOM,"ether00",dev);
+ if(result)
+ goto open_err1;
+
+- result=request_irq(2,ether00_phy_int,0,"ether00_phy",dev);
++ result=request_irq(2,ether00_phy_int,SA_NET_RANDOM,"ether00_phy",dev);
+ if(result)
+ goto open_err2;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arm/ether1.c linux-2.6.7-netdev_random/drivers/net/arm/ether1.c
+--- linux-2.6.7/drivers/net/arm/ether1.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arm/ether1.c 2004-06-24 10:16:08.423213896 +0200
+@@ -650,7 +650,7 @@ ether1_open (struct net_device *dev)
+ return -EINVAL;
+ }
+
+- if (request_irq(dev->irq, ether1_interrupt, 0, "ether1", dev))
++ if (request_irq(dev->irq, ether1_interrupt, SA_NET_RANDOM, "ether1", dev))
+ return -EAGAIN;
+
+ memset (&priv->stats, 0, sizeof (struct net_device_stats));
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arm/ether3.c linux-2.6.7-netdev_random/drivers/net/arm/ether3.c
+--- linux-2.6.7/drivers/net/arm/ether3.c 2004-06-16 07:20:19.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arm/ether3.c 2004-06-24 10:16:08.432212528 +0200
+@@ -418,7 +418,7 @@ ether3_open(struct net_device *dev)
+ return -EINVAL;
+ }
+
+- if (request_irq(dev->irq, ether3_interrupt, 0, "ether3", dev))
++ if (request_irq(dev->irq, ether3_interrupt, SA_NET_RANDOM, "ether3", dev))
+ return -EAGAIN;
+
+ ether3_init_for_open(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/arm/etherh.c linux-2.6.7-netdev_random/drivers/net/arm/etherh.c
+--- linux-2.6.7/drivers/net/arm/etherh.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/arm/etherh.c 2004-06-24 10:16:08.450209792 +0200
+@@ -459,7 +459,7 @@ etherh_open(struct net_device *dev)
+ return -EINVAL;
+ }
+
+- if (request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))
+ return -EAGAIN;
+
+ /*
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/at1700.c linux-2.6.7-netdev_random/drivers/net/at1700.c
+--- linux-2.6.7/drivers/net/at1700.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/at1700.c 2004-06-24 10:16:08.460208272 +0200
+@@ -542,7 +542,7 @@ found:
+ lp->jumpered = is_fmv18x;
+ lp->mca_slot = slot;
+ /* Snarf the interrupt vector now. */
+- ret = request_irq(irq, &net_interrupt, 0, dev->name, dev);
++ ret = request_irq(irq, &net_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk (" AT1700 at %#3x is unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/atarilance.c linux-2.6.7-netdev_random/drivers/net/atarilance.c
+--- linux-2.6.7/drivers/net/atarilance.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/atarilance.c 2004-06-24 10:48:56.429031536 +0200
+@@ -548,7 +548,7 @@ static unsigned long __init lance_probe1
+ if (lp->cardtype == PAM_CARD ||
+ memaddr == (unsigned short *)0xffe00000) {
+ /* PAMs card and Riebl on ST use level 5 autovector */
+- if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO,
++ if (request_irq(IRQ_AUTO_5, lance_interrupt, IRQ_TYPE_PRIO | SA_NET_RANDOM,
+ "PAM/Riebl-ST Ethernet", dev)) {
+ printk( "Lance: request for irq %d failed\n", IRQ_AUTO_5 );
+ return( 0 );
+@@ -565,7 +565,7 @@ static unsigned long __init lance_probe1
+ printk( "Lance: request for VME interrupt failed\n" );
+ return( 0 );
+ }
+- if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
++ if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO | SA_NET_RANDOM,
+ "Riebl-VME Ethernet", dev)) {
+ printk( "Lance: request for irq %ld failed\n", irq );
+ return( 0 );
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/atp.c linux-2.6.7-netdev_random/drivers/net/atp.c
+--- linux-2.6.7/drivers/net/atp.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/atp.c 2004-06-24 10:16:08.471206600 +0200
+@@ -438,7 +438,7 @@ static int net_open(struct net_device *d
+ /* The interrupt line is turned off (tri-stated) when the device isn't in
+ use. That's especially important for "attached" interfaces where the
+ port or interrupt may be shared. */
+- ret = request_irq(dev->irq, &atp_interrupt, 0, dev->name, dev);
++ ret = request_irq(dev->irq, &atp_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/au1000_eth.c linux-2.6.7-netdev_random/drivers/net/au1000_eth.c
+--- linux-2.6.7/drivers/net/au1000_eth.c 2004-06-16 07:19:29.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/au1000_eth.c 2004-06-24 10:16:08.482204928 +0200
+@@ -942,7 +942,7 @@ static int au1000_open(struct net_device
+ }
+ netif_start_queue(dev);
+
+- if ((retval = request_irq(dev->irq, &au1000_interrupt, 0,
++ if ((retval = request_irq(dev->irq, &au1000_interrupt, SA_NET_RANDOM,
+ dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/b44.c linux-2.6.7-netdev_random/drivers/net/b44.c
+--- linux-2.6.7/drivers/net/b44.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/b44.c 2004-06-24 10:16:08.499202344 +0200
+@@ -1247,7 +1247,7 @@ static int b44_open(struct net_device *d
+ if (err)
+ return err;
+
+- err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ, dev->name, dev);
++ err = request_irq(dev->irq, b44_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (err)
+ goto err_out_free;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/bagetlance.c linux-2.6.7-netdev_random/drivers/net/bagetlance.c
+--- linux-2.6.7/drivers/net/bagetlance.c 2004-06-16 07:18:54.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/bagetlance.c 2004-06-24 10:49:20.606356024 +0200
+@@ -625,7 +625,7 @@ static int __init lance_probe1( struct n
+ if (lp->cardtype == PAM_CARD ||
+ memaddr == (unsigned short *)0xffe00000) {
+ /* PAMs card and Riebl on ST use level 5 autovector */
+- if (request_irq(BAGET_LANCE_IRQ, lance_interrupt, IRQ_TYPE_PRIO,
++ if (request_irq(BAGET_LANCE_IRQ, lance_interrupt, IRQ_TYPE_PRIO | SA_NET_RANDOM,
+ "PAM/Riebl-ST Ethernet", dev))
+ goto probe_fail;
+ dev->irq = (unsigned short)BAGET_LANCE_IRQ;
+@@ -640,7 +640,7 @@ static int __init lance_probe1( struct n
+ printk( "Lance: request for VME interrupt failed\n" );
+ goto probe_fail;
+ }
+- if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO,
++ if (request_irq(irq, lance_interrupt, IRQ_TYPE_PRIO | SA_NET_RANDOM,
+ "Riebl-VME Ethernet", dev))
+ goto probe_fail;
+ dev->irq = irq;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/bmac.c linux-2.6.7-netdev_random/drivers/net/bmac.c
+--- linux-2.6.7/drivers/net/bmac.c 2004-06-16 07:18:55.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/bmac.c 2004-06-24 10:16:08.517199608 +0200
+@@ -1350,7 +1350,7 @@ static int __devinit bmac_probe(struct m
+
+ init_timer(&bp->tx_timeout);
+
+- ret = request_irq(dev->irq, bmac_misc_intr, 0, "BMAC-misc", dev);
++ ret = request_irq(dev->irq, bmac_misc_intr, SA_NET_RANDOM, "BMAC-misc", dev);
+ if (ret) {
+ printk(KERN_ERR "BMAC: can't get irq %d\n", dev->irq);
+ goto err_out_iounmap_rx;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/cs89x0.c linux-2.6.7-netdev_random/drivers/net/cs89x0.c
+--- linux-2.6.7/drivers/net/cs89x0.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/cs89x0.c 2004-06-24 10:16:08.527198088 +0200
+@@ -1134,7 +1134,7 @@ net_open(struct net_device *dev)
+
+ for (i = 2; i < CS8920_NO_INTS; i++) {
+ if ((1 << i) & lp->irq_map) {
+- if (request_irq(i, net_interrupt, 0, dev->name, dev) == 0) {
++ if (request_irq(i, net_interrupt, SA_NET_RANDOM, dev->name, dev) == 0) {
+ dev->irq = i;
+ write_irq(dev, lp->chip_type, i);
+ /* writereg(dev, PP_BufCFG, GENERATE_SW_INTERRUPT); */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/de600.c linux-2.6.7-netdev_random/drivers/net/de600.c
+--- linux-2.6.7/drivers/net/de600.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/de600.c 2004-06-24 10:16:08.535196872 +0200
+@@ -134,7 +134,7 @@ static inline u8 de600_read_byte(unsigne
+ static int de600_open(struct net_device *dev)
+ {
+ unsigned long flags;
+- int ret = request_irq(DE600_IRQ, de600_interrupt, 0, dev->name, dev);
++ int ret = request_irq(DE600_IRQ, de600_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n", dev->name, DE600_IRQ);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/de620.c linux-2.6.7-netdev_random/drivers/net/de620.c
+--- linux-2.6.7/drivers/net/de620.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/de620.c 2004-06-24 10:16:08.545195352 +0200
+@@ -444,7 +444,7 @@ de620_get_register(struct net_device *de
+ */
+ static int de620_open(struct net_device *dev)
+ {
+- int ret = request_irq(dev->irq, de620_interrupt, 0, dev->name, dev);
++ int ret = request_irq(dev->irq, de620_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk (KERN_ERR "%s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/declance.c linux-2.6.7-netdev_random/drivers/net/declance.c
+--- linux-2.6.7/drivers/net/declance.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/declance.c 2004-06-24 10:18:39.248285000 +0200
+@@ -785,14 +785,14 @@ static int lance_open(struct net_device
+ netif_start_queue(dev);
+
+ /* Associate IRQ with lance_interrupt */
+- if (request_irq(dev->irq, &lance_interrupt, 0, "lance", dev)) {
++ if (request_irq(dev->irq, &lance_interrupt, SA_NET_RANDOM, "lance", dev)) {
+ printk("%s: Can't get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+ if (lp->dma_irq >= 0) {
+ unsigned long flags;
+
+- if (request_irq(lp->dma_irq, &lance_dma_merr_int, 0,
++ if (request_irq(lp->dma_irq, &lance_dma_merr_int, SA_NET_RANDOM,
+ "lance error", dev)) {
+ free_irq(dev->irq, dev);
+ printk("%s: Can't get DMA IRQ %d\n", dev->name,
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/defxx.c linux-2.6.7-netdev_random/drivers/net/defxx.c
+--- linux-2.6.7/drivers/net/defxx.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/defxx.c 2004-06-24 10:16:08.583189576 +0200
+@@ -1221,7 +1221,7 @@ static int dfx_open(struct net_device *d
+
+ /* Register IRQ - support shared interrupts by passing device ptr */
+
+- ret = request_irq(dev->irq, (void *)dfx_interrupt, SA_SHIRQ, dev->name, dev);
++ ret = request_irq(dev->irq, (void *)dfx_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Requested IRQ %d is busy\n", dev->name, dev->irq);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/depca.c linux-2.6.7-netdev_random/drivers/net/depca.c
+--- linux-2.6.7/drivers/net/depca.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/depca.c 2004-06-24 10:16:08.593188056 +0200
+@@ -840,7 +840,7 @@ static int depca_open(struct net_device
+
+ depca_dbg_open(dev);
+
+- if (request_irq(dev->irq, &depca_interrupt, 0, lp->adapter_name, dev)) {
++ if (request_irq(dev->irq, &depca_interrupt, SA_NET_RANDOM, lp->adapter_name, dev)) {
+ printk("depca_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/dgrs.c linux-2.6.7-netdev_random/drivers/net/dgrs.c
+--- linux-2.6.7/drivers/net/dgrs.c 2004-06-16 07:18:56.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/dgrs.c 2004-06-24 10:16:08.612185168 +0200
+@@ -1191,7 +1191,7 @@ dgrs_probe1(struct net_device *dev)
+ if (priv->plxreg)
+ OUTL(dev->base_addr + PLX_LCL2PCI_DOORBELL, 1);
+
+- rc = request_irq(dev->irq, &dgrs_intr, SA_SHIRQ, "RightSwitch", dev);
++ rc = request_irq(dev->irq, &dgrs_intr, SA_SHIRQ | SA_NET_RANDOM, "RightSwitch", dev);
+ if (rc)
+ goto err_out;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/dl2k.c linux-2.6.7-netdev_random/drivers/net/dl2k.c
+--- linux-2.6.7/drivers/net/dl2k.c 2004-06-16 07:19:17.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/dl2k.c 2004-06-24 10:16:08.622183648 +0200
+@@ -437,7 +437,7 @@ rio_open (struct net_device *dev)
+ int i;
+ u16 macctrl;
+
+- i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq (dev->irq, &rio_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/e1000/e1000_main.c linux-2.6.7-netdev_random/drivers/net/e1000/e1000_main.c
+--- linux-2.6.7/drivers/net/e1000/e1000_main.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/e1000/e1000_main.c 2004-06-24 10:16:08.691173160 +0200
+@@ -266,7 +266,7 @@ e1000_up(struct e1000_adapter *adapter)
+ e1000_alloc_rx_buffers(adapter);
+
+ if((err = request_irq(adapter->pdev->irq, &e1000_intr,
+- SA_SHIRQ | SA_SAMPLE_RANDOM,
++ SA_SHIRQ | SA_NET_RANDOM,
+ netdev->name, netdev)))
+ return err;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/e100.c linux-2.6.7-netdev_random/drivers/net/e100.c
+--- linux-2.6.7/drivers/net/e100.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/e100.c 2004-06-24 10:16:08.642180608 +0200
+@@ -1660,7 +1660,7 @@ static int e100_up(struct nic *nic)
+ e100_set_multicast_list(nic->netdev);
+ e100_start_receiver(nic);
+ mod_timer(&nic->watchdog, jiffies);
+- if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ,
++ if((err = request_irq(nic->pdev->irq, e100_intr, SA_SHIRQ | SA_NET_RANDOM,
+ nic->netdev->name, nic->netdev)))
+ goto err_no_irq;
+ e100_enable_irq(nic);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/e2100.c linux-2.6.7-netdev_random/drivers/net/e2100.c
+--- linux-2.6.7/drivers/net/e2100.c 2004-06-16 07:20:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/e2100.c 2004-06-24 10:16:08.707170728 +0200
+@@ -286,7 +286,7 @@ e21_open(struct net_device *dev)
+ short ioaddr = dev->base_addr;
+ int retval;
+
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ /* Set the interrupt line and memory base on the hardware. */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/eepro100.c linux-2.6.7-netdev_random/drivers/net/eepro100.c
+--- linux-2.6.7/drivers/net/eepro100.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/eepro100.c 2004-06-24 10:16:08.738166016 +0200
+@@ -1032,7 +1032,7 @@ speedo_open(struct net_device *dev)
+ sp->in_interrupt = 0;
+
+ /* .. we can safely take handler calls during init. */
+- retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq(dev->irq, &speedo_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ return retval;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/eepro.c linux-2.6.7-netdev_random/drivers/net/eepro.c
+--- linux-2.6.7/drivers/net/eepro.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/eepro.c 2004-06-24 10:16:08.719168904 +0200
+@@ -964,7 +964,7 @@ static int eepro_open(struct net_device
+ return -EAGAIN;
+ }
+
+- if (request_irq(dev->irq , &eepro_interrupt, 0, dev->name, dev)) {
++ if (request_irq(dev->irq , &eepro_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/eexpress.c linux-2.6.7-netdev_random/drivers/net/eexpress.c
+--- linux-2.6.7/drivers/net/eexpress.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/eexpress.c 2004-06-24 10:16:08.748164496 +0200
+@@ -461,7 +461,7 @@ static int eexp_open(struct net_device *
+ if (!dev->irq || !irqrmap[dev->irq])
+ return -ENXIO;
+
+- ret = request_irq(dev->irq,&eexp_irq,0,dev->name,dev);
++ ret = request_irq(dev->irq,&eexp_irq,SA_NET_RANDOM,dev->name,dev);
+ if (ret) return ret;
+
+ if (!request_region(ioaddr, EEXP_IO_EXTENT, "EtherExpress")) {
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/epic100.c linux-2.6.7-netdev_random/drivers/net/epic100.c
+--- linux-2.6.7/drivers/net/epic100.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/epic100.c 2004-06-24 10:16:08.767161608 +0200
+@@ -680,7 +680,7 @@ static int epic_open(struct net_device *
+ /* Soft reset the chip. */
+ outl(0x4001, ioaddr + GENCTL);
+
+- if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, &epic_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ epic_init_ring(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/es3210.c linux-2.6.7-netdev_random/drivers/net/es3210.c
+--- linux-2.6.7/drivers/net/es3210.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/es3210.c 2004-06-24 10:16:08.775160392 +0200
+@@ -248,7 +248,7 @@ static int __init es_probe1(struct net_d
+ printk(" assigning IRQ %d", dev->irq);
+ }
+
+- if (request_irq(dev->irq, ei_interrupt, 0, "es3210", dev)) {
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, "es3210", dev)) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ retval = -EAGAIN;
+ goto out;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/eth16i.c linux-2.6.7-netdev_random/drivers/net/eth16i.c
+--- linux-2.6.7/drivers/net/eth16i.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/eth16i.c 2004-06-24 10:16:08.787158568 +0200
+@@ -538,7 +538,7 @@ static int __init eth16i_probe1(struct n
+
+ /* Try to obtain interrupt vector */
+
+- if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, (void *)&eth16i_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_WARNING "%s: %s at %#3x, but is unusable due conflicting IRQ %d.\n",
+ dev->name, cardname, ioaddr, dev->irq);
+ goto out;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ewrk3.c linux-2.6.7-netdev_random/drivers/net/ewrk3.c
+--- linux-2.6.7/drivers/net/ewrk3.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ewrk3.c 2004-06-24 10:16:08.797157048 +0200
+@@ -630,7 +630,7 @@ static int ewrk3_open(struct net_device
+ STOP_EWRK3;
+
+ if (!lp->hard_strapped) {
+- if (request_irq(dev->irq, (void *) ewrk3_interrupt, 0, "ewrk3", dev)) {
++ if (request_irq(dev->irq, (void *) ewrk3_interrupt, SA_NET_RANDOM, "ewrk3", dev)) {
+ printk("ewrk3_open(): Requested IRQ%d is busy\n", dev->irq);
+ status = -EAGAIN;
+ } else {
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/fc/iph5526.c linux-2.6.7-netdev_random/drivers/net/fc/iph5526.c
+--- linux-2.6.7/drivers/net/fc/iph5526.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/fc/iph5526.c 2004-06-24 10:16:08.814154464 +0200
+@@ -3786,7 +3786,7 @@ int iph5526_detect(Scsi_Host_Template *t
+ int irqval = 0;
+ /* Found it, get IRQ.
+ */
+- irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ : 0, fi->name, host);
++ irqval = request_irq(pci_irq_line, &tachyon_interrupt, pci_irq_line ? SA_SHIRQ | SA_NET_RANDOM : 0, fi->name, host);
+ if (irqval) {
+ printk("iph5526.c : Unable to get IRQ %d (irqval = %d).\n", pci_irq_line, irqval);
+ scsi_unregister(host);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/fealnx.c linux-2.6.7-netdev_random/drivers/net/fealnx.c
+--- linux-2.6.7/drivers/net/fealnx.c 2004-06-16 07:19:42.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/fealnx.c 2004-06-24 10:16:08.832151728 +0200
+@@ -861,7 +861,7 @@ static int netdev_open(struct net_device
+
+ writel(0x00000001, ioaddr + BCR); /* Reset */
+
+- if (request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev))
++ if (request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))
+ return -EAGAIN;
+
+ init_ring(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/fec.c linux-2.6.7-netdev_random/drivers/net/fec.c
+--- linux-2.6.7/drivers/net/fec.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/fec.c 2004-06-24 10:16:08.842150208 +0200
+@@ -1057,13 +1057,13 @@ static void __inline__ fec_request_intrs
+ volatile unsigned long *icrp;
+
+ /* Setup interrupt handlers. */
+- if (request_irq(86, fec_enet_interrupt, 0, "fec(RX)", dev) != 0)
++ if (request_irq(86, fec_enet_interrupt, SA_NET_RANDOM, "fec(RX)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RC) IRQ(86)!\n");
+- if (request_irq(87, fec_enet_interrupt, 0, "fec(TX)", dev) != 0)
++ if (request_irq(87, fec_enet_interrupt, SA_NET_RANDOM, "fec(TX)", dev) != 0)
+ printk("FEC: Could not allocate FEC(RC) IRQ(87)!\n");
+- if (request_irq(88, fec_enet_interrupt, 0, "fec(OTHER)", dev) != 0)
++ if (request_irq(88, fec_enet_interrupt, SA_NET_RANDOM, "fec(OTHER)", dev) != 0)
+ printk("FEC: Could not allocate FEC(OTHER) IRQ(88)!\n");
+- if (request_irq(66, mii_link_interrupt, 0, "fec(MII)", dev) != 0)
++ if (request_irq(66, mii_link_interrupt, SA_NET_RANDOM, "fec(MII)", dev) != 0)
+ printk("FEC: Could not allocate MII IRQ(66)!\n");
+
+ /* Unmask interrupt at ColdFire 5272 SIM */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/fmv18x.c linux-2.6.7-netdev_random/drivers/net/fmv18x.c
+--- linux-2.6.7/drivers/net/fmv18x.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/fmv18x.c 2004-06-24 10:16:08.853148536 +0200
+@@ -224,7 +224,7 @@ static int __init fmv18x_probe1(struct n
+ }
+
+ /* Snarf the interrupt vector now. */
+- retval = request_irq(dev->irq, &net_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, &net_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk ("FMV-18x found at %#3x, but it's unusable due to a conflict on"
+ "IRQ %d.\n", ioaddr, dev->irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/forcedeth.c linux-2.6.7-netdev_random/drivers/net/forcedeth.c
+--- linux-2.6.7/drivers/net/forcedeth.c 2004-06-16 07:18:55.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/forcedeth.c 2004-06-24 10:16:08.863147016 +0200
+@@ -1319,7 +1319,7 @@ static int nv_open(struct net_device *de
+ writel(NVREG_IRQSTAT_MASK, base + NvRegIrqStatus);
+ pci_push(base);
+
+- ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ, dev->name, dev);
++ ret = request_irq(dev->irq, &nv_nic_irq, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ goto out_drain;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/gt96100eth.c linux-2.6.7-netdev_random/drivers/net/gt96100eth.c
+--- linux-2.6.7/drivers/net/gt96100eth.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/gt96100eth.c 2004-06-24 10:16:08.884143824 +0200
+@@ -1080,7 +1080,7 @@ gt96100_open(struct net_device *dev)
+ }
+
+ if ((retval = request_irq(dev->irq, &gt96100_interrupt,
+- SA_SHIRQ, dev->name, dev))) {
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))) {
+ err("unable to get IRQ %d\n", dev->irq);
+ return retval;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hamachi.c linux-2.6.7-netdev_random/drivers/net/hamachi.c
+--- linux-2.6.7/drivers/net/hamachi.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hamachi.c 2004-06-24 10:16:08.895142152 +0200
+@@ -859,7 +859,7 @@ static int hamachi_open(struct net_devic
+ u32 rx_int_var, tx_int_var;
+ u16 fifo_info;
+
+- i = request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &hamachi_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hamradio/baycom_ser_fdx.c linux-2.6.7-netdev_random/drivers/net/hamradio/baycom_ser_fdx.c
+--- linux-2.6.7/drivers/net/hamradio/baycom_ser_fdx.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hamradio/baycom_ser_fdx.c 2004-06-24 10:16:08.924137744 +0200
+@@ -433,7 +433,7 @@ static int ser12_open(struct net_device
+ outb(0, FCR(dev->base_addr)); /* disable FIFOs */
+ outb(0x0d, MCR(dev->base_addr));
+ outb(0, IER(dev->base_addr));
+- if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ,
++ if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM,
+ "baycom_ser_fdx", dev)) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EBUSY;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hamradio/baycom_ser_hdx.c linux-2.6.7-netdev_random/drivers/net/hamradio/baycom_ser_hdx.c
+--- linux-2.6.7/drivers/net/hamradio/baycom_ser_hdx.c 2004-06-16 07:18:55.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hamradio/baycom_ser_hdx.c 2004-06-24 10:16:08.933136376 +0200
+@@ -487,7 +487,7 @@ static int ser12_open(struct net_device
+ outb(0, FCR(dev->base_addr)); /* disable FIFOs */
+ outb(0x0d, MCR(dev->base_addr));
+ outb(0, IER(dev->base_addr));
+- if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ,
++ if (request_irq(dev->irq, ser12_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM,
+ "baycom_ser12", dev)) {
+ release_region(dev->base_addr, SER12_EXTENT);
+ return -EBUSY;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hamradio/dmascc.c linux-2.6.7-netdev_random/drivers/net/hamradio/dmascc.c
+--- linux-2.6.7/drivers/net/hamradio/dmascc.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hamradio/dmascc.c 2004-06-24 10:16:08.965131512 +0200
+@@ -712,7 +712,7 @@ static int scc_open(struct net_device *d
+
+ /* Request IRQ if not already used by other channel */
+ if (!info->irq_used) {
+- if (request_irq(dev->irq, scc_isr, 0, "dmascc", info)) {
++ if (request_irq(dev->irq, scc_isr, SA_NET_RANDOM, "dmascc", info)) {
+ return -EAGAIN;
+ }
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hamradio/scc.c linux-2.6.7-netdev_random/drivers/net/hamradio/scc.c
+--- linux-2.6.7/drivers/net/hamradio/scc.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hamradio/scc.c 2004-06-24 10:16:08.982128928 +0200
+@@ -1742,7 +1742,7 @@ static int scc_net_ioctl(struct net_devi
+
+ if (!Ivec[hwcfg.irq].used && hwcfg.irq)
+ {
+- if (request_irq(hwcfg.irq, scc_isr, SA_INTERRUPT, "AX.25 SCC", NULL))
++ if (request_irq(hwcfg.irq, scc_isr, SA_INTERRUPT | SA_NET_RANDOM, "AX.25 SCC", NULL))
+ printk(KERN_WARNING "z8530drv: warning, cannot get IRQ %d\n", hwcfg.irq);
+ else
+ Ivec[hwcfg.irq].used = 1;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hamradio/yam.c linux-2.6.7-netdev_random/drivers/net/hamradio/yam.c
+--- linux-2.6.7/drivers/net/hamradio/yam.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hamradio/yam.c 2004-06-24 10:16:08.992127408 +0200
+@@ -885,7 +885,7 @@ static int yam_open(struct net_device *d
+ goto out_release_base;
+ }
+ outb(0, IER(dev->base_addr));
+- if (request_irq(dev->irq, yam_interrupt, SA_INTERRUPT | SA_SHIRQ, dev->name, dev)) {
++ if (request_irq(dev->irq, yam_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: irq %d busy\n", dev->name, dev->irq);
+ ret = -EBUSY;
+ goto out_release_base;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hp100.c linux-2.6.7-netdev_random/drivers/net/hp100.c
+--- linux-2.6.7/drivers/net/hp100.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hp100.c 2004-06-24 10:16:09.035120872 +0200
+@@ -1064,7 +1064,7 @@ static int hp100_open(struct net_device
+ /* New: if bus is PCI or EISA, interrupts might be shared interrupts */
+ if (request_irq(dev->irq, hp100_interrupt,
+ lp->bus == HP100_BUS_PCI || lp->bus ==
+- HP100_BUS_EISA ? SA_SHIRQ : SA_INTERRUPT,
++ HP100_BUS_EISA ? SA_SHIRQ | SA_NET_RANDOM : SA_INTERRUPT,
+ "hp100", dev)) {
+ printk("hp100: %s: unable to get IRQ %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hp.c linux-2.6.7-netdev_random/drivers/net/hp.c
+--- linux-2.6.7/drivers/net/hp.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hp.c 2004-06-24 10:16:09.020123152 +0200
+@@ -176,13 +176,13 @@ static int __init hp_probe1(struct net_d
+ int *irqp = wordmode ? irq_16list : irq_8list;
+ do {
+ int irq = *irqp;
+- if (request_irq (irq, NULL, 0, "bogus", NULL) != -EBUSY) {
++ if (request_irq (irq, NULL, SA_NET_RANDOM, "bogus", NULL) != -EBUSY) {
+ unsigned long cookie = probe_irq_on();
+ /* Twinkle the interrupt, and check if it's seen. */
+ outb_p(irqmap[irq] | HP_RUN, ioaddr + HP_CONFIGURE);
+ outb_p( 0x00 | HP_RUN, ioaddr + HP_CONFIGURE);
+ if (irq == probe_irq_off(cookie) /* It's a good IRQ line! */
+- && request_irq (irq, ei_interrupt, 0, dev->name, dev) == 0) {
++ && request_irq (irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev) == 0) {
+ printk(" selecting IRQ %d.\n", irq);
+ dev->irq = *irqp;
+ break;
+@@ -197,7 +197,7 @@ static int __init hp_probe1(struct net_d
+ } else {
+ if (dev->irq == 2)
+ dev->irq = 9;
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hp-plus.c linux-2.6.7-netdev_random/drivers/net/hp-plus.c
+--- linux-2.6.7/drivers/net/hp-plus.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hp-plus.c 2004-06-24 10:16:09.011124520 +0200
+@@ -280,7 +280,7 @@ hpp_open(struct net_device *dev)
+ int option_reg;
+ int retval;
+
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ return retval;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/hydra.c linux-2.6.7-netdev_random/drivers/net/hydra.c
+--- linux-2.6.7/drivers/net/hydra.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/hydra.c 2004-06-24 10:16:09.046119200 +0200
+@@ -117,7 +117,7 @@ static int __devinit hydra_init(struct z
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+- if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, "Hydra Ethernet",
++ if (request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ | SA_NET_RANDOM, "Hydra Ethernet",
+ dev)) {
+ free_netdev(dev);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ibmlana.c linux-2.6.7-netdev_random/drivers/net/ibmlana.c
+--- linux-2.6.7/drivers/net/ibmlana.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ibmlana.c 2004-06-24 10:16:09.062116768 +0200
+@@ -780,7 +780,7 @@ static int ibmlana_open(struct net_devic
+
+ /* register resources - only necessary for IRQ */
+
+- result = request_irq(priv->realirq, irq_handler, SA_SHIRQ | SA_SAMPLE_RANDOM, dev->name, dev);
++ result = request_irq(priv->realirq, irq_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (result != 0) {
+ printk(KERN_ERR "%s: failed to register irq %d\n", dev->name, dev->irq);
+ return result;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ibmveth.c linux-2.6.7-netdev_random/drivers/net/ibmveth.c
+--- linux-2.6.7/drivers/net/ibmveth.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ibmveth.c 2004-06-24 10:51:53.501112488 +0200
+@@ -524,7 +524,7 @@ static int ibmveth_open(struct net_devic
+ }
+
+ ibmveth_debug_printk("registering irq 0x%x\n", netdev->irq);
+- if((rc = request_irq(netdev->irq, &ibmveth_interrupt, 0, netdev->name, netdev)) != 0) {
++ if((rc = request_irq(netdev->irq, &ibmveth_interrupt, SA_NET_RANDOM, netdev->name, netdev)) != 0) {
+ ibmveth_error_printk("unable to request irq 0x%x, rc %d\n", netdev->irq, rc);
+ do {
+ rc = h_free_logical_lan(adapter->vdev->unit_address);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ioc3-eth.c linux-2.6.7-netdev_random/drivers/net/ioc3-eth.c
+--- linux-2.6.7/drivers/net/ioc3-eth.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ioc3-eth.c 2004-06-24 10:16:09.081113880 +0200
+@@ -1045,7 +1045,7 @@ static int ioc3_open(struct net_device *
+ {
+ struct ioc3_private *ip = netdev_priv(dev);
+
+- if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ, ioc3_str, dev)) {
++ if (request_irq(dev->irq, ioc3_interrupt, SA_SHIRQ | SA_NET_RANDOM, ioc3_str, dev)) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/ali-ircc.c linux-2.6.7-netdev_random/drivers/net/irda/ali-ircc.c
+--- linux-2.6.7/drivers/net/irda/ali-ircc.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/ali-ircc.c 2004-06-24 10:16:09.094111904 +0200
+@@ -1317,7 +1317,7 @@ static int ali_ircc_net_open(struct net_
+ iobase = self->io.fir_base;
+
+ /* Request IRQ and install Interrupt Handler */
+- if (request_irq(self->io.irq, ali_ircc_interrupt, 0, dev->name, dev))
++ if (request_irq(self->io.irq, ali_ircc_interrupt, SA_NET_RANDOM, dev->name, dev))
+ {
+ WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/au1k_ir.c linux-2.6.7-netdev_random/drivers/net/irda/au1k_ir.c
+--- linux-2.6.7/drivers/net/irda/au1k_ir.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/au1k_ir.c 2004-06-24 10:19:15.515771504 +0200
+@@ -353,13 +353,13 @@ static int au1k_irda_start(struct net_de
+ }
+
+ if ((retval = request_irq(AU1000_IRDA_TX_INT, &au1k_irda_interrupt,
+- 0, dev->name, dev))) {
++ SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+ return retval;
+ }
+ if ((retval = request_irq(AU1000_IRDA_RX_INT, &au1k_irda_interrupt,
+- 0, dev->name, dev))) {
++ SA_NET_RANDOM, dev->name, dev))) {
+ free_irq(AU1000_IRDA_TX_INT, dev);
+ printk(KERN_ERR "%s: unable to get IRQ %d\n",
+ dev->name, dev->irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/donauboe.c linux-2.6.7-netdev_random/drivers/net/irda/donauboe.c
+--- linux-2.6.7/drivers/net/irda/donauboe.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/donauboe.c 2004-06-24 10:16:09.139105064 +0200
+@@ -1372,7 +1372,7 @@ toshoboe_net_open (struct net_device *de
+ return 0;
+
+ if (request_irq (self->io.irq, toshoboe_interrupt,
+- SA_SHIRQ | SA_INTERRUPT, dev->name, (void *) self))
++ SA_SHIRQ | SA_INTERRUPT | SA_NET_RANDOM, dev->name, (void *) self))
+ {
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/irport.c linux-2.6.7-netdev_random/drivers/net/irda/irport.c
+--- linux-2.6.7/drivers/net/irda/irport.c 2004-06-16 07:19:42.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/irport.c 2004-06-24 10:16:09.151103240 +0200
+@@ -902,7 +902,7 @@ int irport_net_open(struct net_device *d
+
+ iobase = self->io.sir_base;
+
+- if (request_irq(self->io.irq, self->interrupt, 0, dev->name,
++ if (request_irq(self->io.irq, self->interrupt, SA_NET_RANDOM, dev->name,
+ (void *) dev)) {
+ IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
+ __FUNCTION__, self->io.irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/nsc-ircc.c linux-2.6.7-netdev_random/drivers/net/irda/nsc-ircc.c
+--- linux-2.6.7/drivers/net/irda/nsc-ircc.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/nsc-ircc.c 2004-06-24 10:16:09.163101416 +0200
+@@ -2008,7 +2008,7 @@ static int nsc_ircc_net_open(struct net_
+
+ iobase = self->io.fir_base;
+
+- if (request_irq(self->io.irq, nsc_ircc_interrupt, 0, dev->name, dev)) {
++ if (request_irq(self->io.irq, nsc_ircc_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/sa1100_ir.c linux-2.6.7-netdev_random/drivers/net/irda/sa1100_ir.c
+--- linux-2.6.7/drivers/net/irda/sa1100_ir.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/sa1100_ir.c 2004-06-24 10:16:09.173099896 +0200
+@@ -844,7 +844,7 @@ static int sa1100_irda_start(struct net_
+
+ si->speed = 9600;
+
+- err = request_irq(dev->irq, sa1100_irda_irq, 0, dev->name, dev);
++ err = request_irq(dev->irq, sa1100_irda_irq, SA_NET_RANDOM, dev->name, dev);
+ if (err)
+ goto err_irq;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/smsc-ircc2.c linux-2.6.7-netdev_random/drivers/net/irda/smsc-ircc2.c
+--- linux-2.6.7/drivers/net/irda/smsc-ircc2.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/smsc-ircc2.c 2004-06-24 10:16:09.186097920 +0200
+@@ -1547,7 +1547,7 @@ static int smsc_ircc_net_open(struct net
+
+ iobase = self->io.fir_base;
+
+- if (request_irq(self->io.irq, smsc_ircc_interrupt, 0, dev->name,
++ if (request_irq(self->io.irq, smsc_ircc_interrupt, SA_NET_RANDOM, dev->name,
+ (void *) dev)) {
+ IRDA_DEBUG(0, "%s(), unable to allocate irq=%d\n",
+ __FUNCTION__, self->io.irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/via-ircc.c linux-2.6.7-netdev_random/drivers/net/irda/via-ircc.c
+--- linux-2.6.7/drivers/net/irda/via-ircc.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/via-ircc.c 2004-06-24 10:16:09.197096248 +0200
+@@ -1467,7 +1467,7 @@ static int via_ircc_net_open(struct net_
+ ASSERT(self != NULL, return 0;);
+ iobase = self->io.fir_base;
+ if (request_irq
+- (self->io.irq, via_ircc_interrupt, 0, dev->name, dev)) {
++ (self->io.irq, via_ircc_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ WARNING("%s, unable to allocate irq=%d\n", driver_name,
+ self->io.irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/vlsi_ir.c linux-2.6.7-netdev_random/drivers/net/irda/vlsi_ir.c
+--- linux-2.6.7/drivers/net/irda/vlsi_ir.c 2004-06-16 07:18:55.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/vlsi_ir.c 2004-06-24 10:16:09.215093512 +0200
+@@ -1515,7 +1515,7 @@ static int vlsi_open(struct net_device *
+
+ outb(IRINTR_INT_MASK, ndev->base_addr+VLSI_PIO_IRINTR);
+
+- if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ,
++ if (request_irq(ndev->irq, vlsi_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ drivername, ndev)) {
+ WARNING("%s: couldn't get IRQ: %d\n", __FUNCTION__, ndev->irq);
+ goto errout_io;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/irda/w83977af_ir.c linux-2.6.7-netdev_random/drivers/net/irda/w83977af_ir.c
+--- linux-2.6.7/drivers/net/irda/w83977af_ir.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/irda/w83977af_ir.c 2004-06-24 10:16:09.227091688 +0200
+@@ -1197,7 +1197,7 @@ static int w83977af_net_open(struct net_
+
+ iobase = self->io.fir_base;
+
+- if (request_irq(self->io.irq, w83977af_interrupt, 0, dev->name,
++ if (request_irq(self->io.irq, w83977af_interrupt, SA_NET_RANDOM, dev->name,
+ (void *) dev)) {
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/isa-skeleton.c linux-2.6.7-netdev_random/drivers/net/isa-skeleton.c
+--- linux-2.6.7/drivers/net/isa-skeleton.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/isa-skeleton.c 2004-06-24 10:16:09.236090320 +0200
+@@ -251,7 +251,7 @@ static int __init netcard_probe1(struct
+ dev->irq = 9;
+
+ {
+- int irqval = request_irq(dev->irq, &net_interrupt, 0, cardname, dev);
++ int irqval = request_irq(dev->irq, &net_interrupt, SA_NET_RANDOM, cardname, dev);
+ if (irqval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n",
+ dev->name, dev->irq, irqval);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ixgb/ixgb_main.c linux-2.6.7-netdev_random/drivers/net/ixgb/ixgb_main.c
+--- linux-2.6.7/drivers/net/ixgb/ixgb_main.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ixgb/ixgb_main.c 2004-06-24 10:19:36.823532232 +0200
+@@ -204,7 +204,7 @@ int ixgb_up(struct ixgb_adapter *adapter
+ ixgb_alloc_rx_buffers(adapter);
+
+ if ((err = request_irq(adapter->pdev->irq, &ixgb_intr,
+- SA_SHIRQ | SA_SAMPLE_RANDOM,
++ SA_SHIRQ | SA_NET_RANDOM,
+ netdev->name, netdev)))
+ return err;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/lance.c linux-2.6.7-netdev_random/drivers/net/lance.c
+--- linux-2.6.7/drivers/net/lance.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/lance.c 2004-06-24 10:16:09.303080136 +0200
+@@ -743,7 +743,7 @@ lance_open(struct net_device *dev)
+ int i;
+
+ if (dev->irq == 0 ||
+- request_irq(dev->irq, &lance_interrupt, 0, lp->name, dev)) {
++ request_irq(dev->irq, &lance_interrupt, SA_NET_RANDOM, lp->name, dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/lasi_82596.c linux-2.6.7-netdev_random/drivers/net/lasi_82596.c
+--- linux-2.6.7/drivers/net/lasi_82596.c 2004-06-16 07:18:58.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/lasi_82596.c 2004-06-24 10:16:09.322077248 +0200
+@@ -1017,7 +1017,7 @@ static int i596_open(struct net_device *
+ {
+ DEB(DEB_OPEN,printk("%s: i596_open() irq %d.\n", dev->name, dev->irq));
+
+- if (request_irq(dev->irq, &i596_interrupt, 0, "i82596", dev)) {
++ if (request_irq(dev->irq, &i596_interrupt, SA_NET_RANDOM, "i82596", dev)) {
+ printk("%s: IRQ %d not free\n", dev->name, dev->irq);
+ goto out;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/lne390.c linux-2.6.7-netdev_random/drivers/net/lne390.c
+--- linux-2.6.7/drivers/net/lne390.c 2004-06-16 07:20:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/lne390.c 2004-06-24 10:16:09.339074664 +0200
+@@ -228,7 +228,7 @@ static int __init lne390_probe1(struct n
+ }
+ printk(" IRQ %d,", dev->irq);
+
+- if ((ret = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev))) {
++ if ((ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev))) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return ret;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/lp486e.c linux-2.6.7-netdev_random/drivers/net/lp486e.c
+--- linux-2.6.7/drivers/net/lp486e.c 2004-06-16 07:19:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/lp486e.c 2004-06-24 10:16:09.350072992 +0200
+@@ -849,7 +849,7 @@ static int i596_open(struct net_device *
+ {
+ int i;
+
+- i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &i596_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) {
+ printk(KERN_ERR "%s: IRQ %d not free\n", dev->name, dev->irq);
+ return i;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/mac8390.c linux-2.6.7-netdev_random/drivers/net/mac8390.c
+--- linux-2.6.7/drivers/net/mac8390.c 2004-06-16 07:19:35.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/mac8390.c 2004-06-24 10:16:09.370069952 +0200
+@@ -538,7 +538,7 @@ static int __init mac8390_initdev(struct
+ static int mac8390_open(struct net_device *dev)
+ {
+ ei_open(dev);
+- if (request_irq(dev->irq, ei_interrupt, 0, "8390 Ethernet", dev)) {
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, "8390 Ethernet", dev)) {
+ printk ("%s: unable to get IRQ %d.\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/mac89x0.c linux-2.6.7-netdev_random/drivers/net/mac89x0.c
+--- linux-2.6.7/drivers/net/mac89x0.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/mac89x0.c 2004-06-24 10:16:09.382068128 +0200
+@@ -335,7 +335,7 @@ net_open(struct net_device *dev)
+ writereg(dev, PP_BusCTL, readreg(dev, PP_BusCTL) & ~ENABLE_IRQ);
+
+ /* Grab the interrupt */
+- if (request_irq(dev->irq, &net_interrupt, 0, "cs89x0", dev))
++ if (request_irq(dev->irq, &net_interrupt, SA_NET_RANDOM, "cs89x0", dev))
+ return -EAGAIN;
+
+ /* Set up the IRQ - Apparently magic */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/mace.c linux-2.6.7-netdev_random/drivers/net/mace.c
+--- linux-2.6.7/drivers/net/mace.c 2004-06-16 07:19:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/mace.c 2004-06-24 10:16:09.401065240 +0200
+@@ -238,7 +238,7 @@ static int __devinit mace_probe(struct m
+ */
+ mace_reset(dev);
+
+- rc = request_irq(dev->irq, mace_interrupt, 0, "MACE", dev);
++ rc = request_irq(dev->irq, mace_interrupt, SA_NET_RANDOM, "MACE", dev);
+ if (rc) {
+ printk(KERN_ERR "MACE: can't get irq %d\n", dev->irq);
+ goto err_unmap_rx_dma;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/macmace.c linux-2.6.7-netdev_random/drivers/net/macmace.c
+--- linux-2.6.7/drivers/net/macmace.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/macmace.c 2004-06-24 10:16:09.413063416 +0200
+@@ -319,11 +319,11 @@ static int mace_open(struct net_device *
+ mb->plscc = PORTSEL_AUI;
+ /* mb->utr = RTRD; */
+
+- if (request_irq(dev->irq, mace_interrupt, 0, dev->name, dev)) {
++ if (request_irq(dev->irq, mace_interrupt, SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+- if (request_irq(mp->dma_intr, mace_dma_intr, 0, dev->name, dev)) {
++ if (request_irq(mp->dma_intr, mace_dma_intr, SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_ERR "%s: can't get irq %d\n", dev->name, mp->dma_intr);
+ free_irq(dev->irq, dev);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/meth.c linux-2.6.7-netdev_random/drivers/net/meth.c
+--- linux-2.6.7/drivers/net/meth.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/meth.c 2004-06-24 10:20:05.957103256 +0200
+@@ -326,7 +326,7 @@ static int meth_open(struct net_device *
+ if (ret < 0)
+ goto out_free_tx_ring;
+
+- ret = request_irq(dev->irq, meth_interrupt, 0, meth_str, dev);
++ ret = request_irq(dev->irq, meth_interrupt, SA_NET_RANDOM, meth_str, dev);
+ if (ret) {
+ printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
+ goto out_free_rx_ring;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/myri_sbus.c linux-2.6.7-netdev_random/drivers/net/myri_sbus.c
+--- linux-2.6.7/drivers/net/myri_sbus.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/myri_sbus.c 2004-06-24 10:16:09.482052928 +0200
+@@ -1071,7 +1071,7 @@ static int __init myri_ether_init(struct
+ /* Register interrupt handler now. */
+ DET(("Requesting MYRIcom IRQ line.\n"));
+ if (request_irq(dev->irq, &myri_interrupt,
+- SA_SHIRQ, "MyriCOM Ethernet", (void *) dev)) {
++ SA_SHIRQ | SA_NET_RANDOM, "MyriCOM Ethernet", (void *) dev)) {
+ printk("MyriCOM: Cannot register interrupt handler.\n");
+ goto err;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/natsemi.c linux-2.6.7-netdev_random/drivers/net/natsemi.c
+--- linux-2.6.7/drivers/net/natsemi.c 2004-06-16 07:18:56.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/natsemi.c 2004-06-24 10:16:09.504049584 +0200
+@@ -1088,7 +1088,7 @@ static int netdev_open(struct net_device
+ /* Reset the chip, just in case. */
+ natsemi_reset(dev);
+
+- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ if (netif_msg_ifup(np))
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ne2.c linux-2.6.7-netdev_random/drivers/net/ne2.c
+--- linux-2.6.7/drivers/net/ne2.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ne2.c 2004-06-24 10:16:09.527046088 +0200
+@@ -470,7 +470,7 @@ static int __init ne2_probe1(struct net_
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d (irqval=%d).\n",
+ dev->irq, retval);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ne2k_cbus.c linux-2.6.7-netdev_random/drivers/net/ne2k_cbus.c
+--- linux-2.6.7/drivers/net/ne2k_cbus.c 2004-06-16 07:19:09.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ne2k_cbus.c 2004-06-24 10:16:09.561040920 +0200
+@@ -500,7 +500,7 @@ static int __init ne_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
++ ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out_kfree;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ne2k-pci.c linux-2.6.7-netdev_random/drivers/net/ne2k-pci.c
+--- linux-2.6.7/drivers/net/ne2k-pci.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ne2k-pci.c 2004-06-24 10:16:09.548042896 +0200
+@@ -419,7 +419,7 @@ static int ne2k_pci_set_fdx(struct net_d
+
+ static int ne2k_pci_open(struct net_device *dev)
+ {
+- int ret = request_irq(dev->irq, ei_interrupt, SA_SHIRQ, dev->name, dev);
++ int ret = request_irq(dev->irq, ei_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ne3210.c linux-2.6.7-netdev_random/drivers/net/ne3210.c
+--- linux-2.6.7/drivers/net/ne3210.c 2004-06-16 07:19:17.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ne3210.c 2004-06-24 10:16:09.571039400 +0200
+@@ -140,7 +140,7 @@ static int __init ne3210_eisa_probe (str
+ dev->irq = irq_map[(inb(ioaddr + NE3210_CFG2) >> 3) & 0x07];
+ printk(".\nne3210.c: using IRQ %d, ", dev->irq);
+
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ goto out2;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ne.c linux-2.6.7-netdev_random/drivers/net/ne.c
+--- linux-2.6.7/drivers/net/ne.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ne.c 2004-06-24 10:16:09.517047608 +0200
+@@ -464,7 +464,7 @@ static int __init ne_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
++ ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ne-h8300.c linux-2.6.7-netdev_random/drivers/net/ne-h8300.c
+--- linux-2.6.7/drivers/net/ne-h8300.c 2004-06-16 07:19:10.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ne-h8300.c 2004-06-24 10:52:56.320562480 +0200
+@@ -283,7 +283,7 @@ static int __init ne_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- ret = request_irq(dev->irq, ei_interrupt, 0, name, dev);
++ ret = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev);
+ if (ret) {
+ printk (" unable to get IRQ %d (errno=%d).\n", dev->irq, ret);
+ goto err_out;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ni5010.c linux-2.6.7-netdev_random/drivers/net/ni5010.c
+--- linux-2.6.7/drivers/net/ni5010.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ni5010.c 2004-06-24 10:16:09.583037576 +0200
+@@ -381,7 +381,7 @@ static int ni5010_open(struct net_device
+
+ PRINTK2((KERN_DEBUG "%s: entering ni5010_open()\n", dev->name));
+
+- if (request_irq(dev->irq, &ni5010_interrupt, 0, boardname, dev)) {
++ if (request_irq(dev->irq, &ni5010_interrupt, SA_NET_RANDOM, boardname, dev)) {
+ printk(KERN_WARNING "%s: Cannot get irq %#2x\n", dev->name, dev->irq);
+ return -EAGAIN;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ni52.c linux-2.6.7-netdev_random/drivers/net/ni52.c
+--- linux-2.6.7/drivers/net/ni52.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ni52.c 2004-06-24 10:16:09.594035904 +0200
+@@ -265,7 +265,7 @@ static int ni52_open(struct net_device *
+ startrecv586(dev);
+ ni_enaint();
+
+- ret = request_irq(dev->irq, &ni52_interrupt,0,dev->name,dev);
++ ret = request_irq(dev->irq, &ni52_interrupt,SA_NET_RANDOM,dev->name,dev);
+ if (ret)
+ {
+ ni_reset586();
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ni65.c linux-2.6.7-netdev_random/drivers/net/ni65.c
+--- linux-2.6.7/drivers/net/ni65.c 2004-06-16 07:19:02.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ni65.c 2004-06-24 10:16:09.607033928 +0200
+@@ -296,7 +296,7 @@ static void ni65_set_performance(struct
+ static int ni65_open(struct net_device *dev)
+ {
+ struct priv *p = (struct priv *) dev->priv;
+- int irqval = request_irq(dev->irq, &ni65_interrupt,0,
++ int irqval = request_irq(dev->irq, &ni65_interrupt,SA_NET_RANDOM,
+ cards[p->cardno].cardname,dev);
+ if (irqval) {
+ printk(KERN_ERR "%s: unable to get IRQ %d (irqval=%d).\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/ns83820.c linux-2.6.7-netdev_random/drivers/net/ns83820.c
+--- linux-2.6.7/drivers/net/ns83820.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/ns83820.c 2004-06-24 10:16:09.619032104 +0200
+@@ -1850,7 +1850,7 @@ static int __devinit ns83820_init_one(st
+ setup_ee_mem_bitbanger(&dev->ee, (long)dev->base + MEAR, 3, 2, 1, 0,
+ 0);
+
+- err = request_irq(pci_dev->irq, ns83820_irq, SA_SHIRQ,
++ err = request_irq(pci_dev->irq, ns83820_irq, SA_SHIRQ | SA_NET_RANDOM,
+ ndev->name, ndev);
+ if (err) {
+ printk(KERN_INFO "ns83820: unable to register irq %d\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/oaknet.c linux-2.6.7-netdev_random/drivers/net/oaknet.c
+--- linux-2.6.7/drivers/net/oaknet.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/oaknet.c 2004-06-24 10:16:09.630030432 +0200
+@@ -162,7 +162,7 @@ static int __init oaknet_init(void)
+ /* Attempt to get the interrupt line */
+
+ ret = -EAGAIN;
+- if (request_irq(dev->irq, ei_interrupt, 0, name, dev)) {
++ if (request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, name, dev)) {
+ printk("%s: unable to request interrupt %d.\n",
+ dev->name, dev->irq);
+ goto out_region;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/pci-skeleton.c linux-2.6.7-netdev_random/drivers/net/pci-skeleton.c
+--- linux-2.6.7/drivers/net/pci-skeleton.c 2004-06-16 07:18:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/pci-skeleton.c 2004-06-24 10:16:09.644028304 +0200
+@@ -1080,7 +1080,7 @@ static int netdrv_open (struct net_devic
+
+ DPRINTK ("ENTER\n");
+
+- retval = request_irq (dev->irq, netdrv_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq (dev->irq, netdrv_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ DPRINTK ("EXIT, returning %d\n", retval);
+ return retval;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/pcnet32.c linux-2.6.7-netdev_random/drivers/net/pcnet32.c
+--- linux-2.6.7/drivers/net/pcnet32.c 2004-06-16 07:19:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/pcnet32.c 2004-06-24 10:20:35.382629896 +0200
+@@ -1364,7 +1364,7 @@ pcnet32_open(struct net_device *dev)
+
+ if (dev->irq == 0 ||
+ request_irq(dev->irq, &pcnet32_interrupt,
+- lp->shared_irq ? SA_SHIRQ : 0, dev->name, (void *)dev)) {
++ lp->shared_irq ? SA_SHIRQ | SA_NET_RANDOM : SA_NET_RANDOM, dev->name, (void *)dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/r8169.c linux-2.6.7-netdev_random/drivers/net/r8169.c
+--- linux-2.6.7/drivers/net/r8169.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/r8169.c 2004-06-24 10:20:52.459033888 +0200
+@@ -1047,7 +1047,7 @@ rtl8169_open(struct net_device *dev)
+ int retval;
+
+ retval =
+- request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
++ request_irq(dev->irq, rtl8169_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval < 0)
+ goto out;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/rrunner.c linux-2.6.7-netdev_random/drivers/net/rrunner.c
+--- linux-2.6.7/drivers/net/rrunner.c 2004-06-16 07:19:02.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/rrunner.c 2004-06-24 10:16:09.812002768 +0200
+@@ -1209,7 +1209,7 @@ static int rr_open(struct net_device *de
+ readl(&regs->HostCtrl);
+ spin_unlock_irqrestore(&rrpriv->lock, flags);
+
+- if (request_irq(dev->irq, rr_interrupt, SA_SHIRQ, dev->name, dev)) {
++ if (request_irq(dev->irq, rr_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)) {
+ printk(KERN_WARNING "%s: Requested IRQ %d is busy\n",
+ dev->name, dev->irq);
+ ecode = -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/s2io.c linux-2.6.7-netdev_random/drivers/net/s2io.c
+--- linux-2.6.7/drivers/net/s2io.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/s2io.c 2004-06-24 10:54:39.712844464 +0200
+@@ -2073,7 +2073,7 @@ int s2io_open(struct net_device *dev)
+
+ /* After proper initialization of H/W, register ISR */
+ err =
+- request_irq((int) sp->irq, s2io_isr, SA_SHIRQ, sp->name, dev);
++ request_irq((int) sp->irq, s2io_isr, SA_SHIRQ | SA_NET_RANDOM, sp->name, dev);
+ if (err) {
+ s2io_reset(sp);
+ DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/saa9730.c linux-2.6.7-netdev_random/drivers/net/saa9730.c
+--- linux-2.6.7/drivers/net/saa9730.c 2004-06-16 07:19:35.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/saa9730.c 2004-06-24 10:16:09.823001096 +0200
+@@ -805,7 +805,7 @@ static int lan_saa9730_open(struct net_d
+ (struct lan_saa9730_private *) dev->priv;
+
+ /* Associate IRQ with lan_saa9730_interrupt */
+- if (request_irq(dev->irq, &lan_saa9730_interrupt, 0, "SAA9730 Eth",
++ if (request_irq(dev->irq, &lan_saa9730_interrupt, SA_NET_RANDOM, "SAA9730 Eth",
+ dev)) {
+ printk("lan_saa9730_open: Can't get irq %d\n", dev->irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sb1000.c linux-2.6.7-netdev_random/drivers/net/sb1000.c
+--- linux-2.6.7/drivers/net/sb1000.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sb1000.c 2004-06-24 10:16:09.845997600 +0200
+@@ -968,7 +968,7 @@ sb1000_open(struct net_device *dev)
+ lp->rx_frame_id[1] = 0;
+ lp->rx_frame_id[2] = 0;
+ lp->rx_frame_id[3] = 0;
+- if (request_irq(dev->irq, &sb1000_interrupt, 0, "sb1000", dev)) {
++ if (request_irq(dev->irq, &sb1000_interrupt, SA_NET_RANDOM, "sb1000", dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sb1250-mac.c linux-2.6.7-netdev_random/drivers/net/sb1250-mac.c
+--- linux-2.6.7/drivers/net/sb1250-mac.c 2004-06-16 07:20:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sb1250-mac.c 2004-06-24 10:21:24.934096928 +0200
+@@ -2458,7 +2458,7 @@ static int sbmac_open(struct net_device
+ */
+
+ SBMAC_READCSR(sc->sbm_isr);
+- if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ, dev->name, dev))
++ if (request_irq(dev->irq, &sbmac_intr, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))
+ return -EBUSY;
+
+ /*
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/seeq8005.c linux-2.6.7-netdev_random/drivers/net/seeq8005.c
+--- linux-2.6.7/drivers/net/seeq8005.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/seeq8005.c 2004-06-24 10:16:09.894990152 +0200
+@@ -323,7 +323,7 @@ static int __init seeq8005_probe1(struct
+
+ #if 0
+ {
+- int irqval = request_irq(dev->irq, &seeq8005_interrupt, 0, "seeq8005", dev);
++ int irqval = request_irq(dev->irq, &seeq8005_interrupt, SA_NET_RANDOM, "seeq8005", dev);
+ if (irqval) {
+ printk ("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, irqval);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sgiseeq.c linux-2.6.7-netdev_random/drivers/net/sgiseeq.c
+--- linux-2.6.7/drivers/net/sgiseeq.c 2004-06-16 07:20:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sgiseeq.c 2004-06-24 10:21:41.854524632 +0200
+@@ -621,7 +621,7 @@ int sgiseeq_init(struct hpc3_regs* regs,
+ goto err_out_free_dev;
+ }
+
+- if (request_irq(irq, sgiseeq_interrupt, 0, sgiseeqstr, dev)) {
++ if (request_irq(irq, sgiseeq_interrupt, SA_NET_RANDOM, sgiseeqstr, dev)) {
+ printk(KERN_ERR "Seeq8003: Can't get irq %d\n", dev->irq);
+ err = -EAGAIN;
+ goto err_out_free_page;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sis900.c linux-2.6.7-netdev_random/drivers/net/sis900.c
+--- linux-2.6.7/drivers/net/sis900.c 2004-06-16 07:19:22.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sis900.c 2004-06-24 10:16:10.055965680 +0200
+@@ -932,7 +932,7 @@ sis900_open(struct net_device *net_dev)
+ pci_read_config_byte(sis_priv->pci_dev, PCI_CLASS_REVISION, &revision);
+ sis630_set_eq(net_dev, revision);
+
+- ret = request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ, net_dev->name, net_dev);
++ ret = request_irq(net_dev->irq, &sis900_interrupt, SA_SHIRQ | SA_NET_RANDOM, net_dev->name, net_dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sk98lin/skge.c linux-2.6.7-netdev_random/drivers/net/sk98lin/skge.c
+--- linux-2.6.7/drivers/net/sk98lin/skge.c 2004-06-16 07:18:59.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sk98lin/skge.c 2004-06-24 10:16:10.088960664 +0200
+@@ -964,9 +964,9 @@ SK_BOOL DualNet;
+ spin_unlock_irqrestore(&pAC->SlowPathLock, Flags);
+
+ if (pAC->GIni.GIMacsFound == 2) {
+- Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ, pAC->Name, dev);
++ Ret = request_irq(dev->irq, SkGeIsr, SA_SHIRQ | SA_NET_RANDOM, pAC->Name, dev);
+ } else if (pAC->GIni.GIMacsFound == 1) {
+- Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ,
++ Ret = request_irq(dev->irq, SkGeIsrOnePort, SA_SHIRQ | SA_NET_RANDOM,
+ pAC->Name, dev);
+ } else {
+ printk(KERN_WARNING "sk98lin: Illegal number of ports: %d\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/skfp/skfddi.c linux-2.6.7-netdev_random/drivers/net/skfp/skfddi.c
+--- linux-2.6.7/drivers/net/skfp/skfddi.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/skfp/skfddi.c 2004-06-24 10:16:10.160949720 +0200
+@@ -523,7 +523,7 @@ static int skfp_open(struct net_device *
+
+ PRINTK(KERN_INFO "entering skfp_open\n");
+ /* Register IRQ - support shared interrupts by passing device ptr */
+- err = request_irq(dev->irq, (void *) skfp_interrupt, SA_SHIRQ,
++ err = request_irq(dev->irq, (void *) skfp_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (err)
+ return err;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sk_g16.c linux-2.6.7-netdev_random/drivers/net/sk_g16.c
+--- linux-2.6.7/drivers/net/sk_g16.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sk_g16.c 2004-06-24 10:16:10.103958384 +0200
+@@ -889,7 +889,7 @@ static int SK_open(struct net_device *de
+
+ do
+ {
+- irqval = request_irq(irqtab[i], &SK_interrupt, 0, "sk_g16", dev);
++ irqval = request_irq(irqtab[i], &SK_interrupt, SA_NET_RANDOM, "sk_g16", dev);
+ i++;
+ } while (irqval && irqtab[i]);
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sk_mca.c linux-2.6.7-netdev_random/drivers/net/sk_mca.c
+--- linux-2.6.7/drivers/net/sk_mca.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sk_mca.c 2004-06-24 10:16:10.131954128 +0200
+@@ -830,7 +830,7 @@ static int skmca_open(struct net_device
+ /* register resources - only necessary for IRQ */
+ result =
+ request_irq(priv->realirq, irq_handler,
+- SA_SHIRQ | SA_SAMPLE_RANDOM, "sk_mca", dev);
++ SA_SHIRQ | SA_NET_RANDOM, "sk_mca", dev);
+ if (result != 0) {
+ printk("%s: failed to register irq %d\n", dev->name,
+ dev->irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/smc9194.c linux-2.6.7-netdev_random/drivers/net/smc9194.c
+--- linux-2.6.7/drivers/net/smc9194.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/smc9194.c 2004-06-24 10:16:10.254935432 +0200
+@@ -1001,7 +1001,7 @@ static int __init smc_probe(struct net_d
+ memset(dev->priv, 0, sizeof(struct smc_local));
+
+ /* Grab the IRQ */
+- retval = request_irq(dev->irq, &smc_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, &smc_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ printk("%s: unable to get IRQ %d (irqval=%d).\n", dev->name,
+ dev->irq, retval);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/smc-mca.c linux-2.6.7-netdev_random/drivers/net/smc-mca.c
+--- linux-2.6.7/drivers/net/smc-mca.c 2004-06-16 07:18:58.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/smc-mca.c 2004-06-24 10:16:10.187945616 +0200
+@@ -349,7 +349,7 @@ static int ultramca_open(struct net_devi
+ int ioaddr = dev->base_addr - ULTRA_NIC_OFFSET; /* ASIC addr */
+ int retval;
+
+- if ((retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ outb(ULTRA_MEMENB, ioaddr); /* Enable memory */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/smc-ultra32.c linux-2.6.7-netdev_random/drivers/net/smc-ultra32.c
+--- linux-2.6.7/drivers/net/smc-ultra32.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/smc-ultra32.c 2004-06-24 10:16:10.239937712 +0200
+@@ -282,7 +282,7 @@ out:
+ static int ultra32_open(struct net_device *dev)
+ {
+ int ioaddr = dev->base_addr - ULTRA32_NIC_OFFSET; /* ASIC addr */
+- int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : SA_SHIRQ;
++ int irq_flags = (inb(ioaddr + ULTRA32_CFG5) & 0x08) ? 0 : SA_SHIRQ | SA_NET_RANDOM;
+ int retval;
+
+ retval = request_irq(dev->irq, ei_interrupt, irq_flags, dev->name, dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/smc-ultra.c linux-2.6.7-netdev_random/drivers/net/smc-ultra.c
+--- linux-2.6.7/drivers/net/smc-ultra.c 2004-06-16 07:18:59.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/smc-ultra.c 2004-06-24 10:16:10.212941816 +0200
+@@ -377,7 +377,7 @@ ultra_open(struct net_device *dev)
+ unsigned char irq2reg[] = {0, 0, 0x04, 0x08, 0, 0x0C, 0, 0x40,
+ 0, 0x04, 0x44, 0x48, 0, 0, 0, 0x4C, };
+
+- retval = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ retval = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (retval)
+ return retval;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sonic.c linux-2.6.7-netdev_random/drivers/net/sonic.c
+--- linux-2.6.7/drivers/net/sonic.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sonic.c 2004-06-24 10:16:10.269933152 +0200
+@@ -41,8 +41,8 @@ static int sonic_open(struct net_device
+ * covering another bug otherwise corrupting data. This doesn't mean
+ * this glue works ok under all situations.
+ */
+-// if (sonic_request_irq(dev->irq, &sonic_interrupt, 0, "sonic", dev)) {
+- if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT,
++// if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_NET_RANDOM, "sonic", dev)) {
++ if (sonic_request_irq(dev->irq, &sonic_interrupt, SA_INTERRUPT | SA_NET_RANDOM,
+ "sonic", dev)) {
+ printk("\n%s: unable to get IRQ %d .\n", dev->name, dev->irq);
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/starfire.c linux-2.6.7-netdev_random/drivers/net/starfire.c
+--- linux-2.6.7/drivers/net/starfire.c 2004-06-16 07:20:27.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/starfire.c 2004-06-24 10:16:10.284930872 +0200
+@@ -1111,7 +1111,7 @@ static int netdev_open(struct net_device
+
+ COMPAT_MOD_INC_USE_COUNT;
+
+- retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ retval = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ COMPAT_MOD_DEC_USE_COUNT;
+ return retval;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/stnic.c linux-2.6.7-netdev_random/drivers/net/stnic.c
+--- linux-2.6.7/drivers/net/stnic.c 2004-06-16 07:19:42.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/stnic.c 2004-06-24 10:16:10.309927072 +0200
+@@ -130,7 +130,7 @@ static int __init stnic_probe(void)
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- err = request_irq (dev->irq, ei_interrupt, 0, dev->name, dev);
++ err = request_irq (dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (err) {
+ printk (KERN_EMERG " unable to get IRQ %d.\n", dev->irq);
+ free_netdev(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sun3_82586.c linux-2.6.7-netdev_random/drivers/net/sun3_82586.c
+--- linux-2.6.7/drivers/net/sun3_82586.c 2004-06-16 07:19:29.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sun3_82586.c 2004-06-24 10:16:10.324924792 +0200
+@@ -191,7 +191,7 @@ static int sun3_82586_open(struct net_de
+ startrecv586(dev);
+ sun3_enaint();
+
+- ret = request_irq(dev->irq, &sun3_82586_interrupt,0,dev->name,dev);
++ ret = request_irq(dev->irq, &sun3_82586_interrupt,SA_NET_RANDOM,dev->name,dev);
+ if (ret)
+ {
+ sun3_reset586();
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sun3lance.c linux-2.6.7-netdev_random/drivers/net/sun3lance.c
+--- linux-2.6.7/drivers/net/sun3lance.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sun3lance.c 2004-06-24 10:16:10.336922968 +0200
+@@ -342,7 +342,7 @@ static int __init lance_probe( struct ne
+
+ REGA(CSR0) = CSR0_STOP;
+
+- request_irq(LANCE_IRQ, lance_interrupt, SA_INTERRUPT, "SUN3 Lance", dev);
++ request_irq(LANCE_IRQ, lance_interrupt, SA_INTERRUPT | SA_NET_RANDOM, "SUN3 Lance", dev);
+ dev->irq = (unsigned short)LANCE_IRQ;
+
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sunbmac.c linux-2.6.7-netdev_random/drivers/net/sunbmac.c
+--- linux-2.6.7/drivers/net/sunbmac.c 2004-06-16 07:19:29.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sunbmac.c 2004-06-24 10:16:10.361919168 +0200
+@@ -909,7 +909,7 @@ static int bigmac_open(struct net_device
+ struct bigmac *bp = (struct bigmac *) dev->priv;
+ int ret;
+
+- ret = request_irq(dev->irq, &bigmac_interrupt, SA_SHIRQ, dev->name, bp);
++ ret = request_irq(dev->irq, &bigmac_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, bp);
+ if (ret) {
+ printk(KERN_ERR "BIGMAC: Can't order irq %d to go.\n", dev->irq);
+ return ret;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sundance.c linux-2.6.7-netdev_random/drivers/net/sundance.c
+--- linux-2.6.7/drivers/net/sundance.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sundance.c 2004-06-24 10:16:10.374917192 +0200
+@@ -861,7 +861,7 @@ static int netdev_open(struct net_device
+
+ /* Do we need to reset the chip??? */
+
+- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ return i;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sungem.c linux-2.6.7-netdev_random/drivers/net/sungem.c
+--- linux-2.6.7/drivers/net/sungem.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sungem.c 2004-06-24 10:16:10.400913240 +0200
+@@ -2171,7 +2171,7 @@ static int gem_open(struct net_device *d
+ * on the controller
+ */
+ if (request_irq(gp->pdev->irq, gem_interrupt,
+- SA_SHIRQ, dev->name, (void *)dev)) {
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, (void *)dev)) {
+ printk(KERN_ERR "%s: failed to request irq !\n", gp->dev->name);
+
+ spin_lock_irq(&gp->lock);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sunhme.c linux-2.6.7-netdev_random/drivers/net/sunhme.c
+--- linux-2.6.7/drivers/net/sunhme.c 2004-06-16 07:19:11.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sunhme.c 2004-06-24 10:16:10.425909440 +0200
+@@ -2209,7 +2209,7 @@ static int happy_meal_open(struct net_de
+ */
+ if ((hp->happy_flags & (HFLAG_QUATTRO|HFLAG_PCI)) != HFLAG_QUATTRO) {
+ if (request_irq(dev->irq, &happy_meal_interrupt,
+- SA_SHIRQ, dev->name, (void *)dev)) {
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, (void *)dev)) {
+ HMD(("EAGAIN\n"));
+ #ifdef __sparc__
+ printk(KERN_ERR "happy_meal(SBUS): Can't order irq %s to go.\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sunlance.c linux-2.6.7-netdev_random/drivers/net/sunlance.c
+--- linux-2.6.7/drivers/net/sunlance.c 2004-06-16 07:19:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sunlance.c 2004-06-24 10:16:10.440907160 +0200
+@@ -923,7 +923,7 @@ static int lance_open(struct net_device
+
+ STOP_LANCE(lp);
+
+- if (request_irq(dev->irq, &lance_interrupt, SA_SHIRQ,
++ if (request_irq(dev->irq, &lance_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ lancestr, (void *) dev)) {
+ printk(KERN_ERR "Lance: Can't get irq %s\n", __irq_itoa(dev->irq));
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/sunqe.c linux-2.6.7-netdev_random/drivers/net/sunqe.c
+--- linux-2.6.7/drivers/net/sunqe.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/sunqe.c 2004-06-24 10:16:10.454905032 +0200
+@@ -889,7 +889,7 @@ static int __init qec_ether_init(struct
+ * for it now.
+ */
+ if (request_irq(sdev->irqs[0], &qec_interrupt,
+- SA_SHIRQ, "QuadEther", (void *) qecp)) {
++ SA_SHIRQ | SA_NET_RANDOM, "QuadEther", (void *) qecp)) {
+ printk(KERN_ERR "QuadEther: Can't register QEC master irq handler.\n");
+ res = -EAGAIN;
+ goto out4;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tc35815.c linux-2.6.7-netdev_random/drivers/net/tc35815.c
+--- linux-2.6.7/drivers/net/tc35815.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tc35815.c 2004-06-24 10:16:10.477901536 +0200
+@@ -880,7 +880,7 @@ tc35815_open(struct net_device *dev)
+ */
+
+ if (dev->irq == 0 ||
+- request_irq(dev->irq, &tc35815_interrupt, SA_SHIRQ, cardname, dev)) {
++ request_irq(dev->irq, &tc35815_interrupt, SA_SHIRQ | SA_NET_RANDOM, cardname, dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tg3.c linux-2.6.7-netdev_random/drivers/net/tg3.c
+--- linux-2.6.7/drivers/net/tg3.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tg3.c 2004-06-24 10:16:10.515895760 +0200
+@@ -5620,7 +5620,7 @@ static int tg3_open(struct net_device *d
+ return err;
+
+ err = request_irq(dev->irq, tg3_interrupt,
+- SA_SHIRQ, dev->name, dev);
++ SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+
+ if (err) {
+ tg3_free_consistent(tp);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tlan.c linux-2.6.7-netdev_random/drivers/net/tlan.c
+--- linux-2.6.7/drivers/net/tlan.c 2004-06-16 07:18:58.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tlan.c 2004-06-24 10:16:10.541891808 +0200
+@@ -941,7 +941,7 @@ static int TLan_Open( struct net_device
+ int err;
+
+ priv->tlanRev = TLan_DioRead8( dev->base_addr, TLAN_DEF_REVISION );
+- err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ, TLanSignature, dev );
++ err = request_irq( dev->irq, TLan_HandleInterrupt, SA_SHIRQ | SA_NET_RANDOM, TLanSignature, dev );
+
+ if ( err ) {
+ printk(KERN_ERR "TLAN: Cannot open %s because IRQ %d is already in use.\n", dev->name, dev->irq );
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/3c359.c linux-2.6.7-netdev_random/drivers/net/tokenring/3c359.c
+--- linux-2.6.7/drivers/net/tokenring/3c359.c 2004-06-16 07:20:25.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/3c359.c 2004-06-24 10:16:10.563888464 +0200
+@@ -576,7 +576,7 @@ static int xl_open(struct net_device *de
+
+ u16 switchsettings, switchsettings_eeprom ;
+
+- if(request_irq(dev->irq, &xl_interrupt, SA_SHIRQ , "3c359", dev)) {
++ if(request_irq(dev->irq, &xl_interrupt, SA_SHIRQ | SA_NET_RANDOM, "3c359", dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/abyss.c linux-2.6.7-netdev_random/drivers/net/tokenring/abyss.c
+--- linux-2.6.7/drivers/net/tokenring/abyss.c 2004-06-16 07:19:02.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/abyss.c 2004-06-24 10:16:10.574886792 +0200
+@@ -123,7 +123,7 @@ static int __devinit abyss_attach(struct
+ goto err_out_trdev;
+ }
+
+- ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
++ ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret)
+ goto err_out_region;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/ibmtr.c linux-2.6.7-netdev_random/drivers/net/tokenring/ibmtr.c
+--- linux-2.6.7/drivers/net/tokenring/ibmtr.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/ibmtr.c 2004-06-24 10:16:10.586884968 +0200
+@@ -684,7 +684,7 @@ static int __devinit ibmtr_probe1(struct
+
+ /* The PCMCIA has already got the interrupt line and the io port,
+ so no chance of anybody else getting it - MLP */
+- if (request_irq(dev->irq = irq, &tok_interrupt, 0, "ibmtr", dev) != 0) {
++ if (request_irq(dev->irq = irq, &tok_interrupt, SA_NET_RANDOM, "ibmtr", dev) != 0) {
+ DPRINTK("Could not grab irq %d. Halting Token Ring driver.\n",
+ irq);
+ iounmap(t_mmio);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/lanstreamer.c linux-2.6.7-netdev_random/drivers/net/tokenring/lanstreamer.c
+--- linux-2.6.7/drivers/net/tokenring/lanstreamer.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/lanstreamer.c 2004-06-24 10:16:10.607881776 +0200
+@@ -598,7 +598,7 @@ static int streamer_open(struct net_devi
+ rc=streamer_reset(dev);
+ }
+
+- if (request_irq(dev->irq, &streamer_interrupt, SA_SHIRQ, "lanstreamer", dev)) {
++ if (request_irq(dev->irq, &streamer_interrupt, SA_SHIRQ | SA_NET_RANDOM, "lanstreamer", dev)) {
+ return -EAGAIN;
+ }
+ #if STREAMER_DEBUG
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/madgemc.c linux-2.6.7-netdev_random/drivers/net/tokenring/madgemc.c
+--- linux-2.6.7/drivers/net/tokenring/madgemc.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/madgemc.c 2004-06-24 10:16:10.619879952 +0200
+@@ -333,7 +333,7 @@ static int __init madgemc_probe(void)
+ */
+ outb(0, dev->base_addr + MC_CONTROL_REG0); /* sanity */
+ madgemc_setsifsel(dev, 1);
+- if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ,
++ if (request_irq(dev->irq, madgemc_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ "madgemc", dev))
+ goto getout;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/olympic.c linux-2.6.7-netdev_random/drivers/net/tokenring/olympic.c
+--- linux-2.6.7/drivers/net/tokenring/olympic.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/olympic.c 2004-06-24 10:16:10.639876912 +0200
+@@ -442,7 +442,7 @@ static int olympic_open(struct net_devic
+
+ DECLARE_WAITQUEUE(wait,current) ;
+
+- if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ , "olympic", dev)) {
++ if(request_irq(dev->irq, &olympic_interrupt, SA_SHIRQ | SA_NET_RANDOM, "olympic", dev)) {
+ return -EAGAIN;
+ }
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/proteon.c linux-2.6.7-netdev_random/drivers/net/tokenring/proteon.c
+--- linux-2.6.7/drivers/net/tokenring/proteon.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/proteon.c 2004-06-24 10:16:10.648875544 +0200
+@@ -178,7 +178,7 @@ static int __init setup_card(struct net_
+ for(j = 0; irqlist[j] != 0; j++)
+ {
+ dev->irq = irqlist[j];
+- if (!request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (!request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ cardname, dev))
+ break;
+ }
+@@ -200,7 +200,7 @@ static int __init setup_card(struct net_
+ dev->name, dev->irq);
+ goto out3;
+ }
+- if (request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ cardname, dev))
+ {
+ printk(KERN_INFO "%s: Selected IRQ %d not available\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/skisa.c linux-2.6.7-netdev_random/drivers/net/tokenring/skisa.c
+--- linux-2.6.7/drivers/net/tokenring/skisa.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/skisa.c 2004-06-24 10:16:10.658874024 +0200
+@@ -195,7 +195,7 @@ static int __init setup_card(struct net_
+ for(j = 0; irqlist[j] != 0; j++)
+ {
+ dev->irq = irqlist[j];
+- if (!request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (!request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ isa_cardname, dev))
+ break;
+ }
+@@ -217,7 +217,7 @@ static int __init setup_card(struct net_
+ dev->name, dev->irq);
+ goto out3;
+ }
+- if (request_irq(dev->irq, tms380tr_interrupt, 0,
++ if (request_irq(dev->irq, tms380tr_interrupt, SA_NET_RANDOM,
+ isa_cardname, dev))
+ {
+ printk(KERN_INFO "%s: Selected IRQ %d not available\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/smctr.c linux-2.6.7-netdev_random/drivers/net/tokenring/smctr.c
+--- linux-2.6.7/drivers/net/tokenring/smctr.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/smctr.c 2004-06-24 10:16:10.678870984 +0200
+@@ -532,7 +532,7 @@ static int __init smctr_chk_mca(struct n
+ dev->irq = 15;
+ break;
+ }
+- if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev)) {
++ if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ | SA_NET_RANDOM, smctr_name, dev)) {
+ release_region(dev->base_addr, SMCTR_IO_EXTENT);
+ return -ENODEV;
+ }
+@@ -1062,7 +1062,7 @@ static int __init smctr_chk_isa(struct n
+ goto out2;
+ }
+
+- if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ, smctr_name, dev))
++ if (request_irq(dev->irq, smctr_interrupt, SA_SHIRQ | SA_NET_RANDOM, smctr_name, dev))
+ goto out2;
+
+ /* Get 58x Rom Base */
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tokenring/tmspci.c linux-2.6.7-netdev_random/drivers/net/tokenring/tmspci.c
+--- linux-2.6.7/drivers/net/tokenring/tmspci.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tokenring/tmspci.c 2004-06-24 10:16:10.687869616 +0200
+@@ -122,7 +122,7 @@ static int __devinit tms_pci_attach(stru
+ goto err_out_trdev;
+ }
+
+- ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ,
++ ret = request_irq(pdev->irq, tms380tr_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (ret)
+ goto err_out_region;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/de2104x.c linux-2.6.7-netdev_random/drivers/net/tulip/de2104x.c
+--- linux-2.6.7/drivers/net/tulip/de2104x.c 2004-06-16 07:19:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/de2104x.c 2004-06-24 10:16:10.705866880 +0200
+@@ -1383,7 +1383,7 @@ static int de_open (struct net_device *d
+ goto err_out_free;
+ }
+
+- rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ, dev->name, dev);
++ rc = request_irq(dev->irq, de_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (rc) {
+ printk(KERN_ERR "%s: IRQ %d request failure, err=%d\n",
+ dev->name, dev->irq, rc);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/de4x5.c linux-2.6.7-netdev_random/drivers/net/tulip/de4x5.c
+--- linux-2.6.7/drivers/net/tulip/de4x5.c 2004-06-16 07:18:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/de4x5.c 2004-06-24 10:16:10.724863992 +0200
+@@ -1320,10 +1320,10 @@ de4x5_open(struct net_device *dev)
+ lp->state = OPEN;
+ de4x5_dbg_open(dev);
+
+- if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ,
++ if (request_irq(dev->irq, (void *)de4x5_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ lp->adapter_name, dev)) {
+ printk("de4x5_open(): Requested IRQ%d is busy - attemping FAST/SHARE...", dev->irq);
+- if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ,
++ if (request_irq(dev->irq, de4x5_interrupt, SA_INTERRUPT | SA_SHIRQ | SA_NET_RANDOM,
+ lp->adapter_name, dev)) {
+ printk("\n Cannot get IRQ- reconfigure your hardware.\n");
+ disable_ast(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/dmfe.c linux-2.6.7-netdev_random/drivers/net/tulip/dmfe.c
+--- linux-2.6.7/drivers/net/tulip/dmfe.c 2004-06-16 07:19:53.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/dmfe.c 2004-06-24 10:16:10.742861256 +0200
+@@ -504,7 +504,7 @@ static int dmfe_open(struct DEVICE *dev)
+
+ DMFE_DBUG(0, "dmfe_open", 0);
+
+- ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ, dev->name, dev);
++ ret = request_irq(dev->irq, &dmfe_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ return ret;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/tulip_core.c linux-2.6.7-netdev_random/drivers/net/tulip/tulip_core.c
+--- linux-2.6.7/drivers/net/tulip/tulip_core.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/tulip_core.c 2004-06-24 10:16:10.763858064 +0200
+@@ -486,7 +486,7 @@ tulip_open(struct net_device *dev)
+ {
+ int retval;
+
+- if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ, dev->name, dev)))
++ if ((retval = request_irq(dev->irq, &tulip_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)))
+ return retval;
+
+ tulip_init_ring (dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/winbond-840.c linux-2.6.7-netdev_random/drivers/net/tulip/winbond-840.c
+--- linux-2.6.7/drivers/net/tulip/winbond-840.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/winbond-840.c 2004-06-24 10:16:10.773856544 +0200
+@@ -693,7 +693,7 @@ static int netdev_open(struct net_device
+ writel(0x00000001, ioaddr + PCIBusCfg); /* Reset */
+
+ netif_device_detach(dev);
+- i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &intr_handler, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i)
+ goto out_err;
+
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/xircom_cb.c linux-2.6.7-netdev_random/drivers/net/tulip/xircom_cb.c
+--- linux-2.6.7/drivers/net/tulip/xircom_cb.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/xircom_cb.c 2004-06-24 10:16:10.783855024 +0200
+@@ -448,7 +448,7 @@ static int xircom_open(struct net_device
+ int retval;
+ enter("xircom_open");
+ printk(KERN_INFO "xircom cardbus adaptor found, registering as %s, using irq %i \n",dev->name,dev->irq);
+- retval = request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev);
++ retval = request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (retval) {
+ leave("xircom_open - No IRQ");
+ return retval;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/tulip/xircom_tulip_cb.c linux-2.6.7-netdev_random/drivers/net/tulip/xircom_tulip_cb.c
+--- linux-2.6.7/drivers/net/tulip/xircom_tulip_cb.c 2004-06-16 07:18:59.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/tulip/xircom_tulip_cb.c 2004-06-24 10:16:10.796853048 +0200
+@@ -806,7 +806,7 @@ xircom_open(struct net_device *dev)
+ {
+ struct xircom_private *tp = dev->priv;
+
+- if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ, dev->name, dev))
++ if (request_irq(dev->irq, &xircom_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))
+ return -EAGAIN;
+
+ xircom_up(dev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/typhoon.c linux-2.6.7-netdev_random/drivers/net/typhoon.c
+--- linux-2.6.7/drivers/net/typhoon.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/typhoon.c 2004-06-24 10:16:10.815850160 +0200
+@@ -2113,7 +2113,7 @@ typhoon_open(struct net_device *dev)
+ goto out_sleep;
+ }
+
+- err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
++ err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if(err < 0)
+ goto out_sleep;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/via-rhine.c linux-2.6.7-netdev_random/drivers/net/via-rhine.c
+--- linux-2.6.7/drivers/net/via-rhine.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/via-rhine.c 2004-06-24 10:22:33.447681280 +0200
+@@ -1152,7 +1152,7 @@ static int rhine_open(struct net_device
+ /* Reset the chip. */
+ writew(CmdReset, ioaddr + ChipCmd);
+
+- i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
++ i = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name,
+ dev);
+ if (i)
+ return i;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/c101.c linux-2.6.7-netdev_random/drivers/net/wan/c101.c
+--- linux-2.6.7/drivers/net/wan/c101.c 2004-06-16 07:19:44.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/c101.c 2004-06-24 10:16:10.854844232 +0200
+@@ -325,7 +325,7 @@ static int __init c101_run(unsigned long
+ return -ENOBUFS;
+ }
+
+- if (request_irq(irq, sca_intr, 0, devname, card)) {
++ if (request_irq(irq, sca_intr, SA_NET_RANDOM, devname, card)) {
+ printk(KERN_ERR "c101: could not allocate IRQ\n");
+ c101_destroy_card(card);
+ return(-EBUSY);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/cosa.c linux-2.6.7-netdev_random/drivers/net/wan/cosa.c
+--- linux-2.6.7/drivers/net/wan/cosa.c 2004-06-16 07:18:52.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/cosa.c 2004-06-24 10:16:11.401761088 +0200
+@@ -570,7 +570,7 @@ static int cosa_probe(int base, int irq,
+ cosa->usage = 0;
+ cosa->nchannels = 2; /* FIXME: how to determine this? */
+
+- if (request_irq(cosa->irq, cosa_interrupt, 0, cosa->type, cosa)) {
++ if (request_irq(cosa->irq, cosa_interrupt, SA_NET_RANDOM, cosa->type, cosa)) {
+ err = -1;
+ goto err_out;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/cycx_main.c linux-2.6.7-netdev_random/drivers/net/wan/cycx_main.c
+--- linux-2.6.7/drivers/net/wan/cycx_main.c 2004-06-16 07:19:23.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/cycx_main.c 2004-06-24 10:16:11.420758200 +0200
+@@ -214,7 +214,7 @@ static int cycx_wan_setup(struct wan_dev
+ /* Allocate IRQ */
+ irq = conf->irq == 2 ? 9 : conf->irq; /* IRQ2 -> IRQ9 */
+
+- if (request_irq(irq, cycx_isr, 0, wandev->name, card)) {
++ if (request_irq(irq, cycx_isr, SA_NET_RANDOM, wandev->name, card)) {
+ printk(KERN_ERR "%s: can't reserve IRQ %d!\n",
+ wandev->name, irq);
+ goto out;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/dscc4.c linux-2.6.7-netdev_random/drivers/net/wan/dscc4.c
+--- linux-2.6.7/drivers/net/wan/dscc4.c 2004-06-16 07:20:27.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/dscc4.c 2004-06-24 10:16:11.453753184 +0200
+@@ -749,7 +749,7 @@ static int __devinit dscc4_init_one(stru
+
+ priv = (struct dscc4_pci_priv *)pci_get_drvdata(pdev);
+
+- if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ, DRV_NAME, priv->root)){
++ if (request_irq(pdev->irq, &dscc4_irq, SA_SHIRQ | SA_NET_RANDOM, DRV_NAME, priv->root)){
+ printk(KERN_WARNING "%s: IRQ %d busy\n", DRV_NAME, pdev->irq);
+ goto err_out_free1;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/farsync.c linux-2.6.7-netdev_random/drivers/net/wan/farsync.c
+--- linux-2.6.7/drivers/net/wan/farsync.c 2004-06-16 07:19:42.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/farsync.c 2004-06-24 10:23:43.843979416 +0200
+@@ -2521,7 +2521,7 @@ fst_add_one(struct pci_dev *pdev, const
+ dbg(DBG_PCI, "kernel mem %p, ctlmem %p\n", card->mem, card->ctlmem);
+
+ /* Register the interrupt handler */
+- if (request_irq(pdev->irq, fst_intr, SA_SHIRQ, FST_DEV_NAME, card)) {
++ if (request_irq(pdev->irq, fst_intr, SA_SHIRQ | SA_NET_RANDOM, FST_DEV_NAME, card)) {
+ printk_err("Unable to register interrupt %d\n", card->irq);
+ pci_release_regions(pdev);
+ pci_disable_device(pdev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/hostess_sv11.c linux-2.6.7-netdev_random/drivers/net/wan/hostess_sv11.c
+--- linux-2.6.7/drivers/net/wan/hostess_sv11.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/hostess_sv11.c 2004-06-24 10:16:11.518743304 +0200
+@@ -263,7 +263,7 @@ static struct sv11_device *sv11_init(int
+ /* We want a fast IRQ for this device. Actually we'd like an even faster
+ IRQ ;) - This is one driver RtLinux is made for */
+
+- if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "Hostess SV/11", dev)<0)
++ if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT | SA_NET_RANDOM, "Hostess SV/11", dev)<0)
+ {
+ printk(KERN_WARNING "hostess: IRQ %d already in use.\n", irq);
+ goto fail1;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/lmc/lmc_main.c linux-2.6.7-netdev_random/drivers/net/wan/lmc/lmc_main.c
+--- linux-2.6.7/drivers/net/wan/lmc/lmc_main.c 2004-06-16 07:19:10.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/lmc/lmc_main.c 2004-06-24 10:16:11.557737376 +0200
+@@ -1060,7 +1060,7 @@ static int lmc_open (struct net_device *
+ lmc_softreset (sc);
+
+ /* Since we have to use PCI bus, this should work on x86,alpha,ppc */
+- if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ, dev->name, dev)){
++ if (request_irq (dev->irq, &lmc_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev)){
+ printk(KERN_WARNING "%s: could not get irq: %d\n", dev->name, dev->irq);
+ lmc_trace(dev, "lmc_open irq failed out");
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/n2.c linux-2.6.7-netdev_random/drivers/net/wan/n2.c
+--- linux-2.6.7/drivers/net/wan/n2.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/n2.c 2004-06-24 10:16:11.573734944 +0200
+@@ -377,7 +377,7 @@ static int __init n2_run(unsigned long i
+ }
+ card->io = io;
+
+- if (request_irq(irq, &sca_intr, 0, devname, card)) {
++ if (request_irq(irq, &sca_intr, SA_NET_RANDOM, devname, card)) {
+ printk(KERN_ERR "n2: could not allocate IRQ\n");
+ n2_destroy_card(card);
+ return(-EBUSY);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/pc300_drv.c linux-2.6.7-netdev_random/drivers/net/wan/pc300_drv.c
+--- linux-2.6.7/drivers/net/wan/pc300_drv.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/pc300_drv.c 2004-06-24 10:16:11.592732056 +0200
+@@ -3603,7 +3603,7 @@ cpc_init_one(struct pci_dev *pdev, const
+ }
+
+ /* Allocate IRQ */
+- if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ, "Cyclades-PC300", card)) {
++ if (request_irq(card->hw.irq, cpc_intr, SA_SHIRQ | SA_NET_RANDOM, "Cyclades-PC300", card)) {
+ printk ("PC300 found at RAM 0x%08lx, but could not allocate IRQ%d.\n",
+ card->hw.ramphys, card->hw.irq);
+ goto err_io_unmap;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/pci200syn.c linux-2.6.7-netdev_random/drivers/net/wan/pci200syn.c
+--- linux-2.6.7/drivers/net/wan/pci200syn.c 2004-06-16 07:19:01.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/pci200syn.c 2004-06-24 10:16:11.609729472 +0200
+@@ -395,7 +395,7 @@ static int __devinit pci200_pci_init_one
+ writew(readw(p) | 0x0040, p);
+
+ /* Allocate IRQ */
+- if(request_irq(pdev->irq, sca_intr, SA_SHIRQ, devname, card)) {
++ if(request_irq(pdev->irq, sca_intr, SA_SHIRQ | SA_NET_RANDOM, devname, card)) {
+ printk(KERN_WARNING "pci200syn: could not allocate IRQ%d.\n",
+ pdev->irq);
+ pci200_pci_remove_one(pdev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/sbni.c linux-2.6.7-netdev_random/drivers/net/wan/sbni.c
+--- linux-2.6.7/drivers/net/wan/sbni.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/sbni.c 2004-06-24 10:16:11.637725216 +0200
+@@ -1189,7 +1189,7 @@ sbni_open( struct net_device *dev )
+ }
+ }
+
+- if( request_irq(dev->irq, sbni_interrupt, SA_SHIRQ, dev->name, dev) ) {
++ if( request_irq(dev->irq, sbni_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev) ) {
+ printk( KERN_ERR "%s: unable to get IRQ %d.\n",
+ dev->name, dev->irq );
+ return -EAGAIN;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/sdla.c linux-2.6.7-netdev_random/drivers/net/wan/sdla.c
+--- linux-2.6.7/drivers/net/wan/sdla.c 2004-06-16 07:20:26.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/sdla.c 2004-06-24 10:16:11.666720808 +0200
+@@ -1461,7 +1461,7 @@ got_type:
+ }
+
+ err = -EAGAIN;
+- if (request_irq(dev->irq, &sdla_isr, 0, dev->name, dev))
++ if (request_irq(dev->irq, &sdla_isr, SA_NET_RANDOM, dev->name, dev))
+ goto fail;
+
+ if (flp->type == SDLA_S507) {
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/sdlamain.c linux-2.6.7-netdev_random/drivers/net/wan/sdlamain.c
+--- linux-2.6.7/drivers/net/wan/sdlamain.c 2004-06-16 07:19:43.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/sdlamain.c 2004-06-24 10:16:11.681718528 +0200
+@@ -458,7 +458,7 @@ static int setup(struct wan_device* wand
+ /* when using the S514 PCI adapter */
+
+ if(request_irq(irq, sdla_isr,
+- (card->hw.type == SDLA_S514) ? SA_SHIRQ : 0,
++ (card->hw.type == SDLA_S514) ? SA_SHIRQ | SA_NET_RANDOM : 0,
+ wandev->name, card)){
+
+ printk(KERN_INFO "%s: Can't reserve IRQ %d!\n", wandev->name, irq);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/sealevel.c linux-2.6.7-netdev_random/drivers/net/wan/sealevel.c
+--- linux-2.6.7/drivers/net/wan/sealevel.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/sealevel.c 2004-06-24 10:16:11.709714272 +0200
+@@ -321,7 +321,7 @@ static __init struct slvl_board *slvl_in
+ /* We want a fast IRQ for this device. Actually we'd like an even faster
+ IRQ ;) - This is one driver RtLinux is made for */
+
+- if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT, "SeaLevel", dev)<0)
++ if(request_irq(irq, &z8530_interrupt, SA_INTERRUPT | SA_NET_RANDOM, "SeaLevel", dev)<0)
+ {
+ printk(KERN_WARNING "sealevel: IRQ %d already in use.\n", irq);
+ goto fail1_1;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wan/wanxl.c linux-2.6.7-netdev_random/drivers/net/wan/wanxl.c
+--- linux-2.6.7/drivers/net/wan/wanxl.c 2004-06-16 07:20:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wan/wanxl.c 2004-06-24 10:16:11.735710320 +0200
+@@ -750,7 +750,7 @@ static int __devinit wanxl_pci_init_one(
+ card_name(pdev), plx_phy, ramsize / 1024, mem_phy, pdev->irq);
+
+ /* Allocate IRQ */
+- if (request_irq(pdev->irq, wanxl_intr, SA_SHIRQ, "wanXL", card)) {
++ if (request_irq(pdev->irq, wanxl_intr, SA_SHIRQ | SA_NET_RANDOM, "wanXL", card)) {
+ printk(KERN_WARNING "wanXL %s: could not allocate IRQ%i.\n",
+ card_name(pdev), pdev->irq);
+ wanxl_pci_remove_one(pdev);
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wd.c linux-2.6.7-netdev_random/drivers/net/wd.c
+--- linux-2.6.7/drivers/net/wd.c 2004-06-16 07:18:57.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wd.c 2004-06-24 10:16:11.748708344 +0200
+@@ -300,7 +300,7 @@ static int __init wd_probe1(struct net_d
+
+ /* Snarf the interrupt now. There's no point in waiting since we cannot
+ share and the board will usually be enabled. */
+- i = request_irq(dev->irq, ei_interrupt, 0, dev->name, dev);
++ i = request_irq(dev->irq, ei_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (i) {
+ printk (" unable to get IRQ %d.\n", dev->irq);
+ return i;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/airo.c linux-2.6.7-netdev_random/drivers/net/wireless/airo.c
+--- linux-2.6.7/drivers/net/wireless/airo.c 2004-06-16 07:18:58.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/airo.c 2004-06-24 10:16:11.791701808 +0200
+@@ -2745,7 +2745,7 @@ struct net_device *_init_airo_card( unsi
+ SET_NETDEV_DEV(dev, &pci->dev);
+ }
+
+- rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ, dev->name, dev );
++ rc = request_irq( dev->irq, airo_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev );
+ if (rc) {
+ printk(KERN_ERR "airo: register interrupt %d failed, rc %d\n", irq, rc );
+ goto err_out_unlink;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/airport.c linux-2.6.7-netdev_random/drivers/net/wireless/airport.c
+--- linux-2.6.7/drivers/net/wireless/airport.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/airport.c 2004-06-24 10:16:11.804699832 +0200
+@@ -243,7 +243,7 @@ airport_attach(struct macio_dev *mdev, c
+ /* Reset it before we get the interrupt */
+ hermes_init(hw);
+
+- if (request_irq(dev->irq, orinoco_interrupt, 0, "Airport", dev)) {
++ if (request_irq(dev->irq, orinoco_interrupt, SA_NET_RANDOM, "Airport", dev)) {
+ printk(KERN_ERR "airport: Couldn't get IRQ %d\n", dev->irq);
+ goto failed;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/arlan-main.c linux-2.6.7-netdev_random/drivers/net/wireless/arlan-main.c
+--- linux-2.6.7/drivers/net/wireless/arlan-main.c 2004-06-16 07:19:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/arlan-main.c 2004-06-24 10:16:11.819697552 +0200
+@@ -1116,7 +1116,7 @@ static int arlan_open(struct net_device
+
+ ARLAN_DEBUG_ENTRY("arlan_open");
+
+- ret = request_irq(dev->irq, &arlan_interrupt, 0, dev->name, dev);
++ ret = request_irq(dev->irq, &arlan_interrupt, SA_NET_RANDOM, dev->name, dev);
+ if (ret)
+ {
+ printk(KERN_ERR "%s: unable to get IRQ %d .\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/atmel.c linux-2.6.7-netdev_random/drivers/net/wireless/atmel.c
+--- linux-2.6.7/drivers/net/wireless/atmel.c 2004-06-16 07:19:02.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/atmel.c 2004-06-24 10:16:11.848693144 +0200
+@@ -1579,7 +1579,7 @@ struct net_device *init_atmel_card( unsi
+ dev->irq = irq;
+ dev->base_addr = port;
+
+- if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ, dev->name, dev))) {
++ if ((rc = request_irq(dev->irq, service_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev))) {
+ printk(KERN_ERR "%s: register interrupt %d failed, rc %d\n", dev->name, irq, rc );
+ goto err_out_free;
+ }
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/orinoco_pci.c linux-2.6.7-netdev_random/drivers/net/wireless/orinoco_pci.c
+--- linux-2.6.7/drivers/net/wireless/orinoco_pci.c 2004-06-16 07:19:42.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/orinoco_pci.c 2004-06-24 10:16:11.872689496 +0200
+@@ -229,7 +229,7 @@ static int orinoco_pci_init_one(struct p
+ HERMES_MEM, HERMES_32BIT_REGSPACING);
+ pci_set_drvdata(pdev, dev);
+
+- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ,
++ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ | SA_NET_RANDOM,
+ dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "orinoco_pci: Error allocating IRQ %d.\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/orinoco_plx.c linux-2.6.7-netdev_random/drivers/net/wireless/orinoco_plx.c
+--- linux-2.6.7/drivers/net/wireless/orinoco_plx.c 2004-06-16 07:20:04.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/orinoco_plx.c 2004-06-24 10:16:11.884687672 +0200
+@@ -242,7 +242,7 @@ static int orinoco_plx_init_one(struct p
+ HERMES_IO, HERMES_16BIT_REGSPACING);
+ pci_set_drvdata(pdev, dev);
+
+- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name, dev);
++ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (err) {
+ printk(KERN_ERR "orinoco_plx: Error allocating IRQ %d.\n", pdev->irq);
+ err = -EBUSY;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/orinoco_tmd.c linux-2.6.7-netdev_random/drivers/net/wireless/orinoco_tmd.c
+--- linux-2.6.7/drivers/net/wireless/orinoco_tmd.c 2004-06-16 07:18:59.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/orinoco_tmd.c 2004-06-24 10:16:11.895686000 +0200
+@@ -134,7 +134,7 @@ static int orinoco_tmd_init_one(struct p
+ HERMES_IO, HERMES_16BIT_REGSPACING);
+ pci_set_drvdata(pdev, dev);
+
+- err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ, dev->name,
++ err = request_irq(pdev->irq, orinoco_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name,
+ dev);
+ if (err) {
+ printk(KERN_ERR "orinoco_tmd: Error allocating IRQ %d.\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/wireless/wavelan.c linux-2.6.7-netdev_random/drivers/net/wireless/wavelan.c
+--- linux-2.6.7/drivers/net/wireless/wavelan.c 2004-06-16 07:20:03.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/wireless/wavelan.c 2004-06-24 10:16:11.929680832 +0200
+@@ -4022,7 +4022,7 @@ static int wavelan_open(struct net_devic
+ return -ENXIO;
+ }
+
+- if (request_irq(dev->irq, &wavelan_interrupt, 0, "WaveLAN", dev) != 0)
++ if (request_irq(dev->irq, &wavelan_interrupt, SA_NET_RANDOM, "WaveLAN", dev) != 0)
+ {
+ #ifdef DEBUG_CONFIG_ERROR
+ printk(KERN_WARNING "%s: wavelan_open(): invalid IRQ\n",
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/yellowfin.c linux-2.6.7-netdev_random/drivers/net/yellowfin.c
+--- linux-2.6.7/drivers/net/yellowfin.c 2004-06-16 07:19:13.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/yellowfin.c 2004-06-24 10:16:11.942678856 +0200
+@@ -632,7 +632,7 @@ static int yellowfin_open(struct net_dev
+ /* Reset the chip. */
+ outl(0x80000000, ioaddr + DMACtrl);
+
+- i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(dev->irq, &yellowfin_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ if (yellowfin_debug > 1)
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/znet.c linux-2.6.7-netdev_random/drivers/net/znet.c
+--- linux-2.6.7/drivers/net/znet.c 2004-06-16 07:18:37.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/znet.c 2004-06-24 10:16:11.955676880 +0200
+@@ -173,7 +173,7 @@ static int znet_request_resources (struc
+ struct znet_private *znet = dev->priv;
+ unsigned long flags;
+
+- if (request_irq (dev->irq, &znet_interrupt, 0, "ZNet", dev))
++ if (request_irq (dev->irq, &znet_interrupt, SA_NET_RANDOM, "ZNet", dev))
+ goto failed;
+ if (request_dma (znet->rx_dma, "ZNet rx"))
+ goto free_irq;
+diff -uprN -X dontdiff linux-2.6.7/drivers/net/zorro8390.c linux-2.6.7-netdev_random/drivers/net/zorro8390.c
+--- linux-2.6.7/drivers/net/zorro8390.c 2004-06-16 07:19:36.000000000 +0200
++++ linux-2.6.7-netdev_random/drivers/net/zorro8390.c 2004-06-24 10:16:11.968674904 +0200
+@@ -198,7 +198,7 @@ static int __devinit zorro8390_init(stru
+ dev->irq = IRQ_AMIGA_PORTS;
+
+ /* Install the Interrupt handler */
+- i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ, dev->name, dev);
++ i = request_irq(IRQ_AMIGA_PORTS, ei_interrupt, SA_SHIRQ | SA_NET_RANDOM, dev->name, dev);
+ if (i) return i;
+
+ for(i = 0; i < ETHER_ADDR_LEN; i++) {