author     Joshua Kinard <kumba@gentoo.org>    2004-02-23 10:48:55 +0000
committer  Joshua Kinard <kumba@gentoo.org>    2004-02-23 10:48:55 +0000
commit     43bdb2ea08f9753abb72fbea8ee5f56a0f09e36c (patch)
tree       5057c38cdea481c5f8a6ab932f9cd6ef007b56e3 /sys-kernel
parent     moved 13 to stable, 12 is causing issues all over the place it seems (diff)
More cleanups, mainly in renaming the security-based patches to include their CVE number for easy reference on their purpose. Also added an ebuild for 2.4.25 kernel, taken from linux-mips CVS on 20040222.
Diffstat (limited to 'sys-kernel')
-rw-r--r--  sys-kernel/mips-sources/ChangeLog | 19
-rw-r--r--  sys-kernel/mips-sources/Manifest | 22
-rw-r--r--  sys-kernel/mips-sources/files/CAN-2003-0961-do_brk.patch (renamed from sys-kernel/mips-sources/files/do_brk_fix.patch) | 0
-rw-r--r--  sys-kernel/mips-sources/files/CAN-2003-0985-mremap.patch (renamed from sys-kernel/mips-sources/files/mremap-fix-try2.patch) | 0
-rw-r--r--  sys-kernel/mips-sources/files/CAN-2004-0010-ncpfs.patch | 200
-rw-r--r--  sys-kernel/mips-sources/files/CAN-2004-0077-do_munmap.patch (renamed from sys-kernel/mips-sources/files/do_munmap-fix.patch) | 0
-rw-r--r--  sys-kernel/mips-sources/files/digest-mips-sources-2.4.25 | 3
-rw-r--r--  sys-kernel/mips-sources/files/mipscvs-2.4.23-makefile-inlinelimit.patch (renamed from sys-kernel/mips-sources/files/mipscvs-2.4.23-makefile-inlinelimit-fix.patch) | 0
-rw-r--r--  sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-fix.patch | 12
-rw-r--r--  sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-inlinelimit.patch | 38
-rw-r--r--  sys-kernel/mips-sources/files/pax-linux-2.4.25-200402192035.patch | 8597
-rw-r--r--  sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild | 18
-rw-r--r--  sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild | 18
-rw-r--r--  sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild | 19
-rw-r--r--  sys-kernel/mips-sources/mips-sources-2.4.25.ebuild | 69
15 files changed, 8980 insertions, 35 deletions
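
The four renames in the diffstat above are content-identical (note the 0-line counts); CVS has no native rename, so each one is effectively a copy, add, and remove. A minimal sketch of that workflow for one of the files (the exact commands are illustrative, not recorded in the commit):

    cd sys-kernel/mips-sources/files
    cp do_brk_fix.patch CAN-2003-0961-do_brk.patch   # contents unchanged, hence the 0-line rename entries
    cvs add CAN-2003-0961-do_brk.patch
    cvs remove -f do_brk_fix.patch
    cvs commit -m "Rename security patches to their CAN/CVE identifiers"
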
diff --git a/sys-kernel/mips-sources/ChangeLog b/sys-kernel/mips-sources/ChangeLog
index 62fab3ffcb68..429c1e6ee2a4 100644
--- a/sys-kernel/mips-sources/ChangeLog
+++ b/sys-kernel/mips-sources/ChangeLog
@@ -1,6 +1,23 @@
# ChangeLog for sys-kernel/mips-sources
# Copyright 2002-2004 Gentoo Technologies, Inc.; Distributed under the GPL v2
-# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/ChangeLog,v 1.25 2004/02/18 21:41:17 kumba Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/ChangeLog,v 1.26 2004/02/23 10:48:55 kumba Exp $
+
+*mips-sources-2.4.25 (23 Feb 2004)
+
+ 23 Feb 2004; Joshua Kinard <kumba@gentoo.org> mips-sources-2.4.21-r7.ebuild,
+ mips-sources-2.4.22-r10.ebuild, mips-sources-2.4.23-r6.ebuild,
+ mips-sources-2.4.25.ebuild, files/CAN-2003-0961-do_brk.patch,
+ files/CAN-2003-0985-mremap.patch, files/CAN-2004-0010-ncpfs.patch,
+ files/CAN-2004-0077-do_munmap.patch, files/do_brk_fix.patch,
+ files/do_munmap-fix.patch,
+ files/mipscvs-2.4.23-makefile-inlinelimit-fix.patch,
+ files/mipscvs-2.4.23-makefile-inlinelimit.patch,
+ files/mipscvs-2.4.25-makefile-fix.patch,
+ files/mipscvs-2.4.25-makefile-inlinelimit.patch,
+ files/mremap-fix-try2.patch, files/pax-linux-2.4.25-200402192035.patch:
+ More cleanups, mainly in renaming the security-based patches to include their
+ CVE number for easy reference on their purpose. Also added an ebuild for
+ 2.4.25 kernel, taken from linux-mips CVS on 20040222.
*mips-sources-2.4.23-r6 (18 Feb 2004)
diff --git a/sys-kernel/mips-sources/Manifest b/sys-kernel/mips-sources/Manifest
index 514f7fd0cbf6..baa39dc1dba7 100644
--- a/sys-kernel/mips-sources/Manifest
+++ b/sys-kernel/mips-sources/Manifest
@@ -1,17 +1,23 @@
-MD5 6fdff6930e3649eb153f8a4e79374ae2 ChangeLog 7914
+MD5 7198fe4dee68c23d3cde981830003056 ChangeLog 8805
MD5 ad25a2a0b6ade60c13ad8040f3c319c6 metadata.xml 378
-MD5 e715f3613ad5e87f383e3b6248ddbc01 mips-sources-2.4.21-r7.ebuild 2307
-MD5 ea1a1dddba8bde8a01060bf4146cc880 mips-sources-2.4.22-r10.ebuild 2135
-MD5 8d6cc18b83a4b37b5cb4006091d73d0a mips-sources-2.4.23-r6.ebuild 2861
+MD5 f68ccd8232d3fad9033d4bad71f68dbf mips-sources-2.4.21-r7.ebuild 2311
+MD5 827706c9fd682fa87523068eae0ae756 mips-sources-2.4.22-r10.ebuild 2139
+MD5 b26e5e89167ed7dbe332e080fb1a7e1d mips-sources-2.4.23-r6.ebuild 2925
+MD5 e36930b56e543566ea859addde717eed mips-sources-2.4.25.ebuild 2112
MD5 e300a3e148a7dcc38a0e099494fb6cb3 files/bigendian-byteorder-fix.patch 1557
MD5 0a3a1be2517b274c5a0c0748a437b6bd files/digest-mips-sources-2.4.21-r7 219
MD5 93f8e3701539d6699921e4becaa72f2d files/digest-mips-sources-2.4.22-r10 218
MD5 5aa63a0a1f498866f08339bcbd33f8e3 files/digest-mips-sources-2.4.23-r6 426
-MD5 e637c6fa41097ea2c4693d0766f2e1c5 files/do_brk_fix.patch 242
MD5 054c3c8e9c8804a13c79d01ec52793ed files/mipscvs-2.4.21-makefile-fix.patch 915
MD5 1d8857f9bf884fc661beb87cc9d043d1 files/mipscvs-2.4.22-makefile-fix.patch 1873
-MD5 a361526659bf1585152b2c819e329941 files/mipscvs-2.4.23-makefile-inlinelimit-fix.patch 1314
-MD5 174438d215b70cad5ffb00ca8123c062 files/do_munmap-fix.patch 837
MD5 6ff178aa1398ac2347921194944376f8 files/mipscvs-2.4.23-makefile-fix.patch 1906
-MD5 5e4b24d4c540af721c5bc7e3ad77f40b files/mremap-fix-try2.patch 414
+MD5 a361526659bf1585152b2c819e329941 files/mipscvs-2.4.23-makefile-inlinelimit.patch 1314
+MD5 fbb6766828584e454bf053286aad6207 files/mipscvs-2.4.25-makefile-fix.patch 428
+MD5 95ba3093147f4188db0ea4949e4317cf files/mipscvs-2.4.25-makefile-inlinelimit.patch 1588
+MD5 b0d077b1ad269163db41ecf3156654f4 files/pax-linux-2.4.25-200402192035.patch 278052
+MD5 e637c6fa41097ea2c4693d0766f2e1c5 files/CAN-2003-0961-do_brk.patch 242
+MD5 5e4b24d4c540af721c5bc7e3ad77f40b files/CAN-2003-0985-mremap.patch 414
MD5 1f646066ee81f7aea1f79a79e38a252d files/rtc-fixes.patch 2125
+MD5 de2c00e2df9c2bf74e5c090ade2c23b0 files/digest-mips-sources-2.4.25 217
+MD5 147fec50180ad91b6260fc7201dcb90f files/CAN-2004-0010-ncpfs.patch 6050
+MD5 174438d215b70cad5ffb00ca8123c062 files/CAN-2004-0077-do_munmap.patch 837
diff --git a/sys-kernel/mips-sources/files/do_brk_fix.patch b/sys-kernel/mips-sources/files/CAN-2003-0961-do_brk.patch
index fef1f1e981e2..fef1f1e981e2 100644
--- a/sys-kernel/mips-sources/files/do_brk_fix.patch
+++ b/sys-kernel/mips-sources/files/CAN-2003-0961-do_brk.patch
diff --git a/sys-kernel/mips-sources/files/mremap-fix-try2.patch b/sys-kernel/mips-sources/files/CAN-2003-0985-mremap.patch
index 03f49e4f6d7d..03f49e4f6d7d 100644
--- a/sys-kernel/mips-sources/files/mremap-fix-try2.patch
+++ b/sys-kernel/mips-sources/files/CAN-2003-0985-mremap.patch
diff --git a/sys-kernel/mips-sources/files/CAN-2004-0010-ncpfs.patch b/sys-kernel/mips-sources/files/CAN-2004-0010-ncpfs.patch
new file mode 100644
index 000000000000..6b4b1cefa49e
--- /dev/null
+++ b/sys-kernel/mips-sources/files/CAN-2004-0010-ncpfs.patch
@@ -0,0 +1,200 @@
+diff -urN linux-2.4.25-pre6/fs/ncpfs/dir.c linux-2.4.25-pre7/fs/ncpfs/dir.c
+--- linux-2.4.25-pre6/fs/ncpfs/dir.c 2002-11-28 15:53:15.000000000 -0800
++++ linux-2.4.25-pre7/fs/ncpfs/dir.c 2004-01-23 10:53:26.000000000 -0800
+@@ -266,8 +266,8 @@
+ struct ncp_server *server;
+ struct inode *dir = dentry->d_parent->d_inode;
+ struct ncp_entry_info finfo;
+- int res, val = 0, len = dentry->d_name.len + 1;
+- __u8 __name[len];
++ int res, val = 0, len;
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+ if (!dentry->d_inode || !dir)
+ goto finished;
+@@ -291,14 +291,15 @@
+ dentry->d_parent->d_name.name, dentry->d_name.name,
+ NCP_GET_AGE(dentry));
+
++ len = sizeof(__name);
+ if (ncp_is_server_root(dir)) {
+ res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, 1);
++ dentry->d_name.len, 1);
+ if (!res)
+ res = ncp_lookup_volume(server, __name, &(finfo.i));
+ } else {
+ res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, !ncp_preserve_case(dir));
++ dentry->d_name.len, !ncp_preserve_case(dir));
+ if (!res)
+ res = ncp_obtain_info(server, dir, __name, &(finfo.i));
+ }
+@@ -548,9 +549,9 @@
+ int valid = 0;
+ int hashed = 0;
+ ino_t ino = 0;
+- __u8 __name[256];
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+- qname.len = 256;
++ qname.len = sizeof(__name);
+ if (ncp_vol2io(NCP_SERVER(inode), __name, &qname.len,
+ entry->i.entryName, entry->i.nameLen,
+ !ncp_preserve_entry_case(inode, entry->i.NSCreator)))
+@@ -705,16 +706,19 @@
+ {
+ struct ncp_server* server = NCP_SBP(sb);
+ struct nw_info_struct i;
+- int result, len = strlen(server->m.mounted_vol) + 1;
+- __u8 __name[len];
++ int result;
+
+ if (ncp_single_volume(server)) {
++ int len;
+ struct dentry* dent;
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+- result = -ENOENT;
+- if (ncp_io2vol(server, __name, &len, server->m.mounted_vol,
+- len-1, 1))
++ len = sizeof(__name);
++ result = ncp_io2vol(server, __name, &len, server->m.mounted_vol,
++ strlen(server->m.mounted_vol), 1);
++ if (result)
+ goto out;
++ result = -ENOENT;
+ if (ncp_lookup_volume(server, __name, &i)) {
+ PPRINTK("ncp_conn_logged_in: %s not found\n",
+ server->m.mounted_vol);
+@@ -745,8 +749,8 @@
+ struct ncp_server *server = NCP_SERVER(dir);
+ struct inode *inode = NULL;
+ struct ncp_entry_info finfo;
+- int error, res, len = dentry->d_name.len + 1;
+- __u8 __name[len];
++ int error, res, len;
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+ error = -EIO;
+ if (!ncp_conn_valid(server))
+@@ -755,14 +759,15 @@
+ PPRINTK("ncp_lookup: server lookup for %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+
++ len = sizeof(__name);
+ if (ncp_is_server_root(dir)) {
+ res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, 1);
++ dentry->d_name.len, 1);
+ if (!res)
+ res = ncp_lookup_volume(server, __name, &(finfo.i));
+ } else {
+ res = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, !ncp_preserve_case(dir));
++ dentry->d_name.len, !ncp_preserve_case(dir));
+ if (!res)
+ res = ncp_obtain_info(server, dir, __name, &(finfo.i));
+ }
+@@ -825,9 +830,9 @@
+ {
+ struct ncp_server *server = NCP_SERVER(dir);
+ struct ncp_entry_info finfo;
+- int error, result, len = dentry->d_name.len + 1;
++ int error, result, len;
+ int opmode;
+- __u8 __name[len];
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+ PPRINTK("ncp_create_new: creating %s/%s, mode=%x\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name, mode);
+@@ -836,8 +841,9 @@
+ goto out;
+
+ ncp_age_dentry(server, dentry);
++ len = sizeof(__name);
+ error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, !ncp_preserve_case(dir));
++ dentry->d_name.len, !ncp_preserve_case(dir));
+ if (error)
+ goto out;
+
+@@ -880,8 +886,8 @@
+ {
+ struct ncp_entry_info finfo;
+ struct ncp_server *server = NCP_SERVER(dir);
+- int error, len = dentry->d_name.len + 1;
+- __u8 __name[len];
++ int error, len;
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+ DPRINTK("ncp_mkdir: making %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+@@ -890,8 +896,9 @@
+ goto out;
+
+ ncp_age_dentry(server, dentry);
++ len = sizeof(__name);
+ error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, !ncp_preserve_case(dir));
++ dentry->d_name.len, !ncp_preserve_case(dir));
+ if (error)
+ goto out;
+
+@@ -909,8 +916,8 @@
+ static int ncp_rmdir(struct inode *dir, struct dentry *dentry)
+ {
+ struct ncp_server *server = NCP_SERVER(dir);
+- int error, result, len = dentry->d_name.len + 1;
+- __u8 __name[len];
++ int error, result, len;
++ __u8 __name[NCP_MAXPATHLEN + 1];
+
+ DPRINTK("ncp_rmdir: removing %s/%s\n",
+ dentry->d_parent->d_name.name, dentry->d_name.name);
+@@ -923,8 +930,9 @@
+ if (!d_unhashed(dentry))
+ goto out;
+
++ len = sizeof(__name);
+ error = ncp_io2vol(server, __name, &len, dentry->d_name.name,
+- len-1, !ncp_preserve_case(dir));
++ dentry->d_name.len, !ncp_preserve_case(dir));
+ if (error)
+ goto out;
+
+@@ -1022,9 +1030,8 @@
+ {
+ struct ncp_server *server = NCP_SERVER(old_dir);
+ int error;
+- int old_len = old_dentry->d_name.len + 1;
+- int new_len = new_dentry->d_name.len + 1;
+- __u8 __old_name[old_len], __new_name[new_len];
++ int old_len, new_len;
++ __u8 __old_name[NCP_MAXPATHLEN + 1], __new_name[NCP_MAXPATHLEN + 1];
+
+ DPRINTK("ncp_rename: %s/%s to %s/%s\n",
+ old_dentry->d_parent->d_name.name, old_dentry->d_name.name,
+@@ -1037,15 +1044,17 @@
+ ncp_age_dentry(server, old_dentry);
+ ncp_age_dentry(server, new_dentry);
+
++ old_len = sizeof(__old_name);
+ error = ncp_io2vol(server, __old_name, &old_len,
+- old_dentry->d_name.name, old_len-1,
+- !ncp_preserve_case(old_dir));
++ old_dentry->d_name.name, old_dentry->d_name.len,
++ !ncp_preserve_case(old_dir));
+ if (error)
+ goto out;
+
++ new_len = sizeof(__new_name);
+ error = ncp_io2vol(server, __new_name, &new_len,
+- new_dentry->d_name.name, new_len-1,
+- !ncp_preserve_case(new_dir));
++ new_dentry->d_name.name, new_dentry->d_name.len,
++ !ncp_preserve_case(new_dir));
+ if (error)
+ goto out;
+
+
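
All of the ncpfs hunks above apply one pattern for CAN-2004-0010: an on-stack name buffer that used to be a variable-length array sized from the incoming name (__u8 __name[dentry->d_name.len + 1]) becomes a fixed __u8 __name[NCP_MAXPATHLEN + 1], and len = sizeof(__name) is handed to ncp_io2vol() as an in/out capacity, so the converted output is bounded by the destination rather than by the source length. The standalone toy below only illustrates that calling convention; vol_convert() and MAXPATH are invented for the example and are not part of ncpfs:

    #include <stdio.h>
    #include <string.h>

    #define MAXPATH 255                      /* illustrative stand-in for NCP_MAXPATHLEN */

    /* Invented helper mirroring the ncp_io2vol() convention: *len holds the
     * destination capacity on entry and the converted length on return, so
     * the output can never exceed the fixed buffer. */
    static int vol_convert(unsigned char *dst, int *len, const char *src, int srclen)
    {
        if (srclen > *len)
            return -1;                       /* would not fit: reject instead of overflowing */
        memcpy(dst, src, srclen);
        *len = srclen;
        return 0;
    }

    int main(void)
    {
        const char *name = "volume-name-from-userspace";
        unsigned char buf[MAXPATH + 1];      /* fixed-size, like the patched code */
        int len = sizeof(buf);               /* capacity in, converted length out */

        if (vol_convert(buf, &len, name, (int)strlen(name)) == 0)
            printf("converted %d bytes\n", len);
        return 0;
    }
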
diff --git a/sys-kernel/mips-sources/files/do_munmap-fix.patch b/sys-kernel/mips-sources/files/CAN-2004-0077-do_munmap.patch
index e120b35b7adb..e120b35b7adb 100644
--- a/sys-kernel/mips-sources/files/do_munmap-fix.patch
+++ b/sys-kernel/mips-sources/files/CAN-2004-0077-do_munmap.patch
diff --git a/sys-kernel/mips-sources/files/digest-mips-sources-2.4.25 b/sys-kernel/mips-sources/files/digest-mips-sources-2.4.25
new file mode 100644
index 000000000000..0b8c1a6edbd7
--- /dev/null
+++ b/sys-kernel/mips-sources/files/digest-mips-sources-2.4.25
@@ -0,0 +1,3 @@
+MD5 5fc8e9f43fa44ac29ddf9a9980af57d8 linux-2.4.25.tar.bz2 30626548
+MD5 10b3bc2866a95e483f19093c980d9786 mipscvs-2.4.25-20040222.diff.bz2 89680
+MD5 3164a83e10562b34fa250fd9b892b5b2 cobalt-patches-24xx-1.1.tar.bz2 3831
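
The digest file above is generated rather than written by hand; once the new ebuild is in place, Portage fetches the SRC_URI distfiles and records their MD5 sums. A hedged sketch of that step with the tooling of the era (the repository path is assumed):

    cd /usr/portage/sys-kernel/mips-sources     # path assumed
    ebuild mips-sources-2.4.25.ebuild digest    # fetches the distfiles, writes files/digest-mips-sources-2.4.25
                                                # and refreshes the corresponding Manifest entries
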
diff --git a/sys-kernel/mips-sources/files/mipscvs-2.4.23-makefile-inlinelimit-fix.patch b/sys-kernel/mips-sources/files/mipscvs-2.4.23-makefile-inlinelimit.patch
index 1519f803b994..1519f803b994 100644
--- a/sys-kernel/mips-sources/files/mipscvs-2.4.23-makefile-inlinelimit-fix.patch
+++ b/sys-kernel/mips-sources/files/mipscvs-2.4.23-makefile-inlinelimit.patch
diff --git a/sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-fix.patch b/sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-fix.patch
new file mode 100644
index 000000000000..462c575b57ac
--- /dev/null
+++ b/sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-fix.patch
@@ -0,0 +1,12 @@
+--- arch/mips64/Makefile.orig 2004-02-22 16:38:07.553362088 -0500
++++ arch/mips64/Makefile 2004-02-22 16:41:35.328775424 -0500
+@@ -342,7 +342,7 @@ endif
+ # ELF files from 32-bit files by conversion.
+ #
+ ifdef CONFIG_BOOT_ELF64
+-GCCFLAGS += -Wa,-32 $(call check_gas,-Wa$(comma)-mgp64,)
++GCCFLAGS += -Wa,-mabi=o64 $(call check_gas,-Wa$(comma)-mgp64,)
+ LINKFLAGS += -T arch/mips64/ld.script.elf32
+ #AS += -64
+ #LD += -m elf64bmip
+
diff --git a/sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-inlinelimit.patch b/sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-inlinelimit.patch
new file mode 100644
index 000000000000..527852b03e1c
--- /dev/null
+++ b/sys-kernel/mips-sources/files/mipscvs-2.4.25-makefile-inlinelimit.patch
@@ -0,0 +1,38 @@
+--- arch/mips/Makefile.orig 2004-02-22 16:37:51.641781016 -0500
++++ arch/mips/Makefile 2004-02-22 16:38:27.264365560 -0500
+@@ -746,5 +746,6 @@ archmrproper:
+ archdep:
+ if [ ! -f $(TOPDIR)/include/asm-$(ARCH)/offset.h ]; then \
+ touch $(TOPDIR)/include/asm-$(ARCH)/offset.h; \
++ $(MAKE) -C arch/mips/tools clean; \
+ fi;
+ @$(MAKEBOOT) dep
+--- arch/mips64/Makefile.orig 2004-02-22 16:38:07.553362088 -0500
++++ arch/mips64/Makefile 2004-02-22 16:41:35.328775424 -0500
+@@ -26,6 +26,9 @@ ifdef CONFIG_CROSSCOMPILE
+ CROSS_COMPILE = $(tool-prefix)
+ endif
+
++check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
++check_gas = $(shell if $(CC) $(1) -Wa,-Z -c -o /dev/null -xassembler /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
++
+ #
+ # The ELF GCC uses -G 0 -mabicalls -fpic as default. We don't need PIC
+ # code in the kernel since it only slows down the whole thing. For the
+@@ -49,9 +52,6 @@ GCCFLAGS += -mno-sched-prolog -fno-omit-
+ endif
+ endif
+
+-check_gcc = $(shell if $(CC) $(1) -S -o /dev/null -xc /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
+-check_gas = $(shell if $(CC) $(1) -Wa,-Z -c -o /dev/null -xassembler /dev/null > /dev/null 2>&1; then echo "$(1)"; else echo "$(2)"; fi)
+-
+ #
+ # Use: $(call set_gccflags,<cpu0>,<isa0>,<cpu1>,<isa1>)
+ #
+@@ -402,5 +402,6 @@ archmrproper:
+ archdep:
+ if [ ! -f $(TOPDIR)/include/asm-$(ARCH)/offset.h ]; then \
+ touch $(TOPDIR)/include/asm-$(ARCH)/offset.h; \
++ $(MAKE) -C arch/mips/tools clean; \
+ fi;
+ @$(MAKEBOOT) dep
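
The check_gcc and check_gas macros that this patch moves earlier in arch/mips64/Makefile are plain compile probes: they invoke the compiler with a candidate flag against /dev/null and expand to that flag only if the compile succeeds, otherwise to the fallback. The shell sketch below reproduces the same probe outside of make; the flag is chosen only as an example and is not taken from the patch:

    #!/bin/sh
    # Stand-alone equivalent of the kernel's check_gcc probe.
    CC=${CC:-gcc}

    probe_cflag() {
        # $1 = candidate flag, $2 = fallback; prints whichever is usable
        if $CC "$1" -S -o /dev/null -xc /dev/null >/dev/null 2>&1; then
            printf '%s\n' "$1"
        else
            printf '%s\n' "$2"
        fi
    }

    probe_cflag "-finline-limit=100000" ""
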
diff --git a/sys-kernel/mips-sources/files/pax-linux-2.4.25-200402192035.patch b/sys-kernel/mips-sources/files/pax-linux-2.4.25-200402192035.patch
new file mode 100644
index 000000000000..689f1fad03f6
--- /dev/null
+++ b/sys-kernel/mips-sources/files/pax-linux-2.4.25-200402192035.patch
@@ -0,0 +1,8597 @@
+diff -Nurp linux-2.4.25/Documentation/Configure.help linux-2.4.25-pax/Documentation/Configure.help
+--- linux-2.4.25/Documentation/Configure.help 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/Documentation/Configure.help 2004-02-19 11:12:52.000000000 -0500
+@@ -28732,6 +28732,304 @@ CONFIG_SOUND_WM97XX
+
+ If unsure, say N.
+
++Support soft mode
++CONFIG_PAX_SOFTMODE
++ Enabling this option will allow you to run PaX in soft mode, that
++ is, PaX features will not be enforced by default, only on executables
++ marked explicitly. You must also enable PT_PAX_FLAGS support as it
++ is the only way to mark executables for soft mode use.
++
++ Soft mode can be activated by using the "pax_softmode=1" kernel command
++ line option on boot. Furthermore you can control various PaX features
++ at runtime via the entries in /proc/sys/kernel/pax.
++
++Use legacy ELF header marking
++CONFIG_PAX_EI_PAX
++ Enabling this option will allow you to control PaX features on
++ a per executable basis. The control flags will be read from
++ an otherwise reserved part of the ELF header. This marking has
++ numerous drawbacks (no support for soft-mode, toolchain does not
++ know about the non-standard use of the ELF header) therefore it
++ has been deprecated in favour of PT_PAX_FLAGS support.
++
++ You should enable this option only if your toolchain does not yet
++ support the new control flag location (PT_PAX_FLAGS) or you still
++ have applications not marked by PT_PAX_FLAGS.
++
++ Note that if you enable PT_PAX_FLAGS marking support as well,
++ it will override the legacy EI_PAX marks.
++
++Use ELF program header marking
++CONFIG_PAX_PT_PAX_FLAGS
++ Enabling this option will allow you to control PaX features on
++ a per executable basis. The control flags will be read from
++ a PaX specific ELF program header (PT_PAX_FLAGS). This marking
++ has the benefits of supporting both soft mode and being fully
++ integrated into the toolchain (the binutils patch is available
++ from http://pax.grsecurity.net).
++
++ Note that if you enable the legacy EI_PAX marking support as well,
++ it will be overriden by the PT_PAX_FLAGS marking.
++
++MAC system integration
++CONFIG_PAX_NO_ACL_FLAGS
++ Mandatory Access Control systems have the option of controlling
++ PaX flags on a per executable basis, choose the method supported
++ by your particular system.
++
++ - "none": if your MAC system does not interact with PaX,
++ - "direct": if your MAC system defines pax_set_flags() itself,
++ - "hook": if your MAC system uses the pax_set_flags_func callback.
++
++ NOTE: this option is for developers/integrators only.
++
++Enforce non-executable pages
++CONFIG_PAX_NOEXEC
++ By design some architectures do not allow for protecting memory
++ pages against execution or even if they do, Linux does not make
++ use of this feature. In practice this means that if a page is
++ readable (such as the stack or heap) it is also executable.
++
++ There is a well known exploit technique that makes use of this
++ fact and a common programming mistake where an attacker can
++ introduce code of his choice somewhere in the attacked program's
++ memory (typically the stack or the heap) and then execute it.
++
++ If the attacked program was running with different (typically
++ higher) privileges than that of the attacker, then he can elevate
++ his own privilege level (e.g. get a root shell, write to files for
++ which he does not have write access to, etc).
++
++ Enabling this option will let you choose from various features
++ that prevent the injection and execution of 'foreign' code in
++ a program.
++
++ This will also break programs that rely on the old behaviour and
++ expect that dynamically allocated memory via the malloc() family
++ of functions is executable (which it is not). Notable examples
++ are the XFree86 4.x server, the java runtime and wine.
++
++ The 'chpax' utility available at http://pax.grsecurity.net/
++ allows you to control this and other features on a per file basis.
++
++Paging based non-executable pages
++CONFIG_PAX_PAGEEXEC
++ This implementation is based on the paging feature of the CPU.
++ On i386 it has a variable performance impact on applications
++ depending on their memory usage pattern. You should carefully
++ test your applications before using this feature in production.
++ On alpha, ia64, parisc, sparc, sparc64 and x86_64 there is no
++ performance impact. On ppc there is a slight performance impact.
++
++Segmentation based non-executable pages
++CONFIG_PAX_SEGMEXEC
++ This implementation is based on the segmentation feature of the
++ CPU and has little performance impact, however applications will
++ be limited to a 1.5 GB address space instead of the normal 3 GB.
++
++Emulate trampolines
++CONFIG_PAX_EMUTRAMP
++ There are some programs and libraries that for one reason or
++ another attempt to execute special small code snippets from
++ non-executable memory pages. Most notable examples are the
++ signal handler return code generated by the kernel itself and
++ the GCC trampolines.
++
++ If you enabled CONFIG_PAX_PAGEEXEC or CONFIG_PAX_SEGMEXEC then
++ such programs will no longer work under your kernel.
++
++ As a remedy you can say Y here and use the 'chpax' utility to
++ enable trampoline emulation for the affected programs yet still
++ have the protection provided by the non-executable pages.
++
++ On parisc and ppc you MUST enable this option and EMUSIGRT as
++ well, otherwise your system will not even boot.
++
++ Alternatively you can say N here and use the 'chpax' utility
++ to disable CONFIG_PAX_PAGEEXEC and CONFIG_PAX_SEGMEXEC for the
++ affected files.
++
++ NOTE: enabling this feature *may* open up a loophole in the
++ protection provided by non-executable pages that an attacker
++ could abuse. Therefore the best solution is to not have any
++ files on your system that would require this option. This can
++ be achieved by not using libc5 (which relies on the kernel
++ signal handler return code) and not using or rewriting programs
++ that make use of the nested function implementation of GCC.
++ Skilled users can just fix GCC itself so that it implements
++ nested function calls in a way that does not interfere with PaX.
++
++Automatically emulate sigreturn trampolines
++CONFIG_PAX_EMUSIGRT
++ Enabling this option will have the kernel automatically detect
++ and emulate signal return trampolines executing on the stack
++ that would otherwise lead to task termination.
++
++ This solution is intended as a temporary one for users with
++ legacy versions of libc (libc5, glibc 2.0, uClibc before 0.9.17,
++ Modula-3 runtime, etc) or executables linked to such, basically
++ everything that does not specify its own SA_RESTORER function in
++ normal executable memory like glibc 2.1+ does.
++
++ On parisc and ppc you MUST enable this option, otherwise your
++ system will not even boot.
++
++ NOTE: this feature cannot be disabled on a per executable basis
++ and since it *does* open up a loophole in the protection provided
++ by non-executable pages, the best solution is to not have any
++ files on your system that would require this option.
++
++Restrict mprotect()
++CONFIG_PAX_MPROTECT
++ Enabling this option will prevent programs from
++ - changing the executable status of memory pages that were
++ not originally created as executable,
++ - making read-only executable pages writable again,
++ - creating executable pages from anonymous memory.
++
++ You should say Y here to complete the protection provided by
++ the enforcement of non-executable pages.
++
++ NOTE: you can use the 'chpax' utility to control this feature
++ on a per file basis.
++
++Disallow ELF text relocations
++CONFIG_PAX_NOELFRELOCS
++ Non-executable pages and mprotect() restrictions are effective
++ in preventing the introduction of new executable code into an
++ attacked task's address space. There remain only two venues
++ for this kind of attack: if the attacker can execute already
++ existing code in the attacked task then he can either have it
++ create and mmap() a file containing his code or have it mmap()
++ an already existing ELF library that does not have position
++ independent code in it and use mprotect() on it to make it
++ writable and copy his code there. While protecting against
++ the former approach is beyond PaX, the latter can be prevented
++ by having only PIC ELF libraries on one's system (which do not
++ need to relocate their code). If you are sure this is your case,
++ then enable this option otherwise be careful as you may not even
++ be able to boot or log on your system (for example, some PAM
++ modules are erroneously compiled as non-PIC by default).
++
++ NOTE: if you are using dynamic ELF executables (as suggested
++ when using ASLR) then you must have made sure that you linked
++ your files using the PIC version of crt1 (the et_dyn.tar.gz package
++ referenced there has already been updated to support this).
++
++Allow ELF ET_EXEC text relocations
++CONFIG_PAX_ETEXECRELOCS
++ On some architectures there are incorrectly created applications
++ that require text relocations and would not work without enabling
++ this option. If you are an alpha, ia64 or parisc user, you should
++ enable this option and disable it once you have made sure that
++ none of your applications need it.
++
++Automatically emulate ELF PLT
++CONFIG_PAX_EMUPLT
++ Enabling this option will have the kernel automatically detect
++ and emulate the Procedure Linkage Table entries in ELF files.
++ On some architectures such entries are in writable memory, and
++ become non-executable leading to task termination. Therefore
++ it is mandatory that you enable this option on alpha, parisc, ppc,
++ sparc and sparc64, otherwise your system would not even boot.
++
++ NOTE: this feature *does* open up a loophole in the protection
++ provided by the non-executable pages, therefore the proper
++ solution is to modify the toolchain to produce a PLT that does
++ not need to be writable.
++
++Enforce non-executable kernel pages
++CONFIG_PAX_KERNEXEC
++ This is the kernel land equivalent of PAGEEXEC and MPROTECT,
++ that is, enabling this option will make it harder to inject
++ and execute 'foreign' code in kernel memory itself.
++
++Address Space Layout Randomization
++CONFIG_PAX_ASLR
++ Many if not most exploit techniques rely on the knowledge of
++ certain addresses in the attacked program. The following options
++ will allow the kernel to apply a certain amount of randomization
++ to specific parts of the program thereby forcing an attacker to
++ guess them in most cases. Any failed guess will most likely crash
++ the attacked program which allows the kernel to detect such attempts
++ and react on them. PaX itself provides no reaction mechanisms,
++ instead it is strongly encouraged that you make use of Nergal's
++ segvguard (ftp://ftp.pl.openwall.com/misc/segvguard/) or grsecurity's
++ (http://www.grsecurity.net/) built-in crash detection features or
++ develop one yourself.
++
++ By saying Y here you can choose to randomize the following areas:
++ - top of the task's kernel stack
++ - top of the task's userland stack
++ - base address for mmap() requests that do not specify one
++ (this includes all libraries)
++ - base address of the main executable
++
++ It is strongly recommended to say Y here as address space layout
++ randomization has negligible impact on performance yet it provides
++ a very effective protection.
++
++ NOTE: you can use the 'chpax' utility to control most of these features
++ on a per file basis.
++
++Randomize kernel stack base
++CONFIG_PAX_RANDKSTACK
++ By saying Y here the kernel will randomize every task's kernel
++ stack on every system call. This will not only force an attacker
++ to guess it but also prevent him from making use of possible
++ leaked information about it.
++
++ Since the kernel stack is a rather scarce resource, randomization
++ may cause unexpected stack overflows, therefore you should very
++ carefully test your system. Note that once enabled in the kernel
++ configuration, this feature cannot be disabled on a per file basis.
++
++Randomize user stack base
++CONFIG_PAX_RANDUSTACK
++ By saying Y here the kernel will randomize every task's userland
++ stack. The randomization is done in two steps where the second
++ one may apply a big amount of shift to the top of the stack and
++ cause problems for programs that want to use lots of memory (more
++ than 2.5 GB if SEGMEXEC is not active, or 1.25 GB when it is).
++ For this reason the second step can be controlled by 'chpax' on
++ a per file basis.
++
++Randomize mmap() base
++CONFIG_PAX_RANDMMAP
++ By saying Y here the kernel will use a randomized base address for
++ mmap() requests that do not specify one themselves. As a result
++ all dynamically loaded libraries will appear at random addresses
++ and therefore be harder to exploit by a technique where an attacker
++ attempts to execute library code for his purposes (e.g. spawn a
++ shell from an exploited program that is running at an elevated
++ privilege level).
++
++ Furthermore, if a program is relinked as a dynamic ELF file, its
++ base address will be randomized as well, completing the full
++ randomization of the address space layout. Attacking such programs
++ becomes a guess game. You can find an example of doing this at
++ http://pax.grsecurity.net/et_dyn.tar.gz and practical samples
++ at http://www.grsecurity.net/grsec-gcc-specs.tar.gz .
++
++ NOTE: you can use the 'chpax' utility to control this feature
++ on a per file basis.
++
++Randomize ET_EXEC base
++CONFIG_PAX_RANDEXEC
++ By saying Y here the kernel will randomize the base address of normal
++ ET_EXEC ELF executables as well. This is accomplished by mapping the
++ executable in memory in a special way which also allows for detecting
++ attackers who attempt to execute its code for their purposes. Since
++ this special mapping causes performance degradation and the attack
++ detection may create false alarms as well, you should carefully test
++ your executables when this feature is enabled.
++
++ This solution is intended only as a temporary one until you relink
++ your programs as a dynamic ELF file.
++
++ NOTE: you can use the 'chpax' utility to control this feature
++ on a per file basis.
++
+ #
+ # A couple of things I keep forgetting:
+ # capitalize: AppleTalk, Ethernet, DOS, DMA, FAT, FTP, Internet,
+diff -Nurp linux-2.4.25/arch/alpha/config.in linux-2.4.25-pax/arch/alpha/config.in
+--- linux-2.4.25/arch/alpha/config.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/alpha/config.in 2004-02-19 11:12:52.000000000 -0500
+@@ -466,5 +466,63 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++# if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++# bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++# fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++# bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ bool ' Allow ELF ET_EXEC text relocations' CONFIG_PAX_ETEXECRELOCS
++ fi
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source crypto/Config.in
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/alpha/kernel/osf_sys.c linux-2.4.25-pax/arch/alpha/kernel/osf_sys.c
+--- linux-2.4.25/arch/alpha/kernel/osf_sys.c 2003-06-13 10:51:29.000000000 -0400
++++ linux-2.4.25-pax/arch/alpha/kernel/osf_sys.c 2004-02-19 11:12:52.000000000 -0500
+@@ -230,6 +230,11 @@ asmlinkage unsigned long osf_mmap(unsign
+ struct file *file = NULL;
+ unsigned long ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ #if 0
+ if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
+ printk("%s: unimplemented OSF mmap flags %04lx\n",
+@@ -1357,6 +1362,10 @@ arch_get_unmapped_area(struct file *filp
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != -ENOMEM)
+@@ -1364,8 +1373,15 @@ arch_get_unmapped_area(struct file *filp
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+- len, limit);
++
++ addr = TASK_UNMAPPED_BASE;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (current->flags & PF_PAX_RANDMMAP)
++ addr += current->mm->delta_mmap;
++#endif
++
++ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != -ENOMEM)
+ return addr;
+
+diff -Nurp linux-2.4.25/arch/alpha/mm/fault.c linux-2.4.25-pax/arch/alpha/mm/fault.c
+--- linux-2.4.25/arch/alpha/mm/fault.c 2002-11-28 18:53:08.000000000 -0500
++++ linux-2.4.25-pax/arch/alpha/mm/fault.c 2004-02-19 11:12:52.000000000 -0500
+@@ -53,6 +53,139 @@ __load_new_mm_context(struct mm_struct *
+ __reload_thread(&current->thread);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->pc >= current->mm->start_code &&
++ regs->pc < current->mm->end_code)
++ {
++ if (regs->r26 == regs->pc)
++ return 1;
++
++ regs->pc += current->mm->delta_exec;
++ return 4;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int ldah, ldq, jmp;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
++ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
++ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
++ jmp == 0x6BFB0000U)
++ {
++ unsigned long r27, addr;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
++
++ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ err = get_user(r27, (unsigned long*)addr);
++ if (err)
++ break;
++
++ regs->r27 = r27;
++ regs->pc = r27;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #2 */
++ unsigned int ldah, lda, br;
++
++ err = get_user(ldah, (unsigned int *)regs->pc);
++ err |= get_user(lda, (unsigned int *)(regs->pc+4));
++ err |= get_user(br, (unsigned int *)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((ldah & 0xFFFF0000U)== 0x277B0000U &&
++ (lda & 0xFFFF0000U) == 0xA77B0000U &&
++ (br & 0xFFE00000U) == 0xC3E00000U)
++ {
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
++ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
++ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
++
++ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
++ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int br;
++
++ err = get_user(br, (unsigned int *)regs->pc);
++
++ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
++ unsigned int br2, ldq, nop, jmp;
++ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
++
++ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
++ err = get_user(br2, (unsigned int *)addr);
++ err |= get_user(ldq, (unsigned int *)(addr+4));
++ err |= get_user(nop, (unsigned int *)(addr+8));
++ err |= get_user(jmp, (unsigned int *)(addr+12));
++ err |= get_user(resolver, (unsigned long *)(addr+16));
++
++ if (err)
++ break;
++
++ if (br2 == 0xC3600000U &&
++ ldq == 0xA77B000CU &&
++ nop == 0x47FF041FU &&
++ jmp == 0x6B7B0000U)
++ {
++ regs->r28 = regs->pc+4;
++ regs->r27 = addr+16;
++ regs->pc = resolver;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
+
+ /*
+ * This routine handles page faults. It determines the address,
+@@ -133,8 +266,34 @@ do_page_fault(unsigned long address, uns
+ good_area:
+ info.si_code = SEGV_ACCERR;
+ if (cause < 0) {
+- if (!(vma->vm_flags & VM_EXEC))
++ if (!(vma->vm_flags & VM_EXEC)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->pc)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->pc, (void*)rdusp());
++ do_exit(SIGKILL);
++#else
+ goto bad_area;
++#endif
++
++ }
+ } else if (!cause) {
+ /* Allow reads even for write-only mappings */
+ if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+diff -Nurp linux-2.4.25/arch/i386/Makefile linux-2.4.25-pax/arch/i386/Makefile
+--- linux-2.4.25/arch/i386/Makefile 2003-06-13 10:51:29.000000000 -0400
++++ linux-2.4.25-pax/arch/i386/Makefile 2004-02-19 11:12:52.000000000 -0500
+@@ -114,6 +114,9 @@ arch/i386/mm: dummy
+
+ MAKEBOOT = $(MAKE) -C arch/$(ARCH)/boot
+
++arch/i386/vmlinux.lds: arch/i386/vmlinux.lds.S FORCE
++ $(CPP) -C -P -I$(HPATH) -D__KERNEL__ -imacros $(HPATH)/linux/config.h -imacros $(HPATH)/asm-i386/segment.h -imacros $(HPATH)/asm-i386/page.h -Ui386 arch/i386/vmlinux.lds.S >arch/i386/vmlinux.lds
++
+ vmlinux: arch/i386/vmlinux.lds
+
+ FORCE: ;
+@@ -150,6 +153,7 @@ archclean:
+ @$(MAKEBOOT) clean
+
+ archmrproper:
++ rm -f arch/i386/vmlinux.lds
+
+ archdep:
+ @$(MAKEBOOT) dep
+diff -Nurp linux-2.4.25/arch/i386/boot/bootsect.S linux-2.4.25-pax/arch/i386/boot/bootsect.S
+--- linux-2.4.25/arch/i386/boot/bootsect.S 2003-08-25 07:44:39.000000000 -0400
++++ linux-2.4.25-pax/arch/i386/boot/bootsect.S 2004-02-19 11:12:52.000000000 -0500
+@@ -237,7 +237,7 @@ rp_read:
+ #ifdef __BIG_KERNEL__
+ # look in setup.S for bootsect_kludge
+ bootsect_kludge = 0x220 # 0x200 + 0x20 which is the size of the
+- lcall bootsect_kludge # bootsector + bootsect_kludge offset
++ lcall *bootsect_kludge # bootsector + bootsect_kludge offset
+ #else
+ movw %es, %ax
+ subw $SYSSEG, %ax
+diff -Nurp linux-2.4.25/arch/i386/boot/setup.S linux-2.4.25-pax/arch/i386/boot/setup.S
+--- linux-2.4.25/arch/i386/boot/setup.S 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/boot/setup.S 2004-02-19 11:12:52.000000000 -0500
+@@ -637,7 +637,7 @@ edd_done:
+ cmpw $0, %cs:realmode_swtch
+ jz rmodeswtch_normal
+
+- lcall %cs:realmode_swtch
++ lcall *%cs:realmode_swtch
+
+ jmp rmodeswtch_end
+
+diff -Nurp linux-2.4.25/arch/i386/config.in linux-2.4.25-pax/arch/i386/config.in
+--- linux-2.4.25/arch/i386/config.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/config.in 2004-02-19 11:12:52.000000000 -0500
+@@ -99,6 +99,7 @@ if [ "$CONFIG_M586MMX" = "y" ]; then
+ fi
+ if [ "$CONFIG_M686" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 5
++ define_bool CONFIG_X86_ALIGNMENT_16 y
+ define_bool CONFIG_X86_HAS_TSC y
+ define_bool CONFIG_X86_GOOD_APIC y
+ bool 'PGE extensions (not for Cyrix/Transmeta)' CONFIG_X86_PGE
+@@ -108,6 +109,7 @@ if [ "$CONFIG_M686" = "y" ]; then
+ fi
+ if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 5
++ define_bool CONFIG_X86_ALIGNMENT_16 y
+ define_bool CONFIG_X86_HAS_TSC y
+ define_bool CONFIG_X86_GOOD_APIC y
+ define_bool CONFIG_X86_PGE y
+@@ -116,6 +118,7 @@ if [ "$CONFIG_MPENTIUMIII" = "y" ]; then
+ fi
+ if [ "$CONFIG_MPENTIUM4" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 7
++ define_bool CONFIG_X86_ALIGNMENT_16 y
+ define_bool CONFIG_X86_HAS_TSC y
+ define_bool CONFIG_X86_GOOD_APIC y
+ define_bool CONFIG_X86_PGE y
+@@ -135,6 +138,7 @@ if [ "$CONFIG_MK8" = "y" ]; then
+ fi
+ if [ "$CONFIG_MK7" = "y" ]; then
+ define_int CONFIG_X86_L1_CACHE_SHIFT 6
++ define_bool CONFIG_X86_ALIGNMENT_16 y
+ define_bool CONFIG_X86_HAS_TSC y
+ define_bool CONFIG_X86_GOOD_APIC y
+ define_bool CONFIG_X86_USE_3DNOW y
+@@ -485,5 +489,78 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ if [ "$CONFIG_M586" = "y" -o \
++ "$CONFIG_M586TSC" = "y" -o \
++ "$CONFIG_M586MMX" = "y" -o \
++ "$CONFIG_M686" = "y" -o \
++ "$CONFIG_MPENTIUMIII" = "y" -o \
++ "$CONFIG_MPENTIUM4" = "y" -o \
++ "$CONFIG_MK7" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ fi
++ bool 'Segmentation based non-executable pages' CONFIG_PAX_SEGMEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" -o "$CONFIG_PAX_SEGMEXEC" = "y" ]; then
++ bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++ if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++ bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++ fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++ bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ fi
++ fi
++ if [ "$CONFIG_MODULES" = "n" ]; then
++ bool 'Enforce non-executable kernel pages' CONFIG_PAX_KERNEXEC
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ if [ "$CONFIG_X86_TSC" = "y" ]; then
++ bool ' Randomize kernel stack base' CONFIG_PAX_RANDKSTACK
++ fi
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" -o "$CONFIG_PAX_SEGMEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source crypto/Config.in
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/i386/kernel/apm.c linux-2.4.25-pax/arch/i386/kernel/apm.c
+--- linux-2.4.25/arch/i386/kernel/apm.c 2003-08-25 07:44:39.000000000 -0400
++++ linux-2.4.25-pax/arch/i386/kernel/apm.c 2004-02-19 11:12:52.000000000 -0500
+@@ -614,7 +614,7 @@ static u8 apm_bios_call(u32 func, u32 eb
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
++ "lcall *%%ss:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+ "setc %%al\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -666,7 +666,7 @@ static u8 apm_bios_call_simple(u32 func,
+ __asm__ __volatile__(APM_DO_ZERO_SEGS
+ "pushl %%edi\n\t"
+ "pushl %%ebp\n\t"
+- "lcall %%cs:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
++ "lcall *%%ss:" SYMBOL_NAME_STR(apm_bios_entry) "\n\t"
+ "setc %%bl\n\t"
+ "popl %%ebp\n\t"
+ "popl %%edi\n\t"
+@@ -1985,6 +1985,12 @@ static int __init apm_init(void)
+ __va((unsigned long)0x40 << 4));
+ _set_limit((char *)&gdt[APM_40 >> 3], 4095 - (0x40 << 4));
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ set_base(gdt2[APM_40 >> 3],
++ __va((unsigned long)0x40 << 4));
++ _set_limit((char *)&gdt2[APM_40 >> 3], 4095 - (0x40 << 4));
++#endif
++
+ apm_bios_entry.offset = apm_info.bios.offset;
+ apm_bios_entry.segment = APM_CS;
+ set_base(gdt[APM_CS >> 3],
+@@ -1993,6 +1999,16 @@ static int __init apm_init(void)
+ __va((unsigned long)apm_info.bios.cseg_16 << 4));
+ set_base(gdt[APM_DS >> 3],
+ __va((unsigned long)apm_info.bios.dseg << 4));
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ set_base(gdt2[APM_CS >> 3],
++ __va((unsigned long)apm_info.bios.cseg << 4));
++ set_base(gdt2[APM_CS_16 >> 3],
++ __va((unsigned long)apm_info.bios.cseg_16 << 4));
++ set_base(gdt2[APM_DS >> 3],
++ __va((unsigned long)apm_info.bios.dseg << 4));
++#endif
++
+ #ifndef APM_RELAX_SEGMENTS
+ if (apm_info.bios.version == 0x100) {
+ #endif
+@@ -2002,6 +2018,13 @@ static int __init apm_init(void)
+ _set_limit((char *)&gdt[APM_CS_16 >> 3], 64 * 1024 - 1);
+ /* For the DEC Hinote Ultra CT475 (and others?) */
+ _set_limit((char *)&gdt[APM_DS >> 3], 64 * 1024 - 1);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ _set_limit((char *)&gdt2[APM_CS >> 3], 64 * 1024 - 1);
++ _set_limit((char *)&gdt2[APM_CS_16 >> 3], 64 * 1024 - 1);
++ _set_limit((char *)&gdt2[APM_DS >> 3], 64 * 1024 - 1);
++#endif
++
+ #ifndef APM_RELAX_SEGMENTS
+ } else {
+ _set_limit((char *)&gdt[APM_CS >> 3],
+@@ -2010,6 +2033,16 @@ static int __init apm_init(void)
+ (apm_info.bios.cseg_16_len - 1) & 0xffff);
+ _set_limit((char *)&gdt[APM_DS >> 3],
+ (apm_info.bios.dseg_len - 1) & 0xffff);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ _set_limit((char *)&gdt2[APM_CS >> 3],
++ (apm_info.bios.cseg_len - 1) & 0xffff);
++ _set_limit((char *)&gdt2[APM_CS_16 >> 3],
++ (apm_info.bios.cseg_16_len - 1) & 0xffff);
++ _set_limit((char *)&gdt2[APM_DS >> 3],
++ (apm_info.bios.dseg_len - 1) & 0xffff);
++#endif
++
+ }
+ #endif
+
+diff -Nurp linux-2.4.25/arch/i386/kernel/entry.S linux-2.4.25-pax/arch/i386/kernel/entry.S
+--- linux-2.4.25/arch/i386/kernel/entry.S 2003-06-13 10:51:29.000000000 -0400
++++ linux-2.4.25-pax/arch/i386/kernel/entry.S 2004-02-19 11:12:52.000000000 -0500
+@@ -209,6 +209,17 @@ ENTRY(system_call)
+ jae badsys
+ call *SYMBOL_NAME(sys_call_table)(,%eax,4)
+ movl %eax,EAX(%esp) # save the return value
++
++#ifdef CONFIG_PAX_RANDKSTACK
++ cli # need_resched and signals atomic test
++ cmpl $0,need_resched(%ebx)
++ jne reschedule
++ cmpl $0,sigpending(%ebx)
++ jne signal_return
++ call SYMBOL_NAME(pax_randomize_kstack)
++ jmp restore_all
++#endif
++
+ ENTRY(ret_from_sys_call)
+ cli # need_resched and signals atomic test
+ cmpl $0,need_resched(%ebx)
+@@ -389,8 +400,56 @@ ENTRY(alignment_check)
+ jmp error_code
+
+ ENTRY(page_fault)
++#ifdef CONFIG_PAX_PAGEEXEC
++ ALIGN
++ pushl $ SYMBOL_NAME(pax_do_page_fault)
++#else
+ pushl $ SYMBOL_NAME(do_page_fault)
++#endif
++
++#ifndef CONFIG_PAX_EMUTRAMP
+ jmp error_code
++#else
++ pushl %ds
++ pushl %eax
++ xorl %eax,%eax
++ pushl %ebp
++ pushl %edi
++ pushl %esi
++ pushl %edx
++ decl %eax # eax = -1
++ pushl %ecx
++ pushl %ebx
++ cld
++ movl %es,%ecx
++ movl ORIG_EAX(%esp), %esi # get the error code
++ movl ES(%esp), %edi # get the function address
++ movl %eax, ORIG_EAX(%esp)
++ movl %ecx, ES(%esp)
++ movl %esp,%edx
++ pushl %esi # push the error code
++ pushl %edx # push the pt_regs pointer
++ movl $(__KERNEL_DS),%edx
++ movl %edx,%ds
++ movl %edx,%es
++ GET_CURRENT(%ebx)
++ call *%edi
++ addl $8,%esp
++ decl %eax
++ jnz ret_from_exception
++
++ popl %ebx
++ popl %ecx
++ popl %edx
++ popl %esi
++ popl %edi
++ popl %ebp
++ popl %eax
++ popl %ds
++ popl %es
++ addl $4,%esp
++ jmp system_call
++#endif
+
+ ENTRY(machine_check)
+ pushl $0
+@@ -402,7 +461,7 @@ ENTRY(spurious_interrupt_bug)
+ pushl $ SYMBOL_NAME(do_spurious_interrupt_bug)
+ jmp error_code
+
+-.data
++.section .rodata, "a"
+ ENTRY(sys_call_table)
+ .long SYMBOL_NAME(sys_ni_syscall) /* 0 - old "setup()" system call*/
+ .long SYMBOL_NAME(sys_exit)
+diff -Nurp linux-2.4.25/arch/i386/kernel/head.S linux-2.4.25-pax/arch/i386/kernel/head.S
+--- linux-2.4.25/arch/i386/kernel/head.S 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/head.S 2004-02-19 11:12:52.000000000 -0500
+@@ -41,6 +41,7 @@
+ *
+ * On entry, %esi points to the real-mode code as a 32-bit pointer.
+ */
++.global startup_32
+ startup_32:
+ /*
+ * Set segments to known values
+@@ -86,7 +87,7 @@ startup_32:
+ PRESENT+RW+USER */
+ 2: stosl
+ add $0x1000,%eax
+- cmp $empty_zero_page-__PAGE_OFFSET,%edi
++ cmp $0x00c00007,%eax
+ jne 2b
+
+ /*
+@@ -100,9 +101,19 @@ startup_32:
+ movl %eax,%cr0 /* ..and set paging (PG) bit */
+ jmp 1f /* flush the prefetch-queue */
+ 1:
++
++#if !defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_SMP)
++
++#ifdef CONFIG_PAX_KERNEXEC
++ orw %bx,%bx
++ jz 1f
++#endif
++
+ movl $1f,%eax
+ jmp *%eax /* make sure eip is relocated */
+ 1:
++#endif
++
+ /* Set up the stack pointer */
+ lss stack_start,%esp
+
+@@ -121,7 +132,7 @@ startup_32:
+ */
+ xorl %eax,%eax
+ movl $ SYMBOL_NAME(__bss_start),%edi
+- movl $ SYMBOL_NAME(_end),%ecx
++ movl $ SYMBOL_NAME(__bss_end),%ecx
+ subl %edi,%ecx
+ rep
+ stosb
+@@ -272,8 +283,6 @@ L6:
+ jmp L6 # main should never return here, but
+ # just in case, we know what happens.
+
+-ready: .byte 0
+-
+ /*
+ * We depend on ET to be correct. This checks for 287/387.
+ */
+@@ -319,13 +328,6 @@ rp_sidt:
+ jne rp_sidt
+ ret
+
+-ENTRY(stack_start)
+- .long SYMBOL_NAME(init_task_union)+8192
+- .long __KERNEL_DS
+-
+-/* This is the default interrupt "handler" :-) */
+-int_msg:
+- .asciz "Unknown interrupt\n"
+ ALIGN
+ ignore_int:
+ cld
+@@ -347,6 +349,18 @@ ignore_int:
+ popl %eax
+ iret
+
++.data
++ready: .byte 0
++
++ENTRY(stack_start)
++ .long SYMBOL_NAME(init_task_union)+8192
++ .long __KERNEL_DS
++
++.section .rodata,"a"
++/* This is the default interrupt "handler" :-) */
++int_msg:
++ .asciz "Unknown interrupt\n"
++
+ /*
+ * The interrupt descriptor table has room for 256 idt's,
+ * the global descriptor table is dependent on the number
+@@ -372,41 +386,58 @@ gdt_descr:
+ SYMBOL_NAME(gdt):
+ .long SYMBOL_NAME(gdt_table)
+
++#ifdef CONFIG_PAX_SEGMEXEC
++.globl SYMBOL_NAME(gdt2)
++ .word 0
++gdt_descr2:
++ .word GDT_ENTRIES*8-1
++SYMBOL_NAME(gdt2):
++ .long SYMBOL_NAME(gdt_table2)
++#endif
++
+ /*
+ * This is initialized to create an identity-mapping at 0-8M (for bootup
+ * purposes) and another mapping of the 0-8M area at virtual address
+ * PAGE_OFFSET.
+ */
+-.org 0x1000
++.section .data.swapper_pg_dir,"a"
+ ENTRY(swapper_pg_dir)
+- .long 0x00102007
+- .long 0x00103007
+- .fill BOOT_USER_PGD_PTRS-2,4,0
++ .long pg0-__PAGE_OFFSET+7
++ .long pg1-__PAGE_OFFSET+7
++ .long pg2-__PAGE_OFFSET+7
++ .fill BOOT_USER_PGD_PTRS-3,4,0
+ /* default: 766 entries */
+- .long 0x00102007
+- .long 0x00103007
++ .long pg0-__PAGE_OFFSET+7
++ .long pg1-__PAGE_OFFSET+7
++ .long pg2-__PAGE_OFFSET+7
+ /* default: 254 entries */
+- .fill BOOT_KERNEL_PGD_PTRS-2,4,0
++ .fill BOOT_KERNEL_PGD_PTRS-3,4,0
+
+ /*
+ * The page tables are initialized to only 8MB here - the final page
+ * tables are set up later depending on memory size.
+ */
+-.org 0x2000
++.section .data.pg0,"a"
+ ENTRY(pg0)
++ .fill 1024,4,0
+
+-.org 0x3000
++.section .data.pg1,"a"
+ ENTRY(pg1)
++ .fill 1024,4,0
++
++.section .data.pg2,"a"
++ENTRY(pg2)
++ .fill 1024,4,0
+
+ /*
+ * empty_zero_page must immediately follow the page tables ! (The
+ * initialization loop counts until empty_zero_page)
+ */
+-
+-.org 0x4000
++.section .data.empty_zero_page,"a"
+ ENTRY(empty_zero_page)
++ .fill 1024,4,0
+
+-.org 0x5000
++.text
+
+ /*
+ * Real beginning of normal "text" segment
+@@ -419,7 +450,7 @@ ENTRY(_stext)
+ * in the text section because it has alignment requirements
+ * that we cannot fulfill any other way.
+ */
+-.data
++.section .rodata,"a"
+
+ ALIGN
+ /*
+@@ -430,19 +461,62 @@ ALIGN
+ */
+ ENTRY(gdt_table)
+ .quad 0x0000000000000000 /* NULL descriptor */
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++ .quad 0x00cf9b000000ffff /* 0x08 kernel 4GB code at 0x00000000 */
++#else
+ .quad 0x0000000000000000 /* not used */
+- .quad 0x00cf9a000000ffff /* 0x10 kernel 4GB code at 0x00000000 */
+- .quad 0x00cf92000000ffff /* 0x18 kernel 4GB data at 0x00000000 */
+- .quad 0x00cffa000000ffff /* 0x23 user 4GB code at 0x00000000 */
+- .quad 0x00cff2000000ffff /* 0x2b user 4GB data at 0x00000000 */
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0xc0cf9b400000ffff /* 0x10 kernel 4GB code at 0xc0400000 */
++#else
++ .quad 0x00cf9b000000ffff /* 0x10 kernel 4GB code at 0x00000000 */
++#endif
++
++ .quad 0x00cf93000000ffff /* 0x18 kernel 4GB data at 0x00000000 */
++ .quad 0x00cffb000000ffff /* 0x23 user 4GB code at 0x00000000 */
++ .quad 0x00cff3000000ffff /* 0x2b user 4GB data at 0x00000000 */
+ .quad 0x0000000000000000 /* not used */
+ .quad 0x0000000000000000 /* not used */
+ /*
+ * The APM segments have byte granularity and their bases
+ * and limits are set at run time.
+ */
+- .quad 0x0040920000000000 /* 0x40 APM set up for bad BIOS's */
+- .quad 0x00409a0000000000 /* 0x48 APM CS code */
+- .quad 0x00009a0000000000 /* 0x50 APM CS 16 code (16 bit) */
+- .quad 0x0040920000000000 /* 0x58 APM DS data */
++ .quad 0x0040930000000000 /* 0x40 APM set up for bad BIOS's */
++ .quad 0x00409b0000000000 /* 0x48 APM CS code */
++ .quad 0x00009b0000000000 /* 0x50 APM CS 16 code (16 bit) */
++ .quad 0x0040930000000000 /* 0x58 APM DS data */
+ .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ENTRY(gdt_table2)
++ .quad 0x0000000000000000 /* NULL descriptor */
++
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++ .quad 0x00cf9b000000ffff /* 0x08 kernel 4GB code at 0x00000000 */
++#else
++ .quad 0x0000000000000000 /* not used */
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ .quad 0xc0cf9b400000ffff /* 0x10 kernel 4GB code at 0xc0400000 */
++#else
++ .quad 0x00cf9b000000ffff /* 0x10 kernel 4GB code at 0x00000000 */
++#endif
++
++ .quad 0x00cf93000000ffff /* 0x18 kernel 4GB data at 0x00000000 */
++ .quad 0x60c5fb000000ffff /* 0x23 user 1.5GB code at 0x60000000 */
++ .quad 0x00c5f3000000ffff /* 0x2b user 1.5GB data at 0x00000000 */
++ .quad 0x0000000000000000 /* not used */
++ .quad 0x0000000000000000 /* not used */
++ /*
++ * The APM segments have byte granularity and their bases
++ * and limits are set at run time.
++ */
++ .quad 0x0040930000000000 /* 0x40 APM set up for bad BIOS's */
++ .quad 0x00409b0000000000 /* 0x48 APM CS code */
++ .quad 0x00009b0000000000 /* 0x50 APM CS 16 code (16 bit) */
++ .quad 0x0040930000000000 /* 0x58 APM DS data */
++ .fill NR_CPUS*4,8,0 /* space for TSS's and LDT's */
++#endif
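
The GDT entries above are raw 64-bit descriptors, so the "kernel 4GB code at 0xc0400000" comments are easiest to verify by unpacking the quads. A minimal user-space sketch of that unpacking (standard x86 descriptor layout assumed; not part of the patch):

/* Sketch (not part of the patch): unpack a flat x86 segment descriptor
 * quad, e.g. the KERNEXEC kernel code entry 0xc0cf9b400000ffff above. */
#include <stdio.h>
#include <stdint.h>

static void decode_desc(uint64_t d)
{
    uint32_t base  = (uint32_t)(((d >> 16) & 0xffffffULL) | (((d >> 56) & 0xffULL) << 24));
    uint32_t limit = (uint32_t)((d & 0xffffULL) | (((d >> 48) & 0xfULL) << 16));
    unsigned access = (unsigned)((d >> 40) & 0xff);   /* P, DPL, S, type */
    unsigned flags  = (unsigned)((d >> 52) & 0xf);    /* G, D/B, L, AVL  */

    if (flags & 0x8)                   /* G=1: limit counted in 4 kB pages */
        limit = (limit << 12) | 0xfff;
    printf("base=0x%08x limit=0x%08x access=0x%02x flags=0x%x\n",
           (unsigned)base, (unsigned)limit, access, flags);
}

int main(void)
{
    decode_desc(0xc0cf9b400000ffffULL); /* kernel 4GB code at 0xc0400000 */
    decode_desc(0x00cf9b000000ffffULL); /* kernel 4GB code at 0x00000000 */
    return 0;
}
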
+diff -Nurp linux-2.4.25/arch/i386/kernel/i386_ksyms.c linux-2.4.25-pax/arch/i386/kernel/i386_ksyms.c
+--- linux-2.4.25/arch/i386/kernel/i386_ksyms.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/i386_ksyms.c 2004-02-19 11:12:52.000000000 -0500
+@@ -74,6 +74,11 @@ EXPORT_SYMBOL(pm_power_off);
+ EXPORT_SYMBOL(get_cmos_time);
+ EXPORT_SYMBOL(apm_info);
+ EXPORT_SYMBOL(gdt);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++EXPORT_SYMBOL(gdt2);
++#endif
++
+ EXPORT_SYMBOL(empty_zero_page);
+
+ #ifdef CONFIG_DEBUG_IOVIRT
+diff -Nurp linux-2.4.25/arch/i386/kernel/ldt.c linux-2.4.25-pax/arch/i386/kernel/ldt.c
+--- linux-2.4.25/arch/i386/kernel/ldt.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/ldt.c 2004-02-19 11:12:52.000000000 -0500
+@@ -151,7 +151,7 @@ static int read_default_ldt(void * ptr,
+ {
+ int err;
+ unsigned long size;
+- void *address;
++ const void *address;
+
+ err = 0;
+ address = &default_ldt[0];
+@@ -214,6 +214,13 @@ static int write_ldt(void * ptr, unsigne
+ }
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && (ldt_info.contents & 2)) {
++ error = -EINVAL;
++ goto out_unlock;
++ }
++#endif
++
+ entry_1 = ((ldt_info.base_addr & 0x0000ffff) << 16) |
+ (ldt_info.limit & 0x0ffff);
+ entry_2 = (ldt_info.base_addr & 0xff000000) |
+@@ -224,7 +231,7 @@ static int write_ldt(void * ptr, unsigne
+ ((ldt_info.seg_not_present ^ 1) << 15) |
+ (ldt_info.seg_32bit << 22) |
+ (ldt_info.limit_in_pages << 23) |
+- 0x7000;
++ 0x7100;
+ if (!oldmode)
+ entry_2 |= (ldt_info.useable << 20);
+
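
In the write_ldt() hunk above, the constant 0x7000 in entry_2 encodes S=1 and DPL=3 for the new LDT descriptor; the patch changes it to 0x7100, which additionally sets bit 8, the "accessed" bit of the type field. Presumably this spares the CPU from writing that bit into the descriptor itself on first load, which matters once descriptor memory may be read-only (cf. the const default_ldt and the KERNEXEC read-only mappings elsewhere in this patch). A small sketch of the bit arithmetic (descriptor bit positions per the x86 manuals; not part of the patch):

/* Sketch (not part of the patch): what 0x7000 -> 0x7100 toggles in the
 * high dword (entry_2) of an x86 segment descriptor. */
#include <stdio.h>

#define DESC_ACCESSED  (1u << 8)    /* type bit 0: segment was accessed   */
#define DESC_S         (1u << 12)   /* 1 = code/data (non-system) segment */
#define DESC_DPL_3     (3u << 13)   /* descriptor privilege level 3       */

int main(void)
{
    unsigned old_bits = 0x7000;     /* S=1, DPL=3              */
    unsigned new_bits = 0x7100;     /* S=1, DPL=3, accessed=1  */

    printf("added bits: 0x%x (accessed bit set: %d)\n",
           new_bits ^ old_bits, !!((new_bits ^ old_bits) & DESC_ACCESSED));
    printf("0x7000 == S|DPL3: %d\n", old_bits == (DESC_S | DESC_DPL_3));
    return 0;
}
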
+diff -Nurp linux-2.4.25/arch/i386/kernel/pci-pc.c linux-2.4.25-pax/arch/i386/kernel/pci-pc.c
+--- linux-2.4.25/arch/i386/kernel/pci-pc.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/pci-pc.c 2004-02-19 11:12:52.000000000 -0500
+@@ -17,6 +17,7 @@
+ #include <asm/io.h>
+ #include <asm/smp.h>
+ #include <asm/smpboot.h>
++#include <asm/desc.h>
+
+ #include "pci-i386.h"
+
+@@ -575,10 +576,16 @@ union bios32 {
+ * the array there.
+ */
+
++#if defined(CONFIG_PAX_KERNEXEC) && defined(CONFIG_PCI_BIOS)
++#define __FLAT_KERNEL_CS 0x08
++#else
++#define __FLAT_KERNEL_CS __KERNEL_CS
++#endif
++
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} bios32_indirect = { 0, __KERNEL_CS };
++} bios32_indirect = { 0, __FLAT_KERNEL_CS };
+
+ /*
+ * Returns the entry point for the given service, NULL on error
+@@ -619,7 +626,9 @@ static unsigned long bios32_service(unsi
+ static struct {
+ unsigned long address;
+ unsigned short segment;
+-} pci_indirect = { 0, __KERNEL_CS };
++} pci_indirect = { 0, __FLAT_KERNEL_CS };
++
++#undef __FLAT_KERNEL_CS
+
+ static int pci_bios_present;
+
+diff -Nurp linux-2.4.25/arch/i386/kernel/process.c linux-2.4.25-pax/arch/i386/kernel/process.c
+--- linux-2.4.25/arch/i386/kernel/process.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/process.c 2004-02-19 11:12:52.000000000 -0500
+@@ -209,18 +209,18 @@ __setup("reboot=", reboot_setup);
+ doesn't work with at least one type of 486 motherboard. It is easy
+ to stop this code working; hence the copious comments. */
+
+-static unsigned long long
++static const unsigned long long
+ real_mode_gdt_entries [3] =
+ {
+ 0x0000000000000000ULL, /* Null descriptor */
+- 0x00009a000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
+- 0x000092000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
++ 0x00009b000000ffffULL, /* 16-bit real-mode 64k code at 0x00000000 */
++ 0x000093000100ffffULL /* 16-bit real-mode 64k data at 0x00000100 */
+ };
+
+ static struct
+ {
+ unsigned short size __attribute__ ((packed));
+- unsigned long long * base __attribute__ ((packed));
++ const unsigned long long * base __attribute__ ((packed));
+ }
+ real_mode_gdt = { sizeof (real_mode_gdt_entries) - 1, real_mode_gdt_entries },
+ real_mode_idt = { 0x3ff, 0 },
+@@ -552,7 +552,7 @@ int copy_thread(int nr, unsigned long cl
+ {
+ struct pt_regs * childregs;
+
+- childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p)) - 1;
++ childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p - sizeof(unsigned long))) - 1;
+ struct_cpy(childregs, regs);
+ childregs->eax = 0;
+ childregs->esp = esp;
+@@ -613,6 +613,16 @@ void dump_thread(struct pt_regs * regs,
+ dump->u_fpvalid = dump_fpu (regs, &dump->i387);
+ }
+
++#ifdef CONFIG_PAX_SEGMEXEC
++void pax_switch_segments(struct task_struct * tsk)
++{
++ if (tsk->flags & PF_PAX_SEGMEXEC)
++ __asm__ __volatile__("lgdt %0": "=m" (gdt_descr2));
++ else
++ __asm__ __volatile__("lgdt %0": "=m" (gdt_descr));
++}
++#endif
++
+ /*
+ * This special macro can be used to load a debugging register
+ */
+@@ -652,6 +662,10 @@ void __switch_to(struct task_struct *pre
+
+ unlazy_fpu(prev_p);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_switch_segments(next_p);
++#endif
++
+ /*
+ * Reload esp0, LDT and the page table pointer:
+ */
+@@ -792,3 +806,30 @@ unsigned long get_wchan(struct task_stru
+ }
+ #undef last_sched
+ #undef first_sched
++
++#ifdef CONFIG_PAX_RANDKSTACK
++asmlinkage void pax_randomize_kstack(void)
++{
++ struct tss_struct *tss = init_tss + smp_processor_id();
++ unsigned long time;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (!pax_aslr)
++ return;
++#endif
++
++ rdtscl(time);
++
++ /* P4 seems to return a 0 LSB, ignore it */
++#ifdef CONFIG_MPENTIUM4
++ time &= 0x3EUL;
++ time <<= 1;
++#else
++ time &= 0x1FUL;
++ time <<= 2;
++#endif
++
++ tss->esp0 ^= time;
++ current->thread.esp0 = tss->esp0;
++}
++#endif
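
pax_randomize_kstack() above perturbs the kernel stack pointer by a handful of TSC-derived bits; the masks keep the offset 4-byte aligned and below 128 bytes on both code paths. A quick check of that arithmetic with a stand-in TSC value (not part of the patch):

/* Sketch (not part of the patch): the masking used by pax_randomize_kstack().
 * Both variants yield a 4-byte-aligned offset in 0..124 (32 possible values). */
#include <stdio.h>

int main(void)
{
    unsigned long tsc = 0x12345678UL;              /* stand-in for rdtscl()    */
    unsigned long generic = (tsc & 0x1FUL) << 2;   /* non-P4 path              */
    unsigned long p4      = (tsc & 0x3EUL) << 1;   /* P4 path, TSC LSB ignored */

    printf("generic offset: %lu, P4 offset: %lu, max: %lu\n",
           generic, p4, (0x1FUL) << 2);
    return 0;
}
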
+diff -Nurp linux-2.4.25/arch/i386/kernel/setup.c linux-2.4.25-pax/arch/i386/kernel/setup.c
+--- linux-2.4.25/arch/i386/kernel/setup.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/setup.c 2004-02-19 11:12:52.000000000 -0500
+@@ -3191,7 +3191,7 @@ void __init cpu_init (void)
+ set_tss_desc(nr,t);
+ gdt_table[__TSS(nr)].b &= 0xfffffdff;
+ load_TR(nr);
+- load_LDT(&init_mm.context);
++ _load_LDT(&init_mm.context);
+
+ /*
+ * Clear all 6 debug registers:
+@@ -3257,7 +3257,16 @@ int __init ppro_with_ram_bug(void)
+ printk(KERN_INFO "Your Pentium Pro seems ok.\n");
+ return 0;
+ }
+-
++
++#ifdef CONFIG_PAX_SOFTMODE
++static int __init setup_pax_softmode(char *str)
++{
++ get_option (&str, &pax_softmode);
++ return 1;
++}
++__setup("pax_softmode=", setup_pax_softmode);
++#endif
++
+ /*
+ * Local Variables:
+ * mode:c
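
The __setup() hook above registers pax_softmode= as a kernel command-line parameter and parses it with get_option() into the pax_softmode flag. Booting with, say, pax_softmode=1 appended to the kernel arguments should therefore select soft mode, though the exact meaning of the value is defined by the CONFIG_PAX_SOFTMODE code elsewhere in the patch rather than here.
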
+diff -Nurp linux-2.4.25/arch/i386/kernel/sys_i386.c linux-2.4.25-pax/arch/i386/kernel/sys_i386.c
+--- linux-2.4.25/arch/i386/kernel/sys_i386.c 2003-08-25 07:44:39.000000000 -0400
++++ linux-2.4.25-pax/arch/i386/kernel/sys_i386.c 2004-02-19 11:12:52.000000000 -0500
+@@ -48,6 +48,11 @@ static inline long do_mmap2(
+ int error = -EBADF;
+ struct file * file = NULL;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+@@ -56,7 +61,7 @@ static inline long do_mmap2(
+ }
+
+ down_write(&current->mm->mmap_sem);
+- error = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
++ error = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
+ up_write(&current->mm->mmap_sem);
+
+ if (file)
+diff -Nurp linux-2.4.25/arch/i386/kernel/trampoline.S linux-2.4.25-pax/arch/i386/kernel/trampoline.S
+--- linux-2.4.25/arch/i386/kernel/trampoline.S 2002-11-28 18:53:09.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/trampoline.S 2004-02-19 11:12:52.000000000 -0500
+@@ -54,7 +54,7 @@ r_base = .
+ lmsw %ax # into protected mode
+ jmp flush_instr
+ flush_instr:
+- ljmpl $__KERNEL_CS, $0x00100000
++ ljmpl $__KERNEL_CS, $SYMBOL_NAME(startup_32)-__PAGE_OFFSET
+ # jump to startup_32 in arch/i386/kernel/head.S
+
+ idt_48:
+diff -Nurp linux-2.4.25/arch/i386/kernel/traps.c linux-2.4.25-pax/arch/i386/kernel/traps.c
+--- linux-2.4.25/arch/i386/kernel/traps.c 2002-11-28 18:53:09.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/kernel/traps.c 2004-02-19 11:12:52.000000000 -0500
+@@ -54,7 +54,7 @@ asmlinkage int system_call(void);
+ asmlinkage void lcall7(void);
+ asmlinkage void lcall27(void);
+
+-struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
++const struct desc_struct default_ldt[] = { { 0, 0 }, { 0, 0 }, { 0, 0 },
+ { 0, 0 }, { 0, 0 } };
+
+ /*
+@@ -228,14 +228,23 @@ void show_registers(struct pt_regs *regs
+ show_stack((unsigned long*)esp);
+
+ printk("\nCode: ");
++
++#ifndef CONFIG_PAX_KERNEXEC
+ if(regs->eip < PAGE_OFFSET)
+ goto bad;
++#endif
+
+ for(i=0;i<20;i++)
+ {
+ unsigned char c;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if(__get_user(c, &((unsigned char*)regs->eip)[i+__KERNEL_TEXT_OFFSET])) {
++#else
+ if(__get_user(c, &((unsigned char*)regs->eip)[i])) {
+ bad:
++#endif
++
+ printk(" Bad EIP value.");
+ break;
+ }
+@@ -258,8 +267,13 @@ static void handle_BUG(struct pt_regs *r
+
+ eip = regs->eip;
+
++#ifdef CONFIG_PAX_KERNEXEC
++ eip += __KERNEL_TEXT_OFFSET;
++#else
+ if (eip < PAGE_OFFSET)
+ goto no_bug;
++#endif
++
+ if (__get_user(ud2, (unsigned short *)eip))
+ goto no_bug;
+ if (ud2 != 0x0b0f)
+@@ -267,7 +281,13 @@ static void handle_BUG(struct pt_regs *r
+ if (__get_user(line, (unsigned short *)(eip + 2)))
+ goto bug;
+ if (__get_user(file, (char **)(eip + 4)) ||
++
++#ifdef CONFIG_PAX_KERNEXEC
++ __get_user(c, file + __KERNEL_TEXT_OFFSET))
++#else
+ (unsigned long)file < PAGE_OFFSET || __get_user(c, file))
++#endif
++
+ file = "<bad filename>";
+
+ printk("kernel BUG at %s:%d!\n", file, line);
+@@ -422,6 +442,13 @@ gp_in_kernel:
+ regs->eip = fixup;
+ return;
+ }
++
++#ifdef CONFIG_PAX_KERNEXEC
++ if ((regs->xcs & 0xFFFF) == __KERNEL_CS)
++ die("PAX: suspicious general protection fault", regs, error_code);
++ else
++#endif
++
+ die("general protection fault", regs, error_code);
+ }
+ }
+@@ -527,13 +554,12 @@ asmlinkage void do_debug(struct pt_regs
+ {
+ unsigned int condition;
+ struct task_struct *tsk = current;
+- unsigned long eip = regs->eip;
+ siginfo_t info;
+
+ __asm__ __volatile__("movl %%db6,%0" : "=r" (condition));
+
+ /* If the user set TF, it's simplest to clear it right away. */
+- if ((eip >=PAGE_OFFSET) && (regs->eflags & TF_MASK))
++ if (!(regs->xcs & 3) && (regs->eflags & TF_MASK) && !(regs->eflags & VM_MASK))
+ goto clear_TF;
+
+ /* Mask out spurious debug traps due to lazy DR7 setting */
+@@ -826,7 +852,7 @@ static void __init set_system_gate(unsig
+ _set_gate(idt_table+n,15,3,addr);
+ }
+
+-static void __init set_call_gate(void *a, void *addr)
++static void __init set_call_gate(const void *a, void *addr)
+ {
+ _set_gate(a,12,3,addr);
+ }
+@@ -852,14 +878,58 @@ __asm__ __volatile__ ("movw %w3,0(%2)\n\
+ "rorl $16,%%eax" \
+ : "=m"(*(n)) : "a" (addr), "r"(n), "ir"(limit), "i"(type))
+
+-void set_tss_desc(unsigned int n, void *addr)
++void set_tss_desc(unsigned int n, const void *addr)
+ {
+ _set_tssldt_desc(gdt_table+__TSS(n), (int)addr, 235, 0x89);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ _set_tssldt_desc(gdt_table2+__TSS(n), (int)addr, 235, 0x89);
++#endif
++
+ }
+
+-void set_ldt_desc(unsigned int n, void *addr, unsigned int size)
++void __set_ldt_desc(unsigned int n, const void *addr, unsigned int size)
+ {
+ _set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ _set_tssldt_desc(gdt_table2+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
++#endif
++
++}
++
++void set_ldt_desc(unsigned int n, const void *addr, unsigned int size)
++{
++
++#ifdef CONFIG_PAX_KERNEXEC
++ unsigned long temp, cr3;
++ pgd_t* pgd;
++ pmd_t* pmd;
++
++ asm("movl %%cr3,%0":"=r" (cr3));
++ for (temp = __KERNEL_TEXT_OFFSET; temp < __KERNEL_TEXT_OFFSET + 0x00400000UL; temp += (1UL << PMD_SHIFT)) {
++ pgd = (pgd_t *)__va(cr3) + __pgd_offset(temp);
++ pmd = pmd_offset(pgd, temp);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) | _PAGE_RW));
++ }
++ __flush_tlb_all();
++#endif
++
++ _set_tssldt_desc(gdt_table+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ _set_tssldt_desc(gdt_table2+__LDT(n), (int)addr, ((size << 3)-1), 0x82);
++#endif
++
++#ifdef CONFIG_PAX_KERNEXEC
++ for (temp = __KERNEL_TEXT_OFFSET; temp < __KERNEL_TEXT_OFFSET + 0x00400000UL; temp += (1UL << PMD_SHIFT)) {
++ pgd = (pgd_t *)__va(cr3) + __pgd_offset(temp);
++ pmd = pmd_offset(pgd, temp);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++ flush_tlb_all();
++#endif
++
+ }
+
+ #ifdef CONFIG_X86_VISWS_APIC
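
Under CONFIG_PAX_KERNEXEC, set_ldt_desc() above temporarily sets _PAGE_RW on the page-middle-directory entries covering the 4 MB window at __KERNEL_TEXT_OFFSET, updates the descriptors, and then clears the bit again; the matching loop in free_initmem() later in this patch is what makes that window read-only in the first place. Since the loop steps one PMD at a time, it only runs a couple of iterations. A trivial check of the loop-count arithmetic (PMD_SHIFT values assumed from the stock i386 headers; not part of the patch):

/* Sketch (not part of the patch): PMD entries touched by the RW toggle loop. */
#include <stdio.h>

int main(void)
{
    unsigned long window = 0x00400000UL;   /* 4 MB, as in the loop bounds above */
    int shifts[] = { 22, 21 };             /* PMD_SHIFT: non-PAE, PAE (assumed) */
    int i;

    for (i = 0; i < 2; i++)
        printf("PMD_SHIFT=%d -> %lu PMD(s) to toggle\n",
               shifts[i], window >> shifts[i]);
    return 0;
}
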
+diff -Nurp linux-2.4.25/arch/i386/mm/fault.c linux-2.4.25-pax/arch/i386/mm/fault.c
+--- linux-2.4.25/arch/i386/mm/fault.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/mm/fault.c 2004-02-19 11:12:52.000000000 -0500
+@@ -19,6 +19,8 @@
+ #include <linux/init.h>
+ #include <linux/tty.h>
+ #include <linux/vt_kern.h> /* For unblank_screen() */
++#include <linux/unistd.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/uaccess.h>
+@@ -127,6 +129,10 @@ void bust_spinlocks(int yes)
+ asmlinkage void do_invalid_op(struct pt_regs *, unsigned long);
+ extern unsigned long idt;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++static int pax_handle_fetch_fault(struct pt_regs *regs);
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+@@ -137,23 +143,32 @@ extern unsigned long idt;
+ * bit 1 == 0 means read, 1 means write
+ * bit 2 == 0 means kernel, 1 means user-mode
+ */
+-asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++static int do_page_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
++#else
++asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long error_code)
++#endif
+ {
+ struct task_struct *tsk;
+ struct mm_struct *mm;
+ struct vm_area_struct * vma;
++#ifndef CONFIG_PAX_PAGEEXEC
+ unsigned long address;
++#endif
+ unsigned long page;
+ unsigned long fixup;
+ int write;
+ siginfo_t info;
+
++#ifndef CONFIG_PAX_PAGEEXEC
+ /* get the address */
+ __asm__("movl %%cr2,%0":"=r" (address));
+
+ /* It's safe to allow irq's after cr2 has been saved */
+ if (regs->eflags & X86_EFLAGS_IF)
+ local_irq_enable();
++#endif
+
+ tsk = current;
+
+@@ -258,7 +273,7 @@ good_area:
+ tsk->thread.screen_bitmap |= 1 << bit;
+ }
+ up_read(&mm->mmap_sem);
+- return;
++ return 0;
+
+ /*
+ * Something tried to access memory that isn't in our memory map..
+@@ -269,6 +284,41 @@ bad_area:
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (error_code & 4) {
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++
++#if defined(CONFIG_PAX_EMUTRAMP) || defined(CONFIG_PAX_RANDEXEC)
++ if ((error_code == 4) && (regs->eip + SEGMEXEC_TASK_SIZE == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 5:
++ return 0;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 4:
++ return 0;
++
++ case 3:
++ case 2:
++ return 1;
++#endif
++
++ case 1:
++ default:
++ }
++ }
++#endif
++
++ if (address >= SEGMEXEC_TASK_SIZE) {
++ pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ tsk->thread.cr2 = address;
+ /* Kernel addresses are always protection faults */
+ tsk->thread.error_code = error_code | (address >= TASK_SIZE);
+@@ -278,7 +328,7 @@ bad_area:
+ /* info.si_code has been set above */
+ info.si_addr = (void *)address;
+ force_sig_info(SIGSEGV, &info, tsk);
+- return;
++ return 0;
+ }
+
+ /*
+@@ -291,7 +341,7 @@ bad_area:
+
+ if (nr == 6) {
+ do_invalid_op(regs, 0);
+- return;
++ return 0;
+ }
+ }
+
+@@ -299,7 +349,7 @@ no_context:
+ /* Are we prepared to handle this kernel fault? */
+ if ((fixup = search_exception_table(regs->eip)) != 0) {
+ regs->eip = fixup;
+- return;
++ return 0;
+ }
+
+ /*
+@@ -311,6 +361,13 @@ no_context:
+
+ if (address < PAGE_SIZE)
+ printk(KERN_ALERT "Unable to handle kernel NULL pointer dereference");
++
++#ifdef CONFIG_PAX_KERNEXEC
++ else if (init_mm.start_code + __KERNEL_TEXT_OFFSET <= address && address < init_mm.end_code + __KERNEL_TEXT_OFFSET)
++ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code",
++ tsk->comm, tsk->pid, tsk->uid, tsk->euid);
++#endif
++
+ else
+ printk(KERN_ALERT "Unable to handle kernel paging request");
+ printk(" at virtual address %08lx\n",address);
+@@ -363,7 +420,7 @@ do_sigbus:
+ /* Kernel mode? Handle exceptions or die */
+ if (!(error_code & 4))
+ goto no_context;
+- return;
++ return 0;
+
+ vmalloc_fault:
+ {
+@@ -396,6 +453,454 @@ vmalloc_fault:
+ pte_k = pte_offset(pmd_k, address);
+ if (!pte_present(*pte_k))
+ goto no_context;
+- return;
++ return 0;
++ }
++}
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/* PaX: called with the page_table_lock spinlock held */
++static inline pte_t * pax_get_pte(struct mm_struct *mm, unsigned long address)
++{
++ pgd_t *pgd;
++ pmd_t *pmd;
++
++ pgd = pgd_offset(mm, address);
++ if (!pgd || !pgd_present(*pgd))
++ return 0;
++ pmd = pmd_offset(pgd, address);
++ if (!pmd || !pmd_present(*pmd))
++ return 0;
++ return pte_offset(pmd, address);
++}
++#endif
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++/*
++ * PaX: decide what to do with offenders (regs->eip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when sigreturn trampoline was detected
++ * 3 when rt_sigreturn trampoline was detected
++ * 4 when gcc trampoline was detected
++ * 5 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++#ifdef CONFIG_PAX_EMUTRAMP
++ static const unsigned char trans[8] = {6, 1, 2, 0, 13, 5, 3, 4};
++#endif
++
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ unsigned long esp_4;
++
++ if (regs->eip >= current->mm->start_code &&
++ regs->eip < current->mm->end_code)
++ {
++ err = get_user(esp_4, (unsigned long*)(regs->esp-4UL));
++ if (err || esp_4 == regs->eip)
++ return 1;
++
++ regs->eip += current->mm->delta_exec;
++ return 5;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: sigreturn emulation */
++ unsigned char pop, mov;
++ unsigned short sys;
++ unsigned long nr;
++
++ err = get_user(pop, (unsigned char *)(regs->eip));
++ err |= get_user(mov, (unsigned char *)(regs->eip + 1));
++ err |= get_user(nr, (unsigned long *)(regs->eip + 2));
++ err |= get_user(sys, (unsigned short *)(regs->eip + 6));
++
++ if (err)
++ break;
++
++ if (pop == 0x58 &&
++ mov == 0xb8 &&
++ nr == __NR_sigreturn &&
++ sys == 0x80cd)
++ {
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ int sig;
++ struct k_sigaction *ka;
++ __sighandler_t handler;
++
++ if (get_user(sig, (int *)regs->esp))
++ return 1;
++ if (sig < 1 || sig > _NSIG || sig == SIGKILL || sig == SIGSTOP)
++ return 1;
++ ka = &current->sig->action[sig-1];
++ handler = ka->sa.sa_handler;
++ if (handler == SIG_DFL || handler == SIG_IGN) {
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++ } else if (ka->sa.sa_flags & SA_SIGINFO)
++ return 1;
++#endif
++
++ regs->esp += 4;
++ regs->eax = nr;
++ regs->eip += 8;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned char mov;
++ unsigned short sys;
++ unsigned long nr;
++
++ err = get_user(mov, (unsigned char *)(regs->eip));
++ err |= get_user(nr, (unsigned long *)(regs->eip + 1));
++ err |= get_user(sys, (unsigned short *)(regs->eip + 5));
++
++ if (err)
++ break;
++
++ if (mov == 0xb8 &&
++ nr == __NR_rt_sigreturn &&
++ sys == 0x80cd)
++ {
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ int sig;
++ struct k_sigaction *ka;
++ __sighandler_t handler;
++
++ if (get_user(sig, (int *)regs->esp))
++ return 1;
++ if (sig < 1 || sig > _NSIG || sig == SIGKILL || sig == SIGSTOP)
++ return 1;
++ ka = &current->sig->action[sig-1];
++ handler = ka->sa.sa_handler;
++ if (handler == SIG_DFL || handler == SIG_IGN) {
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++ } else if (!(ka->sa.sa_flags & SA_SIGINFO))
++ return 1;
++#endif
++
++ regs->eax = nr;
++ regs->eip += 7;
++ return 3;
++ }
++ } while (0);
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: gcc trampoline emulation #1 */
++ unsigned char mov1, mov2;
++ unsigned short jmp;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov1, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(mov2, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(jmp, (unsigned short *)(regs->eip + 10));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-2));
++ if (err)
++ break;
++
++ if ((mov1 & 0xF8) == 0xB8 &&
++ (mov2 & 0xF8) == 0xB8 &&
++ (mov1 & 0x07) != (mov2 & 0x07) &&
++ (jmp & 0xF8FF) == 0xE0FF &&
++ (mov2 & 0x07) == ((jmp>>8) & 0x07) &&
++ (call & 0xF8FF) == 0xD0FF &&
++ regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
++ {
++ ((unsigned long *)regs)[trans[mov1 & 0x07]] = addr1;
++ ((unsigned long *)regs)[trans[mov2 & 0x07]] = addr2;
++ regs->eip = addr2;
++ return 4;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #2 */
++ unsigned char mov, jmp;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-2));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ (call & 0xF8FF) == 0xD0FF &&
++ regs->eip == ((unsigned long*)regs)[trans[(call>>8) & 0x07]])
++ {
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 4;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #3 */
++ unsigned char mov, jmp;
++ char offset;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-3));
++ err |= get_user(offset, (char *)(ret-1));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x55FF)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->ebp + (unsigned long)(long)offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 4;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #4 */
++ unsigned char mov, jmp, sib;
++ char offset;
++ unsigned long addr1, addr2, ret;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-4));
++ err |= get_user(sib, (unsigned char *)(ret-2));
++ err |= get_user(offset, (char *)(ret-1));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x54FF &&
++ sib == 0x24)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->esp + 4 + (unsigned long)(long)offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 4;
++ }
++ } while (0);
++
++ do { /* PaX: gcc trampoline emulation #5 */
++ unsigned char mov, jmp, sib;
++ unsigned long addr1, addr2, ret, offset;
++ unsigned short call;
++
++ err = get_user(mov, (unsigned char *)regs->eip);
++ err |= get_user(addr1, (unsigned long *)(regs->eip + 1));
++ err |= get_user(jmp, (unsigned char *)(regs->eip + 5));
++ err |= get_user(addr2, (unsigned long *)(regs->eip + 6));
++ err |= get_user(ret, (unsigned long *)regs->esp);
++
++ if (err)
++ break;
++
++ err = get_user(call, (unsigned short *)(ret-7));
++ err |= get_user(sib, (unsigned char *)(ret-5));
++ err |= get_user(offset, (unsigned long *)(ret-4));
++ if (err)
++ break;
++
++ if ((mov & 0xF8) == 0xB8 &&
++ jmp == 0xE9 &&
++ call == 0x94FF &&
++ sib == 0x24)
++ {
++ unsigned long addr;
++
++ err = get_user(addr, (unsigned long*)(regs->esp + 4 + offset));
++ if (err || regs->eip != addr)
++ break;
++
++ ((unsigned long *)regs)[trans[mov & 0x07]] = addr1;
++ regs->eip += addr2 + 10;
++ return 4;
++ }
++ } while (0);
++#endif
++
++ return 1; /* PaX in action */
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned char c;
++ if (get_user(c, (unsigned char*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%02x ", c);
+ }
++ printk("\n");
+ }
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: handle the extra page faults or pass it down to the original handler
++ *
++ * returns 0 when nothing special was detected
++ * 1 when sigreturn trampoline (syscall) has to be emulated
++ */
++asmlinkage int pax_do_page_fault(struct pt_regs *regs, unsigned long error_code)
++{
++ struct mm_struct *mm = current->mm;
++ unsigned long address;
++ pte_t *pte;
++ unsigned char pte_mask;
++ int ret;
++
++ __asm__("movl %%cr2,%0":"=r" (address));
++
++ /* It's safe to allow irq's after cr2 has been saved */
++ if (likely(regs->eflags & X86_EFLAGS_IF))
++ local_irq_enable();
++
++ if (unlikely((error_code & 5) != 5 ||
++ address >= TASK_SIZE ||
++ !(current->flags & PF_PAX_PAGEEXEC)))
++ return do_page_fault(regs, error_code, address);
++
++ /* PaX: it's our fault, let's handle it if we can */
++
++ /* PaX: take a look at read faults before acquiring any locks */
++ if (unlikely((error_code == 5) && (regs->eip == address))) {
++ /* instruction fetch attempt from a protected page in user mode */
++ ret = pax_handle_fetch_fault(regs);
++ switch (ret) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 5:
++ return 0;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 4:
++ return 0;
++
++ case 3:
++ case 2:
++ return 1;
++#endif
++
++ case 1:
++ default:
++ pax_report_fault(regs, (void*)regs->eip, (void*)regs->esp);
++ do_exit(SIGKILL);
++ }
++ }
++
++ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & 2) << (_PAGE_BIT_DIRTY-1));
++
++ spin_lock(&mm->page_table_lock);
++ pte = pax_get_pte(mm, address);
++ if (unlikely(!pte || !(pte_val(*pte) & _PAGE_PRESENT) || pte_exec(*pte))) {
++ spin_unlock(&mm->page_table_lock);
++ do_page_fault(regs, error_code, address);
++ return 0;
++ }
++
++ if (unlikely((error_code == 7) && !pte_write(*pte))) {
++ /* write attempt to a protected page in user mode */
++ spin_unlock(&mm->page_table_lock);
++ do_page_fault(regs, error_code, address);
++ return 0;
++ }
++
++ /*
++ * PaX: fill DTLB with user rights and retry
++ */
++ __asm__ __volatile__ (
++ "orb %2,%1\n"
++#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
++/*
++ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
++ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
++ * page fault when examined during a TLB load attempt. this is true not only
++ * for PTEs holding a non-present entry but also present entries that will
++ * raise a page fault (such as those set up by PaX, or the copy-on-write
++ * mechanism). in effect it means that we do *not* need to flush the TLBs
++ * for our target pages since their PTEs are simply not in the TLBs at all.
++
++ * the best thing in omitting it is that we gain around 15-20% speed in the
++ * fast path of the page fault handler and can get rid of tracing since we
++ * can no longer flush unintended entries.
++ */
++ "invlpg %0\n"
++#endif
++ "testb $0,%0\n"
++ "xorb %3,%1\n"
++ :
++ : "m" (*(char*)address), "m" (*(char*)pte), "q" (pte_mask), "i" (_PAGE_USER)
++ : "memory", "cc");
++ spin_unlock(&mm->page_table_lock);
++ return 0;
++}
++#endif
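
The fast path at the end of pax_do_page_fault() exploits the split instruction/data TLBs described in the long comment above: it ors user permission (plus accessed, and dirty on write faults) into the PTE, touches the address with a data read so only the DTLB caches the now-accessible translation, and then strips _PAGE_USER again so instruction fetches keep faulting. The pte_mask value can be reproduced in isolation (standard i386 _PAGE_* constants assumed; not part of the patch):

/* Sketch (not part of the patch): pte_mask from pax_do_page_fault() for a
 * user read fault (error_code 5) and a user write fault (error_code 7). */
#include <stdio.h>

#define _PAGE_BIT_DIRTY 6       /* i386 values assumed */
#define _PAGE_USER      0x004
#define _PAGE_ACCESSED  0x020

int main(void)
{
    unsigned long codes[] = { 5, 7 };
    int i;

    for (i = 0; i < 2; i++) {
        unsigned long error_code = codes[i];
        unsigned char pte_mask = _PAGE_ACCESSED | _PAGE_USER |
                                 ((error_code & 2) << (_PAGE_BIT_DIRTY - 1));
        printf("error_code=%lu -> pte_mask=0x%02x%s\n", error_code, pte_mask,
               (pte_mask & 0x40) ? " (dirty set)" : "");
    }
    return 0;
}
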
+diff -Nurp linux-2.4.25/arch/i386/mm/init.c linux-2.4.25-pax/arch/i386/mm/init.c
+--- linux-2.4.25/arch/i386/mm/init.c 2003-06-13 10:51:29.000000000 -0400
++++ linux-2.4.25-pax/arch/i386/mm/init.c 2004-02-19 11:12:52.000000000 -0500
+@@ -37,6 +37,7 @@
+ #include <asm/e820.h>
+ #include <asm/apic.h>
+ #include <asm/tlb.h>
++#include <asm/desc.h>
+
+ mmu_gather_t mmu_gathers[NR_CPUS];
+ unsigned long highstart_pfn, highend_pfn;
+@@ -122,7 +123,7 @@ void show_mem(void)
+
+ /* References to section boundaries */
+
+-extern char _text, _etext, _edata, __bss_start, _end;
++extern char _text, _etext, _data, _edata, __bss_start, _end;
+ extern char __init_begin, __init_end;
+
+ static inline void set_pte_phys (unsigned long vaddr,
+@@ -521,7 +522,7 @@ void __init mem_init(void)
+ reservedpages = free_pages_init();
+
+ codesize = (unsigned long) &_etext - (unsigned long) &_text;
+- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
++ datasize = (unsigned long) &_edata - (unsigned long) &_data;
+ initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
+
+ printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, %dk reserved, %dk data, %dk init, %ldk highmem)\n",
+@@ -589,6 +590,38 @@ void free_initmem(void)
+ totalram_pages++;
+ }
+ printk (KERN_INFO "Freeing unused kernel memory: %dk freed\n", (&__init_end - &__init_begin) >> 10);
++
++#ifdef CONFIG_PAX_KERNEXEC
++ /* PaX: limit KERNEL_CS to actual size */
++ {
++ unsigned long limit;
++
++ limit = (unsigned long)&_etext >> PAGE_SHIFT;
++ gdt_table[2].a = (gdt_table[2].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
++ gdt_table[2].b = (gdt_table[2].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);
++
++#ifdef CONFIG_PCI_BIOS
++ printk(KERN_INFO "PAX: warning, PCI BIOS might still be in use, keeping flat KERNEL_CS.\n");
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ gdt_table2[2].a = (gdt_table2[2].a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);
++ gdt_table2[2].b = (gdt_table2[2].b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);
++#endif
++
++ /* PaX: make KERNEL_CS read-only */
++ for (addr = __KERNEL_TEXT_OFFSET; addr < __KERNEL_TEXT_OFFSET + 0x00400000UL; addr += (1UL << PMD_SHIFT)) {
++ pgd_t *pgd;
++ pmd_t *pmd;
++
++ pgd = pgd_offset_k(addr);
++ pmd = pmd_offset(pgd, addr);
++ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
++ }
++ flush_tlb_all();
++ }
++#endif
++
+ }
+
+ #ifdef CONFIG_BLK_DEV_INITRD
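
In free_initmem() above, KERNEXEC trims the KERNEL_CS limit down to the real end of kernel text: the page-granular limit is 20 bits, of which the low 16 land in the low descriptor word and the top 4 stay in bits 16-19 of the high word, which is exactly what the two mask-and-or lines do. A standalone sketch of the same splitting with a made-up limit (desc_struct layout as in the i386 headers; not part of the patch):

/* Sketch (not part of the patch): splitting a 20-bit, page-granular segment
 * limit across the two words of a descriptor, as done for gdt_table[2]. */
#include <stdio.h>

struct desc_struct { unsigned long a, b; };   /* as in i386 <asm/desc.h> */

int main(void)
{
    unsigned long limit = 0x12345UL;   /* hypothetical _etext >> PAGE_SHIFT */
    struct desc_struct d = { 0x0000ffffUL, 0x00cf9b00UL };   /* flat 4GB code */

    d.a = (d.a & 0xFFFF0000UL) | (limit & 0x0FFFFUL);   /* limit[15:0]  */
    d.b = (d.b & 0xFFF0FFFFUL) | (limit & 0xF0000UL);   /* limit[19:16] */

    printf("limit=0x%05lx -> a=0x%08lx b=0x%08lx\n", limit, d.a, d.b);
    return 0;
}
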
+diff -Nurp linux-2.4.25/arch/i386/vmlinux.lds linux-2.4.25-pax/arch/i386/vmlinux.lds
+--- linux-2.4.25/arch/i386/vmlinux.lds 2002-02-25 14:37:53.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/vmlinux.lds 1969-12-31 19:00:00.000000000 -0500
+@@ -1,82 +0,0 @@
+-/* ld script to make i386 Linux kernel
+- * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
+- */
+-OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
+-OUTPUT_ARCH(i386)
+-ENTRY(_start)
+-SECTIONS
+-{
+- . = 0xC0000000 + 0x100000;
+- _text = .; /* Text and read-only data */
+- .text : {
+- *(.text)
+- *(.fixup)
+- *(.gnu.warning)
+- } = 0x9090
+-
+- _etext = .; /* End of text section */
+-
+- .rodata : { *(.rodata) *(.rodata.*) }
+- .kstrtab : { *(.kstrtab) }
+-
+- . = ALIGN(16); /* Exception table */
+- __start___ex_table = .;
+- __ex_table : { *(__ex_table) }
+- __stop___ex_table = .;
+-
+- __start___ksymtab = .; /* Kernel symbol table */
+- __ksymtab : { *(__ksymtab) }
+- __stop___ksymtab = .;
+-
+- .data : { /* Data */
+- *(.data)
+- CONSTRUCTORS
+- }
+-
+- _edata = .; /* End of data section */
+-
+- . = ALIGN(8192); /* init_task */
+- .data.init_task : { *(.data.init_task) }
+-
+- . = ALIGN(4096); /* Init code and data */
+- __init_begin = .;
+- .text.init : { *(.text.init) }
+- .data.init : { *(.data.init) }
+- . = ALIGN(16);
+- __setup_start = .;
+- .setup.init : { *(.setup.init) }
+- __setup_end = .;
+- __initcall_start = .;
+- .initcall.init : { *(.initcall.init) }
+- __initcall_end = .;
+- . = ALIGN(4096);
+- __init_end = .;
+-
+- . = ALIGN(4096);
+- .data.page_aligned : { *(.data.idt) }
+-
+- . = ALIGN(32);
+- .data.cacheline_aligned : { *(.data.cacheline_aligned) }
+-
+- __bss_start = .; /* BSS */
+- .bss : {
+- *(.bss)
+- }
+- _end = . ;
+-
+- /* Sections to be discarded */
+- /DISCARD/ : {
+- *(.text.exit)
+- *(.data.exit)
+- *(.exitcall.exit)
+- }
+-
+- /* Stabs debugging sections. */
+- .stab 0 : { *(.stab) }
+- .stabstr 0 : { *(.stabstr) }
+- .stab.excl 0 : { *(.stab.excl) }
+- .stab.exclstr 0 : { *(.stab.exclstr) }
+- .stab.index 0 : { *(.stab.index) }
+- .stab.indexstr 0 : { *(.stab.indexstr) }
+- .comment 0 : { *(.comment) }
+-}
+diff -Nurp linux-2.4.25/arch/i386/vmlinux.lds.S linux-2.4.25-pax/arch/i386/vmlinux.lds.S
+--- linux-2.4.25/arch/i386/vmlinux.lds.S 1969-12-31 19:00:00.000000000 -0500
++++ linux-2.4.25-pax/arch/i386/vmlinux.lds.S 2004-02-19 11:12:52.000000000 -0500
+@@ -0,0 +1,125 @@
++/* ld script to make i386 Linux kernel
++ * Written by Martin Mares <mj@atrey.karlin.mff.cuni.cz>;
++ */
++OUTPUT_FORMAT("elf32-i386", "elf32-i386", "elf32-i386")
++OUTPUT_ARCH(i386)
++ENTRY(_start)
++SECTIONS
++{
++ . = __PAGE_OFFSET + 0x100000;
++ .text.startup : {
++ BYTE(0xEA) /* jmp far */
++ LONG(startup_32 + __KERNEL_TEXT_OFFSET - __PAGE_OFFSET)
++ SHORT(__KERNEL_CS)
++ }
++
++ . = ALIGN(32);
++ _data = .;
++ .data : { /* Data */
++ *(.data)
++ CONSTRUCTORS
++ }
++
++ . = ALIGN(32);
++ .data.cacheline_aligned : { *(.data.cacheline_aligned) }
++
++ . = ALIGN(8192);
++ .data.init_task : { *(.data.init_task) }
++
++ . = ALIGN(4096);
++ .data.page_aligned : {
++ *(.data.swapper_pg_dir)
++ *(.data.pg0)
++ *(.data.pg1)
++ *(.data.pg2)
++ }
++
++ _edata = .; /* End of data section */
++
++ __bss_start = .; /* BSS */
++ .bss : {
++ *(.bss)
++ LONG(0)
++ }
++ __bss_end = . ;
++
++ . = ALIGN(4096); /* Init code and data */
++ __init_begin = .;
++ .data.init : { *(.data.init) }
++ . = ALIGN(16);
++ __setup_start = .;
++ .setup.init : { *(.setup.init) }
++ __setup_end = .;
++ __initcall_start = .;
++ .initcall.init : { *(.initcall.init) }
++ __initcall_end = .;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ __text_init_start = .;
++ .text.init (. - __KERNEL_TEXT_OFFSET) : AT (__text_init_start) {
++ *(.text.init)
++ . = ALIGN(4*1024*1024) - 1;
++ BYTE(0)
++ }
++ __init_end = . + __KERNEL_TEXT_OFFSET;
++
++/*
++ * PaX: this must be kept in synch with the KERNEL_CS base
++ * in the GDTs in arch/i386/kernel/head.S
++ */
++ _text = .; /* Text and read-only data */
++ .text : AT (. + __KERNEL_TEXT_OFFSET) {
++#else
++ .text.init : { *(.text.init) }
++ . = ALIGN(4096);
++ __init_end = .;
++ _text = .; /* Text and read-only data */
++ .text : {
++#endif
++
++ *(.text)
++ *(.fixup)
++ *(.gnu.warning)
++ } = 0x9090
++
++ _etext = .; /* End of text section */
++ . = ALIGN(4096);
++ . += __KERNEL_TEXT_OFFSET;
++ .rodata.page_aligned : {
++ *(.data.empty_zero_page)
++ *(.data.idt)
++ }
++ .rodata : { *(.rodata) *(.rodata.*) }
++ .kstrtab : { *(.kstrtab) }
++
++ . = ALIGN(16); /* Exception table */
++ __start___ex_table = .;
++ __ex_table : { *(__ex_table) }
++ __stop___ex_table = .;
++
++ __start___ksymtab = .; /* Kernel symbol table */
++ __ksymtab : { *(__ksymtab) }
++ __stop___ksymtab = .;
++
++#ifdef CONFIG_PAX_KERNEXEC
++ _end = ALIGN(4*1024*1024);
++#else
++ _end = .;
++#endif
++
++ /* Sections to be discarded */
++ /DISCARD/ : {
++ *(.text.exit)
++ *(.data.exit)
++ *(.exitcall.exit)
++ }
++
++ /* Stabs debugging sections. */
++ .stab 0 : { *(.stab) }
++ .stabstr 0 : { *(.stabstr) }
++ .stab.excl 0 : { *(.stab.excl) }
++ .stab.exclstr 0 : { *(.stab.exclstr) }
++ .stab.index 0 : { *(.stab.index) }
++ .stab.indexstr 0 : { *(.stab.indexstr) }
++ .comment 0 : { *(.comment) }
++}
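
The AT() clauses in the new vmlinux.lds.S are what decouple the addresses the kernel text executes at from the linear addresses where its bytes are mapped; the difference is __KERNEL_TEXT_OFFSET, which is why the earlier traps.c hunks add that offset to regs->eip before reading instruction bytes with __get_user(). A toy illustration of that relationship (the offset and EIP values below are purely hypothetical):

/* Sketch (not part of the patch): recovering the readable address of the
 * instruction bytes from a KERNEXEC EIP. Values are hypothetical. */
#include <stdio.h>

int main(void)
{
    unsigned long kernel_text_offset = 0x00400000UL;  /* hypothetical        */
    unsigned long eip = 0x00123456UL;                 /* hypothetical EIP    */
    unsigned long bytes = eip + kernel_text_offset;   /* where to read them  */

    printf("eip=0x%08lx -> instruction bytes at 0x%08lx\n", eip, bytes);
    return 0;
}
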
+diff -Nurp linux-2.4.25/arch/ia64/config.in linux-2.4.25-pax/arch/ia64/config.in
+--- linux-2.4.25/arch/ia64/config.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/ia64/config.in 2004-02-19 11:12:52.000000000 -0500
+@@ -319,3 +319,60 @@ fi
+ int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
+
+ endmenu
++
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++# if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++# bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++# fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++ bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++# bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ bool ' Allow ELF ET_EXEC text relocations' CONFIG_PAX_ETEXECRELOCS
++ fi
++ fi
++ fi
++fi
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
+diff -Nurp linux-2.4.25/arch/ia64/ia32/binfmt_elf32.c linux-2.4.25-pax/arch/ia64/ia32/binfmt_elf32.c
+--- linux-2.4.25/arch/ia64/ia32/binfmt_elf32.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/ia64/ia32/binfmt_elf32.c 2004-02-19 11:12:52.000000000 -0500
+@@ -51,6 +51,17 @@ static void elf32_set_personality (void)
+ #undef SET_PERSONALITY
+ #define SET_PERSONALITY(ex, ibcs2) elf32_set_personality()
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) IA32_PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - IA32_PAGE_SHIFT)
++#endif
++
+ /* Ugly but avoids duplication */
+ #include "../../../fs/binfmt_elf.c"
+
+@@ -180,8 +191,15 @@ ia32_setup_arg_pages (struct linux_binpr
+ mpnt->vm_mm = current->mm;
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = IA32_STACK_TOP;
+- mpnt->vm_page_prot = PAGE_COPY;
+ mpnt->vm_flags = VM_STACK_FLAGS;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC))
++ mpnt->vm_page_prot = protection_map[(VM_STACK_FLAGS | VM_EXEC) & 0x7];
++ else
++#endif
++
++ mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
+ mpnt->vm_ops = NULL;
+ mpnt->vm_pgoff = 0;
+ mpnt->vm_file = NULL;
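
The PAX_DELTA_*_LEN macros above give the number of random bits applied at page granularity. For a PER_LINUX32 task that is 16 bits at IA32_PAGE_SHIFT, i.e. a 256 MB randomisation span if IA-32 pages are 4 kB; for the 64-bit personality the macros expand to 43 - IA32_PAGE_SHIFT bits, giving a 2^43-byte span. A one-line check of the 32-bit case (IA32_PAGE_SHIFT value assumed; not part of the patch):

/* Sketch (not part of the patch): randomisation span for PER_LINUX32. */
#include <stdio.h>

int main(void)
{
    unsigned int ia32_page_shift = 12;   /* assumed 4 kB ia32 pages            */
    unsigned int len = 16;               /* PAX_DELTA_MMAP_LEN for PER_LINUX32 */
    unsigned long long span = 1ULL << (len + ia32_page_shift);

    printf("span = %llu bytes (%llu MB)\n", span, span >> 20);
    return 0;
}
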
+diff -Nurp linux-2.4.25/arch/ia64/ia32/sys_ia32.c linux-2.4.25-pax/arch/ia64/ia32/sys_ia32.c
+--- linux-2.4.25/arch/ia64/ia32/sys_ia32.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/ia64/ia32/sys_ia32.c 2004-02-19 11:12:52.000000000 -0500
+@@ -534,6 +534,11 @@ sys32_mmap (struct mmap_arg_struct *arg)
+
+ flags = a.flags;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(a.fd);
+@@ -555,6 +560,11 @@ sys32_mmap2 (unsigned int addr, unsigned
+ struct file *file = NULL;
+ unsigned long retval;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -Nurp linux-2.4.25/arch/ia64/kernel/sys_ia64.c linux-2.4.25-pax/arch/ia64/kernel/sys_ia64.c
+--- linux-2.4.25/arch/ia64/kernel/sys_ia64.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/ia64/kernel/sys_ia64.c 2004-02-19 11:12:52.000000000 -0500
+@@ -34,6 +34,13 @@ arch_get_unmapped_area (struct file *fil
+ if (rgn_index(addr)==REGION_HPAGE)
+ addr = 0;
+ #endif
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -180,6 +187,11 @@ do_mmap2 (unsigned long addr, unsigned l
+ unsigned long roff;
+ struct file *file = 0;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -Nurp linux-2.4.25/arch/ia64/mm/fault.c linux-2.4.25-pax/arch/ia64/mm/fault.c
+--- linux-2.4.25/arch/ia64/mm/fault.c 2003-08-25 07:44:39.000000000 -0400
++++ linux-2.4.25-pax/arch/ia64/mm/fault.c 2004-02-19 11:12:52.000000000 -0500
+@@ -70,6 +70,53 @@ mapped_kernel_page_is_present (unsigned
+ return pte_present(pte);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->cr_iip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->cr_iip >= current->mm->start_code &&
++ regs->cr_iip < current->mm->end_code)
++ {
++#if 0
++ /* PaX: this needs fixing */
++ if (regs->b0 == regs->cr_iip)
++ return 1;
++#endif
++ regs->cr_iip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 8; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void
+ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
+ {
+@@ -122,9 +169,31 @@ ia64_do_page_fault (unsigned long addres
+ | (((isr >> IA64_ISR_W_BIT) & 1UL) << VM_WRITE_BIT)
+ | (((isr >> IA64_ISR_R_BIT) & 1UL) << VM_READ_BIT));
+
+- if ((vma->vm_flags & mask) != mask)
++ if ((vma->vm_flags & mask) != mask) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
++ if (!(current->flags & PF_PAX_PAGEEXEC) || address != regs->cr_iip)
++ goto bad_area;
++
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->cr_iip, (void*)regs->r12);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
+
++ }
++
+ survive:
+ /*
+ * If for any reason at all we couldn't handle the fault, make
+diff -Nurp linux-2.4.25/arch/mips/config-shared.in linux-2.4.25-pax/arch/mips/config-shared.in
+--- linux-2.4.25/arch/mips/config-shared.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/mips/config-shared.in 2004-02-19 11:12:52.000000000 -0500
+@@ -1132,5 +1132,62 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++# if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++# bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++# fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++ bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++# bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ fi
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++ if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++# if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++# fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source crypto/Config.in
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/mips/kernel/syscall.c linux-2.4.25-pax/arch/mips/kernel/syscall.c
+--- linux-2.4.25/arch/mips/kernel/syscall.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/mips/kernel/syscall.c 2004-02-19 11:12:52.000000000 -0500
+@@ -82,6 +82,11 @@ unsigned long arch_get_unmapped_area(str
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -92,6 +97,13 @@ unsigned long arch_get_unmapped_area(str
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ addr = TASK_UNMAPPED_BASE;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+diff -Nurp linux-2.4.25/arch/mips/mm/fault.c linux-2.4.25-pax/arch/mips/mm/fault.c
+--- linux-2.4.25/arch/mips/mm/fault.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/mips/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -69,6 +69,24 @@ void bust_spinlocks(int yes)
+ }
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -Nurp linux-2.4.25/arch/mips64/kernel/binfmt_elfn32.c linux-2.4.25-pax/arch/mips64/kernel/binfmt_elfn32.c
+--- linux-2.4.25/arch/mips64/kernel/binfmt_elfn32.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/mips64/kernel/binfmt_elfn32.c 2004-02-19 11:12:53.000000000 -0500
+@@ -50,6 +50,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/config.h>
+diff -Nurp linux-2.4.25/arch/mips64/kernel/binfmt_elfo32.c linux-2.4.25-pax/arch/mips64/kernel/binfmt_elfo32.c
+--- linux-2.4.25/arch/mips64/kernel/binfmt_elfo32.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/mips64/kernel/binfmt_elfo32.c 2004-02-19 11:12:53.000000000 -0500
+@@ -52,6 +52,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #undef ELF_ET_DYN_BASE
+ #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #include <asm/processor.h>
+ #include <linux/module.h>
+ #include <linux/config.h>
+diff -Nurp linux-2.4.25/arch/mips64/kernel/syscall.c linux-2.4.25-pax/arch/mips64/kernel/syscall.c
+--- linux-2.4.25/arch/mips64/kernel/syscall.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/mips64/kernel/syscall.c 2004-02-19 11:12:53.000000000 -0500
+@@ -77,6 +77,11 @@ unsigned long arch_get_unmapped_area(str
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+@@ -87,6 +92,13 @@ unsigned long arch_get_unmapped_area(str
+ (!vmm || addr + len <= vmm->vm_start))
+ return addr;
+ }
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ addr = TASK_UNMAPPED_BASE;
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+diff -Nurp linux-2.4.25/arch/mips64/mm/fault.c linux-2.4.25-pax/arch/mips64/mm/fault.c
+--- linux-2.4.25/arch/mips64/mm/fault.c 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/mips64/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -90,6 +90,24 @@ void bust_spinlocks(int yes)
+ }
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+diff -Nurp linux-2.4.25/arch/parisc/config.in linux-2.4.25-pax/arch/parisc/config.in
+--- linux-2.4.25/arch/parisc/config.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/parisc/config.in 2004-02-19 11:12:53.000000000 -0500
+@@ -202,5 +202,67 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++ if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++ bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++ fi
++ bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++ if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++ bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++ fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++# bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ bool ' Allow ELF ET_EXEC text relocations' CONFIG_PAX_ETEXECRELOCS
++ fi
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source crypto/Config.in
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/parisc/kernel/sys_parisc.c linux-2.4.25-pax/arch/parisc/kernel/sys_parisc.c
+--- linux-2.4.25/arch/parisc/kernel/sys_parisc.c 2002-11-28 18:53:10.000000000 -0500
++++ linux-2.4.25-pax/arch/parisc/kernel/sys_parisc.c 2004-02-19 11:12:53.000000000 -0500
+@@ -90,6 +90,11 @@ unsigned long arch_get_unmapped_area(str
+ inode = filp->f_dentry->d_inode;
+ }
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++#endif
++
+ if (inode && (flags & MAP_SHARED) && (inode->i_mapping->i_mmap_shared)) {
+ addr = get_shared_area(inode, addr, len, pgoff);
+ } else {
+@@ -104,6 +109,12 @@ static unsigned long do_mmap2(unsigned l
+ {
+ struct file * file = NULL;
+ unsigned long error = -EBADF;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+diff -Nurp linux-2.4.25/arch/parisc/kernel/sys_parisc32.c linux-2.4.25-pax/arch/parisc/kernel/sys_parisc32.c
+--- linux-2.4.25/arch/parisc/kernel/sys_parisc32.c 2003-06-13 10:51:31.000000000 -0400
++++ linux-2.4.25-pax/arch/parisc/kernel/sys_parisc32.c 2004-02-19 11:12:53.000000000 -0500
+@@ -185,6 +185,11 @@ do_execve32(char * filename, u32 * argv,
+ return retval;
+
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+ DBG(("do_execve32(%s, %p, %p, %p)\n", filename, argv, envp, regs));
+diff -Nurp linux-2.4.25/arch/parisc/kernel/traps.c linux-2.4.25-pax/arch/parisc/kernel/traps.c
+--- linux-2.4.25/arch/parisc/kernel/traps.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/parisc/kernel/traps.c 2004-02-19 11:12:53.000000000 -0500
+@@ -637,9 +637,7 @@ void handle_interruption(int code, struc
+
+ down_read(&current->mm->mmap_sem);
+ vma = find_vma(current->mm,regs->iaoq[0]);
+- if (vma && (regs->iaoq[0] >= vma->vm_start)
+- && (vma->vm_flags & VM_EXEC)) {
+-
++ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
+ fault_address = regs->iaoq[0];
+ fault_space = regs->iasq[0];
+
+diff -Nurp linux-2.4.25/arch/parisc/mm/fault.c linux-2.4.25-pax/arch/parisc/mm/fault.c
+--- linux-2.4.25/arch/parisc/mm/fault.c 2003-06-13 10:51:31.000000000 -0400
++++ linux-2.4.25-pax/arch/parisc/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -15,6 +15,7 @@
+ #include <linux/ptrace.h>
+ #include <linux/sched.h>
+ #include <linux/interrupt.h>
++#include <linux/unistd.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/traps.h>
+@@ -53,7 +54,7 @@
+ static unsigned long
+ parisc_acctyp(unsigned long code, unsigned int inst)
+ {
+- if (code == 6 || code == 16)
++ if (code == 6 || code == 7 || code == 16)
+ return VM_EXEC;
+
+ switch (inst & 0xf0000000) {
+@@ -139,6 +140,136 @@ parisc_acctyp(unsigned long code, unsign
+ }
+ #endif
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when rt_sigreturn trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (instruction_pointer(regs) >= current->mm->start_code &&
++ instruction_pointer(regs) < current->mm->end_code)
++ {
++#if 0
++ /* PaX: this needs fixing */
++ if ((regs->gr[2] & ~3UL) == instruction_pointer(regs))
++ return 1;
++#endif
++ regs->iaoq[0] += current->mm->delta_exec;
++ if ((regs->iaoq[1] & ~3UL) >= current->mm->start_code &&
++ (regs->iaoq[1] & ~3UL) < current->mm->end_code)
++ regs->iaoq[1] += current->mm->delta_exec;
++ return 4;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: unpatched PLT emulation */
++ unsigned int bl, depwi;
++
++ err = get_user(bl, (unsigned int*)instruction_pointer(regs));
++ err |= get_user(depwi, (unsigned int*)(instruction_pointer(regs)+4));
++
++ if (err)
++ break;
++
++ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
++ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
++
++ err = get_user(ldw, (unsigned int*)addr);
++ err |= get_user(bv, (unsigned int*)(addr+4));
++ err |= get_user(ldw2, (unsigned int*)(addr+8));
++
++ if (err)
++ break;
++
++ if (ldw == 0x0E801096U &&
++ bv == 0xEAC0C000U &&
++ ldw2 == 0x0E881095U)
++ {
++ unsigned int resolver, map;
++
++ err = get_user(resolver, (unsigned int*)(instruction_pointer(regs)+8));
++ err |= get_user(map, (unsigned int*)(instruction_pointer(regs)+12));
++ if (err)
++ break;
++
++ regs->gr[20] = instruction_pointer(regs)+8;
++ regs->gr[21] = map;
++ regs->gr[22] = resolver;
++ regs->iaoq[0] = resolver | 3UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ return 3;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++
++#ifndef CONFIG_PAX_EMUSIGRT
++ if (!(current->flags & PF_PAX_EMUTRAMP))
++ return 1;
++#endif
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int ldi1, ldi2, bel, nop;
++
++ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
++ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
++ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
++ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
++
++ if (err)
++ break;
++
++ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
++ ldi2 == 0x3414015AU &&
++ bel == 0xE4008200U &&
++ nop == 0x08000240U)
++ {
++ regs->gr[25] = (ldi1 & 2) >> 1;
++ regs->gr[20] = __NR_rt_sigreturn;
++ regs->gr[31] = regs->iaoq[1] + 16;
++ regs->sr[0] = regs->iasq[1];
++ regs->iaoq[0] = 0x100UL;
++ regs->iaoq[1] = regs->iaoq[0] + 4;
++ regs->iasq[0] = regs->sr[2];
++ regs->iasq[1] = regs->sr[2];
++ return 2;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ void do_page_fault(struct pt_regs *regs, unsigned long code,
+ unsigned long address)
+ {
+@@ -164,8 +295,38 @@ good_area:
+
+ acc_type = parisc_acctyp(code,regs->iir);
+
+- if ((vma->vm_flags & acc_type) != acc_type)
++ if ((vma->vm_flags & acc_type) != acc_type) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
++ (address & ~3UL) == instruction_pointer(regs))
++ {
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)instruction_pointer(regs), (void*)regs->gr[30]);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ goto bad_area;
++ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault, make
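
All of the emulation paths added to this file follow the same shape: read a handful of instruction words from the faulting PC with get_user(), bail out if any read faults, and emulate only when every word matches a known encoding. A compressed sketch of that shape for the parisc rt_sigreturn detector, using the same four word values as the hunk above; user_read_u32() is a hypothetical stand-in for get_user() so the snippet runs in userspace:

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* hypothetical stand-in for the kernel's get_user(); returns 0 on success */
    static int user_read_u32(uint32_t *dst, const uint32_t *uaddr)
    {
        memcpy(dst, uaddr, sizeof(*dst));
        return 0;
    }

    static int looks_like_rt_sigreturn(const uint32_t *pc)
    {
        uint32_t insn[4];
        int err = 0, i;

        for (i = 0; i < 4; i++)
            err |= user_read_u32(&insn[i], pc + i);
        if (err)
            return 0;        /* unreadable: no match, the caller kills the task */

        return (insn[0] == 0x34190000U || insn[0] == 0x34190002U) &&
                insn[1] == 0x3414015AU &&
                insn[2] == 0xE4008200U &&
                insn[3] == 0x08000240U;
    }

    int main(void)
    {
        static const uint32_t tramp[4] =
            { 0x34190002U, 0x3414015AU, 0xE4008200U, 0x08000240U };
        printf("%d\n", looks_like_rt_sigreturn(tramp));   /* prints 1 */
        return 0;
    }
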
+diff -Nurp linux-2.4.25/arch/ppc/config.in linux-2.4.25-pax/arch/ppc/config.in
+--- linux-2.4.25/arch/ppc/config.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/ppc/config.in 2004-02-19 11:12:53.000000000 -0500
+@@ -634,3 +634,61 @@ fi
+ int 'Kernel messages buffer length shift (0 = default)' CONFIG_LOG_BUF_SHIFT 0
+
+ endmenu
++
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++ if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++ bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++ fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++# bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ fi
++ define_bool CONFIG_PAX_SYSCALL y
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
+diff -Nurp linux-2.4.25/arch/ppc/kernel/head_4xx.S linux-2.4.25-pax/arch/ppc/kernel/head_4xx.S
+--- linux-2.4.25/arch/ppc/kernel/head_4xx.S 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/ppc/kernel/head_4xx.S 2004-02-19 11:12:53.000000000 -0500
+@@ -296,15 +296,12 @@ label:
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+- * We set execute, because we don't have the granularity to
+- * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+ li r22, 0x0ce2
+ andc r21, r21, r22 /* Make sure 20, 21 are zero */
+- ori r21, r21, _PAGE_HWEXEC /* make it executable */
+
+ /* find the TLB index that caused the fault. It has to be here.
+ */
+@@ -783,7 +780,6 @@ finish_tlb_load:
+ stw r23, tlb_4xx_index@l(0)
+
+ 6:
+- ori r21, r21, _PAGE_HWEXEC /* make it executable */
+ tlbwe r21, r23, TLB_DATA /* Load TLB LO */
+
+ /* Create EPN. This is the faulting address plus a static
+diff -Nurp linux-2.4.25/arch/ppc/kernel/syscalls.c linux-2.4.25-pax/arch/ppc/kernel/syscalls.c
+--- linux-2.4.25/arch/ppc/kernel/syscalls.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/ppc/kernel/syscalls.c 2004-02-19 11:12:53.000000000 -0500
+@@ -191,6 +191,11 @@ do_mmap2(unsigned long addr, size_t len,
+ struct file * file = NULL;
+ int ret = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ if (!(file = fget(fd)))
+@@ -202,7 +207,7 @@ do_mmap2(unsigned long addr, size_t len,
+ goto out;
+
+ down_write(&current->mm->mmap_sem);
+- ret = do_mmap_pgoff(file, addr, len, prot, flags, pgoff);
++ ret = do_mmap(file, addr, len, prot, flags, pgoff << PAGE_SHIFT);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
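
The last hunk above replaces do_mmap_pgoff(), which takes a page-granular offset, with the do_mmap() wrapper, which in 2.4 expects a byte offset, hence the pgoff << PAGE_SHIFT conversion. A one-line illustration of that scaling:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
        unsigned long pgoff = 3;                    /* fourth page of the file */
        unsigned long off   = pgoff << PAGE_SHIFT;  /* 0x3000 bytes            */

        printf("pgoff %lu -> byte offset 0x%lx\n", pgoff, off);
        return 0;
    }
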
+diff -Nurp linux-2.4.25/arch/ppc/mm/fault.c linux-2.4.25-pax/arch/ppc/mm/fault.c
+--- linux-2.4.25/arch/ppc/mm/fault.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/ppc/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -26,6 +26,9 @@
+ #include <linux/mman.h>
+ #include <linux/mm.h>
+ #include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -52,6 +55,360 @@ extern void die_if_kernel(char *, struct
+ void bad_page_fault(struct pt_regs *, unsigned long, int sig);
+ void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
+
++#ifdef CONFIG_PAX_EMUSIGRT
++void pax_syscall_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_syscall = 0UL;
++}
++
++static struct page* pax_syscall_nopage(struct vm_area_struct *vma, unsigned long address, int write_access)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return page;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x44000002U; /* sc */
++ __flush_dcache_icache(kaddr);
++ kunmap(page);
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_syscall_close,
++ nopage: pax_syscall_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ vma->vm_pgoff = 0UL;
++ vma->vm_file = NULL;
++ vma->vm_private_data = NULL;
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->nip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched GOT trampoline was detected
++ * 3 when patched PLT trampoline was detected
++ * 4 when unpatched PLT trampoline was detected
++ * 5 when legitimate ET_EXEC was detected
++ * 6 when sigreturn trampoline was detected
++ * 7 when rt_sigreturn trampoline was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->nip >= current->mm->start_code &&
++ regs->nip < current->mm->end_code)
++ {
++ if (regs->link == regs->nip)
++ return 1;
++
++ regs->nip += current->mm->delta_exec;
++ return 5;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched GOT emulation */
++ unsigned int blrl;
++
++ err = get_user(blrl, (unsigned int*)regs->nip);
++
++ if (!err && blrl == 0x4E800021U) {
++ unsigned long temp = regs->nip;
++
++ regs->nip = regs->link & 0xFFFFFFFCUL;
++ regs->link = temp + 4UL;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int b;
++
++ err = get_user(b, (unsigned int *)regs->nip);
++
++ if (!err && (b & 0xFC000003U) == 0x48000000U) {
++ regs->nip += (((b | 0xFC000000UL) ^ 0x02000000UL) + 0x02000000UL);
++ return 3;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation #1 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(rlwinm, (unsigned int*)addr);
++ err |= get_user(add, (unsigned int*)(addr+4));
++ err |= get_user(li2, (unsigned int*)(addr+8));
++ err |= get_user(addis2, (unsigned int*)(addr+12));
++ err |= get_user(mtctr, (unsigned int*)(addr+16));
++ err |= get_user(li3, (unsigned int*)(addr+20));
++ err |= get_user(addis3, (unsigned int*)(addr+24));
++ err |= get_user(bctr, (unsigned int*)(addr+28));
++
++ if (err)
++ break;
++
++ if (rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++
++#if 0
++ do { /* PaX: unpatched PLT emulation #2 */
++ unsigned int lis, lwzu, b, bctr;
++
++ err = get_user(lis, (unsigned int *)regs->nip);
++ err |= get_user(lwzu, (unsigned int *)(regs->nip+4));
++ err |= get_user(b, (unsigned int *)(regs->nip+8));
++ err |= get_user(bctr, (unsigned int *)(regs->nip+12));
++
++ if (err)
++ break;
++
++ if ((lis & 0xFFFF0000U) == 0x39600000U &&
++ (lwzu & 0xU) == 0xU &&
++ (b & 0xFC000003U) == 0x48000000U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int addis, addi, rlwinm, add, li2, addis2, mtctr, li3, addis3, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 12 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int*)addr);
++ err |= get_user(addi, (unsigned int*)(addr+4));
++ err |= get_user(rlwinm, (unsigned int*)(addr+8));
++ err |= get_user(add, (unsigned int*)(addr+12));
++ err |= get_user(li2, (unsigned int*)(addr+16));
++ err |= get_user(addis2, (unsigned int*)(addr+20));
++ err |= get_user(mtctr, (unsigned int*)(addr+24));
++ err |= get_user(li3, (unsigned int*)(addr+28));
++ err |= get_user(addis3, (unsigned int*)(addr+32));
++ err |= get_user(bctr, (unsigned int*)(addr+36));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (addi & 0xFFFF0000U) == 0x396B0000U &&
++ rlwinm == 0x556C083CU &&
++ add == 0x7D6C5A14U &&
++ (li2 & 0xFFFF0000U) == 0x39800000U &&
++ (addis2 & 0xFFFF0000U) == 0x3D8C0000U &&
++ mtctr == 0x7D8903A6U &&
++ (li3 & 0xFFFF0000U) == 0x39800000U &&
++ (addis3 & 0xFFFF0000U) == 0x3D8C0000U &&
++ bctr == 0x4E800420U)
++ {
++ regs->gpr[PT_R11] =
++ regs->gpr[PT_R11] = 3 * (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] = (((li3 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->gpr[PT_R12] += (addis3 & 0xFFFFU) << 16;
++ regs->ctr = (((li2 | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ regs->ctr += (addis2 & 0xFFFFU) << 16;
++ regs->nip = regs->ctr;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++ do { /* PaX: unpatched PLT emulation #3 */
++ unsigned int li, b;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(b, (unsigned int *)(regs->nip+4));
++
++ if (!err && (li & 0xFFFF0000U) == 0x39600000U && (b & 0xFC000003U) == 0x48000000U) {
++ unsigned int addis, lwz, mtctr, bctr;
++ unsigned long addr = b | 0xFC000000UL;
++
++ addr = regs->nip + 4 + ((addr ^ 0x02000000UL) + 0x02000000UL);
++ err = get_user(addis, (unsigned int*)addr);
++ err |= get_user(lwz, (unsigned int*)(addr+4));
++ err |= get_user(mtctr, (unsigned int*)(addr+8));
++ err |= get_user(bctr, (unsigned int*)(addr+12));
++
++ if (err)
++ break;
++
++ if ((addis & 0xFFFF0000U) == 0x3D6B0000U &&
++ (lwz & 0xFFFF0000U) == 0x816B0000U &&
++ mtctr == 0x7D6903A6U &&
++ bctr == 0x4E800420U)
++ {
++ unsigned int r11;
++
++ addr = (addis << 16) + (((li | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++ addr += (((lwz | 0xFFFF0000UL) ^ 0x00008000UL) + 0x00008000UL);
++
++ err = get_user(r11, (unsigned int*)addr);
++ if (err)
++ break;
++
++ regs->gpr[PT_R11] = r11;
++ regs->ctr = r11;
++ regs->nip = r11;
++ return 4;
++ }
++ }
++ } while (0);
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ do { /* PaX: sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38007777U && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned long call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_syscall);
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->gpr[PT_R0] = 0x7777UL;
++ regs->nip = call_syscall;
++ return 6;
++ }
++ } while (0);
++
++ do { /* PaX: rt_sigreturn emulation */
++ unsigned int li, sc;
++
++ err = get_user(li, (unsigned int *)regs->nip);
++ err |= get_user(sc, (unsigned int *)(regs->nip+4));
++
++ if (!err && li == 0x38006666U && sc == 0x44000002U) {
++ struct vm_area_struct *vma;
++ unsigned int call_syscall;
++
++ down_read(&current->mm->mmap_sem);
++ call_syscall = current->mm->call_syscall;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_syscall))
++ goto rt_emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_syscall) {
++ call_syscall = current->mm->call_syscall;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto rt_emulate;
++ }
++
++ call_syscall = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_syscall & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_syscall);
++ current->mm->call_syscall = call_syscall;
++ up_write(&current->mm->mmap_sem);
++
++rt_emulate:
++ regs->gpr[PT_R0] = 0x6666UL;
++ regs->nip = call_syscall;
++ return 7;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ /*
+ * Check whether the instruction at regs->nip is a store using
+ * an update addressing form which will update r1.
+@@ -112,7 +469,7 @@ void do_page_fault(struct pt_regs *regs,
+ * indicate errors in DSISR but can validly be set in SRR1.
+ */
+ if (regs->trap == 0x400)
+- error_code &= 0x48200000;
++ error_code &= 0x58200000;
+ else
+ is_write = error_code & 0x02000000;
+ #endif /* CONFIG_4xx || CONFIG_BOOKE */
+@@ -245,6 +602,38 @@ bad_area:
+
+ /* User mode accesses cause a SIGSEGV */
+ if (user_mode(regs)) {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC) {
++ if ((regs->trap == 0x400) && (regs->nip == address)) {
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ case 4:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 5:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_EMUSIGRT
++ case 6:
++ case 7:
++ return;
++#endif
++
++ }
++
++ pax_report_fault(regs, (void*)regs->nip, (void*)regs->gpr[1]);
++ do_exit(SIGKILL);
++ }
++ }
++#endif
++
+ info.si_signo = SIGSEGV;
+ info.si_errno = 0;
+ info.si_code = code;
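
The sigreturn and rt_sigreturn emulation added to this file installs a single shared stub page per mm (mm->call_syscall) and uses a check, allocate, re-check sequence around mmap_sem so that two faults racing to install it only ever map one page. A userspace sketch of that locking pattern, with a pthread rwlock standing in for mmap_sem and install_stub() as a hypothetical placeholder for the get_unmapped_area()/pax_insert_vma() step:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_rwlock_t mm_lock = PTHREAD_RWLOCK_INITIALIZER;
    static unsigned long call_syscall;             /* 0 = stub not installed yet */

    static unsigned long install_stub(void)
    {
        return 0x100000UL;                         /* fake address of the stub page */
    }

    static unsigned long get_syscall_stub(void)
    {
        unsigned long addr;

        pthread_rwlock_rdlock(&mm_lock);           /* fast path: already installed? */
        addr = call_syscall;
        pthread_rwlock_unlock(&mm_lock);
        if (addr)
            return addr;

        pthread_rwlock_wrlock(&mm_lock);           /* slow path: re-check under the write lock */
        if (!call_syscall)
            call_syscall = install_stub();
        addr = call_syscall;
        pthread_rwlock_unlock(&mm_lock);
        return addr;
    }

    int main(void)
    {
        printf("stub at 0x%lx\n", get_syscall_stub());
        return 0;
    }

The kernel version additionally allocates the vm_area_struct before taking the write lock and frees it again if another fault won the race.
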
+diff -Nurp linux-2.4.25/arch/sparc/config.in linux-2.4.25-pax/arch/sparc/config.in
+--- linux-2.4.25/arch/sparc/config.in 2004-02-18 08:36:30.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc/config.in 2004-02-19 11:12:53.000000000 -0500
+@@ -280,5 +280,65 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++# if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++# bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++# fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++# bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ if [ "$CONFIG_PAX_EMUPLT" = "y" ]; then
++ define_bool CONFIG_PAX_DLRESOLVE y
++ fi
++ fi
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source crypto/Config.in
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/sparc/kernel/sys_sparc.c linux-2.4.25-pax/arch/sparc/kernel/sys_sparc.c
+--- linux-2.4.25/arch/sparc/kernel/sys_sparc.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/sparc/kernel/sys_sparc.c 2004-02-19 11:12:53.000000000 -0500
+@@ -54,6 +54,13 @@ unsigned long arch_get_unmapped_area(str
+ return -ENOMEM;
+ if (ARCH_SUN4C_SUN4 && len > 0x20000000)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -225,6 +232,11 @@ static unsigned long do_mmap2(unsigned l
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
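
With CONFIG_PAX_RANDMMAP enabled, the `else` added above binds to the existing `if (!addr)`, so the randomized base replaces the plain TASK_UNMAPPED_BASE fallback whenever the caller gave no hint or the mapping is file-backed. How the code reads once the preprocessor has run, with illustrative values only:

    #include <stdio.h>

    #define TASK_UNMAPPED_BASE 0x70000000UL         /* illustrative base, not sparc's */

    int main(void)
    {
        unsigned long addr = 0;                     /* caller passed no hint          */
        int randmmap = 1, has_file = 0;
        unsigned long delta_mmap = 0x00350000UL;    /* per-mm random delta (made up)  */

        if (randmmap && (!addr || has_file))
            addr = TASK_UNMAPPED_BASE + delta_mmap;
        else if (!addr)
            addr = TASK_UNMAPPED_BASE;

        printf("search starts at 0x%lx\n", addr);
        return 0;
    }
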
+diff -Nurp linux-2.4.25/arch/sparc/kernel/sys_sunos.c linux-2.4.25-pax/arch/sparc/kernel/sys_sunos.c
+--- linux-2.4.25/arch/sparc/kernel/sys_sunos.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc/kernel/sys_sunos.c 2004-02-19 11:12:53.000000000 -0500
+@@ -68,6 +68,11 @@ asmlinkage unsigned long sunos_mmap(unsi
+ struct file * file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if(flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+diff -Nurp linux-2.4.25/arch/sparc/mm/fault.c linux-2.4.25-pax/arch/sparc/mm/fault.c
+--- linux-2.4.25/arch/sparc/mm/fault.c 2003-06-13 10:51:32.000000000 -0400
++++ linux-2.4.25-pax/arch/sparc/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -19,6 +19,9 @@
+ #include <linux/smp.h>
+ #include <linux/smp_lock.h>
+ #include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/system.h>
+ #include <asm/segment.h>
+@@ -200,6 +203,265 @@ asmlinkage int lookup_fault(unsigned lon
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++void pax_emuplt_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int write_access)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return page;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(page);
++ kunmap(page);
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_emuplt_close,
++ nopage: pax_emuplt_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ vma->vm_pgoff = 0UL;
++ vma->vm_file = NULL;
++ vma->vm_private_data = NULL;
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++
++/*
++ * PaX: decide what to do with offenders (regs->pc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->pc >= current->mm->start_code &&
++ regs->pc < current->mm->end_code)
++ {
++ if (regs->u_regs[UREG_RETPC] + 8UL == regs->pc)
++ return 1;
++
++ regs->pc += current->mm->delta_exec;
++ if (regs->npc >= current->mm->start_code &&
++ regs->npc < current->mm->end_code)
++ regs->npc += current->mm->delta_exec;
++ return 4;
++ }
++ if (regs->pc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->pc < current->mm->end_code + current->mm->delta_exec)
++ {
++ regs->pc -= current->mm->delta_exec;
++ if (regs->npc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->npc < current->mm->end_code + current->mm->delta_exec)
++ regs->npc -= current->mm->delta_exec;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int*)regs->pc);
++ err |= get_user(sethi2, (unsigned int*)(regs->pc+4));
++ err |= get_user(jmpl, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned int addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int*)regs->pc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned int addr;
++
++ addr = regs->pc + 4 + (((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->pc);
++ err |= get_user(jmpl, (unsigned int*)(regs->pc+4));
++ err |= get_user(nop, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
++ regs->pc = addr;
++ regs->npc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->pc);
++ err |= get_user(ba, (unsigned int*)(regs->pc+4));
++ err |= get_user(nop, (unsigned int*)(regs->pc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned int addr, save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
++ else
++ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
++
++ err = get_user(save, (unsigned int*)addr);
++ err |= get_user(call, (unsigned int*)(addr+4));
++ err |= get_user(nop, (unsigned int*)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_dl_resolve);
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->pc = call_dl_resolve;
++ regs->npc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int*)(regs->pc-4));
++ err |= get_user(call, (unsigned int*)regs->pc);
++ err |= get_user(nop, (unsigned int*)(regs->pc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->pc;
++ regs->pc = dl_resolve;
++ regs->npc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
+ unsigned long address)
+ {
+@@ -263,6 +525,29 @@ good_area:
+ if(!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ return;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->pc, (void*)regs->u_regs[UREG_FP]);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ /* Allow reads even for write-only mappings */
+ if(!(vma->vm_flags & (VM_READ | VM_EXEC)))
+ goto bad_area;
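
pax_report_insns(), added here and in the other fault handlers in this patch, dumps the five 32-bit words starting at the faulting PC so the offending code sequence ends up in the kernel log next to the PAX fault report. A userspace rendition of the same dump over a static buffer:

    #include <stdio.h>

    static void report_insns(const unsigned int *pc)
    {
        int i;

        printf("PAX: bytes at PC: ");
        for (i = 0; i < 5; i++)
            printf("%08x ", pc[i]);   /* the kernel reads via get_user() and stops at a bad address */
        printf("\n");
    }

    int main(void)
    {
        static const unsigned int words[5] =
            { 0x9DE3BFA8U, 0x40000000U, 0x01000000U, 0U, 0U };
        report_insns(words);
        return 0;
    }
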
+diff -Nurp linux-2.4.25/arch/sparc/mm/init.c linux-2.4.25-pax/arch/sparc/mm/init.c
+--- linux-2.4.25/arch/sparc/mm/init.c 2002-11-28 18:53:12.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc/mm/init.c 2004-02-19 11:12:53.000000000 -0500
+@@ -350,17 +350,17 @@ void __init paging_init(void)
+
+ /* Initialize the protection map with non-constant, MMU dependent values. */
+ protection_map[0] = PAGE_NONE;
+- protection_map[1] = PAGE_READONLY;
+- protection_map[2] = PAGE_COPY;
+- protection_map[3] = PAGE_COPY;
++ protection_map[1] = PAGE_READONLY_NOEXEC;
++ protection_map[2] = PAGE_COPY_NOEXEC;
++ protection_map[3] = PAGE_COPY_NOEXEC;
+ protection_map[4] = PAGE_READONLY;
+ protection_map[5] = PAGE_READONLY;
+ protection_map[6] = PAGE_COPY;
+ protection_map[7] = PAGE_COPY;
+ protection_map[8] = PAGE_NONE;
+- protection_map[9] = PAGE_READONLY;
+- protection_map[10] = PAGE_SHARED;
+- protection_map[11] = PAGE_SHARED;
++ protection_map[9] = PAGE_READONLY_NOEXEC;
++ protection_map[10] = PAGE_SHARED_NOEXEC;
++ protection_map[11] = PAGE_SHARED_NOEXEC;
+ protection_map[12] = PAGE_READONLY;
+ protection_map[13] = PAGE_READONLY;
+ protection_map[14] = PAGE_SHARED;
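
protection_map[] is indexed by the low four vm_flags bits (VM_READ, VM_WRITE, VM_EXEC, VM_SHARED), and the hunk above only repoints the slots whose VM_EXEC bit is clear at the new *_NOEXEC protections; the executable and no-access slots keep their old values. A sketch of that indexing, with the flag values from 2.4's <linux/mm.h>:

    #include <stdio.h>

    #define VM_READ   0x0001UL
    #define VM_WRITE  0x0002UL
    #define VM_EXEC   0x0004UL
    #define VM_SHARED 0x0008UL

    int main(void)
    {
        unsigned long flags = VM_READ | VM_WRITE;   /* private rw-, not executable */
        unsigned int idx = (unsigned int)(flags & 0x0f);

        printf("protection_map[%u] -> %s\n", idx,
               (flags & VM_EXEC) ? "unchanged (executable)" : "*_NOEXEC under PAGEEXEC");
        return 0;
    }
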
+diff -Nurp linux-2.4.25/arch/sparc/mm/srmmu.c linux-2.4.25-pax/arch/sparc/mm/srmmu.c
+--- linux-2.4.25/arch/sparc/mm/srmmu.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc/mm/srmmu.c 2004-02-19 11:12:53.000000000 -0500
+@@ -2047,6 +2047,13 @@ void __init ld_mmu_srmmu(void)
+ BTFIXUPSET_INT(page_shared, pgprot_val(SRMMU_PAGE_SHARED));
+ BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
+ BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ BTFIXUPSET_INT(page_shared_noexec, pgprot_val(SRMMU_PAGE_SHARED_NOEXEC));
++ BTFIXUPSET_INT(page_copy_noexec, pgprot_val(SRMMU_PAGE_COPY_NOEXEC));
++ BTFIXUPSET_INT(page_readonly_noexec, pgprot_val(SRMMU_PAGE_RDONLY_NOEXEC));
++#endif
++
+ BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
+ page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);
+ pg_iobits = SRMMU_VALID | SRMMU_WRITE | SRMMU_REF;
+diff -Nurp linux-2.4.25/arch/sparc64/config.in linux-2.4.25-pax/arch/sparc64/config.in
+--- linux-2.4.25/arch/sparc64/config.in 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc64/config.in 2004-02-19 11:12:53.000000000 -0500
+@@ -316,5 +316,65 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++# if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++# bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++# fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++# bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ bool ' Automatically emulate ELF PLT' CONFIG_PAX_EMUPLT
++ if [ "$CONFIG_PAX_EMUPLT" = "y" ]; then
++ define_bool CONFIG_PAX_DLRESOLVE y
++ fi
++ fi
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source crypto/Config.in
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/sparc64/kernel/itlb_base.S linux-2.4.25-pax/arch/sparc64/kernel/itlb_base.S
+--- linux-2.4.25/arch/sparc64/kernel/itlb_base.S 2003-06-13 10:51:32.000000000 -0400
++++ linux-2.4.25-pax/arch/sparc64/kernel/itlb_base.S 2004-02-19 11:12:53.000000000 -0500
+@@ -41,7 +41,9 @@
+ CREATE_VPTE_OFFSET2(%g4, %g6) ! Create VPTE offset
+ ldxa [%g3 + %g6] ASI_P, %g5 ! Load VPTE
+ 1: brgez,pn %g5, 3f ! Not valid, branch out
+- nop ! Delay-slot
++ and %g5, _PAGE_EXEC, %g4
++ brz,pn %g4, 3f ! Not executable, branch out
++ nop ! Delay-slot
+ 2: stxa %g5, [%g0] ASI_ITLB_DATA_IN ! Load PTE into TLB
+ retry ! Trap return
+ 3: rdpr %pstate, %g4 ! Move into alternate globals
+@@ -74,8 +76,6 @@ winfix_trampoline:
+ nop
+ nop
+ nop
+- nop
+- nop
+ CREATE_VPTE_NOP
+
+ #undef CREATE_VPTE_OFFSET1
+diff -Nurp linux-2.4.25/arch/sparc64/kernel/sys_sparc.c linux-2.4.25-pax/arch/sparc64/kernel/sys_sparc.c
+--- linux-2.4.25/arch/sparc64/kernel/sys_sparc.c 2003-08-25 07:44:40.000000000 -0400
++++ linux-2.4.25-pax/arch/sparc64/kernel/sys_sparc.c 2004-02-19 11:12:53.000000000 -0500
+@@ -63,6 +63,13 @@ unsigned long arch_get_unmapped_area(str
+ task_size = 0xf0000000UL;
+ if (len > task_size || len > -PAGE_OFFSET)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_BASE + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_BASE;
+
+@@ -289,6 +296,11 @@ asmlinkage unsigned long sys_mmap(unsign
+ struct file * file = NULL;
+ unsigned long retval = -EBADF;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+diff -Nurp linux-2.4.25/arch/sparc64/kernel/sys_sparc32.c linux-2.4.25-pax/arch/sparc64/kernel/sys_sparc32.c
+--- linux-2.4.25/arch/sparc64/kernel/sys_sparc32.c 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc64/kernel/sys_sparc32.c 2004-02-19 11:12:53.000000000 -0500
+@@ -3235,6 +3235,11 @@ do_execve32(char * filename, u32 * argv,
+ int i;
+
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
+
+ file = open_exec(filename);
+diff -Nurp linux-2.4.25/arch/sparc64/kernel/sys_sunos32.c linux-2.4.25-pax/arch/sparc64/kernel/sys_sunos32.c
+--- linux-2.4.25/arch/sparc64/kernel/sys_sunos32.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc64/kernel/sys_sunos32.c 2004-02-19 11:12:53.000000000 -0500
+@@ -68,6 +68,11 @@ asmlinkage u32 sunos_mmap(u32 addr, u32
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ if(flags & MAP_NORESERVE) {
+ static int cnt;
+ if (cnt++ < 10)
+diff -Nurp linux-2.4.25/arch/sparc64/mm/fault.c linux-2.4.25-pax/arch/sparc64/mm/fault.c
+--- linux-2.4.25/arch/sparc64/mm/fault.c 2002-11-28 18:53:12.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc64/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -16,6 +16,9 @@
+ #include <linux/smp_lock.h>
+ #include <linux/init.h>
+ #include <linux/interrupt.h>
++#include <linux/slab.h>
++#include <linux/pagemap.h>
++#include <linux/compiler.h>
+
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+@@ -299,6 +302,361 @@ cannot_handle:
+ unhandled_fault (address, current, regs);
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_EMUPLT
++static void pax_emuplt_close(struct vm_area_struct * vma)
++{
++ vma->vm_mm->call_dl_resolve = 0UL;
++}
++
++static struct page* pax_emuplt_nopage(struct vm_area_struct *vma, unsigned long address, int write_access)
++{
++ struct page* page;
++ unsigned int *kaddr;
++
++ page = alloc_page(GFP_HIGHUSER);
++ if (!page)
++ return page;
++
++ kaddr = kmap(page);
++ memset(kaddr, 0, PAGE_SIZE);
++ kaddr[0] = 0x9DE3BFA8U; /* save */
++ flush_dcache_page(page);
++ kunmap(page);
++ return page;
++}
++
++static struct vm_operations_struct pax_vm_ops = {
++ close: pax_emuplt_close,
++ nopage: pax_emuplt_nopage,
++};
++
++static void pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
++{
++ vma->vm_mm = current->mm;
++ vma->vm_start = addr;
++ vma->vm_end = addr + PAGE_SIZE;
++ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++ vma->vm_ops = &pax_vm_ops;
++ vma->vm_pgoff = 0UL;
++ vma->vm_file = NULL;
++ vma->vm_private_data = NULL;
++ insert_vm_struct(current->mm, vma);
++ ++current->mm->total_vm;
++}
++#endif
++
++/*
++ * PaX: decide what to do with offenders (regs->tpc = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when patched PLT trampoline was detected
++ * 3 when unpatched PLT trampoline was detected
++ * 4 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++
++#ifdef CONFIG_PAX_EMUPLT
++ int err;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->tpc >= current->mm->start_code &&
++ regs->tpc < current->mm->end_code)
++ {
++ if (regs->u_regs[UREG_RETPC] + 8UL == regs->tpc)
++ return 1;
++
++ regs->tpc += current->mm->delta_exec;
++ if (regs->tnpc >= current->mm->start_code &&
++ regs->tnpc < current->mm->end_code)
++ regs->tnpc += current->mm->delta_exec;
++ return 4;
++ }
++ if (regs->tpc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->tpc < current->mm->end_code + current->mm->delta_exec)
++ {
++ regs->tpc -= current->mm->delta_exec;
++ if (regs->tnpc >= current->mm->start_code + current->mm->delta_exec &&
++ regs->tnpc < current->mm->end_code + current->mm->delta_exec)
++ regs->tnpc -= current->mm->delta_exec;
++ }
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUPLT
++ do { /* PaX: patched PLT emulation #1 */
++ unsigned int sethi1, sethi2, jmpl;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
++ addr = regs->u_regs[UREG_G1];
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ { /* PaX: patched PLT emulation #2 */
++ unsigned int ba;
++
++ err = get_user(ba, (unsigned int*)regs->tpc);
++
++ if (!err && (ba & 0xFFC00000U) == 0x30800000U) {
++ unsigned long addr;
++
++ addr = regs->tpc + 4 + (((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ }
++
++ do { /* PaX: patched PLT emulation #3 */
++ unsigned int sethi, jmpl, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->tpc);
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ (jmpl & 0xFFFFE000U) == 0x81C06000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ addr = (sethi & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] = addr;
++ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #4 */
++ unsigned int mov1, call, mov2;
++
++ err = get_user(mov1, (unsigned int*)regs->tpc);
++ err |= get_user(call, (unsigned int*)(regs->tpc+4));
++ err |= get_user(mov2, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if (mov1 == 0x8210000FU &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ mov2 == 0x9E100001U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
++ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #5 */
++ unsigned int sethi1, sethi2, or1, or2, sllx, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(or1, (unsigned int*)(regs->tpc+8));
++ err |= get_user(or2, (unsigned int*)(regs->tpc+12));
++ err |= get_user(sllx, (unsigned int*)(regs->tpc+16));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+20));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+24));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ (or1 & 0xFFFFE000U) == 0x82106000U &&
++ (or2 & 0xFFFFE000U) == 0x8A116000U &&
++ sllx == 0x83287020 &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: patched PLT emulation #6 */
++ unsigned int sethi1, sethi2, sllx, or, jmpl, nop;
++
++ err = get_user(sethi1, (unsigned int*)regs->tpc);
++ err |= get_user(sethi2, (unsigned int*)(regs->tpc+4));
++ err |= get_user(sllx, (unsigned int*)(regs->tpc+8));
++ err |= get_user(or, (unsigned int*)(regs->tpc+12));
++ err |= get_user(jmpl, (unsigned int*)(regs->tpc+16));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+20));
++
++ if (err)
++ break;
++
++ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
++ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
++ sllx == 0x83287020 &&
++ (or & 0xFFFFE000U) == 0x8A116000U &&
++ jmpl == 0x81C04005U &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++
++ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
++ regs->u_regs[UREG_G1] <<= 32;
++ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
++ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
++ regs->tpc = addr;
++ regs->tnpc = addr+4;
++ return 2;
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 1 */
++ unsigned int sethi, ba, nop;
++
++ err = get_user(sethi, (unsigned int*)regs->tpc);
++ err |= get_user(ba, (unsigned int*)(regs->tpc+4));
++ err |= get_user(nop, (unsigned int*)(regs->tpc+8));
++
++ if (err)
++ break;
++
++ if ((sethi & 0xFFC00000U) == 0x03000000U &&
++ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
++ nop == 0x01000000U)
++ {
++ unsigned long addr;
++ unsigned int save, call;
++
++ if ((ba & 0xFFC00000U) == 0x30800000U)
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
++ else
++ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
++
++ err = get_user(save, (unsigned int*)addr);
++ err |= get_user(call, (unsigned int*)(addr+4));
++ err |= get_user(nop, (unsigned int*)(addr+8));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ struct vm_area_struct *vma;
++ unsigned long call_dl_resolve;
++
++ down_read(&current->mm->mmap_sem);
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_read(&current->mm->mmap_sem);
++ if (likely(call_dl_resolve))
++ goto emulate;
++
++ vma = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++
++ down_write(&current->mm->mmap_sem);
++ if (current->mm->call_dl_resolve) {
++ call_dl_resolve = current->mm->call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ goto emulate;
++ }
++
++ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
++ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
++ up_write(&current->mm->mmap_sem);
++ if (vma) kmem_cache_free(vm_area_cachep, vma);
++ return 1;
++ }
++
++ pax_insert_vma(vma, call_dl_resolve);
++ current->mm->call_dl_resolve = call_dl_resolve;
++ up_write(&current->mm->mmap_sem);
++
++emulate:
++ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
++ regs->tpc = call_dl_resolve;
++ regs->tnpc = addr+4;
++ return 3;
++ }
++ }
++ } while (0);
++
++ do { /* PaX: unpatched PLT emulation step 2 */
++ unsigned int save, call, nop;
++
++ err = get_user(save, (unsigned int*)(regs->tpc-4));
++ err |= get_user(call, (unsigned int*)regs->tpc);
++ err |= get_user(nop, (unsigned int*)(regs->tpc+4));
++ if (err)
++ break;
++
++ if (save == 0x9DE3BFA8U &&
++ (call & 0xC0000000U) == 0x40000000U &&
++ nop == 0x01000000U)
++ {
++ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
++
++ regs->u_regs[UREG_RETPC] = regs->tpc;
++ regs->tpc = dl_resolve;
++ regs->tnpc = dl_resolve+4;
++ return 3;
++ }
++ } while (0);
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 5; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned int*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ asmlinkage void do_sparc64_fault(struct pt_regs *regs)
+ {
+ struct mm_struct *mm = current->mm;
+@@ -338,6 +696,7 @@ asmlinkage void do_sparc64_fault(struct
+
+ if ((current->thread.flags & SPARC_FLAG_32BIT) != 0) {
+ regs->tpc &= 0xffffffff;
++ regs->tnpc &= 0xffffffff;
+ address &= 0xffffffff;
+ }
+
+@@ -346,6 +705,34 @@ asmlinkage void do_sparc64_fault(struct
+ if (!vma)
+ goto bad_area;
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ /* PaX: detect ITLB misses on non-exec pages */
++ if ((current->flags & PF_PAX_PAGEEXEC) && vma->vm_start <= address &&
++ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
++ {
++ if (address != regs->tpc)
++ goto good_area;
++
++ up_read(&mm->mmap_sem);
++ switch (pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_EMUPLT
++ case 2:
++ case 3:
++ goto fault_done;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 4:
++ goto fault_done;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->tpc, (void*)(regs->u_regs[UREG_FP] + STACK_BIAS));
++ do_exit(SIGKILL);
++ }
++#endif
++
+ /* Pure DTLB misses do not tell us whether the fault causing
+ * load/store/atomic was a write or not, it only says that there
+ * was no match. So in such a case we (carefully) read the
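
The PAGEEXEC check added to do_sparc64_fault() only treats a fault as an execution attempt when the MMU reported an instruction-TLB miss on a VM_EXEC-less vma and the faulting address is the PC itself; anything else (notably address != regs->tpc) falls through to the normal data-fault path via good_area. The decision, distilled; the FAULT_CODE_ITLB value below is illustrative, not the kernel's:

    #include <stdbool.h>
    #include <stdio.h>

    #define FAULT_CODE_ITLB 0x04U   /* illustrative bit, not sparc64's real encoding */

    static bool exec_attempt(unsigned int fault_code, unsigned long address,
                             unsigned long tpc, bool vma_exec)
    {
        return !vma_exec && (fault_code & FAULT_CODE_ITLB) && address == tpc;
    }

    int main(void)
    {
        printf("%d\n", exec_attempt(FAULT_CODE_ITLB, 0x1000, 0x1000, false)); /* 1: emulate or kill */
        printf("%d\n", exec_attempt(0, 0x1000, 0x2000, false));               /* 0: normal fault    */
        return 0;
    }
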
+diff -Nurp linux-2.4.25/arch/sparc64/solaris/misc.c linux-2.4.25-pax/arch/sparc64/solaris/misc.c
+--- linux-2.4.25/arch/sparc64/solaris/misc.c 2002-11-28 18:53:12.000000000 -0500
++++ linux-2.4.25-pax/arch/sparc64/solaris/misc.c 2004-02-19 11:12:53.000000000 -0500
+@@ -53,6 +53,11 @@ static u32 do_solaris_mmap(u32 addr, u32
+ struct file *file = NULL;
+ unsigned long retval, ret_type;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ /* Do we need it here? */
+ set_personality(PER_SVR4);
+ if (flags & MAP_NORESERVE) {
+diff -Nurp linux-2.4.25/arch/x86_64/config.in linux-2.4.25-pax/arch/x86_64/config.in
+--- linux-2.4.25/arch/x86_64/config.in 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/arch/x86_64/config.in 2004-02-19 11:12:53.000000000 -0500
+@@ -251,4 +251,60 @@ int 'Kernel messages buffer length shift
+
+ endmenu
+
++mainmenu_option next_comment
++comment 'PaX options'
++
++mainmenu_option next_comment
++comment 'PaX Control'
++bool 'Support soft mode' CONFIG_PAX_SOFTMODE
++bool 'Use legacy ELF header marking' CONFIG_PAX_EI_PAX
++bool 'Use ELF program header marking' CONFIG_PAX_PT_PAX_FLAGS
++choice 'MAC system integration' \
++ "none CONFIG_PAX_NO_ACL_FLAGS \
++ direct CONFIG_PAX_HAVE_ACL_FLAGS \
++ hook CONFIG_PAX_HOOK_ACL_FLAGS" none
++endmenu
++
++mainmenu_option next_comment
++comment 'Non-executable pages'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Enforce non-executable pages' CONFIG_PAX_NOEXEC
++ if [ "$CONFIG_PAX_NOEXEC" = "y" ]; then
++ bool 'Paging based non-executable pages' CONFIG_PAX_PAGEEXEC
++ if [ "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++# bool ' Emulate trampolines' CONFIG_PAX_EMUTRAMP
++# if [ "$CONFIG_PAX_EMUTRAMP" = "y" ]; then
++# bool ' Automatically emulate sigreturn trampolines' CONFIG_PAX_EMUSIGRT
++# fi
++ bool ' Restrict mprotect()' CONFIG_PAX_MPROTECT
++ if [ "$CONFIG_PAX_MPROTECT" = "y" ]; then
++ bool ' Disallow ELF text relocations' CONFIG_PAX_NOELFRELOCS
++ fi
++ fi
++ fi
++fi
++endmenu
++
++mainmenu_option next_comment
++comment 'Address Space Layout Randomization'
++if [ "$CONFIG_PAX_EI_PAX" = "y" -o \
++ "$CONFIG_PAX_PT_PAX_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HAVE_ACL_FLAGS" = "y" -o \
++ "$CONFIG_PAX_HOOK_ACL_FLAGS" = "y" ]; then
++ bool 'Address Space Layout Randomization' CONFIG_PAX_ASLR
++ if [ "$CONFIG_PAX_ASLR" = "y" ]; then
++ bool ' Randomize user stack base' CONFIG_PAX_RANDUSTACK
++ bool ' Randomize mmap() base' CONFIG_PAX_RANDMMAP
++ if [ "$CONFIG_PAX_RANDMMAP" = "y" -a "$CONFIG_PAX_PAGEEXEC" = "y" ]; then
++ bool ' Randomize ET_EXEC base' CONFIG_PAX_RANDEXEC
++ fi
++ fi
++fi
++endmenu
++
++endmenu
++
+ source lib/Config.in
+diff -Nurp linux-2.4.25/arch/x86_64/ia32/ia32_binfmt.c linux-2.4.25-pax/arch/x86_64/ia32/ia32_binfmt.c
+--- linux-2.4.25/arch/x86_64/ia32/ia32_binfmt.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/x86_64/ia32/ia32_binfmt.c 2004-02-19 11:12:53.000000000 -0500
+@@ -28,7 +28,14 @@ struct elf_phdr;
+
+ #define ELF_NAME "elf/i386"
+
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
++
+ #define ELF_ET_DYN_BASE (IA32_PAGE_OFFSET/3 + 0x1000000)
+
+ #undef ELF_ARCH
+@@ -129,6 +136,17 @@ struct elf_prpsinfo
+ #include <asm/ia32.h>
+ #include <linux/elf.h>
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 0x08048000UL : 0x400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24)
++#endif
++
+ typedef struct user_i387_ia32_struct elf_fpregset_t;
+ typedef struct user32_fxsr_struct elf_fpxregset_t;
+
+@@ -243,7 +261,13 @@ int ia32_setup_arg_pages(struct linux_bi
+ mpnt->vm_mm = current->mm;
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = IA32_STACK_TOP;
+- mpnt->vm_flags = vm_stack_flags32;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ mpnt->vm_flags = VM_STACK_FLAGS;
++#else
++ mpnt->vm_flags = vm_stack_flags32;
++#endif
++
+ mpnt->vm_page_prot = (mpnt->vm_flags & VM_EXEC) ?
+ PAGE_COPY_EXEC : PAGE_COPY;
+ mpnt->vm_ops = NULL;
+diff -Nurp linux-2.4.25/arch/x86_64/ia32/sys_ia32.c linux-2.4.25-pax/arch/x86_64/ia32/sys_ia32.c
+--- linux-2.4.25/arch/x86_64/ia32/sys_ia32.c 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/arch/x86_64/ia32/sys_ia32.c 2004-02-19 11:12:53.000000000 -0500
+@@ -2107,6 +2107,11 @@ asmlinkage long sys32_mmap2(unsigned lon
+ unsigned long error;
+ struct file * file = NULL;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ return -EINVAL;
++#endif
++
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+diff -Nurp linux-2.4.25/arch/x86_64/kernel/setup64.c linux-2.4.25-pax/arch/x86_64/kernel/setup64.c
+--- linux-2.4.25/arch/x86_64/kernel/setup64.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/x86_64/kernel/setup64.c 2004-02-19 11:12:53.000000000 -0500
+@@ -36,8 +36,15 @@ struct desc_ptr idt_descr = { 256 * 16,
+ correct flags everywhere. */
+ unsigned long __supported_pte_mask = ~0UL;
+ static int do_not_nx __initdata = 0;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++unsigned long vm_stack_flags = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS;
++unsigned long vm_stack_flags32 = VM_GROWSDOWN | __VM_DATA_DEFAULT_FLAGS;
++#else
+ unsigned long vm_stack_flags = __VM_STACK_FLAGS;
+ unsigned long vm_stack_flags32 = __VM_STACK_FLAGS;
++#endif
++
+ unsigned long vm_data_default_flags = __VM_DATA_DEFAULT_FLAGS;
+ unsigned long vm_data_default_flags32 = __VM_DATA_DEFAULT_FLAGS;
+ unsigned long vm_force_exec32 = PROT_EXEC;
+diff -Nurp linux-2.4.25/arch/x86_64/kernel/sys_x86_64.c linux-2.4.25-pax/arch/x86_64/kernel/sys_x86_64.c
+--- linux-2.4.25/arch/x86_64/kernel/sys_x86_64.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/x86_64/kernel/sys_x86_64.c 2004-02-19 11:12:53.000000000 -0500
+@@ -46,6 +46,11 @@ long sys_mmap(unsigned long addr, unsign
+ if (off & ~PAGE_MASK)
+ goto out;
+
++#ifdef CONFIG_PAX_RANDEXEC
++ if (flags & MAP_MIRROR)
++ goto out;
++#endif
++
+ error = -EBADF;
+ file = NULL;
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+@@ -72,6 +77,13 @@ unsigned long arch_get_unmapped_area(str
+ unsigned long end = TASK_SIZE;
+
+ if (current->thread.flags & THREAD_IA32) {
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_32 + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_32;
+ end = 0xffff0000;
+@@ -82,10 +94,24 @@ unsigned long arch_get_unmapped_area(str
+ base down for this case. This may give conflicts
+ with the heap, but we assume that malloc falls back
+ to mmap. Give it 1GB of playground for now. -AK */
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = 0x40000000 + (current->mm->delta_mmap & 0x0FFFFFFFU);
++ else
++#endif
++
+ if (!addr)
+ addr = 0x40000000;
+ end = 0x80000000;
+ } else {
++
++#ifdef CONFIG_PAX_RANDMMAP
++ if ((current->flags & PF_PAX_RANDMMAP) && (!addr || filp))
++ addr = TASK_UNMAPPED_64 + current->mm->delta_mmap;
++ else
++#endif
++
+ if (!addr)
+ addr = TASK_UNMAPPED_64;
+ end = TASK_SIZE;
+diff -Nurp linux-2.4.25/arch/x86_64/mm/fault.c linux-2.4.25-pax/arch/x86_64/mm/fault.c
+--- linux-2.4.25/arch/x86_64/mm/fault.c 2003-11-28 13:26:19.000000000 -0500
++++ linux-2.4.25-pax/arch/x86_64/mm/fault.c 2004-02-19 11:12:53.000000000 -0500
+@@ -173,6 +173,62 @@ static int is_prefetch(struct pt_regs *r
+ return prefetch;
+ }
+
++#ifdef CONFIG_PAX_PAGEEXEC
++/*
++ * PaX: decide what to do with offenders (regs->rip = fault address)
++ *
++ * returns 1 when task should be killed
++ * 2 when legitimate ET_EXEC was detected
++ */
++static int pax_handle_fetch_fault(struct pt_regs *regs)
++{
++ int err;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC) {
++ if (regs->rip >= current->mm->start_code &&
++ regs->rip < current->mm->end_code)
++ {
++ if (current->thread.flags & THREAD_IA32) {
++ unsigned int esp_4;
++
++ err = get_user(esp_4, (unsigned int*)(regs->rsp-4UL));
++ if (err || esp_4 == regs->rip)
++ return 1;
++ } else {
++ unsigned long esp_8;
++
++ err = get_user(esp_8, (unsigned long*)(regs->rsp-8UL));
++ if (err || esp_8 == regs->rip)
++ return 1;
++ }
++
++ regs->rip += current->mm->delta_exec;
++ return 2;
++ }
++ }
++#endif
++
++ return 1;
++}
++
++void pax_report_insns(void *pc)
++{
++ unsigned long i;
++
++ printk(KERN_ERR "PAX: bytes at PC: ");
++ for (i = 0; i < 20; i++) {
++ unsigned int c;
++ if (get_user(c, (unsigned char*)pc+i)) {
++ printk("<invalid address>.");
++ break;
++ }
++ printk("%08x ", c);
++ }
++ printk("\n");
++}
++#endif
++
+ int page_fault_trace;
+ int exception_trace = 1;
+
+@@ -267,6 +323,23 @@ again:
+ * we can handle it..
+ */
+ good_area:
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((current->flags & PF_PAX_PAGEEXEC) && (error_code & 16) && !(vma->vm_flags & VM_EXEC)) {
++ up_read(&mm->mmap_sem);
++ switch(pax_handle_fetch_fault(regs)) {
++
++#ifdef CONFIG_PAX_RANDEXEC
++ case 2:
++ return;
++#endif
++
++ }
++ pax_report_fault(regs, (void*)regs->rip, (void*)regs->rsp);
++ do_exit(SIGKILL);
++ }
++#endif
++
+ info.si_code = SEGV_ACCERR;
+ write = 0;
+ switch (error_code & 3) {
+diff -Nurp linux-2.4.25/drivers/char/mem.c linux-2.4.25-pax/drivers/char/mem.c
+--- linux-2.4.25/drivers/char/mem.c 2003-11-28 13:26:20.000000000 -0500
++++ linux-2.4.25-pax/drivers/char/mem.c 2004-02-19 11:12:53.000000000 -0500
+@@ -402,7 +402,23 @@ static inline size_t read_zero_pagealign
+ count = size;
+
+ zap_page_range(mm, addr, count);
+- zeromap_page_range(addr, count, PAGE_COPY);
++ zeromap_page_range(addr, count, vma->vm_page_prot);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ unsigned long addr_m;
++ struct vm_area_struct * vma_m;
++
++ addr_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, addr_m);
++ if (vma_m && vma_m->vm_start == addr_m && (vma_m->vm_flags & VM_MIRROR)) {
++ addr_m = addr + (unsigned long)vma->vm_private_data;
++ zap_page_range(mm, addr_m, count);
++ } else
++ printk(KERN_ERR "PAX: VMMIRROR: read_zero bug, %08lx, %08lx\n",
++ addr, vma->vm_start);
++ }
++#endif
+
+ size -= count;
+ buf += count;
+diff -Nurp linux-2.4.25/drivers/char/random.c linux-2.4.25-pax/drivers/char/random.c
+--- linux-2.4.25/drivers/char/random.c 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/drivers/char/random.c 2004-02-19 11:12:53.000000000 -0500
+@@ -2310,7 +2310,27 @@ __u32 check_tcp_syn_cookie(__u32 cookie,
+ }
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++unsigned long pax_get_random_long(void)
++{
++ static time_t rekey_time;
++ static __u32 secret[12];
++ time_t t;
+
++ /*
++ * Pick a random secret every REKEY_INTERVAL seconds.
++ */
++ t = CURRENT_TIME;
++ if (!rekey_time || (t - rekey_time) > REKEY_INTERVAL) {
++ rekey_time = t;
++ get_random_bytes(secret, sizeof(secret));
++ }
++
++ secret[1] = halfMD4Transform(secret+8, secret);
++ secret[0] = halfMD4Transform(secret+8, secret);
++ return *(unsigned long *)secret;
++}
++#endif
+
+ #ifndef CONFIG_ARCH_S390
+ EXPORT_SYMBOL(add_keyboard_randomness);
+diff -Nurp linux-2.4.25/fs/Makefile linux-2.4.25-pax/fs/Makefile
+--- linux-2.4.25/fs/Makefile 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/fs/Makefile 2004-02-19 11:12:53.000000000 -0500
+@@ -7,7 +7,7 @@
+
+ O_TARGET := fs.o
+
+-export-objs := filesystems.o open.o dcache.o buffer.o dquot.o
++export-objs := filesystems.o open.o dcache.o buffer.o dquot.o exec.o
+ mod-subdirs := nls
+
+ obj-y := open.o read_write.o devices.o file_table.o buffer.o \
+diff -Nurp linux-2.4.25/fs/binfmt_aout.c linux-2.4.25-pax/fs/binfmt_aout.c
+--- linux-2.4.25/fs/binfmt_aout.c 2001-11-02 20:39:20.000000000 -0500
++++ linux-2.4.25-pax/fs/binfmt_aout.c 2004-02-19 11:12:53.000000000 -0500
+@@ -307,6 +307,24 @@ static int load_aout_binary(struct linux
+ current->mm->mmap = NULL;
+ compute_creds(bprm);
+ current->flags &= ~PF_FORKNOEXEC;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
++ current->flags |= PF_PAX_PAGEEXEC;
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
++ current->flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
++ current->flags |= PF_PAX_MPROTECT;
++#endif
++
++ }
++#endif
++
+ #ifdef __sparc__
+ if (N_MAGIC(ex) == NMAGIC) {
+ loff_t pos = fd_offset;
+@@ -393,7 +411,7 @@ static int load_aout_binary(struct linux
+
+ down_write(&current->mm->mmap_sem);
+ error = do_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
+- PROT_READ | PROT_WRITE | PROT_EXEC,
++ PROT_READ | PROT_WRITE,
+ MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
+ fd_offset + ex.a_text);
+ up_write(&current->mm->mmap_sem);
+diff -Nurp linux-2.4.25/fs/binfmt_elf.c linux-2.4.25-pax/fs/binfmt_elf.c
+--- linux-2.4.25/fs/binfmt_elf.c 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/fs/binfmt_elf.c 2004-02-19 11:12:53.000000000 -0500
+@@ -33,10 +33,12 @@
+ #include <linux/smp_lock.h>
+ #include <linux/compiler.h>
+ #include <linux/highmem.h>
++#include <linux/random.h>
+
+ #include <asm/uaccess.h>
+ #include <asm/param.h>
+ #include <asm/pgalloc.h>
++#include <asm/system.h>
+
+ #define DLINFO_ITEMS 13
+
+@@ -86,6 +88,12 @@ static void set_brk(unsigned long start,
+ if (end <= start)
+ return;
+ do_brk(start, end - start);
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (current->flags & PF_PAX_RANDEXEC)
++ do_mmap_pgoff(NULL, ELF_PAGEALIGN(start + current->mm->delta_exec), 0UL, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_FIXED | MAP_MIRROR, start);
++#endif
++
+ }
+
+
+@@ -414,6 +422,204 @@ out:
+ return elf_entry;
+ }
+
++#if (defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)) && defined(CONFIG_PAX_SOFTMODE)
++static unsigned long pax_parse_softmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (elf_phdata->p_flags & PF_PAGEEXEC)
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (elf_phdata->p_flags & PF_SEGMEXEC) {
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++ pax_flags |= PF_PAX_SEGMEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (elf_phdata->p_flags & PF_EMUTRAMP)
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (elf_phdata->p_flags & PF_MPROTECT)
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (elf_phdata->p_flags & PF_RANDMMAP)
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (elf_phdata->p_flags & PF_RANDEXEC)
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++static unsigned long pax_parse_hardmode(const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC)) {
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++ pax_flags |= PF_PAX_SEGMEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_phdata->p_flags & PF_NORANDMMAP))
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_phdata->p_flags & PF_NORANDEXEC))
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++static int pax_parse_ei_pax(const struct elfhdr * const elf_ex)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
++ pax_flags |= PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC)) {
++ pax_flags &= ~PF_PAX_PAGEEXEC;
++ pax_flags |= PF_PAX_SEGMEXEC;
++ }
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
++ pax_flags |= PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ if ((pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
++ pax_flags |= PF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
++ pax_flags |= PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if ((elf_ex->e_ident[EI_PAX] & EF_PAX_RANDEXEC) && (elf_ex->e_type == ET_EXEC) &&
++ (pax_flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)))
++ pax_flags |= PF_PAX_RANDEXEC;
++#endif
++
++ return pax_flags;
++}
++#endif
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++static int pax_parse_elf_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
++{
++ unsigned long pax_flags = 0UL;
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ unsigned long i;
++#endif
++
++#ifdef CONFIG_PAX_EI_PAX
++ pax_flags = pax_parse_ei_pax(elf_ex);
++#endif
++
++#ifdef CONFIG_PAX_PT_PAX_FLAGS
++ for (i = 0UL; i < elf_ex->e_phnum; i++)
++ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
++ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
++ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
++ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
++ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)) ||
++ ((elf_phdata[i].p_flags & PF_RANDEXEC) && ((elf_phdata[i].p_flags & PF_NORANDEXEC) || elf_ex->e_type == ET_DYN)) ||
++ (!(elf_phdata[i].p_flags & PF_NORANDEXEC) && (elf_ex->e_type == ET_DYN)))
++ return -EINVAL;
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_softmode)
++ pax_flags = pax_parse_softmode(&elf_phdata[i]);
++ else
++#endif
++
++ pax_flags = pax_parse_hardmode(&elf_phdata[i]);
++ break;
++ }
++#endif
++
++ if (0 > pax_check_flags(&pax_flags))
++ return -EINVAL;
++
++ current->flags |= pax_flags;
++ return 0;
++}
++#endif
++
+ /*
+ * These are the functions used to load ELF style executables and shared
+ * libraries. There is no binary dependent code anywhere else.
+@@ -446,7 +652,12 @@ static int load_elf_binary(struct linux_
+ struct exec interp_ex;
+ char passed_fileno[6];
+ struct files_struct *files;
+-
++
++#ifdef CONFIG_PAX_RANDEXEC
++ unsigned long load_addr_random = 0UL;
++ unsigned long load_bias_random = 0UL;
++#endif
++
+ /* Get the exec-header */
+ elf_ex = *((struct elfhdr *) bprm->buf);
+
+@@ -622,7 +833,44 @@ static int load_elf_binary(struct linux_
+ current->mm->end_data = 0;
+ current->mm->end_code = 0;
+ current->mm->mmap = NULL;
++
++#ifdef CONFIG_PAX_ASLR
++ current->mm->delta_mmap = 0UL;
++ current->mm->delta_exec = 0UL;
++ current->mm->delta_stack = 0UL;
++#endif
++
+ current->flags &= ~PF_FORKNOEXEC;
++
++#if defined(CONFIG_PAX_EI_PAX) || defined(CONFIG_PAX_PT_PAX_FLAGS)
++ if (0 > pax_parse_elf_flags(&elf_ex, elf_phdata)) {
++ send_sig(SIGKILL, current, 0);
++ goto out_free_dentry;
++ }
++#endif
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++ pax_set_flags(bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++ if (pax_set_flags_func)
++ (*pax_set_flags_func)(bprm);
++#endif
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ if (current->flags & PF_PAX_PAGEEXEC)
++ current->mm->call_dl_resolve = 0UL;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ if (current->flags & PF_PAX_RANDMMAP) {
++#define pax_delta_mask(delta, lsb, len) (((delta) & ((1UL << (len)) - 1)) << (lsb))
++
++ current->mm->delta_mmap = pax_delta_mask(pax_get_random_long(), PAX_DELTA_MMAP_LSB(current), PAX_DELTA_MMAP_LEN(current));
++ current->mm->delta_exec = pax_delta_mask(pax_get_random_long(), PAX_DELTA_EXEC_LSB(current), PAX_DELTA_EXEC_LEN(current));
++ current->mm->delta_stack = pax_delta_mask(pax_get_random_long(), PAX_DELTA_STACK_LSB(current), PAX_DELTA_STACK_LEN(current));
++ }
++#endif
++
+ elf_entry = (unsigned long) elf_ex.e_entry;
+
+ /* Do this so that we can load the interpreter, if need be. We will
+@@ -631,7 +879,7 @@ static int load_elf_binary(struct linux_
+ retval = setup_arg_pages(bprm);
+ if (retval < 0) {
+ send_sig(SIGKILL, current, 0);
+- return retval;
++ goto out_free_dentry;
+ }
+
+ current->mm->start_stack = bprm->p;
+@@ -678,11 +926,85 @@ static int load_elf_binary(struct linux_
+ base, as well as whatever program they might try to exec. This
+ is because the brk will follow the loader, and is not movable. */
+ load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
++
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address at the default exe base if requested */
++ if (current->flags & PF_PAX_RANDMMAP) {
++ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE(current) - vaddr + current->mm->delta_exec);
++ elf_flags |= MAP_FIXED;
++ }
++#endif
++
+ }
+
+- error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
+- if (BAD_ADDR(error))
+- continue;
++#ifdef CONFIG_PAX_RANDEXEC
++ if ((current->flags & PF_PAX_RANDEXEC) && (elf_ex.e_type == ET_EXEC)) {
++ error = -ENOMEM;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot & ~PROT_EXEC, elf_flags);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ unsigned long addr, len;
++
++ addr = ELF_PAGESTART(load_bias + vaddr);
++ len = elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr);
++ if (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len)
++ continue;
++ down_write(&current->mm->mmap_sem);
++ error = do_mmap_pgoff(bprm->file, addr, len, elf_prot, elf_flags, (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT);
++ up_write(&current->mm->mmap_sem);
++ }
++#endif
++
++ if (BAD_ADDR(error))
++ continue;
++
++ /* PaX: mirror at a randomized base */
++ down_write(&current->mm->mmap_sem);
++
++ if (!load_addr_set) {
++ load_addr_random = get_unmapped_area(bprm->file, 0UL, elf_ppnt->p_filesz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), (elf_ppnt->p_offset - ELF_PAGEOFFSET(elf_ppnt->p_vaddr)) >> PAGE_SHIFT, MAP_PRIVATE);
++ if (BAD_ADDR(load_addr_random)) {
++ up_write(&current->mm->mmap_sem);
++ continue;
++ }
++ load_bias_random = load_addr_random - vaddr;
++ }
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (elf_prot & PROT_EXEC) {
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), elf_ppnt->p_memsz + ELF_PAGEOFFSET(elf_ppnt->p_vaddr), PROT_NONE, MAP_PRIVATE | MAP_FIXED, 0UL);
++ if (!BAD_ADDR(load_addr_random)) {
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr + SEGMEXEC_TASK_SIZE), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++ if (!BAD_ADDR(load_addr_random))
++ load_addr_random -= SEGMEXEC_TASK_SIZE;
++ }
++ } else
++ load_addr_random = do_mmap_pgoff(NULL, ELF_PAGESTART(load_bias_random + vaddr), 0UL, elf_prot, elf_flags | MAP_MIRROR, error);
++ }
++#endif
++
++ up_write(&current->mm->mmap_sem);
++ if (BAD_ADDR(load_addr_random))
++ continue;
++ } else
++#endif
++
++ {
++ error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt, elf_prot, elf_flags);
++ if (BAD_ADDR(error))
++ continue;
++ }
+
+ if (!load_addr_set) {
+ load_addr_set = 1;
+@@ -693,6 +1015,11 @@ static int load_elf_binary(struct linux_
+ load_addr += load_bias;
+ reloc_func_desc = load_addr;
+ }
++
++#ifdef CONFIG_PAX_RANDEXEC
++ current->mm->delta_exec = load_addr_random - load_addr;
++#endif
++
+ }
+ k = elf_ppnt->p_vaddr;
+ if (k < start_code) start_code = k;
+@@ -719,6 +1046,24 @@ static int load_elf_binary(struct linux_
+ start_data += load_bias;
+ end_data += load_bias;
+
++#ifdef CONFIG_PAX_RANDMMAP
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ if (current->flags & PF_PAX_RANDMMAP)
++ elf_brk += pax_delta_mask(pax_get_random_long(), 4, PAGE_SHIFT);
++#undef pax_delta_mask
++#endif
++
++ /* Calling set_brk effectively mmaps the pages that we need
++ * for the bss and break sections
++ */
++ set_brk(elf_bss, elf_brk);
++
++ padzero(elf_bss);
++
+ if (elf_interpreter) {
+ if (interpreter_type == INTERPRETER_AOUT)
+ elf_entry = load_aout_interp(&interp_ex,
+@@ -767,13 +1112,6 @@ static int load_elf_binary(struct linux_
+ current->mm->end_data = end_data;
+ current->mm->start_stack = bprm->p;
+
+- /* Calling set_brk effectively mmaps the pages that we need
+- * for the bss and break sections
+- */
+- set_brk(elf_bss, elf_brk);
+-
+- padzero(elf_bss);
+-
+ #if 0
+ printk("(start_brk) %lx\n" , (long) current->mm->start_brk);
+ printk("(end_code) %lx\n" , (long) current->mm->end_code);
+@@ -810,6 +1148,10 @@ static int load_elf_binary(struct linux_
+ ELF_PLAT_INIT(regs, reloc_func_desc);
+ #endif
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ pax_switch_segments(current);
++#endif
++
+ start_thread(regs, elf_entry, bprm->p);
+ if (current->ptrace & PT_PTRACED)
+ send_sig(SIGTRAP, current, 0);
+diff -Nurp linux-2.4.25/fs/exec.c linux-2.4.25-pax/fs/exec.c
+--- linux-2.4.25/fs/exec.c 2004-02-18 08:36:31.000000000 -0500
++++ linux-2.4.25-pax/fs/exec.c 2004-02-19 11:12:53.000000000 -0500
+@@ -37,6 +37,7 @@
+ #include <linux/personality.h>
+ #include <linux/swap.h>
+ #include <linux/utsname.h>
++#include <linux/random.h>
+ #define __NO_VERSION__
+ #include <linux/module.h>
+
+@@ -56,6 +57,20 @@ int core_setuid_ok = 0;
+ static struct linux_binfmt *formats;
+ static rwlock_t binfmt_lock = RW_LOCK_UNLOCKED;
+
++#ifdef CONFIG_PAX_SOFTMODE
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++unsigned int pax_aslr=1;
++#endif
++
++unsigned int pax_softmode;
++#endif
++
++#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
++void (*pax_set_flags_func)(struct linux_binprm * bprm);
++EXPORT_SYMBOL(pax_set_flags_func);
++#endif
++
+ int register_binfmt(struct linux_binfmt * fmt)
+ {
+ struct linux_binfmt ** tmp = &formats;
+@@ -290,8 +305,14 @@ void put_dirty_page(struct task_struct *
+ struct vm_area_struct *vma;
+ pgprot_t prot = PAGE_COPY;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page_count(page) != 1 && (!(tsk->flags & PF_PAX_SEGMEXEC) || page_count(page) != 2))
++#else
+ if (page_count(page) != 1)
++#endif
++
+ printk(KERN_ERR "mem_map disagrees with %p at %08lx\n", page, address);
++
+ pgd = pgd_offset(tsk->mm, address);
+
+ spin_lock(&tsk->mm->page_table_lock);
+@@ -303,9 +324,19 @@ void put_dirty_page(struct task_struct *
+ goto out;
+ if (!pte_none(*pte))
+ goto out;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (page_count(page) == 1) {
++#endif
++
+ lru_cache_add(page);
+ flush_dcache_page(page);
+ flush_page_to_ram(page);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ }
++#endif
++
+ /* lookup is cheap because there is only a single entry in the list */
+ vma = find_vma(tsk->mm, address);
+ if (vma)
+@@ -329,6 +360,10 @@ int setup_arg_pages(struct linux_binprm
+ struct vm_area_struct *mpnt;
+ int i;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ struct vm_area_struct *mpnt_m = NULL;
++#endif
++
+ stack_base = STACK_TOP - MAX_ARG_PAGES*PAGE_SIZE;
+
+ bprm->p += stack_base;
+@@ -339,13 +374,30 @@ int setup_arg_pages(struct linux_binprm
+ mpnt = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
+ if (!mpnt)
+ return -ENOMEM;
+-
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && (VM_STACK_FLAGS & VM_MAYEXEC)) {
++ mpnt_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ if (!mpnt_m) {
++ kmem_cache_free(vm_area_cachep, mpnt);
++ return -ENOMEM;
++ }
++ }
++#endif
++
+ down_write(&current->mm->mmap_sem);
+ {
+ mpnt->vm_mm = current->mm;
+ mpnt->vm_start = PAGE_MASK & (unsigned long) bprm->p;
+ mpnt->vm_end = STACK_TOP;
+ mpnt->vm_flags = VM_STACK_FLAGS;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC))
++ mpnt->vm_page_prot = protection_map[(VM_STACK_FLAGS | VM_EXEC) & 0x7];
++ else
++#endif
++
+ mpnt->vm_page_prot = protection_map[VM_STACK_FLAGS & 0x7];
+ mpnt->vm_ops = NULL;
+ mpnt->vm_pgoff = 0;
+@@ -353,6 +405,25 @@ int setup_arg_pages(struct linux_binprm
+ mpnt->vm_private_data = (void *) 0;
+ insert_vm_struct(current->mm, mpnt);
+ current->mm->total_vm = (mpnt->vm_end - mpnt->vm_start) >> PAGE_SHIFT;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mpnt_m) {
++ *mpnt_m = *mpnt;
++ if (!(VM_STACK_FLAGS & VM_EXEC)) {
++ mpnt_m->vm_flags &= ~(VM_READ | VM_WRITE | VM_EXEC);
++ mpnt_m->vm_page_prot = PAGE_NONE;
++ }
++ mpnt_m->vm_start += SEGMEXEC_TASK_SIZE;
++ mpnt_m->vm_end += SEGMEXEC_TASK_SIZE;
++ mpnt_m->vm_flags |= VM_MIRROR;
++ mpnt->vm_flags |= VM_MIRROR;
++ mpnt_m->vm_private_data = (void *)(mpnt->vm_start - mpnt_m->vm_start);
++ mpnt->vm_private_data = (void *)(mpnt_m->vm_start - mpnt->vm_start);
++ insert_vm_struct(current->mm, mpnt_m);
++ current->mm->total_vm = (mpnt_m->vm_end - mpnt_m->vm_start) >> PAGE_SHIFT;
++ }
++#endif
++
+ }
+
+ for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
+@@ -360,6 +431,14 @@ int setup_arg_pages(struct linux_binprm
+ if (page) {
+ bprm->page[i] = NULL;
+ put_dirty_page(current,page,stack_base);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (mpnt_m) {
++ page_cache_get(page);
++ put_dirty_page(current,page,stack_base + SEGMEXEC_TASK_SIZE);
++ }
++#endif
++
+ }
+ stack_base += PAGE_SIZE;
+ }
+@@ -615,6 +694,30 @@ int flush_old_exec(struct linux_binprm *
+ }
+ current->comm[i] = '\0';
+
++#ifdef CONFIG_PAX_PAGEEXEC
++ current->flags &= ~PF_PAX_PAGEEXEC;
++#endif
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ current->flags &= ~PF_PAX_EMUTRAMP;
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++ current->flags &= ~PF_PAX_MPROTECT;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ current->flags &= ~PF_PAX_RANDMMAP;
++#endif
++
++#ifdef CONFIG_PAX_RANDEXEC
++ current->flags &= ~PF_PAX_RANDEXEC;
++#endif
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ current->flags &= ~PF_PAX_SEGMEXEC;
++#endif
++
+ flush_thread();
+
+ de_thread(current);
+@@ -920,6 +1023,16 @@ int do_execve(char * filename, char ** a
+ return retval;
+
+ bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
++
++#ifdef CONFIG_PAX_RANDUSTACK
++
++#ifdef CONFIG_PAX_SOFTMODE
++ if (pax_aslr)
++#endif
++
++ bprm.p -= (pax_get_random_long() & ~(sizeof(void *)-1)) & ~PAGE_MASK;
++#endif
++
+ memset(bprm.page, 0, MAX_ARG_PAGES*sizeof(bprm.page[0]));
+
+ bprm.file = file;
+@@ -1102,6 +1215,123 @@ void format_corename(char *corename, con
+ *out_ptr = 0;
+ }
+
++int pax_check_flags(unsigned long * flags)
++{
++ int retval = 0;
++
++#if !defined(__i386__) || !defined(CONFIG_PAX_SEGMEXEC)
++ if (*flags & PF_PAX_SEGMEXEC)
++ {
++ *flags &= ~PF_PAX_SEGMEXEC;
++ retval = -EINVAL;
++ }
++#endif
++
++ if ((*flags & PF_PAX_PAGEEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ && (*flags & PF_PAX_SEGMEXEC)
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_PAGEEXEC;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_MPROTECT)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_MPROTECT;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_EMUTRAMP)
++
++#ifdef CONFIG_PAX_EMUTRAMP
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_EMUTRAMP;
++ retval = -EINVAL;
++ }
++
++ if ((*flags & PF_PAX_RANDEXEC)
++
++#ifdef CONFIG_PAX_RANDEXEC
++ && !(*flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC))
++#endif
++
++ )
++ {
++ *flags &= ~PF_PAX_RANDEXEC;
++ retval = -EINVAL;
++ }
++
++ return retval;
++}
++
++EXPORT_SYMBOL(pax_check_flags);
++
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
++{
++ struct task_struct *tsk = current;
++ struct mm_struct *mm = current->mm;
++ char* buffer_exec = (char*)__get_free_page(GFP_ATOMIC);
++ char* buffer_fault = (char*)__get_free_page(GFP_ATOMIC);
++ char* path_exec=NULL;
++ char* path_fault=NULL;
++ unsigned long start=0UL, end=0UL, offset=0UL;
++
++ if (buffer_exec && buffer_fault) {
++ struct vm_area_struct* vma, * vma_exec=NULL, * vma_fault=NULL;
++
++ down_read(&mm->mmap_sem);
++ vma = mm->mmap;
++ while (vma && (!vma_exec || !vma_fault)) {
++ if ((vma->vm_flags & VM_EXECUTABLE) && vma->vm_file)
++ vma_exec = vma;
++ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
++ vma_fault = vma;
++ vma = vma->vm_next;
++ }
++ if (vma_exec) {
++ path_exec = d_path(vma_exec->vm_file->f_dentry, vma_exec->vm_file->f_vfsmnt, buffer_exec, PAGE_SIZE);
++ if (IS_ERR(path_exec))
++ path_exec = "<path too long>";
++ }
++ if (vma_fault) {
++ start = vma_fault->vm_start;
++ end = vma_fault->vm_end;
++ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
++ if (vma_fault->vm_file) {
++ path_fault = d_path(vma_fault->vm_file->f_dentry, vma_fault->vm_file->f_vfsmnt, buffer_fault, PAGE_SIZE);
++ if (IS_ERR(path_fault))
++ path_fault = "<path too long>";
++ } else
++ path_fault = "<anonymous mapping>";
++ }
++ up_read(&mm->mmap_sem);
++ }
++ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
++ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, "
++ "PC: %p, SP: %p\n", path_exec, tsk->comm, tsk->pid,
++ tsk->uid, tsk->euid, pc, sp);
++ if (buffer_exec) free_page((unsigned long)buffer_exec);
++ if (buffer_fault) free_page((unsigned long)buffer_fault);
++ pax_report_insns(pc);
++ do_coredump(SIGKILL, regs);
++}
++#endif
++
+ int do_coredump(long signr, struct pt_regs * regs)
+ {
+ struct linux_binfmt * binfmt;
+diff -Nurp linux-2.4.25/fs/proc/array.c linux-2.4.25-pax/fs/proc/array.c
+--- linux-2.4.25/fs/proc/array.c 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/fs/proc/array.c 2004-02-19 11:12:53.000000000 -0500
+@@ -530,9 +530,17 @@ static int show_map(struct seq_file *m,
+ seq_printf(m, "%08lx-%08lx %c%c%c%c %08lx %02x:%02x %lu %n",
+ map->vm_start,
+ map->vm_end,
++
++#if 1
++ flags & VM_MAYREAD ? flags & VM_READ ? 'R' : '+' : flags & VM_READ ? 'r' : '-',
++ flags & VM_MAYWRITE ? flags & VM_WRITE ? 'W' : '+' : flags & VM_WRITE ? 'w' : '-',
++ flags & VM_MAYEXEC ? flags & VM_EXEC ? 'X' : '+' : flags & VM_EXEC ? 'x' : '-',
++#else
+ flags & VM_READ ? 'r' : '-',
+ flags & VM_WRITE ? 'w' : '-',
+ flags & VM_EXEC ? 'x' : '-',
++#endif
++
+ flags & VM_MAYSHARE ? 's' : 'p',
+ map->vm_pgoff << PAGE_SHIFT,
+ MAJOR(dev), MINOR(dev), ino, &len);
+diff -Nurp linux-2.4.25/include/asm-alpha/a.out.h linux-2.4.25-pax/include/asm-alpha/a.out.h
+--- linux-2.4.25/include/asm-alpha/a.out.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-alpha/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -98,7 +98,7 @@ struct exec
+ set_personality (((BFPM->sh_bang || EX.ah.entry < 0x100000000 \
+ ? ADDR_LIMIT_32BIT : 0) | PER_OSF4))
+
+-#define STACK_TOP \
++#define __STACK_TOP \
+ (current->personality & ADDR_LIMIT_32BIT ? 0x80000000 : 0x00120000000UL)
+
+ #endif
+diff -Nurp linux-2.4.25/include/asm-alpha/elf.h linux-2.4.25-pax/include/asm-alpha/elf.h
+--- linux-2.4.25/include/asm-alpha/elf.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-alpha/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -41,6 +41,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 28)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality & ADDR_LIMIT_32BIT ? 14 : 19)
++#endif
++
+ /* $0 is set by ld.so to a pointer to a function which might be
+ registered using atexit. This provides a mean for the dynamic
+ linker to call DT_FINI functions for shared libraries that have
+diff -Nurp linux-2.4.25/include/asm-alpha/mman.h linux-2.4.25-pax/include/asm-alpha/mman.h
+--- linux-2.4.25/include/asm-alpha/mman.h 2000-03-16 17:07:09.000000000 -0500
++++ linux-2.4.25-pax/include/asm-alpha/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -24,6 +24,10 @@
+ #define MAP_LOCKED 0x8000 /* lock the mapping */
+ #define MAP_NORESERVE 0x10000 /* don't check for reservations */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x20000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_SYNC 2 /* synchronous memory sync */
+ #define MS_INVALIDATE 4 /* invalidate the caches */
+diff -Nurp linux-2.4.25/include/asm-alpha/page.h linux-2.4.25-pax/include/asm-alpha/page.h
+--- linux-2.4.25/include/asm-alpha/page.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-alpha/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -101,6 +101,15 @@ extern __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _ALPHA_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-alpha/pgtable.h linux-2.4.25-pax/include/asm-alpha/pgtable.h
+--- linux-2.4.25/include/asm-alpha/pgtable.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-alpha/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -96,6 +96,17 @@
+ #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
+ #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+ #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
+
+ #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
+diff -Nurp linux-2.4.25/include/asm-i386/a.out.h linux-2.4.25-pax/include/asm-i386/a.out.h
+--- linux-2.4.25/include/asm-i386/a.out.h 1995-06-16 14:33:06.000000000 -0400
++++ linux-2.4.25-pax/include/asm-i386/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -19,7 +19,11 @@ struct exec
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP TASK_SIZE
++#ifdef CONFIG_PAX_SEGMEXEC
++#define __STACK_TOP ((current->flags & PF_PAX_SEGMEXEC)?TASK_SIZE/2:TASK_SIZE)
++#else
++#define __STACK_TOP TASK_SIZE
++#endif
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/asm-i386/desc.h linux-2.4.25-pax/include/asm-i386/desc.h
+--- linux-2.4.25/include/asm-i386/desc.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-i386/desc.h 2004-02-19 11:12:53.000000000 -0500
+@@ -46,7 +46,8 @@ struct desc_struct {
+ };
+
+ extern struct desc_struct gdt_table[];
+-extern struct desc_struct *idt, *gdt;
++extern struct desc_struct gdt_table2[];
++extern struct desc_struct *idt, *gdt, *gdt2;
+
+ struct Xgt_desc_struct {
+ unsigned short size;
+@@ -55,6 +56,7 @@ struct Xgt_desc_struct {
+
+ #define idt_descr (*(struct Xgt_desc_struct *)((char *)&idt - 2))
+ #define gdt_descr (*(struct Xgt_desc_struct *)((char *)&gdt - 2))
++#define gdt_descr2 (*(struct Xgt_desc_struct *)((char *)&gdt2 - 2))
+
+ #define load_TR(n) __asm__ __volatile__("ltr %%ax"::"a" (__TSS(n)<<3))
+
+@@ -64,10 +66,11 @@ struct Xgt_desc_struct {
+ * This is the ldt that every process will get unless we need
+ * something other than this.
+ */
+-extern struct desc_struct default_ldt[];
++extern const struct desc_struct default_ldt[];
+ extern void set_intr_gate(unsigned int irq, void * addr);
+-extern void set_ldt_desc(unsigned int n, void *addr, unsigned int size);
+-extern void set_tss_desc(unsigned int n, void *addr);
++extern void set_ldt_desc(unsigned int n, const void *addr, unsigned int size);
++extern void __set_ldt_desc(unsigned int n, const void *addr, unsigned int size);
++extern void set_tss_desc(unsigned int n, const void *addr);
+
+ static inline void clear_LDT(void)
+ {
+@@ -82,7 +85,7 @@ static inline void clear_LDT(void)
+ static inline void load_LDT (mm_context_t *pc)
+ {
+ int cpu = smp_processor_id();
+- void *segments = pc->ldt;
++ const void *segments = pc->ldt;
+ int count = pc->size;
+
+ if (!count) {
+@@ -94,6 +97,21 @@ static inline void load_LDT (mm_context_
+ __load_LDT(cpu);
+ }
+
++static inline void _load_LDT (mm_context_t *pc)
++{
++ int cpu = smp_processor_id();
++ const void *segments = pc->ldt;
++ int count = pc->size;
++
++ if (!count) {
++ segments = &default_ldt[0];
++ count = 5;
++ }
++
++ __set_ldt_desc(cpu, segments, count);
++ __load_LDT(cpu);
++}
++
+ #endif /* !__ASSEMBLY__ */
+
+ #endif
+diff -Nurp linux-2.4.25/include/asm-i386/elf.h linux-2.4.25-pax/include/asm-i386/elf.h
+--- linux-2.4.25/include/asm-i386/elf.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-i386/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -55,7 +55,22 @@ typedef struct user_fxsr_struct elf_fpxr
+ the loader. We need to make sure that it is out of the way of the program
+ that it will "exec", and that there is sufficient room for the brk. */
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define ELF_ET_DYN_BASE ((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3*2:TASK_SIZE/3*2)
++#else
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x08048000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->flags & PF_PAX_SEGMEXEC ? 15 : 16)
++#endif
+
+ /* Wow, the "main" arch needs arch dependent functions too.. :) */
+
+diff -Nurp linux-2.4.25/include/asm-i386/hw_irq.h linux-2.4.25-pax/include/asm-i386/hw_irq.h
+--- linux-2.4.25/include/asm-i386/hw_irq.h 2003-08-25 07:44:43.000000000 -0400
++++ linux-2.4.25-pax/include/asm-i386/hw_irq.h 2004-02-19 11:12:53.000000000 -0500
+@@ -128,6 +128,7 @@ extern char _stext, _etext;
+ asmlinkage void x(void); \
+ asmlinkage void call_##x(void); \
+ __asm__( \
++"\n .text" \
+ "\n"__ALIGN_STR"\n" \
+ SYMBOL_NAME_STR(x) ":\n\t" \
+ "pushl $"#v"-256\n\t" \
+@@ -141,6 +142,7 @@ SYMBOL_NAME_STR(x) ":\n\t" \
+ asmlinkage void x(struct pt_regs * regs); \
+ asmlinkage void call_##x(void); \
+ __asm__( \
++"\n .text" \
+ "\n"__ALIGN_STR"\n" \
+ SYMBOL_NAME_STR(x) ":\n\t" \
+ "pushl $"#v"-256\n\t" \
+@@ -155,6 +157,7 @@ SYMBOL_NAME_STR(x) ":\n\t" \
+ #define BUILD_COMMON_IRQ() \
+ asmlinkage void call_do_IRQ(void); \
+ __asm__( \
++ "\n .text" \
+ "\n" __ALIGN_STR"\n" \
+ "common_interrupt:\n\t" \
+ SAVE_ALL \
+@@ -175,6 +178,7 @@ __asm__( \
+ #define BUILD_IRQ(nr) \
+ asmlinkage void IRQ_NAME(nr); \
+ __asm__( \
++"\n .text" \
+ "\n"__ALIGN_STR"\n" \
+ SYMBOL_NAME_STR(IRQ) #nr "_interrupt:\n\t" \
+ "pushl $"#nr"-256\n\t" \
+diff -Nurp linux-2.4.25/include/asm-i386/mman.h linux-2.4.25-pax/include/asm-i386/mman.h
+--- linux-2.4.25/include/asm-i386/mman.h 2000-03-14 20:45:20.000000000 -0500
++++ linux-2.4.25-pax/include/asm-i386/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -18,6 +18,10 @@
+ #define MAP_LOCKED 0x2000 /* pages are locked */
+ #define MAP_NORESERVE 0x4000 /* don't check for reservations */
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define MAP_MIRROR 0x8000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -Nurp linux-2.4.25/include/asm-i386/page.h linux-2.4.25-pax/include/asm-i386/page.h
+--- linux-2.4.25/include/asm-i386/page.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-i386/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -80,6 +80,12 @@ typedef struct { unsigned long pgprot; }
+
+ #define __PAGE_OFFSET (0xC0000000)
+
++#ifdef CONFIG_PAX_KERNEXEC
++#define __KERNEL_TEXT_OFFSET (0xC0400000)
++#else
++#define __KERNEL_TEXT_OFFSET (0)
++#endif
++
+ /*
+ * This much address space is reserved for vmalloc() and iomap()
+ * as well as fixmap mappings.
+@@ -137,6 +143,15 @@ static __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & (PF_PAX_PAGEEXEC|PF_PAX_SEGMEXEC))?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _I386_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-i386/pgtable.h linux-2.4.25-pax/include/asm-i386/pgtable.h
+--- linux-2.4.25/include/asm-i386/pgtable.h 2002-11-28 18:53:15.000000000 -0500
++++ linux-2.4.25-pax/include/asm-i386/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -205,6 +205,16 @@ extern void pgtable_cache_init(void);
+ #define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define __PAGE_KERNEL \
+ (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define __PAGE_KERNEL_NOCACHE \
+@@ -237,18 +247,18 @@ extern void pgtable_cache_init(void);
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+@@ -324,7 +334,7 @@ static inline pte_t pte_modify(pte_t pte
+ ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
+
+ /* to find an entry in a page-table-directory. */
+-#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
++#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
+
+ #define __pgd_offset(address) pgd_index(address)
+
+diff -Nurp linux-2.4.25/include/asm-i386/processor.h linux-2.4.25-pax/include/asm-i386/processor.h
+--- linux-2.4.25/include/asm-i386/processor.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-i386/processor.h 2004-02-19 11:12:53.000000000 -0500
+@@ -261,10 +261,19 @@ extern unsigned int mca_pentium_flag;
+ */
+ #define TASK_SIZE (PAGE_OFFSET)
+
++#ifdef CONFIG_PAX_SEGMEXEC
++#define SEGMEXEC_TASK_SIZE ((PAGE_OFFSET) / 2)
++#endif
++
+ /* This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define TASK_UNMAPPED_BASE ((current->flags & PF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE/3:TASK_SIZE/3)
++#else
+ #define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
++#endif
+
+ /*
+ * Size of io_bitmap in longwords: 32 is ports 0-0x3ff.
+diff -Nurp linux-2.4.25/include/asm-i386/system.h linux-2.4.25-pax/include/asm-i386/system.h
+--- linux-2.4.25/include/asm-i386/system.h 2003-08-25 07:44:43.000000000 -0400
++++ linux-2.4.25-pax/include/asm-i386/system.h 2004-02-19 11:12:53.000000000 -0500
+@@ -12,6 +12,8 @@
+ struct task_struct; /* one of the stranger aspects of C forward declarations.. */
+ extern void FASTCALL(__switch_to(struct task_struct *prev, struct task_struct *next));
+
++void pax_switch_segments(struct task_struct *);
++
+ #define prepare_to_switch() do { } while(0)
+ #define switch_to(prev,next,last) do { \
+ asm volatile("pushl %%esi\n\t" \
+diff -Nurp linux-2.4.25/include/asm-ia64/elf.h linux-2.4.25-pax/include/asm-ia64/elf.h
+--- linux-2.4.25/include/asm-ia64/elf.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ia64/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -41,6 +41,16 @@
+ */
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->personality == PER_LINUX32 ? 16 : 43 - PAGE_SHIFT)
++#endif
+
+ /*
+ * We use (abuse?) this macro to insert the (empty) vm_area that is
+diff -Nurp linux-2.4.25/include/asm-ia64/ia32.h linux-2.4.25-pax/include/asm-ia64/ia32.h
+--- linux-2.4.25/include/asm-ia64/ia32.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ia64/ia32.h 2004-02-19 11:12:53.000000000 -0500
+@@ -367,7 +367,14 @@ struct old_linux32_dirent {
+ #define ELF_ARCH EM_386
+
+ #define IA32_PAGE_OFFSET 0xc0000000
+-#define IA32_STACK_TOP IA32_PAGE_OFFSET
++
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __IA32_DELTA_STACK (current->mm->delta_stack)
++#else
++#define __IA32_DELTA_STACK 0UL
++#endif
++
++#define IA32_STACK_TOP (IA32_PAGE_OFFSET - __IA32_DELTA_STACK)
+
+ /*
+ * The system segments (GDT, TSS, LDT) have to be mapped below 4GB so the IA-32 engine can
+diff -Nurp linux-2.4.25/include/asm-ia64/mman.h linux-2.4.25-pax/include/asm-ia64/mman.h
+--- linux-2.4.25/include/asm-ia64/mman.h 2001-01-04 15:50:17.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ia64/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -26,6 +26,10 @@
+ #define MAP_WRITECOMBINED 0x10000 /* write-combine the area */
+ #define MAP_NONCACHED 0x20000 /* don't cache the memory */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x40000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -Nurp linux-2.4.25/include/asm-ia64/page.h linux-2.4.25-pax/include/asm-ia64/page.h
+--- linux-2.4.25/include/asm-ia64/page.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ia64/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -184,4 +184,13 @@ get_order (unsigned long size)
+ (((current->thread.flags & IA64_THREAD_XSTACK) != 0) \
+ ? VM_EXEC : 0))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* _ASM_IA64_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-ia64/pgtable.h linux-2.4.25-pax/include/asm-ia64/pgtable.h
+--- linux-2.4.25/include/asm-ia64/pgtable.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ia64/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -114,6 +114,17 @@
+ #define PAGE_SHARED __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+ #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+ #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
++# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++# define PAGE_COPY_NOEXEC PAGE_COPY
++#endif
++
+ #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
+ #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
+ #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
+diff -Nurp linux-2.4.25/include/asm-ia64/ustack.h linux-2.4.25-pax/include/asm-ia64/ustack.h
+--- linux-2.4.25/include/asm-ia64/ustack.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ia64/ustack.h 2004-02-19 11:12:53.000000000 -0500
+@@ -11,6 +11,6 @@
+ #define MAX_USER_STACK_SIZE (RGN_MAP_LIMIT/2)
+ /* Make a default stack size of 2GB */
+ #define DEFAULT_USER_STACK_SIZE (1UL << 31)
+-#define STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
++#define __STACK_TOP (0x6000000000000000UL + RGN_MAP_LIMIT)
+
+ #endif /* _ASM_IA64_USTACK_H */
+diff -Nurp linux-2.4.25/include/asm-mips/a.out.h linux-2.4.25-pax/include/asm-mips/a.out.h
+--- linux-2.4.25/include/asm-mips/a.out.h 1995-12-13 05:39:45.000000000 -0500
++++ linux-2.4.25-pax/include/asm-mips/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -19,7 +19,7 @@ struct exec
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/asm-mips/elf.h linux-2.4.25-pax/include/asm-mips/elf.h
+--- linux-2.4.25/include/asm-mips/elf.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-mips/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -107,6 +107,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x00400000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (27 - PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (27 - PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (27 - PAGE_SHIFT)
++#endif
++
+ #ifdef __KERNEL__
+ #define SET_PERSONALITY(ex, ibcs2) set_personality((ibcs2)?PER_SVR4:PER_LINUX)
+ #endif
+diff -Nurp linux-2.4.25/include/asm-mips/page.h linux-2.4.25-pax/include/asm-mips/page.h
+--- linux-2.4.25/include/asm-mips/page.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-mips/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -135,6 +135,15 @@ static __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
+ #define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
+
+diff -Nurp linux-2.4.25/include/asm-mips64/a.out.h linux-2.4.25-pax/include/asm-mips64/a.out.h
+--- linux-2.4.25/include/asm-mips64/a.out.h 2003-08-25 07:44:44.000000000 -0400
++++ linux-2.4.25-pax/include/asm-mips64/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -26,7 +26,7 @@ struct exec
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
++#define __STACK_TOP (current->thread.mflags & MF_32BIT_ADDR ? TASK_SIZE32 : TASK_SIZE)
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/asm-mips64/elf.h linux-2.4.25-pax/include/asm-mips64/elf.h
+--- linux-2.4.25/include/asm-mips64/elf.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-mips64/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -107,6 +107,17 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_N
+ #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 0x00400000UL : 0x00400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) (((tsk)->thread.mflags & MF_32BIT_ADDR) ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
++#endif
++
+ #ifdef __KERNEL__
+ #define SET_PERSONALITY(ex, ibcs2) \
+ do { current->thread.mflags &= ~MF_ABI_MASK; \
+diff -Nurp linux-2.4.25/include/asm-mips64/page.h linux-2.4.25-pax/include/asm-mips64/page.h
+--- linux-2.4.25/include/asm-mips64/page.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-mips64/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -148,6 +148,15 @@ static __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* defined (__KERNEL__) */
+
+ #endif /* _ASM_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-parisc/a.out.h linux-2.4.25-pax/include/asm-parisc/a.out.h
+--- linux-2.4.25/include/asm-parisc/a.out.h 2000-12-05 15:29:39.000000000 -0500
++++ linux-2.4.25-pax/include/asm-parisc/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -22,7 +22,7 @@ struct exec
+ /* XXX: STACK_TOP actually should be STACK_BOTTOM for parisc.
+ * prumpf */
+
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/asm-parisc/elf.h linux-2.4.25-pax/include/asm-parisc/elf.h
+--- linux-2.4.25/include/asm-parisc/elf.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-parisc/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -135,6 +135,17 @@ struct pt_regs; /* forward declaration..
+
+ #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this CPU supports. This could be done in user space,
+ but it's not easy, and we've already done it here. */
+diff -Nurp linux-2.4.25/include/asm-parisc/mman.h linux-2.4.25-pax/include/asm-parisc/mman.h
+--- linux-2.4.25/include/asm-parisc/mman.h 2000-12-05 15:29:39.000000000 -0500
++++ linux-2.4.25-pax/include/asm-parisc/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -18,6 +18,10 @@
+ #define MAP_NORESERVE 0x4000 /* don't check for reservations */
+ #define MAP_GROWSDOWN 0x8000 /* stack-like segment */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_SYNC 1 /* synchronous memory sync */
+ #define MS_ASYNC 2 /* sync memory asynchronously */
+ #define MS_INVALIDATE 4 /* invalidate the caches */
+diff -Nurp linux-2.4.25/include/asm-parisc/page.h linux-2.4.25-pax/include/asm-parisc/page.h
+--- linux-2.4.25/include/asm-parisc/page.h 2002-11-28 18:53:15.000000000 -0500
++++ linux-2.4.25-pax/include/asm-parisc/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -117,6 +117,15 @@ extern int npmem_ranges;
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _PARISC_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-parisc/pgtable.h linux-2.4.25-pax/include/asm-parisc/pgtable.h
+--- linux-2.4.25/include/asm-parisc/pgtable.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-parisc/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -167,6 +167,17 @@ extern void *vmalloc_start;
+ #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
+ #define PAGE_COPY PAGE_EXECREAD
+ #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
+ #define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
+diff -Nurp linux-2.4.25/include/asm-ppc/a.out.h linux-2.4.25-pax/include/asm-ppc/a.out.h
+--- linux-2.4.25/include/asm-ppc/a.out.h 2003-08-25 07:44:44.000000000 -0400
++++ linux-2.4.25-pax/include/asm-ppc/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -2,7 +2,7 @@
+ #define __PPC_A_OUT_H__
+
+ /* grabbed from the intel stuff */
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+
+ struct exec
+diff -Nurp linux-2.4.25/include/asm-ppc/elf.h linux-2.4.25-pax/include/asm-ppc/elf.h
+--- linux-2.4.25/include/asm-ppc/elf.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-ppc/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -46,6 +46,17 @@ typedef elf_vrreg_t elf_vrregset_t[ELF_N
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 15
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 15
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 15
++#endif
++
+ #define USE_ELF_CORE_DUMP
+ #define ELF_EXEC_PAGESIZE 4096
+
+diff -Nurp linux-2.4.25/include/asm-ppc/mman.h linux-2.4.25-pax/include/asm-ppc/mman.h
+--- linux-2.4.25/include/asm-ppc/mman.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-ppc/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -19,6 +19,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0200
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -Nurp linux-2.4.25/include/asm-ppc/page.h linux-2.4.25-pax/include/asm-ppc/page.h
+--- linux-2.4.25/include/asm-ppc/page.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ppc/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -171,5 +171,14 @@ extern __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+ #endif /* _PPC_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-ppc/pgtable.h linux-2.4.25-pax/include/asm-ppc/pgtable.h
+--- linux-2.4.25/include/asm-ppc/pgtable.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/asm-ppc/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -394,11 +394,21 @@ extern unsigned long vmalloc_start;
+
+ #define PAGE_NONE __pgprot(_PAGE_BASE)
+ #define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW)
+-#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC)
++#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC | _PAGE_HWEXEC)
+ #define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER)
+-#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
++#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC | _PAGE_HWEXEC)
++
++#if defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_40x) && !defined(CONFIG_44x)
++# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_GUARDED)
++# define PAGE_COPY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_GUARDED)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
+
+ #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+ #define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_SHARED)
+@@ -411,21 +421,21 @@ extern unsigned long vmalloc_start;
+ * This is the closest we can get..
+ */
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY_X
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY_X
+-#define __P100 PAGE_READONLY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
++#define __P100 PAGE_READONLY_X
+ #define __P101 PAGE_READONLY_X
+-#define __P110 PAGE_COPY
++#define __P110 PAGE_COPY_X
+ #define __P111 PAGE_COPY_X
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY_X
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED_X
+-#define __S100 PAGE_READONLY
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
++#define __S100 PAGE_READONLY_X
+ #define __S101 PAGE_READONLY_X
+-#define __S110 PAGE_SHARED
++#define __S110 PAGE_SHARED_X
+ #define __S111 PAGE_SHARED_X
+
+ #ifndef __ASSEMBLY__
+diff -Nurp linux-2.4.25/include/asm-sparc/a.out.h linux-2.4.25-pax/include/asm-sparc/a.out.h
+--- linux-2.4.25/include/asm-sparc/a.out.h 2000-01-13 15:03:00.000000000 -0500
++++ linux-2.4.25-pax/include/asm-sparc/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -91,7 +91,7 @@ struct relocation_info /* used when head
+
+ #include <asm/page.h>
+
+-#define STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
++#define __STACK_TOP (PAGE_OFFSET - PAGE_SIZE)
+
+ #endif /* __KERNEL__ */
+
+diff -Nurp linux-2.4.25/include/asm-sparc/elf.h linux-2.4.25-pax/include/asm-sparc/elf.h
+--- linux-2.4.25/include/asm-sparc/elf.h 2000-07-11 22:02:37.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -83,6 +83,17 @@ typedef struct {
+
+ #define ELF_ET_DYN_BASE (0x08000000)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) 0x10000UL
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) 16
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) 16
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) 16
++#endif
++
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. This can NOT be done in userspace
+ on Sparc. */
+diff -Nurp linux-2.4.25/include/asm-sparc/mman.h linux-2.4.25-pax/include/asm-sparc/mman.h
+--- linux-2.4.25/include/asm-sparc/mman.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -24,6 +24,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -Nurp linux-2.4.25/include/asm-sparc/page.h linux-2.4.25-pax/include/asm-sparc/page.h
+--- linux-2.4.25/include/asm-sparc/page.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -182,6 +182,15 @@ extern __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* __KERNEL__ */
+
+ #endif /* _SPARC_PAGE_H */
+diff -Nurp linux-2.4.25/include/asm-sparc/pgtable.h linux-2.4.25-pax/include/asm-sparc/pgtable.h
+--- linux-2.4.25/include/asm-sparc/pgtable.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -97,6 +97,13 @@ BTFIXUPDEF_INT(page_none)
+ BTFIXUPDEF_INT(page_shared)
+ BTFIXUPDEF_INT(page_copy)
+ BTFIXUPDEF_INT(page_readonly)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++BTFIXUPDEF_INT(page_shared_noexec)
++BTFIXUPDEF_INT(page_copy_noexec)
++BTFIXUPDEF_INT(page_readonly_noexec)
++#endif
++
+ BTFIXUPDEF_INT(page_kernel)
+
+ #define PMD_SHIFT BTFIXUP_SIMM13(pmd_shift)
+@@ -118,6 +125,16 @@ BTFIXUPDEF_INT(page_kernel)
+ #define PAGE_COPY __pgprot(BTFIXUP_INT(page_copy))
+ #define PAGE_READONLY __pgprot(BTFIXUP_INT(page_readonly))
+
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot(BTFIXUP_INT(page_shared_noexec))
++# define PAGE_COPY_NOEXEC __pgprot(BTFIXUP_INT(page_copy_noexec))
++# define PAGE_READONLY_NOEXEC __pgprot(BTFIXUP_INT(page_readonly_noexec))
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
++
+ extern unsigned long page_kernel;
+
+ #ifdef MODULE
+diff -Nurp linux-2.4.25/include/asm-sparc/pgtsrmmu.h linux-2.4.25-pax/include/asm-sparc/pgtsrmmu.h
+--- linux-2.4.25/include/asm-sparc/pgtsrmmu.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-sparc/pgtsrmmu.h 2004-02-19 11:12:53.000000000 -0500
+@@ -76,6 +76,16 @@
+ SRMMU_EXEC | SRMMU_REF)
+ #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
+ SRMMU_EXEC | SRMMU_REF)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_WRITE | SRMMU_REF)
++#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_REF)
++#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | \
++ SRMMU_REF)
++#endif
++
+ #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
+ SRMMU_DIRTY | SRMMU_REF)
+
+diff -Nurp linux-2.4.25/include/asm-sparc/uaccess.h linux-2.4.25-pax/include/asm-sparc/uaccess.h
+--- linux-2.4.25/include/asm-sparc/uaccess.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc/uaccess.h 2004-02-19 11:12:53.000000000 -0500
+@@ -39,7 +39,7 @@
+ * No one can read/write anything from userland in the kernel space by setting
+ * large size and address near to PAGE_OFFSET - a fault will break his intentions.
+ */
+-#define __user_ok(addr,size) ((addr) < STACK_TOP)
++#define __user_ok(addr,size) ((addr) < __STACK_TOP)
+ #define __kernel_ok (segment_eq(get_fs(), KERNEL_DS))
+ #define __access_ok(addr,size) (__user_ok((addr) & get_fs().seg,(size)))
+ #define access_ok(type,addr,size) __access_ok((unsigned long)(addr),(size))
+diff -Nurp linux-2.4.25/include/asm-sparc64/a.out.h linux-2.4.25-pax/include/asm-sparc64/a.out.h
+--- linux-2.4.25/include/asm-sparc64/a.out.h 2001-04-27 01:17:26.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc64/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -95,7 +95,7 @@ struct relocation_info /* used when head
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L)
++#define __STACK_TOP (current->thread.flags & SPARC_FLAG_32BIT ? 0xf0000000 : 0x80000000000L)
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/asm-sparc64/elf.h linux-2.4.25-pax/include/asm-sparc64/elf.h
+--- linux-2.4.25/include/asm-sparc64/elf.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc64/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -82,6 +82,16 @@ typedef struct {
+ #define ELF_ET_DYN_BASE 0x0000010000000000UL
+ #endif
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 0x10000UL : 0x100000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) (PAGE_SHIFT + 1)
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 14 : 28 )
++#define PAX_DELTA_EXEC_LSB(tsk) (PAGE_SHIFT + 1)
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 14 : 28 )
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->thread.flags & SPARC_FLAG_32BIT ? 15 : 29 )
++#endif
+
+ /* This yields a mask that user programs can use to figure out what
+ instruction set this cpu supports. */
+diff -Nurp linux-2.4.25/include/asm-sparc64/mman.h linux-2.4.25-pax/include/asm-sparc64/mman.h
+--- linux-2.4.25/include/asm-sparc64/mman.h 2003-06-13 10:51:38.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc64/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -24,6 +24,10 @@
+ #define MAP_DENYWRITE 0x0800 /* ETXTBSY */
+ #define MAP_EXECUTABLE 0x1000 /* mark it as an executable */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x0400
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -Nurp linux-2.4.25/include/asm-sparc64/page.h linux-2.4.25-pax/include/asm-sparc64/page.h
+--- linux-2.4.25/include/asm-sparc64/page.h 2003-08-25 07:44:44.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc64/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -160,6 +160,15 @@ extern __inline__ int get_order(unsigned
+ #define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
++#ifdef CONFIG_PAX_PAGEEXEC
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#endif
++
+ #endif /* !(__KERNEL__) */
+
+ #endif /* !(_SPARC64_PAGE_H) */
+diff -Nurp linux-2.4.25/include/asm-sparc64/pgtable.h linux-2.4.25-pax/include/asm-sparc64/pgtable.h
+--- linux-2.4.25/include/asm-sparc64/pgtable.h 2002-08-02 20:39:45.000000000 -0400
++++ linux-2.4.25-pax/include/asm-sparc64/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -122,7 +122,8 @@
+ #define _PAGE_G 0x0000000000000001 /* Global */
+
+ /* Here are the SpitFire software bits we use in the TTE's. */
+-#define _PAGE_MODIFIED 0x0000000000000800 /* Modified Page (ie. dirty) */
++#define _PAGE_MODIFIED 0x0000000000001000 /* Modified Page (ie. dirty) */
++#define _PAGE_EXEC 0x0000000000000800 /* Executable SW Bit */
+ #define _PAGE_ACCESSED 0x0000000000000400 /* Accessed Page (ie. referenced) */
+ #define _PAGE_READ 0x0000000000000200 /* Readable SW Bit */
+ #define _PAGE_WRITE 0x0000000000000100 /* Writable SW Bit */
+@@ -150,16 +151,30 @@
+
+ /* Don't set the TTE _PAGE_W bit here, else the dirty bit never gets set. */
+ #define PAGE_SHARED __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS | _PAGE_WRITE)
++ __ACCESS_BITS | _PAGE_WRITE | _PAGE_EXEC)
+
+ #define PAGE_COPY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS)
++ __ACCESS_BITS | _PAGE_EXEC)
+
+ #define PAGE_READONLY __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __ACCESS_BITS)
++ __ACCESS_BITS | _PAGE_EXEC)
+
+ #define PAGE_KERNEL __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
+- __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS)
++ __PRIV_BITS | __ACCESS_BITS | __DIRTY_BITS | \
++ _PAGE_EXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++# define PAGE_SHARED_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS | _PAGE_WRITE)
++# define PAGE_COPY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS)
++# define PAGE_READONLY_NOEXEC __pgprot (_PAGE_PRESENT | _PAGE_VALID | _PAGE_CACHE | \
++ __ACCESS_BITS)
++#else
++# define PAGE_SHARED_NOEXEC PAGE_SHARED
++# define PAGE_COPY_NOEXEC PAGE_COPY
++# define PAGE_READONLY_NOEXEC PAGE_READONLY
++#endif
+
+ #define PAGE_INVALID __pgprot (0)
+
+@@ -170,18 +185,18 @@
+ #define pg_iobits (_PAGE_VALID | _PAGE_PRESENT | __DIRTY_BITS | __ACCESS_BITS | _PAGE_E)
+
+ #define __P000 PAGE_NONE
+-#define __P001 PAGE_READONLY
+-#define __P010 PAGE_COPY
+-#define __P011 PAGE_COPY
++#define __P001 PAGE_READONLY_NOEXEC
++#define __P010 PAGE_COPY_NOEXEC
++#define __P011 PAGE_COPY_NOEXEC
+ #define __P100 PAGE_READONLY
+ #define __P101 PAGE_READONLY
+ #define __P110 PAGE_COPY
+ #define __P111 PAGE_COPY
+
+ #define __S000 PAGE_NONE
+-#define __S001 PAGE_READONLY
+-#define __S010 PAGE_SHARED
+-#define __S011 PAGE_SHARED
++#define __S001 PAGE_READONLY_NOEXEC
++#define __S010 PAGE_SHARED_NOEXEC
++#define __S011 PAGE_SHARED_NOEXEC
+ #define __S100 PAGE_READONLY
+ #define __S101 PAGE_READONLY
+ #define __S110 PAGE_SHARED
+diff -Nurp linux-2.4.25/include/asm-x86_64/a.out.h linux-2.4.25-pax/include/asm-x86_64/a.out.h
+--- linux-2.4.25/include/asm-x86_64/a.out.h 2002-11-28 18:53:15.000000000 -0500
++++ linux-2.4.25-pax/include/asm-x86_64/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -23,7 +23,7 @@ struct exec
+
+ #ifdef __KERNEL__
+
+-#define STACK_TOP TASK_SIZE
++#define __STACK_TOP TASK_SIZE
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/asm-x86_64/elf.h linux-2.4.25-pax/include/asm-x86_64/elf.h
+--- linux-2.4.25/include/asm-x86_64/elf.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/asm-x86_64/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -68,6 +68,17 @@ typedef struct user_fxsr_struct elf_fpxr
+
+ #define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+
++#ifdef CONFIG_PAX_ASLR
++#define PAX_ELF_ET_DYN_BASE(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 0x08048000UL : 0x400000UL)
++
++#define PAX_DELTA_MMAP_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_MMAP_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24)
++#define PAX_DELTA_EXEC_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_EXEC_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24)
++#define PAX_DELTA_STACK_LSB(tsk) PAGE_SHIFT
++#define PAX_DELTA_STACK_LEN(tsk) ((tsk)->thread.flags & THREAD_IA32 ? 16 : 24)
++#endif
++
+ /* regs is struct pt_regs, pr_reg is elf_gregset_t (which is
+ now struct_user_regs, they are different). Assumes current is the process
+ getting dumped. */
+diff -Nurp linux-2.4.25/include/asm-x86_64/mman.h linux-2.4.25-pax/include/asm-x86_64/mman.h
+--- linux-2.4.25/include/asm-x86_64/mman.h 2002-11-28 18:53:15.000000000 -0500
++++ linux-2.4.25-pax/include/asm-x86_64/mman.h 2004-02-19 11:12:53.000000000 -0500
+@@ -19,6 +19,10 @@
+ #define MAP_LOCKED 0x2000 /* pages are locked */
+ #define MAP_NORESERVE 0x4000 /* don't check for reservations */
+
++#ifdef CONFIG_PAX_RANDEXEC
++#define MAP_MIRROR 0x8000
++#endif
++
+ #define MS_ASYNC 1 /* sync memory asynchronously */
+ #define MS_INVALIDATE 2 /* invalidate the caches */
+ #define MS_SYNC 4 /* synchronous memory sync */
+diff -Nurp linux-2.4.25/include/asm-x86_64/page.h linux-2.4.25-pax/include/asm-x86_64/page.h
+--- linux-2.4.25/include/asm-x86_64/page.h 2003-08-25 07:44:44.000000000 -0400
++++ linux-2.4.25-pax/include/asm-x86_64/page.h 2004-02-19 11:12:53.000000000 -0500
+@@ -142,6 +142,16 @@ extern __inline__ int get_order(unsigned
+
+ #define __VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
++
++#ifdef CONFIG_PAX_PAGEEXEC
++#define VM_DATA_DEFAULT_FLAGS __VM_DATA_DEFAULT_FLAGS
++#ifdef CONFIG_PAX_MPROTECT
++#define __VM_STACK_FLAGS (((current->flags & PF_PAX_MPROTECT)?0:VM_MAYEXEC) | \
++ ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#else
++#define __VM_STACK_FLAGS (VM_MAYEXEC | ((current->flags & PF_PAX_PAGEEXEC)?0:VM_EXEC))
++#endif
++#else
+ #define __VM_STACK_FLAGS (VM_GROWSDOWN | VM_READ | VM_WRITE | VM_EXEC | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+
+@@ -149,6 +159,7 @@ extern __inline__ int get_order(unsigned
+ ((current->thread.flags & THREAD_IA32) ? vm_data_default_flags32 : \
+ vm_data_default_flags)
+ #define VM_STACK_FLAGS vm_stack_flags
++#endif
+
+ #endif /* __KERNEL__ */
+
+diff -Nurp linux-2.4.25/include/asm-x86_64/pgtable.h linux-2.4.25-pax/include/asm-x86_64/pgtable.h
+--- linux-2.4.25/include/asm-x86_64/pgtable.h 2003-08-25 07:44:44.000000000 -0400
++++ linux-2.4.25-pax/include/asm-x86_64/pgtable.h 2004-02-19 11:12:53.000000000 -0500
+@@ -240,6 +240,8 @@ extern inline void pgd_clear (pgd_t * pg
+ __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED)
+ #define PAGE_EXECONLY PAGE_READONLY_EXEC
+
++#define PAGE_READONLY_NOEXEC PAGE_READONLY
++
+ #define PAGE_LARGE (_PAGE_PSE|_PAGE_PRESENT)
+
+ #define __PAGE_KERNEL \
+diff -Nurp linux-2.4.25/include/linux/a.out.h linux-2.4.25-pax/include/linux/a.out.h
+--- linux-2.4.25/include/linux/a.out.h 2001-11-22 14:46:18.000000000 -0500
++++ linux-2.4.25-pax/include/linux/a.out.h 2004-02-19 11:12:53.000000000 -0500
+@@ -7,6 +7,16 @@
+
+ #include <asm/a.out.h>
+
++#ifdef CONFIG_PAX_RANDUSTACK
++#define __DELTA_STACK (current->mm->delta_stack)
++#else
++#define __DELTA_STACK 0UL
++#endif
++
++#ifndef STACK_TOP
++#define STACK_TOP (__STACK_TOP - __DELTA_STACK)
++#endif
++
+ #endif /* __STRUCT_EXEC_OVERRIDE__ */
+
+ /* these go in the N_MACHTYPE field */
+@@ -37,6 +47,14 @@ enum machine_type {
+ M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
+ };
+
++/* Constants for the N_FLAGS field */
++#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
++#define F_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */
++#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ #if !defined (N_MAGIC)
+ #define N_MAGIC(exec) ((exec).a_info & 0xffff)
+ #endif
+diff -Nurp linux-2.4.25/include/linux/binfmts.h linux-2.4.25-pax/include/linux/binfmts.h
+--- linux-2.4.25/include/linux/binfmts.h 2001-11-22 14:46:19.000000000 -0500
++++ linux-2.4.25-pax/include/linux/binfmts.h 2004-02-19 11:12:53.000000000 -0500
+@@ -59,6 +59,8 @@ extern void compute_creds(struct linux_b
+ extern int do_coredump(long signr, struct pt_regs * regs);
+ extern void set_binfmt(struct linux_binfmt *new);
+
++void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
++void pax_report_insns(void *pc);
+
+ #if 0
+ /* this went away now */
+diff -Nurp linux-2.4.25/include/linux/elf.h linux-2.4.25-pax/include/linux/elf.h
+--- linux-2.4.25/include/linux/elf.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/linux/elf.h 2004-02-19 11:12:53.000000000 -0500
+@@ -34,6 +34,10 @@ typedef __s64 Elf64_Sxword;
+ #define PT_MIPS_REGINFO 0x70000000
+ #define PT_MIPS_OPTIONS 0x70000001
+
++#define PT_LOOS 0x60000000
++#define PT_GNU_STACK (PT_LOOS + 0x474e551)
++#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
++
+ /* Flags in the e_flags field of the header */
+ #define EF_MIPS_NOREORDER 0x00000001
+ #define EF_MIPS_PIC 0x00000002
+@@ -44,6 +48,14 @@ typedef __s64 Elf64_Sxword;
+ #define EF_MIPS_ABI 0x0000f000
+ #define EF_MIPS_ARCH 0xf0000000
+
++/* Constants for the e_flags field */
++#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
++#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
++#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
++#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
++#define EF_PAX_RANDEXEC 16 /* Randomize ET_EXEC base */
++#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
++
+ /* These constants define the different elf file types */
+ #define ET_NONE 0
+ #define ET_REL 1
+@@ -122,6 +134,8 @@ typedef __s64 Elf64_Sxword;
+ #define DT_DEBUG 21
+ #define DT_TEXTREL 22
+ #define DT_JMPREL 23
++#define DT_FLAGS 30
++ #define DF_TEXTREL 0x00000004
+ #define DT_LOPROC 0x70000000
+ #define DT_HIPROC 0x7fffffff
+ #define DT_MIPS_RLD_VERSION 0x70000001
+@@ -458,6 +472,19 @@ typedef struct elf64_hdr {
+ #define PF_W 0x2
+ #define PF_X 0x1
+
++#define PF_PAGEEXEC (1 << 4) /* Enable PAGEEXEC */
++#define PF_NOPAGEEXEC (1 << 5) /* Disable PAGEEXEC */
++#define PF_SEGMEXEC (1 << 6) /* Enable SEGMEXEC */
++#define PF_NOSEGMEXEC (1 << 7) /* Disable SEGMEXEC */
++#define PF_MPROTECT (1 << 8) /* Enable MPROTECT */
++#define PF_NOMPROTECT (1 << 9) /* Disable MPROTECT */
++#define PF_RANDEXEC (1 << 10) /* Enable RANDEXEC */
++#define PF_NORANDEXEC (1 << 11) /* Disable RANDEXEC */
++#define PF_EMUTRAMP (1 << 12) /* Enable EMUTRAMP */
++#define PF_NOEMUTRAMP (1 << 13) /* Disable EMUTRAMP */
++#define PF_RANDMMAP (1 << 14) /* Enable RANDMMAP */
++#define PF_NORANDMMAP (1 << 15) /* Disable RANDMMAP */
++
+ typedef struct elf32_phdr{
+ Elf32_Word p_type;
+ Elf32_Off p_offset;
+@@ -555,6 +582,8 @@ typedef struct elf64_shdr {
+ #define EI_VERSION 6
+ #define EI_PAD 7
+
++#define EI_PAX 14
++
+ #define ELFMAG0 0x7f /* EI_MAG */
+ #define ELFMAG1 'E'
+ #define ELFMAG2 'L'
+@@ -602,6 +631,7 @@ extern Elf32_Dyn _DYNAMIC [];
+ #define elfhdr elf32_hdr
+ #define elf_phdr elf32_phdr
+ #define elf_note elf32_note
++#define elf_dyn Elf32_Dyn
+
+ #else
+
+@@ -609,6 +639,7 @@ extern Elf64_Dyn _DYNAMIC [];
+ #define elfhdr elf64_hdr
+ #define elf_phdr elf64_phdr
+ #define elf_note elf64_note
++#define elf_dyn Elf64_Dyn
+
+ #endif
+
+diff -Nurp linux-2.4.25/include/linux/mm.h linux-2.4.25-pax/include/linux/mm.h
+--- linux-2.4.25/include/linux/mm.h 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/include/linux/mm.h 2004-02-19 11:12:53.000000000 -0500
+@@ -25,6 +25,7 @@ extern struct list_head inactive_list;
+ #include <asm/page.h>
+ #include <asm/pgtable.h>
+ #include <asm/atomic.h>
++#include <asm/mman.h>
+
+ /*
+ * Linux kernel virtual memory manager primitives.
+@@ -104,9 +105,29 @@ struct vm_area_struct {
+ #define VM_DONTEXPAND 0x00040000 /* Cannot expand with mremap() */
+ #define VM_RESERVED 0x00080000 /* Don't unmap it from swap_out */
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++#define VM_MIRROR 0x00100000 /* vma is mirroring another */
++#endif
++
++#ifdef CONFIG_PAX_MPROTECT
++#define VM_MAYNOTWRITE 0x00200000 /* vma cannot be granted VM_WRITE any more */
++#endif
++
++#ifdef __VM_STACK_FLAGS
++#ifdef ARCH_STACK_GROWSUP
++#define VM_STACK_FLAGS (0x00000233 | __VM_STACK_FLAGS)
++#else
++#define VM_STACK_FLAGS (0x00000133 | __VM_STACK_FLAGS)
++#endif
++#endif
++
+ #ifndef VM_STACK_FLAGS
++#ifdef ARCH_STACK_GROWSUP
++#define VM_STACK_FLAGS 0x00000277
++#else
+ #define VM_STACK_FLAGS 0x00000177
+ #endif
++#endif
+
+ #define VM_READHINTMASK (VM_SEQ_READ | VM_RAND_READ)
+ #define VM_ClearReadHint(v) (v)->vm_flags &= ~VM_READHINTMASK
+@@ -556,21 +577,50 @@ extern unsigned long do_mmap_pgoff(struc
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long pgoff);
+
++extern int do_munmap(struct mm_struct *, unsigned long, size_t);
++
+ static inline unsigned long do_mmap(struct file *file, unsigned long addr,
+ unsigned long len, unsigned long prot,
+ unsigned long flag, unsigned long offset)
+ {
+ unsigned long ret = -EINVAL;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || (addr && addr > SEGMEXEC_TASK_SIZE-len)))
++ goto out;
++#endif
++
+ if ((offset + PAGE_ALIGN(len)) < offset)
+ goto out;
+ if (!(offset & ~PAGE_MASK))
+ ret = do_mmap_pgoff(file, addr, len, prot, flag, offset >> PAGE_SHIFT);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++#define BAD_ADDR(x) ((unsigned long)(x) > TASK_SIZE)
++ if ((current->flags & PF_PAX_SEGMEXEC) && !BAD_ADDR(ret) && ((flag & MAP_TYPE) == MAP_PRIVATE)
++
++#ifdef CONFIG_PAX_MPROTECT
++ && (!(current->flags & PF_PAX_MPROTECT) || ((prot & PROT_EXEC) && file && !(prot & PROT_WRITE)))
++#endif
++
++ )
++ {
++ unsigned long ret_m;
++ prot = prot & PROT_EXEC ? prot : PROT_NONE;
++ ret_m = do_mmap_pgoff(NULL, ret + SEGMEXEC_TASK_SIZE, 0UL, prot, flag | MAP_MIRROR | MAP_FIXED, ret);
++ if (BAD_ADDR(ret_m)) {
++ do_munmap(current->mm, ret, len);
++ ret = ret_m;
++ }
++ }
++#undef BAD_ADDR
++#endif
++
+ out:
+ return ret;
+ }
+
+-extern int do_munmap(struct mm_struct *, unsigned long, size_t);
+-
+ extern unsigned long do_brk(unsigned long, unsigned long);
+
+ static inline void __vma_unlink(struct mm_struct * mm, struct vm_area_struct * vma, struct vm_area_struct * prev)
+@@ -583,6 +633,12 @@ static inline void __vma_unlink(struct m
+
+ static inline int can_vma_merge(struct vm_area_struct * vma, unsigned long vm_flags)
+ {
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((vma->vm_flags | vm_flags) & VM_MIRROR)
++ return 0;
++#endif
++
+ if (!vma->vm_file && vma->vm_flags == vm_flags)
+ return 1;
+ else
+@@ -636,7 +692,12 @@ static inline unsigned int pf_gfp_mask(u
+
+ return gfp_mask;
+ }
+-
++
++/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
++extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
++extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
++ struct vm_area_struct **pprev);
++
+ /* vma is the first one with address < vma->vm_end,
+ * and even address < vma->vm_start. Have to extend vma. */
+ static inline int expand_stack(struct vm_area_struct * vma, unsigned long address)
+@@ -651,8 +712,43 @@ static inline int expand_stack(struct vm
+ address &= PAGE_MASK;
+ spin_lock(&vma->vm_mm->page_table_lock);
+ grow = (vma->vm_start - address) >> PAGE_SHIFT;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m;
++ unsigned long address_m;
++
++ address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(vma->vm_mm, address_m);
++ if (!vma_m || vma_m->vm_start != address_m || !(vma_m->vm_flags & VM_MIRROR) ||
++ vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start) {
++ spin_unlock(&vma->vm_mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: expand bug, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return -ENOMEM;
++ }
++
++ address_m = address + (unsigned long)vma->vm_private_data;
++ if (vma_m->vm_end - address_m > current->rlim[RLIMIT_STACK].rlim_cur ||
++ ((vma_m->vm_mm->total_vm + 2*grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma_m->vm_flags & VM_LOCKED) &&
++ ((vma_m->vm_mm->locked_vm + 2*grow) << PAGE_SHIFT) > current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
++ spin_unlock(&vma->vm_mm->page_table_lock);
++ return -ENOMEM;
++ }
++
++ vma_m->vm_start = address_m;
++ vma_m->vm_pgoff -= grow;
++ vma_m->vm_mm->total_vm += grow;
++ if (vma_m->vm_flags & VM_LOCKED)
++ vma_m->vm_mm->locked_vm += grow;
++ } else
++#endif
++
+ if (vma->vm_end - address > current->rlim[RLIMIT_STACK].rlim_cur ||
+- ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur) {
++ ((vma->vm_mm->total_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_AS].rlim_cur ||
++ ((vma->vm_flags & VM_LOCKED) &&
++ ((vma->vm_mm->locked_vm + grow) << PAGE_SHIFT) > current->rlim[RLIMIT_MEMLOCK].rlim_cur)) {
+ spin_unlock(&vma->vm_mm->page_table_lock);
+ return -ENOMEM;
+ }
+@@ -665,11 +761,6 @@ static inline int expand_stack(struct vm
+ return 0;
+ }
+
+-/* Look up the first VMA which satisfies addr < vm_end, NULL if none. */
+-extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long addr);
+-extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
+- struct vm_area_struct **pprev);
+-
+ /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
+ NULL if none. Assume start_addr < end_addr. */
+ static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
+diff -Nurp linux-2.4.25/include/linux/random.h linux-2.4.25-pax/include/linux/random.h
+--- linux-2.4.25/include/linux/random.h 2000-01-25 17:13:46.000000000 -0500
++++ linux-2.4.25-pax/include/linux/random.h 2004-02-19 11:12:53.000000000 -0500
+@@ -72,6 +72,8 @@ extern __u32 secure_tcpv6_sequence_numbe
+
+ extern __u32 secure_ipv6_id(__u32 *daddr);
+
++extern unsigned long pax_get_random_long(void);
++
+ #ifndef MODULE
+ extern struct file_operations random_fops, urandom_fops;
+ #endif
+diff -Nurp linux-2.4.25/include/linux/sched.h linux-2.4.25-pax/include/linux/sched.h
+--- linux-2.4.25/include/linux/sched.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/linux/sched.h 2004-02-19 11:12:53.000000000 -0500
+@@ -231,6 +231,21 @@ struct mm_struct {
+
+ /* Architecture-specific MM context */
+ mm_context_t context;
++
++#ifdef CONFIG_PAX_DLRESOLVE
++ unsigned long call_dl_resolve;
++#endif
++
++#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
++ unsigned long call_syscall;
++#endif
++
++#ifdef CONFIG_PAX_ASLR
++ unsigned long delta_mmap; /* randomized offset */
++ unsigned long delta_exec; /* randomized offset */
++ unsigned long delta_stack; /* randomized offset */
++#endif
++
+ };
+
+ extern int mmlist_nr;
+@@ -436,6 +451,29 @@ struct task_struct {
+
+ #define PF_USEDFPU 0x00100000 /* task used FPU this quantum (SMP) */
+
++#define PF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
++#define PF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
++#define PF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
++#define PF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
++#define PF_PAX_RANDEXEC 0x10000000 /* Randomize ET_EXEC base */
++#define PF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
++
++#ifdef CONFIG_PAX_SOFTMODE
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++extern unsigned int pax_aslr;
++#endif
++
++extern unsigned int pax_softmode;
++#endif
++
++extern int pax_check_flags(unsigned long *);
++
++#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
++extern void pax_set_flags(struct linux_binprm * bprm);
++#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
++extern void (*pax_set_flags_func)(struct linux_binprm * bprm);
++#endif
++
+ /*
+ * Ptrace flags
+ */
+diff -Nurp linux-2.4.25/include/linux/sysctl.h linux-2.4.25-pax/include/linux/sysctl.h
+--- linux-2.4.25/include/linux/sysctl.h 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/include/linux/sysctl.h 2004-02-19 11:12:53.000000000 -0500
+@@ -128,8 +128,19 @@ enum
+ KERN_PPC_L3CR=57, /* l3cr register on PPC */
+ KERN_EXCEPTION_TRACE=58, /* boolean: exception trace */
+ KERN_CORE_SETUID=59, /* int: set to allow core dumps of setuid apps */
++
++#ifdef CONFIG_PAX_SOFTMODE
++ KERN_PAX=69, /* PaX control */
++#endif
++
+ };
+
++#ifdef CONFIG_PAX_SOFTMODE
++enum {
++ PAX_ASLR=1, /* PaX: disable/enable all randomization features */
++ PAX_SOFTMODE=2 /* PaX: disable/enable soft mode */
++};
++#endif
+
+ /* CTL_VM names: */
+ enum
+diff -Nurp linux-2.4.25/kernel/sysctl.c linux-2.4.25-pax/kernel/sysctl.c
+--- linux-2.4.25/kernel/sysctl.c 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/kernel/sysctl.c 2004-02-19 11:12:53.000000000 -0500
+@@ -127,6 +127,17 @@ static ctl_table debug_table[];
+ static ctl_table dev_table[];
+ extern ctl_table random_table[];
+
++#ifdef CONFIG_PAX_SOFTMODE
++static ctl_table pax_table[] = {
++
++#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK) || defined(CONFIG_PAX_RANDKSTACK)
++ {PAX_ASLR, "aslr", &pax_aslr, sizeof(unsigned int), 0600, NULL, &proc_dointvec},
++#endif
++
++ {PAX_SOFTMODE, "softmode", &pax_softmode, sizeof(unsigned int), 0600, NULL, &proc_dointvec}
++};
++#endif
++
+ /* /proc declarations: */
+
+ #ifdef CONFIG_PROC_FS
+@@ -275,6 +286,11 @@ static ctl_table kern_table[] = {
+ {KERN_EXCEPTION_TRACE,"exception-trace",
+ &exception_trace,sizeof(int),0644,NULL,&proc_dointvec},
+ #endif
++
++#ifdef CONFIG_PAX_SOFTMODE
++ {KERN_PAX,"pax",NULL,0,0500,pax_table},
++#endif
++
+ {0}
+ };
+
+diff -Nurp linux-2.4.25/mm/filemap.c linux-2.4.25-pax/mm/filemap.c
+--- linux-2.4.25/mm/filemap.c 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/mm/filemap.c 2004-02-19 11:12:53.000000000 -0500
+@@ -2324,6 +2324,12 @@ int generic_file_mmap(struct file * file
+ }
+ if (!mapping->a_ops->readpage)
+ return -ENOEXEC;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (current->flags & PF_PAX_PAGEEXEC)
++ vma->vm_page_prot = protection_map[vma->vm_flags & 0x0f];
++#endif
++
+ UPDATE_ATIME(inode);
+ vma->vm_ops = &generic_file_vm_ops;
+ return 0;
+@@ -2553,8 +2559,42 @@ static long madvise_fixup_middle(struct
+ * We can potentially split a vm area into separate
+ * areas, each area with its own behavior.
+ */
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static long __madvise_behavior(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, int behavior);
++
++static long madvise_behavior(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, int behavior)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m, * prev_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + (unsigned long)vma->vm_private_data;
++ end_m = end + (unsigned long)vma->vm_private_data;
++ error = __madvise_behavior(vma_m, start_m, end_m, behavior);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: madvise bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++
++ return __madvise_behavior(vma, start, end, behavior);
++}
++
++static long __madvise_behavior(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, int behavior)
++#else
+ static long madvise_behavior(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, int behavior)
++#endif
+ {
+ int error = 0;
+
+diff -Nurp linux-2.4.25/mm/memory.c linux-2.4.25-pax/mm/memory.c
+--- linux-2.4.25/mm/memory.c 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/mm/memory.c 2004-02-19 11:12:53.000000000 -0500
+@@ -925,6 +925,65 @@ static inline void break_cow(struct vm_a
+ establish_pte(vma, address, page_table, pte_mkwrite(pte_mkdirty(mk_pte(new_page, vma->vm_page_prot))));
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++/* PaX: if vma is mirrored, synchronize the mirror's PTE
++ *
++ * mm->page_table_lock is held on entry and is not released on exit or inside
++ * to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
++ */
++static void pax_mirror_fault(struct mm_struct *mm, struct vm_area_struct * vma,
++ unsigned long address, pte_t *pte)
++{
++ unsigned long address_m;
++ struct vm_area_struct * vma_m = NULL;
++ pte_t * pte_m, entry_m;
++ struct page * page_m;
++
++ if (!(vma->vm_flags & VM_MIRROR))
++ return;
++
++ address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, address_m);
++ if (!vma_m || vma_m->vm_start != address_m)
++ return;
++
++ address_m = address + (unsigned long)vma->vm_private_data;
++
++ {
++ pgd_t *pgd_m;
++ pmd_t *pmd_m;
++
++ pgd_m = pgd_offset(mm, address_m);
++ pmd_m = pmd_offset(pgd_m, address_m);
++ pte_m = pte_offset(pmd_m, address_m);
++ }
++
++ if (pte_present(*pte_m)) {
++ flush_cache_page(vma_m, address_m);
++ flush_icache_page(vma_m, pte_page(*pte_m));
++ }
++ entry_m = ptep_get_and_clear(pte_m);
++ if (pte_present(entry_m))
++ flush_tlb_page(vma_m, address_m);
++
++ if (pte_none(entry_m)) {
++ ++mm->rss;
++ } else if (pte_present(entry_m)) {
++ page_cache_release(pte_page(entry_m));
++ } else {
++ free_swap_and_cache(pte_to_swp_entry(entry_m));
++ ++mm->rss;
++ }
++
++ page_m = pte_page(*pte);
++ page_cache_get(page_m);
++ entry_m = mk_pte(page_m, vma_m->vm_page_prot);
++ if (pte_write(*pte))
++ entry_m = pte_mkdirty(pte_mkwrite(entry_m));
++ establish_pte(vma_m, address_m, pte_m, entry_m);
++}
++#endif
++
+ /*
+ * This routine handles present pages, when users try to write
+ * to a shared page. It is done by copying the page to a new address
+@@ -988,6 +1047,11 @@ static int do_wp_page(struct mm_struct *
+
+ /* Free the old page.. */
+ new_page = old_page;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pax_mirror_fault(mm, vma, address, page_table);
++#endif
++
+ }
+ spin_unlock(&mm->page_table_lock);
+ page_cache_release(new_page);
+@@ -1178,6 +1242,11 @@ static int do_swap_page(struct mm_struct
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, address, pte);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pax_mirror_fault(mm, vma, address, page_table);
++#endif
++
+ spin_unlock(&mm->page_table_lock);
+ return ret;
+ }
+@@ -1223,6 +1292,11 @@ static int do_anonymous_page(struct mm_s
+
+ /* No need to invalidate - it was non-present before */
+ update_mmu_cache(vma, addr, entry);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pax_mirror_fault(mm, vma, addr, page_table);
++#endif
++
+ spin_unlock(&mm->page_table_lock);
+ return 1; /* Minor fault */
+
+@@ -1304,6 +1378,11 @@ static int do_no_page(struct mm_struct *
+
+ /* no need to invalidate: a not-present page shouldn't be cached */
+ update_mmu_cache(vma, address, entry);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ pax_mirror_fault(mm, vma, address, page_table);
++#endif
++
+ spin_unlock(&mm->page_table_lock);
+ return 2; /* Major fault */
+ }
+@@ -1368,6 +1447,11 @@ int handle_mm_fault(struct mm_struct *mm
+ pgd_t *pgd;
+ pmd_t *pmd;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ unsigned long address_m = 0UL;
++ struct vm_area_struct * vma_m = NULL;
++#endif
++
+ current->state = TASK_RUNNING;
+ pgd = pgd_offset(mm, address);
+
+@@ -1376,6 +1460,47 @@ int handle_mm_fault(struct mm_struct *mm
+ * and the SMP-safe atomic PTE updates.
+ */
+ spin_lock(&mm->page_table_lock);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (vma->vm_flags & VM_MIRROR) {
++ pgd_t *pgd_m;
++ pmd_t *pmd_m;
++ pte_t *pte_m;
++
++ address_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(mm, address_m);
++
++ /* PaX: sanity checks */
++ if (!vma_m) {
++ spin_unlock(&mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: fault bug, %08lx, %p, %08lx, %p\n",
++ address, vma, address_m, vma_m);
++ return 0;
++ } else if (!(vma_m->vm_flags & VM_MIRROR) ||
++ vma_m->vm_start != address_m ||
++ vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start)
++ {
++ spin_unlock(&mm->page_table_lock);
++ printk(KERN_ERR "PAX: VMMIRROR: fault bug2, %08lx, %08lx, %08lx, %08lx, %08lx\n",
++ address, vma->vm_start, vma_m->vm_start, vma->vm_end, vma_m->vm_end);
++ return 0;
++ }
++
++ address_m = address + (unsigned long)vma->vm_private_data;
++ pgd_m = pgd_offset(mm, address_m);
++ pmd_m = pmd_alloc(mm, pgd_m, address_m);
++ if (!pmd_m) {
++ spin_unlock(&mm->page_table_lock);
++ return -1;
++ }
++ pte_m = pte_alloc(mm, pmd_m, address_m);
++ if (!pte_m) {
++ spin_unlock(&mm->page_table_lock);
++ return -1;
++ }
++ }
++#endif
++
+ pmd = pmd_alloc(mm, pgd, address);
+
+ if (pmd) {
+diff -Nurp linux-2.4.25/mm/mlock.c linux-2.4.25-pax/mm/mlock.c
+--- linux-2.4.25/mm/mlock.c 2001-09-17 18:30:23.000000000 -0400
++++ linux-2.4.25-pax/mm/mlock.c 2004-02-19 11:12:53.000000000 -0500
+@@ -114,8 +114,40 @@ static inline int mlock_fixup_middle(str
+ return 0;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static int __mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags);
++
++static int mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma(vma->vm_mm, start_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + (unsigned long)vma->vm_private_data;
++ end_m = end + (unsigned long)vma->vm_private_data;
++ error = __mlock_fixup(vma_m, start_m, end_m, newflags);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: mlock bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++ return __mlock_fixup(vma, start, end, newflags);
++}
++
++static int __mlock_fixup(struct vm_area_struct * vma,
++ unsigned long start, unsigned long end, unsigned int newflags)
++#else
+ static int mlock_fixup(struct vm_area_struct * vma,
+ unsigned long start, unsigned long end, unsigned int newflags)
++#endif
+ {
+ int pages, retval;
+
+@@ -159,6 +191,17 @@ static int do_mlock(unsigned long start,
+ return -EINVAL;
+ if (end == start)
+ return 0;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ vma = find_vma(current->mm, start);
+ if (!vma || vma->vm_start > start)
+ return -ENOMEM;
+@@ -253,6 +296,16 @@ static int do_mlockall(int flags)
+ for (vma = current->mm->mmap; vma ; vma = vma->vm_next) {
+ unsigned int newflags;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (vma->vm_end > SEGMEXEC_TASK_SIZE)
++ break;
++ } else
++#endif
++
++ if (vma->vm_end > TASK_SIZE)
++ break;
++
+ newflags = vma->vm_flags | VM_LOCKED;
+ if (!(flags & MCL_CURRENT))
+ newflags &= ~VM_LOCKED;
+diff -Nurp linux-2.4.25/mm/mmap.c linux-2.4.25-pax/mm/mmap.c
+--- linux-2.4.25/mm/mmap.c 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/mm/mmap.c 2004-02-19 11:12:53.000000000 -0500
+@@ -206,6 +206,11 @@ static inline unsigned long calc_vm_flag
+ _trans(prot, PROT_WRITE, VM_WRITE) |
+ _trans(prot, PROT_EXEC, VM_EXEC);
+ flag_bits =
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ _trans(flags, MAP_MIRROR, VM_MIRROR) |
++#endif
++
+ _trans(flags, MAP_GROWSDOWN, VM_GROWSDOWN) |
+ _trans(flags, MAP_DENYWRITE, VM_DENYWRITE) |
+ _trans(flags, MAP_EXECUTABLE, VM_EXECUTABLE);
+@@ -401,6 +406,28 @@ unsigned long do_mmap_pgoff(struct file
+ int error;
+ rb_node_t ** rb_link, * rb_parent;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct * vma_m = NULL;
++
++ if (flags & MAP_MIRROR) {
++ /* PaX: sanity checks, to be removed when proved to be stable */
++ if (file || len || ((flags & MAP_TYPE) != MAP_PRIVATE))
++ return -EINVAL;
++
++ vma_m = find_vma(mm, pgoff);
++
++ if (!vma_m ||
++ vma_m->vm_start != pgoff ||
++ (vma_m->vm_flags & VM_MIRROR) ||
++ (!(vma_m->vm_flags & VM_WRITE) && (prot & PROT_WRITE)))
++ return -EINVAL;
++
++ file = vma_m->vm_file;
++ pgoff = vma_m->vm_pgoff;
++ len = vma_m->vm_end - vma_m->vm_start;
++ }
++#endif
++
+ if (file) {
+ if (!file->f_op || !file->f_op->mmap)
+ return -ENODEV;
+@@ -438,6 +465,27 @@ unsigned long do_mmap_pgoff(struct file
+ */
+ vm_flags = calc_vm_flags(prot,flags) | mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->flags & PF_PAX_MPROTECT) {
++ if (!file || (prot & PROT_WRITE))
++ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
++ else
++ vm_flags &= ~VM_MAYWRITE;
++
++#ifdef CONFIG_PAX_RANDEXEC
++ if (file && (flags & MAP_MIRROR) && (vm_flags & VM_EXEC))
++ vma_m->vm_flags &= ~VM_MAYWRITE;
++#endif
++
++ }
++#endif
++
++ }
++#endif
++
+ /* mlock MCL_FUTURE? */
+ if (vm_flags & VM_LOCKED) {
+ unsigned long locked = mm->locked_vm << PAGE_SHIFT;
+@@ -523,6 +571,13 @@ munmap_back:
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = vm_flags;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if ((file || !(current->flags & PF_PAX_PAGEEXEC)) && (vm_flags & (VM_READ|VM_WRITE)))
++ vma->vm_page_prot = protection_map[(vm_flags | VM_EXEC) & 0x0f];
++ else
++#endif
++
+ vma->vm_page_prot = protection_map[vm_flags & 0x0f];
+ vma->vm_ops = NULL;
+ vma->vm_pgoff = pgoff;
+@@ -551,6 +606,14 @@ munmap_back:
+ goto free_vma;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (flags & MAP_MIRROR) {
++ vma_m->vm_flags |= VM_MIRROR;
++ vma_m->vm_private_data = (void *)(vma->vm_start - vma_m->vm_start);
++ vma->vm_private_data = (void *)(vma_m->vm_start - vma->vm_start);
++ }
++#endif
++
+ /* Can addr have changed??
+ *
+ * Answer: Yes, several device drivers can do it in their
+@@ -622,20 +685,49 @@ static inline unsigned long arch_get_unm
+ {
+ struct vm_area_struct *vma;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && len > SEGMEXEC_TASK_SIZE)
++ return -ENOMEM;
++ else
++#endif
++
+ if (len > TASK_SIZE)
+ return -ENOMEM;
+
++#ifdef CONFIG_PAX_RANDMMAP
++ if (!(current->flags & PF_PAX_RANDMMAP) || !filp)
++#endif
++
+ if (addr) {
+ addr = PAGE_ALIGN(addr);
+ vma = find_vma(current->mm, addr);
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE-len < addr)
++ return -ENOMEM;
++#endif
++
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vma->vm_start))
+ return addr;
+ }
+ addr = PAGE_ALIGN(TASK_UNMAPPED_BASE);
+
++#ifdef CONFIG_PAX_RANDMMAP
++ /* PaX: randomize base address if requested */
++ if (current->flags & PF_PAX_RANDMMAP)
++ addr += current->mm->delta_mmap;
++#endif
++
+ for (vma = find_vma(current->mm, addr); ; vma = vma->vm_next) {
+ /* At this point: (!vma || addr < vma->vm_end). */
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE-len < addr)
++ return -ENOMEM;
++ else
++#endif
++
+ if (TASK_SIZE - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+@@ -922,6 +1014,83 @@ no_mmaps:
+ }
+ }
+
++static inline struct vm_area_struct *unmap_vma(struct mm_struct *mm,
++ unsigned long addr, size_t len, struct vm_area_struct *mpnt,
++ struct vm_area_struct *extra)
++{
++ unsigned long st, end, size;
++ struct file *file = NULL;
++
++ st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
++ end = addr+len;
++ end = end > mpnt->vm_end ? mpnt->vm_end : end;
++ size = end - st;
++
++ if (mpnt->vm_flags & VM_DENYWRITE &&
++ (st != mpnt->vm_start || end != mpnt->vm_end) &&
++ (file = mpnt->vm_file) != NULL) {
++ atomic_dec(&file->f_dentry->d_inode->i_writecount);
++ }
++ remove_shared_vm_struct(mpnt);
++ zap_page_range(mm, st, size);
++
++ /*
++ * Fix the mapping, and free the old area if it wasn't reused.
++ */
++ extra = unmap_fixup(mm, mpnt, st, size, extra);
++ if (file)
++ atomic_inc(&file->f_dentry->d_inode->i_writecount);
++ return extra;
++}
++
++static struct vm_area_struct *unmap_vma_list(struct mm_struct *mm,
++ unsigned long addr, size_t len, struct vm_area_struct *free,
++ struct vm_area_struct *extra, struct vm_area_struct *prev)
++{
++ struct vm_area_struct *mpnt;
++
++ /* Ok - we have the memory areas we should free on the 'free' list,
++ * so release them, and unmap the page range..
++ * If the one of the segments is only being partially unmapped,
++ * it will put new vm_area_struct(s) into the address space.
++ * In that case we have to be careful with VM_DENYWRITE.
++ */
++ while ((mpnt = free) != NULL) {
++ free = free->vm_next;
++ extra = unmap_vma(mm, addr, len, mpnt, extra);
++ }
++
++ free_pgtables(mm, prev, addr, addr+len);
++
++ return extra;
++}
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static struct vm_area_struct *unmap_vma_mirror_list(struct mm_struct *mm,
++ unsigned long addr, size_t len, struct vm_area_struct *free_m,
++ struct vm_area_struct *extra_m)
++{
++ struct vm_area_struct *mpnt, *prev;
++
++ while ((mpnt = free_m) != NULL) {
++ unsigned long addr_m, start, end;
++
++ free_m = free_m->vm_next;
++
++ addr_m = addr - (unsigned long)mpnt->vm_private_data;
++ start = addr_m < mpnt->vm_start ? mpnt->vm_start : addr_m;
++ end = addr_m+len;
++ end = end > mpnt->vm_end ? mpnt->vm_end : end;
++ find_vma_prev(mm, mpnt->vm_start, &prev);
++ extra_m = unmap_vma(mm, addr_m, len, mpnt, extra_m);
++
++ free_pgtables(mm, prev, start, end);
++ }
++
++ return extra_m;
++}
++#endif
++
+ /* Munmap is split into 2 main parts -- this part which finds
+ * what needs doing, and the areas themselves, which do the
+ * work. This now handles partial unmappings.
+@@ -931,6 +1100,10 @@ int do_munmap(struct mm_struct *mm, unsi
+ {
+ struct vm_area_struct *mpnt, *prev, **npp, *free, *extra;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ struct vm_area_struct *free_m, *extra_m;
++#endif
++
+ if ((addr & ~PAGE_MASK) || addr > TASK_SIZE || len > TASK_SIZE-addr)
+ return -EINVAL;
+
+@@ -963,60 +1136,69 @@ int do_munmap(struct mm_struct *mm, unsi
+ if (!extra)
+ return -ENOMEM;
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) {
++ extra_m = kmem_cache_alloc(vm_area_cachep, SLAB_KERNEL);
++ if (!extra_m) {
++ kmem_cache_free(vm_area_cachep, extra);
++ return -ENOMEM;
++ }
++ } else
++ extra_m = NULL;
++
++ free_m = NULL;
++#endif
++
+ npp = (prev ? &prev->vm_next : &mm->mmap);
+ free = NULL;
+ spin_lock(&mm->page_table_lock);
+ for ( ; mpnt && mpnt->vm_start < addr+len; mpnt = *npp) {
++ mm->map_count--;
+ *npp = mpnt->vm_next;
+ mpnt->vm_next = free;
+ free = mpnt;
+ rb_erase(&mpnt->vm_rb, &mm->mm_rb);
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (free->vm_flags & VM_MIRROR) {
++ struct vm_area_struct *mpnt_m, *prev_m, **npp_m;
++ unsigned long addr_m = free->vm_start + (unsigned long)free->vm_private_data;
++
++ mm->mmap_cache = NULL; /* Kill the cache. */
++ mpnt_m = find_vma_prev(mm, addr_m, &prev_m);
++ if (mpnt_m && mpnt_m->vm_start == addr_m && (mpnt_m->vm_flags & VM_MIRROR)) {
++ mm->map_count--;
++ npp_m = (prev_m ? &prev_m->vm_next : &mm->mmap);
++ *npp_m = mpnt_m->vm_next;
++ mpnt_m->vm_next = free_m;
++ free_m = mpnt_m;
++ rb_erase(&mpnt_m->vm_rb, &mm->mm_rb);
++ } else
++ printk("PAX: VMMIRROR: munmap bug in %s, %08lx\n", current->comm, free->vm_start);
++ }
++#endif
++
+ }
+ mm->mmap_cache = NULL; /* Kill the cache. */
+ spin_unlock(&mm->page_table_lock);
+
+- /* Ok - we have the memory areas we should free on the 'free' list,
+- * so release them, and unmap the page range..
+- * If the one of the segments is only being partially unmapped,
+- * it will put new vm_area_struct(s) into the address space.
+- * In that case we have to be careful with VM_DENYWRITE.
+- */
+- while ((mpnt = free) != NULL) {
+- unsigned long st, end, size;
+- struct file *file = NULL;
+-
+- free = free->vm_next;
++ extra = unmap_vma_list(mm, addr, len, free, extra, prev);
+
+- st = addr < mpnt->vm_start ? mpnt->vm_start : addr;
+- end = addr+len;
+- end = end > mpnt->vm_end ? mpnt->vm_end : end;
+- size = end - st;
+-
+- if (mpnt->vm_flags & VM_DENYWRITE &&
+- (st != mpnt->vm_start || end != mpnt->vm_end) &&
+- (file = mpnt->vm_file) != NULL) {
+- atomic_dec(&file->f_dentry->d_inode->i_writecount);
+- }
+- remove_shared_vm_struct(mpnt);
+- mm->map_count--;
+-
+- zap_page_range(mm, st, size);
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ extra_m = unmap_vma_mirror_list(mm, addr, len, free_m, extra_m);
++#endif
+
+- /*
+- * Fix the mapping, and free the old area if it wasn't reused.
+- */
+- extra = unmap_fixup(mm, mpnt, st, size, extra);
+- if (file)
+- atomic_inc(&file->f_dentry->d_inode->i_writecount);
+- }
+ validate_mm(mm);
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if (extra_m)
++ kmem_cache_free(vm_area_cachep, extra_m);
++#endif
++
+ /* Release the extra vma struct if it wasn't used */
+ if (extra)
+ kmem_cache_free(vm_area_cachep, extra);
+
+- free_pgtables(mm, prev, addr, addr+len);
+-
+ return 0;
+ }
+
+@@ -1025,6 +1207,12 @@ asmlinkage long sys_munmap(unsigned long
+ int ret;
+ struct mm_struct *mm = current->mm;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if ((current->flags & PF_PAX_SEGMEXEC) &&
++ (len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-len))
++ return -EINVAL;
++#endif
++
+ down_write(&mm->mmap_sem);
+ ret = do_munmap(mm, addr, len);
+ up_write(&mm->mmap_sem);
+@@ -1047,6 +1235,13 @@ unsigned long do_brk(unsigned long addr,
+ if (!len)
+ return addr;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if ((addr + len) > SEGMEXEC_TASK_SIZE || (addr + len) < addr)
++ return -EINVAL;
++ } else
++#endif
++
+ if ((addr + len) > TASK_SIZE || (addr + len) < addr)
+ return -EINVAL;
+
+@@ -1084,6 +1279,18 @@ unsigned long do_brk(unsigned long addr,
+
+ flags = VM_DATA_DEFAULT_FLAGS | mm->def_flags;
+
++#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
++ if (current->flags & (PF_PAX_PAGEEXEC | PF_PAX_SEGMEXEC)) {
++ flags &= ~VM_EXEC;
++
++#ifdef CONFIG_PAX_MPROTECT
++ if (current->flags & PF_PAX_MPROTECT)
++ flags &= ~VM_MAYEXEC;
++#endif
++
++ }
++#endif
++
+ /* Can we just expand an old anonymous mapping? */
+ if (rb_parent && vma_merge(mm, prev, rb_parent, addr, addr + len, flags))
+ goto out;
+@@ -1099,6 +1306,13 @@ unsigned long do_brk(unsigned long addr,
+ vma->vm_start = addr;
+ vma->vm_end = addr + len;
+ vma->vm_flags = flags;
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) && (flags & (VM_READ|VM_WRITE)))
++ vma->vm_page_prot = protection_map[(flags | VM_EXEC) & 0x0f];
++ else
++#endif
++
+ vma->vm_page_prot = protection_map[flags & 0x0f];
+ vma->vm_ops = NULL;
+ vma->vm_pgoff = 0;
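
The hunks above gate sys_munmap() and do_brk() (and, further down, sys_mprotect() and do_mremap()) on SEGMEXEC_TASK_SIZE instead of TASK_SIZE whenever the task carries PF_PAX_SEGMEXEC, because SEGMEXEC halves the usable address space. A minimal userspace sketch of that range test follows; the concrete limit values below are illustrative stand-ins, not taken from this patch.

/* Hedged sketch, not from the patch: models the shape of the address-range
 * checks added above.  TASK_SIZE/SEGMEXEC_TASK_SIZE values are illustrative. */
#include <stdio.h>

#define TASK_SIZE          0xC0000000UL        /* illustrative stand-in */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)     /* illustrative stand-in */

/* A range [addr, addr+len) must fit below the per-task limit, with
 * overflow-safe arithmetic, exactly as the checks above are phrased. */
static int range_ok(unsigned long addr, unsigned long len, int segmexec)
{
    unsigned long limit = segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE;
    return len <= limit && addr <= limit - len;
}

int main(void)
{
    /* A mapping that is legal normally but rejected under SEGMEXEC,
     * because it crosses the halved address-space limit. */
    unsigned long addr = 0x70000000UL, len = 0x10000000UL;
    printf("plain:    %s\n", range_ok(addr, len, 0) ? "ok" : "-EINVAL");
    printf("segmexec: %s\n", range_ok(addr, len, 1) ? "ok" : "-EINVAL");
    return 0;
}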
+diff -Nurp linux-2.4.25/mm/mprotect.c linux-2.4.25-pax/mm/mprotect.c
+--- linux-2.4.25/mm/mprotect.c 2003-11-28 13:26:21.000000000 -0500
++++ linux-2.4.25-pax/mm/mprotect.c 2004-02-19 11:12:53.000000000 -0500
+@@ -8,6 +8,11 @@
+ #include <linux/shm.h>
+ #include <linux/mman.h>
+
++#ifdef CONFIG_PAX_MPROTECT
++#include <linux/elf.h>
++#include <linux/fs.h>
++#endif
++
+ #include <asm/uaccess.h>
+ #include <asm/pgalloc.h>
+ #include <asm/pgtable.h>
+@@ -236,6 +241,44 @@ static inline int mprotect_fixup_middle(
+ return 0;
+ }
+
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++static int __mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
++ unsigned long start, unsigned long end, unsigned int newflags);
++
++static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ if (vma->vm_flags & VM_MIRROR) {
++ struct vm_area_struct * vma_m, * prev_m;
++ unsigned long start_m, end_m;
++ int error;
++
++ start_m = vma->vm_start + (unsigned long)vma->vm_private_data;
++ vma_m = find_vma_prev(vma->vm_mm, start_m, &prev_m);
++ if (vma_m && vma_m->vm_start == start_m && (vma_m->vm_flags & VM_MIRROR)) {
++ start_m = start + (unsigned long)vma->vm_private_data;
++ end_m = end + (unsigned long)vma->vm_private_data;
++ if ((current->flags & PF_PAX_SEGMEXEC) && !(newflags & VM_EXEC))
++ error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, vma_m->vm_flags & ~(PROT_READ | PROT_WRITE | PROT_EXEC));
++ else
++ error = __mprotect_fixup(vma_m, &prev_m, start_m, end_m, newflags);
++ if (error)
++ return error;
++ } else {
++ printk("PAX: VMMIRROR: mprotect bug in %s, %08lx\n", current->comm, vma->vm_start);
++ return -ENOMEM;
++ }
++ }
++
++ return __mprotect_fixup(vma, pprev, start, end, newflags);
++}
++
++static int __mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
++ unsigned long start, unsigned long end, unsigned int newflags)
++{
++ pgprot_t newprot;
++ int error;
++#else
+ static int mprotect_fixup(struct vm_area_struct * vma, struct vm_area_struct ** pprev,
+ unsigned long start, unsigned long end, unsigned int newflags)
+ {
+@@ -246,6 +289,14 @@ static int mprotect_fixup(struct vm_area
+ *pprev = vma;
+ return 0;
+ }
++#endif
++
++#ifdef CONFIG_PAX_PAGEEXEC
++ if (!(current->flags & PF_PAX_PAGEEXEC) && (newflags & (VM_READ|VM_WRITE)))
++ newprot = protection_map[(newflags | VM_EXEC) & 0xf];
++ else
++#endif
++
+ newprot = protection_map[newflags & 0xf];
+ if (start == vma->vm_start) {
+ if (end == vma->vm_end)
+@@ -264,6 +315,68 @@ static int mprotect_fixup(struct vm_area
+ return 0;
+ }
+
++#ifdef CONFIG_PAX_MPROTECT
++/* PaX: non-PIC ELF libraries need relocations on their executable segments
++ * therefore we'll grant them VM_MAYWRITE once during their life.
++ *
++ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
++ * basis because we want to allow the common case and not the special ones.
++ */
++static inline void pax_handle_maywrite(struct vm_area_struct * vma, unsigned long start)
++{
++ struct elfhdr elf_h;
++ struct elf_phdr elf_p, p_dyn;
++ elf_dyn dyn;
++ unsigned long i, j = 65536UL / sizeof(struct elf_phdr);
++
++#ifndef CONFIG_PAX_NOELFRELOCS
++ if ((vma->vm_start != start) ||
++ !vma->vm_file ||
++ !(vma->vm_flags & VM_MAYEXEC) ||
++ (vma->vm_flags & VM_MAYNOTWRITE))
++#endif
++
++ return;
++
++ if (0 > kernel_read(vma->vm_file, 0UL, (char*)&elf_h, sizeof(elf_h)) ||
++ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
++
++#ifdef CONFIG_PAX_ETEXECRELOCS
++ (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC) ||
++#else
++ elf_h.e_type != ET_DYN ||
++#endif
++
++ !elf_check_arch(&elf_h) ||
++ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
++ elf_h.e_phnum > j)
++ return;
++
++ for (i = 0UL; i < elf_h.e_phnum; i++) {
++ if (0 > kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char*)&elf_p, sizeof(elf_p)))
++ return;
++ if (elf_p.p_type == PT_DYNAMIC) {
++ p_dyn = elf_p;
++ j = i;
++ }
++ }
++ if (elf_h.e_phnum <= j)
++ return;
++
++ i = 0UL;
++ do {
++ if (0 > kernel_read(vma->vm_file, p_dyn.p_offset + i*sizeof(dyn), (char*)&dyn, sizeof(dyn)))
++ return;
++ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
++ vma->vm_flags |= VM_MAYWRITE | VM_MAYNOTWRITE;
++ return;
++ }
++ i++;
++ } while (dyn.d_tag != DT_NULL);
++ return;
++}
++#endif
++
+ asmlinkage long sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+ {
+ unsigned long nstart, end, tmp;
+@@ -276,6 +389,17 @@ asmlinkage long sys_mprotect(unsigned lo
+ end = start + len;
+ if (end < start)
+ return -ENOMEM;
++
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (end > SEGMEXEC_TASK_SIZE)
++ return -EINVAL;
++ } else
++#endif
++
++ if (end > TASK_SIZE)
++ return -EINVAL;
++
+ if (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC))
+ return -EINVAL;
+ if (end == start)
+@@ -288,6 +412,11 @@ asmlinkage long sys_mprotect(unsigned lo
+ if (!vma || vma->vm_start > start)
+ goto out;
+
++#ifdef CONFIG_PAX_MPROTECT
++ if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE))
++ pax_handle_maywrite(vma, start);
++#endif
++
+ for (nstart = start ; ; ) {
+ unsigned int newflags;
+ int last = 0;
+@@ -300,6 +429,12 @@ asmlinkage long sys_mprotect(unsigned lo
+ goto out;
+ }
+
++#ifdef CONFIG_PAX_MPROTECT
++ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
++ if ((current->flags & PF_PAX_MPROTECT) && (prot & PROT_WRITE) && (vma->vm_flags & VM_MAYNOTWRITE))
++ newflags &= ~VM_MAYWRITE;
++#endif
++
+ if (vma->vm_end > end) {
+ error = mprotect_fixup(vma, &prev, nstart, end, newflags);
+ goto out;
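
The pax_handle_maywrite() helper added above grants a one-time VM_MAYWRITE to non-PIC ELF objects that genuinely need text relocations, which it detects by reading the file's program headers and looking for DT_TEXTREL (or DT_FLAGS with DF_TEXTREL) in the PT_DYNAMIC segment. Below is a rough userspace analogue of that scan, offered as a hedged sketch: the mmap()-based file access and the ElfW() macro from <link.h> are this sketch's own framing, not code from the patch.

/* Hedged sketch: userspace analogue of the DT_TEXTREL scan performed by
 * pax_handle_maywrite() above.  mmap()/<link.h> usage is an assumption of
 * this sketch, not part of the patch. */
#include <elf.h>
#include <link.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <unistd.h>

/* Returns 1 if the ELF file carries text relocations (DT_TEXTREL, or
 * DT_FLAGS with DF_TEXTREL), 0 if it does not, -1 on error. */
static int has_textrel(const char *path)
{
    struct stat st;
    int fd = open(path, O_RDONLY);
    int found = 0;

    if (fd < 0 || fstat(fd, &st) < 0)
        return -1;

    char *map = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fd, 0);
    close(fd);
    if (map == MAP_FAILED)
        return -1;

    const ElfW(Ehdr) *eh = (const ElfW(Ehdr) *)map;
    if (memcmp(eh->e_ident, ELFMAG, SELFMAG) != 0) {
        found = -1;  /* not an ELF image */
    } else {
        const ElfW(Phdr) *ph = (const ElfW(Phdr) *)(map + eh->e_phoff);
        for (int i = 0; i < eh->e_phnum; i++) {
            if (ph[i].p_type != PT_DYNAMIC)
                continue;
            /* Walk the dynamic section, like the elf_dyn loop above. */
            for (const ElfW(Dyn) *d = (const ElfW(Dyn) *)(map + ph[i].p_offset);
                 d->d_tag != DT_NULL; d++)
                if (d->d_tag == DT_TEXTREL ||
                    (d->d_tag == DT_FLAGS && (d->d_un.d_val & DF_TEXTREL)))
                    found = 1;
        }
    }

    munmap(map, st.st_size);
    return found;
}

int main(int argc, char **argv)
{
    if (argc != 2) {
        fprintf(stderr, "usage: %s <elf-file>\n", argv[0]);
        return 2;
    }
    int r = has_textrel(argv[1]);
    printf("%s: %s\n", argv[1],
           r == 1 ? "needs text relocations" :
           r == 0 ? "no text relocations" : "not a readable ELF file");
    return r < 0;
}

Run against a shared object built without -fPIC, this reports text relocations, which is the one case the patch whitelists for a single VM_MAYWRITE grant.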
+diff -Nurp linux-2.4.25/mm/mremap.c linux-2.4.25-pax/mm/mremap.c
+--- linux-2.4.25/mm/mremap.c 2004-02-18 08:36:32.000000000 -0500
++++ linux-2.4.25-pax/mm/mremap.c 2004-02-19 11:12:53.000000000 -0500
+@@ -232,6 +232,18 @@ unsigned long do_mremap(unsigned long ad
+ old_len = PAGE_ALIGN(old_len);
+ new_len = PAGE_ALIGN(new_len);
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (new_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-new_len ||
++ old_len > SEGMEXEC_TASK_SIZE || addr > SEGMEXEC_TASK_SIZE-old_len)
++ goto out;
++ } else
++#endif
++
++ if (new_len > TASK_SIZE || addr > TASK_SIZE-new_len ||
++ old_len > TASK_SIZE || addr > TASK_SIZE-old_len)
++ goto out;
++
+ /* new_addr is only valid if MREMAP_FIXED is specified */
+ if (flags & MREMAP_FIXED) {
+ if (new_addr & ~PAGE_MASK)
+@@ -239,6 +251,13 @@ unsigned long do_mremap(unsigned long ad
+ if (!(flags & MREMAP_MAYMOVE))
+ goto out;
+
++#ifdef CONFIG_PAX_SEGMEXEC
++ if (current->flags & PF_PAX_SEGMEXEC) {
++ if (new_len > SEGMEXEC_TASK_SIZE || new_addr > SEGMEXEC_TASK_SIZE-new_len)
++ goto out;
++ } else
++#endif
++
+ if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
+ goto out;
+ /*
+@@ -283,6 +302,16 @@ unsigned long do_mremap(unsigned long ad
+ vma = find_vma(current->mm, addr);
+ if (!vma || vma->vm_start > addr)
+ goto out;
++
++#if defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_RANDEXEC)
++ if ((current->flags & (PF_PAX_SEGMEXEC | PF_PAX_RANDEXEC)) &&
++ (vma->vm_flags & VM_MIRROR))
++ {
++ ret = -EINVAL;
++ goto out;
++ }
++#endif
++
+ /* We can't remap across vm area boundaries */
+ if (old_len > vma->vm_end - addr)
+ goto out;
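
The munmap, mprotect and mremap hunks above all locate a mirrored vma through vm_private_data: as those hunks read it, each vma of a VM_MIRROR pair appears to store the byte offset to its partner, so the partner address is simply the address plus that offset, with unsigned wrap-around covering the reverse direction. A toy model of that bookkeeping follows, using simplified stand-in types rather than kernel definitions.

/* Hedged sketch, not kernel code: a toy model of the VM_MIRROR pairing
 * assumed by the addr_m/start_m computations in the hunks above. */
#include <stdio.h>

struct toy_vma {
    unsigned long vm_start, vm_end;
    unsigned long vm_private_data;   /* assumed: offset to the mirror vma */
};

/* Translate an address inside 'vma' into the corresponding address in its
 * mirror, the way the hunks above compute addr_m/start_m. */
static unsigned long mirror_addr(const struct toy_vma *vma, unsigned long addr)
{
    return addr + vma->vm_private_data;
}

int main(void)
{
    /* An original mapping at 0x08048000 mirrored at 0x68048000; each vma
     * stores the offset to its partner, so the same formula works both ways. */
    struct toy_vma orig   = { 0x08048000UL, 0x08050000UL, 0x60000000UL };
    struct toy_vma mirror = { 0x68048000UL, 0x68050000UL, (unsigned long)-0x60000000L };

    printf("orig   0x%lx -> mirror 0x%lx\n", orig.vm_start, mirror_addr(&orig, orig.vm_start));
    printf("mirror 0x%lx -> orig   0x%lx\n", mirror.vm_start, mirror_addr(&mirror, mirror.vm_start));
    return 0;
}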
diff --git a/sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild b/sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild
index 70710831b9e8..b4adb0f51d97 100644
--- a/sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild
+++ b/sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild
@@ -1,6 +1,6 @@
# Copyright 1999-2004 Gentoo Technologies, Inc.
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild,v 1.2 2004/02/18 21:48:46 kumba Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.21-r7.ebuild,v 1.3 2004/02/23 10:48:55 kumba Exp $
# Version Data
@@ -54,17 +54,17 @@ src_unpack() {
# Big Endian Fix (Fix in headers for big-endian machines)
epatch ${FILESDIR}/bigendian-byteorder-fix.patch
- # do_brk fix (Fixes exploit that hit several debian servers)
- epatch ${FILESDIR}/do_brk_fix.patch
-
- # mremap fix (Possibly Exploitable)
- epatch ${FILESDIR}/mremap-fix-try2.patch
-
# MIPS RTC Fixes (Fixes memleaks, backport from 2.4.24)
epatch ${FILESDIR}/rtc-fixes.patch
- # do_munmap fix (Possibly Exploitable)
- epatch ${FILESDIR}/do_munmap-fix.patch
+ # Security Fixes
+ echo -e ""
+ ebegin "Applying Security Fixes"
+ epatch ${FILESDIR}/CAN-2003-0961-do_brk.patch
+ epatch ${FILESDIR}/CAN-2003-0985-mremap.patch
+ epatch ${FILESDIR}/CAN-2004-0010-ncpfs.patch
+ epatch ${FILESDIR}/CAN-2004-0077-do_munmap.patch
+ eend
# Cobalt Patches
if [ "${PROFILE_ARCH}" = "cobalt" ]; then
diff --git a/sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild b/sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild
index 832178c315b2..658fe1b668d5 100644
--- a/sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild
+++ b/sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild
@@ -1,6 +1,6 @@
# Copyright 1999-2004 Gentoo Technologies, Inc.
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild,v 1.2 2004/02/18 21:48:46 kumba Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.22-r10.ebuild,v 1.3 2004/02/23 10:48:55 kumba Exp $
# Version Data
@@ -49,17 +49,17 @@ src_unpack() {
# Patch arch/mips/Makefile for gcc (Pass -mips3/-mips4 for r4k/r5k cpus)
epatch ${FILESDIR}/mipscvs-${OKV}-makefile-fix.patch
- # do_brk fix (Fixes exploit that hit several debian servers)
- epatch ${FILESDIR}/do_brk_fix.patch
-
- # mremap fix (Possibly Exploitable)
- epatch ${FILESDIR}/mremap-fix-try2.patch
-
# MIPS RTC Fixes (Fixes memleaks, backport from 2.4.24)
epatch ${FILESDIR}/rtc-fixes.patch
- # do_munmap fix (Possibly Exploitable)
- epatch ${FILESDIR}/do_munmap-fix.patch
+ # Security Fixes
+ echo -e ""
+ ebegin "Applying Security Fixes"
+ epatch ${FILESDIR}/CAN-2003-0961-do_brk.patch
+ epatch ${FILESDIR}/CAN-2003-0985-mremap.patch
+ epatch ${FILESDIR}/CAN-2004-0010-ncpfs.patch
+ epatch ${FILESDIR}/CAN-2004-0077-do_munmap.patch
+ eend
# Cobalt Patches
if [ "${PROFILE_ARCH}" = "cobalt" ]; then
diff --git a/sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild b/sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild
index dd5bf60c322b..cf72f94ca810 100644
--- a/sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild
+++ b/sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild
@@ -1,6 +1,6 @@
# Copyright 1999-2004 Gentoo Technologies, Inc.
# Distributed under the terms of the GNU General Public License v2
-# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild,v 1.2 2004/02/18 21:48:46 kumba Exp $
+# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.23-r6.ebuild,v 1.3 2004/02/23 10:48:55 kumba Exp $
# Version Data
@@ -54,26 +54,29 @@ src_unpack() {
epatch ${FILESDIR}/mipscvs-${OKV}-makefile-fix.patch
# Patch to fix mips64 Makefile so that -finline-limit=10000 gets added to CFLAGS
- epatch ${FILESDIR}/mipscvs-${OKV}-makefile-inlinelimit-fix.patch
-
- # mremap fix (Possibly Exploitable)
- epatch ${FILESDIR}/mremap-fix-try2.patch
+ epatch ${FILESDIR}/mipscvs-${OKV}-makefile-inlinelimit.patch
# MIPS RTC Fixes (Fixes memleaks, backport from 2.4.24)
epatch ${FILESDIR}/rtc-fixes.patch
- # do_munmap fix (Possibly Exploitable)
- epatch ${FILESDIR}/do_munmap-fix.patch
-
# XFS Patches
# We don't use epatch here because something funny is messed up in the XFS patches,
# thus while they apply, they don't apply properly
+ echo -e ""
ebegin "Applying XFS Patchset"
cat ${WORKDIR}/xfs-${PV}-split-only | patch -p1 2>&1 >/dev/null
cat ${WORKDIR}/xfs-${PV}-split-kernel | patch -p1 2>&1 >/dev/null
cat ${WORKDIR}/xfs-${PV}-split-acl | patch -p1 2>&1 >/dev/null
eend
+ # Security Fixes
+ echo -e ""
+ ebegin "Applying Security Fixes"
+ epatch ${FILESDIR}/CAN-2003-0985-mremap.patch
+ epatch ${FILESDIR}/CAN-2004-0010-ncpfs.patch
+ epatch ${FILESDIR}/CAN-2004-0077-do_munmap.patch
+ eend
+
# Cobalt Patches
if [ "${PROFILE_ARCH}" = "cobalt" ]; then
echo -e ""
diff --git a/sys-kernel/mips-sources/mips-sources-2.4.25.ebuild b/sys-kernel/mips-sources/mips-sources-2.4.25.ebuild
new file mode 100644
index 000000000000..87b2ee8f9067
--- /dev/null
+++ b/sys-kernel/mips-sources/mips-sources-2.4.25.ebuild
@@ -0,0 +1,69 @@
+# Copyright 1999-2004 Gentoo Technologies, Inc.
+# Distributed under the terms of the GNU General Public License v2
+# $Header: /var/cvsroot/gentoo-x86/sys-kernel/mips-sources/mips-sources-2.4.25.ebuild,v 1.1 2004/02/23 10:48:55 kumba Exp $
+
+
+# Version Data
+OKV=${PV/_/-}
+CVSDATE="20040222"
+EXTRAVERSION="-mipscvs-${CVSDATE}"
+KV="${OKV}${EXTRAVERSION}"
+COBALTPATCHVER="1.1"
+PAXDATE="200402192035"
+
+# Miscellaneous stuff
+S=${WORKDIR}/linux-${OKV}-${CVSDATE}
+
+# Eclass stuff
+ETYPE="sources"
+inherit kernel eutils
+
+
+# INCLUDED:
+# 1) linux sources from kernel.org
+# 2) linux-mips.org CVS snapshot diff from 22 Feb 2004
+# 3) patch to fix arch/mips[64]/Makefile to pass appropriate CFLAGS
+# 4) patch to fix the mips64 Makefile to allow building of mips64 kernels
+# 5) added PaX patch (http://pax.grsecurity.net)
+# 6) Patches for Cobalt support
+
+
+DESCRIPTION="Linux-Mips CVS sources for MIPS-based machines, dated ${CVSDATE}"
+SRC_URI="mirror://kernel/linux/kernel/v2.4/linux-${OKV}.tar.bz2
+ mirror://gentoo/mipscvs-${OKV}-${CVSDATE}.diff.bz2
+ mirror://gentoo/cobalt-patches-24xx-${COBALTPATCHVER}.tar.bz2"
+HOMEPAGE="http://www.linux-mips.org/"
+SLOT="${OKV}"
+PROVIDE="virtual/linux-sources"
+KEYWORDS="-* ~mips"
+
+
+src_unpack() {
+ unpack ${A}
+ mv ${WORKDIR}/linux-${OKV} ${WORKDIR}/linux-${OKV}-${CVSDATE}
+ cd ${S}
+
+ # Update the vanilla sources with linux-mips CVS changes
+ epatch ${WORKDIR}/mipscvs-${OKV}-${CVSDATE}.diff
+
+ # Patch arch/mips/Makefile for gcc (Pass -mips3/-mips4 for r4k/r5k cpus)
+ epatch ${FILESDIR}/mipscvs-${OKV}-makefile-fix.patch
+
+ # Patch to fix mips64 Makefile so that -finline-limit=10000 gets added to CFLAGS
+ epatch ${FILESDIR}/mipscvs-${OKV}-makefile-inlinelimit.patch
+
+ # Add in PaX support to the kernel
+ epatch ${FILESDIR}/pax-linux-${OKV}-${PAXDATE}.patch
+
+ # Cobalt Patches
+ if [ "${PROFILE_ARCH}" = "cobalt" ]; then
+ echo -e ""
+ einfo ">>> Patching kernel for Cobalt support ..."
+ for x in ${WORKDIR}/cobalt-patches-24xx-${COBALTPATCHVER}/*.patch; do
+ epatch ${x}
+ done
+ cp ${WORKDIR}/cobalt-patches-24xx-${COBALTPATCHVER}/cobalt-patches.txt ${S}
+ fi
+
+ kernel_universal_unpack
+}