diff -ruNp 828-core-pagedir-old/kernel/power/suspend2_core/pagedir.c 828-core-pagedir-new/kernel/power/suspend2_core/pagedir.c
--- 828-core-pagedir-old/kernel/power/suspend2_core/pagedir.c	1970-01-01 10:00:00.000000000 +1000
+++ 828-core-pagedir-new/kernel/power/suspend2_core/pagedir.c	2004-12-24 12:48:52.000000000 +1100
@@ -0,0 +1,408 @@
+/*
+ * kernel/power/pagedir.c
+ *
+ * Copyright (C) 1998-2001 Gabor Kuti <seasons@fornax.hu>
+ * Copyright (C) 1998,2001,2002 Pavel Machek <pavel@suse.cz>
+ * Copyright (C) 2002-2003 Florent Chabaud <fchabaud@free.fr>
+ * Copyright (C) 2002-2004 Nigel Cunningham <ncunningham@linuxmail.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * Routines for handling pagesets.
+ * Note that pbes aren't actually stored as such. They're stored as
+ * extents ("extents" is the term, I'm told).
+ */
+
+#define SUSPEND_PAGEDIR_C
+#include <linux/suspend.h>
+#include <linux/highmem.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+#include <linux/list.h>
+
+extern struct pagedir pagedir1, pagedir2, pagedir_resume;
+
+int extra_pagedir_pages_allocated = 0;
+static LIST_HEAD(conflicting_pages);
+
+#include "suspend.h"
+#include "pageflags.h"
+#include "debug.h"
+
+/*
+ * --------------------------------------------------------------------------------------
+ *
+ * 	Local Page Flags routines.
+ *
+ * 	Rather than using the rare and precious flags in struct page, we allocate
+ * 	our own bitmaps dynamically.
+ * 
+ */
+
+/* ------------------------------------------------------------------------- */
+
+/* copy_pageset1
+ *
+ * Description:	Make the atomic copy of pageset1. We can't use copy_page (as we
+ * 		once did) because we can't be sure what side effects it has. On
+ * 		my old Duron, with 3DNOW, kernel_fpu_begin increments preempt
+ * 		count, making our preempt count at resume time 4 instead of 3.
+ *
+ * 		We don't want to call kmap_atomic unconditionally because it has
+ * 		the side effect of incrementing the preempt count, which will
+ * 		leave it one too high post resume (the page containing the
+ * 		preempt count will be copied after it's incremented). This is
+ * 		essentially the same problem.
+ */
+
+void copy_pageset1(void)
+{
+	int i = 0, source_index = -1, dest_index = -1;
+
+	for (i = 0; i < pageset1_size; i++) {
+		int loop;
+		unsigned long * origvirt, *copyvirt;
+		struct page * origpage;
+
+		/* Walk the source and destination bitmaps in lockstep:
+		 * the i-th set bit of each map forms one (orig, copy) pair. */
+		source_index = __get_next_bit_on(pageset1_map, source_index);
+		dest_index = __get_next_bit_on(pageset1_copy_map, dest_index);
+
+		origpage = pfn_to_page(source_index);
+
+		/* NOTE(review): assumes destination pages are never highmem
+		 * (page_address used unconditionally) — confirm against the
+		 * allocator of pageset1_copy pages. */
+		copyvirt = (unsigned long *) page_address(pfn_to_page(dest_index));
+
+		if (PageHighMem(origpage))
+			origvirt = kmap_atomic(origpage, KM_USER1);
+		else
+			origvirt = page_address(origpage);
+
+		/* Word-at-a-time copy; see header comment for why we avoid
+		 * copy_page. */
+		for (loop=0; loop < (PAGE_SIZE / sizeof(unsigned long)); loop++)
+			*(copyvirt + loop) = *(origvirt + loop);
+
+		if (PageHighMem(origpage))
+			/* Bugfix: kunmap_atomic takes the mapped virtual
+			 * address returned by kmap_atomic, not the page. */
+			kunmap_atomic(origvirt, KM_USER1);
+	}
+}
+
+#ifdef CONFIG_DEBUG_PAGEALLOC
+/* suspend_map_atomic_copy_pages
+ *
+ * Description:	With CONFIG_DEBUG_PAGEALLOC, make sure each pageset1 original
+ * 		page and its atomic-copy destination page is mapped in the
+ * 		kernel page tables. Pages we had to map ourselves are flagged
+ * 		(PageUnmap) so suspend_unmap_atomic_copy_pages can undo this.
+ */
+void suspend_map_atomic_copy_pages(void)
+{
+	struct pbe2 entry;
+	int count;
+
+	get_first_pbe(&entry, &pagedir1);
+
+	for (count = 0; count < pageset1_size; count++) {
+		/* Highmem originals are skipped: they have no permanent
+		 * kernel mapping to adjust. */
+		if (!PageHighMem(entry.origaddress) &&
+		    !suspend_map_kernel_page(entry.origaddress, 1))
+			SetPageUnmap(entry.origaddress);
+
+		if (!suspend_map_kernel_page(entry.address, 1))
+			SetPageUnmap(entry.address);
+
+		get_next_pbe(&entry);
+	}
+}
+
+/* suspend_unmap_atomic_copy_pages
+ *
+ * Description:	Undo the mappings made by suspend_map_atomic_copy_pages:
+ * 		walk every pfn and unmap pages flagged PageUnmap.
+ * 		NOTE(review): the PageUnmap flag is not cleared here — confirm
+ * 		it is reset elsewhere before the flags are reused.
+ */
+void suspend_unmap_atomic_copy_pages(void)
+{
+	int pfn;
+
+	for (pfn = 0; pfn < max_mapnr; pfn++) {
+		struct page *page = pfn_to_page(pfn);
+
+		if (PageUnmap(page))
+			suspend_map_kernel_page(page, 0);
+	}
+}
+#endif
+
+/* free_pagedir_data
+ *
+ * Description:	Free previously allocated pagedir metadata: the local page
+ * 		bitmaps and any extra pages grabbed for the atomic copy.
+ * 		Resets both pageset sizes to zero.
+ */
+void free_pagedir_data(void)
+{
+	int pagenumber;
+
+	PRINTFREEMEM("at start of free_pagedir_data");
+
+	free_local_pageflags(&pageset1_map);
+	free_local_pageflags(&pageset2_map);
+	free_local_pageflags(&pageset1_copy_map);
+
+	/* Free the extra pages obtained by allocate_extra_pagedir_memory;
+	 * each was given a refcount of 1 so they free one at a time. */
+	if (allocd_pages_map) {
+		BITMAP_FOR_EACH_SET(allocd_pages_map, pagenumber) {
+			struct page * page = pfn_to_page(pagenumber);
+			ClearPageNosave(page);
+			__free_pages(page, 0);
+			extra_pagedir_pages_allocated--;
+		}
+		free_local_pageflags(&allocd_pages_map);
+	}
+
+	suspend_store_free_mem(SUSPEND_FREE_EXTRA_PD1, 1);
+
+	/* Fixed: message previously said "free_pagedir", which doesn't
+	 * match the function name or the entry message above. */
+	PRINTFREEMEM("at end of free_pagedir_data");
+	suspend_message(SUSPEND_PAGESETS, SUSPEND_MEDIUM, 0,
+			"Pageset size1 was %d; size2 was %d.\n",
+			pagedir1.pageset_size,
+			pagedir2.pageset_size);
+	pagedir1.pageset_size = pagedir2.pageset_size = 0;
+}
+
+/* PageInPagedir
+ *
+ * Description:	Report the role(s) a page plays in the pagesets.
+ * Arguments:	struct pagedir *	Unused here; kept for callers.
+ * 		struct page *		The page to classify.
+ * Result:	int			Bitmap of state:
+ * 					Bit 0 (1): Pageset1 source page.
+ * 					Bit 1 (2): Pageset1 copy (dest) page.
+ * 					Bit 2 (4): Extra allocated page.
+ * 					(Should only result in 0, 1, 2 or 6).
+ */
+
+int PageInPagedir(struct pagedir * p, struct page * page)
+{
+	return (PagePageset1(page) ? 1 : 0) |
+	       (PagePageset1Copy(page) ? 2 : 0) |
+	       (PageAllocd(page) ? 4 : 0);
+}
+
+/* allocate_extra_pagedir_memory
+ *
+ * Description:	Allocate memory for making the atomic copy of pagedir1 in the
+ * 		case where it is bigger than pagedir2.
+ * Arguments:	struct pagedir *: 	The pagedir for which we should 
+ * 					allocate memory.
+ * 		int:			Size of the pageset to be covered.
+ * 		int:			Number of pages already available
+ * 					from elsewhere (so only the shortfall
+ * 					is allocated here).
+ * Result:	int. Zero on success. One if unable to allocate enough memory.
+ */
+int allocate_extra_pagedir_memory(struct pagedir * p, int pageset_size,
+		int alloc_from)
+{
+	/* Shortfall after counting pages already available and pages we
+	 * allocated on a previous call. */
+	int num_to_alloc = pageset_size - alloc_from - extra_pagedir_pages_allocated;
+	int j, order;
+
+	prepare_status(0, 0, "Preparing page directory.");
+
+	PRINTFREEMEM("at start of allocate_extra_pagedir_memory");
+
+	if (num_to_alloc < 1)
+		num_to_alloc = 0;
+
+	if (num_to_alloc) {
+		int num_added = 0;
+		int origallocd = alloc_from + extra_pagedir_pages_allocated;
+	
+		PRINTFREEMEM("prior to attempt");
+
+		/* Start at the largest order that could be useful, clamped
+		 * to the buddy allocator's maximum. */
+		order = generic_fls(num_to_alloc);
+		if (order >= MAX_ORDER)
+			order = MAX_ORDER - 1;
+
+		while (num_added < num_to_alloc) {
+			struct page * newpage;
+			unsigned long virt;
+			
+			/* Shrink the order so we never allocate more pages
+			 * than we still need. */
+			while ((1 << order) > (num_to_alloc - num_added))
+				order--;
+
+			/* On failure, retry at progressively smaller orders
+			 * down to single pages. */
+			virt = get_grabbed_pages(order);
+			while ((!virt) && (order > 0)) {
+				order--;
+				virt = get_grabbed_pages(order);
+			}
+
+			if (!virt) {
+				/* Even order 0 failed: record what we did
+				 * get, log the shortfall, and report error. */
+				p->pageset_size += num_added;
+				suspend_message(SUSPEND_PAGESETS, SUSPEND_VERBOSE, 1,
+					"   Allocated (extra) memory for pages"
+					" from %d-%d (%d pages). %d short.\n",
+					origallocd + 1, pageset_size, 
+					pageset_size - origallocd,
+					num_to_alloc - num_added);
+				PRINTFREEMEM("at abort of "
+					"allocate_extra_pagedir_memory");
+				suspend_store_free_mem(SUSPEND_FREE_EXTRA_PD1, 0);
+				return 1;
+			}
+
+			newpage = virt_to_page(virt);
+			suspend_store_free_mem(SUSPEND_FREE_EXTRA_PD1, 0);
+			for (j = 0; j < (1 << order); j++) {
+				SetPageNosave(newpage + j);
+				/* Pages will be freed one at a time. */
+				set_page_count(newpage + j, 1);
+				SetPageAllocd(newpage + j);
+				extra_pagedir_pages_allocated++;
+			}
+			suspend_store_free_mem(SUSPEND_FREE_EXTENT_PAGES, 0);
+			num_added+= (1 << order);
+		}
+		suspend_message(SUSPEND_PAGESETS, SUSPEND_VERBOSE, 1,
+			"   Allocated (extra) memory for pages "
+			"from %d-%d (%d pages).\n",
+			origallocd + 1, pageset_size, 
+			pageset_size - origallocd);
+	}
+
+	p->pageset_size = pageset_size;
+
+	suspend_store_free_mem(SUSPEND_FREE_EXTRA_PD1, 0);
+	PRINTFREEMEM("at end of allocate_extra_pagedir_memory");
+	return 0;
+}
+
+/* mark_pages_for_pageset2
+ *
+ * Description:	Mark unshared pages in processes not needed for suspend as
+ * 		being able to be written out in a separate pagedir.
+ * 		HighMem pages are simply marked as pageset2. They won't be
+ * 		needed during suspend.
+ */
+
+void mark_pages_for_pageset2(void)
+{
+	struct zone * zone;
+	unsigned long flags;
+	int i;
+
+	clear_map(pageset2_map);
+
+	/* 
+	 * NOTE(review): an earlier revision deliberately did NOT clear the
+	 * map here, to avoid losing track of LRU pages still in use but
+	 * taken off the LRU when we eat memory. The clear_map() call above
+	 * contradicts that intent — confirm which behaviour is wanted.
+	 *
+	 * (Memory grabbing clears the pageset2 flag on
+	 * pages that are really freed!).
+	 */
+	
+	/* Mark every page currently on a zone LRU (active or inactive)
+	 * as pageset2, holding the zone's lru_lock while walking. */
+	for_each_zone(zone) {
+		spin_lock_irqsave(&zone->lru_lock, flags);
+		if (zone->nr_inactive) {
+			struct page * page;
+			list_for_each_entry(page, &zone->inactive_list, lru)
+				SetPagePageset2(page);
+		}
+		if (zone->nr_active) {
+			struct page * page;
+			list_for_each_entry(page, &zone->active_list, lru)
+				SetPagePageset2(page);
+		}
+		spin_unlock_irqrestore(&zone->lru_lock, flags);
+	}
+
+	/* Sanity check: slab pages must never be marked pageset2. */
+	for (i = 0; i < max_pfn; i++) {
+		struct page * page = pfn_to_page(i);
+		BUG_ON(PagePageset2(page) && PageSlab(page));
+	}
+}
+
+/* get_nonconflicting_page
+ *
+ * Description: Gets a page that will not be overwritten as we copy the
+ * 		original kernel page. Pages that do conflict (are part of
+ * 		pageset1) are parked on the conflicting_pages list so they
+ * 		cannot be handed out again; release_conflicting_pages frees
+ * 		them later. Returns 0 if no page could be allocated.
+ */
+
+unsigned long get_nonconflicting_page(void)
+{
+	for (;;) {
+		unsigned long virt = get_zeroed_page(GFP_ATOMIC);
+		struct page * candidate;
+
+		if (!virt)
+			return 0;
+
+		candidate = virt_to_page(virt);
+		if (!PagePageset1(candidate))
+			return virt;
+
+		/* Conflicts with a pageset1 page: park it and retry. */
+		list_add(&candidate->lru, &conflicting_pages);
+	}
+}
+
+/* release_conflicting_pages
+ *
+ * Description: Release conflicting pages. If we resume, we don't care (their
+ * 		status will not matter), but if we abort for some reason, they
+ * 		should not leak.
+ */
+
+void release_conflicting_pages(void)
+{
+	struct page *this_page, *next;
+
+	list_for_each_entry_safe(this_page, next, &conflicting_pages, lru) {
+		/* Bugfix: unlink before freeing. The buddy allocator reuses
+		 * page->lru, so freeing while still chained would leave
+		 * conflicting_pages pointing into freed pages (and corrupt
+		 * the list if it is ever used again). */
+		list_del(&this_page->lru);
+		__free_pages(this_page, 0);
+	}
+}
+
+/* relocate_page_if_required
+ *
+ * Description: Given the address of a pointer to a page, we check if the page
+ * 		needs relocating (it is part of pageset1 and would be
+ * 		overwritten) and do so if needs be, adjusting the pointer too.
+ */
+
+void relocate_page_if_required(void ** page_pointer_addr)
+{
+	void * current_value = *page_pointer_addr;
+
+	/* Also fixed: the condition was not parenthesized (it only compiled
+	 * because the macro expansion happens to be parenthesized). */
+	if (PagePageset1(virt_to_page(current_value))) {
+		unsigned long * new_page =
+			(unsigned long *) get_nonconflicting_page();
+
+		/* Bugfix: get_nonconflicting_page can fail under memory
+		 * pressure; previously we would memcpy to NULL. Leave the
+		 * pointer untouched in that case rather than oopsing here. */
+		if (!new_page)
+			return;
+
+		memcpy(new_page, current_value, PAGE_SIZE);
+		free_pages((unsigned long) current_value, 0);
+		*page_pointer_addr = new_page;
+	}
+}
+
+/* get_pageset1_load_addresses
+ * 
+ * Description: We check here that pagedir & pages it points to won't collide
+ * 		with pages where we're going to restore from the loaded pages
+ * 		later: one non-conflicting page is reserved (and flagged as
+ * 		Pageset1Copy) for each page of the resume pagedir.
+ * Returns:	Zero on success, one if couldn't find enough pages (shouldn't
+ * 		happen).
+ */
+
+int get_pageset1_load_addresses(void)
+{
+	int i;
+
+	/*
+	 * Because we're trying to make this work when we're saving as much
+	 * memory as possible, pages rejected by get_nonconflicting_page are
+	 * remembered (on conflicting_pages) and freed when we're done.
+	 */
+	for (i = 0; i < pagedir_resume.pageset_size; i++) {
+		/* Cleanup: the old 'nrdone' counter was written but never
+		 * read, and 'result'/'break' collapse to direct returns. */
+		void *this = (void *) get_nonconflicting_page();
+
+		if (!this) {
+			abort_suspend("Error: Ran out of memory seeking locations for reloading data.");
+			return 1;
+		}
+		SetPagePageset1Copy(virt_to_page(this));
+	}
+
+	return 0;
+}
