diff -ruNp 830-core-prepare-image-old/kernel/power/suspend2_core/prepare_image.c 830-core-prepare-image-new/kernel/power/suspend2_core/prepare_image.c
--- 830-core-prepare-image-old/kernel/power/suspend2_core/prepare_image.c	1970-01-01 10:00:00.000000000 +1000
+++ 830-core-prepare-image-new/kernel/power/suspend2_core/prepare_image.c	2004-12-24 12:44:24.000000000 +1100
@@ -0,0 +1,804 @@
+/*
+ * kernel/power/prepare_image.c
+ *
+ * Copyright (C) 2003-2004 Nigel Cunningham <ncunningham@linuxmail.org>
+ *
+ * This file is released under the GPLv2.
+ *
+ * We need to eat memory until we can:
+ * 1. Perform the save without changing anything (RAM_NEEDED < max_mapnr)
+ * 2. Fit it all in available space (active_writer->available_space() >= STORAGE_NEEDED)
+ * 3. Reload the pagedir and pageset1 to places that don't collide with their
+ *    final destinations, not knowing to what extent the resumed kernel will
+ *    overlap with the one loaded at boot time. I think the resumed kernel should overlap
+ *    completely, but I don't want to rely on this as it is an unproven assumption. We
+ *    therefore assume there will be no overlap at all (worst case).
+ * 4. Meet the user's requested limit (if any) on the size of the image.
+ *    The limit is in MB, so pages/256 (assuming 4K pages).
+ *
+ *    (Final test in save_image doesn't use EATEN_ENOUGH_MEMORY)
+ */
+
+#define SUSPEND_PREPARE_IMAGE_C
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/suspend.h>
+#include <linux/highmem.h>
+#include <linux/notifier.h>
+
+#include "suspend.h"
+#include "pageflags.h"
+#include "debug.h"
+#include "../plugins.h"
+#include "../proc.h"
+
+extern int pageset1_sizelow, pageset2_sizelow;
+extern unsigned long orig_mem_free;
+extern void mark_pages_for_pageset2(void);
+extern int image_size_limit;
+extern int extra_pagedir_pages_allocated;
+
+int suspend_amount_grabbed = 0;
+static int arefrozen = 0, numnosave = 0;
+static int header_space_allocated = 0;
+extern unsigned long forced_ps1_size, forced_ps2_size;
+
+/*
+ * generate_free_page_map
+ *
+ * Description:	This routine generates a bitmap of free pages from the
+ * 		lists used by the memory manager. We then use the bitmap
+ * 		to quickly calculate which pages to save and in which
+ * 		pagesets.
+ */
+static void generate_free_page_map(void) 
+{
+	int i, order, loop, cpu;
+	struct page * page;
+	unsigned long flags;
+	struct zone *zone;
+	struct per_cpu_pageset *pset;
+
+	for(i=0; i < max_mapnr; i++)
+		SetPageInUse(pfn_to_page(i));	/* start pessimistic: every page assumed in use */
+	
+	for_each_zone(zone) {
+		if (!zone->present_pages)
+			continue;
+		spin_lock_irqsave(&zone->lock, flags);
+		for (order = MAX_ORDER - 1; order >= 0; --order) {	/* clear pages sitting on buddy free lists */
+			list_for_each_entry(page, &zone->free_area[order].free_list, lru)
+				for(loop=0; loop < (1 << order); loop++) {	/* each list entry covers 2^order pages */
+					ClearPageInUse(page+loop);
+					ClearPagePageset2(page+loop);
+				}
+		}
+
+		
+		for (cpu = 0; cpu < NR_CPUS; cpu++) {	/* per-CPU page lists are free too */
+			if (!cpu_possible(cpu))
+				continue;
+
+			pset = &zone->pageset[cpu];
+
+			for (i = 0; i < ARRAY_SIZE(pset->pcp); i++) {
+				struct per_cpu_pages *pcp;
+				struct page * page;
+
+				pcp = &pset->pcp[i];
+				list_for_each_entry(page, &pcp->list, lru) {
+					ClearPageInUse(page);
+					ClearPagePageset2(page);
+				}
+			}
+		}
+		
+		spin_unlock_irqrestore(&zone->lock, flags);
+	}
+}
+
+/* size_of_free_region
+ * 
+ * Description:	Return the number of pages that are free, beginning with and 
+ * 		including this one.
+ */
+static int size_of_free_region(struct page * page)
+{
+	struct page * posn = page;
+
+	while (((page_to_pfn(posn)) < max_mapnr) && (!PageInUse(posn))) 
+		posn++;	/* relies on the PageInUse bits set up by generate_free_page_map() */
+	return (posn - page);	/* contiguous free pages from 'page'; 0 if it is in use */
+}
+
+static void display_reserved_pages(void)
+{
+	int loop;
+	int extentmin = -1;	/* first pfn of the open reserved extent; -1 = none open */
+
+	for (loop = 0; loop < max_mapnr; loop++) {
+		struct page * page = pfn_to_page(loop);
+		if (PageReserved(page)) {
+			if (extentmin == -1)
+				extentmin = loop;
+		} else {
+			if (extentmin > -1) {	/* first non-reserved page closes the extent */
+				printk("Reserved pages from %p to %p.\n",
+					page_address(pfn_to_page(extentmin)),
+					((char *) page_address(page)) - 1);
+				extentmin = -1;
+			}
+		}
+	}
+
+	if (extentmin > -1)	/* close an extent running to the end of memory */
+		printk("Reserved pages from %p to %p.\n",
+			page_address(pfn_to_page(extentmin)),
+			((char *) page_address(pfn_to_page(max_mapnr - 1))) + PAGE_SIZE - 1);
+}
+
+/* 
+ * Description:	Display which pages are marked Nosave.
+ */
+void display_nosave_pages(void)
+{
+	int loop;
+	int extentmin = -1;	/* first pfn of the open Nosave extent; -1 = none open */
+
+	if (!TEST_DEBUG_STATE(SUSPEND_NOSAVE))
+		return;
+
+	display_reserved_pages();
+
+	for (loop = 0; loop < max_mapnr; loop++) {
+		if (PageNosave(pfn_to_page(loop))) {
+			if (extentmin == -1)
+				extentmin = loop;
+		} else {
+			if (extentmin > -1) {	/* first non-Nosave page closes the extent */
+				printk("Nosave pages from %p to %p.\n",
+					page_address(pfn_to_page(extentmin)),
+					((char *) page_address(pfn_to_page(loop))) - 1);
+				extentmin = -1;
+			}
+		}
+	}
+
+	if (extentmin > -1)	/* close an extent running to the end of memory */
+		printk("Nosave pages from %p to %p.\n",
+			page_address(pfn_to_page(extentmin)),
+			((char *) page_address(pfn_to_page(max_mapnr - 1))) + PAGE_SIZE - 1);
+}
+
+/*
+ * count_data_pages
+ *
+ * This routine generates our lists of pages to be stored in each
+ * pageset. Since we store the data using extents, and adding new
+ * extents might allocate a new extent page, this routine may well
+ * be called more than once.
+ */
+static struct pageset_sizes_result count_data_pages(void)
+{
+	int chunk_size, loop, numfree = 0;
+	int usepagedir2;
+	struct pageset_sizes_result result;
+
+	result.size1 = 0;
+	result.size1low = 0;
+	result.size2 = 0;
+	result.size2low = 0;
+	result.needmorespace = 0;
+
+	numnosave = 0;
+
+	clear_map(pageset1_map);
+	clear_map(pageset1_copy_map);
+
+	generate_free_page_map();
+
+	if (TEST_RESULT_STATE(SUSPEND_ABORTED)) {	/* aborted: poison every size with -1 */
+		result.size1 = -1;
+		result.size1low = -1;
+		result.size2 = -1;
+		result.size2low = -1;
+		result.needmorespace = 0;
+		return result;
+	}
+
+	if (max_mapnr != num_physpages) {
+		abort_suspend("Max_mapnr is not equal to num_physpages.");
+		result.size1 = -1;
+		result.size1low = -1;
+		result.size2 = -1;
+		result.size2low = -1;
+		result.needmorespace = 0;
+		return result;
+	}
+	/*
+	 * Pages not to be saved are marked Nosave irrespective of being reserved
+	 */
+	for (loop = 0; loop < max_mapnr; loop++) {
+		struct page * page = pfn_to_page(loop);
+		if (PageNosave(page)) {
+			numnosave++;
+			continue;
+		}
+
+		if (!PageReserved(page)) {
+			if ((chunk_size=size_of_free_region(page))!=0) {
+				numfree += chunk_size;
+				loop += chunk_size - 1;	/* skip over the whole free run */
+				continue;
+			}
+		} else {
+			if (PageHighMem(page)) {
+				/* HighMem pages may be marked Reserved. We ignore them. */
+				numnosave++;
+				continue;
+			}
+		}	/* (stray ';' after this brace removed - harmless null statement) */
+
+		usepagedir2 = PagePageset2(page);
+
+		if (usepagedir2) {
+			result.size2++;
+			if (!PageHighMem(page))
+				result.size2low++;	/* "low" counts exclude highmem */
+			SetPagePageset1Copy(page);
+		} else {
+			result.size1++;
+			SetPagePageset1(page);
+			if (!PageHighMem(page))
+				result.size1low++;
+		}
+	}
+	
+	if ((pagedir1.pageset_size) && (result.size1 > pagedir1.pageset_size))
+		result.needmorespace = 1;	/* pagesets grew since last iteration */
+	if ((pagedir2.pageset_size) && (result.size2 > pagedir2.pageset_size))
+		result.needmorespace = 1;
+
+	suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_MEDIUM, 0,
+		"Count data pages: Set1 (%d) + Set2 (%d) + Nosave (%d) + NumFree (%d) = %d.\n",
+		result.size1, result.size2, numnosave, numfree,
+		result.size1 + result.size2 + numnosave + numfree);
+	BITMAP_FOR_EACH_SET(allocd_pages_map, loop)
+		SetPagePageset1Copy(pfn_to_page(loop));
+	return result;
+}
+
+/* amount_needed
+ *
+ * Calculates the amount by which the image size needs to be reduced to meet
+ * our constraints.
+ */
+static int amount_needed(int use_image_size_limit)
+{
+
+	int max1 = max( (int) (RAM_TO_SUSPEND - real_nr_free_pages() - 
+			  nr_free_highpages() - suspend_amount_grabbed),
+			((int) (STORAGE_NEEDED(1) -  
+			  active_writer->ops.writer.storage_available())));	/* worse of: pages short of RAM vs. short of storage */
+	if (use_image_size_limit)
+		return max( max1,
+			    (image_size_limit > 0) ? 
+			    ((int) (STORAGE_NEEDED(1) - (image_size_limit << 8))) : 0);	/* limit is in MB; << 8 converts MB to 4K pages */
+	return max1;
+}
+
+#define EATEN_ENOUGH_MEMORY() (amount_needed(1) < 1)
+unsigned long storage_available = 0;
+
+/* display_stats
+ *
+ * Display the vital statistics.
+ */
+#ifdef CONFIG_SOFTWARE_SUSPEND_DEBUG
+static void display_stats(void)
+{ 
+	unsigned long storage_allocated = active_writer->ops.writer.storage_allocated();
+	suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_MEDIUM, 1,
+		"Free:%d+%d+%d=%d(%d). Sets:%d(%d),%d(%d). Header:%d. Nosave:%d-%d-%d=%d. Storage:%d/%lu(%lu). Needed:%d|%d|%d.\n", 
+		
+		/* Free */
+		real_nr_free_pages(), suspend_amount_grabbed,
+		real_nr_free_pages() + suspend_amount_grabbed,
+		real_nr_free_pages() - nr_free_highpages(),
+		
+		/* Sets */
+		pageset1_size, pageset1_sizelow,
+		pageset2_size, pageset2_sizelow,
+
+		/* Header */
+		num_extent_pages,
+
+		/* Nosave */
+		numnosave, extra_pagedir_pages_allocated, suspend_amount_grabbed,
+		numnosave - extra_pagedir_pages_allocated - suspend_amount_grabbed,
+
+		/* Storage - converted to pages for comparison */
+		storage_allocated,	/* NOTE(review): unsigned long printed via %d above - confirm specifier */
+		STORAGE_NEEDED(1),
+		storage_available,
+
+		/* Needed */
+		RAM_TO_SUSPEND - real_nr_free_pages() - nr_free_highpages() - suspend_amount_grabbed,
+		STORAGE_NEEDED(1) - storage_available, 
+		(image_size_limit > 0) ? (STORAGE_NEEDED(1) - (image_size_limit << 8)) : 0);
+}
+#else
+#define display_stats() do { } while(0)
+#endif
+
+/*
+ * Eaten is the number of pages which have been eaten.
+ * Pagedirincluded is the number of pages which have been allocated for the pagedir.
+ */
+extern int allocate_extra_pagedir_memory(struct pagedir * p, int pageset_size, int alloc_from);
+
+struct pageset_sizes_result recalculate_stats(void) 
+{
+	struct pageset_sizes_result result;
+
+	mark_pages_for_pageset2();  /* Need to call this before getting pageset1_size! */
+	result = count_data_pages();
+	suspend_message(SUSPEND_PAGESETS, SUSPEND_VERBOSE, 1,
+		"Result %d and %d.\n",
+		result.size1, result.size2);
+	pageset1_sizelow = result.size1low;
+	pageset2_sizelow = result.size2low;
+	pagedir1.lastpageset_size = pageset1_size = result.size1;	/* publish the new sizes globally */
+	pagedir2.lastpageset_size = pageset2_size = result.size2;
+	storage_available = active_writer->ops.writer.storage_available();	/* refresh cached storage figure */
+	suspend_store_free_mem(SUSPEND_FREE_EXTENT_PAGES, 0);
+	return result;
+}
+
+/* update_image
+ *
+ * Allocate [more] memory and storage for the image.
+ * Remember, this is iterative!
+ */
+static int update_image(void) 
+{ 
+	struct pageset_sizes_result result;
+	int iteration = 0, orig_num_extent_pages;
+
+	result = recalculate_stats();
+
+	suspend_store_free_mem(SUSPEND_FREE_EXTENT_PAGES, 0);
+
+	do {
+		iteration++;
+
+		orig_num_extent_pages = num_extent_pages;	/* detect extent growth during this pass */
+
+		suspend_message(SUSPEND_ANY_SECTION, SUSPEND_LOW, 1,
+				"-- Iteration %d.\n", iteration);
+
+		if (suspend_allocate_checksum_pages()) {	/* each failure returns 1: caller eats more memory */
+			suspend_message(SUSPEND_ANY_SECTION, SUSPEND_LOW, 1,
+				"Still need to get more pages for checksum pages.\n");
+			return 1;
+		}
+
+		/* Include allowance for growth in pagedir1 while writing pagedir 2 */
+		if (allocate_extra_pagedir_memory(&pagedir1, pageset1_size + 100, pageset2_sizelow)) {
+			suspend_message(SUSPEND_ANY_SECTION, SUSPEND_LOW, 1,
+				"Still need to get more pages for pagedir 1.\n");
+			return 1;
+		}
+
+		if (active_writer->ops.writer.allocate_storage(MAIN_STORAGE_NEEDED(1))) {
+			suspend_message(SUSPEND_ANY_SECTION, SUSPEND_LOW, 1,
+				"Still need to get more storage space for the image proper.\n");
+			suspend_store_free_mem(SUSPEND_FREE_WRITER_STORAGE, 0);
+			return 1;
+		}
+
+		suspend_store_free_mem(SUSPEND_FREE_WRITER_STORAGE, 0);
+
+		set_suspend_state(SUSPEND_SLAB_ALLOC_FALLBACK);	/* presumably lets header allocation fall back to slab - confirm */
+
+		if (active_writer->ops.writer.allocate_header_space(HEADER_STORAGE_NEEDED)) {
+			suspend_message(SUSPEND_ANY_SECTION, SUSPEND_LOW, 1,
+				"Still need to get more storage space for header.\n");
+			return 1;
+		}
+
+		header_space_allocated = HEADER_STORAGE_NEEDED;
+
+		clear_suspend_state(SUSPEND_SLAB_ALLOC_FALLBACK);
+
+		/* 
+		 * Allocate remaining storage space, if possible, up to the
+		 * maximum we know we'll need. It's okay to allocate the
+		 * maximum if the writer is the swapwriter, but
+		 * we don't want to grab all available space on an NFS share.
+		 * We therefore ignore the expected compression ratio here,
+		 * thereby trying to allocate the maximum image size we could
+		 * need (assuming compression doesn't expand the image), but
+		 * don't complain if we can't get the full amount we're after.
+		 */
+
+		active_writer->ops.writer.allocate_storage(
+			max((long)(active_writer->ops.writer.storage_available() -
+				active_writer->ops.writer.storage_allocated()),
+			     (long)(HEADER_STORAGE_NEEDED + MAIN_STORAGE_NEEDED(1))));	/* NOTE(review): max() of "all remaining" vs "needed" looks like it should be min() per the comment above - confirm */
+
+		suspend_store_free_mem(SUSPEND_FREE_WRITER_STORAGE, 0);
+
+		result = recalculate_stats();
+		display_stats();
+
+	} while (((orig_num_extent_pages < num_extent_pages) || 
+		   result.needmorespace ||
+		   header_space_allocated < HEADER_STORAGE_NEEDED ||
+		   active_writer->ops.writer.storage_allocated() < (HEADER_STORAGE_NEEDED + MAIN_STORAGE_NEEDED(1))) 
+		 && (!TEST_RESULT_STATE(SUSPEND_ABORTED)));
+	
+	suspend_message(SUSPEND_ANY_SECTION, SUSPEND_MEDIUM, 1, "-- Exit loop.\n");
+
+	return (amount_needed(0) > 0);	/* non-zero: constraints still not met, caller iterates */
+}
+
+/* ----------------------- Memory grabbing --------------------------
+ *
+ * All of the memory that is available, we grab.
+ * This enables us to get the image size down, even when other
+ * processes might be trying to increase their memory usage. (We
+ * have a hook to disable the OOM killer).
+ *
+ * At the same time, suspend's own routines get memory from this
+ * pool, and so does slab growth. Only get_zeroed_page and siblings
+ * see no memory available.
+ */
+
+static spinlock_t suspend_grabbed_memory_lock = SPIN_LOCK_UNLOCKED;
+struct list_head grabbed_pages[MAX_ORDER];
+
+static void __grab_free_memory(void)
+{
+	int order, k;
+
+	/*
+	 * First, quickly eat all memory that's already free.
+	 */
+	
+	for (order = MAX_ORDER - 1; order > -1; order--) {	/* largest chunks first */
+		struct page * new_entry = alloc_pages(GFP_ATOMIC, order);
+		while (new_entry) {	/* drain this order until allocation fails */
+			INIT_LIST_HEAD(&new_entry->lru);
+			for (k=0; k < (1 << order); k++) {
+				SetPageNosave(new_entry + k);	/* grabbed pages are excluded from the image */
+				ClearPagePageset2(new_entry + k);
+			}
+			list_add_tail(&new_entry->lru, &grabbed_pages[order]);
+			suspend_amount_grabbed += (1 << order);
+			new_entry = alloc_pages(GFP_ATOMIC, order);
+		}
+	}
+}
+
+static void grab_free_memory(void)
+{
+	unsigned long flags;
+	
+	spin_lock_irqsave(&suspend_grabbed_memory_lock, flags);	/* serialise with get_grabbed_pages()/free_grabbed_memory() */
+	__grab_free_memory();
+	spin_unlock_irqrestore(&suspend_grabbed_memory_lock, flags);
+}
+
+static void free_grabbed_memory(void)
+{
+	int j, num_freed = 0, order;
+	unsigned long flags;
+
+	spin_lock_irqsave(&suspend_grabbed_memory_lock, flags);
+
+	/* Free all eaten pages immediately */
+	for (order = MAX_ORDER - 1; order > -1; order--) {
+		struct page *this, *next;
+		list_for_each_entry_safe(this, next, &grabbed_pages[order], lru) {	/* _safe: entries deleted while walking */
+			for (j=0; j < (1 << order); j++)
+				ClearPageNosave(this + j);
+			list_del_init(&this->lru);
+			__free_pages(this, order);
+			num_freed+= (1 << order);
+		}
+	}
+	suspend_amount_grabbed -= num_freed;
+	BUG_ON(suspend_amount_grabbed);	/* bookkeeping must balance: every grabbed page freed */
+	spin_unlock_irqrestore(&suspend_grabbed_memory_lock, flags);
+}
+
+unsigned long get_grabbed_pages(int order)
+{
+	int alternative, j;
+	unsigned long flags;
+	struct page * page;
+	unsigned long virt = 0;
+
+	/* Get grabbed lowmem pages for suspend's use */
+	spin_lock_irqsave(&suspend_grabbed_memory_lock, flags);
+
+try_again:	
+	if (!list_empty(&grabbed_pages[order])) {	/* exact-size chunk available */
+		page = list_entry(grabbed_pages[order].next, struct page, lru);
+		list_del_init(&page->lru);
+		for (j=0; j < (1 << order); j++) {
+			ClearPageNosave(page + j);
+			ClearPagePageset2(page + j);
+			clear_page(page_address(page + j));
+		}
+		suspend_amount_grabbed -= (1 << order);
+		spin_unlock_irqrestore(&suspend_grabbed_memory_lock, flags);
+		return (unsigned long) page_address(page);
+	}
+
+	alternative = order+1;	/* no exact fit: look for a larger chunk to split */
+	while ((alternative < MAX_ORDER) && (list_empty(&grabbed_pages[alternative])))	/* index checked before use: grabbed_pages[MAX_ORDER] is out of bounds */
+		alternative++;
+
+	/* Maybe we didn't eat any memory - try normal get */
+	if (alternative == MAX_ORDER) {
+		page = alloc_pages(GFP_ATOMIC, order);
+		if (page) {
+			virt = (unsigned long) page_address(page);
+			for (j=0; j < (1 << order); j++) {
+				clear_page((char *) virt + j * PAGE_SIZE);
+				ClearPagePageset2(page + j);
+			}
+		}
+		spin_unlock_irqrestore(&suspend_grabbed_memory_lock, flags);
+		return virt;
+	}
+
+	{
+		page = list_entry(grabbed_pages[alternative].next, struct page, lru);
+		virt = (unsigned long) page_address(page);
+		list_del_init(&page->lru);
+		for (j=0; j < (1 << (alternative)); j++) {
+			ClearPageNosave(page + j);
+			clear_page(page_address(page + j));
+			ClearPagePageset2(page + j);
+		}
+		free_pages(virt, alternative);	/* release the larger chunk back to the allocator */
+		suspend_amount_grabbed -= (1 << alternative);
+	}
+
+	/* Get the chunk we want to return. May fail if something grabs
+	 * the memory before us. */
+	virt = __get_free_pages(GFP_ATOMIC, order);
+	if (!virt)
+		goto try_again;
+
+	page = virt_to_page(virt);
+
+	/* Grab the rest */
+	__grab_free_memory();
+	
+	spin_unlock_irqrestore(&suspend_grabbed_memory_lock, flags);
+
+	return virt;
+}
+
+/* --------------------------------------------------------------------------- */
+
+extern int freeze_processes(int no_progress);
+
+static int attempt_to_freeze(void)
+{
+	int result;
+	
+	/* Stop processes before checking again */
+	thaw_processes(FREEZER_ALL_THREADS);	/* wake anything frozen by an earlier attempt first */
+	prepare_status(1, 1, "Freezing processes");
+	result = freeze_processes(0);
+	suspend_message(SUSPEND_FREEZER, SUSPEND_VERBOSE, 0, "- Freeze_processes returned %d.\n",
+		result);
+
+	if (result) {	/* non-zero: freeze failed, abort the whole cycle */
+		SET_RESULT_STATE(SUSPEND_ABORTED);
+		SET_RESULT_STATE(SUSPEND_FREEZING_FAILED);
+	} else
+		arefrozen = 1;	/* remembered so later stages know processes are frozen */
+
+	return result;
+}
+
+extern asmlinkage long sys_sync(void);
+
+static int eat_memory(void)
+{
+	int orig_memory_still_to_eat, last_amount_needed = 0, times_criteria_met = 0;
+	int free_flags = 0, did_eat_memory = 0;	/* NOTE(review): free_flags is assigned below but never read here - confirm intent */
+	
+	/*
+	 * Note that if we have enough storage space and enough free memory, we may
+	 * exit without eating anything. We give up when the last 10 iterations ate
+	 * no extra pages because we're not going to get much more anyway, but
+	 * the few pages we get will take a lot of time.
+	 *
+	 * We freeze processes before beginning, and then unfreeze them if we
+	 * need to eat memory until we think we have enough. If our attempts
+	 * to freeze fail, we give up and abort.
+	 */
+
+	/* ----------- Stage 1: Freeze Processes ------------- */
+
+	
+	prepare_status(0, 1, "Eating memory.");
+
+	recalculate_stats();
+	display_stats();
+
+	orig_memory_still_to_eat = amount_needed(1);
+	last_amount_needed = orig_memory_still_to_eat;
+
+	switch (image_size_limit) {
+		case -1: /* Don't eat any memory */
+			if (orig_memory_still_to_eat) {
+				SET_RESULT_STATE(SUSPEND_ABORTED);
+				SET_RESULT_STATE(SUSPEND_WOULD_EAT_MEMORY);
+			}
+			break;
+		case -2:  /* Free caches only */
+			free_flags = GFP_NOIO | __GFP_HIGHMEM;
+			break;
+		default:
+			free_flags = GFP_ATOMIC | __GFP_HIGHMEM;
+	}
+		
+	/* ----------- Stage 2: Eat memory ------------- */
+
+	while (((!EATEN_ENOUGH_MEMORY()) || (image_size_limit == -2)) && (!TEST_RESULT_STATE(SUSPEND_ABORTED)) && (times_criteria_met < 10)) {
+		int amount_freed;
+		int amount_wanted = orig_memory_still_to_eat - amount_needed(1);
+		if (amount_wanted < 1)
+			amount_wanted = 1; /* image_size_limit == -2 */
+
+		suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_VERBOSE, 1,
+			"Times met criteria is %d.\n", times_criteria_met);
+		if (orig_memory_still_to_eat)
+			update_status(orig_memory_still_to_eat - amount_needed(1), orig_memory_still_to_eat, " Image size %d ", MB(STORAGE_NEEDED(1)));
+		else
+			update_status(0, 1, "Image size %d ", MB(STORAGE_NEEDED(1)));
+		
+		if ((last_amount_needed - amount_needed(1)) < 10)	/* progress under 10 pages counts toward giving up */
+			times_criteria_met++;
+		else
+			times_criteria_met = 0;
+		last_amount_needed = amount_needed(1);
+		amount_freed = shrink_all_memory(last_amount_needed);
+		suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_VERBOSE, 1,
+			"Given %d, shrink_all_memory returned %d.\n", last_amount_needed, amount_freed);
+		grab_free_memory();
+		recalculate_stats();
+		display_stats();
+
+		did_eat_memory = 1;
+
+		check_shift_keys(0, NULL);
+	}
+
+	grab_free_memory();
+	
+	suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_VERBOSE, 1,
+		"Out of main eat memory loop.\n");
+
+	if (did_eat_memory) {
+		unsigned long orig_state = get_suspend_state();
+		suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_VERBOSE, 1,
+			"Ate memory; letting kjournald etc run.\n");
+		thaw_processes(FREEZER_KERNEL_THREADS);
+		/* Freeze_processes will call sys_sync too */
+		freeze_processes(1);
+		grab_free_memory();
+		restore_suspend_state(orig_state);
+		recalculate_stats();
+		display_stats();
+	}
+
+	suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_VERBOSE, 1, "\n");	/* fixed: args were (..., 1, SUSPEND_VERBOSE, ...) - swapped vs. (section, level, flag, fmt) used everywhere else */
+	
+	suspend_message(SUSPEND_EAT_MEMORY, SUSPEND_VERBOSE, 1,
+		"(Freezer exit:) Swap needed calculated as (%d+%d)*%d/100+%d+1+%d=%d.\n",
+		pageset1_size,
+		pageset2_size,
+		expected_compression_ratio(),
+		num_extent_pages,
+	 	HEADER_STORAGE_NEEDED,
+		STORAGE_NEEDED(1));
+
+	/* Blank out image size display */
+	update_status(100, 100, "                   ");
+
+	/* Include image size limit when checking what to report */
+	if (amount_needed(1) > 0) 
+		SET_RESULT_STATE(SUSPEND_UNABLE_TO_FREE_ENOUGH_MEMORY);
+
+	/* But don't include it when deciding whether to abort (soft limit) */
+	if ((amount_needed(0) > 0)) {
+		printk("Unable to free sufficient memory to suspend. Still need %d pages. "
+			"You may be able to avoid this problem by reducing the async_io_limit\n",
+			amount_needed(1));
+		SET_RESULT_STATE(SUSPEND_ABORTED);
+	}
+	
+	check_shift_keys(1, "Memory eating completed.");
+	return 0;
+}
+
+/* prepare_image
+ *
+ * Entry point to the whole image preparation section.
+ *
+ * We do four things:
+ * - Freeze processes;
+ * - Ensure image size constraints are met;
+ * - Complete all the preparation for saving the image,
+ *   including allocation of storage. The only memory
+ *   that should be needed when we're finished is that
+ *   for actually storing the image (and we know how
+ *   much is needed for that because the plugins tell
+ *   us).
+ * - Make sure that all dirty buffers are written out.
+ */
+int prepare_image(void)
+{
+	int result = 1, sizesought, i;
+
+	for (i = 0; i < MAX_ORDER; i++)
+		INIT_LIST_HEAD(&grabbed_pages[i]);	/* empty lists for the memory grabber */
+
+	arefrozen = 0;
+
+	header_space_allocated = 0;
+
+	sizesought = 100 + memory_for_plugins();	/* NOTE(review): computed but never read below - dead store? confirm */
+
+	if (attempt_to_freeze())
+		return 1;
+
+	PRINTFREEMEM("after freezing processes");
+	suspend_store_free_mem(SUSPEND_FREE_FREEZER, 0);
+	
+	if (!active_writer->ops.writer.storage_available()) {
+		printk(KERN_ERR "You need some storage available to be able to suspend.\n");
+		SET_RESULT_STATE(SUSPEND_ABORTED);
+		SET_RESULT_STATE(SUSPEND_NOSTORAGE_AVAILABLE);
+		return 1;
+	}
+
+	do {
+		if (eat_memory() || TEST_RESULT_STATE(SUSPEND_ABORTED))
+			break;
+
+		PRINTFREEMEM("after eating memory");
+		suspend_store_free_mem(SUSPEND_FREE_EAT_MEMORY, 0);
+	
+		result = update_image();
+		PRINTFREEMEM("after updating the image");
+
+	} while ((result) && (!TEST_RESULT_STATE(SUSPEND_ABORTED)) &&
+		(!TEST_RESULT_STATE(SUSPEND_UNABLE_TO_FREE_ENOUGH_MEMORY)));	/* iterate: eating memory and allocating storage interact */
+
+	PRINTFREEMEM("after preparing image");
+
+	set_suspend_state(SUSPEND_LRU_FREEZE);
+
+	/* Release memory that has been eaten */
+	free_grabbed_memory();
+	
+	PRINTFREEMEM("after freeing grabbed memory");
+	suspend_store_free_mem(SUSPEND_FREE_GRABBED_MEMORY, 1);
+	
+	check_shift_keys(1, "Image preparation complete.");
+
+	return result;
+}
+
+EXPORT_SYMBOL(suspend_amount_grabbed);
