diff -urN 2.4.10pre14/include/linux/mm.h readahead/include/linux/mm.h
--- 2.4.10pre14/include/linux/mm.h	Sat Sep 22 08:06:22 2001
+++ readahead/include/linux/mm.h	Sat Sep 22 12:01:18 2001
@@ -414,6 +414,8 @@
 #define __free_page(page) __free_pages((page), 0)
 #define free_page(addr) free_pages((addr),0)
 
+extern int start_aggressive_readahead(unsigned int);
+
 extern void show_free_areas(void);
 extern void show_free_areas_node(pg_data_t *pgdat);
 
diff -urN 2.4.10pre14/kernel/ksyms.c readahead/kernel/ksyms.c
--- 2.4.10pre14/kernel/ksyms.c	Sat Sep 22 08:06:23 2001
+++ readahead/kernel/ksyms.c	Sat Sep 22 11:59:09 2001
@@ -89,6 +89,7 @@
 EXPORT_SYMBOL(exit_sighand);
 
 /* internal kernel memory management */
+EXPORT_SYMBOL(start_aggressive_readahead);
 EXPORT_SYMBOL(_alloc_pages);
 EXPORT_SYMBOL(__alloc_pages);
 EXPORT_SYMBOL(alloc_pages_node);
diff -urN 2.4.10pre14/mm/page_alloc.c readahead/mm/page_alloc.c
--- 2.4.10pre14/mm/page_alloc.c	Sat Sep 22 08:06:24 2001
+++ readahead/mm/page_alloc.c	Sat Sep 22 12:00:51 2001
@@ -507,6 +507,38 @@
 #endif
 
 /*
+ * If this returns non-zero it means there is plenty of RAM
+ * truly "free" (note: free, not just in cache!), so the caller
+ * knows it can allocate some memory to do some more aggressive
+ * (possibly wasteful) readahead. The state of the memory
+ * should be rechecked after every few pages allocated for
+ * doing this aggressive readahead.
+ *
+ * The gfp_mask parameter specifies in which kind of memory
+ * the readahead pages will be allocated.
+ */
+int start_aggressive_readahead(unsigned int gfp_mask)
+{
+	pg_data_t *pgdat = pgdat_list;
+	zonelist_t *zonelist;
+	zone_t **zonep, *zone;
+	int ret = 0;
+
+	do {
+		zonelist = pgdat->node_zonelists + gfp_mask;
+		zonep = zonelist->zones;
+
+		for (zone = *zonep++; zone; zone = *zonep++)
+			if (zone->free_pages > zone->pages_high * 2)
+				ret = 1;
+
+		pgdat = pgdat->node_next;
+	} while (pgdat);
+
+	return ret;
+}
+
+/*
  * Show free area list (used inside shift_scroll-lock stuff)
  * We also calculate the percentage fragmentation. We do this by counting the
  * memory on each free list with the exception of the first item on the list.
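
For illustration, a caller can poll start_aggressive_readahead() while
extending its readahead window. The sketch below is not part of the
patch: the helper read_one_page() and the choice of GFP_USER as the
zonelist index are assumptions made up for this example.

	#include <linux/mm.h>
	#include <linux/fs.h>

	/*
	 * Hypothetical caller: read ahead up to nr_pages pages, but
	 * only while plenty of RAM is truly free.  As the comment
	 * above start_aggressive_readahead() asks, the memory state
	 * is rechecked every iteration rather than only once.
	 *
	 * read_one_page() is a made-up helper standing in for the
	 * real readahead work; GFP_USER is assumed to be a valid
	 * zonelist index for the pages the readahead would allocate.
	 */
	static void example_aggressive_readahead(struct file *file,
						 unsigned long offset,
						 unsigned long nr_pages)
	{
		unsigned long done;

		for (done = 0; done < nr_pages; done++) {
			if (!start_aggressive_readahead(GFP_USER))
				break;	/* free memory ran low, stop */
			read_one_page(file, offset + done);
		}
	}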