patch-2.0.31 linux/mm/page_alloc.c

diff -u --recursive --new-file v2.0.30/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -214,7 +214,7 @@
 		return 0;
 	}
 	restore_flags(flags);
-	if (priority != GFP_BUFFER && try_to_free_page(priority, dma, 1))
+	if (try_to_free_page(priority, dma, 1))
 		goto repeat;
 	return 0;
 }
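
The hunk above changes the fallback at the end of __get_free_pages(): previously a GFP_BUFFER request gave up as soon as free memory was at or below the reserve, while every other non-atomic caller got to call try_to_free_page() and retry. With the special case removed, buffer allocations also make one reclaim attempt before failing. Below is a rough user-space model of the resulting retry logic; the names and the reclaim stub are stand-ins, not the real kernel interfaces.

#include <stdio.h>

static int nr_free_pages = 20;
static const int reserved_pages = 24;

/* Stand-in for try_to_free_page(): pretend reclaim succeeds a few times. */
static int reclaim_one_page(void)
{
	static int budget = 8;
	if (budget-- > 0) {
		nr_free_pages++;
		return 1;
	}
	return 0;
}

/* Model of the tail of __get_free_pages() after this patch: every
 * non-atomic caller, including GFP_BUFFER, may retry after reclaiming. */
static int get_page_model(int atomic)
{
repeat:
	if (atomic || nr_free_pages > reserved_pages) {
		nr_free_pages--;	/* RMQUEUE() stand-in: take a page */
		return 1;
	}
	if (reclaim_one_page())
		goto repeat;
	return 0;			/* still below the reserve: fail */
}

int main(void)
{
	printf("allocation %s, %d pages left\n",
	       get_page_model(0) ? "succeeded" : "failed", nr_free_pages);
	return 0;
}
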
@@ -264,11 +264,11 @@
 
 	/*
 	 * select nr of pages we try to keep free for important stuff
-	 * with a minimum of 16 pages. This is totally arbitrary
+	 * with a minimum of 24 pages. This is totally arbitrary
 	 */
 	i = (end_mem - PAGE_OFFSET) >> (PAGE_SHIFT+7);
-	if (i < 16)
-		i = 16;
+	if (i < 24)
+		i = 24;
 	min_free_pages = i;
 	free_pages_low = i + (i>>1);
 	free_pages_high = i + i;
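
This hunk only raises the floor on min_free_pages from 16 to 24 pages. The surrounding code in free_area_init() reserves roughly one page per 512 KB of managed memory ((end_mem - PAGE_OFFSET) shifted right by PAGE_SHIFT + 7) and then derives free_pages_low and free_pages_high as 1.5x and 2x that figure. The stand-alone program below works the arithmetic through for a few machine sizes, assuming 4 KB pages (PAGE_SHIFT == 12); mem_bytes stands in for end_mem - PAGE_OFFSET.

#include <stdio.h>

#define PAGE_SHIFT 12				/* 4 KB pages, as on i386 */

static void show_thresholds(unsigned long mem_bytes)
{
	/* Same arithmetic as free_area_init() above: one page reserved
	 * per 512 KB of memory, with the new floor of 24 pages. */
	unsigned long i = mem_bytes >> (PAGE_SHIFT + 7);
	if (i < 24)
		i = 24;
	printf("%3lu MB: min_free_pages=%lu low=%lu high=%lu\n",
	       mem_bytes >> 20, i, i + (i >> 1), i + i);
}

int main(void)
{
	show_thresholds(8UL << 20);	/* 8 MB: floor applies, min=24 */
	show_thresholds(16UL << 20);	/* 16 MB: min=32 */
	show_thresholds(64UL << 20);	/* 64 MB: min=128 */
	return 0;
}
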
@@ -311,7 +311,8 @@
 	unsigned long page = __get_free_page(GFP_KERNEL);
 
 	if (pte_val(*page_table) != entry) {
-		free_page(page);
+		if (page)
+			free_page(page);
 		return;
 	}
 	if (!page) {
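
This hunk tightens the race-lost path in swap_in(): the function allocates a page with __get_free_page(GFP_KERNEL), and if the pte no longer matches the swap entry (another context already serviced the fault) it throws the new page away again. That free_page() call is now skipped when the allocation itself failed and returned 0; the genuine out-of-memory case is still handled by the !page branch that follows. A condensed user-space sketch of the flow after the change, using stand-in types and helpers rather than the kernel's own:

#include <stdio.h>
#include <stdlib.h>

/* Stand-ins for __get_free_page()/free_page(); NULL plays the role of
 * the 0 that the kernel allocator returns on failure. */
static void *get_free_page_model(void)  { return calloc(1, 4096); }
static void  free_page_model(void *p)   { free(p); }

/* Returns 1 if 'page' was installed for the faulting pte, 0 otherwise. */
static int swap_in_model(void **pte, void *expected_entry)
{
	void *page = get_free_page_model();

	if (*pte != expected_entry) {	/* someone else already swapped it in */
		if (page)		/* new guard: only free a page we got */
			free_page_model(page);
		return 0;
	}
	if (!page) {			/* allocation failed: out of memory */
		fprintf(stderr, "swap_in_model: out of memory\n");
		return 0;
	}
	*pte = page;			/* install the freshly read page */
	return 1;
}

int main(void)
{
	void *entry = (void *)0x1;	/* pretend swap entry value in the pte */
	void *pte = entry;

	if (swap_in_model(&pte, entry)) {
		printf("page installed\n");
		free_page_model(pte);	/* release the model page again */
	}
	return 0;
}
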
@@ -327,6 +328,11 @@
 	}
 	vma->vm_mm->rss++;
 	tsk->maj_flt++;
+
+	/* Give the physical reallocated page a bigger start */
+	if (vma->vm_mm->rss < (MAP_NR(high_memory) >> 2))
+		mem_map[MAP_NR(page)].age = (PAGE_INITIAL_AGE + PAGE_ADVANCE);
+
 	if (!write_access && add_to_swap_cache(MAP_NR(page), entry)) {
 		/* keep swap page allocated for the moment (swap cache) */
 		set_pte(page_table, mk_pte(page, vma->vm_page_prot));
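
The last hunk gives a page that has just been brought back from swap a head start in the page-aging scheme: its entry in mem_map[] gets age PAGE_INITIAL_AGE + PAGE_ADVANCE, but only while the faulting process's resident set is still below a quarter of the machine's page frames (MAP_NR(high_memory) >> 2), so a small process is less likely to lose the same page again right after a major fault. The sketch below mirrors just that check; the constant values are placeholders, not the definitions from the 2.0 headers.

#include <stdio.h>

/* Placeholder values: the real PAGE_INITIAL_AGE and PAGE_ADVANCE come
 * from the 2.0 kernel headers and may differ. */
#define PAGE_INITIAL_AGE	3
#define PAGE_ADVANCE		3

/* Mirrors the patched check in swap_in(): boost the page's age only
 * while the process's resident set is under a quarter of all frames. */
static void maybe_boost_age(unsigned int *age,
			    unsigned long rss_pages,
			    unsigned long total_frames)
{
	if (rss_pages < (total_frames >> 2))
		*age = PAGE_INITIAL_AGE + PAGE_ADVANCE;
	/* otherwise the page keeps whatever age it already had */
}

int main(void)
{
	unsigned int age = 0;
	maybe_boost_age(&age, 500, 4096);	/* small rss on a 16 MB box */
	printf("boosted age: %u\n", age);

	age = 0;
	maybe_boost_age(&age, 2048, 4096);	/* rss already at half of memory */
	printf("unboosted age: %u\n", age);
	return 0;
}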
