patch-2.3.43 linux/mm/page_alloc.c
- Lines: 188
- Date: Wed Feb 9 19:33:13 2000
- Orig file: v2.3.42/linux/mm/page_alloc.c
- Orig date: Fri Jan 28 15:09:09 2000
diff -u --recursive --new-file v2.3.42/linux/mm/page_alloc.c linux/mm/page_alloc.c
@@ -29,7 +29,9 @@
LIST_HEAD(lru_cache);
static char *zone_names[MAX_NR_ZONES] = { "DMA", "Normal", "HighMem" };
-static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128 };
+static int zone_balance_ratio[MAX_NR_ZONES] = { 128, 128, 128, };
+static int zone_balance_min[MAX_NR_ZONES] = { 10 , 10, 10, };
+static int zone_balance_max[MAX_NR_ZONES] = { 255 , 255, 255, };
/*
* Free_page() adds the page to the free lists. This is optimized for
@@ -196,9 +198,6 @@
return NULL;
}
-#define ZONE_BALANCED(zone) \
- (((zone)->free_pages > (zone)->pages_low) && (!(zone)->low_on_memory))
-
static inline unsigned long classfree(zone_t *zone)
{
unsigned long free = 0;
@@ -215,21 +214,6 @@
static inline int zone_balance_memory (zone_t *zone, int gfp_mask)
{
int freed;
- unsigned long free = classfree(zone);
-
- if (free >= zone->pages_low) {
- if (!zone->low_on_memory)
- return 1;
- /*
- * Simple hysteresis: exit 'low memory mode' if
- * the upper limit has been reached:
- */
- if (free >= zone->pages_high) {
- zone->low_on_memory = 0;
- return 1;
- }
- } else
- zone->low_on_memory = 1;
/*
* In the atomic allocation case we only 'kick' the
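
After this hunk the per-call hysteresis is gone from zone_balance_memory(); what remains simply kicks the freeing path and fails only atomic (non-__GFP_HIGH) allocations when nothing could be freed. A sketch of the resulting function, reconstructed here from the surviving context lines (the same tail is visible in the #if 0 copy deleted in the next hunk), not copied from the tree:

    static inline int zone_balance_memory(zone_t *zone, int gfp_mask)
    {
            int freed;

            /*
             * In the atomic allocation case we only 'kick' the
             * state machine, but do not try to free pages
             * ourselves.
             */
            freed = try_to_free_pages(gfp_mask, zone);

            if (!freed && !(gfp_mask & __GFP_HIGH))
                    return 0;
            return 1;
    }
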
@@ -243,43 +227,6 @@
return 1;
}
-#if 0
-/*
- * We are still balancing memory in a global way:
- */
-static inline int balance_memory (zone_t *zone, int gfp_mask)
-{
- unsigned long free = nr_free_pages();
- static int low_on_memory = 0;
- int freed;
-
- if (free >= freepages.low) {
- if (!low_on_memory)
- return 1;
- /*
- * Simple hysteresis: exit 'low memory mode' if
- * the upper limit has been reached:
- */
- if (free >= freepages.high) {
- low_on_memory = 0;
- return 1;
- }
- } else
- low_on_memory = 1;
-
- /*
- * In the atomic allocation case we only 'kick' the
- * state machine, but do not try to free pages
- * ourselves.
- */
- freed = try_to_free_pages(gfp_mask, zone);
-
- if (!freed && !(gfp_mask & __GFP_HIGH))
- return 0;
- return 1;
-}
-#endif
-
/*
* This is the 'heart' of the zoned buddy allocator:
*/
@@ -310,11 +257,31 @@
* further thought.
*/
if (!(current->flags & PF_MEMALLOC))
- /*
- * fastpath
- */
- if (!ZONE_BALANCED(z))
- goto balance;
+ {
+ if (classfree(z) > z->pages_high)
+ {
+ if (z->low_on_memory)
+ z->low_on_memory = 0;
+ }
+ else
+ {
+ extern wait_queue_head_t kswapd_wait;
+
+ if (z->low_on_memory)
+ goto balance;
+
+ if (classfree(z) <= z->pages_low)
+ {
+ wake_up_interruptible(&kswapd_wait);
+
+ if (classfree(z) <= z->pages_min)
+ {
+ z->low_on_memory = 1;
+ goto balance;
+ }
+ }
+ }
+ }
/*
* This is an optimization for the 'higher order zone
* is empty' case - it can happen even in well-behaved
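
The replacement fast path above implements per-zone hysteresis directly at allocation time: a zone leaves 'low memory mode' only once its free count climbs back above pages_high, kswapd is woken as soon as it dips to pages_low, and the allocator balances synchronously once it falls to pages_min. A compilable user-space sketch of that decision (the struct and function names here are invented for illustration, and the kswapd wakeup is reduced to a return value):

    /* Per-zone thresholds and state, mirroring the fields in the hunk. */
    struct zone_sketch {
            unsigned long free;             /* what classfree(z) would return */
            unsigned long pages_min, pages_low, pages_high;
            int low_on_memory;
    };

    enum action { USE_FREELIST, WAKE_KSWAPD, BALANCE };

    static enum action classify(struct zone_sketch *z)
    {
            if (z->free > z->pages_high) {
                    /* Fully recovered: leave 'low memory mode'. */
                    z->low_on_memory = 0;
                    return USE_FREELIST;
            }
            if (z->low_on_memory)
                    return BALANCE;         /* hysteresis: keep balancing */
            if (z->free <= z->pages_low) {
                    /* The real code wakes kswapd_wait here. */
                    if (z->free <= z->pages_min) {
                            z->low_on_memory = 1;
                            return BALANCE;
                    }
                    return WAKE_KSWAPD;
            }
            return USE_FREELIST;
    }

    int main(void)
    {
            /* free=40 is at or below pages_low=64 but above pages_min=32,
             * so kswapd gets woken and the allocation still proceeds. */
            struct zone_sketch z = { 40, 32, 64, 96, 0 };
            return classify(&z) == WAKE_KSWAPD ? 0 : 1;
    }
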
@@ -378,7 +345,7 @@
zone_t *zone;
int i;
- sum = nr_lru_pages;
+ sum = nr_lru_pages - atomic_read(&page_cache_size);
for (i = 0; i < NUMNODES; i++)
for (zone = NODE_DATA(i)->node_zones; zone <= NODE_DATA(i)->node_zones+ZONE_NORMAL; zone++)
sum += zone->free_pages;
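
The one-line change above alters the estimate: the sum now starts from the LRU pages that are not in the page cache and then adds the free pages of the DMA and Normal zones. A toy calculation with made-up figures, for illustration only:

    #include <stdio.h>

    int main(void)
    {
            /* Made-up figures, not taken from any real system. */
            unsigned long nr_lru_pages = 12000, page_cache_size = 9000;
            unsigned long free_dma = 50, free_normal = 700;
            unsigned long sum;

            sum = nr_lru_pages - page_cache_size;   /* LRU pages outside the page cache: 3000 */
            sum += free_dma + free_normal;          /* plus free DMA and Normal pages */
            printf("sum = %lu\n", sum);             /* prints: sum = 3750 */
            return 0;
    }
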
@@ -515,12 +482,12 @@
* - clear the memory bitmaps
*/
void __init free_area_init_core(int nid, pg_data_t *pgdat, struct page **gmap,
- unsigned int *zones_size, unsigned long zone_start_paddr)
+ unsigned long *zones_size, unsigned long zone_start_paddr)
{
struct page *p, *lmem_map;
unsigned long i, j;
unsigned long map_size;
- unsigned int totalpages, offset;
+ unsigned long totalpages, offset;
unsigned int cumulative = 0;
totalpages = 0;
@@ -528,7 +495,7 @@
unsigned long size = zones_size[i];
totalpages += size;
}
- printk("On node %d totalpages: %08x\n", nid, totalpages);
+ printk("On node %d totalpages: %lu\n", nid, totalpages);
/*
* Select nr of pages we try to keep free for important stuff
@@ -579,7 +546,7 @@
size = zones_size[j];
- printk("zone(%ld): %ld pages.\n", j, size);
+ printk("zone(%lu): %lu pages.\n", j, size);
zone->size = size;
zone->name = zone_names[j];
zone->lock = SPIN_LOCK_UNLOCKED;
@@ -590,7 +557,11 @@
zone->offset = offset;
cumulative += size;
mask = (cumulative / zone_balance_ratio[j]);
- if (mask < 1) mask = 1;
+ if (mask < zone_balance_min[j])
+ mask = zone_balance_min[j];
+ else if (mask > zone_balance_max[j])
+ mask = zone_balance_max[j];
+ zone->pages_min = mask;
zone->pages_low = mask*2;
zone->pages_high = mask*3;
zone->low_on_memory = 0;
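
With the clamping above, each zone's watermarks derive from a single base value: pages_min is cumulative/zone_balance_ratio bounded to [zone_balance_min, zone_balance_max], and pages_low/pages_high are twice and three times that. A user-space sketch with hypothetical cumulative page counts chosen to show both clamp directions:

    #include <stdio.h>

    /* Values from the first hunk of this patch. */
    static int zone_balance_ratio[] = { 128, 128, 128, };
    static int zone_balance_min[]   = { 10, 10, 10, };
    static int zone_balance_max[]   = { 255, 255, 255, };

    int main(void)
    {
            /* Hypothetical cumulative page counts: a small zone that hits
             * the lower clamp and a large one that hits the upper. */
            unsigned long cumulative[] = { 512, 65536 };
            int j;

            for (j = 0; j < 2; j++) {
                    long mask = cumulative[j] / zone_balance_ratio[j];

                    if (mask < zone_balance_min[j])
                            mask = zone_balance_min[j];
                    else if (mask > zone_balance_max[j])
                            mask = zone_balance_max[j];
                    printf("zone %d: pages_min=%ld pages_low=%ld pages_high=%ld\n",
                           j, mask, mask * 2, mask * 3);
            }
            return 0;       /* prints 10/20/30, then 255/510/765 */
    }
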
@@ -622,7 +593,7 @@
build_zonelists(pgdat);
}
-void __init free_area_init(unsigned int *zones_size)
+void __init free_area_init(unsigned long *zones_size)
{
free_area_init_core(0, NODE_DATA(0), &mem_map, zones_size, 0);
}
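
The prototype change from unsigned int * to unsigned long * ripples out to every caller of free_area_init(): platform setup code must now fill an unsigned long array. A hypothetical caller after this patch (variable names invented for illustration):

    /* Hypothetical caller: zones_size entries are now unsigned long,
     * matching the new prototype. */
    unsigned long zones_size[MAX_NR_ZONES] = { 0, 0, 0, };

    zones_size[0] = dma_pages;      /* ZONE_DMA */
    zones_size[1] = normal_pages;   /* ZONE_NORMAL */
    free_area_init(zones_size);
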