View | Details | Raw Unified | Return to bug 552492
Collapse All | Expand All

(-)head-2009-10-06.orig/mm/vmalloc.c (-4 / +22 lines)
Lines 1394-1399: static void *__vmalloc_area_node(struct ...) — [Link Here]
1394
{
1394
{
1395
	struct page **pages;
1395
	struct page **pages;
1396
	unsigned int nr_pages, array_size, i;
1396
	unsigned int nr_pages, array_size, i;
1397
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;
1398
#ifdef CONFIG_XEN
1399
	gfp_t dma_mask = gfp_mask & (__GFP_DMA | __GFP_DMA32);
1400
1401
	BUILD_BUG_ON((__GFP_DMA | __GFP_DMA32) != (__GFP_DMA + __GFP_DMA32));
1402
	if (dma_mask == (__GFP_DMA | __GFP_DMA32))
1403
		gfp_mask &= ~(__GFP_DMA | __GFP_DMA32);
1404
#endif
1397
1405
1398
	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1406
	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
1399
	array_size = (nr_pages * sizeof(struct page *));
1407
	array_size = (nr_pages * sizeof(struct page *));
Lines 1401-1413: static void *__vmalloc_area_node(struct ...) — [Link Here]
1401
	area->nr_pages = nr_pages;
1409
	area->nr_pages = nr_pages;
1402
	/* Please note that the recursion is strictly bounded. */
1410
	/* Please note that the recursion is strictly bounded. */
1403
	if (array_size > PAGE_SIZE) {
1411
	if (array_size > PAGE_SIZE) {
1404
		pages = __vmalloc_node(array_size, gfp_mask | __GFP_ZERO,
1412
		pages = __vmalloc_node(array_size, nested_gfp | __GFP_HIGHMEM,
1405
				PAGE_KERNEL, node, caller);
1413
				PAGE_KERNEL, node, caller);
1406
		area->flags |= VM_VPAGES;
1414
		area->flags |= VM_VPAGES;
1407
	} else {
1415
	} else {
1408
		pages = kmalloc_node(array_size,
1416
		pages = kmalloc_node(array_size, nested_gfp, node);
1409
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
1410
				node);
1411
	}
1417
	}
1412
	area->pages = pages;
1418
	area->pages = pages;
1413
	area->caller = caller;
1419
	area->caller = caller;
Lines 1431-1436: static void *__vmalloc_area_node(struct ...) — [Link Here]
1431
			goto fail;
1437
			goto fail;
1432
		}
1438
		}
1433
		area->pages[i] = page;
1439
		area->pages[i] = page;
1440
#ifdef CONFIG_XEN
1441
		if (dma_mask) {
1442
			if (xen_limit_pages_to_max_mfn(page, 0, 32)) {
1443
				area->nr_pages = i + 1;
1444
				goto fail;
1445
			}
1446
			if (gfp_mask & __GFP_ZERO)
1447
				clear_highpage(page);
1448
		}
1449
#endif
1434
	}
1450
	}
1435
1451
1436
	if (map_vm_area(area, prot, &pages))
1452
	if (map_vm_area(area, prot, &pages))
Lines 1587-1592: void *vmalloc_exec(unsigned long size) — [Link Here]
1587
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1603
#define GFP_VMALLOC32 GFP_DMA32 | GFP_KERNEL
1588
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1604
#elif defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA)
1589
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1605
#define GFP_VMALLOC32 GFP_DMA | GFP_KERNEL
1606
#elif defined(CONFIG_XEN)
1607
#define GFP_VMALLOC32 __GFP_DMA | __GFP_DMA32 | GFP_KERNEL
1590
#else
1608
#else
1591
#define GFP_VMALLOC32 GFP_KERNEL
1609
#define GFP_VMALLOC32 GFP_KERNEL
1592
#endif
1610
#endif

Return to bug 552492