
(-)11.2-2009-11-12.orig/arch/x86/include/asm/scatterlist.h (+3 lines)
Lines 12-17 struct scatterlist {
 	unsigned int	length;
 	dma_addr_t	dma_address;
 	unsigned int	dma_length;
+#ifdef CONFIG_XEN//temp
+ unsigned int caller;
+#endif
 };
 
 #define ARCH_HAS_SG_CHAIN
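
The new caller member above is only consumed by the swiotlb instrumentation later in this patch: swiotlb_map_sg_attrs() records _RET_IP_ in it, and swiotlb_unmap_sg_attrs() reports that address when an entry's dma_length no longer matches its length. A minimal user-space sketch of that record-then-report idea (hypothetical names, only a model of the debugging pattern, not kernel code):

/* Hypothetical model of the caller-tracking debug aid: remember who set up
 * an entry, and print that address if teardown finds it inconsistent. */
#include <stdio.h>

struct sg_dbg {
	unsigned int length;
	unsigned int dma_length;
	unsigned long caller;		/* models the temporary 'caller' field */
};

static void map_entry(struct sg_dbg *sg, unsigned int len, unsigned long ret_ip)
{
	sg->length = len;
	sg->dma_length = len;
	sg->caller = ret_ip;		/* like 'sg->caller = _RET_IP_;' in the patch */
}

static void unmap_entry(const struct sg_dbg *sg)
{
	if (sg->dma_length != sg->length)	/* the condition the patch WARNs on */
		fprintf(stderr, "length mismatch (%u != %u), mapped from %#lx\n",
			sg->dma_length, sg->length, sg->caller);
}

int main(void)
{
	struct sg_dbg sg = { 0 };

	map_entry(&sg, 4096, 0x1234);
	sg.dma_length = 512;		/* simulate the inconsistency being hunted */
	unmap_entry(&sg);
	return 0;
}
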
(-)11.2-2009-11-12.orig/drivers/xen/balloon/balloon.c (-1 / +9 lines)
Lines 616-628 static int dealloc_pte_fn(
 	set_pte_at(&init_mm, addr, pte, __pte_ma(0));
 	pfn = __pa(addr) >> PAGE_SHIFT;
 	set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
-	SetPageReserved(pfn_to_page(pfn));
+//temp	SetPageReserved(pfn_to_page(pfn));
 	ret = HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation);
 	BUG_ON(ret != 1);
 	return 0;
 }
 #endif
 
+#include <linux/kallsyms.h>//temp
 struct page **alloc_empty_pages_and_pagevec(int nr_pages)
 {
 	unsigned long flags;
Lines 630-635 struct page **alloc_empty_pages_and_page
 	struct page *page, **pagevec;
 	int i, ret;
 
+print_symbol("aep&p: %s\n", _RET_IP_);//temp
 	pagevec = kmalloc(sizeof(page) * nr_pages, GFP_KERNEL);
 	if (pagevec == NULL)
 		return NULL;
Lines 642-647 struct page **alloc_empty_pages_and_page
 		v = page_address(page);
 		scrub_pages(v, 1);
 
+printk("aep&p: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
 		balloon_lock(flags);
 
 		if (xen_feature(XENFEAT_auto_translated_physmap)) {
Lines 707-712 static void _free_empty_pages_and_pageve
 	if (pagevec == NULL)
 		return;
 
+print_symbol("fep&p: %s\n", _RET_IP_);//temp
+for(i = 0; i < nr_pages; ++i)//temp
+ printk("fep&p: %lx(%lx)\n", page_to_pfn(pagevec[i]), pagevec[i]->flags);//temp
 	balloon_lock(flags);
 	for (i = 0; i < nr_pages; i++) {
 		BUG_ON(page_count(pagevec[i]) != 1);
Lines 736-741 void balloon_release_driver_page(struct
 {
 	unsigned long flags;
 
+print_symbol("brdp: %s\n", _RET_IP_);//temp
+printk("brdp: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
 	balloon_lock(flags);
 	balloon_append(page, 1);
 	bs.driver_pages--;
(-)11.2-2009-11-12.orig/drivers/xen/core/gnttab.c (+4 lines)
Lines 511-516 static int gnttab_map(unsigned int start
 static void gnttab_page_free(struct page *page, unsigned int order)
 {
 	BUG_ON(order);
+printk("gpf: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
 	ClearPageForeign(page);
 	gnttab_reset_grant_page(page);
 	put_page(page);
Lines 577-582 int gnttab_copy_grant_page(grant_ref_t r
 	BUG_ON(unmap.status);
 
 	write_sequnlock(&gnttab_dma_lock);
+printk("gcgp: %lx/%lx(%lx) -> %lx/%lx\n",
+       (unsigned long)pfn, (unsigned long)mfn, page->flags,
+       page_to_pfn(new_page), (unsigned long)new_mfn);//temp
 
 	if (!xen_feature(XENFEAT_auto_translated_physmap)) {
 		set_phys_to_machine(page_to_pfn(new_page), INVALID_P2M_ENTRY);
(-)11.2-2009-11-12.orig/lib/swiotlb-xen.c (-12 / +82 lines)
Lines 284-292 static inline int range_needs_mapping(ph
 	return range_straddles_page_boundary(pa, size);
 }
 
-static int is_swiotlb_buffer(char *addr)
+static int is_swiotlb_buffer(dma_addr_t addr)
 {
-	return addr >= io_tlb_start && addr < io_tlb_end;
+	unsigned long pfn = mfn_to_local_pfn(PFN_DOWN(addr));
+	char *va = pfn_valid(pfn) ? __va(pfn << PAGE_SHIFT) : NULL;
+
+#ifdef CONFIG_HIGHMEM
+	if (pfn >= highstart_pfn)
+		return 0;
+#endif
+	return va >= io_tlb_start && va < io_tlb_end;
 }
 
 /*
Lines 331-338 static void swiotlb_bounce(phys_addr_t p
 	} else {
 		if (dir == DMA_TO_DEVICE)
 			memcpy(dma_addr, phys_to_virt(phys), size);
-		else
-			memcpy(phys_to_virt(phys), dma_addr, size);
+		else if (__copy_to_user_inatomic(phys_to_virt(phys),
+						 dma_addr, size))
+			/* inaccessible */;
 	}
 }
 
Lines 431-436 found:
 	 * This is needed when we sync the memory.  Then we sync the buffer if
 	 * needed.
 	 */
+WARN_ON(!phys);//temp
+mask = (phys + (nslots << IO_TLB_SHIFT) - 1) >> PAGE_SHIFT;//temp
+WARN(!pfn_valid(mask), "ms: %Lx:%d ?\n", (unsigned long long)phys, nslots);//temp
 	for (i = 0; i < nslots; i++)
 		io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
 	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)
Lines 443-455 found:
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+//temp do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir, unsigned mapper)//temp
 {
 	unsigned long flags;
 	int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
 	int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
 	phys_addr_t phys = io_tlb_orig_addr[index];
 
+flags = (phys + (nslots << IO_TLB_SHIFT) - 1) >> PAGE_SHIFT;//temp
+WARN(!pfn_valid(flags), "dus: %Lx:%d ?\n", (unsigned long long)phys, nslots);//temp
+for(i = 1, count = 0; i < nslots; ++i) {//temp
+ if((phys + (i << IO_TLB_SHIFT)) != io_tlb_orig_addr[index + i]) {
+  printk("dus[%x+%x]: %Lx %Lx %x (%x)\n", index, i,
+         (unsigned long long)io_tlb_orig_addr[index + i], (unsigned long long)phys + (i << IO_TLB_SHIFT),
+         io_tlb_list[index + i], mapper);
+  ++count;
+ }
+}
+WARN_ON(phys && count);//temp
 	/*
 	 * First, sync the memory before unmapping the entry
 	 */
Lines 543-549 dma_addr_t swiotlb_map_page(struct devic
 	 */
 	if (!address_needs_mapping(dev, dev_addr, size) &&
 	    !range_needs_mapping(phys, size))
+{//temp
+ int f = 0;
+ for(size += offset; size > PAGE_SIZE; size -= PAGE_SIZE) {
+  ++page;
+  if(PageForeign(page)) {
+   printk("smp: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
+   ++f;
+  }
+ }
+ WARN_ON(f);
 		return dev_addr;
+}
 
 	/*
 	 * Oh well, have to allocate and map a bounce buffer.
Lines 569-582 EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-			 size_t size, int dir)
+//temp			 size_t size, int dir)
+size_t size, int dir, unsigned mapper)//temp
 {
 	char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr)) {
-		do_unmap_single(hwdev, dma_addr, size, dir);
+	if (is_swiotlb_buffer(dev_addr)) {
+//temp		do_unmap_single(hwdev, dma_addr, size, dir);
+do_unmap_single(hwdev, dma_addr, size, dir, mapper);//temp
 		return;
 	}
 
Lines 587-593 void swiotlb_unmap_page(struct device *h
 			size_t size, enum dma_data_direction dir,
 			struct dma_attrs *attrs)
 {
-	unmap_single(hwdev, dev_addr, size, dir);
+//temp	unmap_single(hwdev, dev_addr, size, dir);
+unmap_single(hwdev, dev_addr, size, dir, 0);//temp
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);
 
Lines 609-615 swiotlb_sync_single_for_cpu(struct devic
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr))
+	if (is_swiotlb_buffer(dev_addr))
 		sync_single(hwdev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);
Lines 622-628 swiotlb_sync_single_for_device(struct de
 
 	BUG_ON(dir == DMA_NONE);
 
-	if (is_swiotlb_buffer(dma_addr))
+	if (is_swiotlb_buffer(dev_addr))
 		sync_single(hwdev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);
Lines 676-681 swiotlb_map_sg_attrs(struct device *hwde
 		phys_addr_t paddr = page_to_pseudophys(sg_page(sg))
 				   + sg->offset;
 
+if(dir != DMA_TO_DEVICE && sg->offset + sg->length > PAGE_SIZE) {//temp
+ u8*p = page_address(sg_page(sg));
+ long offs = sg->offset + sg->length - 1;
+ WARN((sg->offset | sg->length) & 0x1ff,
+      "sg%d@%p[%d/%d] v=%p+%x:%x\n", dir, sgl, i, nelems, p, sg->offset, sg->length);
+#ifndef CONFIG_HIGHMEM
+ do {
+  __asm__ __volatile__("lock orb $0, %0" : : "m" (p[offs]));
+  offs -= PAGE_SIZE;
+ } while(offs >= (long)sg->offset);
+#endif
+}
 		if (range_needs_mapping(paddr, sg->length)
 		    || address_needs_mapping(hwdev, dev_addr, sg->length)) {
 			void *map;
Lines 694-701 swiotlb_map_sg_attrs(struct device *hwde
 			}
 			sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
 		} else
+{//temp
+ int f = 0;
+ size_t size = sg->offset + sg->length;
+ struct page *page = sg_page(sg);
+ for(; size > PAGE_SIZE; size -= PAGE_SIZE) {
+  ++page;
+  if(PageForeign(page)) {
+   printk("smsa: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
+   ++f;
+  }
+ }
+ WARN_ON(f);
 			sg->dma_address = dev_addr;
+}
 		sg->dma_length = sg->length;
+sg->caller = _RET_IP_;//temp
 	}
 	return nelems;
 }
Lines 723-729 swiotlb_unmap_sg_attrs(struct device *hw
 	BUG_ON(dir == DMA_NONE);
 
 	for_each_sg(sgl, sg, nelems, i)
-		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+{//temp
+ unsigned mapper = sg->caller;
+ sg->caller = _RET_IP_;//temp
+ WARN(sg->dma_length != sg->length, "susa: %lx %Lx %x %x %x (%x)\n",
+      page_to_pfn(sg_page(sg)), sg->dma_address, sg->dma_length, sg->length, sg->offset, mapper);//temp
+//temp		unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+ unmap_single(hwdev, sg->dma_address, sg->dma_length, dir, mapper);//temp
+}
 
 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
(-)11.2-2009-11-12.orig/mm/page_alloc.c (+31 lines)
Lines 256-261 static void bad_page(struct page *page)
 
 	printk(KERN_ALERT "BUG: Bad page state in process %s  pfn:%05lx\n",
 		current->comm, page_to_pfn(page));
+#ifdef CONFIG_XEN//temp
+{
+ unsigned long mfn = pfn_to_mfn(page_to_pfn(page));
+ printk(KERN_ALERT "mfn:%lx pfn:%lx/%lx\n", mfn, mfn_to_pfn(mfn), mfn_to_local_pfn(mfn));
+}
+#endif
 	printk(KERN_ALERT
 		"page:%p flags:%p count:%d mapcount:%d mapping:%p index:%lx\n",
 		page, (void *)page->flags, page_count(page),
Lines 311-316 static int destroy_compound_page(struct
 	int bad = 0;
 
 	if (unlikely(compound_order(page) != order) ||
+#ifdef CONFIG_XEN//temp
+({//temp
+ int f = 0, x = 0;
+ unsigned long pfn = page_to_pfn(page);
+ for(i = 0; i < nr_pages; ++i, ++pfn) {
+  f += WARN(PageForeign(page + i), "dcp: %lx(%lx)\n", pfn, page[i].flags);
+  x += (pfn != mfn_to_pfn(pfn_to_mfn(pfn)) || pfn != mfn_to_local_pfn(pfn_to_mfn(pfn)));
+ }
+ f | x;
+}) ||
+#endif
 	    unlikely(!PageHead(page))) {
 		bad_page(page);
 		bad++;
Lines 500-505 static inline int free_pages_check(struc
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
 		(atomic_read(&page->_count) != 0) |
+#ifdef CONFIG_XEN//temp
+({
+  unsigned long pfn = page_to_pfn(page);
+  unsigned long mfn = pfn_to_mfn(pfn);
+  (pfn ^ mfn_to_pfn(mfn)) | (pfn ^ mfn_to_local_pfn(mfn));
+}) |
+#endif
 		(page->flags & PAGE_FLAGS_CHECK_AT_FREE))) {
 		bad_page(page);
 		return 1;
Lines 659-664 static int prep_new_page(struct page *pa
 	if (unlikely(page_mapcount(page) |
 		(page->mapping != NULL)  |
 		(atomic_read(&page->_count) != 0)  |
+#ifdef CONFIG_XEN//temp
+({
+  unsigned long pfn = page_to_pfn(page);
+  unsigned long mfn = pfn_to_mfn(pfn);
+  (pfn ^ mfn_to_pfn(mfn)) | (pfn ^ mfn_to_local_pfn(mfn));
+}) |
+#endif
 		(page->flags & PAGE_FLAGS_CHECK_AT_PREP))) {
 		bad_page(page);
 		return 1;
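
The page_alloc.c hooks above all test the same invariant: a page the kernel is freeing or handing out must survive the pfn -> mfn -> pfn round trip, both via mfn_to_pfn() and via mfn_to_local_pfn(); the XOR/OR expression is nonzero exactly when it does not. A small user-space sketch of that round-trip check, with made-up translation tables standing in for the Xen p2m/m2p lookups (the mfn_to_local_pfn() variant is left out of the model):

/* Hypothetical stand-ins for the Xen p2m/m2p lookups, just to show when the
 * XOR expression used in free_pages_check()/prep_new_page() is nonzero. */
#include <stdio.h>

#define NPAGES 4

static unsigned long p2m[NPAGES] = { 100, 101, 102, 103 };	/* pfn -> mfn */
static unsigned long m2p[200];					/* mfn -> pfn */

static unsigned long pfn_to_mfn(unsigned long pfn) { return p2m[pfn]; }
static unsigned long mfn_to_pfn(unsigned long mfn) { return m2p[mfn]; }

/* Nonzero iff the pfn does not survive the pfn -> mfn -> pfn round trip,
 * i.e. the two tables disagree about who owns the frame. */
static unsigned long p2m_inconsistent(unsigned long pfn)
{
	unsigned long mfn = pfn_to_mfn(pfn);

	return pfn ^ mfn_to_pfn(mfn);
}

int main(void)
{
	unsigned long pfn;

	for (pfn = 0; pfn < NPAGES; pfn++)
		m2p[p2m[pfn]] = pfn;		/* consistent mapping */
	m2p[p2m[2]] = 3;			/* corrupt one entry */

	for (pfn = 0; pfn < NPAGES; pfn++)
		printf("pfn %lu: %s\n", pfn,
		       p2m_inconsistent(pfn) ? "BAD" : "ok");
	return 0;
}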
