@@ -284,9 +284,16 @@ static inline int range_needs_mapping(ph
         return range_straddles_page_boundary(pa, size);
 }

-static int is_swiotlb_buffer(char *addr)
+static int is_swiotlb_buffer(dma_addr_t addr)
 {
-        return addr >= io_tlb_start && addr < io_tlb_end;
+        unsigned long pfn = mfn_to_local_pfn(PFN_DOWN(addr));
+        char *va = pfn_valid(pfn) ? __va(pfn << PAGE_SHIFT) : NULL;
+
+#ifdef CONFIG_HIGHMEM
+        if (pfn >= highstart_pfn)
+                return 0;
+#endif
+        return va >= io_tlb_start && va < io_tlb_end;
 }

 /*

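The hunk above is the substantive change: is_swiotlb_buffer() now takes the bus (DMA) address instead of a kernel virtual address, since under Xen the machine address a device uses cannot be compared against kernel virtual pointers directly. The new code translates the machine frame to a local pfn, rejects highmem pfns (which have no permanent mapping), and only then compares virtual addresses against the bounce pool. A rough standalone model of that flow (fake_ram, MAX_PFN and pfn_to_va below are illustrative stand-ins, not kernel APIs, and mfn_to_local_pfn is reduced to an identity map):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Illustrative stand-ins for kernel state and helpers. */
static char fake_ram[1 << 22];                  /* 4 MiB of pretend RAM */
#define MAX_PFN (sizeof(fake_ram) >> PAGE_SHIFT)

static char *io_tlb_start, *io_tlb_end;

static unsigned long mfn_to_local_pfn(unsigned long mfn)
{
        return mfn;     /* identity here; Xen consults the M2P table */
}

static int pfn_valid(unsigned long pfn) { return pfn < MAX_PFN; }
static char *pfn_to_va(unsigned long pfn) { return fake_ram + (pfn << PAGE_SHIFT); }

/*
 * Shape of the new check: translate the DMA address's machine frame to a
 * local pfn, bail out for pfns without a direct mapping, and only then
 * compare virtual addresses against the bounce pool.
 */
static int is_swiotlb_buffer(uint64_t addr)
{
        unsigned long pfn = mfn_to_local_pfn(addr >> PAGE_SHIFT);
        char *va = pfn_valid(pfn) ? pfn_to_va(pfn) : NULL;

        return va && va >= io_tlb_start && va < io_tlb_end;
}

int main(void)
{
        io_tlb_start = fake_ram + (1 << 20);    /* pool at 1 MiB...  */
        io_tlb_end = io_tlb_start + (1 << 20);  /* ...and 1 MiB long */

        printf("%d\n", is_swiotlb_buffer(1 << 20));  /* inside  -> 1 */
        printf("%d\n", is_swiotlb_buffer(3 << 20));  /* outside -> 0 */
        return 0;
}

This also explains the is_swiotlb_buffer(dev_addr) call-site changes in the later hunks: callers now pass the DMA address straight through instead of the converted virtual address.
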
@@ -331,8 +338,9 @@ static void swiotlb_bounce(phys_addr_t p
         } else {
                 if (dir == DMA_TO_DEVICE)
                         memcpy(dma_addr, phys_to_virt(phys), size);
-                else
-                        memcpy(phys_to_virt(phys), dma_addr, size);
+                else if (__copy_to_user_inatomic(phys_to_virt(phys),
+                                                 dma_addr, size))
+                        /* inaccessible */;
         }
 }

@@ -431,6 +439,9 @@ found:
          * This is needed when we sync the memory. Then we sync the buffer if
          * needed.
          */
+        WARN_ON(!phys);//temp
+        mask = (phys + (nslots << IO_TLB_SHIFT) - 1) >> PAGE_SHIFT;//temp
+        WARN(!pfn_valid(mask), "ms: %Lx:%d ?\n", (unsigned long long)phys, nslots);//temp
         for (i = 0; i < nslots; i++)
                 io_tlb_orig_addr[index+i] = phys + (i << IO_TLB_SHIFT);
         if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL)

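A note on the arithmetic these //temp checks lean on: a bounce mapping occupies nslots consecutive slabs of 1 << IO_TLB_SHIFT bytes (IO_TLB_SHIFT is 11, i.e. 2 KiB slabs), and slot i records the physical address of its own chunk of the original buffer. A minimal standalone sketch with the kernel's ALIGN() macro inlined:

#include <stdio.h>

#define IO_TLB_SHIFT 11                 /* 2 KiB slabs, as in swiotlb */
#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long sizes[] = { 1, 2048, 2049, 65536 };
        unsigned int i;

        for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                /* Round the request up to whole slabs, then count them;
                 * slot j of the mapping records phys + (j << IO_TLB_SHIFT). */
                int nslots = ALIGN(sizes[i], 1UL << IO_TLB_SHIFT) >> IO_TLB_SHIFT;

                printf("size %6lu -> %d slot(s)\n", sizes[i], nslots);
        }
        return 0;
}
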
@@ -443,13 +454,25 @@ found:
  * dma_addr is the kernel virtual address of the bounce buffer to unmap.
  */
 static void
-do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+//temp do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir)
+do_unmap_single(struct device *hwdev, char *dma_addr, size_t size, int dir, unsigned mapper)//temp
 {
         unsigned long flags;
         int i, count, nslots = ALIGN(size, 1 << IO_TLB_SHIFT) >> IO_TLB_SHIFT;
         int index = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;
         phys_addr_t phys = io_tlb_orig_addr[index];

+        flags = (phys + (nslots << IO_TLB_SHIFT) - 1) >> PAGE_SHIFT;//temp
+        WARN(!pfn_valid(flags), "dus: %Lx:%d ?\n", (unsigned long long)phys, nslots);//temp
+        for(i = 1, count = 0; i < nslots; ++i) {//temp
+                if((phys + (i << IO_TLB_SHIFT)) != io_tlb_orig_addr[index + i]) {
+                        printk("dus[%x+%x]: %Lx %Lx %x (%x)\n", index, i,
+                               (unsigned long long)io_tlb_orig_addr[index + i], (unsigned long long)phys + (i << IO_TLB_SHIFT),
+                               io_tlb_list[index + i], mapper);
+                        ++count;
+                }
+        }
+        WARN_ON(phys && count);//temp
         /*
          * First, sync the memory before unmapping the entry
          */

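The //temp block above cross-checks that bookkeeping at unmap time: the slot index is recovered from the bounce buffer's offset within the pool, and every slot past the first must still hold the next 2 KiB chunk of the same original buffer, otherwise the mapping was corrupted somewhere. A toy model of the round trip (sizes and addresses below are arbitrary illustration):

#include <stdio.h>

#define IO_TLB_SHIFT 11
#define NSLABS 8

/* Toy model of the bookkeeping the //temp block verifies. */
static unsigned long io_tlb_orig_addr[NSLABS];
static char io_tlb_start[NSLABS << IO_TLB_SHIFT];

int main(void)
{
        unsigned long phys = 0x12340000UL;
        int index = 2, nslots = 3, i, count = 0;

        /* map_single() fills consecutive slots with consecutive chunks... */
        for (i = 0; i < nslots; i++)
                io_tlb_orig_addr[index + i] = phys + (i << IO_TLB_SHIFT);

        /* ...so unmap can recover the slot index from the bounce address
         * and check that every further slot still lines up. */
        char *dma_addr = io_tlb_start + (index << IO_TLB_SHIFT);
        int idx = (dma_addr - io_tlb_start) >> IO_TLB_SHIFT;

        for (i = 1; i < nslots; i++)
                if (io_tlb_orig_addr[idx + i] != phys + (i << IO_TLB_SHIFT))
                        count++;

        printf("index=%d mismatches=%d\n", idx, count);  /* index=2 mismatches=0 */
        return 0;
}
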
@@ -543,7 +566,18 @@ dma_addr_t swiotlb_map_page(struct devic
          */
         if (!address_needs_mapping(dev, dev_addr, size) &&
             !range_needs_mapping(phys, size))
+        {//temp
+                int f = 0;
+                for(size += offset; size > PAGE_SIZE; size -= PAGE_SIZE) {
+                        ++page;
+                        if(PageForeign(page)) {
+                                printk("smp: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
+                                ++f;
+                        }
+                }
+                WARN_ON(f);
                 return dev_addr;
+        }

         /*
          * Oh well, have to allocate and map a bounce buffer.

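The debug block above walks each page after the first one covered by the mapping, counting foreign pages (pages granted in from another domain) that must not take this no-bounce fast path. The loop shape is worth spelling out: size += offset turns the length into bytes measured from the start of the first page, so the loop stops exactly on the last page touched. The same walk standalone, with PageForeign() replaced by a stub predicate:

#include <stdio.h>

#define PAGE_SIZE 4096UL

/* Stub for the kernel's PageForeign(); pretend page 3 is foreign. */
static int page_is_foreign(unsigned long pfn) { return pfn == 3; }

int main(void)
{
        unsigned long pfn = 1;        /* first page of the buffer   */
        unsigned long offset = 3000;  /* offset into the first page */
        unsigned long size = 10000;   /* mapping length in bytes    */
        int f = 0;

        /* size += offset counts from the start of the first page; each
         * iteration steps one page and checks the *next* page, exactly
         * like the ++page loop in the hunk above. */
        for (size += offset; size > PAGE_SIZE; size -= PAGE_SIZE) {
                ++pfn;
                if (page_is_foreign(pfn)) {
                        printf("foreign page %lu inside mapping\n", pfn);
                        ++f;
                }
        }
        printf("%d foreign page(s)\n", f);
        return 0;
}
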
@@ -569,14 +603,16 @@ EXPORT_SYMBOL_GPL(swiotlb_map_page);
  * whatever the device wrote there.
  */
 static void unmap_single(struct device *hwdev, dma_addr_t dev_addr,
-                         size_t size, int dir)
+//temp                   size_t size, int dir)
+                         size_t size, int dir, unsigned mapper)//temp
 {
         char *dma_addr = swiotlb_bus_to_virt(hwdev, dev_addr);

         BUG_ON(dir == DMA_NONE);

-        if (is_swiotlb_buffer(dma_addr)) {
-                do_unmap_single(hwdev, dma_addr, size, dir);
+        if (is_swiotlb_buffer(dev_addr)) {
+//temp          do_unmap_single(hwdev, dma_addr, size, dir);
+                do_unmap_single(hwdev, dma_addr, size, dir, mapper);//temp
                 return;
         }

@@ -587,7 +623,8 @@ void swiotlb_unmap_page(struct device *h
                         size_t size, enum dma_data_direction dir,
                         struct dma_attrs *attrs)
 {
-        unmap_single(hwdev, dev_addr, size, dir);
+//temp  unmap_single(hwdev, dev_addr, size, dir);
+        unmap_single(hwdev, dev_addr, size, dir, 0);//temp
 }
 EXPORT_SYMBOL_GPL(swiotlb_unmap_page);

@@ -609,7 +646,7 @@ swiotlb_sync_single_for_cpu(struct devic

         BUG_ON(dir == DMA_NONE);

-        if (is_swiotlb_buffer(dma_addr))
+        if (is_swiotlb_buffer(dev_addr))
                 sync_single(hwdev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(swiotlb_sync_single_for_cpu);

@@ -622,7 +659,7 @@ swiotlb_sync_single_for_device(struct de

         BUG_ON(dir == DMA_NONE);

-        if (is_swiotlb_buffer(dma_addr))
+        if (is_swiotlb_buffer(dev_addr))
                 sync_single(hwdev, dma_addr, size, dir);
 }
 EXPORT_SYMBOL(swiotlb_sync_single_for_device);

@@ -676,6 +713,18 @@ swiotlb_map_sg_attrs(struct device *hwde
                 phys_addr_t paddr = page_to_pseudophys(sg_page(sg))
                                     + sg->offset;

+                if(dir != DMA_TO_DEVICE && sg->offset + sg->length > PAGE_SIZE) {//temp
+                        u8*p = page_address(sg_page(sg));
+                        long offs = sg->offset + sg->length - 1;
+                        WARN((sg->offset | sg->length) & 0x1ff,
+                             "sg%d@%p[%d/%d] v=%p+%x:%x\n", dir, sgl, i, nelems, p, sg->offset, sg->length);
+#ifndef CONFIG_HIGHMEM
+                        do {
+                                __asm__ __volatile__("lock orb $0, %0" : : "m" (p[offs]));
+                                offs -= PAGE_SIZE;
+                        } while(offs >= (long)sg->offset);
+#endif
+                }
                 if (range_needs_mapping(paddr, sg->length)
                     || address_needs_mapping(hwdev, dev_addr, sg->length)) {
                         void *map;

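The lock orb $0, %0 probe added above is a write test: a locked OR with zero alters no data but is still a read-modify-write, so it faults immediately if the page is not writable by this domain, flagging the offender at map time rather than mid-DMA. A standalone (x86-only, GCC inline asm) rendering of the same probe; it uses the "+m" constraint, since the instruction does touch memory, and walks pages back from the last byte exactly like the hunk:

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SIZE 4096L

/*
 * Probe each page of [offset, offset+length) from the last byte backwards
 * with a locked "or $0": a read-modify-write that changes nothing but
 * faults if the page is not writable. x86 only.
 */
static void probe_writable(unsigned char *p, long offset, long length)
{
        long offs = offset + length - 1;

        do {
                __asm__ __volatile__("lock orb $0, %0" : "+m" (p[offs]));
                offs -= PAGE_SIZE;
        } while (offs >= offset);
}

int main(void)
{
        unsigned char *buf = malloc(3 * PAGE_SIZE);

        if (!buf)
                return 1;
        probe_writable(buf, 0, 3 * PAGE_SIZE);  /* silent if all writable */
        puts("probe ok");
        free(buf);
        return 0;
}
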
@@ -694,8 +743,22 @@ swiotlb_map_sg_attrs(struct device *hwde
                         }
                         sg->dma_address = swiotlb_virt_to_bus(hwdev, map);
                 } else
+                {//temp
+                        int f = 0;
+                        size_t size = sg->offset + sg->length;
+                        struct page *page = sg_page(sg);
+                        for(; size > PAGE_SIZE; size -= PAGE_SIZE) {
+                                ++page;
+                                if(PageForeign(page)) {
+                                        printk("smsa: %lx(%lx)\n", page_to_pfn(page), page->flags);//temp
+                                        ++f;
+                                }
+                        }
+                        WARN_ON(f);
                         sg->dma_address = dev_addr;
+                }
                 sg->dma_length = sg->length;
+                sg->caller = _RET_IP_;//temp
         }
         return nelems;
 }

@@ -723,7 +786,14 @@ swiotlb_unmap_sg_attrs(struct device *hw
         BUG_ON(dir == DMA_NONE);

         for_each_sg(sgl, sg, nelems, i)
-                unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+        {//temp
+                unsigned mapper = sg->caller;
+                sg->caller = _RET_IP_;//temp
+                WARN(sg->dma_length != sg->length, "susa: %lx %Lx %x %x %x (%x)\n",
+                     page_to_pfn(sg_page(sg)), sg->dma_address, sg->dma_length, sg->length, sg->offset, mapper);//temp
+//temp          unmap_single(hwdev, sg->dma_address, sg->dma_length, dir);
+                unmap_single(hwdev, sg->dma_address, sg->dma_length, dir, mapper);//temp
+        }

 }
 EXPORT_SYMBOL(swiotlb_unmap_sg_attrs);
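
The thread running through most hunks here is the mapper cookie: the mapping call site, captured via _RET_IP_ into the sg->caller field this patch adds, is handed down to unmap_single()/do_unmap_single() so the WARN/printk output can name whoever created a mapping that later looks inconsistent. A toy illustration of the pattern; _RET_IP_ in the kernel is essentially (unsigned long)__builtin_return_address(0):

#include <stdio.h>

/* Record the call site at map time, report it if unmap sees something
 * inconsistent; map_one/unmap_one are illustrative, not kernel APIs. */
struct mapping {
        unsigned long len, dma_len;
        void *caller;
};

static __attribute__((noinline)) void map_one(struct mapping *m, unsigned long len)
{
        m->len = m->dma_len = len;
        m->caller = __builtin_return_address(0);
}

static void unmap_one(struct mapping *m)
{
        if (m->dma_len != m->len)
                fprintf(stderr, "length mismatch, mapped at %p\n", m->caller);
}

int main(void)
{
        struct mapping m;

        map_one(&m, 4096);
        m.dma_len = 2048;       /* simulate the corruption being hunted */
        unmap_one(&m);
        return 0;
}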