Linux Kernel Commentary

       

mm/vmalloc.c


/*
 *  linux/mm/vmalloc.c
 *
 *  Copyright (C) 1993  Linus Torvalds
 */

#include <linux/malloc.h>
#include <linux/vmalloc.h>

#include <asm/uaccess.h>

static struct vm_struct * vmlist = NULL;

static inline void free_area_pte(pmd_t * pmd,
        unsigned long address, unsigned long size)
{
        pte_t * pte;
        unsigned long end;

        if (pmd_none(*pmd))
                return;
        if (pmd_bad(*pmd)) {
                printk("free_area_pte: bad pmd (%08lx)\n", pmd_val(*pmd));
                pmd_clear(pmd);
                return;
        }
        pte = pte_offset(pmd, address);
        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        while (address < end) {
                pte_t page = *pte;
                pte_clear(pte);
                address += PAGE_SIZE;
                pte++;
                if (pte_none(page))
                        continue;
                if (pte_present(page)) {
                        free_page(pte_page(page));
                        continue;
                }
                printk("Whee.. Swapped out page in kernel page table\n");
        }
}

static inline void free_area_pmd(pgd_t * dir,
        unsigned long address, unsigned long size)
{
        pmd_t * pmd;
        unsigned long end;

        if (pgd_none(*dir))
                return;
        if (pgd_bad(*dir)) {
                printk("free_area_pmd: bad pgd (%08lx)\n", pgd_val(*dir));
                pgd_clear(dir);
                return;
        }
        pmd = pmd_offset(dir, address);
        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        while (address < end) {
                free_area_pte(pmd, address, end - address);
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        }
}

void vmfree_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        while (address < end) {
                free_area_pmd(dir, address, end - address);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_all();
}

static inline int alloc_area_pte(pte_t * pte,
        unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PMD_MASK;
        end = address + size;
        if (end > PMD_SIZE)
                end = PMD_SIZE;
        while (address < end) {
                unsigned long page;
                if (!pte_none(*pte))
                        printk("alloc_area_pte: page already exists\n");
                page = __get_free_page(GFP_KERNEL);
                if (!page)
                        return -ENOMEM;
                set_pte(pte, mk_pte(page, PAGE_KERNEL));
                address += PAGE_SIZE;
                pte++;
        }
        return 0;
}

static inline int alloc_area_pmd(pmd_t * pmd,
        unsigned long address, unsigned long size)
{
        unsigned long end;

        address &= ~PGDIR_MASK;
        end = address + size;
        if (end > PGDIR_SIZE)
                end = PGDIR_SIZE;
        while (address < end) {
                pte_t * pte = pte_alloc_kernel(pmd, address);
                if (!pte)
                        return -ENOMEM;
                if (alloc_area_pte(pte, address, end - address))
                        return -ENOMEM;
                address = (address + PMD_SIZE) & PMD_MASK;
                pmd++;
        }
        return 0;
}

int vmalloc_area_pages(unsigned long address, unsigned long size)
{
        pgd_t * dir;
        unsigned long end = address + size;

        dir = pgd_offset_k(address);
        flush_cache_all();
        while (address < end) {
                pmd_t *pmd;
                pgd_t olddir = *dir;

                pmd = pmd_alloc_kernel(dir, address);
                if (!pmd)
                        return -ENOMEM;
                if (alloc_area_pmd(pmd, address, end - address))
                        return -ENOMEM;
                if (pgd_val(olddir) != pgd_val(*dir))
                        set_pgdir(address, *dir);
                address = (address + PGDIR_SIZE) & PGDIR_MASK;
                dir++;
        }
        flush_tlb_all();
        return 0;
}
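vmfree_area_pages() and vmalloc_area_pages() walk the three-level page tables top-down: the outer loop steps through page-directory (pgd) entries, free_area_pmd()/alloc_area_pmd() step through the pmd entries inside one pgd entry, and free_area_pte()/alloc_area_pte() touch the individual pages. At every level the requested range is clipped to the boundaries of the current entry before descending. The following user-space sketch is not kernel code; FAKE_PMD_SIZE and the sample addresses are made-up stand-ins for PMD_SIZE and a real vmalloc range, and it only illustrates that clipping arithmetic.

/* Sketch of the range-chunking arithmetic used by free_area_pmd() and
 * friends: a [address, address+size) range is cut at every entry boundary,
 * and each level handles only the part inside its own entry.  The constants
 * are hypothetical and chosen small so the output stays readable. */
#include <stdio.h>

#define FAKE_PMD_SIZE   0x1000UL                 /* stand-in for PMD_SIZE */
#define FAKE_PMD_MASK   (~(FAKE_PMD_SIZE - 1))   /* stand-in for PMD_MASK */

static void walk(unsigned long address, unsigned long size)
{
        unsigned long end = address + size;

        while (address < end) {
                /* Portion of the range that lies inside the current entry. */
                unsigned long chunk_end = (address + FAKE_PMD_SIZE) & FAKE_PMD_MASK;
                if (chunk_end > end)
                        chunk_end = end;
                printf("entry covers [%#lx, %#lx)\n", address, chunk_end);
                /* Advance to the start of the next entry, exactly as
                 * free_area_pmd() does with (address + PMD_SIZE) & PMD_MASK. */
                address = (address + FAKE_PMD_SIZE) & FAKE_PMD_MASK;
        }
}

int main(void)
{
        walk(0x123400, 0x2800);   /* a range straddling three hypothetical entries */
        return 0;
}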


struct vm_struct * get_vm_area(unsigned long size)
{
        unsigned long addr;
        struct vm_struct **p, *tmp, *area;

        area = (struct vm_struct *) kmalloc(sizeof(*area), GFP_KERNEL);
        if (!area)
                return NULL;
        addr = VMALLOC_START;
        for (p = &vmlist; (tmp = *p) ; p = &tmp->next) {
                if (size + addr < (unsigned long) tmp->addr)
                        break;
                if (addr > VMALLOC_END-size) {
                        kfree(area);
                        return NULL;
                }
                addr = tmp->size + (unsigned long) tmp->addr;
        }
        area->addr = (void *)addr;
        area->size = size + PAGE_SIZE;
        area->next = *p;
        *p = area;
        return area;
}
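get_vm_area() hands out virtual address ranges from the vmalloc region with a first-fit scan of vmlist, which is kept sorted by address, and it records one extra page in the area (area->size = size + PAGE_SIZE) that is never mapped, so a sequential overrun off the end of an allocation hits an unmapped page rather than the next area. Below is a stand-alone sketch of the same list logic; the names struct range, RSTART, REND and PGSIZE are hypothetical stand-ins for vm_struct, VMALLOC_START, VMALLOC_END and PAGE_SIZE.

/* First-fit allocation over a sorted singly linked list, mirroring the
 * scan in get_vm_area(). */
#include <stdio.h>
#include <stdlib.h>

#define RSTART 0x10000UL   /* stand-in for VMALLOC_START */
#define REND   0x40000UL   /* stand-in for VMALLOC_END   */
#define PGSIZE 0x1000UL    /* stand-in for PAGE_SIZE     */

struct range {
        unsigned long addr, size;
        struct range *next;
};

static struct range *rlist = NULL;

static struct range *get_range(unsigned long size)
{
        struct range **p, *tmp, *area = malloc(sizeof(*area));
        unsigned long addr = RSTART;

        if (!area)
                return NULL;
        /* Walk the sorted list; stop at the first gap in front of tmp that
         * can hold the request, or fall off the end of the list. */
        for (p = &rlist; (tmp = *p) != NULL; p = &tmp->next) {
                if (size + addr < tmp->addr)
                        break;
                if (addr > REND - size) {        /* ran past the region */
                        free(area);
                        return NULL;
                }
                addr = tmp->addr + tmp->size;    /* try the gap after tmp */
        }
        area->addr = addr;
        area->size = size + PGSIZE;              /* extra, never-used guard page */
        area->next = *p;                         /* splice in, keeping order */
        *p = area;
        return area;
}

int main(void)
{
        struct range *a = get_range(0x3000);
        struct range *b = get_range(0x2000);
        if (!a || !b)
                return 1;
        printf("a at %#lx, b at %#lx\n", a->addr, b->addr);
        return 0;
}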

void vfree(void * addr)
{
        struct vm_struct **p, *tmp;

        if (!addr)
                return;
        if ((PAGE_SIZE-1) & (unsigned long) addr) {
                printk("Trying to vfree() bad address (%p)\n", addr);
                return;
        }
        for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
                if (tmp->addr == addr) {
                        *p = tmp->next;
                        vmfree_area_pages(VMALLOC_VMADDR(tmp->addr), tmp->size);
                        kfree(tmp);
                        return;
                }
        }
        printk("Trying to vfree() nonexistent vm area (%p)\n", addr);
}

void * vmalloc(unsigned long size)
{
        void * addr;
        struct vm_struct *area;

        size = PAGE_ALIGN(size);
        if (!size || size > (max_mapnr << PAGE_SHIFT))
                return NULL;
        area = get_vm_area(size);
        if (!area)
                return NULL;
        addr = area->addr;
        if (vmalloc_area_pages(VMALLOC_VMADDR(addr), size)) {
                vfree(addr);
                return NULL;
        }
        return addr;
}

long vread(char *buf, char *addr, unsigned long count)
{
        struct vm_struct *tmp;
        char *vaddr, *buf_start = buf;
        unsigned long n;

        /* Don't allow overflow */
        if ((unsigned long) addr + count < count)
                count = -(unsigned long) addr;

        for (tmp = vmlist; tmp; tmp = tmp->next) {
                vaddr = (char *) tmp->addr;
                if (addr >= vaddr + tmp->size - PAGE_SIZE)
                        continue;
                while (addr < vaddr) {
                        if (count == 0)
                                goto finished;
                        put_user('\0', buf);
                        buf++;
                        addr++;
                        count--;
                }
                n = vaddr + tmp->size - PAGE_SIZE - addr;
                do {
                        if (count == 0)
                                goto finished;
                        put_user(*addr, buf);
                        buf++;
                        addr++;
                        count--;
                } while (--n > 0);
        }
finished:
        return buf - buf_start;
}
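vmalloc() therefore returns memory that is virtually contiguous but assembled from individually allocated physical pages, which makes it suitable for large buffers that are accessed only through the CPU; since the pages are not physically contiguous, such a buffer must not be handed directly to DMA hardware. A hypothetical driver-style usage sketch follows; big_buf, setup_big_buf() and teardown_big_buf() are made-up names and are not part of the book's listing.

#include <linux/vmalloc.h>
#include <linux/kernel.h>
#include <linux/errno.h>

static char *big_buf;

static int setup_big_buf(unsigned long bytes)
{
        /* vmalloc() page-aligns the request and maps fresh pages into the
         * vmalloc region; it returns NULL on failure. */
        big_buf = vmalloc(bytes);
        if (!big_buf) {
                printk("setup_big_buf: vmalloc(%lu) failed\n", bytes);
                return -ENOMEM;
        }
        return 0;
}

static void teardown_big_buf(void)
{
        /* vfree() unlinks the area from vmlist and releases its pages;
         * as the listing shows, vfree(NULL) is simply ignored. */
        vfree(big_buf);
        big_buf = NULL;
}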

