/*
 * page_address() looks up a page's kernel virtual mapping.
 * Returns the page's virtual address.
 */
void *page_address(struct page *page)
{
unsigned long flags;
void *ret;
struct page_address_slot *pas;
/*???????????????*/
if (!PageHighMem(page))
/*?????????????????????????±?
????????????????????????????
???????????????*/
return lowmem_page_address(page);
/*??page_address_htable??б??е??pas*/
pas = page_slot(page);
ret = NULL;
spin_lock_irqsave(&pas->lock?? flags);
if (!list_empty(&pas->lh)) {/*????????????????
???????д?????page_address_map??*/
struct page_address_map *pam;
/*??????????е????*/
list_for_each_entry(pam?? &pas->lh?? list) {
if (pam->page == page) {
ret = pam->virtual;/*??????????*/
goto done;
}
}
}
done:
spin_unlock_irqrestore(&pas->lock?? flags);
return ret;
}
/*
 * kmap() establishes (or reuses) a kernel mapping for a page so the
 * kernel can access its contents through a virtual address.
 */
void *kmap(struct page *page)
{
	/* Mapping a highmem page may block; forbid atomic context. */
	might_sleep();

	if (PageHighMem(page))
		return kmap_high(page);	/* demand-map the highmem page */

	/* Lowmem pages are permanently mapped; just compute the address. */
	return page_address(page);
}
/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
unsigned long vaddr;
/*
 * For highmem pages, we can't trust "virtual" until
 * after we have the lock.
 */
lock_kmap();/* take the kmap lock so the mapping can't change under us */
/* See if the page already has a kernel mapping we can reuse. */
vaddr = (unsigned long)page_address(page);
if (!vaddr)/* no existing mapping */
/* Allocate a slot in pkmap_page_table and record the new
 * mapping in the page_address_htable hash. */
vaddr = map_new_virtual(page);
pkmap_count[PKMAP_NR(vaddr)]++;/* take a reference; count must now be >= 2 */
BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
unlock_kmap();
return (void*) vaddr;/* kernel virtual address of the mapping */
}
static inline unsigned long map_new_virtual(struct page *page)
{
unsigned long vaddr;
int count;
start:
count = LAST_PKMAP;
/* Find an empty entry */
for (;;) {
/*??1????????*/
last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
/*
???????ж??????last_pkmap_nr???????????????1023??LAST_PKMAP(1024)-1????????????????????
??????????????flush_all_zero_pkmaps()????????????pkmap_count[] ?????1?????????TLB?????entry??flush??
?????????0?????????????????????????????????????????pkmap_count???1??????
????????????TLB?flush???
????о??п????????Ч????????????????????????????£?Ч??????ɡ?
*/
if (!last_pkmap_nr) {
flush_all_zero_pkmaps();
count = LAST_PKMAP;
}
if (!pkmap_count[last_pkmap_nr])
break;  /* Found a usable entry */
if (--count)
continue;
/*
* Sleep for somebody else to unmap their entries
*/
{
DECLARE_WAITQUEUE(wait?? current);
__set_current_state(TASK_UNINTERRUPTIBLE);
add_wait_queue(&pkmap_map_wait?? &wait);
unlock_kmap();
schedule();
remove_wait_queue(&pkmap_map_wait?? &wait);
lock_kmap();
/* Somebody else might have mapped it while we slept */
if (page_address(page))
return (unsigned long)page_address(page);
/* Re-start */
goto start;
}
}
/*??????????????????????vaddr.*/
vaddr = PKMAP_ADDR(last_pkmap_nr);
/*
v
set_pte_at(mm?? addr?? ptep?? pte)??????NON-PAE i386??????????????????????????????
static inline void native_set_pte(pte_t *ptep ?? pte_t pte)
{
*ptep = pte;
}
*/  set_pte_at(&init_mm?? vaddr??/*?????????*/
&(pkmap_page_table[last_pkmap_nr])?? mk_pte(page?? kmap_prot));
/*????????pkmap_count[last_pkmap_nr]???1??1??????????????
????????????????????????2?????????????
?????????????kmap_high????????(pkmap_count[PKMAP_NR(vaddr)]++).*/
pkmap_count[last_pkmap_nr] = 1;
/*????????????????????????page????????????
????page_address_htable????????????????*/
set_page_address(page?? (void *)vaddr);
return vaddr;
}