linux-2.6.33/__get_user_pages()(mmu)
をテンプレートにして作成
[
トップ
] [
新規
|
一覧
|
検索
|
最終更新
|
ヘルプ
|
ログイン
]
開始行:
*参照元 [#lc5dc598]
#backlinks
*説明 [#c41493ca]
-パス: [[linux-2.6.33/mm/memory.c]]
-FIXME: これは何?
--説明
**引数 [#a2276094]
-struct task_struct *tsk
--
--[[linux-2.6.33/task_struct]]
-struct mm_struct *mm
--
--[[linux-2.6.33/mm_struct]]
-unsigned long start
--
-int nr_pages
--
-unsigned int gup_flags
--
-struct page **pages
--
--[[linux-2.6.33/page]]
-struct vm_area_struct **vmas
--
--[[linux-2.6.33/vm_area_struct]]
**返り値 [#q6541b95]
-int
--
**参考 [#n5408a21]
*実装 [#h309d47e]
int __get_user_pages(struct task_struct *tsk, struct mm_...
unsigned long start, int nr_pages, unsigned int g...
struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned long vm_flags;
if (nr_pages <= 0)
return 0;
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
-
--[[linux-2.6.33/VM_BUG_ON()]]
-
--[[linux-2.6.33/FOLL_GET]]
/*
* Require read or write permissions.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
vm_flags = (gup_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (gup_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
-
--[[linux-2.6.33/FOLL_WRITE]]
-
--[[linux-2.6.33/FOLL_FORCE]]
-
--[[linux-2.6.33/VM_WRITE]]
-
--[[linux-2.6.33/VM_MAYWRITE]]
-
--[[linux-2.6.33/VM_READ]]
-
--[[linux-2.6.33/VM_MAYREAD]]
do {
struct vm_area_struct *vma;
vma = find_extend_vma(mm, start);
-
--[[linux-2.6.33/vm_area_struct]]
-
--[[linux-2.6.33/find_extend_vma()]]
if (!vma && in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
struct vm_area_struct *gate_vma = get_gate_vma(tsk);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
-
--[[linux-2.6.33/in_gate_area()]]
-
--[[linux-2.6.33/PAGE_MASK]]
-
--[[linux-2.6.33/get_gate_vma()]]
-
--[[linux-2.6.33/pgd_t]]
-
--[[linux-2.6.33/pud_t]]
-
--[[linux-2.6.33/pmd_t]]
-
--[[linux-2.6.33/pte_t]]
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
else
pgd = pgd_offset_gate(mm, pg);
-
--[[linux-2.6.33/TASK_SIZE]]
-
--[[linux-2.6.33/pgd_offset_k()]]
-
--[[linux-2.6.33/pgd_offset_gate()]]
BUG_ON(pgd_none(*pgd));
pud = pud_offset(pgd, pg);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
if (pmd_none(*pmd))
return i ? : -EFAULT;
-
--[[linux-2.6.33/BUG_ON()]]
-
--[[linux-2.6.33/pgd_none()]]
-
--[[linux-2.6.33/pud_offset()]]
-
--[[linux-2.6.33/pud_none()]]
-
--[[linux-2.6.33/pmd_offset()]]
-
--[[linux-2.6.33/pmd_none()]]
pte = pte_offset_map(pmd, pg);
if (pte_none(*pte)) {
pte_unmap(pte);
return i ? : -EFAULT;
}
-
--[[linux-2.6.33/pte_offset_map()]]
-
--[[linux-2.6.33/pte_none()]]
-
--[[linux-2.6.33/pte_unmap()]]
if (pages) {
struct page *page = vm_normal_page(gate_vma, start, ...
pages[i] = page;
if (page)
get_page(page);
}
pte_unmap(pte);
-
--[[linux-2.6.33/vm_normal_page()]]
-
--[[linux-2.6.33/get_page()]]
if (vmas)
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
nr_pages--;
continue;
}
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-
--[[linux-2.6.33/VM_IO]]
-
--[[linux-2.6.33/VM_PFNMAP]]
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i, gup_flags);
continue;
}
-
--[[linux-2.6.33/is_vm_hugetlb_page()]]
-
--[[linux-2.6.33/follow_hugetlb_page()]]
do {
struct page *page;
unsigned int foll_flags = gup_flags;
/*
* If we have a pending SIGKILL, don't keep faulting
* pages and potentially allocating memory.
*/
if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
-
--[[linux-2.6.33/unlikely()]]
-
--[[linux-2.6.33/current(global)]]
-
--[[linux-2.6.33/fatal_signal_pending()]]
cond_resched();
-
--[[linux-2.6.33/cond_resched()]]
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
-
--[[linux-2.6.33/follow_page()]]
ret = handle_mm_fault(mm, vma, start,
(foll_flags & FOLL_WRITE) ?
FAULT_FLAG_WRITE : 0);
-
--[[linux-2.6.33/handle_mm_fault()]]
-
--[[linux-2.6.33/FAULT_FLAG_WRITE]]
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
if (ret &
(VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
return i ? i : -EFAULT;
BUG();
}
-
--[[linux-2.6.33/VM_FAULT_ERROR]]
-
--[[linux-2.6.33/VM_FAULT_OOM]]
-
--[[linux-2.6.33/VM_FAULT_HWPOISON]]
-
--[[linux-2.6.33/VM_FAULT_SIGBUS]]
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
-
--[[linux-2.6.33/VM_FAULT_MAJOR]]
/*
* The VM_FAULT_WRITE bit tells us that
* do_wp_page has broken COW when necessary,
* even if maybe_mkwrite decided not to set
* pte_write. We can thus safely do subsequent
* page lookups as if they were reads. But only
* do so when looping for pte_write is futile:
* in some cases userspace may also be wanting
* to write to the gotten user page, which a
* read fault here might prevent (a readonly
* page might get reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) &&
!(vma->vm_flags & VM_WRITE))
foll_flags &= ~FOLL_WRITE;
cond_resched();
-
--[[linux-2.6.33/VM_FAULT_WRITE]]
-
--[[linux-2.6.33/VM_WRITE]]
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
-
--[[linux-2.6.33/IS_ERR()]]
-
--[[linux-2.6.33/PTR_ERR()]]
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
}
-
--[[linux-2.6.33/flush_anon_page()]]
-
--[[linux-2.6.33/flush_dcache_page()]]
if (vmas)
vmas[i] = vma;
i++;
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
} while (nr_pages);
return i;
}
*コメント [#u245a6d7]
終了行:
*参照元 [#lc5dc598]
#backlinks
*説明 [#c41493ca]
-パス: [[linux-2.6.33/mm/memory.c]]
-FIXME: これは何?
--説明
**引数 [#a2276094]
-struct task_struct *tsk
--
--[[linux-2.6.33/task_struct]]
-struct mm_struct *mm
--
--[[linux-2.6.33/mm_struct]]
-unsigned long start
--
-int nr_pages
--
-unsigned int gup_flags
--
-struct page **pages
--
--[[linux-2.6.33/page]]
-struct vm_area_struct **vmas
--
--[[linux-2.6.33/vm_area_struct]]
**返り値 [#q6541b95]
-int
--
**参考 [#n5408a21]
*実装 [#h309d47e]
int __get_user_pages(struct task_struct *tsk, struct mm_...
unsigned long start, int nr_pages, unsigned int g...
struct page **pages, struct vm_area_struct **vmas)
{
int i;
unsigned long vm_flags;
if (nr_pages <= 0)
return 0;
VM_BUG_ON(!!pages != !!(gup_flags & FOLL_GET));
-
--[[linux-2.6.33/VM_BUG_ON()]]
-
--[[linux-2.6.33/FOLL_GET]]
/*
* Require read or write permissions.
* If FOLL_FORCE is set, we only require the "MAY" flags.
*/
vm_flags = (gup_flags & FOLL_WRITE) ?
(VM_WRITE | VM_MAYWRITE) : (VM_READ | VM_MAYREAD);
vm_flags &= (gup_flags & FOLL_FORCE) ?
(VM_MAYREAD | VM_MAYWRITE) : (VM_READ | VM_WRITE);
i = 0;
-
--[[linux-2.6.33/FOLL_WRITE]]
-
--[[linux-2.6.33/FOLL_FORCE]]
-
--[[linux-2.6.33/VM_WRITE]]
-
--[[linux-2.6.33/VM_MAYWRITE]]
-
--[[linux-2.6.33/VM_READ]]
-
--[[linux-2.6.33/VM_MAYREAD]]
do {
struct vm_area_struct *vma;
vma = find_extend_vma(mm, start);
-
--[[linux-2.6.33/vm_area_struct]]
-
--[[linux-2.6.33/find_extend_vma()]]
if (!vma && in_gate_area(tsk, start)) {
unsigned long pg = start & PAGE_MASK;
struct vm_area_struct *gate_vma = get_gate_vma(tsk);
pgd_t *pgd;
pud_t *pud;
pmd_t *pmd;
pte_t *pte;
-
--[[linux-2.6.33/in_gate_area()]]
-
--[[linux-2.6.33/PAGE_MASK]]
-
--[[linux-2.6.33/get_gate_vma()]]
-
--[[linux-2.6.33/pgd_t]]
-
--[[linux-2.6.33/pud_t]]
-
--[[linux-2.6.33/pmd_t]]
-
--[[linux-2.6.33/pte_t]]
/* user gate pages are read-only */
if (gup_flags & FOLL_WRITE)
return i ? : -EFAULT;
if (pg > TASK_SIZE)
pgd = pgd_offset_k(pg);
else
pgd = pgd_offset_gate(mm, pg);
-
--[[linux-2.6.33/TASK_SIZE]]
-
--[[linux-2.6.33/pgd_offset_k()]]
-
--[[linux-2.6.33/pgd_offset_gate()]]
BUG_ON(pgd_none(*pgd));
pud = pud_offset(pgd, pg);
BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, pg);
if (pmd_none(*pmd))
return i ? : -EFAULT;
-
--[[linux-2.6.33/BUG_ON()]]
-
--[[linux-2.6.33/pgd_none()]]
-
--[[linux-2.6.33/pud_offset()]]
-
--[[linux-2.6.33/pud_none()]]
-
--[[linux-2.6.33/pmd_offset()]]
-
--[[linux-2.6.33/pmd_none()]]
pte = pte_offset_map(pmd, pg);
if (pte_none(*pte)) {
pte_unmap(pte);
return i ? : -EFAULT;
}
-
--[[linux-2.6.33/pte_offset_map()]]
-
--[[linux-2.6.33/pte_none()]]
-
--[[linux-2.6.33/pte_unmap()]]
if (pages) {
struct page *page = vm_normal_page(gate_vma, start, ...
pages[i] = page;
if (page)
get_page(page);
}
pte_unmap(pte);
-
--[[linux-2.6.33/vm_normal_page()]]
-
--[[linux-2.6.33/get_page()]]
if (vmas)
vmas[i] = gate_vma;
i++;
start += PAGE_SIZE;
nr_pages--;
continue;
}
if (!vma ||
(vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
!(vm_flags & vma->vm_flags))
return i ? : -EFAULT;
-
--[[linux-2.6.33/VM_IO]]
-
--[[linux-2.6.33/VM_PFNMAP]]
if (is_vm_hugetlb_page(vma)) {
i = follow_hugetlb_page(mm, vma, pages, vmas,
&start, &nr_pages, i, gup_flags);
continue;
}
-
--[[linux-2.6.33/is_vm_hugetlb_page()]]
-
--[[linux-2.6.33/follow_hugetlb_page()]]
do {
struct page *page;
unsigned int foll_flags = gup_flags;
/*
* If we have a pending SIGKILL, don't keep faulting
* pages and potentially allocating memory.
*/
if (unlikely(fatal_signal_pending(current)))
return i ? i : -ERESTARTSYS;
-
--[[linux-2.6.33/unlikely()]]
-
--[[linux-2.6.33/current(global)]]
-
--[[linux-2.6.33/fatal_signal_pending()]]
cond_resched();
-
--[[linux-2.6.33/cond_resched()]]
while (!(page = follow_page(vma, start, foll_flags))) {
int ret;
-
--[[linux-2.6.33/follow_page()]]
ret = handle_mm_fault(mm, vma, start,
(foll_flags & FOLL_WRITE) ?
FAULT_FLAG_WRITE : 0);
-
--[[linux-2.6.33/handle_mm_fault()]]
-
--[[linux-2.6.33/FAULT_FLAG_WRITE]]
if (ret & VM_FAULT_ERROR) {
if (ret & VM_FAULT_OOM)
return i ? i : -ENOMEM;
if (ret &
(VM_FAULT_HWPOISON|VM_FAULT_SIGBUS))
return i ? i : -EFAULT;
BUG();
}
-
--[[linux-2.6.33/VM_FAULT_ERROR]]
-
--[[linux-2.6.33/VM_FAULT_OOM]]
-
--[[linux-2.6.33/VM_FAULT_HWPOISON]]
-
--[[linux-2.6.33/VM_FAULT_SIGBUS]]
if (ret & VM_FAULT_MAJOR)
tsk->maj_flt++;
else
tsk->min_flt++;
-
--[[linux-2.6.33/VM_FAULT_MAJOR]]
/*
* The VM_FAULT_WRITE bit tells us that
* do_wp_page has broken COW when necessary,
* even if maybe_mkwrite decided not to set
* pte_write. We can thus safely do subsequent
* page lookups as if they were reads. But only
* do so when looping for pte_write is futile:
* in some cases userspace may also be wanting
* to write to the gotten user page, which a
* read fault here might prevent (a readonly
* page might get reCOWed by userspace write).
*/
if ((ret & VM_FAULT_WRITE) &&
!(vma->vm_flags & VM_WRITE))
foll_flags &= ~FOLL_WRITE;
cond_resched();
-
--[[linux-2.6.33/VM_FAULT_WRITE]]
-
--[[linux-2.6.33/VM_WRITE]]
}
if (IS_ERR(page))
return i ? i : PTR_ERR(page);
-
--[[linux-2.6.33/IS_ERR()]]
-
--[[linux-2.6.33/PTR_ERR()]]
if (pages) {
pages[i] = page;
flush_anon_page(vma, page, start);
flush_dcache_page(page);
}
-
--[[linux-2.6.33/flush_anon_page()]]
-
--[[linux-2.6.33/flush_dcache_page()]]
if (vmas)
vmas[i] = vma;
i++;
start += PAGE_SIZE;
nr_pages--;
} while (nr_pages && start < vma->vm_end);
} while (nr_pages);
return i;
}
*コメント [#u245a6d7]
ページ名: