linux-4.4.1/collapse_huge_page()
*Referrers [#g57e09ec]
#backlinks
*Description [#w3319b2d]
-Path: [[linux-4.4.1/mm/huge_memory.c]]
-Collapses one PMD's worth of small anonymous pages (a 2 MiB-aligned range on x86-64) into a single transparent huge page.
--This is khugepaged's core collapse step: it allocates a huge page, copies the small pages into it, and replaces the PTE page table with a single huge PMD entry. On any failure the original page table is left (or put back) in place.
**Arguments [#a76f33b3]
-struct mm_struct *mm
--memory descriptor of the process whose pages are to be collapsed
--[[linux-4.4.1/mm_struct]]
-unsigned long address
--HPAGE_PMD_SIZE-aligned virtual address of the range to collapse
-struct page **hpage
--in/out slot for the new huge page (preallocated on !NUMA configurations); set to NULL on a successful collapse
--[[linux-4.4.1/page]]
-struct vm_area_struct *vma
--VMA believed to contain address; revalidated with find_vma() after mmap_sem is re-taken for write
--[[linux-4.4.1/vm_area_struct]]
-int node
--NUMA node to allocate the new huge page from
**Return value [#vc7eca58]
-void
--success is signalled by setting *hpage to NULL and incrementing khugepaged_pages_collapsed
**See also [#w77bf618]
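-collapse_huge_page() has a single caller, khugepaged_scan_pmd() in the same file. The scan runs with mmap_sem held for read; khugepaged_alloc_page() drops that read lock, and this function then re-takes mmap_sem for write.
-From memory, the 4.4 call site looks roughly like this (a reduced sketch, not authoritative; see khugepaged_scan_pmd() for the exact code — mm, address, hpage, vma, ret, node, pte and ptl are the caller's locals):
        pte_unmap_unlock(pte, ptl);
        if (ret) {
                node = khugepaged_find_target_node();
                /* collapse_huge_page() returns with mmap_sem released */
                collapse_huge_page(mm, address, hpage, vma, node);
        }
--[[linux-4.4.1/khugepaged_scan_pmd()]]
--[[linux-4.4.1/khugepaged_find_target_node()]]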
*Implementation [#f002a913]
static void collapse_huge_page(struct mm_struct *mm,
                               unsigned long address,
                               struct page **hpage,
                               struct vm_area_struct *vma,
                               int node)
{
        pmd_t *pmd, _pmd;
        pte_t *pte;
        pgtable_t pgtable;
        struct page *new_page;
        spinlock_t *pmd_ptl, *pte_ptl;
        int isolated;
        unsigned long hstart, hend;
        struct mem_cgroup *memcg;
        unsigned long mmun_start;       /* For mmu_notifiers */
        unsigned long mmun_end;         /* For mmu_notifiers */
        gfp_t gfp;
-
--[[linux-4.4.1/pmd_t]]
--[[linux-4.4.1/pte_t]]
--[[linux-4.4.1/pgtable_t]]
--[[linux-4.4.1/spinlock_t]]
--[[linux-4.4.1/mem_cgroup]]
--[[linux-4.4.1/gfp_t]]
        VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-
--[[linux-4.4.1/VM_BUG_ON()]]
        /* Only allocate from the target node */
        gfp = alloc_hugepage_gfpmask(khugepaged_defrag(), __GFP_OTHER_NODE) |
                __GFP_THISNODE;
-
--[[linux-4.4.1/alloc_hugepage_gfpmask()]]
--[[linux-4.4.1/khugepaged_defrag()]]
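-For reference, the helper as I remember it in 4.4 (a sketch, not authoritative; check mm/huge_memory.c): when the khugepaged defrag knob is off, the direct-reclaim bits are masked out of GFP_TRANSHUGE; __GFP_THISNODE then pins the allocation to the chosen node.
        static inline gfp_t alloc_hugepage_gfpmask(int defrag, gfp_t extra_gfp)
        {
                return (GFP_TRANSHUGE & ~(defrag ? 0 : __GFP_RECLAIM)) | extra_gfp;
        }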
        /* release the mmap_sem read lock. */
        new_page = khugepaged_alloc_page(hpage, gfp, mm, address, node);
        if (!new_page)
                return;
        if (unlikely(mem_cgroup_try_charge(new_page, mm,
                                           gfp, &memcg)))
                return;
-
--[[linux-4.4.1/khugepaged_alloc_page()]]
--[[linux-4.4.1/unlikely()]]
--[[linux-4.4.1/mem_cgroup_try_charge()]]
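-mem_cgroup_try_charge() only reserves the charge; it is later either bound to the page with mem_cgroup_commit_charge() (success path) or released with mem_cgroup_cancel_charge() (the out: label). A minimal sketch of that three-call protocol, with work() as a hypothetical placeholder for the collapse steps:
        static int charge_protocol_sketch(struct page *page, struct mm_struct *mm,
                                          gfp_t gfp)
        {
                struct mem_cgroup *memcg;

                if (mem_cgroup_try_charge(page, mm, gfp, &memcg))
                        return -ENOMEM;                 /* nothing to undo yet */
                if (work() < 0) {                       /* hypothetical step */
                        mem_cgroup_cancel_charge(page, memcg);
                        return -EAGAIN;                 /* failure: release */
                }
                mem_cgroup_commit_charge(page, memcg, false);   /* success: bind */
                return 0;
        }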
        /*
         * Prevent all access to pagetables with the exception of
         * gup_fast later handled by the ptep_clear_flush and the VM
         * handled by the anon_vma lock + PG_lock.
         */
        down_write(&mm->mmap_sem);
        if (unlikely(khugepaged_test_exit(mm)))
                goto out;
-
--[[linux-4.4.1/down_write()]]
--[[linux-4.4.1/khugepaged_test_exit()]]
        vma = find_vma(mm, address);
        if (!vma)
                goto out;
        hstart = (vma->vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
        hend = vma->vm_end & HPAGE_PMD_MASK;
        if (address < hstart || address + HPAGE_PMD_SIZE > hend)
                goto out;
        if (!hugepage_vma_check(vma))
                goto out;
-
--[[linux-4.4.1/find_vma()]]
--[[linux-4.4.1/hugepage_vma_check()]]
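-The hstart/hend computation rounds vm_start up and vm_end down to HPAGE_PMD_SIZE boundaries, then checks that the whole huge-page range fits inside the VMA. A small stand-alone C program demonstrating the arithmetic, with hypothetical VMA bounds (x86-64 constants assumed):
        #include <stdio.h>

        #define HPAGE_PMD_SIZE (2UL << 20)              /* 2 MiB on x86-64 */
        #define HPAGE_PMD_MASK (~(HPAGE_PMD_SIZE - 1))

        int main(void)
        {
                unsigned long vm_start = 0x00601234UL;  /* hypothetical VMA */
                unsigned long vm_end   = 0x00a00000UL;
                /* round start up, end down, to 2 MiB boundaries */
                unsigned long hstart = (vm_start + ~HPAGE_PMD_MASK) & HPAGE_PMD_MASK;
                unsigned long hend   = vm_end & HPAGE_PMD_MASK;
                /* prints hstart=0x800000 hend=0xa00000: one huge page fits */
                printf("hstart=%#lx hend=%#lx\n", hstart, hend);
                return 0;
        }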
        pmd = mm_find_pmd(mm, address);
        if (!pmd)
                goto out;
        anon_vma_lock_write(vma->anon_vma);
-
--[[linux-4.4.1/mm_find_pmd()]]
--[[linux-4.4.1/anon_vma_lock_write()]]
        pte = pte_offset_map(pmd, address);
        pte_ptl = pte_lockptr(mm, pmd);
        mmun_start = address;
        mmun_end = address + HPAGE_PMD_SIZE;
        mmu_notifier_invalidate_range_start(mm, mmun_start, mmun_end);
        pmd_ptl = pmd_lock(mm, pmd); /* probably unnecessary */
-
--[[linux-4.4.1/pte_offset_map()]]
--[[linux-4.4.1/pte_lockptr()]]
--[[linux-4.4.1/mmu_notifier_invalidate_range_start()]]
--[[linux-4.4.1/pmd_lock()]]
        /*
         * After this gup_fast can't run anymore. This also removes
         * any huge TLB entry from the CPU so we won't allow
         * huge and small TLB entries for the same virtual address
         * to avoid the risk of CPU bugs in that area.
         */
        _pmd = pmdp_collapse_flush(vma, address, pmd);
        spin_unlock(pmd_ptl);
        mmu_notifier_invalidate_range_end(mm, mmun_start, mmun_end);
-
--[[linux-4.4.1/pmdp_collapse_flush()]]
--[[linux-4.4.1/spin_unlock()]]
--[[linux-4.4.1/mmu_notifier_invalidate_range_end()]]
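-pmdp_collapse_flush() atomically clears the pmd and shoots down the TLB entries for the 2 MiB range, so neither gup_fast nor stale TLB entries can reach the old page table afterwards. From memory, the generic fallback in mm/pgtable-generic.c looks roughly like this (a sketch; architectures may override it):
        pmd_t pmdp_collapse_flush(struct vm_area_struct *vma, unsigned long address,
                                  pmd_t *pmdp)
        {
                pmd_t pmd;

                VM_BUG_ON(address & ~HPAGE_PMD_MASK);
                VM_BUG_ON(pmd_trans_huge(*pmdp));
                pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
                /* collapse entails shooting down ptes not pmd */
                flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
                return pmd;
        }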
        spin_lock(pte_ptl);
        isolated = __collapse_huge_page_isolate(vma, address, pte);
        spin_unlock(pte_ptl);
-
--[[linux-4.4.1/__collapse_huge_page_isolate()]]
        if (unlikely(!isolated)) {
                pte_unmap(pte);
                spin_lock(pmd_ptl);
                BUG_ON(!pmd_none(*pmd));
-
--[[linux-4.4.1/pte_unmap()]]
                /*
                 * We can only use set_pmd_at when establishing
                 * hugepmds and never for establishing regular pmds that
                 * point to regular pagetables. Use pmd_populate for that.
                 */
                pmd_populate(mm, pmd, pmd_pgtable(_pmd));
                spin_unlock(pmd_ptl);
                anon_vma_unlock_write(vma->anon_vma);
                goto out;
-
--[[linux-4.4.1/pmd_populate()]]
--[[linux-4.4.1/pmd_pgtable()]]
--[[linux-4.4.1/anon_vma_unlock_write()]]
        }
        /*
         * All pages are isolated and locked so anon_vma rmap
         * can't run anymore.
         */
        anon_vma_unlock_write(vma->anon_vma);
        __collapse_huge_page_copy(pte, new_page, vma, address, pte_ptl);
        pte_unmap(pte);
        __SetPageUptodate(new_page);
        pgtable = pmd_pgtable(_pmd);
-
--[[linux-4.4.1/anon_vma_unlock_write()]]
--[[linux-4.4.1/__collapse_huge_page_copy()]]
--[[linux-4.4.1/pte_unmap()]]
--[[linux-4.4.1/__SetPageUptodate()]]
--[[linux-4.4.1/pmd_pgtable()]]
        _pmd = mk_huge_pmd(new_page, vma->vm_page_prot);
        _pmd = maybe_pmd_mkwrite(pmd_mkdirty(_pmd), vma);
-
--[[linux-4.4.1/mk_huge_pmd()]]
--[[linux-4.4.1/maybe_pmd_mkwrite()]]
--[[linux-4.4.1/pmd_mkdirty()]]
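-Both helpers are small wrappers in mm/huge_memory.c; as I recall them in 4.4 (a sketch, not authoritative): mk_huge_pmd() builds a huge pmd entry for the page, and maybe_pmd_mkwrite() sets the write bit only when the VMA is writable.
        static inline pmd_t mk_huge_pmd(struct page *page, pgprot_t prot)
        {
                pmd_t entry;
                entry = mk_pmd(page, prot);
                entry = pmd_mkhuge(entry);
                return entry;
        }

        static inline pmd_t maybe_pmd_mkwrite(pmd_t pmd, struct vm_area_struct *vma)
        {
                if (likely(vma->vm_flags & VM_WRITE))
                        pmd = pmd_mkwrite(pmd);
                return pmd;
        }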
        /*
         * spin_lock() below is not the equivalent of smp_wmb() so
         * this is needed to avoid the copy_huge_page writes to become
         * visible after the set_pmd_at() write.
         */
        smp_wmb();
-
--[[linux-4.4.1/smp_wmb()]]
        spin_lock(pmd_ptl);
        BUG_ON(!pmd_none(*pmd));
        page_add_new_anon_rmap(new_page, vma, address);
        mem_cgroup_commit_charge(new_page, memcg, false);
        lru_cache_add_active_or_unevictable(new_page, vma);
        pgtable_trans_huge_deposit(mm, pmd, pgtable);
        set_pmd_at(mm, address, pmd, _pmd);
        update_mmu_cache_pmd(vma, address, pmd);
        spin_unlock(pmd_ptl);
-
--[[linux-4.4.1/spin_lock()]]
--[[linux-4.4.1/BUG_ON()]]
--[[linux-4.4.1/pmd_none()]]
--[[linux-4.4.1/page_add_new_anon_rmap()]]
--[[linux-4.4.1/mem_cgroup_commit_charge()]]
--[[linux-4.4.1/lru_cache_add_active_or_unevictable()]]
--[[linux-4.4.1/pgtable_trans_huge_deposit()]]
--[[linux-4.4.1/set_pmd_at()]]
--[[linux-4.4.1/update_mmu_cache_pmd()]]
--[[linux-4.4.1/spin_unlock()]]
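-Note that the old PTE page (pgtable) is not freed here: pgtable_trans_huge_deposit() stashes it under the pmd lock so that a later split of the huge pmd can withdraw it without having to allocate memory. From memory, the generic version in mm/pgtable-generic.c looks roughly like this (a sketch; architectures may override it):
        void pgtable_trans_huge_deposit(struct mm_struct *mm, pmd_t *pmdp,
                                        pgtable_t pgtable)
        {
                assert_spin_locked(pmd_lockptr(mm, pmdp));
                /* FIFO */
                if (!pmd_huge_pte(mm, pmdp))
                        INIT_LIST_HEAD(&pgtable->lru);
                else
                        list_add(&pgtable->lru, &pmd_huge_pte(mm, pmdp)->lru);
                pmd_huge_pte(mm, pmdp) = pgtable;
        }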
        *hpage = NULL;
        khugepaged_pages_collapsed++;
-
--[[linux-4.4.1/khugepaged_pages_collapsed(global)]]
--global counter exported as /sys/kernel/mm/transparent_hugepage/khugepaged/pages_collapsed
out_up_write:
        up_write(&mm->mmap_sem);
        return;
-
--[[linux-4.4.1/up_write()]]
out:
        mem_cgroup_cancel_charge(new_page, memcg);
        goto out_up_write;
-
--[[linux-4.4.1/mem_cgroup_cancel_charge()]]
}
*Comments [#fc826de7]