| author | droidfivex <droidfivex@gmail.com> | 2016-11-18 01:05:47 +0900 |
|---|---|---|
| committer | droidfivex <droidfivex@gmail.com> | 2016-11-18 01:05:47 +0900 |
| commit | 1e8e1576c95730e425f472a569bb15325329a99e | |
| tree | 66b14ebbc47c0bc6023aa57b91b1ab5a9ee6c8e6 /mm/memory.c | |
| parent | 8b77716a2a034c7658ea6569ddaa814c312dd94f | |
| parent | 30bdd78c2f639b28acc02a4133479311e01b18ce | |
Diffstat (limited to 'mm/memory.c')
| -rw-r--r-- | mm/memory.c | 14 |
1 file changed, 12 insertions, 2 deletions
```diff
diff --git a/mm/memory.c b/mm/memory.c
index 55a21eb8478..c4866ee44ca 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1445,6 +1445,16 @@ int zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
+/*
+ * FOLL_FORCE can write to even unwritable pte's, but only
+ * after we've gone through a COW cycle and they are dirty.
+ */
+static inline bool can_follow_write_pte(pte_t pte, unsigned int flags)
+{
+	return pte_write(pte) ||
+		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
+}
+
 /**
  * follow_page - look up a page descriptor from a user-virtual address
  * @vma: vm_area_struct mapping @address
@@ -1527,7 +1537,7 @@ split_fallthrough:
 	pte = *ptep;
 	if (!pte_present(pte))
 		goto no_page;
-	if ((flags & FOLL_WRITE) && !pte_write(pte))
+	if ((flags & FOLL_WRITE) && !can_follow_write_pte(pte, flags))
 		goto unlock;
 
 	page = vm_normal_page(vma, address, pte);
@@ -1830,7 +1840,7 @@ int __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
 				 */
 				if ((ret & VM_FAULT_WRITE) &&
 				    !(vma->vm_flags & VM_WRITE))
-					foll_flags &= ~FOLL_WRITE;
+					foll_flags |= FOLL_COW;
 
 				cond_resched();
 			}
```
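The heart of the patch is the new `can_follow_write_pte()` predicate together with the flag change at the bottom: instead of dropping `FOLL_WRITE` after a forced COW fault, `__get_user_pages()` now records that the COW cycle happened by setting `FOLL_COW`, and `follow_page()` only honours a forced write to a read-only PTE once that flag is set *and* the PTE is dirty. The following is a minimal standalone sketch of that truth table; the flag values and the `toy_pte` type are illustrative stand-ins (the real definitions live in the kernel's `<linux/mm.h>` and the per-arch pte headers), and only the predicate's logic mirrors the patch:

```c
#include <stdbool.h>
#include <stdio.h>

/* Flag values here are illustrative stand-ins, not the kernel's. */
#define FOLL_WRITE 0x01
#define FOLL_FORCE 0x10
#define FOLL_COW   0x4000

/* Toy stand-in for pte_t: just the two bits the predicate inspects. */
struct toy_pte { bool write; bool dirty; };

static bool pte_write(struct toy_pte pte) { return pte.write; }
static bool pte_dirty(struct toy_pte pte) { return pte.dirty; }

/*
 * Same logic as the patched can_follow_write_pte(). Note that the
 * FOLL_WRITE check itself stays in the caller (follow_page()); the
 * predicate only decides whether a write through this pte is allowed.
 */
static bool can_follow_write_pte(struct toy_pte pte, unsigned int flags)
{
	return pte_write(pte) ||
		((flags & FOLL_FORCE) && (flags & FOLL_COW) && pte_dirty(pte));
}

int main(void)
{
	struct toy_pte ro_clean = { .write = false, .dirty = false };
	struct toy_pte ro_dirty = { .write = false, .dirty = true  };

	/* Before the COW fault: a forced write to a clean read-only pte is
	 * refused, so follow_page() falls through to the fault path. */
	printf("%d\n", can_follow_write_pte(ro_clean,
					    FOLL_WRITE | FOLL_FORCE));		/* 0 */

	/* After the fault set FOLL_COW and dirtied the pte, the forced
	 * write is allowed without ever clearing FOLL_WRITE. */
	printf("%d\n", can_follow_write_pte(ro_dirty,
					    FOLL_WRITE | FOLL_FORCE | FOLL_COW));	/* 1 */
	return 0;
}
```

Requiring `pte_dirty()` on top of `FOLL_COW` is what makes the check robust: the dirty bit is set by the COW fault itself, so a stale `FOLL_COW` cannot be replayed against a pte that has since been replaced by a clean read-only mapping.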
