Skip to content

Commit a1b2289

Browse files
Sherry Yang authored and torvalds committed
android: binder: drop lru lock in isolate callback
Drop the global lru lock in isolate callback before calling zap_page_range which calls cond_resched, and re-acquire the global lru lock before returning. Also change return code to LRU_REMOVED_RETRY. Use mmput_async when fail to acquire mmap sem in an atomic context. Fix "BUG: sleeping function called from invalid context" errors when CONFIG_DEBUG_ATOMIC_SLEEP is enabled. Also restore mmput_async, which was initially introduced in commit ec8d7c1 ("mm, oom_reaper: do not mmput synchronously from the oom reaper context"), and was removed in commit 2129258 ("mm: oom: let oom_reap_task and exit_mmap run concurrently"). Link: http://lkml.kernel.org/r/[email protected] Fixes: f2517eb ("android: binder: Add global lru shrinker to binder") Signed-off-by: Sherry Yang <[email protected]> Signed-off-by: Greg Kroah-Hartman <[email protected]> Reported-by: Kyle Yan <[email protected]> Acked-by: Arve Hjønnevåg <[email protected]> Acked-by: Michal Hocko <[email protected]> Cc: Martijn Coenen <[email protected]> Cc: Todd Kjos <[email protected]> Cc: Riley Andrews <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Hillf Danton <[email protected]> Cc: Peter Zijlstra <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Andy Lutomirski <[email protected]> Cc: Oleg Nesterov <[email protected]> Cc: Hoeun Ryu <[email protected]> Cc: Christopher Lameter <[email protected]> Cc: Vegard Nossum <[email protected]> Cc: Frederic Weisbecker <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 3f2eb02 commit a1b2289

3 files changed

Lines changed: 36 additions & 6 deletions

File tree

drivers/android/binder_alloc.c

Lines changed: 12 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -913,6 +913,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
913913
struct binder_alloc *alloc;
914914
uintptr_t page_addr;
915915
size_t index;
916+
struct vm_area_struct *vma;
916917

917918
alloc = page->alloc;
918919
if (!mutex_trylock(&alloc->mutex))
@@ -923,16 +924,22 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
923924

924925
index = page - alloc->pages;
925926
page_addr = (uintptr_t)alloc->buffer + index * PAGE_SIZE;
926-
if (alloc->vma) {
927+
vma = alloc->vma;
928+
if (vma) {
927929
mm = get_task_mm(alloc->tsk);
928930
if (!mm)
929931
goto err_get_task_mm_failed;
930932
if (!down_write_trylock(&mm->mmap_sem))
931933
goto err_down_write_mmap_sem_failed;
934+
}
935+
936+
list_lru_isolate(lru, item);
937+
spin_unlock(lock);
932938

939+
if (vma) {
933940
trace_binder_unmap_user_start(alloc, index);
934941

935-
zap_page_range(alloc->vma,
942+
zap_page_range(vma,
936943
page_addr + alloc->user_buffer_offset,
937944
PAGE_SIZE);
938945

@@ -950,13 +957,12 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
950957

951958
trace_binder_unmap_kernel_end(alloc, index);
952959

953-
list_lru_isolate(lru, item);
954-
960+
spin_lock(lock);
955961
mutex_unlock(&alloc->mutex);
956-
return LRU_REMOVED;
962+
return LRU_REMOVED_RETRY;
957963

958964
err_down_write_mmap_sem_failed:
959-
mmput(mm);
965+
mmput_async(mm);
960966
err_get_task_mm_failed:
961967
err_page_already_freed:
962968
mutex_unlock(&alloc->mutex);

include/linux/sched/mm.h

Lines changed: 6 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -84,6 +84,12 @@ static inline bool mmget_not_zero(struct mm_struct *mm)
8484

8585
/* mmput gets rid of the mappings and all user-space */
8686
extern void mmput(struct mm_struct *);
87+
#ifdef CONFIG_MMU
88+
/* same as above but performs the slow path from the async context. Can
89+
* be called from the atomic context as well
90+
*/
91+
void mmput_async(struct mm_struct *);
92+
#endif
8793

8894
/* Grab a reference to a task's mm, if it is not already going away */
8995
extern struct mm_struct *get_task_mm(struct task_struct *task);

kernel/fork.c

Lines changed: 18 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -946,6 +946,24 @@ void mmput(struct mm_struct *mm)
946946
}
947947
EXPORT_SYMBOL_GPL(mmput);
948948

949+
#ifdef CONFIG_MMU
950+
static void mmput_async_fn(struct work_struct *work)
951+
{
952+
struct mm_struct *mm = container_of(work, struct mm_struct,
953+
async_put_work);
954+
955+
__mmput(mm);
956+
}
957+
958+
void mmput_async(struct mm_struct *mm)
959+
{
960+
if (atomic_dec_and_test(&mm->mm_users)) {
961+
INIT_WORK(&mm->async_put_work, mmput_async_fn);
962+
schedule_work(&mm->async_put_work);
963+
}
964+
}
965+
#endif
966+
949967
/**
950968
* set_mm_exe_file - change a reference to the mm's executable file
951969
*

0 commit comments

Comments
 (0)