aboutsummaryrefslogtreecommitdiff
path: root/kernel/panic_flush.c
blob: b53ab78d0446bcecf3a8988a77ed63880a476421 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2018-2020 Oplus. All rights reserved.
 */

#define DEBUG
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/blkdev.h>
#include <linux/kthread.h>
#include <linux/workqueue.h>

#define PANIC_FLUSH_POLL_MS (10)


/*
 * State shared between the flush worker thread and the panic-time
 * caller (panic_flush_device_cache).
 */
struct panic_flush_control {
	struct task_struct *flush_thread;	/* worker created at module init */
	wait_queue_head_t flush_wq;		/* worker sleeps here until a flush is requested */
	atomic_t flush_issuing;			/* > 0 while a flush request is pending/in progress */
	atomic_t flush_issued;			/* incremented by the worker after each completed pass */
};

/* Single module-wide instance; NULL if module init failed. */
static struct panic_flush_control *pfc;
/*
 * iterate_supers() callback: issue a cache flush to the backing block
 * device of every writable superblock.  @arg points to a running count
 * of successful flushes, bumped and logged on each success.
 */
static void panic_issue_flush(struct super_block *sb, void *arg)
{
	int *flush_count = arg;
	int ret = -1;

	if (sb->s_bdev && !(sb->s_flags & MS_RDONLY))
		ret = blkdev_issue_flush(sb->s_bdev, GFP_KERNEL, NULL);

	if (ret)
		return;

	(*flush_count)++;
	pr_emerg("blkdev_issue_flush before panic return %d\n", *flush_count);
}

/*
 * Worker thread: sleeps on flush_wq until panic_flush_device_cache()
 * raises flush_issuing, then flushes every superblock's block device
 * and acknowledges via flush_issued.  Runs until kthread_stop().
 */
static int panic_flush_thread(void *data)
{
	int flush_count = 0;

	while (!kthread_should_stop()) {
		wait_event(pfc->flush_wq, kthread_should_stop() ||
			   atomic_read(&pfc->flush_issuing) > 0);

		if (atomic_read(&pfc->flush_issuing) <= 0)
			continue;

		iterate_supers(panic_issue_flush, &flush_count);
		pr_emerg("Up to now, total %d panic_issue_flush_count\n",
			 flush_count);
		atomic_inc(&pfc->flush_issued);
		atomic_dec(&pfc->flush_issuing);
	}
	return 0;
}

extern bool is_fulldump_enable(void);

/*
 * Decide whether a pre-panic block-device cache flush is wanted:
 * skip it whenever a full dump is enabled.
 */
static inline bool need_flush_device_cache(void)
{
	return !is_fulldump_enable();
}

/*
 * Request a block-device cache flush from the worker thread ahead of a
 * panic and busy-wait for it to complete.
 *
 * @timeout: maximum wait, in milliseconds.
 * Returns the remaining timeout (may reach <= 0 if the wait expired,
 * or is returned unchanged when the flush is skipped).
 *
 * Runs on the panic path, so it must not sleep: completion is polled
 * with mdelay() in PANIC_FLUSH_POLL_MS steps.
 */
int panic_flush_device_cache(int timeout)
{
	pr_emerg("%s\n", __func__);

	/* Module init may have failed; don't oops inside the panic path. */
	if (!pfc)
		return timeout;

	if (!need_flush_device_cache()) {
		pr_emerg("%s: skip flush device cache\n", __func__);
		return timeout;
	}

	/*
	 * Only the first caller drives the flush; later concurrent callers
	 * just bump the counter and return.  The wake-up is issued
	 * unconditionally: gating it on waitqueue_active() (as before)
	 * races with the worker between its flush_issuing test and going
	 * to sleep, so the single pre-panic wake-up could be lost and no
	 * flush performed at all (see the unsynchronized-waitqueue_active
	 * warning in include/linux/wait.h).  wake_up() on a queue with no
	 * sleepers is cheap and harmless.
	 */
	if (atomic_inc_return(&pfc->flush_issuing) == 1) {
		pr_emerg("%s: flush device cache\n", __func__);
		atomic_set(&pfc->flush_issued, 0);
		wake_up(&pfc->flush_wq);
		while (timeout > 0 && atomic_read(&pfc->flush_issued) == 0) {
			mdelay(PANIC_FLUSH_POLL_MS);
			timeout -= PANIC_FLUSH_POLL_MS;
		}
		pr_emerg("%s: remaining timeout = %d\n", __func__, timeout);
	}
	return timeout;
}

/*
 * Module init: allocate the control structure and launch the worker
 * thread that performs the actual flushing.  Returns 0 on success or a
 * negative errno; on failure pfc is left NULL.
 */
static int __init create_panic_flush_control(void)
{
	struct panic_flush_control *ctl;

	pr_debug("%s\n", __func__);

	ctl = kzalloc(sizeof(*ctl), GFP_KERNEL);
	if (!ctl) {
		pr_err("%s: fail to allocate memory\n", __func__);
		return -ENOMEM;
	}

	init_waitqueue_head(&ctl->flush_wq);
	atomic_set(&ctl->flush_issuing, 0);
	atomic_set(&ctl->flush_issued, 0);

	/* Publish before starting the thread: the worker reads pfc. */
	pfc = ctl;
	ctl->flush_thread = kthread_run(panic_flush_thread, ctl, "panic_flush");
	if (IS_ERR(ctl->flush_thread)) {
		int err = PTR_ERR(ctl->flush_thread);

		pfc = NULL;
		kfree(ctl);
		return err;
	}
	return 0;
}

/*
 * Module exit: stop the worker thread and release the control
 * structure.  No-op if init never succeeded.
 */
static void __exit destroy_panic_flush_control(void)
{
	pr_debug("%s\n", __func__);

	if (!pfc || !pfc->flush_thread)
		return;

	pr_debug("%s: stop panic_flush thread\n", __func__);
	kthread_stop(pfc->flush_thread);
	kfree(pfc);
	pfc = NULL;
}
module_init(create_panic_flush_control);
module_exit(destroy_panic_flush_control);
MODULE_DESCRIPTION("OPLUS panic flush control");
MODULE_LICENSE("GPL v2");