/*
 * Disregards a certain amount of sleep time (sched_latency_ns) and
 * considers the task to be running during that period. This gives it
 * a service deficit on wakeup, allowing it to run sooner.
 */
SCHED_FEAT(FAIR_SLEEPERS, 1)

/*
 * Only give sleepers 50% of their service deficit. This allows
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)

/*
 * By not normalizing the sleep time, heavy tasks get an effectively
 * longer period, and lighter tasks an effectively shorter period,
 * during which they are considered running.
 */
SCHED_FEAT(NORMALIZED_SLEEPER, 0)
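
/*
 * Illustrative sketch (editor's note, not part of this file): roughly
 * how place_entity() in kernel/sched_fair.c consumes the three sleeper
 * features above when a task wakes. Condensed and approximate, after
 * the 2.6.32-era code; the helper name below is hypothetical, the real
 * function is place_entity() and also handles initial placement.
 */
#if 0
static void place_entity_sleeper_sketch(struct cfs_rq *cfs_rq,
                                        struct sched_entity *se)
{
        u64 vruntime = cfs_rq->min_vruntime;

        if (sched_feat(FAIR_SLEEPERS)) {
                unsigned long thresh = sysctl_sched_latency;

                /* Scale the deficit to the task's weight. */
                if (sched_feat(NORMALIZED_SLEEPER))
                        thresh = calc_delta_fair(thresh, se);

                /* Only grant half the deficit. */
                if (sched_feat(GENTLE_FAIR_SLEEPERS))
                        thresh >>= 1;

                vruntime -= thresh;
        }

        /* Ensure we never gain time by being placed backwards. */
        se->vruntime = max_vruntime(se->vruntime, vruntime);
}
#endif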

/*
 * Place new tasks ahead so that they do not starve already running
 * tasks.
 */
SCHED_FEAT(START_DEBIT, 1)
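
/*
 * Illustrative sketch (editor's note): the corresponding branch in
 * place_entity() for newly forked tasks, approximately as in 2.6.32.
 * A new task starts one vslice ahead of min_vruntime, so it queues
 * behind the tasks already owed the current period.
 */
#if 0
        if (initial && sched_feat(START_DEBIT))
                vruntime += sched_vslice(cfs_rq, se);
#endif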

/*
 * Should wakeups try to preempt running tasks?
 */
SCHED_FEAT(WAKEUP_PREEMPT, 1)

/*
 * Compute wakeup_gran based on task behaviour, clipped to
 * [0, sched_wakeup_gran_ns].
 */
SCHED_FEAT(ADAPTIVE_GRAN, 1)
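
/*
 * Illustrative sketch (editor's note): adaptive_gran() from
 * kernel/sched_fair.c, approximately as in 2.6.32. The granularity
 * becomes the time the current task is still expected to run before
 * the wakee's usual wakeup point (se->avg_wakeup), clipped to the
 * sysctl bound.
 */
#if 0
static unsigned long adaptive_gran(struct sched_entity *curr,
                                   struct sched_entity *se)
{
        u64 this_run = curr->sum_exec_runtime - curr->prev_sum_exec_runtime;
        u64 expected_wakeup = se->avg_wakeup + curr->prev_sum_exec_runtime;
        u64 gran = 0;

        if (this_run < expected_wakeup)
                gran = expected_wakeup - this_run;

        return min_t(s64, gran, sysctl_sched_wakeup_granularity);
}
#endif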

/*
 * When converting the wakeup granularity to virtual time, do it such
 * that heavier tasks preempting a lighter task have an edge.
 */
SCHED_FEAT(ASYM_GRAN, 1)
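
/*
 * Illustrative sketch (editor's note): the conversion step inside
 * wakeup_gran(curr, se), where se is the waking task, approximately
 * as in 2.6.32. Scaling by se's weight makes the virtual gran smaller
 * for a heavy wakee, so it preempts a light runner more easily.
 */
#if 0
        if (sched_feat(ASYM_GRAN)) {
                /* gran in the wakee's virtual time */
                if (se->load.weight != NICE_0_LOAD)
                        gran = calc_delta_fair(gran, se);
        } else {
                /* gran in the current task's virtual time */
                if (curr->load.weight != NICE_0_LOAD)
                        gran = calc_delta_mine(gran, NICE_0_LOAD, &curr->load);
        }
#endif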

/*
 * Always wakeup-preempt SYNC wakeups, see SYNC_WAKEUPS.
 */
SCHED_FEAT(WAKEUP_SYNC, 0)

/*
 * Wakeup-preempt based on task behaviour. Tasks that do not overlap
 * don't get preempted.
 */
SCHED_FEAT(WAKEUP_OVERLAP, 0)
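
/*
 * Illustrative sketch (editor's note): the WAKEUP_OVERLAP test in
 * check_preempt_wakeup(), approximately as in 2.6.32. Here se is the
 * current task's entity and pse the waking task's; avg_overlap
 * measures how long the two tend to run concurrently.
 */
#if 0
        if (sched_feat(WAKEUP_OVERLAP) && (sync ||
            (se->avg_overlap < sysctl_sched_migration_cost &&
             pse->avg_overlap < sysctl_sched_migration_cost))) {
                resched_task(curr);
                return;
        }
#endif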

/*
 * Use the SYNC wakeup hint; pipes and the like use this to indicate
 * that the remote end is likely to consume the data we just wrote,
 * and therefore benefits cache-wise from being placed on the same
 * cpu. See also AFFINE_WAKEUPS.
 */
SCHED_FEAT(SYNC_WAKEUPS, 1)

/*
 * Based on load and program behaviour, see if it makes sense to place
 * a newly woken task on the same cpu as the task that woke it --
 * improves cache locality. Typically used with SYNC wakeups as
 * generated by pipes and the like; see also SYNC_WAKEUPS.
 */
SCHED_FEAT(AFFINE_WAKEUPS, 1)
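
/*
 * Illustrative sketch (editor's note): a heavily condensed view of
 * the wake-affine path in select_task_rq_fair(), approximately as in
 * 2.6.32; the real code also walks sched domains and balances. If the
 * SYNC hint survives and wake_affine() says the load allows it, the
 * wakee is placed on the waking cpu.
 */
#if 0
        int cpu = smp_processor_id();           /* the waking cpu */
        int sync = wake_flags & WF_SYNC;

        if (!sched_feat(SYNC_WAKEUPS))
                sync = 0;

        if (sched_feat(AFFINE_WAKEUPS) && affine_sd &&
            wake_affine(affine_sd, p, sync))
                return cpu;
#endif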

/*
 * Weaken SYNC hint based on overlap.
 */
SCHED_FEAT(SYNC_LESS, 1)

/*
 * Add SYNC hint based on overlap.
 */
SCHED_FEAT(SYNC_MORE, 0)
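
/*
 * Illustrative sketch (editor's note): how select_task_rq_fair()
 * tempers the SYNC hint with avg_overlap, approximately as in 2.6.32.
 * Long overlap means waker and wakee really do run concurrently, so
 * the hint is dropped; short overlap on both sides lets SYNC_MORE add
 * it even when the caller did not.
 */
#if 0
        if (sched_feat(SYNC_LESS) &&
            (curr->se.avg_overlap > sysctl_sched_migration_cost ||
             p->se.avg_overlap > sysctl_sched_migration_cost))
                sync = 0;

        if (sched_feat(SYNC_MORE) &&
            (curr->se.avg_overlap < sysctl_sched_migration_cost &&
             p->se.avg_overlap < sysctl_sched_migration_cost))
                sync = 1;
#endif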

/*
 * Prefer to schedule the task we woke last (assuming it failed
 * wakeup-preemption), since it's likely going to consume data we
 * touched; increases cache locality.
 */
SCHED_FEAT(NEXT_BUDDY, 0)

/*
 * Prefer to schedule the task that ran last (when we did
 * wake-preempt), as it will likely touch the same data; increases
 * cache locality.
 */
SCHED_FEAT(LAST_BUDDY, 1)
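
/*
 * Illustrative sketch (editor's note): pick_next_entity() from
 * kernel/sched_fair.c, approximately as in 2.6.32. The buddies only
 * override the leftmost (most eligible) entity when
 * wakeup_preempt_entity() says the unfairness stays within a gran.
 */
#if 0
static struct sched_entity *pick_next_entity(struct cfs_rq *cfs_rq)
{
        struct sched_entity *se = __pick_next_entity(cfs_rq);
        struct sched_entity *left = se;

        /* NEXT_BUDDY: the task we just woke */
        if (cfs_rq->next && wakeup_preempt_entity(cfs_rq->next, left) < 1)
                se = cfs_rq->next;

        /* LAST_BUDDY: return the cpu to the preempted task */
        if (cfs_rq->last && wakeup_preempt_entity(cfs_rq->last, left) < 1)
                se = cfs_rq->last;

        return se;
}
#endif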

/*
 * Consider buddies to be cache hot; decreases the likelihood of a
 * cache buddy being migrated away, increases cache locality.
 */
SCHED_FEAT(CACHE_HOT_BUDDY, 1)
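
/*
 * Illustrative sketch (editor's note): the buddy short-circuit at the
 * top of task_hot() in kernel/sched.c, approximately as in 2.6.32.
 * Declaring buddies hot makes the load balancer reluctant to migrate
 * them away from their cache.
 */
#if 0
        /* Buddy candidates are cache hot: */
        if (sched_feat(CACHE_HOT_BUDDY) && this_rq()->nr_running &&
            (&p->se == cfs_rq_of(&p->se)->next ||
             &p->se == cfs_rq_of(&p->se)->last))
                return 1;
#endif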

/*
 * Use arch-dependent cpu power functions.
 */
SCHED_FEAT(ARCH_POWER, 0)

SCHED_FEAT(HRTICK, 0)
SCHED_FEAT(DOUBLE_TICK, 0)
SCHED_FEAT(LB_BIAS, 1)
SCHED_FEAT(LB_SHARES_UPDATE, 1)
SCHED_FEAT(ASYM_EFF_LOAD, 1)

/*
 * Spin-wait on mutex acquisition when the mutex owner is running on
 * another cpu -- assumes that when the owner is running, it will soon
 * release the lock. Decreases scheduling overhead.
 */
SCHED_FEAT(OWNER_SPIN, 1)
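
/*
 * Illustrative sketch (editor's note): the adaptive-spin loop in
 * __mutex_lock_common() (kernel/mutex.c), heavily condensed and
 * approximately as in 2.6.32. mutex_spin_on_owner() itself bails out
 * immediately when sched_feat(OWNER_SPIN) is off, or once the owner
 * is no longer running on its cpu.
 */
#if 0
        for (;;) {
                struct thread_info *owner;

                /* Spin while the owner is still running on a cpu. */
                owner = ACCESS_ONCE(lock->owner);
                if (owner && !mutex_spin_on_owner(lock, owner))
                        break;          /* owner scheduled out: sleep */

                /* Try to grab the lock between owners. */
                if (atomic_cmpxchg(&lock->count, 1, 0) == 1)
                        return 0;       /* acquired without sleeping */

                cpu_relax();            /* be polite to SMT siblings */
        }
#endif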