diff --git a/include/linux/trace_recursion.h b/include/linux/trace_recursion.h
index fe95f0922526624201eebf7644868380ed9d378b..c303f7a114e916c6c5a36c7004bace8397b1d28c 100644
--- a/include/linux/trace_recursion.h
+++ b/include/linux/trace_recursion.h
@@ -116,13 +116,9 @@ enum {
 
 static __always_inline int trace_get_context_bit(void)
 {
-       unsigned long pc = preempt_count();
+       unsigned char bit = interrupt_context_level();
 
-       if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
-               return TRACE_CTX_NORMAL;
-       else
-               return pc & NMI_MASK ? TRACE_CTX_NMI :
-                       pc & HARDIRQ_MASK ? TRACE_CTX_IRQ : TRACE_CTX_SOFTIRQ;
+       return TRACE_CTX_NORMAL - bit;
 }
 
 #ifdef CONFIG_FTRACE_RECORD_RECURSION
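Note on the first hunk: interrupt_context_level() (defined in include/linux/preempt.h) folds preempt_count() into a nesting level of 0 for plain task context, 1 for softirq, 2 for hardirq and 3 for NMI. Because the TRACE_CTX_* enum earlier in this header orders the contexts NMI (0), IRQ (1), SOFTIRQ (2), NORMAL (3), the single subtraction reproduces the removed if/else chain. A minimal userspace sketch of the mapping (the enum mirrors this header; the level parameter stands in for interrupt_context_level()):

        #include <stdio.h>

        /* Mirrors the TRACE_CTX_* ordering earlier in this header. */
        enum { TRACE_CTX_NMI, TRACE_CTX_IRQ, TRACE_CTX_SOFTIRQ, TRACE_CTX_NORMAL };

        /* level stands in for interrupt_context_level():
         * 0 = task, 1 = softirq, 2 = hardirq, 3 = NMI. */
        static int context_bit(unsigned int level)
        {
                return TRACE_CTX_NORMAL - level;
        }

        int main(void)
        {
                static const char * const name[] = { "NMI", "IRQ", "SOFTIRQ", "NORMAL" };
                unsigned int level;

                /* Prints NORMAL, SOFTIRQ, IRQ, NMI for levels 0..3,
                 * matching the old branch logic above. */
                for (level = 0; level < 4; level++)
                        printf("level %u -> TRACE_CTX_%s\n", level, name[context_bit(level)]);
                return 0;
        }
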
@@ -139,6 +135,9 @@ extern void ftrace_record_recursion(unsigned long ip, unsigned long parent_ip);
 # define do_ftrace_record_recursion(ip, pip)   do { } while (0)
 #endif
 
+/*
+ * Preemption is guaranteed to be disabled when the returned bit is >= 0.
+ */
 static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsigned long pip,
                                                        int start)
 {
@@ -148,8 +147,12 @@ static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsign
        bit = trace_get_context_bit() + start;
        if (unlikely(val & (1 << bit))) {
                /*
-                * It could be that preempt_count has not been updated during
-                * a switch between contexts. Allow for a single recursion.
+                * If an interrupt occurs during a trace, and another trace
+                * happens in that interrupt but before the preempt_count is
+                * updated to reflect the new interrupt context, then this
+                * will think a recursion occurred, and the event will be dropped.
+                * Let a single instance happen via the TRANSITION_BIT to
+                * not drop those events.
                 */
                bit = TRACE_CTX_TRANSITION + start;
                if (val & (1 << bit)) {
@@ -162,11 +165,17 @@ static __always_inline int trace_test_and_set_recursion(unsigned long ip, unsign
        current->trace_recursion = val;
        barrier();
 
+       preempt_disable_notrace();
+
        return bit;
 }
 
+/*
+ * Preemption will be enabled (if it was previously enabled).
+ */
 static __always_inline void trace_clear_recursion(int bit)
 {
+       preempt_enable_notrace();
        barrier();
        trace_recursion_clear(bit);
 }
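
Taken together, these hunks turn trace_test_and_set_recursion()/trace_clear_recursion() into a context-local trylock that also pins the CPU: a successful lock (bit >= 0) now implies preemption stays off until the matching clear. The _notrace variants are used so the preempt accounting itself cannot re-enter the tracer. Below is a self-contained userspace model of the bit protocol, including the TRANSITION fallback described in the comment above; the names and the single recursion word are simplifications for illustration, not kernel code:

        #include <stdio.h>

        enum { CTX_NMI, CTX_IRQ, CTX_SOFTIRQ, CTX_NORMAL, CTX_TRANSITION };

        static unsigned long recursion; /* stands in for current->trace_recursion */

        static int test_and_set(int ctx)
        {
                int bit = ctx;

                if (recursion & (1UL << bit)) {
                        /* Context bit already held (e.g. stale preempt_count
                         * during interrupt entry): allow one transition. */
                        bit = CTX_TRANSITION;
                        if (recursion & (1UL << bit))
                                return -1;      /* real recursion: drop event */
                }
                recursion |= 1UL << bit;
                /* the kernel version calls preempt_disable_notrace() here */
                return bit;
        }

        static void clear(int bit)
        {
                /* the kernel version calls preempt_enable_notrace() here */
                recursion &= ~(1UL << bit);
        }

        int main(void)
        {
                int a = test_and_set(CTX_NORMAL);       /* 3: normal slot taken */
                int b = test_and_set(CTX_NORMAL);       /* 4: transition slot */
                int c = test_and_set(CTX_NORMAL);       /* -1: dropped */

                printf("%d %d %d\n", a, b, c);          /* prints: 3 4 -1 */
                clear(b);
                clear(a);
                return 0;
        }

As in the kernel version, lock and clear must be strictly paired by the caller, since the clear is now also what re-enables preemption.
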
@@ -178,7 +187,7 @@ static __always_inline void trace_clear_recursion(int bit)
  * tracing recursed in the same context (normal vs interrupt),
  *
  * Returns: -1 if a recursion happened.
- *           >= 0 if no recursion
+ *           >= 0 if no recursion.
  */
 static __always_inline int ftrace_test_recursion_trylock(unsigned long ip,
                                                         unsigned long parent_ip)
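
With this change, a typical ftrace callback no longer needs to manage preemption itself; holding the recursion lock is enough. A hedged sketch of the caller pattern in kernel style (the callback and do_trace_work() are illustrative, not part of this patch; the trylock/unlock helpers are the real API from this header):

        #include <linux/ftrace.h>

        static void my_callback(unsigned long ip, unsigned long parent_ip,
                                struct ftrace_ops *op, struct ftrace_regs *fregs)
        {
                int bit;

                bit = ftrace_test_recursion_trylock(ip, parent_ip);
                if (bit < 0)
                        return;         /* recursing: nothing to undo */

                /* bit >= 0: recursion lock held and preemption disabled,
                 * so per-CPU data may be used safely here. */
                do_trace_work(ip, parent_ip);   /* hypothetical work */

                ftrace_test_recursion_unlock(bit);      /* re-enables preemption */
        }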