#ifndef __RES_COUNTER_H__
#define __RES_COUNTER_H__

/*
 * Contain common data types and routines for resource accounting.
 *
 * Copyright 2007 OpenVZ SWsoft Inc
 *
 * See Documentation/cgroups/resource_counter.txt for more
 * info about what this counter is.
 */

#include <linux/spinlock.h>
#include <linux/errno.h>

/*
 * The core object. The cgroup that wishes to account for some
 * resource may include this counter into its structures and use
 * the helpers described below.
 */
struct res_counter {
	/* the current resource consumption level */
	unsigned long long usage;
	/* the maximal value of the usage since the counter was created */
	unsigned long long max_usage;
	/* the hard limit that usage cannot exceed */
	unsigned long long limit;
	/* the soft limit that usage may exceed */
	unsigned long long soft_limit;
	/* the number of unsuccessful attempts to consume the resource */
	unsigned long long failcnt;
	/*
	 * the lock to protect all of the above.
	 * the routines below consider this to be IRQ-safe
	 */
	spinlock_t lock;
	/* parent counter, used for hierarchical resource accounting */
	struct res_counter *parent;
};

#define RES_COUNTER_MAX ULLONG_MAX
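
/*
 * Illustrative sketch only: a controller that wants per-group accounting
 * simply embeds a res_counter in its own per-group state. The sample_cgroup
 * type and its fields below are made-up names, not part of this API.
 *
 *	struct sample_cgroup {
 *		struct res_counter res;
 *		// ... controller-specific fields ...
 *	};
 */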

/*
 * Helpers to interact with userspace:
 *
 * res_counter_read_u64() - returns the value of the specified member.
 * res_counter_read/_write - put/get the specified fields from the
 * res_counter struct to/from the user
 *
 * @counter: the counter in question
 * @member:  the field to work with (see RES_xxx below)
 * @buf:     the buffer to operate on,...
 * @nbytes:  its size...
 * @pos:     and the offset.
 */

u64 res_counter_read_u64(struct res_counter *counter, int member);

ssize_t res_counter_read(struct res_counter *counter, int member,
			 const char __user *buf, size_t nbytes, loff_t *pos,
			 int (*read_strategy)(unsigned long long val, char *s));

int res_counter_memparse_write_strategy(const char *buf,
					unsigned long long *res);
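
/*
 * Illustrative sketch of how a controller's cgroup file handlers might use
 * these helpers; the sample_* names are hypothetical, only the res_counter
 * calls are real:
 *
 *	static u64 sample_read_limit(struct res_counter *res)
 *	{
 *		return res_counter_read_u64(res, RES_LIMIT);
 *	}
 *
 *	static int sample_write_limit(struct res_counter *res, const char *buf)
 *	{
 *		unsigned long long val;
 *		int ret;
 *
 *		ret = res_counter_memparse_write_strategy(buf, &val);
 *		if (ret)
 *			return ret;
 *		return res_counter_set_limit(res, val);
 *	}
 */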

/*
 * the field descriptors, one for each member of res_counter
 */
enum {
	RES_USAGE,
	RES_MAX_USAGE,
	RES_LIMIT,
	RES_FAILCNT,
	RES_SOFT_LIMIT,
};

/*
 * helpers for accounting
 */

void res_counter_init(struct res_counter *counter, struct res_counter *parent);
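
/*
 * Illustrative sketch: wiring a group's counter to its parent's counter so
 * that charges are accounted hierarchically; sample_cgroup is a hypothetical
 * controller type with an embedded res_counter named "res".
 *
 *	static void sample_cgroup_init(struct sample_cgroup *sc,
 *				       struct sample_cgroup *parent)
 *	{
 *		res_counter_init(&sc->res, parent ? &parent->res : NULL);
 *	}
 */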

/*
 * charge - try to consume more resource.
 *
 * @counter: the counter
 * @val: the amount of the resource. each controller defines its own
 *       units, e.g. numbers, bytes, Kbytes, etc
 *
 * returns 0 on success and <0 if counter->usage would exceed
 * counter->limit
 *
 * charge_nofail works the same, except that it charges the resource
 * counter unconditionally, and returns < 0 if, after the current
 * charge, we are over the limit.
 */

int __must_check res_counter_charge(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
int res_counter_charge_nofail(struct res_counter *counter,
		unsigned long val, struct res_counter **limit_fail_at);
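
/*
 * Illustrative sketch: charging before actually handing out the resource,
 * and noting which counter in the hierarchy rejected the charge; sample_cg
 * and nr_bytes are hypothetical names.
 *
 *	struct res_counter *fail;
 *
 *	if (res_counter_charge(&sample_cg->res, nr_bytes, &fail)) {
 *		// 'fail' points at the counter that hit its limit;
 *		// the caller may reclaim and retry, or just give up:
 *		return -ENOMEM;
 *	}
 */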

/*
 * uncharge - tell that some portion of the resource is released
 *
 * @counter: the counter
 * @val: the amount of the resource
 *
 * these calls check for usage underflow and show a warning on the console
 *
 * returns the total charges still present in @counter.
 */

u64 res_counter_uncharge(struct res_counter *counter, unsigned long val);

u64 res_counter_uncharge_until(struct res_counter *counter,
			       struct res_counter *top,
			       unsigned long val);
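
/*
 * Illustrative sketch: every successful charge is expected to be paired with
 * an uncharge of the same amount once the resource is released, e.g. when
 * the hypothetical object tracked by sample_cg is freed:
 *
 *	res_counter_uncharge(&sample_cg->res, nr_bytes);
 */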

/**
 * res_counter_margin - calculate chargeable space of a counter
 * @cnt: the counter
 *
 * Returns the difference between the hard limit and the current usage
 * of resource counter @cnt.
 */
static inline unsigned long long res_counter_margin(struct res_counter *cnt)
{
	unsigned long long margin;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->limit > cnt->usage)
		margin = cnt->limit - cnt->usage;
	else
		margin = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return margin;
}
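
/*
 * Illustrative sketch: the margin tells a controller how much room remains
 * under the hard limit, e.g. before attempting a batched pre-charge.
 * SAMPLE_BATCH, sample_cg, fail and ret are hypothetical names.
 *
 *	// only try a batched pre-charge if there is room under the hard limit
 *	if (res_counter_margin(&sample_cg->res) >= SAMPLE_BATCH)
 *		ret = res_counter_charge(&sample_cg->res, SAMPLE_BATCH, &fail);
 */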

/**
 * res_counter_soft_limit_excess - get the amount of usage above the soft limit
 * @cnt: the counter
 *
 * Returns 0 if usage is less than or equal to the soft limit, otherwise
 * the difference between usage and the soft limit.
 */
static inline unsigned long long
res_counter_soft_limit_excess(struct res_counter *cnt)
{
	unsigned long long excess;
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= cnt->soft_limit)
		excess = 0;
	else
		excess = cnt->usage - cnt->soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
	return excess;
}
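
/*
 * Illustrative sketch: the soft limit excess can be used to decide which
 * groups should see reclaim pressure first; sample_reclaim_from() and
 * sample_cg are hypothetical names.
 *
 *	if (res_counter_soft_limit_excess(&sample_cg->res))
 *		sample_reclaim_from(sample_cg);
 */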

static inline void res_counter_reset_max(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->max_usage = cnt->usage;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline void res_counter_reset_failcnt(struct res_counter *cnt)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->failcnt = 0;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

static inline int res_counter_set_limit(struct res_counter *cnt,
		unsigned long long limit)
{
	unsigned long flags;
	int ret = -EBUSY;

	spin_lock_irqsave(&cnt->lock, flags);
	if (cnt->usage <= limit) {
		cnt->limit = limit;
		ret = 0;
	}
	spin_unlock_irqrestore(&cnt->lock, flags);
	return ret;
}
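
/*
 * Illustrative sketch: applying a new hard limit from a controller's write
 * handler; with the logic above, -EBUSY means current usage already exceeds
 * the requested limit, so the caller may shrink usage and retry.
 * sample_try_to_shrink(), sample_cg, new_limit and ret are hypothetical.
 *
 *	ret = res_counter_set_limit(&sample_cg->res, new_limit);
 *	if (ret == -EBUSY)
 *		sample_try_to_shrink(sample_cg);
 */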

static inline void
res_counter_set_soft_limit(struct res_counter *cnt,
			   unsigned long long soft_limit)
{
	unsigned long flags;

	spin_lock_irqsave(&cnt->lock, flags);
	cnt->soft_limit = soft_limit;
	spin_unlock_irqrestore(&cnt->lock, flags);
}

#endif /* __RES_COUNTER_H__ */