#include <asm/uaccess.h>
#include <linux/seq_file.h>
#include <linux/debugfs.h>
-#include <linux/module.h>
-#include <linux/kallsyms.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include "gfs2.h"
#include "incore.h"
int hash; /* hash bucket index */
struct gfs2_sbd *sdp; /* incore superblock */
struct gfs2_glock *gl; /* current glock struct */
- struct hlist_head *hb_list; /* current hash bucket ptr */
struct seq_file *seq; /* sequence file for debugfs */
char string[512]; /* scratch space */
};
static void gfs2_glock_drop_th(struct gfs2_glock *gl);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
+static struct task_struct *scand_process;
+static unsigned int scand_secs = 5;
#define GFS2_GL_HASH_SHIFT 15
#define GFS2_GL_HASH_SIZE (1 << GFS2_GL_HASH_SHIFT)
static void gfs2_holder_wake(struct gfs2_holder *gh)
{
clear_bit(HIF_WAIT, &gh->gh_iflags);
- smp_mb();
+ smp_mb__after_clear_bit();
wake_up_bit(&gh->gh_iflags, HIF_WAIT);
}
-static int holder_wait(void *word)
+static int just_schedule(void *word)
{
schedule();
return 0;
static void wait_on_holder(struct gfs2_holder *gh)
{
might_sleep();
- wait_on_bit(&gh->gh_iflags, HIF_WAIT, holder_wait, TASK_UNINTERRUPTIBLE);
+ wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
+}
+
+static void gfs2_demote_wake(struct gfs2_glock *gl)
+{
+ clear_bit(GLF_DEMOTE, &gl->gl_flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+static void wait_on_demote(struct gfs2_glock *gl)
+{
+ might_sleep();
+ wait_on_bit(&gl->gl_flags, GLF_DEMOTE, just_schedule, TASK_UNINTERRUPTIBLE);
}
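The gfs2_demote_wake()/wait_on_demote() pair added above follows the standard wait_on_bit() protocol: the waker clears the flag bit, issues smp_mb__after_clear_bit() so the cleared bit is visible before the waitqueue is examined, and only then calls wake_up_bit(); the waiter sleeps in wait_on_bit() with a trivial action callback. A minimal sketch of that generic pattern, using hypothetical names (MY_BIT, my_flags, my_wait(), my_wake()) rather than anything from this patch:

static int my_bit_wait(void *word)
{
	schedule();			/* sleep until wake_up_bit() */
	return 0;
}

static void my_wait(unsigned long *my_flags)
{
	/* block while the (hypothetical) MY_BIT flag is set */
	wait_on_bit(my_flags, MY_BIT, my_bit_wait, TASK_UNINTERRUPTIBLE);
}

static void my_wake(unsigned long *my_flags)
{
	clear_bit(MY_BIT, my_flags);
	smp_mb__after_clear_bit();	/* order the clear before the waiter check */
	wake_up_bit(my_flags, MY_BIT);
}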
/**
if (gl->gl_state == gl->gl_demote_state ||
gl->gl_state == LM_ST_UNLOCKED) {
- clear_bit(GLF_DEMOTE, &gl->gl_flags);
+ gfs2_demote_wake(gl);
return 0;
}
set_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
if (gl->gl_demote_state == LM_ST_UNLOCKED ||
- gl->gl_state != LM_ST_EXCLUSIVE)
+ gl->gl_state != LM_ST_EXCLUSIVE) {
+ spin_unlock(&gl->gl_spin);
gfs2_glock_drop_th(gl);
- else
+ } else {
+ spin_unlock(&gl->gl_spin);
gfs2_glock_xmote_th(gl, NULL);
+ }
spin_lock(&gl->gl_spin);
return 0;
* practice: LM_ST_SHARED and LM_ST_UNLOCKED
*/
-static void handle_callback(struct gfs2_glock *gl, unsigned int state)
+static void handle_callback(struct gfs2_glock *gl, unsigned int state, int remote)
{
spin_lock(&gl->gl_spin);
if (test_and_set_bit(GLF_DEMOTE, &gl->gl_flags) == 0) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
- } else if (gl->gl_demote_state != LM_ST_UNLOCKED) {
- gl->gl_demote_state = state;
+ if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
+ gl->gl_object) {
+ struct inode *inode = igrab(gl->gl_object);
+ spin_unlock(&gl->gl_spin);
+ if (inode) {
+ d_prune_aliases(inode);
+ iput(inode);
+ }
+ return;
+ }
+ } else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
+ gl->gl_demote_state != state) {
+ gl->gl_demote_state = LM_ST_UNLOCKED;
}
spin_unlock(&gl->gl_spin);
}
if (!gh) {
gl->gl_stamp = jiffies;
- if (ret & LM_OUT_CANCELED)
+ if (ret & LM_OUT_CANCELED) {
op_done = 0;
- else
- clear_bit(GLF_DEMOTE, &gl->gl_flags);
+ } else {
+ spin_lock(&gl->gl_spin);
+ if (gl->gl_state != gl->gl_demote_state) {
+ gl->gl_req_bh = NULL;
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_drop_th(gl);
+ gfs2_glock_put(gl);
+ return;
+ }
+ gfs2_demote_wake(gl);
+ spin_unlock(&gl->gl_spin);
+ }
} else {
spin_lock(&gl->gl_spin);
list_del_init(&gh->gh_list);
*
*/
-void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
+static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
int flags = gh ? gh->gh_flags : 0;
gfs2_assert_warn(sdp, !ret);
state_change(gl, LM_ST_UNLOCKED);
- clear_bit(GLF_DEMOTE, &gl->gl_flags);
+ gfs2_demote_wake(gl);
if (glops->go_inval)
glops->go_inval(gl, DIO_METADATA);
const struct gfs2_glock_operations *glops = gl->gl_ops;
if (gh->gh_flags & GL_NOCACHE)
- handle_callback(gl, LM_ST_UNLOCKED);
+ handle_callback(gl, LM_ST_UNLOCKED, 0);
gfs2_glmutex_lock(gl);
spin_unlock(&gl->gl_spin);
}
+void gfs2_glock_dq_wait(struct gfs2_holder *gh)
+{
+ struct gfs2_glock *gl = gh->gh_gl;
+ gfs2_glock_dq(gh);
+ wait_on_demote(gl);
+}
+
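gfs2_glock_dq_wait() gives callers a way to drop a holder and then block until any pending demote of the glock has actually completed, rather than returning while GLF_DEMOTE is still set. A hedged usage sketch; the caller and the pairing with gfs2_holder_uninit() are illustrative, not taken from this patch:

static void example_put_glock(struct gfs2_holder *gh)
{
	gfs2_glock_dq_wait(gh);		/* dequeue, then wait_on_demote() */
	gfs2_holder_uninit(gh);		/* drop the holder's glock reference */
}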
/**
* gfs2_glock_dq_uninit - dequeue a holder from a glock and uninitialize it
* @gh: the holder structure
* @num_gh: the number of structures
* @ghs: an array of struct gfs2_holder structures
*
- * Figure out how big an impact this function has. Either:
- * 1) Replace this code with code that calls gfs2_glock_prefetch()
- * 2) Forget async stuff and just call nq_m_sync()
- * 3) Leave it like it is
*
* Returns: 0 on success (all glocks acquired),
* errno on failure (no glocks acquired)
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs)
{
- int *e;
- unsigned int x;
- int borked = 0, serious = 0;
+ struct gfs2_holder *tmp[4];
+ struct gfs2_holder **pph = tmp;
int error = 0;
- if (!num_gh)
+ switch(num_gh) {
+ case 0:
return 0;
-
- if (num_gh == 1) {
+ case 1:
ghs->gh_flags &= ~(LM_FLAG_TRY | GL_ASYNC);
return gfs2_glock_nq(ghs);
- }
-
- e = kcalloc(num_gh, sizeof(struct gfs2_holder *), GFP_KERNEL);
- if (!e)
- return -ENOMEM;
-
- for (x = 0; x < num_gh; x++) {
- ghs[x].gh_flags |= LM_FLAG_TRY | GL_ASYNC;
- error = gfs2_glock_nq(&ghs[x]);
- if (error) {
- borked = 1;
- serious = error;
- num_gh = x;
+ default:
+ if (num_gh <= 4)
break;
- }
- }
-
- for (x = 0; x < num_gh; x++) {
- error = e[x] = glock_wait_internal(&ghs[x]);
- if (error) {
- borked = 1;
- if (error != GLR_TRYFAILED && error != GLR_CANCELED)
- serious = error;
- }
- }
-
- if (!borked) {
- kfree(e);
- return 0;
+ pph = kmalloc(num_gh * sizeof(struct gfs2_holder *), GFP_NOFS);
+ if (!pph)
+ return -ENOMEM;
}
- for (x = 0; x < num_gh; x++)
- if (!e[x])
- gfs2_glock_dq(&ghs[x]);
-
- if (serious)
- error = serious;
- else {
- for (x = 0; x < num_gh; x++)
- gfs2_holder_reinit(ghs[x].gh_state, ghs[x].gh_flags,
- &ghs[x]);
- error = nq_m_sync(num_gh, ghs, (struct gfs2_holder **)e);
- }
+ error = nq_m_sync(num_gh, ghs, pph);
- kfree(e);
+ if (pph != tmp)
+ kfree(pph);
return error;
}
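With this rewrite gfs2_glock_nq_m() acquires every glock synchronously through nq_m_sync(): for num_gh <= 4 the on-stack tmp[] array is used, and only larger batches allocate a pointer array with GFP_NOFS. A hypothetical caller, sketched under the assumption of two already-existing glocks gl_a and gl_b (names and error handling are illustrative only):

static int example_lock_pair(struct gfs2_glock *gl_a, struct gfs2_glock *gl_b)
{
	struct gfs2_holder ghs[2];
	int error;

	gfs2_holder_init(gl_a, LM_ST_EXCLUSIVE, 0, &ghs[0]);
	gfs2_holder_init(gl_b, LM_ST_EXCLUSIVE, 0, &ghs[1]);

	error = gfs2_glock_nq_m(2, ghs);	/* num_gh <= 4: on-stack tmp[], no kmalloc */
	if (!error)
		gfs2_glock_dq_m(2, ghs);

	gfs2_holder_uninit(&ghs[0]);
	gfs2_holder_uninit(&ghs[1]);
	return error;
}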
if (!gl)
return;
- handle_callback(gl, state);
+ handle_callback(gl, state, 1);
spin_lock(&gl->gl_spin);
run_queue(gl);
if (gfs2_glmutex_trylock(gl)) {
if (list_empty(&gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED);
+ handle_callback(gl, LM_ST_UNLOCKED, 0);
gfs2_glmutex_unlock(gl);
}
goto out;
gl = list_entry(head->first, struct gfs2_glock, gl_list);
while(1) {
- if (gl->gl_sbd == sdp) {
+ if (!sdp || gl->gl_sbd == sdp) {
gfs2_glock_hold(gl);
read_unlock(gl_lock_addr(hash));
if (prev)
read_unlock(gl_lock_addr(hash));
if (prev)
gfs2_glock_put(prev);
+ cond_resched();
return has_entries;
}
gfs2_glock_schedule_for_reclaim(gl);
}
-/**
- * gfs2_scand_internal - Look for glocks and inodes to toss from memory
- * @sdp: the filesystem
- *
- */
-
-void gfs2_scand_internal(struct gfs2_sbd *sdp)
-{
- unsigned int x;
-
- for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
- examine_bucket(scan_glock, sdp, x);
-}
-
/**
* clear_glock - look at a glock and see if we can free it from glock cache
* @gl: the glock to look at
if (gfs2_glmutex_trylock(gl)) {
if (list_empty(&gl->gl_holders) &&
gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED);
+ handle_callback(gl, LM_ST_UNLOCKED, 0);
gfs2_glmutex_unlock(gl);
}
}
print_dbg(gi, " Inode:\n");
print_dbg(gi, " num = %llu/%llu\n",
- ip->i_num.no_formal_ino, ip->i_num.no_addr);
+ (unsigned long long)ip->i_no_formal_ino,
+ (unsigned long long)ip->i_no_addr);
print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
print_dbg(gi, " i_flags =");
for (x = 0; x < 32; x++)
}
if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
- gl->gl_demote_state,
- (u64)(jiffies - gl->gl_demote_time)*(1000000/HZ));
+ gl->gl_demote_state, (unsigned long long)
+ (jiffies - gl->gl_demote_time)*(1000000/HZ));
}
if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
return error;
}
+/**
+ * gfs2_scand - Look for cached glocks and inodes to toss from memory
+ * @data: unused; the daemon passes a NULL sdp to examine_bucket() so that
+ *        glocks from every mounted filesystem are scanned
+ *
+ * A single instance of this daemon runs, finding candidates to add to
+ * sd_reclaim_list. See gfs2_glockd()
+ */
+
+static int gfs2_scand(void *data)
+{
+ unsigned x;
+ unsigned delay;
+
+ while (!kthread_should_stop()) {
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(scan_glock, NULL, x);
+ if (freezing(current))
+ refrigerator();
+ delay = scand_secs;
+ if (delay < 1)
+ delay = 1;
+ schedule_timeout_interruptible(delay * HZ);
+ }
+
+ return 0;
+}
+
int __init gfs2_glock_init(void)
{
unsigned i;
rwlock_init(&gl_hash_locks[i]);
}
#endif
+
+ scand_process = kthread_run(gfs2_scand, NULL, "gfs2_scand");
+ if (IS_ERR(scand_process))
+ return PTR_ERR(scand_process);
+
return 0;
}
+void gfs2_glock_exit(void)
+{
+ kthread_stop(scand_process);
+}
+
+module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
+MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
+
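Because scand_secs is declared with S_IWUSR, the scan interval can be tuned at runtime when gfs2 is built as a module: module_param() exposes it under /sys/module/gfs2/parameters/scand_secs, and since gfs2_scand() re-reads scand_secs on every pass (clamping it to at least one second), a newly written value takes effect on the next iteration.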
static int gfs2_glock_iter_next(struct glock_iter *gi)
{
+ struct gfs2_glock *gl;
+
read_lock(gl_lock_addr(gi->hash));
- while (1) {
- if (!gi->hb_list) { /* If we don't have a hash bucket yet */
- gi->hb_list = &gl_hash_table[gi->hash].hb_list;
- if (hlist_empty(gi->hb_list)) {
- read_unlock(gl_lock_addr(gi->hash));
- gi->hash++;
- read_lock(gl_lock_addr(gi->hash));
- gi->hb_list = NULL;
- if (gi->hash >= GFS2_GL_HASH_SIZE) {
- read_unlock(gl_lock_addr(gi->hash));
- return 1;
- }
- else
- continue;
- }
- if (!hlist_empty(gi->hb_list)) {
- gi->gl = list_entry(gi->hb_list->first,
- struct gfs2_glock,
- gl_list);
- }
- } else {
- if (gi->gl->gl_list.next == NULL) {
- read_unlock(gl_lock_addr(gi->hash));
- gi->hash++;
- read_lock(gl_lock_addr(gi->hash));
- gi->hb_list = NULL;
- continue;
- }
- gi->gl = list_entry(gi->gl->gl_list.next,
- struct gfs2_glock, gl_list);
- }
+ gl = gi->gl;
+ if (gl) {
+ gi->gl = hlist_entry(gl->gl_list.next, struct gfs2_glock,
+ gl_list);
if (gi->gl)
- break;
+ gfs2_glock_hold(gi->gl);
}
read_unlock(gl_lock_addr(gi->hash));
+ if (gl)
+ gfs2_glock_put(gl);
+
+ while(gi->gl == NULL) {
+ gi->hash++;
+ if (gi->hash >= GFS2_GL_HASH_SIZE)
+ return 1;
+ read_lock(gl_lock_addr(gi->hash));
+ gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
+ struct gfs2_glock, gl_list);
+ if (gi->gl)
+ gfs2_glock_hold(gi->gl);
+ read_unlock(gl_lock_addr(gi->hash));
+ }
return 0;
}
static void gfs2_glock_iter_free(struct glock_iter *gi)
{
+ if (gi->gl)
+ gfs2_glock_put(gi->gl);
kfree(gi);
}
gi->sdp = sdp;
gi->hash = 0;
- gi->gl = NULL;
- gi->hb_list = NULL;
gi->seq = NULL;
memset(gi->string, 0, sizeof(gi->string));
- if (gfs2_glock_iter_next(gi)) {
+ read_lock(gl_lock_addr(gi->hash));
+ gi->gl = hlist_entry(gl_hash_table[gi->hash].hb_list.first,
+ struct gfs2_glock, gl_list);
+ if (gi->gl)
+ gfs2_glock_hold(gi->gl);
+ read_unlock(gl_lock_addr(gi->hash));
+
+ if (!gi->gl && gfs2_glock_iter_next(gi)) {
gfs2_glock_iter_free(gi);
return NULL;
}
if (!gi)
return NULL;
- while (n--) {
+ while(n--) {
if (gfs2_glock_iter_next(gi)) {
gfs2_glock_iter_free(gi);
return NULL;
static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
{
- /* nothing for now */
+ struct glock_iter *gi = iter_ptr;
+ if (gi)
+ gfs2_glock_iter_free(gi);
}
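The start/next/stop callbacks plug into the kernel's seq_file machinery; stop now frees the iterator so an aborted or partial read no longer leaks the glock_iter (or the glock reference it may hold). For context, the usual way a table like the gfs2_glock_seq_ops defined just below is wired to a debugfs file is via seq_open() in the file's open routine. This is only a sketch of that generic pattern; the helper names are illustrative, and gfs2's actual debugfs open/file_operations code is not part of this excerpt:

static int example_debugfs_open(struct inode *inode, struct file *file)
{
	int ret = seq_open(file, &gfs2_glock_seq_ops);
	if (!ret) {
		struct seq_file *seq = file->private_data;
		seq->private = inode->i_private;	/* data passed to debugfs_create_file() */
	}
	return ret;
}

static const struct file_operations example_debug_fops = {
	.owner	 = THIS_MODULE,
	.open	 = example_debugfs_open,
	.read	 = seq_read,
	.llseek	 = seq_lseek,
	.release = seq_release,
};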
static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
return 0;
}
-static struct seq_operations gfs2_glock_seq_ops = {
+static const struct seq_operations gfs2_glock_seq_ops = {
.start = gfs2_glock_seq_start,
.next = gfs2_glock_seq_next,
.stop = gfs2_glock_seq_stop,