Changes by: antona
Update of /cvsroot/linux-ntfs/linux-ntfs/libntfs
In directory usw-pr-cvs1:/tmp/cvs-serv3974/libntfs
Modified Files:
attrib.c mft.c volume.c
Log Message:
User space conversion of locking complete. I settled for using simple spinlocks
and atomic variables. Instead of deadlocking/livelocking with spin_lock(), we
use spin_trylock() in a loop, letting go of the cpu between each attempt and
making a maximum of 100 iterations (after which we return the EDEADLK error
code); see the sketch at the end of this message. This is not the most
efficient approach, especially as we cannot have multiple readers, but it is
the simplest way to go about things.
We should now have (almost) all required helper functions for dealing with mft
entries implemented.
Next up is the file handling, then converting the whole project to use the new
code, and then I can finally get back to work on attribute searching...
Index: attrib.c
===================================================================
RCS file: /cvsroot/linux-ntfs/linux-ntfs/libntfs/attrib.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -U2 -r1.9 -r1.10
--- attrib.c 2001/04/05 20:14:45 1.9
+++ attrib.c 2001/04/08 01:58:29 1.10
@@ -682,9 +682,12 @@
return 0;
}
- errno = 0;
do {
- if (vcn < rl[++i].vcn)
- return rl[i-1].lcn + (vcn - rl[i-1].vcn);
- } while (rl[i].length);
+ if (vcn < rl[i+1].vcn) {
+ if (rl[i].lcn == (LCN)-1)
+ return (LCN)-1;
+ errno = 0;
+ return rl[i].lcn + (vcn - rl[i].vcn);
+ }
+ } while (rl[++i].length);
errno = ENOENT;
return 0;
Index: mft.c
===================================================================
RCS file: /cvsroot/linux-ntfs/linux-ntfs/libntfs/mft.c,v
retrieving revision 1.4
retrieving revision 1.5
diff -U2 -r1.4 -r1.5
--- mft.c 2001/04/05 20:14:45 1.4
+++ mft.c 2001/04/08 01:58:29 1.5
@@ -37,13 +37,93 @@
}
-void __set_mft_entry_dirty(mft_entry *mentry)
+int flush_mft_entry(const mft_entry *me)
{
- ntfs_volume *v = mentry->m_vol;
- down_write(&mentry->m_sem);
- down_write(&v->mft_entries_sem);
- list_add_tail(&mentry->m_dirty_list, &v->dirty_mft_entries);
+ VCN mref;
+ LCN lcn;
+ __u64 ofs;
+ MFT_RECORD *mrec;
+ __u32 i;
+ ntfs_volume *vol = me->m_vol;
+
+ if (!MftEntryMapped(me))
+ return 0;
+ if (MftEntryError(me))
+ return -EBADF;
+ mref = (VCN)(*(__u64*)me->m_ref & MFT_REFERENCE_MASK_CPU);
+ /* Size of mft record != size of cluster thus need to work with offsets
+ * into clusters. */
+ if (vol->mft_records_per_cluster) {
+ lcn = vcn_to_lcn(vol->mft_runlist,
+ mref / vol->mft_records_per_cluster);
+ /* FIXME: Change "/" above and "%" below to ">>" and "&"
+ respectively! */
+ ofs = mref % vol->mft_records_per_cluster;
+ ofs <<= vol->mft_record_size_bits;
+ } else {
+ lcn = vcn_to_lcn(vol->mft_runlist,
+ mref * vol->clusters_per_mft_record);
+ /* FIXME: Change "*" above to "<<"! */
+ ofs = 0;
+ }
+ if (!lcn) {
+ if (errno)
+ return -errno;
+ fprintf(stderr, "Linux-NTFS: BUG! flush_mft_entry() tried to "
+ "write to cluster zero!\n");
+ return -EPERM;
+ }
+ if (lcn == -1) {
+ /* It is sparse. This should never happen as we would have
+ * instantiated it when reading the entry. */
+ fprintf(stderr, "Linux-NTFS: BUG! flush_mft_entry() called on "
+ "sparse mft entry.\n");
+ return -ENOTSUP;
+ }
+ i = 0;
+ while (!spin_trylock(&me->m_rec_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_rec_lock))
+ return -EDEADLK;
+ i = mst_pwrite(vol->fd, me->m_rec, vol->mft_record_size,
+ (lcn << vol->cluster_size_bits) + ofs);
+ spin_unlock(&me->m_rec_lock);
+ if (i != vol->mft_record_size) {
+ perror("Linux-NTFS: flush_mft_entry() mst_write() failed");
+ return -EIO;
+ }
+ i = 0;
+ while (!spin_trylock(&me->m_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_lock))
+ return -EDEADLK;
+ if (!list_empty(&me->m_dirty_list)) {
+ vol->nr_dirty_mft_entries--;
+ list_del_init(&me->m_dirty_list);
+ }
+ ClearMftEntryDirty(me);
+ spin_unlock(&me->m_lock);
+ return 0;
+err_ret_errno:
+ return -errno;
+}
+
+BOOL __set_mft_entry_dirty(mft_entry *me)
+{
+ ntfs_volume *v = me->m_vol;
+ int i = 0;
+ while (!spin_trylock(&me->m_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_lock))
+ return FALSE;
+ while (!spin_trylock(&v->dirty_mft_entries_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&v->dirty_mft_entries_lock)) {
+ spin_unlock(&me->m_lock);
+ return FALSE;
+ }
+ list_add_tail(&me->m_dirty_list, &v->dirty_mft_entries);
v->nr_dirty_mft_entries++;
- up_write(&v->mft_entries_sem);
- up_write(&mentry->m_sem);
+ spin_unlock(&v->dirty_mft_entries_lock);
+ spin_unlock(&me->m_lock);
+ return TRUE;
}
@@ -54,6 +134,7 @@
return NULL;
memset(me, 0, sizeof(mft_entry));
- init_rwsem(&me->m_rec_sem);
- init_rwsem(&me->m_sem);
+ spin_lock_init(&me->m_lock);
+ spin_lock_init(&me->m_rec_lock);
+ atomic_set(&me->m_count, 0);
INIT_LIST_HEAD(me->m_list);
INIT_LIST_HEAD(me->m_dirty_list);
@@ -63,13 +144,8 @@
__inline__ int __free_mft_entry(mft_entry *me)
{
- down_read(&me->m_sem);
- if (atomic_read(me->count) || MftEntryDirty(me) || MftEntryMapped(me)) {
- up_read(&me->m_sem);
+ if (spin_is_locked(&me->m_lock) || atomic_read(&me->m_count) ||
+ MftEntryDirty(me) || MftEntryMapped(me)) {
return -EBUSY;
}
- up_read(&me->m_sem);
- down_write(&me->m_sem);
- /* Just making sure nobody else is holding the semaphore... */
- down_up(&me->m_sem);
free(me);
return 0;
@@ -105,19 +181,42 @@
ofs = 0;
}
- if (lcn < 0)
- goto err_ret_errno;
- down_write(&me->m_mrec_sem);
+ if (!lcn) {
+ if (errno)
+ return -errno;
+ fprintf(stderr, "Linux-NTFS: BUG! map_mft_entry() tried to "
+ "read from cluster zero!\n");
+ return -EPERM;
+ }
+ if (lcn == -1) {
+ /* It is sparse. Argh! We now need to instantiate it. */
+ fprintf(stderr, "Linux-NTFS: map_mft_entry() encountered "
+ "sparse mft entry. Not supported yet.\n"
+ "Please email Anton Altaparmakov "
+ "<ai...@ca...> and tell me you got this\n"
+ "error, so I know I have to implement it.\n");
+ return -ENOTSUP;
+ }
+ i = 0;
+ while (!spin_trylock(&me->m_rec_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_rec_lock))
+ return -EDEADLK;
i = mst_pread(vol->fd, (__u8*)mrec, vol->mft_record_size, 1,
(lcn << vol->cluster_size_bits) + ofs);
- up_write(&me->m_mrec_sem);
+ spin_unlock(&me->m_rec_lock);
if (i != 1) {
+ SetMftEntryError(me);
if (i == -1)
goto err_ret_errno;
return -EIO;
}
- down_write(&me->m_sem);
+ i = 0;
+ while (!spin_trylock(&me->m_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_lock))
+ return -EDEADLK;
me->m_rec = mrec;
SetMftEntryMapped(me);
- up_write(&me->m_sem);
+ spin_unlock(&me->m_lock);
return 0;
err_ret_errno:
@@ -127,79 +226,47 @@
int __unmap_mft_entry(mft_entry *me)
{
- int err;
+ int i;
if (atomic_read(&me->m_count))
return -EBUSY;
- down_write(&me->m_sem);
- if (!test_and_clear_bit(ME_mapped, &me->m_flags)) {
- up_write(&me->m_sem);
+ if (!test_and_clear_bit(ME_mapped, &me->m_flags))
return 0;
+ if (MftEntryDirty(me) && (i = flush_mft_entry(me)))
+ goto err_map_ret;
+ i = 0;
+ while (!spin_trylock(&me->m_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_lock)) {
+ i = -EDEADLK;
+ goto err_map_ret;
}
if (MftEntryDirty(me)) {
- if (err = flush_mft_entry(me))
- goto err_up_ret;
- if (MftEntryDirty(me)) {
- err = -EBUSY;
- goto err_up_ret;
- }
+ i = -EBUSY;
+ goto err_unl_map_ret;
}
- down_write(&me->m_rec_sem);
- free(me->m_rec);
- up_write(&me->m_rec_sem);
- up_write(&me->m_sem);
- return 0;
-err_up_ret:
- up_write(&me->m_sem);
- return err;
-}
-
-int flush_mft_entry(const mft_entry *me)
-{
- VCN mref;
- LCN lcn;
- __u64 ofs;
- MFT_RECORD *mrec;
- __u32 i;
-
- if (!MftEntryMapped(me))
- return 0;
- mref = (VCN)(*(__u64*)me->m_ref & MFT_REFERENCE_MASK_CPU);
- /* Size of mft record != size of cluster thus need to work with offsets
- * into clusters. */
- if (vol->mft_records_per_cluster) {
- lcn = vcn_to_lcn(vol->mft_runlist,
- mref / vol->mft_records_per_cluster);
- /* FIXME: Change "/" above and "%" below to ">>" and "&"
- respectively! */
- ofs = mref % vol->mft_records_per_cluster;
- ofs <<= vol->mft_record_size_bits;
- } else {
- lcn = vcn_to_lcn(vol->mft_runlist,
- m * vol->clusters_per_mft_record);
- /* FIXME: Change "*" above to "<<"! */
- ofs = 0;
- }
- if (lcn < 0)
- return -errno;
- down_write(&me->m_rec_sem);
- i = mst_pwrite(vol->fd, me->m_rec, vol->mft_record_size,
- (lcn << vol->cluster_size_bits) + ofs);
- up_write(&me->m_rec_sem);
- if (i != vol->mft_record_size) {
- if (i == -1)
- goto err_ret_errno;
- return -EIO;
+ i = 0;
+ while (!spin_trylock(&me->m_rec_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_rec_lock)) {
+ i = -EDEADLK;
+ goto err_unl_map_ret;
}
- ClearMftEntryDirty(me);
+ free(me->m_rec);
+ spin_unlock(&me->m_rec_lock);
+ spin_unlock(&me->m_lock);
return 0;
-err_ret_errno:
- return -errno;
+err_unl_map_ret:
+ spin_unlock(&me->m_lock);
+err_map_ret:
+ SetMftEntryMapped(me);
+ return i;
}
-int insert_mft_entry(ntfs_volume *vol, mft_entry *me, MFT_REFERENCE mref,
- MFT_RECORD *mrec, int dirty)
+int insert_mft_entry(ntfs_volume *vol, mft_entry *me, const MFT_REFERENCE mref,
+ const MFT_RECORD *mrec, const BOOL dirty)
{
struct list_head *tmp;
mft_entry *m;
+ int i;
*(__u64*)&me->m_ref = mref;
@@ -208,7 +275,13 @@
me->m_rec = mrec;
SetMftEntryMapped(me);
+ if (dirty)
+ SetMftEntryDirty(me);
} else
__map_mft_entry(me);
- down_write(&vol->mft_entries_sem);
+ i = 0;
+ while (!spin_trylock(&vol->mft_entries_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&vol->mft_entries_lock))
+ return -EDEADLK;
list_for_each(tmp, &vol->mft_entries) {
m = list_entry(tmp, mft_entry, m_list);
@@ -216,21 +289,42 @@
(__u64)me->m_ref & MFT_REFERENCE_MASK_CPU)
continue;
- if (m->m_ref == me->m_ref) {
- up_write(&vol->mft_entries_sem);
- return -EEXIST; /* BUG()! */
+ if (((__u64)m->m_ref & MFT_REFERENCE_MASK_CPU) >
+ ((__u64)me->m_ref & MFT_REFERENCE_MASK_CPU)) {
+ /* Insert the mft entry in the correct position. */
+ __list_add(&me->m_list, tmp->prev,
+ tmp);
+ break;
}
- __list_add(&me->m_list, &tmp->m_list->prev, &tmp->m_list);
- break;
+ spin_unlock(&vol->mft_entries_lock);
+ fprintf(stderr, "Linux-NTFS: BUG! insert_mft_entry(): mft "
+ "entry already present in volume.\n");
+ if (m->m_ref.sequence_number != me->m_ref.sequence_number)
+ fprintf(stderr, "On top of this, the sequence numbers "
+ "are mismatched.\n");
+ return -EEXIST;
}
vol->nr_mft_entries++;
- if (mrec && dirty) {
- down_write(&me->m_sem);
- SetMftEntryDirty(me);
- list_add_tail(&me->m_dirty_list, &vol->dirty_mft_entries);
- up_write(&me->m_sem);
- vol->nr_dirty_mft_entries++;
+ spin_unlock(&vol->mft_entries_lock);
+ if (!MftEntryDirty(me))
+ return 0;
+ i = 0;
+ while (!spin_trylock(&vol->dirty_mft_entries_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&vol->dirty_mft_entries_lock))
+ goto try_to_clean_ret;
+ i = 0;
+ while (!spin_trylock(&me->m_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_lock)) {
+ spin_unlock(&vol->dirty_mft_entries_lock);
+ goto try_to_clean_ret;
}
- up_write(&vol->mft_entries_sem);
+ list_add_tail(&me->m_dirty_list, &vol->dirty_mft_entries);
+ spin_unlock(&me->m_lock);
+ vol->nr_dirty_mft_entries++;
+ spin_unlock(&vol->dirty_mft_entries_lock);
return 0;
+try_to_clean_ret:
+ return flush_mft_entry(me);
}
@@ -238,27 +332,48 @@
{
ntfs_volume *vol = me->m_vol;
+ int i, err = 0;
- if (atomic_read(me->count))
- return -EBUSY;
if (MftEntryDirty(me))
- flush_mft_entry(me);
- down_write(&vol->mft_entries_sem);
+ err = flush_mft_entry(me);
+ i = 0;
+ while (!spin_trylock(&me->m_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_lock))
+ return -EDEADLK;
+ if (MftEntryDirty(me)) {
+ spin_unlock(&me->m_lock);
+ return err < 0 ? err : -EBUSY;
+ }
if (atomic_read(me->count)) {
- up_write(&vol->mft_entries_sem);
+ spin_unlock(&me->m_lock);
return -EBUSY;
}
+ i = 0;
+ while (!spin_trylock(&me->m_rec_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&me->m_rec_lock)) {
+ spin_unlock(&me->m_lock);
+ return -EDEADLK;
+ }
+ i = 0;
+ while (!spin_trylock(&vol->mft_entries_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&vol->mft_entries_lock)) {
+ spin_unlock(&me->m_rec_lock);
+ spin_unlock(&me->m_lock);
+ return -EDEADLK;
+ }
list_del(&me->m_list);
vol->nr_mft_entries--;
- up_write(&vol->mft_entries_sem);
- down_write(&me->m_rec_sem);
+ spin_unlock(&vol->mft_entries_lock);
free(me->m_rec);
- up_write(&me->m_rec_sem);
- down_write(&me->m_sem);
- up_write(&me->m_sem);
+ ClearMftEntryMapped(me);
+ spin_unlock(&me->m_rec_lock);
+ spin_unlock(&me->m_lock);
free(me);
return 0;
}
-ntfs_file *ntfs_open_by_mref(const ntfs_volume *vol, const MFT_REFERENCE *mref)
+ntfs_file *ntfs_open_by_mref(ntfs_volume *vol, const MFT_REFERENCE *mref)
{
errno = -ENOTSUP;
Index: volume.c
===================================================================
RCS file: /cvsroot/linux-ntfs/linux-ntfs/libntfs/volume.c,v
retrieving revision 1.9
retrieving revision 1.10
diff -U2 -r1.9 -r1.10
--- volume.c 2001/04/05 20:14:45 1.9
+++ volume.c 2001/04/08 01:58:29 1.10
@@ -27,4 +27,5 @@
#include <unistd.h>
#include <linux/types.h>
+#include <linux/list.h>
#include "endians.h"
@@ -36,4 +37,25 @@
#include "support.h"
+BOOL ntfs_sync_volume(ntfs_volume *vol)
+{
+ struct list_head *tmp;
+ int i, nr_err;
+
+ i = nr_err = 0;
+ while (!spin_trylock(&vol->dirty_mft_entries_lock) && i++ < 100)
+ sched_yield();
+ if (!spin_is_locked(&vol->dirty_mft_entries_lock)) {
+ errno = EBUSY;
+ return FALSE;
+ }
+ list_for_each(tmp, &vol->dirty_mft_entries)
+ if (flush_mft_entry(list_entry(tmp, mft_entry, m_dirty_list)))
+ nr_err++;
+ spin_unlock(&vol->dirty_mft_entries_lock);
+ if (!nr_err)
+ return TRUE;
+ errno = EBUSY;
+ return FALSE;
+}
+
ntfs_volume *ntfs_mount(const char *name)
{
@@ -189,8 +211,9 @@
INIT_LIST_HEAD(vol->closed_files);
vol->max_files = 1000;
- init_rwsem(&vol->files_sem);
+ spin_lock_init(&vol->files_lock);
INIT_LIST_HEAD(vol->mft_entries);
INIT_LIST_HEAD(vol->dirty_mft_entries);
- init_rwsem(&vol->mft_entries_sem);
+ spin_lock_init(&vol->mft_entries_lock);
+ spin_lock_init(&vol->dirty_mft_entries_lock);
/* Note: No locking needed until we return from this function. */
/*if (!(me = __allocate_mft_entry())) {
@@ -510,9 +533,9 @@
}
-int ntfs_umount(ntfs_volume *vol, const int force)
+BOOL ntfs_umount(ntfs_volume *vol, const int force)
{
if (!vol) {
errno = EINVAL;
- return 0;
+ return FALSE;
}
/* FIXME: Do the cleanup. */
@@ -532,5 +555,5 @@
free(vol->upcase);
free(vol);
- return 1;
+ return TRUE;
}