From: <ga...@us...> - 2009-01-03 21:05:23
Revision: 4821
http://jnode.svn.sourceforge.net/jnode/?rev=4821&view=rev
Author: galatnm
Date: 2009-01-03 21:05:14 +0000 (Sat, 03 Jan 2009)
Log Message:
-----------
Intermediate checkins for HFS+ formatter and read/write support.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusObject.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
Added Paths:
-----------
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -9,6 +9,7 @@
import org.jnode.fs.ReadOnlyFileSystemException;
import org.jnode.fs.hfsplus.catalog.CatalogFolder;
import org.jnode.fs.hfsplus.catalog.CatalogKey;
+import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.spi.AbstractFSDirectory;
import org.jnode.fs.spi.FSEntryTable;
@@ -32,7 +33,20 @@
@Override
protected final FSEntry createDirectoryEntry(final String name) throws IOException {
- throw new ReadOnlyFileSystemException();
+ if(!canWrite()){
+ throw new ReadOnlyFileSystemException();
+ }
+ Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
+ CatalogFolder newFolder = new CatalogFolder(new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ log.debug("New catalog folder :\n" + newFolder.toString());
+ CatalogKey key = new CatalogKey(this.folder.getFolderId(),new HFSUnicodeString(name));
+ log.debug("New catalog key :\n" + key.toString());
+ LeafRecord folderRecord = new LeafRecord(key,newFolder.getBytes());
+ log.debug("New record folder :\n" + folderRecord.toString());
+ HFSPlusEntry newEntry = new HFSPlusEntry( (HfsPlusFileSystem) getFileSystem(),null,this,name,folderRecord);
+ volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
+ log.debug("New volume header :\n" + volumeHeader.toString());
+ return newEntry;
}
@Override
@@ -54,7 +68,7 @@
rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
String name = ((CatalogKey) rec.getKey()).getNodeName().getUnicodeString();
HFSPlusEntry e =
- new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, null, name, rec);
+ new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, rec);
pathList.add(e);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -8,24 +8,53 @@
private static final int EXTENT_OFFSET = 16;
private byte[] data;
-
+
+ /**
+ *
+ * @param src
+ * @param offset
+ */
public HFSPlusForkData(final byte[] src, final int offset) {
data = new byte[FORK_DATA_LENGTH];
System.arraycopy(src, offset, data, 0, FORK_DATA_LENGTH);
}
+
+ /**
+ *
+ * Create a new empty fork data object.
+ *
+ * @param totalSize
+ * @param clumpSize
+ * @param totalBock
+ */
+ public HFSPlusForkData(){
+ data = new byte[FORK_DATA_LENGTH];
+ }
public final long getTotalSize() {
return BigEndian.getInt64(data, 0);
}
+ public final void setTotalSize(long totalSize){
+ BigEndian.setInt64(data, 0, totalSize);
+ }
+
public final int getClumpSize() {
return BigEndian.getInt32(data, 8);
}
+ public final void setClumpSize(int clumpSize){
+ BigEndian.setInt32(data, 8, clumpSize);
+ }
+
public final int getTotalBlocks() {
return BigEndian.getInt32(data, 12);
}
+ public final void setTotalBlocks(int totalBlock){
+ BigEndian.setInt32(data, 12, totalBlock);
+ }
+
public final ExtentDescriptor[] getExtents() {
ExtentDescriptor[] list = new ExtentDescriptor[8];
for (int i = 0; i < 8; i++) {
@@ -34,6 +63,15 @@
}
return list;
}
+
+ public final void setExtentDescriptor(int position, ExtentDescriptor desc){
+ int offset = EXTENT_OFFSET + (position * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
+ System.arraycopy(desc.getBytes(), 0, data, offset, ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
+ }
+
+ public byte[] getBytes(){
+ return data;
+ }
public final String toString() {
StringBuffer s = new StringBuffer();
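The new setters above write into the same fixed big-endian layout the existing getters read: total size as a 64-bit value at offset 0, clump size at offset 8, total blocks at offset 12, and eight 8-byte extent descriptors starting at offset 16. A minimal sketch of building a fork-data record with the accessors added in this revision (all values are purely illustrative):

    import org.jnode.fs.hfsplus.HFSPlusForkData;
    import org.jnode.fs.hfsplus.extent.ExtentDescriptor;

    public class ForkDataSketch {
        public static void main(String[] args) {
            HFSPlusForkData fork = new HFSPlusForkData();    // empty structure, all zeroes
            fork.setTotalSize(4096);                         // bytes, big-endian int64 at offset 0
            fork.setClumpSize(4096);                         // int32 at offset 8
            fork.setTotalBlocks(1);                          // int32 at offset 12
            ExtentDescriptor first = new ExtentDescriptor(); // empty 8-byte descriptor
            first.setStartBlock(3);                          // illustrative block numbers only
            first.setBlockCount(1);
            fork.setExtentDescriptor(0, first);              // slot 0 of 8, written at offset 16
            byte[] raw = fork.getBytes();                    // raw bytes, ready to copy into a volume header
            System.out.println("fork data is " + raw.length + " bytes");
        }
    }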
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusObject.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusObject.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusObject.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -3,7 +3,7 @@
import org.jnode.fs.FSObject;
public class HFSPlusObject implements FSObject {
- private HfsPlusFileSystem fs;
+ protected HfsPlusFileSystem fs;
public HFSPlusObject(final HfsPlusFileSystem fileSystem) {
this.fs = fileSystem;
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -8,9 +8,14 @@
private int length;
private String string;
-
+ /**
+ *
+ * @param src
+ * @param offset
+ */
public HFSUnicodeString(final byte[] src, final int offset) {
- byte[] data = new byte[2];
+ length = BigEndian.getInt16(src, offset);
+ byte[] data = new byte[2 + length * 2];
System.arraycopy(src, offset, data, 0, 2);
length = BigEndian.getInt16(data, 0);
data = new byte[length * 2];
@@ -21,6 +26,15 @@
}
string = new String(result);
}
+
+ /**
+ *
+ * @param string
+ */
+ public HFSUnicodeString(String string){
+ this.string = string;
+ this.length = string.length();
+ }
public final int getLength() {
return length;
@@ -29,5 +43,9 @@
public final String getUnicodeString() {
return string;
}
+
+ public final byte[] getBytes(){
+ return (length + "" + string).getBytes() ;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -1,29 +1,40 @@
package org.jnode.fs.hfsplus;
import java.text.SimpleDateFormat;
-import java.util.Date;
+import java.util.Calendar;
public class HFSUtils {
- public static final long DIFF_TO_JAVA_DATE_IN_MILLIS = 2082844800000L;
+
+ /**
+ * Difference in second between 01/01/1970 00:00:00 (java reference time)
+ * and 01/01/1904 00:00:00 (HFS reference time).
+ */
+ public static final long MAC_DATE_CONVERTION = 2082844800L;
/**
+ * Convert time from/to java time to/from mac time.
+ *
+ * @param time in seconds since reference date.
+ * @param encode if set to true, convert from java to mac. If set to false,
+ * convert from mac to java.
*
- * @param time time in second since midnight, January 1, 1904, GMT.
* @return
*/
- public static Date decodeDate(final int time) {
- return new Date(time * 1000 - DIFF_TO_JAVA_DATE_IN_MILLIS);
+ public static long getDate(long time, boolean encode){
+ time = (encode)? time + MAC_DATE_CONVERTION:time - MAC_DATE_CONVERTION;
+ return time;
}
-
+
/**
*
* @param time
* @param dateFormat
* @return
*/
- public static String printDate(final int time, final String dateFormat) {
- Date date = decodeDate(time);
+ public static String printDate(final long time, final String dateFormat) {
+ Calendar cal = Calendar.getInstance();
+ cal.setTimeInMillis(getDate(time, false)*1000);
SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
- return sdf.format(date.getTime());
+ return sdf.format(cal.getTime());
}
}
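The rewritten helpers turn on a single constant: the HFS+ epoch (midnight, January 1, 1904, GMT) lies 2,082,844,800 seconds before the Java/Unix epoch (January 1, 1970), so encoding adds that offset and decoding subtracts it. A short round-trip sketch using the new API (the timestamp is arbitrary):

    import org.jnode.fs.hfsplus.HFSUtils;

    public class HfsDateSketch {
        public static void main(String[] args) {
            long javaSeconds = System.currentTimeMillis() / 1000;  // seconds since 1970-01-01
            long macSeconds = HFSUtils.getDate(javaSeconds, true); // encode: adds 2082844800
            long decoded = HFSUtils.getDate(macSeconds, false);    // decode: subtracts it again
            System.out.println(decoded == javaSeconds);            // true
            // printDate() takes an HFS timestamp and renders it via Calendar/SimpleDateFormat.
            System.out.println(HFSUtils.printDate(macSeconds, "EEE MMM d HH:mm:ss yyyy"));
        }
    }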
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -3,7 +3,7 @@
public class HfsPlusConstants {
public static final int HFSPLUS_SUPER_MAGIC = 0x482b;
- public static final int HFSPLUS_MIN_VERSION = 4; /* HFS+ */
+ public static final int HFSPLUS_MIN_VERSION = 0x0004; /* HFS+ */
public static final int HFSPLUS_CURRENT_VERSION = 5; /* HFSX */
/* HFS+ volume attributes */
@@ -32,4 +32,10 @@
public static final byte EK_DATA_FORK = (byte) 0x00;
public static final byte EK_RESOURCE_FORK = (byte) 0xFF;
+
+ public static final int MINIMAL_BLOCK_SIZE = 512;
+ public static final int OPTIMAL_BLOCK_SIZE = 4096;
+
+ public static final int DATA_CLUMP_FACTOR = 16;
+ public static final int RESOURCE_CLUMP_FACTOR = 16;
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -1,8 +1,10 @@
package org.jnode.fs.hfsplus;
import java.io.IOException;
+import java.nio.ByteBuffer;
import org.apache.log4j.Logger;
+import org.jnode.driver.ApiNotFoundException;
import org.jnode.driver.Device;
import org.jnode.fs.FSDirectory;
import org.jnode.fs.FSEntry;
@@ -35,13 +37,7 @@
super(device, readOnly, type);
}
- /**
- *
- * @throws FileSystemException
- */
- public void create() throws FileSystemException {
- // TODO implements file system creation.
- }
+
/**
*
@@ -114,4 +110,21 @@
public final Superblock getVolumeHeader() {
return sb;
}
+
+ /**
+ *
+ * @throws FileSystemException
+ */
+ public void create(int blockSize) throws FileSystemException {
+ sb = new Superblock();
+ try {
+ sb.create(this,blockSize, false);
+ this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
+ flush();
+ } catch (IOException e) {
+ throw new FileSystemException("Unable to create HFS+ filesystem", e);
+ } catch (ApiNotFoundException e) {
+ throw new FileSystemException("Unable to create HFS+ filesystem", e);
+ }
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -17,10 +17,10 @@
@Override
public final HfsPlusFileSystem format(final Device device) throws FileSystemException {
try {
- FileSystemService fSS = InitialNaming.lookup(FileSystemService.NAME);
- HfsPlusFileSystemType type = fSS.getFileSystemType(HfsPlusFileSystemType.ID);
+ FileSystemService fss = InitialNaming.lookup(FileSystemService.NAME);
+ HfsPlusFileSystemType type = fss.getFileSystemType(HfsPlusFileSystemType.ID);
HfsPlusFileSystem fs = type.create(device, false);
- fs.create();
+ fs.create(HfsPlusConstants.OPTIMAL_BLOCK_SIZE);
return fs;
} catch (NameNotFoundException e) {
throw new FileSystemException(e);
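With create() implemented, the formatter now covers the whole formatting path: look up the FileSystemService, obtain the HFS+ file system type, create the file system on the device, and write a fresh volume header using the 4096-byte optimal block size. A minimal usage sketch (the formatter and device instances are assumed to be supplied by the caller; only format() from this changeset is used):

    import org.jnode.driver.Device;
    import org.jnode.fs.FileSystemException;
    import org.jnode.fs.hfsplus.HfsPlusFileSystem;
    import org.jnode.fs.hfsplus.HfsPlusFileSystemFormatter;

    public class FormatSketch {
        // 'device' is any block device registered with the JNode device manager.
        static HfsPlusFileSystem formatAsHfsPlus(HfsPlusFileSystemFormatter formatter, Device device)
                throws FileSystemException {
            // Internally this calls fs.create(HfsPlusConstants.OPTIMAL_BLOCK_SIZE),
            // which builds a Superblock and writes it at byte offset 1024.
            return formatter.format(device);
        }
    }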
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -7,10 +7,15 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.Calendar;
+import java.util.Date;
import org.apache.log4j.Level;
import org.apache.log4j.Logger;
+import org.jnode.driver.ApiNotFoundException;
import org.jnode.fs.FileSystemException;
+import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
+import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
import org.jnode.util.BigEndian;
import org.jnode.util.NumberUtils;
@@ -62,7 +67,229 @@
throw new FileSystemException(e);
}
}
+
+
+
+ /**
+ * Create a new volume header.
+ *
+ * @param fs
+ * @param blockSize
+ * @throws ApiNotFoundException
+ */
+ public void create(HfsPlusFileSystem fs, int blockSize,boolean journaled) throws IOException, ApiNotFoundException, FileSystemException {
+
+ this.fs = fs;
+ int burnedBlocksBeforeVH = 0;
+ int burnedBlocksAfterAltVH = 0;
+ /*
+ * Volume header is located at sector 2. Block before this position
+ * must be invalidated.
+ */
+ if ( blockSize == 512 ) {
+ burnedBlocksBeforeVH = 2;
+ burnedBlocksAfterAltVH = 1;
+ } else if ( blockSize == 1024 ) {
+ burnedBlocksBeforeVH = 1;
+ }
+ long size = fs.getApi().getLength();
+ long sectorCount = size / fs.getFSApi().getSectorSize();
+ long blocks = size / blockSize;
+ long allocationClumpSize = getClumpSize(blocks);
+ long bitmapBlocks = allocationClumpSize/blockSize;
+ long blockUsed = 2+ burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
+ // Populate volume header.
+ this.setMagic(HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
+ this.setVersion(HfsPlusConstants.HFSPLUS_MIN_VERSION);
+ //Set attributes.
+ this.setAttribute(HFSPLUS_VOL_UNMNT_BIT);
+ this.setLastMountedVersion(0x446534a);
+ //TODO Put correct dates.
+ Calendar now = Calendar.getInstance();
+ now.setTime(new Date());
+ int macDate = (int)HFSUtils.getDate(now.getTimeInMillis()/1000, true);
+ this.setCreateDate(macDate);
+ this.setModifyDate(macDate);
+ this.setBackupDate(0);
+ this.setCheckedDate(macDate);
+ //---
+ this.setFileCount(0);
+ this.setFolderCount(0);
+ this.setBlockSize(blockSize);
+ this.setTotalBlocks((int)blocks);
+ this.setFreeBlocks((int)blocks);
+ this.setRsrcClumpSize(HfsPlusConstants.RESOURCE_CLUMP_FACTOR * blockSize);
+ this.setDataClumpSize(HfsPlusConstants.DATA_CLUMP_FACTOR * blockSize);
+ this.setNextCatalogId(CatalogNodeId.HFSPLUS_FIRSTUSER_CNID.getId());
+ // Allocation file creation
+ initAllocationFile((int)allocationClumpSize, (int)bitmapBlocks, burnedBlocksBeforeVH);
+ int nextBlock = 0;
+ // Journal creation
+ ExtentDescriptor desc = this.getAllocationFile().getExtents()[0];
+ if(journaled){
+ int journalSize = 8*1024*1024;
+ this.setFileCount(2);
+ this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
+ this.setNextCatalogId(this.getNextCatalogId() + 2);
+ this.setJournalInfoBlock(desc.getStartBlock() + desc.getBlockCount());
+ blockUsed = blockUsed + 1 + (journalSize / blockSize);
+ } else {
+ this.setJournalInfoBlock(0);
+ nextBlock = desc.getStartBlock() + desc.getBlockCount();
+ }
+ blockUsed += initExtents(0,blockSize,nextBlock, (int)sectorCount, blockUsed);
+ blockUsed += initCatalog(0,blockSize,nextBlock, (int)sectorCount, blockUsed);
+ this.setFreeBlocks(this.getFreeBlocks() - (int)blockUsed);
+ this.setNextAllocation((int)blockUsed - 1 - burnedBlocksAfterAltVH + 10 * (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
+ }
+
+ /**
+ *
+ * @param clumpSize
+ * @param bitmapBlocks
+ * @param burnedBlocksBeforeVH
+ * @return
+ */
+ private void initAllocationFile(int clumpSize, int bitmapBlocks, int burnedBlocksBeforeVH){
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(clumpSize);
+ forkdata.setClumpSize(clumpSize);
+ forkdata.setTotalBlocks(bitmapBlocks);
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(1 + burnedBlocksBeforeVH);
+ desc.setBlockCount(0);
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
+ }
+
+ /**
+ *
+ * @param extentsClumpBlock
+ * @param blockSize
+ * @param nextBlock
+ * @return
+ */
+ private long initExtents(int extentsClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed){
+ int extentNodeSize = 4096;
+ long clumpSize = 0;
+ if(extentsClumpBlock == 0){
+ clumpSize = getBTreeClumpSize(blockSize, extentNodeSize, sectorCount, false);
+ } else {
+ clumpSize = clumpSizeCalculation(extentsClumpBlock, blockSize);
+ }
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(clumpSize);
+ forkdata.setClumpSize((int)clumpSize);
+ forkdata.setTotalBlocks((int)(clumpSize/blockSize));
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(nextBlock);
+ desc.setBlockCount(forkdata.getTotalBlocks());
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 192, forkdata.FORK_DATA_LENGTH);
+ return blockUsed + forkdata.getTotalBlocks();
+ }
+
+ /**
+ *
+ * @param extentsClumpBlock
+ * @param blockSize
+ * @param nextBlock
+ * @param sectorCount
+ * @param blockUsed
+ * @return
+ * @throws IOException
+ */
+ private long initCatalog(int catalogClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed) throws FileSystemException{
+ int catalogNodeSize = 8192;
+ try{
+ if(blockSize < HfsPlusConstants.OPTIMAL_BLOCK_SIZE || fs.getApi().getLength() < 0x40000000){
+ catalogNodeSize = 4096;
+ }
+ long clumpSize = 0;
+ if(catalogClumpBlock == 0){
+ clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize, sectorCount, true);
+ } else {
+ clumpSize = clumpSizeCalculation(catalogClumpBlock, blockSize);
+ if(clumpSize % catalogNodeSize != 0){
+ throw new FileSystemException("clump size is not a multiple of node size");
+ }
+ }
+
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(clumpSize);
+ forkdata.setClumpSize((int)clumpSize);
+ forkdata.setTotalBlocks((int)(clumpSize/blockSize));
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(this.getExtentsFile().getExtents()[0].getStartBlock() + this.getExtentsFile().getExtents()[0].getBlockCount());
+ desc.setBlockCount(forkdata.getTotalBlocks());
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
+ return blockUsed + forkdata.getTotalBlocks();
+ } catch(IOException e){
+ throw new FileSystemException(e);
+ }
+ }
+
+ /**
+ * Calculate the size of the allocation bitmap needed to cover the whole volume.
+ *
+ * @param totalBlocks Total number of allocation blocks on the device.
+ *
+ * @return long - Size of the allocation bitmap clump, in bytes.
+ *
+ * @throws IOException
+ */
+ private long getClumpSize(long totalBlocks) throws IOException {
+ long clumpSize;
+ long minClumpSize = totalBlocks >> 3;
+ if ((totalBlocks & 7) == 0){
+ ++minClumpSize;
+ }
+ clumpSize = minClumpSize;
+ return clumpSize;
+ }
+ /**
+ *
+ * @param blockSize
+ * @param nodeSize
+ * @param sectors
+ * @param catalog
+ * @return
+ */
+ private int[] extentClumpTable = new int[]{4,4,4,5,5,6,7,8,9,11,14,16,20,25,32};
+ private int[] catalogClumpTable = new int[]{4,6,8,11,14,19,25,34,45,60,80,107,144,192,256};
+ private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors, boolean catalog){
+ long clumpSize = 0;
+ if(sectors < 0x200000){
+ clumpSize = (sectors << 2);
+ if(clumpSize < (8*nodeSize)){
+ clumpSize = (8*nodeSize);
+ }
+ } else {
+ sectors = sectors >> 22;
+ for(int i=0;sectors != 0 && (i < 14);++i){
+ if(catalog){
+ clumpSize = catalogClumpTable[i] * 1024 * 1024;
+ } else {
+ clumpSize = extentClumpTable[i] * 1024 * 1024;
+ }
+ sectors = sectors >> 1;
+ }
+ }
+
+ return clumpSize;
+ }
+
+ private int clumpSizeCalculation(long clumpBlocks, int blockSize){
+ long clumpSize = clumpBlocks * blockSize;
+ if((clumpSize & 0XFFFFFFFF00000000L) == 0){
+ //ERROR
+ }
+ return (int)clumpSize;
+ }
+ // Getters/setters
+
public final int getMagic() {
return BigEndian.getInt16(data, 0);
}
@@ -82,31 +309,14 @@
//
public final int getAttributes() {
+
return BigEndian.getInt32(data, 4);
}
-
- /**
- * Get string representation of attribute.
- *
- * @return
- */
- public final String getAttributesAsString() {
- return ((isAttribute(HFSPLUS_VOL_UNMNT_BIT)) ? " kHFSVolumeUnmountedBit" : "") +
- ((isAttribute(HFSPLUS_VOL_INCNSTNT_BIT)) ? " kHFSBootVolumeInconsistentBit" : "") +
- ((isAttribute(HFSPLUS_VOL_JOURNALED_BIT)) ? " kHFSVolumeJournaledBit" : "");
+
+ public final void setAttribute(final int attributeMaskBit) {
+ BigEndian.setInt32(data, 4,(getAttributes() >> attributeMaskBit) | 0x1);
}
- /**
- * Check if the attribute corresponding to maskBit parameter is set.
- *
- * @param maskBit See constants.
- *
- * @return true if attribute is set.
- */
- public final boolean isAttribute(final int maskBit) {
- return (((getAttributes() >> maskBit) & 0x1) != 0);
- }
-
//
public final int getLastMountedVersion() {
return BigEndian.getInt32(data, 8);
@@ -126,32 +336,32 @@
}
//
- public final int getCreateDate() {
- return BigEndian.getInt32(data, 16);
+ public final long getCreateDate() {
+ return BigEndian.getUInt32(data, 16);
}
public final void setCreateDate(final int value) {
BigEndian.setInt32(data, 16, value);
}
- public final int getModifyDate() {
- return BigEndian.getInt32(data, 20);
+ public final long getModifyDate() {
+ return BigEndian.getUInt32(data, 20);
}
public final void setModifyDate(final int value) {
BigEndian.setInt32(data, 20, value);
}
- public final int getBackupDate() {
- return BigEndian.getInt32(data, 24);
+ public final long getBackupDate() {
+ return BigEndian.getUInt32(data, 24);
}
public final void setBackupDate(final int value) {
BigEndian.setInt32(data, 24, value);
}
- public final int getCheckedDate() {
- return BigEndian.getInt32(data, 28);
+ public final long getCheckedDate() {
+ return BigEndian.getUInt32(data, 28);
}
public final void setCheckedDate(final int value) {
@@ -277,7 +487,33 @@
public final HFSPlusForkData getStartupFile() {
return new HFSPlusForkData(data, 432);
}
+
+ /**
+ * Get string representation of attribute.
+ *
+ * @return
+ */
+ public final String getAttributesAsString() {
+ return ((isAttribute(HFSPLUS_VOL_UNMNT_BIT)) ? " kHFSVolumeUnmountedBit" : "") +
+ ((isAttribute(HFSPLUS_VOL_INCNSTNT_BIT)) ? " kHFSBootVolumeInconsistentBit" : "") +
+ ((isAttribute(HFSPLUS_VOL_JOURNALED_BIT)) ? " kHFSVolumeJournaledBit" : "");
+ }
+ /**
+ * Check if the attribute corresponding to maskBit is set.
+ *
+ * @param maskBit Bit position of the attribute. See constants.
+ *
+ * @return true if attribute is set.
+ */
+ public final boolean isAttribute(final int maskBit) {
+ return (((getAttributes() >> maskBit) & 0x1) != 0);
+ }
+
+ public byte[] getBytes(){
+ return data;
+ }
+
/*
* (non-Javadoc)
* @see java.lang.Object#toString()
@@ -286,7 +522,7 @@
StringBuffer buffer = new StringBuffer();
buffer.append("Magic: 0x").append(NumberUtils.hex(getMagic(), 4)).append("\n");
buffer.append("Version: ").append(getVersion()).append("\n").append("\n");
- buffer.append("Attributes: ").append(getAttributesAsString()).append("\n").append("\n");
+ buffer.append("Attributes: ").append(getAttributesAsString()).append(" (").append(getAttributes()).append(")").append("\n").append("\n");
buffer.append("Create date: ").append(
HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
buffer.append("Modify date: ").append(
@@ -305,7 +541,7 @@
buffer.append("Write count: ").append(getWriteCount()).append("\n");
buffer.append("Encoding bmp: ").append(getEncodingsBmp()).append("\n");
buffer.append("Finder Infos: ").append(getFinderInfo()).append("\n").append("\n");
- buffer.append("Finder Infos: ").append(getJournalInfoBlock()).append("\n").append("\n");
+ buffer.append("Journal block: ").append(getJournalInfoBlock()).append("\n").append("\n");
buffer.append("Allocation file").append("\n");
buffer.append(getAllocationFile().toString()).append("\n");
buffer.append("Extents file").append("\n");
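Rough numbers help when reading create() and getClumpSize(): the allocation bitmap needs about one bit per allocation block, so getClumpSize() returns roughly totalBlocks / 8 bytes, and create() then divides by the block size to reserve whole blocks for the allocation file. A worked example with assumed figures (a 512 MiB device and the 4096-byte optimal block size; getClumpSize()'s one-byte rounding adjustment does not change the result here):

    public class BitmapSizingSketch {
        public static void main(String[] args) {
            long deviceSize = 512L * 1024 * 1024;        // assumed 512 MiB device
            int blockSize = 4096;                        // HfsPlusConstants.OPTIMAL_BLOCK_SIZE
            long blocks = deviceSize / blockSize;        // 131072 allocation blocks
            long bitmapBytes = blocks >> 3;              // ~1 bit per block -> 16384 bytes
            long bitmapBlocks = bitmapBytes / blockSize; // 4 blocks reserved for the allocation file
            System.out.println(blocks + " blocks -> " + bitmapBytes + " bitmap bytes -> "
                + bitmapBlocks + " bitmap blocks");
        }
    }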
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -1,6 +1,7 @@
package org.jnode.fs.hfsplus.catalog;
import org.jnode.fs.hfsplus.HFSPlusForkData;
+import org.jnode.fs.hfsplus.HFSUtils;
import org.jnode.util.BigEndian;
public class CatalogFile {
@@ -14,10 +15,26 @@
public final int getRecordType() {
return BigEndian.getInt16(data, 0);
}
+
+ public final int getFlags() {
+ return BigEndian.getInt16(data, 2);
+ }
public final CatalogNodeId getFileId() {
return new CatalogNodeId(data, 8);
}
+
+ public final int getCreateDate() {
+ return BigEndian.getInt32(data, 12);
+ }
+
+ public final int getContentModDate() {
+ return BigEndian.getInt32(data, 16);
+ }
+
+ public final int getAttrModDate() {
+ return BigEndian.getInt32(data, 20);
+ }
public final HFSPlusForkData getDataFork() {
return new HFSPlusForkData(data, 88);
@@ -31,6 +48,9 @@
StringBuffer s = new StringBuffer();
s.append("Record type:").append(getRecordType()).append("\t");
s.append("File ID :").append(getFileId().getId()).append("\n");
+ s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
return s.toString();
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -1,5 +1,7 @@
package org.jnode.fs.hfsplus.catalog;
+import org.jnode.fs.hfsplus.HFSUtils;
+import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
public class CatalogFolder {
@@ -9,6 +11,19 @@
data = new byte[88];
System.arraycopy(src, 0, data, 0, 88);
}
+
+ /**
+ * Create a new catalog folder.
+ *
+ * @param folderId
+ *
+ */
+ public CatalogFolder(CatalogNodeId folderId){
+ data = new byte[88];
+ BigEndian.setInt16(data, 0, HfsPlusConstants.RECORD_TYPE_FOLDER);
+ BigEndian.setInt32(data, 4, 0);
+ System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
+ }
public final int getRecordType() {
return BigEndian.getInt16(data, 0);
@@ -21,12 +36,31 @@
public final CatalogNodeId getFolderId() {
return new CatalogNodeId(data, 8);
}
+
+ public final int getCreateDate() {
+ return BigEndian.getInt32(data, 12);
+ }
+
+ public final int getContentModDate() {
+ return BigEndian.getInt32(data, 16);
+ }
+
+ public final int getAttrModDate() {
+ return BigEndian.getInt32(data, 20);
+ }
+
+ public byte[] getBytes(){
+ return data;
+ }
public final String toString() {
StringBuffer s = new StringBuffer();
s.append("Record type: ").append(getRecordType()).append("\n");
s.append("Valence: ").append(getValence()).append("\n");
s.append("Folder ID: ").append(getFolderId().getId()).append("\n");
+ s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
return s.toString();
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -35,6 +35,7 @@
public CatalogKey(final CatalogNodeId parentID, final HFSUnicodeString name) {
this.parentID = parentID;
this.nodeName = name;
+ this.keyLength = 6 + name.getLength();
}
public final int getKeyLength() {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -39,5 +39,9 @@
public final int getId() {
return BigEndian.getInt32(cnid, 0);
}
+
+ public final byte[] getBytes(){
+ return cnid;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -11,6 +11,20 @@
System.arraycopy(src, 0, data, 0, 512);
}
+ /**
+ * Create a new catalog thread.
+ *
+ * @param type
+ * @param parent
+ * @param name
+ */
+ public CatalogThread(int type, CatalogNodeId parent, HFSUnicodeString name){
+ BigEndian.setInt16(data, 0, type);
+ BigEndian.setInt32(data, 4, parent.getId());
+ System.arraycopy(parent.getBytes(), 0, data, 4, 4);
+ System.arraycopy(name.getBytes(), 0, data, 8, name.getBytes().length);
+ }
+
public final int getRecordType() {
return BigEndian.getInt16(data, 0);
}
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -0,0 +1,19 @@
+package org.jnode.fs.hfsplus.command;
+
+import org.jnode.fs.Formatter;
+import org.jnode.fs.command.AbstractFormatCommand;
+import org.jnode.fs.hfsplus.HfsPlusFileSystem;
+
+public class FormatHfsPlusCommand extends AbstractFormatCommand<HfsPlusFileSystem> {
+
+ public FormatHfsPlusCommand() {
+ super("Format a block device with HFS+ filesystem");
+ }
+
+ @Override
+ protected Formatter<HfsPlusFileSystem> getFormatter() {
+ // TODO implement it.
+ return null;
+ }
+
+}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -8,6 +8,13 @@
private byte[] data;
+ /**
+ * Create empty extent descriptor.
+ */
+ public ExtentDescriptor() {
+ data = new byte[EXTENT_DESCRIPTOR_LENGTH];
+ }
+
public ExtentDescriptor(final byte[] src, final int offset) {
data = new byte[EXTENT_DESCRIPTOR_LENGTH];
System.arraycopy(src, offset, data, 0, EXTENT_DESCRIPTOR_LENGTH);
@@ -17,10 +24,22 @@
return BigEndian.getInt32(data, 0);
}
+ public final void setStartBlock(int start){
+ BigEndian.setInt32(data, 0, start);
+ }
+
public final int getBlockCount() {
return BigEndian.getInt32(data, 4);
}
+
+ public final void setBlockCount(int count){
+ BigEndian.setInt32(data, 4, count);
+ }
+ public final byte[] getBytes(){
+ return data;
+ }
+
public final String toString() {
return "Start block : " + getStartBlock() + "\tBlock count : " + getBlockCount() + "\n";
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -14,11 +14,5 @@
records = new LeafRecord[offsets.length - 1];
}
- /**
- *
- * @return
- */
- public final LeafRecord[] getRecords() {
- return records;
- }
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-01-03 15:15:07 UTC (rev 4820)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-01-03 21:05:14 UTC (rev 4821)
@@ -13,6 +13,17 @@
System.arraycopy(nodeData, offset + key.getKeyLength() + 2, recordData, 0, recordDataSize);
}
+ /**
+ *
+ * @param key
+ * @param recordData
+ */
+ public LeafRecord(final Key key, final byte[] recordData){
+ this.key = key;
+ this.recordData = new byte[recordData.length];
+ System.arraycopy(recordData,0, this.recordData, 0, recordData.length);
+ }
+
public final Key getKey() {
return key;
}
From: <ga...@us...> - 2009-01-04 12:22:15
Revision: 4825
http://jnode.svn.sourceforge.net/jnode/?rev=4825&view=rev
Author: galatnm
Date: 2009-01-04 11:42:26 +0000 (Sun, 04 Jan 2009)
Log Message:
-----------
Fix checkstyle warnings.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -33,20 +33,20 @@
@Override
protected final FSEntry createDirectoryEntry(final String name) throws IOException {
- if(!canWrite()){
- throw new ReadOnlyFileSystemException();
- }
- Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
- CatalogFolder newFolder = new CatalogFolder(new CatalogNodeId(volumeHeader.getNextCatalogId()));
- log.debug("New catalog folder :\n" + newFolder.toString());
- CatalogKey key = new CatalogKey(this.folder.getFolderId(),new HFSUnicodeString(name));
- log.debug("New catalog key :\n" + key.toString());
- LeafRecord folderRecord = new LeafRecord(key,newFolder.getBytes());
- log.debug("New record folder :\n" + folderRecord.toString());
- HFSPlusEntry newEntry = new HFSPlusEntry( (HfsPlusFileSystem) getFileSystem(),null,this,name,folderRecord);
- volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
- log.debug("New volume header :\n" + volumeHeader.toString());
- return newEntry;
+ if (!canWrite()) {
+ throw new ReadOnlyFileSystemException();
+ }
+ Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
+ CatalogFolder newFolder = new CatalogFolder(new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ log.debug("New catalog folder :\n" + newFolder.toString());
+ CatalogKey key = new CatalogKey(this.folder.getFolderId(), new HFSUnicodeString(name));
+ log.debug("New catalog key :\n" + key.toString());
+ LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
+ log.debug("New record folder :\n" + folderRecord.toString());
+ HFSPlusEntry newEntry = new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, folderRecord);
+ volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
+ log.debug("New volume header :\n" + volumeHeader.toString());
+ return newEntry;
}
@Override
@@ -61,14 +61,12 @@
@Override
protected final FSEntryTable readEntries() throws IOException {
List<FSEntry> pathList = new LinkedList<FSEntry>();
- LeafRecord[] records =
- ((HfsPlusFileSystem) getFileSystem()).getCatalog().getRecords(folder.getFolderId());
+ LeafRecord[] records = ((HfsPlusFileSystem) getFileSystem()).getCatalog().getRecords(folder.getFolderId());
for (LeafRecord rec : records) {
- if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER ||
- rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
+ if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER
+ || rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
String name = ((CatalogKey) rec.getKey()).getNodeName().getUnicodeString();
- HFSPlusEntry e =
- new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, rec);
+ HFSPlusEntry e = new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, rec);
pathList.add(e);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -8,8 +8,8 @@
private LeafRecord record;
- public HFSPlusEntry(final HfsPlusFileSystem fs, final FSEntryTable table,
- final HFSPlusDirectory parent, final String name, final LeafRecord record) {
+ public HFSPlusEntry(final HfsPlusFileSystem fs, final FSEntryTable table, final HFSPlusDirectory parent,
+ final String name, final LeafRecord record) {
super(fs, table, parent, name, getFSEntryType(name, record));
this.record = record;
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -8,7 +8,7 @@
private static final int EXTENT_OFFSET = 16;
private byte[] data;
-
+
/**
*
* @param src
@@ -18,7 +18,7 @@
data = new byte[FORK_DATA_LENGTH];
System.arraycopy(src, offset, data, 0, FORK_DATA_LENGTH);
}
-
+
/**
*
* Create a new empty fork data object.
@@ -27,50 +27,49 @@
* @param clumpSize
* @param totalBock
*/
- public HFSPlusForkData(){
- data = new byte[FORK_DATA_LENGTH];
+ public HFSPlusForkData() {
+ data = new byte[FORK_DATA_LENGTH];
}
public final long getTotalSize() {
return BigEndian.getInt64(data, 0);
}
- public final void setTotalSize(long totalSize){
- BigEndian.setInt64(data, 0, totalSize);
+ public final void setTotalSize(long totalSize) {
+ BigEndian.setInt64(data, 0, totalSize);
}
-
+
public final int getClumpSize() {
return BigEndian.getInt32(data, 8);
}
- public final void setClumpSize(int clumpSize){
- BigEndian.setInt32(data, 8, clumpSize);
+ public final void setClumpSize(int clumpSize) {
+ BigEndian.setInt32(data, 8, clumpSize);
}
-
+
public final int getTotalBlocks() {
return BigEndian.getInt32(data, 12);
}
- public final void setTotalBlocks(int totalBlock){
- BigEndian.setInt32(data, 12, totalBlock);
+ public final void setTotalBlocks(int totalBlock) {
+ BigEndian.setInt32(data, 12, totalBlock);
}
-
+
public final ExtentDescriptor[] getExtents() {
ExtentDescriptor[] list = new ExtentDescriptor[8];
for (int i = 0; i < 8; i++) {
- list[i] = new ExtentDescriptor(
- data, EXTENT_OFFSET + (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
+ list[i] = new ExtentDescriptor(data, EXTENT_OFFSET + (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
}
return list;
}
-
- public final void setExtentDescriptor(int position, ExtentDescriptor desc){
- int offset = EXTENT_OFFSET + (position * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
- System.arraycopy(desc.getBytes(), 0, data, offset, ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
+
+ public final void setExtentDescriptor(int position, ExtentDescriptor desc) {
+ int offset = EXTENT_OFFSET + (position * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
+ System.arraycopy(desc.getBytes(), 0, data, offset, ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
}
-
- public byte[] getBytes(){
- return data;
+
+ public byte[] getBytes() {
+ return data;
}
public final String toString() {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -2,19 +2,19 @@
import org.jnode.util.BigEndian;
-
public class HFSUnicodeString {
/** Length of string in characters. */
private int length;
private String string;
+
/**
*
* @param src
* @param offset
*/
public HFSUnicodeString(final byte[] src, final int offset) {
- length = BigEndian.getInt16(src, offset);
+ length = BigEndian.getInt16(src, offset);
byte[] data = new byte[2 + length * 2];
System.arraycopy(src, offset, data, 0, 2);
length = BigEndian.getInt16(data, 0);
@@ -26,14 +26,14 @@
}
string = new String(result);
}
-
+
/**
*
* @param string
*/
- public HFSUnicodeString(String string){
- this.string = string;
- this.length = string.length();
+ public HFSUnicodeString(String string) {
+ this.string = string;
+ this.length = string.length();
}
public final int getLength() {
@@ -43,9 +43,9 @@
public final String getUnicodeString() {
return string;
}
-
- public final byte[] getBytes(){
- return (length + "" + string).getBytes() ;
+
+ public final byte[] getBytes() {
+ return (length + "" + string).getBytes();
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -4,27 +4,29 @@
import java.util.Calendar;
public class HFSUtils {
-
- /**
- * Difference in second between 01/01/1970 00:00:00 (java reference time)
- * and 01/01/1904 00:00:00 (HFS reference time).
- */
+
+ /**
+ * Difference in second between 01/01/1970 00:00:00 (java reference time)
+ * and 01/01/1904 00:00:00 (HFS reference time).
+ */
public static final long MAC_DATE_CONVERTION = 2082844800L;
/**
* Convert time from/to java time to/from mac time.
- *
- * @param time in seconds since reference date.
- * @param encode if set to true, convert from java to mac. If set to false,
- * convert from mac to java.
*
+ * @param time
+ * in seconds since reference date.
+ * @param encode
+ * if set to true, convert from java to mac. If set to false,
+ * convert from mac to java.
+ *
* @return
*/
- public static long getDate(long time, boolean encode){
- time = (encode)? time + MAC_DATE_CONVERTION:time - MAC_DATE_CONVERTION;
- return time;
+ public static long getDate(long time, boolean encode) {
+ time = (encode) ? time + MAC_DATE_CONVERTION : time - MAC_DATE_CONVERTION;
+ return time;
}
-
+
/**
*
* @param time
@@ -32,8 +34,8 @@
* @return
*/
public static String printDate(final long time, final String dateFormat) {
- Calendar cal = Calendar.getInstance();
- cal.setTimeInMillis(getDate(time, false)*1000);
+ Calendar cal = Calendar.getInstance();
+ cal.setTimeInMillis(getDate(time, false) * 1000);
SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
return sdf.format(cal.getTime());
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -5,7 +5,7 @@
public static final int HFSPLUS_MIN_VERSION = 0x0004; /* HFS+ */
public static final int HFSPLUS_CURRENT_VERSION = 5; /* HFSX */
-
+
/* HFS+ volume attributes */
public static final int HFSPLUS_VOL_UNMNT_BIT = 8;
public static final int HFSPLUS_VOL_SPARE_BLK_BIT = 9;
@@ -14,12 +14,12 @@
public static final int HFSPLUS_VOL_NODEID_REUSED_BIT = 12;
public static final int HFSPLUS_VOL_JOURNALED_BIT = 13;
public static final int HFSPLUS_VOL_SOFTLOCK_BIT = 15;
-
+
public static final int BT_LEAF_NODE = -1;
public static final int BT_INDEX_NODE = 0;
public static final int BT_HEADER_NODE = 1;
public static final int BT_MAP_NODE = 2;
-
+
/* Types */
public static final int RECORD_TYPE_FOLDER = 0x0001;
public static final int RECORD_TYPE_FILE = 0x0002;
@@ -32,10 +32,10 @@
public static final byte EK_DATA_FORK = (byte) 0x00;
public static final byte EK_RESOURCE_FORK = (byte) 0xFF;
-
+
public static final int MINIMAL_BLOCK_SIZE = 512;
public static final int OPTIMAL_BLOCK_SIZE = 4096;
-
+
public static final int DATA_CLUMP_FACTOR = 16;
public static final int RESOURCE_CLUMP_FACTOR = 16;
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -15,7 +15,6 @@
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.spi.AbstractFileSystem;
-
public class HfsPlusFileSystem extends AbstractFileSystem<HFSPlusEntry> {
private final Logger log = Logger.getLogger(getClass());
@@ -32,13 +31,11 @@
* @param type
* @throws FileSystemException
*/
- public HfsPlusFileSystem(final Device device, final boolean readOnly,
- final HfsPlusFileSystemType type) throws FileSystemException {
+ public HfsPlusFileSystem(final Device device, final boolean readOnly, final HfsPlusFileSystemType type)
+ throws FileSystemException {
super(device, readOnly, type);
}
-
-
/**
*
* @throws FileSystemException
@@ -48,8 +45,7 @@
log.debug("Superblock informations:\n" + sb.toString());
if (!sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_UNMNT_BIT)) {
- log.info(getDevice().getId() +
- " Filesystem has not been cleanly unmounted, mounting it readonly");
+ log.info(getDevice().getId() + " Filesystem has not been cleanly unmounted, mounting it readonly");
setReadOnly(true);
}
if (sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_SOFTLOCK_BIT)) {
@@ -57,8 +53,8 @@
setReadOnly(true);
}
if (sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_JOURNALED_BIT)) {
- log.info(getDevice().getId() +
- " Filesystem is journaled, write access is not supported. Mounting it readonly");
+ log.info(getDevice().getId()
+ + " Filesystem is journaled, write access is not supported. Mounting it readonly");
setReadOnly(true);
}
try {
@@ -110,21 +106,21 @@
public final Superblock getVolumeHeader() {
return sb;
}
-
+
/**
*
* @throws FileSystemException
*/
public void create(int blockSize) throws FileSystemException {
- sb = new Superblock();
- try {
- sb.create(this,blockSize, false);
- this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
- flush();
- } catch (IOException e) {
- throw new FileSystemException("Unable to create HFS+ filesystem", e);
- } catch (ApiNotFoundException e) {
- throw new FileSystemException("Unable to create HFS+ filesystem", e);
- }
+ sb = new Superblock();
+ try {
+ sb.create(this, blockSize, false);
+ this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
+ flush();
+ } catch (IOException e) {
+ throw new FileSystemException("Unable to create HFS+ filesystem", e);
+ } catch (ApiNotFoundException e) {
+ throw new FileSystemException("Unable to create HFS+ filesystem", e);
+ }
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -15,8 +15,7 @@
public class HfsPlusFileSystemType implements BlockDeviceFileSystemType<HfsPlusFileSystem> {
public static final Class<HfsPlusFileSystemType> ID = HfsPlusFileSystemType.class;
- public final HfsPlusFileSystem create(final Device device, final boolean readOnly)
- throws FileSystemException {
+ public final HfsPlusFileSystem create(final Device device, final boolean readOnly) throws FileSystemException {
HfsPlusFileSystem fs = new HfsPlusFileSystem(device, readOnly, this);
fs.read();
return fs;
@@ -26,7 +25,7 @@
return "HFS+";
}
- public final boolean supports(final PartitionTableEntry pte, final byte[] firstSector,
+ public final boolean supports(final PartitionTableEntry pte, final byte[] firstSector,
final FSBlockDeviceAPI devApi) {
if (pte != null) {
if (pte instanceof IBMPartitionTableEntry) {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -19,19 +19,18 @@
import org.jnode.util.BigEndian;
import org.jnode.util.NumberUtils;
-
/**
* HFS+ volume header definition.
*
* @author Fabien L.
*
*/
-public class Superblock extends HFSPlusObject {
+public class Superblock extends HFSPlusObject {
private final Logger log = Logger.getLogger(getClass());
-
+
/** Volume header data length */
public static final int SUPERBLOCK_LENGTH = 1024;
-
+
/** Data bytes array that contains volume header information */
private byte[] data;
@@ -43,20 +42,24 @@
data = new byte[SUPERBLOCK_LENGTH];
log.setLevel(Level.INFO);
}
-
+
/**
- * Create the volume header and load information for the file system passed as parameter.
- *
- * @param fs The file system contains HFS+ partition.
+ * Create the volume header and load information for the file system passed
+ * as parameter.
*
- * @throws FileSystemException If magic number (0X482B) is incorrect or not available.
+ * @param fs
+ * The file system contains HFS+ partition.
+ *
+ * @throws FileSystemException
+ * If magic number (0X482B) is incorrect or not available.
*/
public Superblock(final HfsPlusFileSystem fs) throws FileSystemException {
super(fs);
log.setLevel(Level.INFO);
try {
ByteBuffer b = ByteBuffer.allocate(SUPERBLOCK_LENGTH);
- // skip the first 1024 bytes (boot sector) and read the volume header.
+ // skip the first 1024 bytes (boot sector) and read the volume
+ // header.
fs.getApi().read(1024, b);
data = new byte[SUPERBLOCK_LENGTH];
System.arraycopy(b.array(), 0, data, 0, SUPERBLOCK_LENGTH);
@@ -67,82 +70,82 @@
throw new FileSystemException(e);
}
}
-
-
-
+
/**
* Create a new volume header.
*
* @param fs
* @param blockSize
- * @throws ApiNotFoundException
+ * @throws ApiNotFoundException
*/
- public void create(HfsPlusFileSystem fs, int blockSize,boolean journaled) throws IOException, ApiNotFoundException, FileSystemException {
-
- this.fs = fs;
- int burnedBlocksBeforeVH = 0;
- int burnedBlocksAfterAltVH = 0;
- /*
- * Volume header is located at sector 2. Block before this position
- * must be invalidated.
- */
- if ( blockSize == 512 ) {
- burnedBlocksBeforeVH = 2;
- burnedBlocksAfterAltVH = 1;
- } else if ( blockSize == 1024 ) {
- burnedBlocksBeforeVH = 1;
- }
- long size = fs.getApi().getLength();
- long sectorCount = size / fs.getFSApi().getSectorSize();
- long blocks = size / blockSize;
- long allocationClumpSize = getClumpSize(blocks);
- long bitmapBlocks = allocationClumpSize/blockSize;
- long blockUsed = 2+ burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
- // Populate volume header.
- this.setMagic(HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
- this.setVersion(HfsPlusConstants.HFSPLUS_MIN_VERSION);
- //Set attributes.
- this.setAttribute(HFSPLUS_VOL_UNMNT_BIT);
- this.setLastMountedVersion(0x446534a);
- //TODO Put correct dates.
- Calendar now = Calendar.getInstance();
- now.setTime(new Date());
- int macDate = (int)HFSUtils.getDate(now.getTimeInMillis()/1000, true);
- this.setCreateDate(macDate);
- this.setModifyDate(macDate);
- this.setBackupDate(0);
- this.setCheckedDate(macDate);
- //---
- this.setFileCount(0);
- this.setFolderCount(0);
- this.setBlockSize(blockSize);
- this.setTotalBlocks((int)blocks);
- this.setFreeBlocks((int)blocks);
- this.setRsrcClumpSize(HfsPlusConstants.RESOURCE_CLUMP_FACTOR * blockSize);
- this.setDataClumpSize(HfsPlusConstants.DATA_CLUMP_FACTOR * blockSize);
- this.setNextCatalogId(CatalogNodeId.HFSPLUS_FIRSTUSER_CNID.getId());
- // Allocation file creation
- initAllocationFile((int)allocationClumpSize, (int)bitmapBlocks, burnedBlocksBeforeVH);
- int nextBlock = 0;
- // Journal creation
- ExtentDescriptor desc = this.getAllocationFile().getExtents()[0];
- if(journaled){
- int journalSize = 8*1024*1024;
- this.setFileCount(2);
- this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
- this.setNextCatalogId(this.getNextCatalogId() + 2);
- this.setJournalInfoBlock(desc.getStartBlock() + desc.getBlockCount());
- blockUsed = blockUsed + 1 + (journalSize / blockSize);
- } else {
- this.setJournalInfoBlock(0);
- nextBlock = desc.getStartBlock() + desc.getBlockCount();
- }
- blockUsed += initExtents(0,blockSize,nextBlock, (int)sectorCount, blockUsed);
- blockUsed += initCatalog(0,blockSize,nextBlock, (int)sectorCount, blockUsed);
- this.setFreeBlocks(this.getFreeBlocks() - (int)blockUsed);
- this.setNextAllocation((int)blockUsed - 1 - burnedBlocksAfterAltVH + 10 * (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
+ public void create(HfsPlusFileSystem fs, int blockSize, boolean journaled) throws IOException,
+ ApiNotFoundException, FileSystemException {
+
+ this.fs = fs;
+ int burnedBlocksBeforeVH = 0;
+ int burnedBlocksAfterAltVH = 0;
+ /*
+ * Volume header is located at sector 2. Block before this position must
+ * be invalidated.
+ */
+ if (blockSize == 512) {
+ burnedBlocksBeforeVH = 2;
+ burnedBlocksAfterAltVH = 1;
+ } else if (blockSize == 1024) {
+ burnedBlocksBeforeVH = 1;
+ }
+ long size = fs.getApi().getLength();
+ long sectorCount = size / fs.getFSApi().getSectorSize();
+ long blocks = size / blockSize;
+ long allocationClumpSize = getClumpSize(blocks);
+ long bitmapBlocks = allocationClumpSize / blockSize;
+ long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
+ // Populate volume header.
+ this.setMagic(HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
+ this.setVersion(HfsPlusConstants.HFSPLUS_MIN_VERSION);
+ // Set attributes.
+ this.setAttribute(HFSPLUS_VOL_UNMNT_BIT);
+ this.setLastMountedVersion(0x446534a);
+ // TODO Put correct dates.
+ Calendar now = Calendar.getInstance();
+ now.setTime(new Date());
+ int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+ this.setCreateDate(macDate);
+ this.setModifyDate(macDate);
+ this.setBackupDate(0);
+ this.setCheckedDate(macDate);
+ // ---
+ this.setFileCount(0);
+ this.setFolderCount(0);
+ this.setBlockSize(blockSize);
+ this.setTotalBlocks((int) blocks);
+ this.setFreeBlocks((int) blocks);
+ this.setRsrcClumpSize(HfsPlusConstants.RESOURCE_CLUMP_FACTOR * blockSize);
+ this.setDataClumpSize(HfsPlusConstants.DATA_CLUMP_FACTOR * blockSize);
+ this.setNextCatalogId(CatalogNodeId.HFSPLUS_FIRSTUSER_CNID.getId());
+ // Allocation file creation
+ initAllocationFile((int) allocationClumpSize, (int) bitmapBlocks, burnedBlocksBeforeVH);
+ int nextBlock = 0;
+ // Journal creation
+ ExtentDescriptor desc = this.getAllocationFile().getExtents()[0];
+ if (journaled) {
+ int journalSize = 8 * 1024 * 1024;
+ this.setFileCount(2);
+ this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
+ this.setNextCatalogId(this.getNextCatalogId() + 2);
+ this.setJournalInfoBlock(desc.getStartBlock() + desc.getBlockCount());
+ blockUsed = blockUsed + 1 + (journalSize / blockSize);
+ } else {
+ this.setJournalInfoBlock(0);
+ nextBlock = desc.getStartBlock() + desc.getBlockCount();
+ }
+ blockUsed += initExtents(0, blockSize, nextBlock, (int) sectorCount, blockUsed);
+ blockUsed += initCatalog(0, blockSize, nextBlock, (int) sectorCount, blockUsed);
+ this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
+ this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10
+ * (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
}
-
+
/**
*
* @param clumpSize
@@ -150,18 +153,18 @@
* @param burnedBlocksBeforeVH
* @return
*/
- private void initAllocationFile(int clumpSize, int bitmapBlocks, int burnedBlocksBeforeVH){
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(clumpSize);
- forkdata.setClumpSize(clumpSize);
- forkdata.setTotalBlocks(bitmapBlocks);
- ExtentDescriptor desc = new ExtentDescriptor();
- desc.setStartBlock(1 + burnedBlocksBeforeVH);
- desc.setBlockCount(0);
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
+ private void initAllocationFile(int clumpSize, int bitmapBlocks, int burnedBlocksBeforeVH) {
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(clumpSize);
+ forkdata.setClumpSize(clumpSize);
+ forkdata.setTotalBlocks(bitmapBlocks);
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(1 + burnedBlocksBeforeVH);
+ desc.setBlockCount(0);
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
}
-
+
/**
*
* @param extentsClumpBlock
@@ -169,26 +172,26 @@
* @param nextBlock
* @return
*/
- private long initExtents(int extentsClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed){
- int extentNodeSize = 4096;
- long clumpSize = 0;
- if(extentsClumpBlock == 0){
- clumpSize = getBTreeClumpSize(blockSize, extentNodeSize, sectorCount, false);
- } else {
- clumpSize = clumpSizeCalculation(extentsClumpBlock, blockSize);
- }
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(clumpSize);
- forkdata.setClumpSize((int)clumpSize);
- forkdata.setTotalBlocks((int)(clumpSize/blockSize));
- ExtentDescriptor desc = new ExtentDescriptor();
- desc.setStartBlock(nextBlock);
- desc.setBlockCount(forkdata.getTotalBlocks());
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 192, forkdata.FORK_DATA_LENGTH);
- return blockUsed + forkdata.getTotalBlocks();
+ private long initExtents(int extentsClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed) {
+ int extentNodeSize = 4096;
+ long clumpSize = 0;
+ if (extentsClumpBlock == 0) {
+ clumpSize = getBTreeClumpSize(blockSize, extentNodeSize, sectorCount, false);
+ } else {
+ clumpSize = clumpSizeCalculation(extentsClumpBlock, blockSize);
+ }
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(clumpSize);
+ forkdata.setClumpSize((int) clumpSize);
+ forkdata.setTotalBlocks((int) (clumpSize / blockSize));
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(nextBlock);
+ desc.setBlockCount(forkdata.getTotalBlocks());
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 192, forkdata.FORK_DATA_LENGTH);
+ return blockUsed + forkdata.getTotalBlocks();
}
-
+
/**
*
* @param extentsClumpBlock
@@ -197,57 +200,61 @@
* @param sectorCount
* @param blockUsed
* @return
- * @throws IOException
+ * @throws IOException
*/
- private long initCatalog(int catalogClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed) throws FileSystemException{
- int catalogNodeSize = 8192;
- try{
- if(blockSize < HfsPlusConstants.OPTIMAL_BLOCK_SIZE || fs.getApi().getLength() < 0x40000000){
- catalogNodeSize = 4096;
- }
- long clumpSize = 0;
- if(catalogClumpBlock == 0){
- clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize, sectorCount, true);
- } else {
- clumpSize = clumpSizeCalculation(catalogClumpBlock, blockSize);
- if(clumpSize % catalogNodeSize != 0){
- throw new FileSystemException("clump size is not a multiple of node size");
- }
- }
-
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(clumpSize);
- forkdata.setClumpSize((int)clumpSize);
- forkdata.setTotalBlocks((int)(clumpSize/blockSize));
- ExtentDescriptor desc = new ExtentDescriptor();
- desc.setStartBlock(this.getExtentsFile().getExtents()[0].getStartBlock() + this.getExtentsFile().getExtents()[0].getBlockCount());
- desc.setBlockCount(forkdata.getTotalBlocks());
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
- return blockUsed + forkdata.getTotalBlocks();
- } catch(IOException e){
- throw new FileSystemException(e);
- }
+ private long initCatalog(int catalogClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed)
+ throws FileSystemException {
+ int catalogNodeSize = 8192;
+ try {
+ if (blockSize < HfsPlusConstants.OPTIMAL_BLOCK_SIZE || fs.getApi().getLength() < 0x40000000) {
+ catalogNodeSize = 4096;
+ }
+ long clumpSize = 0;
+ if (catalogClumpBlock == 0) {
+ clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize, sectorCount, true);
+ } else {
+ clumpSize = clumpSizeCalculation(catalogClumpBlock, blockSize);
+ if (clumpSize % catalogNodeSize != 0) {
+ throw new FileSystemException("clump size is not a multiple of node size");
+ }
+ }
+
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(clumpSize);
+ forkdata.setClumpSize((int) clumpSize);
+ forkdata.setTotalBlocks((int) (clumpSize / blockSize));
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(this.getExtentsFile().getExtents()[0].getStartBlock()
+ + this.getExtentsFile().getExtents()[0].getBlockCount());
+ desc.setBlockCount(forkdata.getTotalBlocks());
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
+ return blockUsed + forkdata.getTotalBlocks();
+ } catch (IOException e) {
+ throw new FileSystemException(e);
+ }
}
-
+
/**
* Calculate the number of blocks needed for bitmap.
*
- * @param totalBlocks Total of blocks found in the device.
+ * @param totalBlocks
+ * Total of blocks found in the device.
*
* @return long - Number of blocks.
*
* @throws IOException
*/
private long getClumpSize(long totalBlocks) throws IOException {
- long clumpSize;
- long minClumpSize = totalBlocks >> 3;
- if ((totalBlocks & 7) == 0){
- ++minClumpSize;
- }
- clumpSize = minClumpSize;
- return clumpSize;
+ long clumpSize;
+ long minClumpSize = totalBlocks >> 3;
+ if ((totalBlocks & 7) == 0) {
+ ++minClumpSize;
+ }
+ clumpSize = minClumpSize;
+ return clumpSize;
}
+
/**
*
* @param blockSize
@@ -257,39 +264,41 @@
* @return
*/
- private int[] extentClumpTable = new int[]{4,4,4,5,5,6,7,8,9,11,14,16,20,25,32};
- private int[] catalogClumpTable = new int[]{4,6,8,11,14,19,25,34,45,60,80,107,144,192,256};
- private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors, boolean catalog){
- long clumpSize = 0;
- if(sectors < 0x200000){
- clumpSize = (sectors << 2);
- if(clumpSize < (8*nodeSize)){
- clumpSize = (8*nodeSize);
- }
- } else {
- sectors = sectors >> 22;
- for(int i=0;sectors != 0 && (i < 14);++i){
- if(catalog){
- clumpSize = catalogClumpTable[i] * 1024 * 1024;
- } else {
- clumpSize = extentClumpTable[i] * 1024 * 1024;
- }
- sectors = sectors >> 1;
- }
- }
-
- return clumpSize;
+ private int[] extentClumpTable = new int[] {4, 4, 4, 5, 5, 6, 7, 8, 9, 11, 14, 16, 20, 25, 32 };
+ private int[] catalogClumpTable = new int[] {4, 6, 8, 11, 14, 19, 25, 34, 45, 60, 80, 107, 144, 192, 256 };
+
+ private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors, boolean catalog) {
+ long clumpSize = 0;
+ if (sectors < 0x200000) {
+ clumpSize = (sectors << 2);
+ if (clumpSize < (8 * nodeSize)) {
+ clumpSize = (8 * nodeSize);
+ }
+ } else {
+ sectors = sectors >> 22;
+ for (int i = 0; sectors != 0 && (i < 14); ++i) {
+ if (catalog) {
+ clumpSize = catalogClumpTable[i] * 1024 * 1024;
+ } else {
+ clumpSize = extentClumpTable[i] * 1024 * 1024;
+ }
+ sectors = sectors >> 1;
+ }
+ }
+
+ return clumpSize;
}
-
- private int clumpSizeCalculation(long clumpBlocks, int blockSize){
- long clumpSize = clumpBlocks * blockSize;
- if((clumpSize & 0XFFFFFFFF00000000L) == 0){
- //ERROR
- }
- return (int)clumpSize;
+
+ private int clumpSizeCalculation(long clumpBlocks, int blockSize) {
+ long clumpSize = clumpBlocks * blockSize;
+ if ((clumpSize & 0XFFFFFFFF00000000L) == 0) {
+ // ERROR
+ }
+ return (int) clumpSize;
}
+
// Getters/setters
-
+
public final int getMagic() {
return BigEndian.getInt16(data, 0);
}
@@ -309,12 +318,12 @@
//
public final int getAttributes() {
-
+
return BigEndian.getInt32(data, 4);
}
-
+
public final void setAttribute(final int attributeMaskBit) {
- BigEndian.setInt32(data, 4,(getAttributes() >> attributeMaskBit) | 0x1);
+ BigEndian.setInt32(data, 4, (getAttributes() >> attributeMaskBit) | 0x1);
}
//
@@ -487,51 +496,53 @@
public final HFSPlusForkData getStartupFile() {
return new HFSPlusForkData(data, 432);
}
-
+
/**
* Get string representation of attribute.
*
* @return
*/
public final String getAttributesAsString() {
- return ((isAttribute(HFSPLUS_VOL_UNMNT_BIT)) ? " kHFSVolumeUnmountedBit" : "") +
- ((isAttribute(HFSPLUS_VOL_INCNSTNT_BIT)) ? " kHFSBootVolumeInconsistentBit" : "") +
- ((isAttribute(HFSPLUS_VOL_JOURNALED_BIT)) ? " kHFSVolumeJournaledBit" : "");
+ return ((isAttribute(HFSPLUS_VOL_UNMNT_BIT)) ? " kHFSVolumeUnmountedBit" : "")
+ + ((isAttribute(HFSPLUS_VOL_INCNSTNT_BIT)) ? " kHFSBootVolumeInconsistentBit" : "")
+ + ((isAttribute(HFSPLUS_VOL_JOURNALED_BIT)) ? " kHFSVolumeJournaledBit" : "");
}
/**
* Check if the corresponding attribute is set.
*
- * @param maskBit Bit position of the attribute. See constants.
+ * @param maskBit
+ * Bit position of the attribute. See constants.
*
* @return true if attribute is set.
*/
public final boolean isAttribute(final int maskBit) {
return (((getAttributes() >> maskBit) & 0x1) != 0);
}
-
- public byte[] getBytes(){
- return data;
+
+ public byte[] getBytes() {
+ return data;
}
/*
* (non-Javadoc)
+ *
* @see java.lang.Object#toString()
*/
public final String toString() {
StringBuffer buffer = new StringBuffer();
buffer.append("Magic: 0x").append(NumberUtils.hex(getMagic(), 4)).append("\n");
buffer.append("Version: ").append(getVersion()).append("\n").append("\n");
- buffer.append("Attributes: ").append(getAttributesAsString()).append(" (").append(getAttributes()).append(")").append("\n").append("\n");
- buffer.append("Create date: ").append(
- HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- buffer.append("Modify date: ").append(
- HFSUtils.printDate(getModifyDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- buffer.append("Backup date: ").append(
- HFSUtils.printDate(getBackupDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- buffer.append("Checked date: ").append(
- HFSUtils.printDate(getCheckedDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n")
- .append("\n");
+ buffer.append("Attributes: ").append(getAttributesAsString()).append(" (").append(getAttributes()).append(")")
+ .append("\n").append("\n");
+ buffer.append("Create date: ").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append(
+ "\n");
+ buffer.append("Modify date: ").append(HFSUtils.printDate(getModifyDate(), "EEE MMM d HH:mm:ss yyyy")).append(
+ "\n");
+ buffer.append("Backup date: ").append(HFSUtils.printDate(getBackupDate(), "EEE MMM d HH:mm:ss yyyy")).append(
+ "\n");
+ buffer.append("Checked date: ").append(HFSUtils.printDate(getCheckedDate(), "EEE MMM d HH:mm:ss yyyy")).append(
+ "\n").append("\n");
buffer.append("File count: ").append(getFileCount()).append("\n");
buffer.append("Folder count: ").append(getFolderCount()).append("\n").append("\n");
buffer.append("Block size: ").append(getBlockSize()).append("\n");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -60,13 +60,12 @@
NodeDescriptor currentBtnd = new NodeDescriptor(nodeData.array());
log.debug("Current node descriptor:\n" + currentBtnd.toString());
while (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode =
- new CatalogIndexNode(currentBtnd, nodeData.array(), currentNodeSize);
+ CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, nodeData.array(), currentNodeSize);
IndexRecord record = currentIndexNode.find(parentID);
currentNodeNumber = record.getIndex();
currentOffset = firstNodeOffset + (currentNodeNumber * currentNodeSize);
- log.debug("Current node number: " + currentNodeNumber + " currentOffset:" +
- currentOffset + "(" + currentNodeSize + ")");
+ log.debug("Current node number: " + currentNodeNumber + " currentOffset:" + currentOffset + "("
+ + currentNodeSize + ")");
nodeData = ByteBuffer.allocate(currentNodeSize);
fs.getApi().read(currentOffset, nodeData);
currentBtnd = new NodeDescriptor(nodeData.array());
@@ -74,8 +73,7 @@
}
LeafRecord lr = null;
if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf =
- new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
+ CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
lr = leaf.find(parentID);
log.debug("Leaf record :\n" + lr.toString());
}
@@ -99,8 +97,7 @@
* @return
* @throws IOException
*/
- public final LeafRecord[] getRecords(final CatalogNodeId parentID, final int nodeNumber)
- throws IOException {
+ public final LeafRecord[] getRecords(final CatalogNodeId parentID, final int nodeNumber) throws IOException {
try {
int currentOffset = firstNodeOffset;
int currentNodeNumber = nodeNumber;
@@ -110,8 +107,8 @@
NodeDescriptor currentBtnd = new NodeDescriptor(nodeData.array());
log.debug("Current node descriptor:\n" + currentBtnd.toString());
if (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode =
- new CatalogIndexNode(currentBtnd, nodeData.array(), currentNodeSize);
+ CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, nodeData.array(),
+ currentNodeSize);
IndexRecord[] records = currentIndexNode.findChilds(parentID);
List<LeafRecord> lfList = new LinkedList<LeafRecord>();
for (IndexRecord rec : records) {
@@ -122,8 +119,8 @@
}
return lfList.toArray(new LeafRecord[lfList.size()]);
} else if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf =
- new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
+ CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(),
+ currentNodeSize);
LeafRecord[] lr = leaf.findAll(parentID);
log.debug("Leaf record size: " + lr.length);
return lr;
@@ -145,8 +142,8 @@
* @return
* @throws IOException
*/
- public final LeafRecord getRecord(final CatalogNodeId parentID, final HFSUnicodeString nodeName)
- throws IOException {
+ public final LeafRecord getRecord(final CatalogNodeId parentID,
+ final HFSUnicodeString nodeName) throws IOException {
int currentOffset = firstNodeOffset;
int currentNodeNumber = getBTHeaderRecord().getRootNode();
int currentNodeSize = getBTHeaderRecord().getNodeSize();
@@ -156,8 +153,7 @@
log.debug("Current node descriptor: \n" + currentBtnd.toString());
CatalogKey cKey = new CatalogKey(parentID, nodeName);
while (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode =
- new CatalogIndexNode(currentBtnd, buffer.array(), currentNodeSize);
+ CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, buffer.array(), currentNodeSize);
IndexRecord record = currentIndexNode.find(cKey);
currentNodeNumber = record.getIndex();
currentOffset = currentNodeNumber * currentNodeSize;
@@ -167,8 +163,7 @@
}
LeafRecord lr = null;
if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf =
- new CatalogLeafNode(currentBtnd, buffer.array(), currentNodeSize);
+ CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, buffer.array(), currentNodeSize);
lr = leaf.find(parentID);
log.debug("Leaf record: \n" + lr.toString());
}
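
The loops in Catalog.getRecord()/getRecords() above are the standard B-tree descent: at each index node, take the record with the largest key that is still less than or equal to the search key, follow its child pointer, and repeat until a leaf node is reached. A self-contained illustration of that selection rule with plain Java collections (integers stand in for catalog keys; this is an analogy, not the on-disk format):

    import java.util.NavigableMap;
    import java.util.TreeMap;

    public class BTreeDescentDemo {
        public static void main(String[] args) {
            // One index node: smallest key in the child subtree -> child node number.
            NavigableMap<Integer, Integer> indexNode = new TreeMap<Integer, Integer>();
            indexNode.put(2, 10);    // keys 2..6 live under node 10
            indexNode.put(7, 11);    // keys 7..15 live under node 11
            indexNode.put(16, 12);   // keys >= 16 live under node 12

            int searchKey = 9;
            // "Largest key <= search key" is the same rule CatalogIndexNode.find() scans for.
            int childNode = indexNode.floorEntry(searchKey).getValue();
            System.out.println("descend into node " + childNode);   // prints 11
        }
    }
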
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -15,7 +15,7 @@
public final int getRecordType() {
return BigEndian.getInt16(data, 0);
}
-
+
public final int getFlags() {
return BigEndian.getInt16(data, 2);
}
@@ -23,17 +23,17 @@
public final CatalogNodeId getFileId() {
return new CatalogNodeId(data, 8);
}
-
+
public final int getCreateDate() {
- return BigEndian.getInt32(data, 12);
+ return BigEndian.getInt32(data, 12);
}
-
+
public final int getContentModDate() {
- return BigEndian.getInt32(data, 16);
+ return BigEndian.getInt32(data, 16);
}
-
+
public final int getAttrModDate() {
- return BigEndian.getInt32(data, 20);
+ return BigEndian.getInt32(data, 20);
}
public final HFSPlusForkData getDataFork() {
@@ -49,8 +49,10 @@
s.append("Record type:").append(getRecordType()).append("\t");
s.append("File ID :").append(getFileId().getId()).append("\n");
s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy"))
+ .append("\n");
+ s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append(
+ "\n");
return s.toString();
}
}
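
The raw values behind getCreateDate()/getContentModDate()/getAttrModDate() are HFS+ timestamps, i.e. seconds since 1904-01-01 00:00:00 GMT rather than the Unix epoch; HFSUtils.getDate()/printDate() handle that conversion in the committed code. A self-contained sketch of the epoch offset involved (2082844800 is the number of seconds between the two epochs):

    public class HfsDateDemo {
        // Seconds between 1904-01-01 and 1970-01-01.
        static final long EPOCH_DIFF_SECONDS = 2082844800L;

        static long hfsToUnixSeconds(long hfsSeconds) {
            return hfsSeconds - EPOCH_DIFF_SECONDS;
        }

        static long unixToHfsSeconds(long unixSeconds) {
            return unixSeconds + EPOCH_DIFF_SECONDS;
        }

        public static void main(String[] args) {
            long nowUnix = System.currentTimeMillis() / 1000;
            long nowHfs = unixToHfsSeconds(nowUnix);
            // Round-trips back to (roughly) the current date.
            System.out.println(new java.util.Date(hfsToUnixSeconds(nowHfs) * 1000L));
        }
    }
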
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -11,18 +11,18 @@
data = new byte[88];
System.arraycopy(src, 0, data, 0, 88);
}
-
+
/**
* Create a new catalog folder.
*
* @param folderId
*
*/
- public CatalogFolder(CatalogNodeId folderId){
- data = new byte[88];
- BigEndian.setInt16(data, 0, HfsPlusConstants.RECORD_TYPE_FOLDER);
- BigEndian.setInt32(data, 4, 0);
- System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
+ public CatalogFolder(CatalogNodeId folderId) {
+ data = new byte[88];
+ BigEndian.setInt16(data, 0, HfsPlusConstants.RECORD_TYPE_FOLDER);
+ BigEndian.setInt32(data, 4, 0);
+ System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
}
public final int getRecordType() {
@@ -36,21 +36,21 @@
public final CatalogNodeId getFolderId() {
return new CatalogNodeId(data, 8);
}
-
+
public final int getCreateDate() {
- return BigEndian.getInt32(data, 12);
+ return BigEndian.getInt32(data, 12);
}
-
+
public final int getContentModDate() {
- return BigEndian.getInt32(data, 16);
+ return BigEndian.getInt32(data, 16);
}
-
+
public final int getAttrModDate() {
- return BigEndian.getInt32(data, 20);
+ return BigEndian.getInt32(data, 20);
}
-
- public byte[] getBytes(){
- return data;
+
+ public byte[] getBytes() {
+ return data;
}
public final String toString() {
@@ -59,8 +59,10 @@
s.append("Valence: ").append(getValence()).append("\n");
s.append("Folder ID: ").append(getFolderId().getId()).append("\n");
s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy"))
+ .append("\n");
+ s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append(
+ "\n");
return s.toString();
}
}
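
All of these record classes read and write their fields through the same big-endian helpers, so the offset-based getters above are easiest to follow with the byte encoding in mind. A self-contained sketch of what the getInt32/setInt32 calls are doing (the real helper class is the one imported by these files; this demo only mirrors its behaviour):

    public class BigEndianDemo {
        static int getInt32(byte[] data, int offset) {
            return ((data[offset] & 0xFF) << 24)
                 | ((data[offset + 1] & 0xFF) << 16)
                 | ((data[offset + 2] & 0xFF) << 8)
                 |  (data[offset + 3] & 0xFF);
        }

        static void setInt32(byte[] data, int offset, int value) {
            data[offset]     = (byte) (value >>> 24);
            data[offset + 1] = (byte) (value >>> 16);
            data[offset + 2] = (byte) (value >>> 8);
            data[offset + 3] = (byte)  value;
        }

        public static void main(String[] args) {
            byte[] record = new byte[88];              // same size as a catalog folder record
            setInt32(record, 8, 16);                   // write a folder ID of 16 at offset 8
            System.out.println(getInt32(record, 8));   // prints 16
        }
    }
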
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -11,8 +11,7 @@
public class CatalogIndexNode extends IndexNode {
private final Logger log = Logger.getLogger(getClass());
- public CatalogIndexNode(final NodeDescriptor descriptor, final byte[] nodeData,
- final int nodeSize) {
+ public CatalogIndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
for (int i = 0; i < records.length; ++i) {
int currentOffset = offsets[i];
@@ -45,8 +44,8 @@
CatalogKey largestMatchingKey = null;
for (IndexRecord rec : records) {
CatalogKey key = (CatalogKey) rec.getKey();
- if (key.getParentId().getId() < parentId.getId() &&
- (largestMatchingKey == null || key.compareTo(largestMatchingKey) > 0)) {
+ if (key.getParentId().getId() < parentId.getId()
+ && (largestMatchingKey == null || key.compareTo(largestMatchingKey) > 0)) {
largestMatchingKey = key;
largestMatchingRecord = rec;
} else if (key.getParentId().getId() == parentId.getId()) {
@@ -64,9 +63,9 @@
public final IndexRecord find(final CatalogKey key) {
IndexRecord largestMatchingRecord = null;
for (int i = 0; i < records.length; ++i) {
- if (records[i].getKey().compareTo(key) <= 0 &&
- (largestMatchingRecord == null || records[i].getKey().compareTo(
- largestMatchingRecord.getKey()) > 0)) {
+ if (records[i].getKey().compareTo(key) <= 0
+ && (largestMatchingRecord == null ||
+ records[i].getKey().compareTo(largestMatchingRecord.getKey()) > 0)) {
largestMatchingRecord = records[i];
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -73,8 +73,7 @@
StringBuffer s = new StringBuffer();
s.append("Key length: ").append(getKeyLength()).append(" ");
s.append("Parent ID: ").append(getParentId().getId()).append(" ");
- s.append("Node name: ").append(
- (getNodeName() != null) ? getNodeName().getUnicodeString() : "");
+ s.append("Node name: ").append((getNodeName() != null) ? getNodeName().getUnicodeString() : "");
return s.toString();
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -10,8 +10,7 @@
public class CatalogLeafNode extends LeafNode {
- public CatalogLeafNode(final NodeDescriptor descriptor, final byte[] nodeData,
- final int nodeSize) {
+ public CatalogLeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
for (int i = 0; i < records.length; ++i) {
int currentOffset = offsets[i];
@@ -47,8 +46,7 @@
List<LeafRecord> list = new LinkedList<LeafRecord>();
for (LeafRecord rec : records) {
Key key = rec.getKey();
- if (key instanceof CatalogKey &&
- ((CatalogKey) key).getParentId().getId() == parentId.getId()) {
+ if (key instanceof CatalogKey && ((CatalogKey) key).getParentId().getId() == parentId.getId()) {
list.add(rec);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNodeId.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -39,9 +39,9 @@
public final int getId() {
return BigEndian.getInt32(cnid, 0);
}
-
- public final byte[] getBytes(){
- return cnid;
+
+ public final byte[] getBytes() {
+ return cnid;
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -18,13 +18,13 @@
* @param parent
* @param name
*/
- public CatalogThread(int type, CatalogNodeId parent, HFSUnicodeString name){
- BigEndian.setInt16(data, 0, type);
- BigEndian.setInt32(data, 4, parent.getId());
- System.arraycopy(parent.getBytes(), 0, data, 4, 4);
- System.arraycopy(name.getBytes(), 0, data, 8, name.getBytes().length);
+ public CatalogThread(int type, CatalogNodeId parent, HFSUnicodeString name) {
+ BigEndian.setInt16(data, 0, type);
+ BigEndian.setInt32(data, 4, parent.getId());
+ System.arraycopy(parent.getBytes(), 0, data, 4, 4);
+ System.arraycopy(name.getBytes(), 0, data, 8, name.getBytes().length);
}
-
+
public final int getRecordType() {
return BigEndian.getInt16(data, 0);
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -6,14 +6,14 @@
public class FormatHfsPlusCommand extends AbstractFormatCommand<HfsPlusFileSystem> {
- public FormatHfsPlusCommand() {
- super("Format a block device with HFS+ filesystem");
- }
+ public FormatHfsPlusCommand() {
+ super("Format a block device with HFS+ filesystem");
+ }
- @Override
- protected Formatter<HfsPlusFileSystem> getFormatter() {
- // TODO implement it.
- return null;
- }
+ @Override
+ protected Formatter<HfsPlusFileSystem> getFormatter() {
+ // TODO implement it.
+ return null;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -14,7 +14,7 @@
public ExtentDescriptor() {
data = new byte[EXTENT_DESCRIPTOR_LENGTH];
}
-
+
public ExtentDescriptor(final byte[] src, final int offset) {
data = new byte[EXTENT_DESCRIPTOR_LENGTH];
System.arraycopy(src, offset, data, 0, EXTENT_DESCRIPTOR_LENGTH);
@@ -24,22 +24,22 @@
return BigEndian.getInt32(data, 0);
}
- public final void setStartBlock(int start){
- BigEndian.setInt32(data, 0, start);
+ public final void setStartBlock(int start) {
+ BigEndian.setInt32(data, 0, start);
}
-
+
public final int getBlockCount() {
return BigEndian.getInt32(data, 4);
}
-
- public final void setBlockCount(int count){
- BigEndian.setInt32(data, 4, count);
+
+ public final void setBlockCount(int count) {
+ BigEndian.setInt32(data, 4, count);
}
- public final byte[] getBytes(){
- return data;
+ public final byte[] getBytes() {
+ return data;
}
-
+
public final String toString() {
return "Start block : " + getStartBlock() + "\tBlock count : " + getBlockCount() + "\n";
}
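
An extent descriptor is simply a (startBlock, blockCount) pair expressed in allocation blocks, so turning one into a byte range on the device only needs the volume's block size. A self-contained sketch (all values are examples):

    public class ExtentRangeDemo {
        public static void main(String[] args) {
            int blockSize = 4096;    // example allocation block size
            int startBlock = 24;     // would come from ExtentDescriptor.getStartBlock()
            int blockCount = 16;     // would come from ExtentDescriptor.getBlockCount()

            long byteOffset = (long) startBlock * blockSize;   // 98304
            long byteLength = (long) blockCount * blockSize;   // 65536
            System.out.println("extent covers bytes " + byteOffset
                    + " to " + (byteOffset + byteLength - 1));
        }
    }
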
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java 2009-01-04 10:08:42 UTC (rev 4824)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java 2009-01-04 11:42:26 UTC (rev 4825)
@@ -6,8 +6,7 @@
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
public class ExtentIndexNode extends IndexNode {
- public ExtentIndexNode(final NodeDescriptor descriptor, final byte[] nodeData,
- final int nodeSize) {
+ public ExtentIndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descr...
[truncated message content] |
|
From: <ga...@us...> - 2009-01-06 10:25:35
|
Revision: 4831
http://jnode.svn.sourceforge.net/jnode/?rev=4831&view=rev
Author: galatnm
Date: 2009-01-06 10:25:31 +0000 (Tue, 06 Jan 2009)
Log Message:
-----------
Use of params for formatter.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
Added Paths:
-----------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-06 10:25:31 UTC (rev 4831)
@@ -0,0 +1,33 @@
+package org.jnode.fs.hfsplus;
+
+public class HFSPlusParams {
+ private String volumeName;
+ private int blockSize;
+ private boolean journaled;
+ private int journalSize;
+
+ public String getVolumeName() {
+ return volumeName;
+ }
+ public void setVolumeName(String volumeName) {
+ this.volumeName = volumeName;
+ }
+ public int getBlockSize() {
+ return blockSize;
+ }
+ public void setBlockSize(int blockSize) {
+ this.blockSize = blockSize;
+ }
+ public boolean isJournaled() {
+ return journaled;
+ }
+ public void setJournaled(boolean journaled) {
+ this.journaled = journaled;
+ }
+ public int getJournalSize() {
+ return journalSize;
+ }
+ public void setJournalSize(int journalSize) {
+ this.journalSize = journalSize;
+ }
+}
Property changes on: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
___________________________________________________________________
Added: svn:executable
+ *
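
HFSPlusParams is a plain parameter object: a caller fills it in and hands it to the formatter, which forwards it to HfsPlusFileSystem.create(). A minimal usage sketch (the values are illustrative; the actual wiring is shown in the hunks below):

    import org.jnode.fs.hfsplus.HFSPlusParams;

    public class HfsPlusFormatExample {
        public static void main(String[] args) {
            HFSPlusParams params = new HFSPlusParams();
            params.setVolumeName("Untitled");           // illustrative volume name
            params.setBlockSize(4096);                  // OPTIMAL_BLOCK_SIZE
            params.setJournaled(false);
            params.setJournalSize(8 * 1024 * 1024);     // DEFAULT_JOURNAL_SIZE added below
            // params would then be passed to new HfsPlusFileSystemFormatter(params)
            // and, from there, to fs.create(params).
        }
    }
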
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-05 15:09:00 UTC (rev 4830)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-06 10:25:31 UTC (rev 4831)
@@ -38,4 +38,7 @@
public static final int DATA_CLUMP_FACTOR = 16;
public static final int RESOURCE_CLUMP_FACTOR = 16;
+
+ public static final int DEFAULT_JOURNAL_SIZE = 8 * 1024 * 1024;
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-05 15:09:00 UTC (rev 4830)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-06 10:25:31 UTC (rev 4831)
@@ -41,7 +41,7 @@
* @throws FileSystemException
*/
public final void read() throws FileSystemException {
- sb = new Superblock(this);
+ sb = new Superblock(this, false);
log.debug("Superblock informations:\n" + sb.toString());
if (!sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_UNMNT_BIT)) {
@@ -111,10 +111,10 @@
*
* @throws FileSystemException
*/
- public void create(int blockSize) throws FileSystemException {
- sb = new Superblock();
+ public void create(HFSPlusParams params) throws FileSystemException {
+ sb = new Superblock(this, true);
try {
- sb.create(this, blockSize, false);
+ sb.create(params);
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
} catch (IOException e) {
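
The write at offset 1024 above puts the primary volume header in its fixed position: the first 1024 bytes of the volume are reserved, and the header occupies the 512 bytes that follow. HFS+ also keeps an alternate copy of the volume header starting 1024 bytes before the end of the volume, which this method does not write yet; the "burned blocks" bookkeeping in Superblock.create() accounts for both copies. A small sketch of the offset arithmetic (the volume length is an assumed example):

    public class VolumeHeaderOffsets {
        public static void main(String[] args) {
            long volumeLength = 512L * 1024 * 1024;        // assumed 512 MiB volume
            long primaryHeaderOffset = 1024;               // offset used by create() above
            long alternateHeaderOffset = volumeLength - 1024;
            System.out.println("primary header   @ " + primaryHeaderOffset);
            System.out.println("alternate header @ " + alternateHeaderOffset);
        }
    }
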
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java 2009-01-05 15:09:00 UTC (rev 4830)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemFormatter.java 2009-01-06 10:25:31 UTC (rev 4831)
@@ -9,9 +9,12 @@
import org.jnode.naming.InitialNaming;
public class HfsPlusFileSystemFormatter extends Formatter<HfsPlusFileSystem> {
-
- protected HfsPlusFileSystemFormatter() {
+
+ private HFSPlusParams params;
+
+ public HfsPlusFileSystemFormatter(HFSPlusParams params) {
super(new HfsPlusFileSystemType());
+ this.params = params;
}
@Override
@@ -20,7 +23,7 @@
FileSystemService fss = InitialNaming.lookup(FileSystemService.NAME);
HfsPlusFileSystemType type = fss.getFileSystemType(HfsPlusFileSystemType.ID);
HfsPlusFileSystem fs = type.create(device, false);
- fs.create(HfsPlusConstants.OPTIMAL_BLOCK_SIZE);
+ fs.create(params);
return fs;
} catch (NameNotFoundException e) {
throw new FileSystemException(e);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-05 15:09:00 UTC (rev 4830)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-06 10:25:31 UTC (rev 4831)
@@ -35,15 +35,6 @@
private byte[] data;
/**
- * Default constructor for empty volume header.
- */
- public Superblock() {
- super(null);
- data = new byte[SUPERBLOCK_LENGTH];
- log.setLevel(Level.INFO);
- }
-
- /**
* Create the volume header and load information for the file system passed
* as parameter.
*
@@ -53,18 +44,21 @@
* @throws FileSystemException
* If magic number (0X482B) is incorrect or not available.
*/
- public Superblock(final HfsPlusFileSystem fs) throws FileSystemException {
+ public Superblock(final HfsPlusFileSystem fs, boolean create) throws FileSystemException {
super(fs);
log.setLevel(Level.INFO);
+ data = new byte[SUPERBLOCK_LENGTH];
try {
- ByteBuffer b = ByteBuffer.allocate(SUPERBLOCK_LENGTH);
- // skip the first 1024 bytes (boot sector) and read the volume
- // header.
- fs.getApi().read(1024, b);
- data = new byte[SUPERBLOCK_LENGTH];
- System.arraycopy(b.array(), 0, data, 0, SUPERBLOCK_LENGTH);
- if (getMagic() != HFSPLUS_SUPER_MAGIC) {
- throw new FileSystemException("Not hfs+ volume header (" + getMagic() + ": bad magic)");
+ if (!create) {
+ // skip the first 1024 bytes (boot sector) and read the volume
+ // header.
+ ByteBuffer b = ByteBuffer.allocate(SUPERBLOCK_LENGTH);
+ fs.getApi().read(1024, b);
+ data = new byte[SUPERBLOCK_LENGTH];
+ System.arraycopy(b.array(), 0, data, 0, SUPERBLOCK_LENGTH);
+ if (getMagic() != HFSPLUS_SUPER_MAGIC) {
+ throw new FileSystemException("Not hfs+ volume header (" + getMagic() + ": bad magic)");
+ }
}
} catch (IOException e) {
throw new FileSystemException(e);
@@ -74,20 +68,19 @@
/**
* Create a new volume header.
*
- * @param fs
- * @param blockSize
+ * @param params
+ *
* @throws ApiNotFoundException
*/
- public void create(HfsPlusFileSystem fs, int blockSize, boolean journaled) throws IOException,
+ public void create(HFSPlusParams params) throws IOException,
ApiNotFoundException, FileSystemException {
-
- this.fs = fs;
int burnedBlocksBeforeVH = 0;
int burnedBlocksAfterAltVH = 0;
/*
* Volume header is located at sector 2. Blocks before this position must
* be invalidated.
*/
+ int blockSize = params.getBlockSize();
if (blockSize == 512) {
burnedBlocksBeforeVH = 2;
burnedBlocksAfterAltVH = 1;
@@ -106,7 +99,6 @@
// Set attributes.
this.setAttribute(HFSPLUS_VOL_UNMNT_BIT);
this.setLastMountedVersion(0x446534a);
- // TODO Put correct dates.
Calendar now = Calendar.getInstance();
now.setTime(new Date());
int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
@@ -128,13 +120,12 @@
int nextBlock = 0;
// Journal creation
ExtentDescriptor desc = this.getAllocationFile().getExtents()[0];
- if (journaled) {
- int journalSize = 8 * 1024 * 1024;
+ if (params.isJournaled()) {
this.setFileCount(2);
this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
this.setNextCatalogId(this.getNextCatalogId() + 2);
this.setJournalInfoBlock(desc.getStartBlock() + desc.getBlockCount());
- blockUsed = blockUsed + 1 + (journalSize / blockSize);
+ blockUsed = blockUsed + 1 + (params.getJournalSize() / blockSize);
} else {
this.setJournalInfoBlock(0);
nextBlock = desc.getStartBlock() + desc.getBlockCount();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-05 15:09:00 UTC (rev 4830)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-06 10:25:31 UTC (rev 4831)
@@ -2,18 +2,34 @@
import org.jnode.fs.Formatter;
import org.jnode.fs.command.AbstractFormatCommand;
+import org.jnode.fs.hfsplus.HFSPlusParams;
+import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.HfsPlusFileSystem;
+import org.jnode.fs.hfsplus.HfsPlusFileSystemFormatter;
+import org.jnode.shell.syntax.Argument;
+import org.jnode.shell.syntax.StringArgument;
public class FormatHfsPlusCommand extends AbstractFormatCommand<HfsPlusFileSystem> {
+
+ private final StringArgument ARG_VOLUME_NAME =
+ new StringArgument("volumename", Argument.OPTIONAL, "set volume name");
public FormatHfsPlusCommand() {
super("Format a block device with HFS+ filesystem");
+ registerArguments(ARG_VOLUME_NAME);
}
+ public static void main(String[] args) throws Exception {
+ new FormatHfsPlusCommand().execute(args);
+ }
+
@Override
protected Formatter<HfsPlusFileSystem> getFormatter() {
- // TODO implement it.
- return null;
+ HFSPlusParams params = new HFSPlusParams();
+ params.setVolumeName(ARG_VOLUME_NAME.getValue());
+ params.setBlockSize(HfsPlusConstants.OPTIMAL_BLOCK_SIZE);
+ params.setJournaled(false);
+ params.setJournalSize(HfsPlusConstants.DEFAULT_JOURNAL_SIZE);
+ return new HfsPlusFileSystemFormatter(params);
}
-
}
|
|
From: <ga...@us...> - 2009-01-09 13:43:30
|
Revision: 4838
http://jnode.svn.sourceforge.net/jnode/?rev=4838&view=rev
Author: galatnm
Date: 2009-01-09 13:43:21 +0000 (Fri, 09 Jan 2009)
Log Message:
-----------
Complete HFSPlusParams.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-09 12:27:13 UTC (rev 4837)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-09 13:43:21 UTC (rev 4838)
@@ -1,33 +1,260 @@
package org.jnode.fs.hfsplus;
+import org.jnode.fs.FileSystemException;
+
public class HFSPlusParams {
+
+ public static final int MINIMAL_BLOCK_SIZE = 512;
+ public static final int DEFAULT_BLOCK_SIZE = 4096;
+ public static final int OPTIMAL_BLOCK_SIZE = 4096;
+ public static final int DATA_CLUMP_FACTOR = 16;
+ public static final int RESOURCE_CLUMP_FACTOR = 16;
+ public static final int DEFAULT_JOURNAL_SIZE = 8 * 1024 * 1024;
+ public static final int DEFAULT_CATALOG_NODE_SIZE = 8192;
+ public static final int DEFAULT_EXTENT_NODE_SIZE = 4096;
+ public static final int DEFAULT_ATTRIBUTE_NODE_SIZE = 4096;
+
+ private long blockDeviceSize;
+
private String volumeName;
private int blockSize;
+ private int resourceClumpBlocks;
+ private int dataClumpBlocks;
+ private int catalogClumpBlocks;
+ private int extentClumpBlocks;
+ private int attributeClumpBlocks;
+ private int bitmapClumpBlocks;
private boolean journaled;
private int journalSize;
+
+ private int resourceClumpSize;
+ private int dataClumpSize;
+ private int catalogClumpSize;
+ private int catalogNodeSize;
+ private int extentClumpSize;
+ private int extentNodeSize;
+ private int attributeClumpSize;
+ private int attributeNodeSize;
+ private int allocationClumpSize;
+
+ /**
+ * Default constructor.
+ */
+ public HFSPlusParams() {
+ this.catalogNodeSize = DEFAULT_CATALOG_NODE_SIZE;
+ this.extentNodeSize = DEFAULT_EXTENT_NODE_SIZE;
+ }
+
+ /**
+ *
+ * @param blockDeviceSize
+ * @param sectorSize
+ *
+ * @throws FileSystemException
+ *
+ */
+ public void initializeDefaultsValues(long blockDeviceSize, long sectorSize)
+ throws FileSystemException {
+ long clumpSize = 0;
+ this.blockDeviceSize = blockDeviceSize;
+ if (resourceClumpBlocks == 0) {
+ if (blockSize > DEFAULT_BLOCK_SIZE) {
+ clumpSize = round(RESOURCE_CLUMP_FACTOR * DEFAULT_BLOCK_SIZE, blockSize);
+ } else {
+ clumpSize = RESOURCE_CLUMP_FACTOR * blockSize;
+ }
+ } else {
+ clumpSize = clumpSizeCalculation(resourceClumpBlocks);
+ }
+ resourceClumpSize = (int) clumpSize;
+ if (dataClumpBlocks == 0) {
+ if (blockSize > DEFAULT_BLOCK_SIZE) {
+ clumpSize = round(DATA_CLUMP_FACTOR * DEFAULT_BLOCK_SIZE, blockSize);
+ } else {
+ clumpSize = DATA_CLUMP_FACTOR * blockSize;
+ }
+ } else {
+ clumpSize = clumpSizeCalculation(dataClumpBlocks);
+ }
+
+ if (blockSize < OPTIMAL_BLOCK_SIZE || blockDeviceSize < 0x40000000) {
+ catalogNodeSize = 4096;
+ }
+ long sectorCount = blockDeviceSize / sectorSize;
+ if (catalogClumpBlocks == 0) {
+ clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize,
+ sectorCount, true);
+ } else {
+ clumpSize = clumpSizeCalculation(catalogClumpBlocks);
+ if (clumpSize % catalogNodeSize != 0) {
+ throw new FileSystemException(
+ "clump size is not a multiple of node size");
+ }
+ }
+ catalogClumpSize = (int) clumpSize;
+ if (extentClumpBlocks == 0) {
+ clumpSize = getBTreeClumpSize(blockSize, extentNodeSize,
+ sectorCount, false);
+ } else {
+ clumpSize = clumpSizeCalculation(extentClumpBlocks);
+ }
+ extentClumpSize = (int) clumpSize;
+
+ if(attributeClumpBlocks == 0){
+ clumpSize = 0;
+ } else {
+ clumpSize = clumpSizeCalculation(attributeClumpBlocks);
+ if(clumpSize % attributeNodeSize != 0){
+ throw new FileSystemException("clump size is not a multiple of attribute node size");
+ }
+ }
+ attributeClumpSize = (int)clumpSize;
+
+ long totalBlocks = this.getBlockCount();
+ long minClumpSize = this.getBlockCount() >> 3;
+ if ((totalBlocks & 7) == 0) {
+ ++minClumpSize;
+ }
+ if(bitmapClumpBlocks == 0){
+ clumpSize = minClumpSize;
+ } else {
+ clumpSize = clumpSizeCalculation(bitmapClumpBlocks);
+ if(clumpSize < minClumpSize){
+ throw new FileSystemException("bitmap clump size is too small.");
+ }
+ }
+ allocationClumpSize = (int)clumpSize;
+
+ }
+
+ private int[] extentClumpTable = new int[] { 4, 4, 4, 5, 5, 6, 7, 8, 9, 11,
+ 14, 16, 20, 25, 32 };
+ private int[] catalogClumpTable = new int[] { 4, 6, 8, 11, 14, 19, 25, 34,
+ 45, 60, 80, 107, 144, 192, 256 };
+
+ /**
+ *
+ * @param blockSize
+ * @param nodeSize
+ * @param sectors
+ * @param catalog
+ * @return
+ */
+ private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors,
+ boolean catalog) {
+ long clumpSize = 0;
+ if (sectors < 0x200000) {
+ clumpSize = (sectors << 2);
+ if (clumpSize < (8 * nodeSize)) {
+ clumpSize = (8 * nodeSize);
+ }
+ } else {
+ sectors = sectors >> 22;
+ for (int i = 0; sectors != 0 && (i < 14); ++i) {
+ if (catalog) {
+ clumpSize = catalogClumpTable[i] * 1024 * 1024;
+ } else {
+ clumpSize = extentClumpTable[i] * 1024 * 1024;
+ }
+ sectors = sectors >> 1;
+ }
+ }
+
+ return clumpSize;
+ }
+
+ /**
+ *
+ * @param clumpBlocks
+ *
+ * @return
+ */
+ private int clumpSizeCalculation(long clumpBlocks) throws FileSystemException {
+ long clumpSize = clumpBlocks * blockSize;
+ if ((clumpSize & 0XFFFFFFFF00000000L) == 0) {
+ throw new FileSystemException("Too many blocks (" + clumpBlocks + ") for clump size (" + clumpSize +").");
+ }
+ return (int) clumpSize;
+ }
+ private long round(long x, long y){
+ return (((x+y)-1)/y*y);
+ }
+
public String getVolumeName() {
return volumeName;
}
+
public void setVolumeName(String volumeName) {
this.volumeName = volumeName;
}
+
public int getBlockSize() {
return blockSize;
}
+
public void setBlockSize(int blockSize) {
this.blockSize = blockSize;
}
+
public boolean isJournaled() {
return journaled;
}
+
public void setJournaled(boolean journaled) {
this.journaled = journaled;
}
+
public int getJournalSize() {
return journalSize;
}
+
public void setJournalSize(int journalSize) {
this.journalSize = journalSize;
}
+
+ public int getCatalogNodeSize() {
+
+ return catalogNodeSize;
+ }
+
+ public long getBlockCount() {
+ return blockDeviceSize / blockSize;
+ }
+
+ public int getCatalogClumpSize() {
+ return catalogClumpSize;
+ }
+
+ public int getExtentClumpSize() {
+ return extentClumpSize;
+ }
+
+ public int getResourceClumpSize() {
+ return resourceClumpSize;
+ }
+
+ public int getDataClumpSize() {
+ return dataClumpSize;
+ }
+
+ public int getAttributeClumpSize() {
+ return attributeClumpSize;
+ }
+
+ public int getAttributeNodeSize() {
+ return attributeNodeSize;
+ }
+
+ public void setAttributeClumpBlocks(int attributeClumpBlocks) {
+ this.attributeClumpBlocks = attributeClumpBlocks;
+ }
+
+ public int getAllocationClumpSize() {
+ return allocationClumpSize;
+ }
+
+ public void setBitmapClumpBlocks(int bitmapClumpBlocks) {
+ this.bitmapClumpBlocks = bitmapClumpBlocks;
+ }
}
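
Two of the helpers above are easy to misread, so here is the arithmetic spelled out: round(x, y) rounds x up to the next multiple of y, and the minimum allocation-bitmap size is one bit per allocation block, i.e. blockCount / 8 bytes rounded up. Three details may be worth a second look by the author: the bitmap branch increments minClumpSize when (totalBlocks & 7) == 0, the overflow guard in clumpSizeCalculation() throws when the high 32 bits are zero, and attributeNodeSize is never initialized before being used as a divisor. The sketch below only shows the usual rounding intent; it is not the committed behaviour:

    public class ClumpMath {
        // Round x up to the next multiple of y (same formula as round() above).
        static long roundUp(long x, long y) {
            return ((x + y) - 1) / y * y;
        }

        // Minimum bitmap size in bytes: one bit per allocation block, rounded up.
        static long minimumBitmapBytes(long totalBlocks) {
            long bytes = totalBlocks >> 3;
            if ((totalBlocks & 7) != 0) {
                ++bytes;
            }
            return bytes;
        }

        public static void main(String[] args) {
            System.out.println(roundUp(10000, 4096));        // 12288
            System.out.println(minimumBitmapBytes(65536));   // 8192
            System.out.println(minimumBitmapBytes(65537));   // 8193
        }
    }
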
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-09 12:27:13 UTC (rev 4837)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-01-09 13:43:21 UTC (rev 4838)
@@ -33,12 +33,6 @@
public static final byte EK_DATA_FORK = (byte) 0x00;
public static final byte EK_RESOURCE_FORK = (byte) 0xFF;
- public static final int MINIMAL_BLOCK_SIZE = 512;
- public static final int OPTIMAL_BLOCK_SIZE = 4096;
-
- public static final int DATA_CLUMP_FACTOR = 16;
- public static final int RESOURCE_CLUMP_FACTOR = 16;
- public static final int DEFAULT_JOURNAL_SIZE = 8 * 1024 * 1024;
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-09 12:27:13 UTC (rev 4837)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-09 13:43:21 UTC (rev 4838)
@@ -109,11 +109,14 @@
/**
*
+ * @param params
+ *
* @throws FileSystemException
*/
public void create(HFSPlusParams params) throws FileSystemException {
sb = new Superblock(this, true);
try {
+ params.initializeDefaultsValues(this.getApi().getLength(), this.getFSApi().getSectorSize());
sb.create(params);
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-09 12:27:13 UTC (rev 4837)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-09 13:43:21 UTC (rev 4838)
@@ -87,12 +87,7 @@
} else if (blockSize == 1024) {
burnedBlocksBeforeVH = 1;
}
- long size = fs.getApi().getLength();
- long sectorCount = size / fs.getFSApi().getSectorSize();
- long blocks = size / blockSize;
- long allocationClumpSize = getClumpSize(blocks);
- long bitmapBlocks = allocationClumpSize / blockSize;
- long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
+
// Populate volume header.
this.setMagic(HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
this.setVersion(HfsPlusConstants.HFSPLUS_MIN_VERSION);
@@ -110,16 +105,27 @@
this.setFileCount(0);
this.setFolderCount(0);
this.setBlockSize(blockSize);
- this.setTotalBlocks((int) blocks);
- this.setFreeBlocks((int) blocks);
- this.setRsrcClumpSize(HfsPlusConstants.RESOURCE_CLUMP_FACTOR * blockSize);
- this.setDataClumpSize(HfsPlusConstants.DATA_CLUMP_FACTOR * blockSize);
+ this.setTotalBlocks((int) params.getBlockCount());
+ this.setFreeBlocks((int) params.getBlockCount());
+ this.setRsrcClumpSize(params.getResourceClumpSize());
+ this.setDataClumpSize(params.getDataClumpSize());
this.setNextCatalogId(CatalogNodeId.HFSPLUS_FIRSTUSER_CNID.getId());
// Allocation file creation
- initAllocationFile((int) allocationClumpSize, (int) bitmapBlocks, burnedBlocksBeforeVH);
- int nextBlock = 0;
+ long allocationClumpSize = getClumpSize(params.getBlockCount());
+ long bitmapBlocks = allocationClumpSize / blockSize;
+ long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
+ HFSPlusForkData forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(allocationClumpSize);
+ forkdata.setClumpSize((int)allocationClumpSize);
+ forkdata.setTotalBlocks((int)bitmapBlocks);
+ ExtentDescriptor desc = new ExtentDescriptor();
+ desc.setStartBlock(1 + burnedBlocksBeforeVH);
+ desc.setBlockCount(0);
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
// Journal creation
- ExtentDescriptor desc = this.getAllocationFile().getExtents()[0];
+ int nextBlock = 0;
+
if (params.isJournaled()) {
this.setFileCount(2);
this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
@@ -130,103 +136,36 @@
this.setJournalInfoBlock(0);
nextBlock = desc.getStartBlock() + desc.getBlockCount();
}
- blockUsed += initExtents(0, blockSize, nextBlock, (int) sectorCount, blockUsed);
- blockUsed += initCatalog(0, blockSize, nextBlock, (int) sectorCount, blockUsed);
- this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
- this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10
- * (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
- }
-
- /**
- *
- * @param clumpSize
- * @param bitmapBlocks
- * @param burnedBlocksBeforeVH
- * @return
- */
- private void initAllocationFile(int clumpSize, int bitmapBlocks, int burnedBlocksBeforeVH) {
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(clumpSize);
- forkdata.setClumpSize(clumpSize);
- forkdata.setTotalBlocks(bitmapBlocks);
- ExtentDescriptor desc = new ExtentDescriptor();
- desc.setStartBlock(1 + burnedBlocksBeforeVH);
- desc.setBlockCount(0);
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
- }
-
- /**
- *
- * @param extentsClumpBlock
- * @param blockSize
- * @param nextBlock
- * @return
- */
- private long initExtents(int extentsClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed) {
- int extentNodeSize = 4096;
- long clumpSize = 0;
- if (extentsClumpBlock == 0) {
- clumpSize = getBTreeClumpSize(blockSize, extentNodeSize, sectorCount, false);
- } else {
- clumpSize = clumpSizeCalculation(extentsClumpBlock, blockSize);
- }
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(clumpSize);
- forkdata.setClumpSize((int) clumpSize);
- forkdata.setTotalBlocks((int) (clumpSize / blockSize));
- ExtentDescriptor desc = new ExtentDescriptor();
+ // Extent B-Tree initialization
+ forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(params.getExtentClumpSize());
+ forkdata.setClumpSize(params.getExtentClumpSize());
+ forkdata.setTotalBlocks((params.getExtentClumpSize() / blockSize));
+ desc = new ExtentDescriptor();
desc.setStartBlock(nextBlock);
desc.setBlockCount(forkdata.getTotalBlocks());
forkdata.setExtentDescriptor(0, desc);
System.arraycopy(forkdata.getBytes(), 0, data, 192, forkdata.FORK_DATA_LENGTH);
- return blockUsed + forkdata.getTotalBlocks();
+ blockUsed += forkdata.getTotalBlocks();
+ // Catalog B-Tree initialization
+ forkdata = new HFSPlusForkData();
+ forkdata.setTotalSize(params.getCatalogClumpSize());
+ forkdata.setClumpSize(params.getCatalogClumpSize());
+ forkdata.setTotalBlocks(params.getCatalogClumpSize() / blockSize);
+ desc = new ExtentDescriptor();
+ desc.setStartBlock(this.getExtentsFile().getExtents()[0].getStartBlock()
+ + this.getExtentsFile().getExtents()[0].getBlockCount());
+ desc.setBlockCount(forkdata.getTotalBlocks());
+ forkdata.setExtentDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
+ blockUsed += forkdata.getTotalBlocks();
+
+ this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
+ this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10
+ * (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
}
-
+
/**
- *
- * @param extentsClumpBlock
- * @param blockSize
- * @param nextBlock
- * @param sectorCount
- * @param blockUsed
- * @return
- * @throws IOException
- */
- private long initCatalog(int catalogClumpBlock, int blockSize, int nextBlock, int sectorCount, long blockUsed)
- throws FileSystemException {
- int catalogNodeSize = 8192;
- try {
- if (blockSize < HfsPlusConstants.OPTIMAL_BLOCK_SIZE || fs.getApi().getLength() < 0x40000000) {
- catalogNodeSize = 4096;
- }
- long clumpSize = 0;
- if (catalogClumpBlock == 0) {
- clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize, sectorCount, true);
- } else {
- clumpSize = clumpSizeCalculation(catalogClumpBlock, blockSize);
- if (clumpSize % catalogNodeSize != 0) {
- throw new FileSystemException("clump size is not a multiple of node size");
- }
- }
-
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(clumpSize);
- forkdata.setClumpSize((int) clumpSize);
- forkdata.setTotalBlocks((int) (clumpSize / blockSize));
- ExtentDescriptor desc = new ExtentDescriptor();
- desc.setStartBlock(this.getExtentsFile().getExtents()[0].getStartBlock()
- + this.getExtentsFile().getExtents()[0].getBlockCount());
- desc.setBlockCount(forkdata.getTotalBlocks());
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
- return blockUsed + forkdata.getTotalBlocks();
- } catch (IOException e) {
- throw new FileSystemException(e);
- }
- }
-
- /**
* Calculate the number of blocks needed for bitmap.
*
* @param totalBlocks
@@ -246,48 +185,6 @@
return clumpSize;
}
- /**
- *
- * @param blockSize
- * @param nodeSize
- * @param sectors
- * @param catalog
- * @return
- */
-
- private int[] extentClumpTable = new int[] {4, 4, 4, 5, 5, 6, 7, 8, 9, 11, 14, 16, 20, 25, 32 };
- private int[] catalogClumpTable = new int[] {4, 6, 8, 11, 14, 19, 25, 34, 45, 60, 80, 107, 144, 192, 256 };
-
- private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors, boolean catalog) {
- long clumpSize = 0;
- if (sectors < 0x200000) {
- clumpSize = (sectors << 2);
- if (clumpSize < (8 * nodeSize)) {
- clumpSize = (8 * nodeSize);
- }
- } else {
- sectors = sectors >> 22;
- for (int i = 0; sectors != 0 && (i < 14); ++i) {
- if (catalog) {
- clumpSize = catalogClumpTable[i] * 1024 * 1024;
- } else {
- clumpSize = extentClumpTable[i] * 1024 * 1024;
- }
- sectors = sectors >> 1;
- }
- }
-
- return clumpSize;
- }
-
- private int clumpSizeCalculation(long clumpBlocks, int blockSize) {
- long clumpSize = clumpBlocks * blockSize;
- if ((clumpSize & 0XFFFFFFFF00000000L) == 0) {
- // ERROR
- }
- return (int) clumpSize;
- }
-
// Getters/setters
public final int getMagic() {
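
For readers tracking the 112, 192 and 272 offsets in the arraycopy calls of this hunk: each fork-data structure in the volume header is 80 bytes, and the five special-file forks are laid out back to back, which also explains why getStartupFile() elsewhere in this class reads at offset 432. A sketch of that arithmetic (constant names are illustrative):

    public class VolumeHeaderForkOffsets {
        static final int FORK_DATA_LENGTH = 80;           // size of one fork-data structure
        static final int ALLOCATION_FORK_OFFSET = 112;    // allocation (bitmap) file
        static final int EXTENTS_FORK_OFFSET = ALLOCATION_FORK_OFFSET + FORK_DATA_LENGTH;    // 192
        static final int CATALOG_FORK_OFFSET = EXTENTS_FORK_OFFSET + FORK_DATA_LENGTH;       // 272
        static final int ATTRIBUTES_FORK_OFFSET = CATALOG_FORK_OFFSET + FORK_DATA_LENGTH;    // 352
        static final int STARTUP_FORK_OFFSET = ATTRIBUTES_FORK_OFFSET + FORK_DATA_LENGTH;    // 432

        public static void main(String[] args) {
            System.out.println(EXTENTS_FORK_OFFSET + " " + CATALOG_FORK_OFFSET
                    + " " + STARTUP_FORK_OFFSET);
        }
    }
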
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-09 12:27:13 UTC (rev 4837)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-09 13:43:21 UTC (rev 4838)
@@ -27,9 +27,9 @@
protected Formatter<HfsPlusFileSystem> getFormatter() {
HFSPlusParams params = new HFSPlusParams();
params.setVolumeName(ARG_VOLUME_NAME.getValue());
- params.setBlockSize(HfsPlusConstants.OPTIMAL_BLOCK_SIZE);
+ params.setBlockSize(params.OPTIMAL_BLOCK_SIZE);
params.setJournaled(false);
- params.setJournalSize(HfsPlusConstants.DEFAULT_JOURNAL_SIZE);
+ params.setJournalSize(params.DEFAULT_JOURNAL_SIZE);
return new HfsPlusFileSystemFormatter(params);
}
}
|
|
From: <ga...@us...> - 2009-01-09 15:56:27
|
Revision: 4840
http://jnode.svn.sourceforge.net/jnode/?rev=4840&view=rev
Author: galatnm
Date: 2009-01-09 15:56:22 +0000 (Fri, 09 Jan 2009)
Log Message:
-----------
Add creation of new catalog object.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-09 13:48:41 UTC (rev 4839)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-09 15:56:22 UTC (rev 4840)
@@ -37,7 +37,8 @@
throw new ReadOnlyFileSystemException();
}
Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
- CatalogFolder newFolder = new CatalogFolder(new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ CatalogFolder newFolder = new CatalogFolder();
+ newFolder.setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
log.debug("New catalog folder :\n" + newFolder.toString());
CatalogKey key = new CatalogKey(this.folder.getFolderId(), new HFSUnicodeString(name));
log.debug("New catalog key :\n" + key.toString());
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-09 13:48:41 UTC (rev 4839)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-09 15:56:22 UTC (rev 4840)
@@ -6,6 +6,7 @@
import java.util.List;
import org.apache.log4j.Logger;
+import org.jnode.fs.hfsplus.HFSPlusParams;
import org.jnode.fs.hfsplus.HFSUnicodeString;
import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.HfsPlusFileSystem;
@@ -22,7 +23,46 @@
private NodeDescriptor btnd;
private BTHeaderRecord bthr;
private int firstNodeOffset;
-
+
+ /**
+ * Create new Catalog
+ *
+ * @param params
+ */
+ public Catalog(HFSPlusParams params){
+ btnd = new NodeDescriptor();
+ btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
+ btnd.setHeight(0);
+ btnd.setRecordCount(3);
+ //
+ bthr = new BTHeaderRecord();
+ bthr.setTreeDepth(1);
+ bthr.setRootNode(1);
+ bthr.settFirstLeafNode(1);
+ bthr.setLastLeafNode(1);
+ bthr.setLeafRecords(params.isJournaled() ? 6 : 2);
+ bthr.setNodeSize(params.getCatalogNodeSize());
+ bthr.setTotalNodes(params.getCatalogClumpSize()/params.getCatalogNodeSize());
+ bthr.setFreeNodes(bthr.getTotalNodes() - 2);
+ bthr.setClumpSize(params.getCatalogClumpSize());
+ //TODO initialize attributes, max key length and key comparison type.
+ // Root directory
+ CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID,new HFSUnicodeString(params.getVolumeName()));
+ CatalogFolder folder = new CatalogFolder();
+ folder.setFolderId(CatalogNodeId.HFSPLUS_ROOT_CNID);
+ folder.setValence(params.isJournaled() ? 2 : 0);
+ //TODO creation date, content modification date, text encoding and access rights.
+ ck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID,new HFSUnicodeString(""));
+ CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
+ }
+
+ /**
+ * Create Catalog based on catalog file that exists on the file system.
+ *
+ * @param fs
+ *
+ * @throws IOException
+ */
public Catalog(final HfsPlusFileSystem fs) throws IOException {
log.debug("Initialize catalog\n");
this.fs = fs;
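The header record built in the new Catalog(HFSPlusParams) constructor is sized by a simple division; the figures below are assumptions chosen only to make the arithmetic concrete, not the formatter's defaults.

public class CatalogHeaderSizingExample {
    public static void main(String[] args) {
        int catalogClumpSize = 4 * 1024 * 1024;  // assumed 4 MiB catalog clump
        int catalogNodeSize = 8192;              // assumed 8 KiB catalog nodes
        int totalNodes = catalogClumpSize / catalogNodeSize;  // 512 nodes fit in the clump
        int freeNodes = totalNodes - 2;          // header node plus the initial leaf are in use
        System.out.println(totalNodes + " total nodes, " + freeNodes + " free");
    }
}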
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-09 13:48:41 UTC (rev 4839)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-09 15:56:22 UTC (rev 4840)
@@ -18,17 +18,19 @@
* @param folderId
*
*/
- public CatalogFolder(CatalogNodeId folderId) {
+ public CatalogFolder() {
data = new byte[88];
BigEndian.setInt16(data, 0, HfsPlusConstants.RECORD_TYPE_FOLDER);
- BigEndian.setInt32(data, 4, 0);
- System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
}
public final int getRecordType() {
return BigEndian.getInt16(data, 0);
}
+ public final void setValence(int valence) {
+ BigEndian.setInt32(data, 4, valence);
+ }
+
public final int getValence() {
return BigEndian.getInt32(data, 4);
}
@@ -36,6 +38,10 @@
public final CatalogNodeId getFolderId() {
return new CatalogNodeId(data, 8);
}
+
+ public final void setFolderId(CatalogNodeId folderId){
+ System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
+ }
public final int getCreateDate() {
return BigEndian.getInt32(data, 12);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-09 13:48:41 UTC (rev 4839)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-09 15:56:22 UTC (rev 4840)
@@ -6,6 +6,10 @@
public static final int BT_HEADER_RECORD_LENGTH = 106;
private byte[] data;
+ public BTHeaderRecord() {
+ data = new byte[BT_HEADER_RECORD_LENGTH];
+ }
+
public BTHeaderRecord(final byte[] src) {
data = new byte[BT_HEADER_RECORD_LENGTH];
System.arraycopy(src, 0, data, 0, BT_HEADER_RECORD_LENGTH);
@@ -15,26 +19,98 @@
return BigEndian.getInt16(data, 0);
}
+ public void setTreeDepth(int depth){
+ BigEndian.setInt16(data, 0, depth);
+ }
+
public final int getRootNode() {
return BigEndian.getInt32(data, 2);
}
+
+ public void setRootNode(int node){
+ BigEndian.setInt32(data, 2, node);
+ }
public final int getLeafRecords() {
return BigEndian.getInt32(data, 6);
}
+
+ public void setLeafRecords(int count){
+ BigEndian.setInt32(data, 6, count);
+ }
public final int getFirstLeafNode() {
return BigEndian.getInt32(data, 10);
}
+ public void settFirstLeafNode(int node){
+ BigEndian.setInt32(data, 10, node);
+ }
+
public final int getLastLeafNode() {
return BigEndian.getInt32(data, 14);
}
+
+ public void setLastLeafNode(int node){
+ BigEndian.setInt32(data, 14, node);
+ }
public final int getNodeSize() {
return BigEndian.getInt16(data, 18);
}
+ public void setNodeSize(int size){
+ BigEndian.setInt16(data, 18, size);
+ }
+
+ public int getMaxKeyLength(){
+ return BigEndian.getInt16(data, 20);
+ }
+
+ public void setMaxKeyLength(int length){
+ BigEndian.setInt16(data, 20, length);
+ }
+
+ public int getTotalNodes(){
+ return BigEndian.getInt32(data, 22);
+ }
+
+ public void setTotalNodes(int count){
+ BigEndian.setInt32(data, 22, count);
+ }
+
+ public int getFreeNodes(){
+ return BigEndian.getInt32(data, 26);
+ }
+
+ public void setFreeNodes(int count){
+ BigEndian.setInt32(data, 26, count);
+ }
+
+ public int getClumpSize(){
+ return BigEndian.getInt32(data, 32);
+ }
+
+ public void setClumpSize(int size){
+ BigEndian.setInt32(data, 32, size);
+ }
+
+ public int getTreeType(){
+ return BigEndian.getInt8(data, 36);
+ }
+
+ public void setTreeType(int type){
+ BigEndian.setInt8(data, 36, type);
+ }
+
+ public int getKeyCompareType(){
+ return BigEndian.getInt8(data, 37);
+ }
+
+ public void setKeyCompareType(int type){
+ BigEndian.setInt8(data, 38, type);
+ }
+
public final String toString() {
return ("Root node: " + getRootNode() + "\n" + "First leaf: " + getFirstLeafNode() + "\n" + "Last leaf: "
+ getLastLeafNode() + "\n" + "node size: " + getNodeSize() + "\n");
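All of the new accessors read and write fixed offsets inside the 106-byte header record through org.jnode.util.BigEndian. The snippet below is a standalone illustration of that pattern, not the JNode class itself.

public class BigEndianFieldSketch {
    static void setInt16(byte[] d, int off, int v) {
        d[off] = (byte) (v >>> 8);   // most significant byte first
        d[off + 1] = (byte) v;
    }

    static int getInt16(byte[] d, int off) {
        return ((d[off] & 0xFF) << 8) | (d[off + 1] & 0xFF);
    }

    public static void main(String[] args) {
        byte[] header = new byte[106];      // BT_HEADER_RECORD_LENGTH
        setInt16(header, 18, 8192);         // the node size field lives at offset 18
        System.out.println(getInt16(header, 18));  // prints 8192
    }
}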
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-09 13:48:41 UTC (rev 4839)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-09 15:56:22 UTC (rev 4840)
@@ -1,11 +1,16 @@
package org.jnode.fs.hfsplus.tree;
+import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
public class NodeDescriptor {
public static final int BT_NODE_DESCRIPTOR_LENGTH = 14;
private byte[] data;
+ public NodeDescriptor(){
+ data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
+ }
+
public NodeDescriptor(final byte[] src) {
data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
System.arraycopy(src, 0, data, 0, BT_NODE_DESCRIPTOR_LENGTH);
@@ -22,14 +27,26 @@
public final int getKind() {
return BigEndian.getInt8(data, 8);
}
+
+ public void setKind(int kind){
+ BigEndian.setInt8(data, 8, kind);
+ }
public final int getHeight() {
return BigEndian.getInt8(data, 9);
}
+ public void setHeight(int height){
+ BigEndian.setInt8(data, 9, height);
+ }
+
public final int getNumRecords() {
return BigEndian.getInt16(data, 10);
}
+
+ public void setRecordCount(int count){
+ BigEndian.setInt16(data, 10, count);
+ }
public final String toString() {
return ("FLink: " + getFLink() + "\n" + "BLink: " + getBLink() + "\n" + "Kind: " + getKind() + "\n"
|
|
From: <ga...@us...> - 2009-01-10 11:26:38
|
Revision: 4844
http://jnode.svn.sourceforge.net/jnode/?rev=4844&view=rev
Author: galatnm
Date: 2009-01-10 11:26:31 +0000 (Sat, 10 Jan 2009)
Log Message:
-----------
Checkstyle fixes.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -53,8 +53,7 @@
* @throws FileSystemException
*
*/
- public void initializeDefaultsValues(long blockDeviceSize, long sectorSize)
- throws FileSystemException {
+ public void initializeDefaultsValues(long blockDeviceSize, long sectorSize) throws FileSystemException {
long clumpSize = 0;
this.blockDeviceSize = blockDeviceSize;
if (resourceClumpBlocks == 0) {
@@ -82,55 +81,50 @@
}
long sectorCount = blockDeviceSize / sectorSize;
if (catalogClumpBlocks == 0) {
- clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize,
- sectorCount, true);
+ clumpSize = getBTreeClumpSize(blockSize, catalogNodeSize, sectorCount, true);
} else {
clumpSize = clumpSizeCalculation(catalogClumpBlocks);
if (clumpSize % catalogNodeSize != 0) {
- throw new FileSystemException(
- "clump size is not a multiple of node size");
+ throw new FileSystemException("clump size is not a multiple of node size");
}
}
catalogClumpSize = (int) clumpSize;
if (extentClumpBlocks == 0) {
- clumpSize = getBTreeClumpSize(blockSize, extentNodeSize,
- sectorCount, false);
+ clumpSize = getBTreeClumpSize(blockSize, extentNodeSize, sectorCount, false);
} else {
clumpSize = clumpSizeCalculation(extentClumpBlocks);
}
extentClumpSize = (int) clumpSize;
-
- if(attributeClumpBlocks == 0){
+
+ if (attributeClumpBlocks == 0) {
clumpSize = 0;
} else {
clumpSize = clumpSizeCalculation(attributeClumpBlocks);
- if(clumpSize % attributeNodeSize != 0){
+ if (clumpSize % attributeNodeSize != 0) {
throw new FileSystemException("clump size is not a multiple of attribute node size");
}
}
- attributeClumpSize = (int)clumpSize;
-
+ attributeClumpSize = (int) clumpSize;
+
long totalBlocks = this.getBlockCount();
long minClumpSize = this.getBlockCount() >> 3;
if ((totalBlocks & 7) == 0) {
++minClumpSize;
}
- if(bitmapClumpBlocks == 0){
+ if (bitmapClumpBlocks == 0) {
clumpSize = minClumpSize;
} else {
clumpSize = clumpSizeCalculation(bitmapClumpBlocks);
- if(clumpSize < minClumpSize){
+ if (clumpSize < minClumpSize) {
throw new FileSystemException("bitmap clump size is too small.");
}
}
- allocationClumpSize = (int)clumpSize;
-
+ allocationClumpSize = (int) clumpSize;
+
}
- private int[] extentClumpTable = new int[] { 4, 4, 4, 5, 5, 6, 7, 8, 9, 11,
- 14, 16, 20, 25, 32 };
- private int[] catalogClumpTable = new int[] { 4, 6, 8, 11, 14, 19, 25, 34,
- 45, 60, 80, 107, 144, 192, 256 };
+ private int[] extentClumpTable = new int[] {4, 4, 4, 5, 5, 6, 7, 8, 9, 11, 14, 16, 20, 25, 32 };
+ private int[] catalogClumpTable = new int[] {4, 6, 8, 11, 14, 19, 25, 34, 45, 60, 80, 107, 144, 192, 256 };
/**
*
@@ -140,8 +134,7 @@
* @param catalog
* @return
*/
- private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors,
- boolean catalog) {
+ private long getBTreeClumpSize(int blockSize, int nodeSize, long sectors, boolean catalog) {
long clumpSize = 0;
if (sectors < 0x200000) {
clumpSize = (sectors << 2);
@@ -172,13 +165,13 @@
private int clumpSizeCalculation(long clumpBlocks) throws FileSystemException {
long clumpSize = clumpBlocks * blockSize;
if ((clumpSize & 0XFFFFFFFF00000000L) == 0) {
- throw new FileSystemException("Too many blocks (" + clumpBlocks + ") for clump size (" + clumpSize +").");
+ throw new FileSystemException("Too many blocks (" + clumpBlocks + ") for clump size (" + clumpSize + ").");
}
return (int) clumpSize;
}
-
- private long round(long x, long y){
- return (((x+y)-1)/y*y);
+
+ private long round(long x, long y) {
+ return (((x + y) - 1) / y * y);
}
public String getVolumeName() {
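The round(x, y) helper kept above rounds x up to the next multiple of y via truncating integer division; a quick check with arbitrary values:

public class RoundUpExample {
    static long round(long x, long y) {
        return (((x + y) - 1) / y) * y;  // truncating division makes this a round-up
    }

    public static void main(String[] args) {
        System.out.println(round(1000, 512));  // 1024
        System.out.println(round(1024, 512));  // 1024, already a multiple
    }
}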
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -72,8 +72,7 @@
*
* @throws ApiNotFoundException
*/
- public void create(HFSPlusParams params) throws IOException,
- ApiNotFoundException, FileSystemException {
+ public void create(HFSPlusParams params) throws IOException, ApiNotFoundException, FileSystemException {
int burnedBlocksBeforeVH = 0;
int burnedBlocksAfterAltVH = 0;
/*
@@ -87,7 +86,7 @@
} else if (blockSize == 1024) {
burnedBlocksBeforeVH = 1;
}
-
+
// Populate volume header.
this.setMagic(HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
this.setVersion(HfsPlusConstants.HFSPLUS_MIN_VERSION);
@@ -116,8 +115,8 @@
long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
HFSPlusForkData forkdata = new HFSPlusForkData();
forkdata.setTotalSize(allocationClumpSize);
- forkdata.setClumpSize((int)allocationClumpSize);
- forkdata.setTotalBlocks((int)bitmapBlocks);
+ forkdata.setClumpSize((int) allocationClumpSize);
+ forkdata.setTotalBlocks((int) bitmapBlocks);
ExtentDescriptor desc = new ExtentDescriptor();
desc.setStartBlock(1 + burnedBlocksBeforeVH);
desc.setBlockCount(0);
@@ -136,7 +135,7 @@
this.setJournalInfoBlock(0);
nextBlock = desc.getStartBlock() + desc.getBlockCount();
}
- // Extent B-Tree initialization
+ // Extent B-Tree initialization
forkdata = new HFSPlusForkData();
forkdata.setTotalSize(params.getExtentClumpSize());
forkdata.setClumpSize(params.getExtentClumpSize());
@@ -159,12 +158,12 @@
forkdata.setExtentDescriptor(0, desc);
System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
blockUsed += forkdata.getTotalBlocks();
-
+
this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10
* (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
}
-
+
/**
* Calculate the number of blocks needed for bitmap.
*
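The allocation bitmap referred to here spends one bit per allocation block. A rough standalone sketch of the sizing; the exact rounding the formatter applies may differ.

public class BitmapSizeSketch {
    static long bitmapBlocks(long totalBlocks, int blockSize) {
        long bitmapBytes = (totalBlocks + 7) / 8;          // one bit per allocation block
        return (bitmapBytes + blockSize - 1) / blockSize;  // rounded up to whole blocks
    }

    public static void main(String[] args) {
        // 262144 blocks of 4 KiB need 32768 bytes of bitmap, i.e. 8 blocks.
        System.out.println(bitmapBlocks(262144, 4096));
    }
}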
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -23,13 +23,13 @@
private NodeDescriptor btnd;
private BTHeaderRecord bthr;
private int firstNodeOffset;
-
+
/**
* Create new Catalog
*
* @param params
*/
- public Catalog(HFSPlusParams params){
+ public Catalog(HFSPlusParams params) {
btnd = new NodeDescriptor();
btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
btnd.setHeight(0);
@@ -42,20 +42,21 @@
bthr.setLastLeafNode(1);
bthr.setLeafRecords(params.isJournaled() ? 6 : 2);
bthr.setNodeSize(params.getCatalogNodeSize());
- bthr.setTotalNodes(params.getCatalogClumpSize()/params.getCatalogNodeSize());
+ bthr.setTotalNodes(params.getCatalogClumpSize() / params.getCatalogNodeSize());
bthr.setFreeNodes(bthr.getTotalNodes() - 2);
bthr.setClumpSize(params.getCatalogClumpSize());
- //TODO initialize attributes, max key length and key comparison type.
+ // TODO initialize attributes, max key length and key comparison type.
// Root directory
- CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID,new HFSUnicodeString(params.getVolumeName()));
+ CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, new HFSUnicodeString(params.getVolumeName()));
CatalogFolder folder = new CatalogFolder();
folder.setFolderId(CatalogNodeId.HFSPLUS_ROOT_CNID);
folder.setValence(params.isJournaled() ? 2 : 0);
- //TODO creation date, content modification date, text encoding and access rights.
- ck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID,new HFSUnicodeString(""));
- CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
+ // TODO creation date, content modification date, text encoding and access rights.
+ ck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
+ CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
+ CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
}
-
+
/**
* Create Catalog based on catalog file that exists on the file system.
*
@@ -91,7 +92,8 @@
* @return
* @throws IOException
*/
- public final LeafRecord getRecord(final CatalogNodeId parentID) throws IOException {
+ public final LeafRecord getRecord(final CatalogNodeId parentID)
+ throws IOException {
int currentOffset = firstNodeOffset;
int currentNodeNumber = getBTHeaderRecord().getRootNode();
int currentNodeSize = getBTHeaderRecord().getNodeSize();
@@ -126,7 +128,8 @@
* @return
* @throws IOException
*/
- public final LeafRecord[] getRecords(final CatalogNodeId parentID) throws IOException {
+ public final LeafRecord[] getRecords(final CatalogNodeId parentID)
+ throws IOException {
return getRecords(parentID, getBTHeaderRecord().getRootNode());
}
@@ -137,7 +140,8 @@
* @return
* @throws IOException
*/
- public final LeafRecord[] getRecords(final CatalogNodeId parentID, final int nodeNumber) throws IOException {
+ public final LeafRecord[] getRecords(final CatalogNodeId parentID, final int nodeNumber)
+ throws IOException {
try {
int currentOffset = firstNodeOffset;
int currentNodeNumber = nodeNumber;
@@ -159,8 +163,7 @@
}
return lfList.toArray(new LeafRecord[lfList.size()]);
} else if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(),
- currentNodeSize);
+ CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
LeafRecord[] lr = leaf.findAll(parentID);
log.debug("Leaf record size: " + lr.length);
return lr;
@@ -182,8 +185,8 @@
* @return
* @throws IOException
*/
- public final LeafRecord getRecord(final CatalogNodeId parentID,
- final HFSUnicodeString nodeName) throws IOException {
+ public final LeafRecord getRecord(final CatalogNodeId parentID, final HFSUnicodeString nodeName)
+ throws IOException {
int currentOffset = firstNodeOffset;
int currentNodeNumber = getBTHeaderRecord().getRootNode();
int currentNodeSize = getBTHeaderRecord().getNodeSize();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -30,7 +30,7 @@
public final void setValence(int valence) {
BigEndian.setInt32(data, 4, valence);
}
-
+
public final int getValence() {
return BigEndian.getInt32(data, 4);
}
@@ -38,8 +38,8 @@
public final CatalogNodeId getFolderId() {
return new CatalogNodeId(data, 8);
}
-
- public final void setFolderId(CatalogNodeId folderId){
+
+ public final void setFolderId(CatalogNodeId folderId) {
System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -51,7 +51,6 @@
} else if (key.getParentId().getId() == parentId.getId()) {
result.addLast(rec);
}
-
}
if (largestMatchingKey != null) {
@@ -64,8 +63,8 @@
IndexRecord largestMatchingRecord = null;
for (int i = 0; i < records.length; ++i) {
if (records[i].getKey().compareTo(key) <= 0
- && (largestMatchingRecord == null ||
- records[i].getKey().compareTo(largestMatchingRecord.getKey()) > 0)) {
+ && (largestMatchingRecord == null
+ || records[i].getKey().compareTo(largestMatchingRecord.getKey()) > 0)) {
largestMatchingRecord = records[i];
}
}
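The reformatted loop above keeps the index record with the largest key that is still less than or equal to the search key, which is the usual B-tree descent step. A generic sketch of the same selection, with illustrative types:

public class FloorKeyExample {
    static Integer largestNotAbove(int[] keys, int search) {
        Integer best = null;
        for (int k : keys) {
            if (k <= search && (best == null || k > best)) {
                best = k;  // same shape as the largestMatchingRecord loop
            }
        }
        return best;
    }

    public static void main(String[] args) {
        // With child keys 2, 5 and 9, a search for 7 descends through the child keyed by 5.
        System.out.println(largestNotAbove(new int[] {2, 5, 9}, 7));  // 5
    }
}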
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -3,26 +3,26 @@
import org.jnode.fs.Formatter;
import org.jnode.fs.command.AbstractFormatCommand;
import org.jnode.fs.hfsplus.HFSPlusParams;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.HfsPlusFileSystem;
import org.jnode.fs.hfsplus.HfsPlusFileSystemFormatter;
import org.jnode.shell.syntax.Argument;
import org.jnode.shell.syntax.StringArgument;
public class FormatHfsPlusCommand extends AbstractFormatCommand<HfsPlusFileSystem> {
-
- private final StringArgument ARG_VOLUME_NAME =
- new StringArgument("volumename", Argument.OPTIONAL, "set volume name");
+ private final StringArgument ARG_VOLUME_NAME = new StringArgument("volumename", Argument.OPTIONAL,
+ "set volume name");
+
public FormatHfsPlusCommand() {
super("Format a block device with HFS+ filesystem");
registerArguments(ARG_VOLUME_NAME);
}
- public static void main(String[] args) throws Exception {
+ public static void main(String[] args)
+ throws Exception {
new FormatHfsPlusCommand().execute(args);
}
-
+
@Override
protected Formatter<HfsPlusFileSystem> getFormatter() {
HFSPlusParams params = new HFSPlusParams();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -9,7 +9,7 @@
public BTHeaderRecord() {
data = new byte[BT_HEADER_RECORD_LENGTH];
}
-
+
public BTHeaderRecord(final byte[] src) {
data = new byte[BT_HEADER_RECORD_LENGTH];
System.arraycopy(src, 0, data, 0, BT_HEADER_RECORD_LENGTH);
@@ -19,23 +19,23 @@
return BigEndian.getInt16(data, 0);
}
- public void setTreeDepth(int depth){
+ public void setTreeDepth(int depth) {
BigEndian.setInt16(data, 0, depth);
}
-
+
public final int getRootNode() {
return BigEndian.getInt32(data, 2);
}
-
- public void setRootNode(int node){
+
+ public void setRootNode(int node) {
BigEndian.setInt32(data, 2, node);
}
public final int getLeafRecords() {
return BigEndian.getInt32(data, 6);
}
-
- public void setLeafRecords(int count){
+
+ public void setLeafRecords(int count) {
BigEndian.setInt32(data, 6, count);
}
@@ -43,15 +43,15 @@
return BigEndian.getInt32(data, 10);
}
- public void settFirstLeafNode(int node){
+ public void settFirstLeafNode(int node) {
BigEndian.setInt32(data, 10, node);
}
-
+
public final int getLastLeafNode() {
return BigEndian.getInt32(data, 14);
}
-
- public void setLastLeafNode(int node){
+
+ public void setLastLeafNode(int node) {
BigEndian.setInt32(data, 14, node);
}
@@ -59,58 +59,58 @@
return BigEndian.getInt16(data, 18);
}
- public void setNodeSize(int size){
+ public void setNodeSize(int size) {
BigEndian.setInt16(data, 18, size);
}
-
- public int getMaxKeyLength(){
+
+ public int getMaxKeyLength() {
return BigEndian.getInt16(data, 20);
}
-
- public void setMaxKeyLength(int length){
+
+ public void setMaxKeyLength(int length) {
BigEndian.setInt16(data, 20, length);
}
-
- public int getTotalNodes(){
+
+ public int getTotalNodes() {
return BigEndian.getInt32(data, 22);
}
-
- public void setTotalNodes(int count){
+
+ public void setTotalNodes(int count) {
BigEndian.setInt32(data, 22, count);
}
-
- public int getFreeNodes(){
+
+ public int getFreeNodes() {
return BigEndian.getInt32(data, 26);
}
-
- public void setFreeNodes(int count){
+
+ public void setFreeNodes(int count) {
BigEndian.setInt32(data, 26, count);
}
-
- public int getClumpSize(){
+
+ public int getClumpSize() {
return BigEndian.getInt32(data, 32);
}
-
- public void setClumpSize(int size){
+
+ public void setClumpSize(int size) {
BigEndian.setInt32(data, 32, size);
}
-
- public int getTreeType(){
+
+ public int getTreeType() {
return BigEndian.getInt8(data, 36);
}
-
- public void setTreeType(int type){
+
+ public void setTreeType(int type) {
BigEndian.setInt8(data, 36, type);
}
-
- public int getKeyCompareType(){
+
+ public int getKeyCompareType() {
return BigEndian.getInt8(data, 37);
}
-
- public void setKeyCompareType(int type){
+
+ public void setKeyCompareType(int type) {
BigEndian.setInt8(data, 38, type);
}
-
+
public final String toString() {
return ("Root node: " + getRootNode() + "\n" + "First leaf: " + getFirstLeafNode() + "\n" + "Last leaf: "
+ getLastLeafNode() + "\n" + "node size: " + getNodeSize() + "\n");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-10 07:30:50 UTC (rev 4843)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-10 11:26:31 UTC (rev 4844)
@@ -1,16 +1,15 @@
package org.jnode.fs.hfsplus.tree;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
public class NodeDescriptor {
public static final int BT_NODE_DESCRIPTOR_LENGTH = 14;
private byte[] data;
- public NodeDescriptor(){
+ public NodeDescriptor() {
data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
}
-
+
public NodeDescriptor(final byte[] src) {
data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
System.arraycopy(src, 0, data, 0, BT_NODE_DESCRIPTOR_LENGTH);
@@ -27,8 +26,8 @@
public final int getKind() {
return BigEndian.getInt8(data, 8);
}
-
- public void setKind(int kind){
+
+ public void setKind(int kind) {
BigEndian.setInt8(data, 8, kind);
}
@@ -36,15 +35,15 @@
return BigEndian.getInt8(data, 9);
}
- public void setHeight(int height){
+ public void setHeight(int height) {
BigEndian.setInt8(data, 9, height);
}
-
+
public final int getNumRecords() {
return BigEndian.getInt16(data, 10);
}
-
- public void setRecordCount(int count){
+
+ public void setRecordCount(int count) {
BigEndian.setInt16(data, 10, count);
}
|
|
From: <ga...@us...> - 2009-01-13 16:05:30
|
Revision: 4858
http://jnode.svn.sourceforge.net/jnode/?rev=4858&view=rev
Author: galatnm
Date: 2009-01-13 16:05:19 +0000 (Tue, 13 Jan 2009)
Log Message:
-----------
Add extents overflow file definition and complete BTree header record to access attributes.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
Added Paths:
-----------
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-12 15:07:59 UTC (rev 4857)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-01-13 16:05:19 UTC (rev 4858)
@@ -250,4 +250,8 @@
public void setBitmapClumpBlocks(int bitmapClumpBlocks) {
this.bitmapClumpBlocks = bitmapClumpBlocks;
}
+
+ public int getExtentNodeSize() {
+ return extentNodeSize;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-12 15:07:59 UTC (rev 4857)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-13 16:05:19 UTC (rev 4858)
@@ -118,6 +118,7 @@
try {
params.initializeDefaultsValues(this.getApi().getLength(), this.getFSApi().getSectorSize());
sb.create(params);
+ log.debug("Write volume header to disk.");
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
} catch (IOException e) {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-12 15:07:59 UTC (rev 4857)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-13 16:05:19 UTC (rev 4858)
@@ -6,6 +6,9 @@
import org.jnode.util.BigEndian;
public class CatalogKey extends AbstractKey {
+
+ public final static int MAXIMUM_KEY_LENGTH = 516;
+
private int keyLength;
private CatalogNodeId parentID;
private HFSUnicodeString nodeName;
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-01-13 16:05:19 UTC (rev 4858)
@@ -0,0 +1,31 @@
+package org.jnode.fs.hfsplus.extent;
+
+import org.jnode.fs.hfsplus.HFSPlusParams;
+import org.jnode.fs.hfsplus.HfsPlusConstants;
+import org.jnode.fs.hfsplus.tree.BTHeaderRecord;
+import org.jnode.fs.hfsplus.tree.NodeDescriptor;
+
+public class Extent {
+ private NodeDescriptor btnd;
+ private BTHeaderRecord bthr;
+
+ public Extent(HFSPlusParams params) {
+ btnd = new NodeDescriptor();
+ btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
+ btnd.setHeight(0);
+ btnd.setRecordCount(3);
+ //
+ bthr = new BTHeaderRecord();
+ bthr.setTreeDepth(0);
+ bthr.setRootNode(0);
+ bthr.settFirstLeafNode(0);
+ bthr.setLastLeafNode(0);
+ bthr.setLeafRecords(0);
+ bthr.setNodeSize(params.getExtentNodeSize());
+ bthr.setTotalNodes(params.getExtentClumpSize()
+ / params.getExtentNodeSize());
+ bthr.setFreeNodes(bthr.getTotalNodes() - 1);
+ bthr.setClumpSize(params.getExtentClumpSize());
+ bthr.setMaxKeyLength(ExtentKey.KEY_LENGTH);
+ }
+}
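Unlike the catalog tree created earlier, the extents overflow tree starts completely empty, so only the header node itself is accounted for (hence totalNodes - 1 rather than - 2). The numbers below are assumptions picked to make the division concrete.

public class ExtentHeaderSizingExample {
    public static void main(String[] args) {
        int extentClumpSize = 4 * 1024 * 1024;  // assumed 4 MiB extents clump
        int extentNodeSize = 4096;              // assumed 4 KiB extents nodes
        int totalNodes = extentClumpSize / extentNodeSize;  // 1024
        int freeNodes = totalNodes - 1;         // only the header node is in use
        System.out.println(totalNodes + " total nodes, " + freeNodes + " free");
    }
}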
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-01-12 15:07:59 UTC (rev 4857)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-01-13 16:05:19 UTC (rev 4858)
@@ -6,7 +6,7 @@
import org.jnode.util.BigEndian;
public class ExtentKey extends AbstractKey {
-
+
public static final byte DATA_FORK = (byte) 0x00;
public static final byte RESOURCE_FORK = (byte) 0xFF;
public static final int KEY_LENGTH = 12;
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-12 15:07:59 UTC (rev 4857)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-13 16:05:19 UTC (rev 4858)
@@ -111,8 +111,17 @@
BigEndian.setInt8(data, 38, type);
}
+ public long getAttributes() {
+ return BigEndian.getInt32(data, 39);
+ }
+
+ public void setAttributes(int attrs) {
+ BigEndian.setInt32(data, 39, attrs);
+ }
+
public final String toString() {
- return ("Root node: " + getRootNode() + "\n" + "First leaf: " + getFirstLeafNode() + "\n" + "Last leaf: "
+ return ("Root node: " + getRootNode() + "\n" + "First leaf: "
+ + getFirstLeafNode() + "\n" + "Last leaf: "
+ getLastLeafNode() + "\n" + "node size: " + getNodeSize() + "\n");
}
}
|
|
From: <ga...@us...> - 2009-01-14 11:01:34
|
Revision: 4859
http://jnode.svn.sourceforge.net/jnode/?rev=4859&view=rev
Author: galatnm
Date: 2009-01-14 11:01:26 +0000 (Wed, 14 Jan 2009)
Log Message:
-----------
Correctly set attributes in volume header.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-13 16:05:19 UTC (rev 4858)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-14 11:01:26 UTC (rev 4859)
@@ -32,7 +32,7 @@
* @throws FileSystemException
*/
public HfsPlusFileSystem(final Device device, final boolean readOnly, final HfsPlusFileSystemType type)
- throws FileSystemException {
+ throws FileSystemException {
super(device, readOnly, type);
}
@@ -85,20 +85,28 @@
log.debug("Root entry : No record found.");
return null;
}
-
+ /*
+ * (non-Javadoc)
+ * @see org.jnode.fs.FileSystem#getFreeSpace()
+ */
public final long getFreeSpace() {
return sb.getFreeBlocks() * sb.getBlockSize();
}
-
+ /*
+ * (non-Javadoc)
+ * @see org.jnode.fs.FileSystem#getTotalSpace()
+ */
public final long getTotalSpace() {
return sb.getTotalBlocks() * sb.getBlockSize();
}
-
+ /*
+ * (non-Javadoc)
+ * @see org.jnode.fs.FileSystem#getUsableSpace()
+ */
public final long getUsableSpace() {
- // TODO Auto-generated method stub
return -1;
}
-
+
public final Catalog getCatalog() {
return catalog;
}
@@ -118,6 +126,12 @@
try {
params.initializeDefaultsValues(this.getApi().getLength(), this.getFSApi().getSectorSize());
sb.create(params);
+ //---
+ long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks();
+ if(sb.getBlockSize() != 512) volumeBlockUsed++;
+ //---
+ log.debug("Write allocation bitmap bits to disk.");
+ //---
log.debug("Write volume header to disk.");
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-13 16:05:19 UTC (rev 4858)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-14 11:01:26 UTC (rev 4859)
@@ -210,7 +210,7 @@
}
public final void setAttribute(final int attributeMaskBit) {
- BigEndian.setInt32(data, 4, (getAttributes() >> attributeMaskBit) | 0x1);
+ BigEndian.setInt32(data, 4, getAttributes() | (1 << attributeMaskBit));
}
//
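A quick check of why the new setAttribute() expression sets the requested flag while the old one could only ever touch bit 0; bit 8 below is an arbitrary example, not a specific HFS+ attribute.

public class AttributeBitExample {
    public static void main(String[] args) {
        int attributes = 0;
        int bit = 8;
        int oldResult = (attributes >> bit) | 0x1;  // 1: shifts the value and sets bit 0 only
        int newResult = attributes | (1 << bit);    // 256: sets bit 8, leaves the rest alone
        System.out.println(oldResult + " vs " + newResult);
    }
}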
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-13 16:05:19 UTC (rev 4858)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-14 11:01:26 UTC (rev 4859)
@@ -119,6 +119,10 @@
BigEndian.setInt32(data, 39, attrs);
}
+ public byte[] getBytes() {
+ return data;
+ }
+
public final String toString() {
return ("Root node: " + getRootNode() + "\n" + "First leaf: "
+ getFirstLeafNode() + "\n" + "Last leaf: "
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-13 16:05:19 UTC (rev 4858)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-14 11:01:26 UTC (rev 4859)
@@ -47,6 +47,10 @@
BigEndian.setInt16(data, 10, count);
}
+ public byte[] getBytes() {
+ return data;
+ }
+
public final String toString() {
return ("FLink: " + getFLink() + "\n" + "BLink: " + getBLink() + "\n" + "Kind: " + getKind() + "\n"
+ "height: " + getHeight() + "\n" + "#rec: " + getNumRecords() + "\n");
|
|
From: <ga...@us...> - 2009-01-16 15:23:39
|
Revision: 4874
http://jnode.svn.sourceforge.net/jnode/?rev=4874&view=rev
Author: galatnm
Date: 2009-01-16 15:23:29 +0000 (Fri, 16 Jan 2009)
Log Message:
-----------
Updates
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-16 15:16:27 UTC (rev 4873)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-16 15:23:29 UTC (rev 4874)
@@ -1,6 +1,8 @@
package org.jnode.fs.hfsplus;
import java.io.IOException;
+import java.util.Calendar;
+import java.util.Date;
import java.util.LinkedList;
import java.util.List;
@@ -10,6 +12,7 @@
import org.jnode.fs.hfsplus.catalog.CatalogFolder;
import org.jnode.fs.hfsplus.catalog.CatalogKey;
import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
+import org.jnode.fs.hfsplus.catalog.CatalogThread;
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.spi.AbstractFSDirectory;
import org.jnode.fs.spi.FSEntryTable;
@@ -37,16 +40,32 @@
throw new ReadOnlyFileSystemException();
}
Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
+
+ Calendar now = Calendar.getInstance();
+ now.setTime(new Date());
+ int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+
+ HFSUnicodeString dirName = new HFSUnicodeString(name);
+ CatalogThread thread = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,this.folder.getFolderId(),dirName);
+
CatalogFolder newFolder = new CatalogFolder();
newFolder.setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ newFolder.setCreateDate(macDate);
+ newFolder.setContentModDate(macDate);
+ newFolder.setAttrModDate(macDate);
log.debug("New catalog folder :\n" + newFolder.toString());
- CatalogKey key = new CatalogKey(this.folder.getFolderId(), new HFSUnicodeString(name));
+
+ CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
log.debug("New catalog key :\n" + key.toString());
+
+
LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
log.debug("New record folder :\n" + folderRecord.toString());
+
HFSPlusEntry newEntry = new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, folderRecord);
volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
log.debug("New volume header :\n" + volumeHeader.toString());
+
return newEntry;
}
@@ -56,7 +75,9 @@
}
public synchronized void remove(String name) throws IOException {
- throw new ReadOnlyFileSystemException();
+ if (!canWrite()) {
+ throw new ReadOnlyFileSystemException();
+ }
}
@Override
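HFSUtils.getDate() above is handed seconds since the Unix epoch; HFS+ itself stores dates as seconds since 1904-01-01 GMT, so presumably a conversion along these lines happens inside it. The sketch below is standalone, not the JNode implementation; 2082844800 is the 1904-to-1970 offset in seconds.

public class HfsDateSketch {
    // 1904-01-01 to 1970-01-01 is 24107 days, i.e. 2082844800 seconds.
    private static final long DIFF_1904_TO_1970 = 2082844800L;

    static long toHfsSeconds(long unixSeconds) {
        return unixSeconds + DIFF_1904_TO_1970;  // stored on disk as an unsigned 32-bit value
    }

    public static void main(String[] args) {
        System.out.println(toHfsSeconds(System.currentTimeMillis() / 1000));
    }
}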
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-16 15:16:27 UTC (rev 4873)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-16 15:23:29 UTC (rev 4874)
@@ -127,8 +127,7 @@
params.initializeDefaultsValues(this.getApi().getLength(), this.getFSApi().getSectorSize());
sb.create(params);
//---
- long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks();
- if(sb.getBlockSize() != 512) volumeBlockUsed++;
+ long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks() - ((sb.getBlockSize() == 512)?2:1);
//---
log.debug("Write allocation bitmap bits to disk.");
//---
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-16 15:16:27 UTC (rev 4873)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-16 15:23:29 UTC (rev 4874)
@@ -47,13 +47,25 @@
return BigEndian.getInt32(data, 12);
}
+ public void setCreateDate(int time){
+ BigEndian.setInt32(data, 12, time);
+ }
+
public final int getContentModDate() {
return BigEndian.getInt32(data, 16);
}
+
+ public void setContentModDate(int time){
+ BigEndian.setInt32(data, 16, time);
+ }
public final int getAttrModDate() {
return BigEndian.getInt32(data, 20);
}
+
+ public void setAttrModDate(int time){
+ BigEndian.setInt32(data, 20, time);
+ }
public byte[] getBytes() {
return data;
@@ -66,9 +78,9 @@
s.append("Folder ID: ").append(getFolderId().getId()).append("\n");
s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy"))
- .append("\n");
+ .append("\n");
s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n");
+ "\n");
return s.toString();
}
}
|
|
From: <ga...@us...> - 2009-01-21 13:22:10
|
Revision: 4895
http://jnode.svn.sourceforge.net/jnode/?rev=4895&view=rev
Author: galatnm
Date: 2009-01-21 11:45:39 +0000 (Wed, 21 Jan 2009)
Log Message:
-----------
Style fixes.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-21 11:22:57 UTC (rev 4894)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-21 11:45:39 UTC (rev 4895)
@@ -35,42 +35,50 @@
}
@Override
- protected final FSEntry createDirectoryEntry(final String name) throws IOException {
+ protected final FSEntry createDirectoryEntry(final String name)
+ throws IOException {
if (!canWrite()) {
throw new ReadOnlyFileSystemException();
}
- Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
-
+ Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem())
+ .getVolumeHeader();
+
Calendar now = Calendar.getInstance();
now.setTime(new Date());
- int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
-
+ int macDate = (int) HFSUtils
+ .getDate(now.getTimeInMillis() / 1000, true);
+
HFSUnicodeString dirName = new HFSUnicodeString(name);
- CatalogThread thread = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,this.folder.getFolderId(),dirName);
-
+ CatalogThread thread = new CatalogThread(
+ HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
+ .getFolderId(), dirName);
+
CatalogFolder newFolder = new CatalogFolder();
- newFolder.setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ newFolder
+ .setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
newFolder.setCreateDate(macDate);
newFolder.setContentModDate(macDate);
newFolder.setAttrModDate(macDate);
log.debug("New catalog folder :\n" + newFolder.toString());
-
+
CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
log.debug("New catalog key :\n" + key.toString());
-
-
+
LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
log.debug("New record folder :\n" + folderRecord.toString());
-
- HFSPlusEntry newEntry = new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, folderRecord);
+
+ HFSPlusEntry newEntry = new HFSPlusEntry(
+ (HfsPlusFileSystem) getFileSystem(), null, this, name,
+ folderRecord);
volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
log.debug("New volume header :\n" + volumeHeader.toString());
-
+
return newEntry;
}
@Override
- protected final FSEntry createFileEntry(final String name) throws IOException {
+ protected final FSEntry createFileEntry(final String name)
+ throws IOException {
throw new ReadOnlyFileSystemException();
}
@@ -83,12 +91,16 @@
@Override
protected final FSEntryTable readEntries() throws IOException {
List<FSEntry> pathList = new LinkedList<FSEntry>();
- LeafRecord[] records = ((HfsPlusFileSystem) getFileSystem()).getCatalog().getRecords(folder.getFolderId());
+ LeafRecord[] records = ((HfsPlusFileSystem) getFileSystem())
+ .getCatalog().getRecords(folder.getFolderId());
for (LeafRecord rec : records) {
if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER
|| rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
- String name = ((CatalogKey) rec.getKey()).getNodeName().getUnicodeString();
- HFSPlusEntry e = new HFSPlusEntry((HfsPlusFileSystem) getFileSystem(), null, this, name, rec);
+ String name = ((CatalogKey) rec.getKey()).getNodeName()
+ .getUnicodeString();
+ HFSPlusEntry e = new HFSPlusEntry(
+ (HfsPlusFileSystem) getFileSystem(), null, this, name,
+ rec);
pathList.add(e);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-21 11:22:57 UTC (rev 4894)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-21 11:45:39 UTC (rev 4895)
@@ -31,8 +31,8 @@
* @param type
* @throws FileSystemException
*/
- public HfsPlusFileSystem(final Device device, final boolean readOnly, final HfsPlusFileSystemType type)
- throws FileSystemException {
+ public HfsPlusFileSystem(final Device device, final boolean readOnly,
+ final HfsPlusFileSystemType type) throws FileSystemException {
super(device, readOnly, type);
}
@@ -45,15 +45,19 @@
log.debug("Superblock informations:\n" + sb.toString());
if (!sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_UNMNT_BIT)) {
- log.info(getDevice().getId() + " Filesystem has not been cleanly unmounted, mounting it readonly");
+ log
+ .info(getDevice().getId()
+ + " Filesystem has not been cleanly unmounted, mounting it readonly");
setReadOnly(true);
}
if (sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_SOFTLOCK_BIT)) {
- log.info(getDevice().getId() + " Filesystem is marked locked, mounting it readonly");
+ log.info(getDevice().getId()
+ + " Filesystem is marked locked, mounting it readonly");
setReadOnly(true);
}
if (sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_JOURNALED_BIT)) {
- log.info(getDevice().getId()
+ log
+ .info(getDevice().getId()
+ " Filesystem is journaled, write access is not supported. Mounting it readonly");
setReadOnly(true);
}
@@ -65,7 +69,8 @@
}
@Override
- protected final FSDirectory createDirectory(final FSEntry entry) throws IOException {
+ protected final FSDirectory createDirectory(final FSEntry entry)
+ throws IOException {
HFSPlusEntry e = (HFSPlusEntry) entry;
return new HFSPlusDirectory(e);
}
@@ -85,28 +90,34 @@
log.debug("Root entry : No record found.");
return null;
}
+
/*
* (non-Javadoc)
+ *
* @see org.jnode.fs.FileSystem#getFreeSpace()
*/
public final long getFreeSpace() {
return sb.getFreeBlocks() * sb.getBlockSize();
}
+
/*
* (non-Javadoc)
+ *
* @see org.jnode.fs.FileSystem#getTotalSpace()
*/
public final long getTotalSpace() {
return sb.getTotalBlocks() * sb.getBlockSize();
}
+
/*
* (non-Javadoc)
+ *
* @see org.jnode.fs.FileSystem#getUsableSpace()
*/
public final long getUsableSpace() {
return -1;
}
-
+
public final Catalog getCatalog() {
return catalog;
}
@@ -124,13 +135,15 @@
public void create(HFSPlusParams params) throws FileSystemException {
sb = new Superblock(this, true);
try {
- params.initializeDefaultsValues(this.getApi().getLength(), this.getFSApi().getSectorSize());
+ params.initializeDefaultsValues(this.getApi().getLength(), this
+ .getFSApi().getSectorSize());
sb.create(params);
- //---
- long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks() - ((sb.getBlockSize() == 512)?2:1);
- //---
+ // ---
+ long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks()
+ - ((sb.getBlockSize() == 512) ? 2 : 1);
+ // ---
log.debug("Write allocation bitmap bits to disk.");
- //---
+ // ---
log.debug("Write volume header to disk.");
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-21 11:22:57 UTC (rev 4894)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-21 11:45:39 UTC (rev 4895)
@@ -40,30 +40,31 @@
}
public final void setFolderId(CatalogNodeId folderId) {
- System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
+ System.arraycopy(folderId.getBytes(), 0, data, 8,
+ folderId.getBytes().length);
}
public final int getCreateDate() {
return BigEndian.getInt32(data, 12);
}
- public void setCreateDate(int time){
+ public void setCreateDate(int time) {
BigEndian.setInt32(data, 12, time);
}
public final int getContentModDate() {
return BigEndian.getInt32(data, 16);
}
-
- public void setContentModDate(int time){
+
+ public void setContentModDate(int time) {
BigEndian.setInt32(data, 16, time);
}
public final int getAttrModDate() {
return BigEndian.getInt32(data, 20);
}
-
- public void setAttrModDate(int time){
+
+ public void setAttrModDate(int time) {
BigEndian.setInt32(data, 20, time);
}
@@ -76,11 +77,16 @@
s.append("Record type: ").append(getRecordType()).append("\n");
s.append("Valence: ").append(getValence()).append("\n");
s.append("Folder ID: ").append(getFolderId().getId()).append("\n");
- s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy"))
- .append("\n");
- s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n");
+ s.append("Creation Date :").append(
+ HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy"))
+ .append("\n");
+ s.append("Content Mod Date :").append(
+ HFSUtils.printDate(getContentModDate(),
+ "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Attr Mod Date :")
+ .append(
+ HFSUtils.printDate(getAttrModDate(),
+ "EEE MMM d HH:mm:ss yyyy")).append("\n");
return s.toString();
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-21 11:22:57 UTC (rev 4894)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-21 11:45:39 UTC (rev 4895)
@@ -6,9 +6,9 @@
import org.jnode.util.BigEndian;
public class CatalogKey extends AbstractKey {
-
- public final static int MAXIMUM_KEY_LENGTH = 516;
-
+
+ public static final int MAXIMUM_KEY_LENGTH = 516;
+
private int keyLength;
private CatalogNodeId parentID;
private HFSUnicodeString nodeName;
@@ -61,7 +61,8 @@
if (o instanceof CatalogKey) {
CatalogKey ck = (CatalogKey) o;
if (getParentId().getId() == ck.getParentId().getId()) {
- return nodeName.getUnicodeString().compareTo(ck.getNodeName().getUnicodeString());
+ return nodeName.getUnicodeString().compareTo(
+ ck.getNodeName().getUnicodeString());
} else if (getParentId().getId() < ck.getParentId().getId()) {
return -1;
} else {
@@ -76,7 +77,10 @@
StringBuffer s = new StringBuffer();
s.append("Key length: ").append(getKeyLength()).append(" ");
s.append("Parent ID: ").append(getParentId().getId()).append(" ");
- s.append("Node name: ").append((getNodeName() != null) ? getNodeName().getUnicodeString() : "");
+ s.append("Node name: ")
+ .append(
+ (getNodeName() != null) ? getNodeName()
+ .getUnicodeString() : "");
return s.toString();
}
}
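compareTo() above orders catalog keys by parent CNID first and only falls back to the Unicode name for ties, so all children of one folder sit next to each other in the tree. A tiny model of that ordering with made-up class names and values:

import java.util.Arrays;
import java.util.Comparator;

public class CatalogKeyOrderExample {
    static final class Key {
        final long parentId;
        final String name;
        Key(long parentId, String name) { this.parentId = parentId; this.name = name; }
        public String toString() { return "(" + parentId + ", " + name + ")"; }
    }

    public static void main(String[] args) {
        Key[] keys = {new Key(17, "a"), new Key(2, "music"), new Key(2, "docs")};
        Arrays.sort(keys, new Comparator<Key>() {
            public int compare(Key a, Key b) {
                if (a.parentId != b.parentId) {
                    return a.parentId < b.parentId ? -1 : 1;  // parent CNID first
                }
                return a.name.compareTo(b.name);              // then the node name
            }
        });
        // Prints [(2, docs), (2, music), (17, a)]: children of one folder stay
        // adjacent, which is what getRecords(parentID) relies on.
        System.out.println(Arrays.toString(keys));
    }
}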
|
|
From: <ga...@us...> - 2009-01-21 13:34:33
|
Revision: 4900
http://jnode.svn.sourceforge.net/jnode/?rev=4900&view=rev
Author: galatnm
Date: 2009-01-21 13:34:29 +0000 (Wed, 21 Jan 2009)
Log Message:
-----------
Updates.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-21 13:33:54 UTC (rev 4899)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-21 13:34:29 UTC (rev 4900)
@@ -144,6 +144,10 @@
// ---
log.debug("Write allocation bitmap bits to disk.");
// ---
+ log.debug("Write Catalog to disk.");
+ long offset = sb.getCatalogFile().getExtents()[0].getStartBlock() * sb.getBlockSize();
+ Catalog catalog = new Catalog(params);
+ this.getApi().write(offset, catalog.getBytes());
log.debug("Write volume header to disk.");
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
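The two write offsets above come from the volume layout: the catalog file begins at its first extent's start block times the allocation block size, and the HFS+ volume header always sits 1024 bytes into the device. Made-up numbers:

public class WriteOffsetExample {
    public static void main(String[] args) {
        int blockSize = 4096;         // assumed allocation block size
        int catalogStartBlock = 129;  // assumed first extent of the catalog file
        long catalogOffset = (long) catalogStartBlock * blockSize;  // 528384 bytes
        long volumeHeaderOffset = 1024;  // fixed by the HFS+ layout
        System.out.println(catalogOffset + " and " + volumeHeaderOffset);
    }
}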
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-21 13:33:54 UTC (rev 4899)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-21 13:34:29 UTC (rev 4900)
@@ -23,6 +23,8 @@
private NodeDescriptor btnd;
private BTHeaderRecord bthr;
private int firstNodeOffset;
+
+ private ByteBuffer buffer;
/**
* Create new Catalog
@@ -30,10 +32,11 @@
* @param params
*/
public Catalog(HFSPlusParams params) {
- btnd = new NodeDescriptor();
- btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
- btnd.setHeight(0);
- btnd.setRecordCount(3);
+ NodeDescriptor nd = new NodeDescriptor();
+ nd.setKind(HfsPlusConstants.BT_HEADER_NODE);
+ nd.setHeight(0);
+ nd.setRecordCount(3);
+ btnd = nd;
//
bthr = new BTHeaderRecord();
bthr.setTreeDepth(1);
@@ -47,16 +50,31 @@
bthr.setClumpSize(params.getCatalogClumpSize());
// TODO initialize attributes, max key length and key comparaison type.
// Root directory
- CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, new HFSUnicodeString(params.getVolumeName()));
+ nd = new NodeDescriptor();
+ nd.setKind(HfsPlusConstants.BT_LEAF_NODE);
+ nd.setHeight(1);
+ nd.setRecordCount(params.isJournaled() ? 6 : 2);
+ HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
+ CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, name);
CatalogFolder folder = new CatalogFolder();
folder.setFolderId(CatalogNodeId.HFSPLUS_ROOT_CNID);
folder.setValence(params.isJournaled() ? 2 : 0);
// TODO creation date, content modification date, text encoding and access rights.
- ck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
+ ck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, name);
CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
+ buffer = ByteBuffer.allocate(134);
+ buffer.put(btnd.getBytes());
+ buffer.position(14);
+ buffer.put(bthr.getBytes());
+ buffer.position(120);
+ buffer.put(nd.getBytes());
}
+ public ByteBuffer getBytes() {
+ return buffer;
+ }
+
/**
* Create Catalog based on meta-data that exist on the file system.
*
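The constructor above lays the header node out by hand: the node descriptor occupies the first 14 bytes, the B-tree header record the next 106, and the following structure therefore starts at byte 120 (14 + 106), which is why the buffer position is moved explicitly before each put. A standalone sketch of that packing pattern with java.nio.ByteBuffer (the 14 and 106 byte sizes come from the diff; the payload contents are placeholders):

    import java.nio.ByteBuffer;

    // Sketch: packing a B-tree header node as done in Catalog(HFSPlusParams).
    // Descriptor = 14 bytes at offset 0, header record = 106 bytes at offset 14.
    public class HeaderNodePackingSketch {
        public static void main(String[] args) {
            byte[] descriptor = new byte[14];    // stands in for NodeDescriptor.getBytes()
            byte[] headerRecord = new byte[106]; // stands in for BTHeaderRecord.getBytes()

            ByteBuffer buffer = ByteBuffer.allocate(14 + 106);
            buffer.put(descriptor);       // bytes 0..13
            buffer.position(14);          // explicit jump, mirroring the commit
            buffer.put(headerRecord);     // bytes 14..119
            buffer.rewind();

            System.out.println("packed node size = " + buffer.remaining());
        }
    }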
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-21 13:33:54 UTC (rev 4899)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/command/FormatHfsPlusCommand.java 2009-01-21 13:34:29 UTC (rev 4900)
@@ -10,7 +10,7 @@
public class FormatHfsPlusCommand extends AbstractFormatCommand<HfsPlusFileSystem> {
- private final StringArgument ARG_VOLUME_NAME = new StringArgument("volumename", Argument.OPTIONAL,
+ private final StringArgument ARG_VOLUME_NAME = new StringArgument("volumeName", Argument.OPTIONAL,
"set volume name");
public FormatHfsPlusCommand() {
@@ -26,7 +26,7 @@
@Override
protected Formatter<HfsPlusFileSystem> getFormatter() {
HFSPlusParams params = new HFSPlusParams();
- params.setVolumeName(ARG_VOLUME_NAME.getValue());
+ params.setVolumeName((ARG_VOLUME_NAME.isSet()) ? ARG_VOLUME_NAME.getValue() : "untitled");
params.setBlockSize(params.OPTIMAL_BLOCK_SIZE);
params.setJournaled(false);
params.setJournalSize(params.DEFAULT_JOURNAL_SIZE);
|
|
From: <ga...@us...> - 2009-01-29 16:26:43
|
Revision: 4928
http://jnode.svn.sourceforge.net/jnode/?rev=4928&view=rev
Author: galatnm
Date: 2009-01-29 16:26:33 +0000 (Thu, 29 Jan 2009)
Log Message:
-----------
Build the catalog root leaf node with its folder and thread records during formatting, fix the allocation bitmap extent length, and address B-tree nodes relative to the catalog header node offset.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -147,8 +147,9 @@
log.debug("Write Catalog to disk.");
long offset = sb.getCatalogFile().getExtents()[0].getStartBlock() * sb.getBlockSize();
Catalog catalog = new Catalog(params);
- this.getApi().write(offset, catalog.getBytes());
+ this.getApi().write(offset, catalog.getBytes());
log.debug("Write volume header to disk.");
+ log.debug(sb.toString());
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
} catch (IOException e) {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -119,7 +119,7 @@
forkdata.setTotalBlocks((int) bitmapBlocks);
ExtentDescriptor desc = new ExtentDescriptor();
desc.setStartBlock(1 + burnedBlocksBeforeVH);
- desc.setBlockCount(0);
+ desc.setBlockCount((int) bitmapBlocks);
forkdata.setExtentDescriptor(0, desc);
System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
// Journal creation
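The corrected extent above has to cover the whole allocation bitmap, that is one bit per allocation block rounded up to whole blocks. Assuming bitmapBlocks is derived that way (the exact formula is not shown in this hunk), the arithmetic looks like this:

    // Sketch: sizing the allocation bitmap that the fixed extent now covers.
    // One bit per allocation block, rounded up to whole blocks. The volume
    // geometry below is an assumed example, not taken from the commit.
    public class AllocationBitmapSketch {
        static long bitmapBlocks(long totalBlocks, int blockSize) {
            long bitsPerBlock = blockSize * 8L;
            return (totalBlocks + bitsPerBlock - 1) / bitsPerBlock; // ceiling division
        }

        public static void main(String[] args) {
            long totalBlocks = 262144;  // assumed: 1 GiB volume with 4 KiB blocks
            int blockSize = 4096;
            System.out.println("bitmap blocks = " + bitmapBlocks(totalBlocks, blockSize));
        }
    }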
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -16,27 +16,62 @@
import org.jnode.fs.hfsplus.tree.IndexRecord;
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
+import org.jnode.util.ByteBufferUtils;
public class Catalog {
+
private final Logger log = Logger.getLogger(getClass());
private HfsPlusFileSystem fs;
+ /** B-Tree node descriptor */
private NodeDescriptor btnd;
+ /** B-Tree Header record */
private BTHeaderRecord bthr;
- private int firstNodeOffset;
-
+ /** */
+ private int catalogHeaderNodeOffset;
+ /** */
private ByteBuffer buffer;
/**
+ * Create Catalog based on meta-data that exist on the file system.
+ *
+ * @param fs HFS+ file system that contains catalog information.
+ *
+ * @throws IOException
+ */
+ public Catalog(final HfsPlusFileSystem fs) throws IOException {
+ log.debug("Load B-Tree catalog file.\n");
+ this.fs = fs;
+ Superblock sb = fs.getVolumeHeader();
+ ExtentDescriptor firstExtent = sb.getCatalogFile().getExtents()[0];
+ catalogHeaderNodeOffset = firstExtent.getStartBlock() * sb.getBlockSize();
+ if (firstExtent.getStartBlock() != 0 && firstExtent.getBlockCount() != 0) {
+ buffer = ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH
+ + BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
+ fs.getApi().read(catalogHeaderNodeOffset, buffer);
+ buffer.rewind();
+ byte[] data = ByteBufferUtils.toArray(buffer);
+ btnd = new NodeDescriptor(data, 0);
+ bthr = new BTHeaderRecord(data, NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH);
+
+ }
+ }
+
+ /**
* Create new Catalog
*
* @param params
*/
public Catalog(HFSPlusParams params) {
- NodeDescriptor nd = new NodeDescriptor();
- nd.setKind(HfsPlusConstants.BT_HEADER_NODE);
- nd.setHeight(0);
- nd.setRecordCount(3);
- btnd = nd;
+ log.debug("Create B-Tree catalog file.\n");
+
+ int nodeSize = params.getCatalogNodeSize();
+
+ int bufferLength = 0;
+ btnd = new NodeDescriptor();
+ btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
+ btnd.setHeight(0);
+ btnd.setRecordCount(3);
+ bufferLength += NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
//
bthr = new BTHeaderRecord();
bthr.setTreeDepth(1);
@@ -44,98 +79,74 @@
bthr.settFirstLeafNode(1);
bthr.setLastLeafNode(1);
bthr.setLeafRecords(params.isJournaled() ? 6 : 2);
- bthr.setNodeSize(params.getCatalogNodeSize());
+ bthr.setNodeSize(nodeSize);
bthr.setTotalNodes(params.getCatalogClumpSize() / params.getCatalogNodeSize());
bthr.setFreeNodes(bthr.getTotalNodes() - 2);
bthr.setClumpSize(params.getCatalogClumpSize());
// TODO initialize attributes, max key length and key comparison type.
- // Root directory
- nd = new NodeDescriptor();
+ bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
+
+ int rootNodePosition = bthr.getRootNode() * bthr.getNodeSize();
+ bufferLength += (rootNodePosition - bufferLength);
+ //Create node descriptor
+ NodeDescriptor nd = new NodeDescriptor();
nd.setKind(HfsPlusConstants.BT_LEAF_NODE);
nd.setHeight(1);
nd.setRecordCount(params.isJournaled() ? 6 : 2);
+ CatalogLeafNode clf = new CatalogLeafNode(nd, bthr.getNodeSize());
+ // First record (folder)
+ int offset = rootNodePosition + NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, name);
CatalogFolder folder = new CatalogFolder();
folder.setFolderId(CatalogNodeId.HFSPLUS_ROOT_CNID);
folder.setValence(params.isJournaled() ? 2 : 0);
- // TODO creation date, content modification date, text encoding and access rights.
- ck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, name);
+ LeafRecord record = new LeafRecord(ck, folder.getBytes());
+ clf.addRecord(record, offset, 0);
+ // Second record (thread)
+ offset = offset + ck.getKeyLength() + CatalogFolder.CATALOG_FOLDER_SIZE;
+ CatalogKey tck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, name);
CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
- buffer = ByteBuffer.allocate(134);
+ record = new LeafRecord(tck, ct.getBytes());
+ clf.addRecord(record, offset, 1);
+ buffer = ByteBuffer.allocate(bufferLength + bthr.getNodeSize());
buffer.put(btnd.getBytes());
- buffer.position(14);
buffer.put(bthr.getBytes());
- buffer.position(120);
- buffer.put(nd.getBytes());
+ buffer.position(rootNodePosition);
+ buffer.put(clf.getBytes());
+ buffer.rewind();
}
-
- public ByteBuffer getBytes() {
- return buffer;
- }
/**
- * Create Catalog based on meta-data that exist on the file system.
*
- * @param fs HFS+ file system that contains catalog informations.
- *
- * @throws IOException
- */
- public Catalog(final HfsPlusFileSystem fs) throws IOException {
- log.debug("Initialize catalog\n");
- this.fs = fs;
- Superblock sb = fs.getVolumeHeader();
- int offset = 0;
- // Get btree header record and node descriptor.
- ExtentDescriptor firstExtent = sb.getCatalogFile().getExtents()[0];
- if (firstExtent.getStartBlock() != 0 && firstExtent.getBlockCount() != 0) {
- ByteBuffer buffer = ByteBuffer.allocate(14);
- firstNodeOffset = firstExtent.getStartBlock() * sb.getBlockSize();
- fs.getApi().read(offset, buffer);
- btnd = new NodeDescriptor(buffer.array());
- log.debug("BTNodeDescriptor informations :\n" + btnd.toString());
- offset = firstNodeOffset + 14;
- buffer = ByteBuffer.allocate(106);
- fs.getApi().read(offset, buffer);
- bthr = new BTHeaderRecord(buffer.array());
- log.debug("BTHeaderRec informations :\n" + bthr.toString());
- offset = offset + 106;
- }
- }
-
- /**
- *
* @param parentID
* @return
* @throws IOException
*/
public final LeafRecord getRecord(final CatalogNodeId parentID)
throws IOException {
- int currentOffset = firstNodeOffset;
- int currentNodeNumber = getBTHeaderRecord().getRootNode();
- int currentNodeSize = getBTHeaderRecord().getNodeSize();
- ByteBuffer nodeData = ByteBuffer.allocate(currentNodeSize);
- fs.getApi().read(currentOffset + (currentNodeNumber * currentNodeSize), nodeData);
- NodeDescriptor currentBtnd = new NodeDescriptor(nodeData.array());
- log.debug("Current node descriptor:\n" + currentBtnd.toString());
+ int nodeSize = getBTHeaderRecord().getNodeSize();
+ ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
+ fs.getApi().read(catalogHeaderNodeOffset + (getBTHeaderRecord().getRootNode() * nodeSize), nodeData);
+ nodeData.rewind();
+ byte[] data = ByteBufferUtils.toArray(nodeData);
+ NodeDescriptor currentBtnd = new NodeDescriptor(data, 0);
+ int currentOffset = 0;
while (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, nodeData.array(), currentNodeSize);
+ CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, data, nodeSize);
IndexRecord record = currentIndexNode.find(parentID);
- currentNodeNumber = record.getIndex();
- currentOffset = firstNodeOffset + (currentNodeNumber * currentNodeSize);
- log.debug("Current node number: " + currentNodeNumber + " currentOffset:" + currentOffset + "("
- + currentNodeSize + ")");
- nodeData = ByteBuffer.allocate(currentNodeSize);
+ currentOffset = catalogHeaderNodeOffset + (record.getIndex() * nodeSize);
+ nodeData = ByteBuffer.allocate(nodeSize);
fs.getApi().read(currentOffset, nodeData);
- currentBtnd = new NodeDescriptor(nodeData.array());
- log.debug("Current node descriptor:\n" + currentBtnd.toString());
+ nodeData.rewind();
+ data = ByteBufferUtils.toArray(nodeData);
+ currentBtnd = new NodeDescriptor(data, 0);
}
LeafRecord lr = null;
if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
+ CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, data, nodeSize);
lr = leaf.find(parentID);
- log.debug("Leaf record :\n" + lr.toString());
}
return lr;
}
@@ -168,13 +179,11 @@
public final LeafRecord[] getRecords(final CatalogNodeId parentID, final int nodeNumber)
throws IOException {
try {
- int currentOffset = firstNodeOffset;
int currentNodeNumber = nodeNumber;
int currentNodeSize = getBTHeaderRecord().getNodeSize();
ByteBuffer nodeData = ByteBuffer.allocate(currentNodeSize);
- fs.getApi().read(currentOffset + (currentNodeNumber * currentNodeSize), nodeData);
- NodeDescriptor currentBtnd = new NodeDescriptor(nodeData.array());
- log.debug("Current node descriptor:\n" + currentBtnd.toString());
+ fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * currentNodeSize), nodeData);
+ NodeDescriptor currentBtnd = new NodeDescriptor(nodeData.array(), 0);
if (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, nodeData.array(),
currentNodeSize);
@@ -190,11 +199,9 @@
} else if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
LeafRecord[] lr = leaf.findAll(parentID);
- log.debug("Leaf record size: " + lr.length);
return lr;
} else {
return null;
- //
}
} catch (Exception e) {
@@ -212,28 +219,26 @@
*/
public final LeafRecord getRecord(final CatalogNodeId parentID, final HFSUnicodeString nodeName)
throws IOException {
- int currentOffset = firstNodeOffset;
int currentNodeNumber = getBTHeaderRecord().getRootNode();
int currentNodeSize = getBTHeaderRecord().getNodeSize();
ByteBuffer buffer = ByteBuffer.allocate(currentNodeSize);
- fs.getApi().read(currentOffset + (currentNodeNumber * currentNodeSize), buffer);
- NodeDescriptor currentBtnd = new NodeDescriptor(buffer.array());
- log.debug("Current node descriptor: \n" + currentBtnd.toString());
+ fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * currentNodeSize), buffer);
+ NodeDescriptor currentBtnd = new NodeDescriptor(buffer.array(), 0);
CatalogKey cKey = new CatalogKey(parentID, nodeName);
+ int currentOffset = 0;
while (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, buffer.array(), currentNodeSize);
IndexRecord record = currentIndexNode.find(cKey);
currentNodeNumber = record.getIndex();
- currentOffset = currentNodeNumber * currentNodeSize;
+ currentOffset = catalogHeaderNodeOffset + currentNodeNumber * currentNodeSize;
buffer = ByteBuffer.allocate(currentNodeSize);
fs.getApi().read(currentOffset, buffer);
- currentBtnd = new NodeDescriptor(buffer.array());
+ currentBtnd = new NodeDescriptor(buffer.array(), 0);
}
LeafRecord lr = null;
if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, buffer.array(), currentNodeSize);
lr = leaf.find(parentID);
- log.debug("Leaf record: \n" + lr.toString());
}
return lr;
}
@@ -245,5 +250,9 @@
public final BTHeaderRecord getBTHeaderRecord() {
return bthr;
}
-
+
+ public ByteBuffer getBytes() {
+ return buffer;
+ }
+
}
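The getRecord variants above all follow the same descent: read the root node, and while the current node is an index node follow the matching index record down to a child node; once a leaf node is reached, search its records. A self-contained sketch of that loop over an in-memory tree (the node type is a simplified stand-in, not the JNode classes):

    import java.util.HashMap;
    import java.util.Map;

    // Sketch of the descent loop used by Catalog.getRecord(): start at the root,
    // follow index nodes until a leaf is reached, then search the leaf records.
    public class BTreeDescentSketch {
        static class Node {
            boolean index;                                  // true = index node, false = leaf node
            Map<Integer, Integer> children = new HashMap<Integer, Integer>(); // key -> child node number
            Map<Integer, String> records = new HashMap<Integer, String>();    // key -> leaf record data
        }

        static String find(Node[] nodes, int rootNode, int key) {
            Node current = nodes[rootNode];
            while (current.index) {
                // real index nodes pick the child via the largest key <= search key
                Integer child = current.children.get(key);
                if (child == null) {
                    return null;              // no matching index record
                }
                current = nodes[child];       // read the child node
            }
            return current.records.get(key);  // search the leaf node
        }

        public static void main(String[] args) {
            Node root = new Node();
            root.index = true;
            root.children.put(2, 1);
            Node leaf = new Node();
            leaf.records.put(2, "root folder record");
            Node[] nodes = {root, leaf};
            System.out.println(find(nodes, 0, 2));
        }
    }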
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -5,11 +5,14 @@
import org.jnode.util.BigEndian;
public class CatalogFolder {
+
+ public static final int CATALOG_FOLDER_SIZE = 88;
+
private byte[] data;
public CatalogFolder(final byte[] src) {
data = new byte[88];
- System.arraycopy(src, 0, data, 0, 88);
+ System.arraycopy(src, 0, data, 0, CATALOG_FOLDER_SIZE);
}
/**
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -14,7 +14,7 @@
public CatalogIndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
for (int i = 0; i < records.length; ++i) {
- int currentOffset = offsets[i];
+ int currentOffset = getOffset(i);
Key currentKey = new CatalogKey(nodeData, currentOffset);
records[i] = new IndexRecord(currentKey, nodeData, currentOffset);
log.debug("Index record key:" + records[i].getKey().toString());
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -73,6 +73,14 @@
}
}
+ public byte[] getBytes() {
+ byte[] data = new byte[this.getKeyLength()];
+ BigEndian.setInt16(data, 0, this.getKeyLength());
+ System.arraycopy(parentID.getBytes(), 0, data, 2, 4);
+ System.arraycopy(nodeName.getBytes(), 0, data, 6, nodeName.getLength());
+ return data;
+ }
+
public final String toString() {
StringBuffer s = new StringBuffer();
s.append("Key length: ").append(getKeyLength()).append(" ");
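The new CatalogKey.getBytes() writes the key length as a 16-bit value at offset 0, the 4-byte parent CNID at offset 2, and the node name from offset 6 onwards. A standalone sketch of that layout with java.nio (the name encoding shown here, a 2-byte length followed by UTF-16BE characters, is an assumption about HFSUnicodeString, which is not part of this diff):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    // Sketch of the catalog key layout written by CatalogKey.getBytes():
    // [0..1] key length, [2..5] parent CNID, [6..] node name.
    public class CatalogKeySketch {
        static byte[] encodeKey(int parentCnid, String name) {
            int nameBytes = 2 + name.length() * 2;       // assumed HFSUnicodeString form
            int keyLength = 2 + 4 + nameBytes;
            ByteBuffer buf = ByteBuffer.allocate(keyLength).order(ByteOrder.BIG_ENDIAN);
            buf.putShort((short) keyLength);             // key length at offset 0
            buf.putInt(parentCnid);                      // parent CNID at offset 2
            buf.putShort((short) name.length());         // name length at offset 6 (assumed)
            for (int i = 0; i < name.length(); i++) {
                buf.putChar(name.charAt(i));             // UTF-16BE code units
            }
            return buf.array();
        }

        public static void main(String[] args) {
            byte[] key = encodeKey(2, "untitled");       // 2 = root folder CNID (assumed)
            System.out.println("encoded key length = " + key.length);
        }
    }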
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -10,16 +10,22 @@
public class CatalogLeafNode extends LeafNode {
+ public CatalogLeafNode(final NodeDescriptor descriptor, final int nodeSize) {
+ super(descriptor, nodeSize);
+ }
+
public CatalogLeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
for (int i = 0; i < records.length; ++i) {
- int currentOffset = offsets[i];
- int recordDataSize = offsets[i + 1] - offsets[i];
+ int currentOffset = getOffset(i);
+ int recordDataSize = getOffset(i + 1) - currentOffset;
Key key = new CatalogKey(nodeData, currentOffset);
records[i] = new LeafRecord(key, nodeData, currentOffset, recordDataSize);
}
}
-
+
+
+
/**
*
* @param parentId
@@ -52,4 +58,8 @@
}
return list.toArray(new LeafRecord[list.size()]);
}
+
+ public byte[] getBytes() {
+ return nodeData;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -3,12 +3,17 @@
import org.jnode.fs.hfsplus.HFSUnicodeString;
import org.jnode.util.BigEndian;
+
+
public class CatalogThread {
+
+ public static final int CATALOG_THREAD_SIZE = 512;
+
private byte[] data;
public CatalogThread(final byte[] src) {
data = new byte[512];
- System.arraycopy(src, 0, data, 0, 512);
+ System.arraycopy(src, 0, data, 0, CATALOG_THREAD_SIZE);
}
/**
@@ -37,4 +42,8 @@
public final HFSUnicodeString getNodeName() {
return new HFSUnicodeString(data, 8);
}
+
+ public byte[] getBytes() {
+ return data;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -6,10 +6,17 @@
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
public class ExtentIndexNode extends IndexNode {
+
+ /**
+ *
+ * @param descriptor
+ * @param nodeData
+ * @param nodeSize
+ */
public ExtentIndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
for (int i = 0; i < records.length; ++i) {
- int currentOffset = offsets[i];
+ int currentOffset = getOffset(i);
Key currentKey = new ExtentKey(nodeData, currentOffset);
records[i] = new IndexRecord(currentKey, nodeData, currentOffset);
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -6,11 +6,18 @@
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
public class ExtentLeafNode extends LeafNode {
+
+ /**
+ *
+ * @param descriptor
+ * @param nodeData
+ * @param nodeSize
+ */
public ExtentLeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
for (int i = 0; i < records.length; ++i) {
- int currentOffset = offsets[i];
- int recordDataSize = offsets[i + 1] - offsets[i];
+ int currentOffset = getOffset(i);
+ int recordDataSize = getOffset(i + 1) - currentOffset;
Key currentKey = new ExtentKey(nodeData, currentOffset);
records[i] = new LeafRecord(currentKey, nodeData, currentOffset, recordDataSize);
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -10,9 +10,9 @@
data = new byte[BT_HEADER_RECORD_LENGTH];
}
- public BTHeaderRecord(final byte[] src) {
+ public BTHeaderRecord(final byte[] src, int offset) {
data = new byte[BT_HEADER_RECORD_LENGTH];
- System.arraycopy(src, 0, data, 0, BT_HEADER_RECORD_LENGTH);
+ System.arraycopy(src, offset, data, 0, BT_HEADER_RECORD_LENGTH);
}
public final int getTreeDepth() {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -11,14 +11,14 @@
*/
public IndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
- records = new IndexRecord[offsets.length - 1];
+ records = new IndexRecord[descriptor.getNumRecords()];
}
/**
*
* @return
*/
- public final IndexRecord[] getRecords() {
- return records;
+ public final IndexRecord getRecord(int index) {
+ return records[index];
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -6,12 +6,33 @@
/**
*
* @param descriptor
+ * @param nodeSize
+ */
+ public LeafNode(final NodeDescriptor descriptor, int nodeSize) {
+ super(descriptor, nodeSize);
+ records = new LeafRecord[descriptor.getNumRecords()];
+ }
+
+ /**
+ *
+ * @param record
+ * @param offset
+ * @param index
+ */
+ public void addRecord(LeafRecord record, int offset, int index) {
+ records[index] = record;
+ this.setOffset(index, offset);
+ }
+
+ /**
+ *
+ * @param descriptor
* @param nodeData
* @param nodeSize
*/
public LeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
super(descriptor, nodeData, nodeSize);
- records = new LeafRecord[offsets.length - 1];
+ records = new LeafRecord[descriptor.getNumRecords()];
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -5,7 +5,7 @@
public class LeafRecord {
private final Key key;
private final byte[] recordData;
-
+
public LeafRecord(final Key key, final byte[] nodeData, final int offset, final int recordDataSize) {
this.key = key;
recordData = new byte[recordDataSize];
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -4,21 +4,31 @@
public class Node {
protected NodeDescriptor descriptor;
- protected int[] offsets;
+ protected byte[] nodeData;
+ protected int nodeSize;
+ public Node(NodeDescriptor descriptor, final int nodeSize) {
+ this.descriptor = descriptor;
+ this.nodeData = new byte[nodeSize];
+ this.nodeSize = nodeSize;
+ }
+
public Node(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
this.descriptor = descriptor;
- offsets = new int[descriptor.getNumRecords() + 1];
- for (int i = 0; i < offsets.length; ++i) {
- offsets[i] = BigEndian.getInt16(nodeData, nodeSize - ((i + 1) * 2));
- }
+ this.nodeData = nodeData;
+ this.nodeSize = nodeSize;
}
- public final NodeDescriptor getDescriptor() {
+ public NodeDescriptor getDescriptor() {
return descriptor;
}
+
+ public int getOffset(int index) {
+ return BigEndian.getInt16(nodeData, nodeSize - ((index + 1) * 2));
+ }
+
+ public void setOffset(int index, int offsetValue) {
+ BigEndian.setInt16(nodeData, nodeSize - ((index + 1) * 2), offsetValue);
+ }
- public final int[] getOffsets() {
- return offsets;
- }
}
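The new getOffset/setOffset methods encode the node's record-offset table: 16-bit big-endian offsets stored at the very end of the node buffer, with the entry for record i at nodeSize - (i + 1) * 2, so the table grows backwards from the tail. A self-contained sketch of that convention:

    // Sketch of the record-offset table kept at the tail of a B-tree node,
    // matching Node.getOffset()/setOffset(): entry i lives at
    // nodeSize - (i + 1) * 2 and is a 16-bit big-endian value.
    public class OffsetTableSketch {
        static void setOffset(byte[] node, int index, int offset) {
            int pos = node.length - ((index + 1) * 2);
            node[pos] = (byte) ((offset >> 8) & 0xFF);
            node[pos + 1] = (byte) (offset & 0xFF);
        }

        static int getOffset(byte[] node, int index) {
            int pos = node.length - ((index + 1) * 2);
            return ((node[pos] & 0xFF) << 8) | (node[pos + 1] & 0xFF);
        }

        public static void main(String[] args) {
            byte[] node = new byte[512];       // assumed node size for illustration
            setOffset(node, 0, 14);            // record 0 starts right after the descriptor
            setOffset(node, 1, 120);           // record 1 at an arbitrary example offset
            System.out.println(getOffset(node, 0) + ", " + getOffset(node, 1));
        }
    }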
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-28 19:51:49 UTC (rev 4927)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-01-29 16:26:33 UTC (rev 4928)
@@ -10,18 +10,26 @@
data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
}
- public NodeDescriptor(final byte[] src) {
+ public NodeDescriptor(final byte[] src, int offset) {
data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
- System.arraycopy(src, 0, data, 0, BT_NODE_DESCRIPTOR_LENGTH);
+ System.arraycopy(src, offset, data, 0, BT_NODE_DESCRIPTOR_LENGTH);
}
public final int getFLink() {
return BigEndian.getInt32(data, 0);
}
+
+ public void setFLink(int link) {
+ BigEndian.setInt32(data, 0, link);
+ }
public final int getBLink() {
return BigEndian.getInt32(data, 4);
}
+
+ public void setBLink(int link) {
+ BigEndian.setInt32(data, 4, link);
+ }
public final int getKind() {
return BigEndian.getInt8(data, 8);
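The accessors above imply the layout of the 14-byte node descriptor: forward link at offset 0 (4 bytes), backward link at offset 4 (4 bytes), and kind at offset 8 (1 byte); the remaining fields follow, although only the first three offsets are visible in this hunk. A compact sketch of reading those fields from a raw descriptor (the field positions beyond offset 8, and the -1 leaf marker, are assumptions):

    import java.nio.ByteBuffer;
    import java.nio.ByteOrder;

    // Sketch of decoding the start of a 14-byte B-tree node descriptor.
    // Offsets 0 (fLink), 4 (bLink) and 8 (kind) come from the diff; treating
    // offset 9 as height and 10..11 as the record count is an assumption.
    public class NodeDescriptorSketch {
        public static void main(String[] args) {
            byte[] raw = new byte[14];
            ByteBuffer buf = ByteBuffer.wrap(raw).order(ByteOrder.BIG_ENDIAN);
            buf.putInt(0, 0);            // fLink
            buf.putInt(4, 0);            // bLink
            raw[8] = -1;                 // kind: -1 as the leaf-node marker (assumed)
            raw[9] = 1;                  // height (assumed position)
            buf.putShort(10, (short) 2); // number of records (assumed position)

            System.out.println("kind = " + raw[8] + ", records = " + buf.getShort(10));
        }
    }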
|
|
From: <ga...@us...> - 2009-01-30 23:01:59
|
Revision: 4929
http://jnode.svn.sourceforge.net/jnode/?rev=4929&view=rev
Author: galatnm
Date: 2009-01-30 23:01:52 +0000 (Fri, 30 Jan 2009)
Log Message:
-----------
Refactor the HFS+ B-tree node and record classes (introduce AbstractNode, CatalogNode, ExtentNode and NodeRecord) and update volume creation and catalog lookups to use them.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Key.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java
Added Paths:
-----------
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeRecord.java
Removed Paths:
-------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -27,7 +27,7 @@
public HFSPlusDirectory(final HFSPlusEntry e) {
super((HfsPlusFileSystem) e.getFileSystem());
this.record = e.getRecord();
- this.folder = new CatalogFolder(record.getRecordData());
+ this.folder = new CatalogFolder(record.getData());
log.debug("Associated record:" + record.toString());
if (record.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER) {
log.debug("Associated folder : " + folder.toString());
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -15,12 +15,11 @@
public HFSPlusFile(final HFSPlusEntry e) {
super((HfsPlusFileSystem) e.getFileSystem());
this.record = e.getRecord();
- this.file = new CatalogFile(record.getRecordData());
+ this.file = new CatalogFile(record.getData());
}
@Override
public void flush() throws IOException {
- // TODO Auto-generated method stub
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -16,6 +16,7 @@
import org.jnode.fs.hfsplus.tree.IndexRecord;
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
+import org.jnode.fs.hfsplus.tree.NodeRecord;
import org.jnode.util.ByteBufferUtils;
public class Catalog {
@@ -85,7 +86,7 @@
bthr.setClumpSize(params.getCatalogClumpSize());
// TODO initialize attributes, max key length and key comparison type.
bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
-
+ // Create root node
int rootNodePosition = bthr.getRootNode() * bthr.getNodeSize();
bufferLength += (rootNodePosition - bufferLength);
//Create node descriptor
@@ -93,28 +94,28 @@
nd.setKind(HfsPlusConstants.BT_LEAF_NODE);
nd.setHeight(1);
nd.setRecordCount(params.isJournaled() ? 6 : 2);
- CatalogLeafNode clf = new CatalogLeafNode(nd, bthr.getNodeSize());
+ CatalogNode rootNode = new CatalogNode(nd, bthr.getNodeSize());
+ int offset = NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
// First record (folder)
- int offset = rootNodePosition + NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, name);
CatalogFolder folder = new CatalogFolder();
folder.setFolderId(CatalogNodeId.HFSPLUS_ROOT_CNID);
folder.setValence(params.isJournaled() ? 2 : 0);
LeafRecord record = new LeafRecord(ck, folder.getBytes());
- clf.addRecord(record, offset, 0);
+ rootNode.addNodeRecord(0, record, offset);
// Second record (thread)
offset = offset + ck.getKeyLength() + CatalogFolder.CATALOG_FOLDER_SIZE;
CatalogKey tck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, name);
CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
record = new LeafRecord(tck, ct.getBytes());
- clf.addRecord(record, offset, 1);
+ rootNode.addNodeRecord(1, record, offset);
buffer = ByteBuffer.allocate(bufferLength + bthr.getNodeSize());
buffer.put(btnd.getBytes());
buffer.put(bthr.getBytes());
buffer.position(rootNodePosition);
- buffer.put(clf.getBytes());
+ buffer.put(rootNode.getBytes());
buffer.rewind();
}
@@ -126,27 +127,26 @@
*/
public final LeafRecord getRecord(final CatalogNodeId parentID)
throws IOException {
+ int currentOffset = 0;
+ LeafRecord lr = null;
int nodeSize = getBTHeaderRecord().getNodeSize();
ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
fs.getApi().read(catalogHeaderNodeOffset + (getBTHeaderRecord().getRootNode() * nodeSize), nodeData);
nodeData.rewind();
byte[] data = ByteBufferUtils.toArray(nodeData);
- NodeDescriptor currentBtnd = new NodeDescriptor(data, 0);
- int currentOffset = 0;
- while (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, data, nodeSize);
- IndexRecord record = currentIndexNode.find(parentID);
+ CatalogNode node = new CatalogNode(data, nodeSize);
+ while (node.isIndexNode()) {
+ IndexRecord record = (IndexRecord)node.find(parentID);
currentOffset = catalogHeaderNodeOffset + (record.getIndex() * nodeSize);
nodeData = ByteBuffer.allocate(nodeSize);
fs.getApi().read(currentOffset, nodeData);
nodeData.rewind();
data = ByteBufferUtils.toArray(nodeData);
- currentBtnd = new NodeDescriptor(data, 0);
+ node = new CatalogNode(data, nodeSize);
}
- LeafRecord lr = null;
- if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, data, nodeSize);
- lr = leaf.find(parentID);
+
+ if (node.isLeafNode()) {
+ lr = (LeafRecord)node.find(parentID);
}
return lr;
}
@@ -180,14 +180,12 @@
throws IOException {
try {
int currentNodeNumber = nodeNumber;
- int currentNodeSize = getBTHeaderRecord().getNodeSize();
- ByteBuffer nodeData = ByteBuffer.allocate(currentNodeSize);
- fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * currentNodeSize), nodeData);
- NodeDescriptor currentBtnd = new NodeDescriptor(nodeData.array(), 0);
- if (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, nodeData.array(),
- currentNodeSize);
- IndexRecord[] records = currentIndexNode.findChilds(parentID);
+ int nodeSize = getBTHeaderRecord().getNodeSize();
+ ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
+ fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * nodeSize), nodeData);
+ CatalogNode node = new CatalogNode(nodeData.array(), nodeSize);
+ if (node.isIndexNode()) {
+ IndexRecord[] records = (IndexRecord[])node.findChilds(parentID);
List<LeafRecord> lfList = new LinkedList<LeafRecord>();
for (IndexRecord rec : records) {
LeafRecord[] lfr = getRecords(parentID, rec.getIndex());
@@ -196,10 +194,8 @@
}
}
return lfList.toArray(new LeafRecord[lfList.size()]);
- } else if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, nodeData.array(), currentNodeSize);
- LeafRecord[] lr = leaf.findAll(parentID);
- return lr;
+ } else if (node.isLeafNode()) {
+ return (LeafRecord[])node.findAll(parentID);
} else {
return null;
}
@@ -220,25 +216,23 @@
public final LeafRecord getRecord(final CatalogNodeId parentID, final HFSUnicodeString nodeName)
throws IOException {
int currentNodeNumber = getBTHeaderRecord().getRootNode();
- int currentNodeSize = getBTHeaderRecord().getNodeSize();
- ByteBuffer buffer = ByteBuffer.allocate(currentNodeSize);
- fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * currentNodeSize), buffer);
- NodeDescriptor currentBtnd = new NodeDescriptor(buffer.array(), 0);
- CatalogKey cKey = new CatalogKey(parentID, nodeName);
+ int nodeSize = getBTHeaderRecord().getNodeSize();
+ ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
+ fs.getApi().read(catalogHeaderNodeOffset + (currentNodeNumber * nodeSize), nodeData);
+ CatalogNode node = new CatalogNode(nodeData.array(), nodeSize);
int currentOffset = 0;
- while (currentBtnd.getKind() == HfsPlusConstants.BT_INDEX_NODE) {
- CatalogIndexNode currentIndexNode = new CatalogIndexNode(currentBtnd, buffer.array(), currentNodeSize);
- IndexRecord record = currentIndexNode.find(cKey);
+ CatalogKey cKey = new CatalogKey(parentID, nodeName);
+ while (node.isIndexNode()) {
+ IndexRecord record = (IndexRecord)node.find(cKey);
currentNodeNumber = record.getIndex();
- currentOffset = catalogHeaderNodeOffset + currentNodeNumber * currentNodeSize;
- buffer = ByteBuffer.allocate(currentNodeSize);
+ currentOffset = catalogHeaderNodeOffset + record.getIndex() * nodeSize;
+ nodeData = ByteBuffer.allocate(nodeSize);
fs.getApi().read(currentOffset, nodeData);
- currentBtnd = new NodeDescriptor(buffer.array(), 0);
+ node = new CatalogNode(nodeData.array(), nodeSize);
}
LeafRecord lr = null;
- if (currentBtnd.getKind() == HfsPlusConstants.BT_LEAF_NODE) {
- CatalogLeafNode leaf = new CatalogLeafNode(currentBtnd, buffer.array(), currentNodeSize);
- lr = leaf.find(parentID);
+ if (node.isLeafNode()) {
+ lr = (LeafRecord)node.find(parentID);
}
return lr;
}
Deleted: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogIndexNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,73 +0,0 @@
-package org.jnode.fs.hfsplus.catalog;
-
-import java.util.LinkedList;
-
-import org.apache.log4j.Logger;
-import org.jnode.fs.hfsplus.tree.IndexNode;
-import org.jnode.fs.hfsplus.tree.IndexRecord;
-import org.jnode.fs.hfsplus.tree.Key;
-import org.jnode.fs.hfsplus.tree.NodeDescriptor;
-
-public class CatalogIndexNode extends IndexNode {
- private final Logger log = Logger.getLogger(getClass());
-
- public CatalogIndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- super(descriptor, nodeData, nodeSize);
- for (int i = 0; i < records.length; ++i) {
- int currentOffset = getOffset(i);
- Key currentKey = new CatalogKey(nodeData, currentOffset);
- records[i] = new IndexRecord(currentKey, nodeData, currentOffset);
- log.debug("Index record key:" + records[i].getKey().toString());
- }
- }
-
- /**
- *
- * @param parentId
- * @return
- */
- public final IndexRecord find(final CatalogNodeId parentId) {
- for (IndexRecord rec : records) {
- Key key = rec.getKey();
- if (key instanceof CatalogKey) {
- if (((CatalogKey) key).getParentId().getId() == parentId.getId()) {
- return rec;
- }
- }
- }
- return null;
- }
-
- public final IndexRecord[] findChilds(final CatalogNodeId parentId) {
- LinkedList<IndexRecord> result = new LinkedList<IndexRecord>();
- IndexRecord largestMatchingRecord = null;
- CatalogKey largestMatchingKey = null;
- for (IndexRecord rec : records) {
- CatalogKey key = (CatalogKey) rec.getKey();
- if (key.getParentId().getId() < parentId.getId()
- && (largestMatchingKey == null || key.compareTo(largestMatchingKey) > 0)) {
- largestMatchingKey = key;
- largestMatchingRecord = rec;
- } else if (key.getParentId().getId() == parentId.getId()) {
- result.addLast(rec);
- }
- }
-
- if (largestMatchingKey != null) {
- result.addFirst(largestMatchingRecord);
- }
- return result.toArray(new IndexRecord[result.size()]);
- }
-
- public final IndexRecord find(final CatalogKey key) {
- IndexRecord largestMatchingRecord = null;
- for (int i = 0; i < records.length; ++i) {
- if (records[i].getKey().compareTo(key) <= 0
- && (largestMatchingRecord == null
- || records[i].getKey().compareTo(largestMatchingRecord.getKey()) > 0)) {
- largestMatchingRecord = records[i];
- }
- }
- return largestMatchingRecord;
- }
-}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -45,10 +45,6 @@
return keyLength;
}
- public final int getLength() {
- return keyLength;
- }
-
public final CatalogNodeId getParentId() {
return parentID;
}
@@ -91,4 +87,5 @@
.getUnicodeString() : "");
return s.toString();
}
+
}
Deleted: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogLeafNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,65 +0,0 @@
-package org.jnode.fs.hfsplus.catalog;
-
-import java.util.LinkedList;
-import java.util.List;
-
-import org.jnode.fs.hfsplus.tree.Key;
-import org.jnode.fs.hfsplus.tree.LeafNode;
-import org.jnode.fs.hfsplus.tree.LeafRecord;
-import org.jnode.fs.hfsplus.tree.NodeDescriptor;
-
-public class CatalogLeafNode extends LeafNode {
-
- public CatalogLeafNode(final NodeDescriptor descriptor, final int nodeSize) {
- super(descriptor, nodeSize);
- }
-
- public CatalogLeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- super(descriptor, nodeData, nodeSize);
- for (int i = 0; i < records.length; ++i) {
- int currentOffset = getOffset(i);
- int recordDataSize = getOffset(i + 1) - currentOffset;
- Key key = new CatalogKey(nodeData, currentOffset);
- records[i] = new LeafRecord(key, nodeData, currentOffset, recordDataSize);
- }
- }
-
-
-
- /**
- *
- * @param parentId
- * @return
- */
- public final LeafRecord find(final CatalogNodeId parentId) {
- for (LeafRecord rec : records) {
- Key key = rec.getKey();
- if (key instanceof CatalogKey) {
- if (((CatalogKey) key).getParentId().getId() == parentId.getId()) {
- return rec;
- }
- }
- }
- return null;
- }
-
- /**
- *
- * @param parentId
- * @return
- */
- public final LeafRecord[] findAll(final CatalogNodeId parentId) {
- List<LeafRecord> list = new LinkedList<LeafRecord>();
- for (LeafRecord rec : records) {
- Key key = rec.getKey();
- if (key instanceof CatalogKey && ((CatalogKey) key).getParentId().getId() == parentId.getId()) {
- list.add(rec);
- }
- }
- return list.toArray(new LeafRecord[list.size()]);
- }
-
- public byte[] getBytes() {
- return nodeData;
- }
-}
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNode.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -0,0 +1,120 @@
+package org.jnode.fs.hfsplus.catalog;
+
+import java.util.LinkedList;
+import java.util.List;
+
+import org.jnode.fs.hfsplus.tree.AbstractNode;
+import org.jnode.fs.hfsplus.tree.IndexRecord;
+import org.jnode.fs.hfsplus.tree.Key;
+import org.jnode.fs.hfsplus.tree.LeafRecord;
+import org.jnode.fs.hfsplus.tree.NodeDescriptor;
+import org.jnode.fs.hfsplus.tree.NodeRecord;
+
+public class CatalogNode extends AbstractNode {
+
+ public CatalogNode(NodeDescriptor descriptor, final int nodeSize) {
+ this.size = nodeSize;
+ this.datas = new byte[nodeSize];
+ System.arraycopy(descriptor.getBytes(), 0, datas, 0, NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH);
+ }
+
+ public CatalogNode(final byte[] nodeData, final int nodeSize) {
+ this.size = nodeSize;
+ this.datas = nodeData;
+ }
+
+ @Override
+ public NodeRecord getNodeRecord(int index) {
+ int offset = this.getRecordOffset(index);
+ int offset2 = this.getRecordOffset(index + 1);
+ int recordSize = offset2 - offset;
+ NodeRecord record = null;
+ Key key = new CatalogKey(datas, offset);
+ if(isIndexNode()){
+ record = new IndexRecord(key, datas, offset);
+ } else {
+ record = new LeafRecord(key, datas, offset, recordSize);
+ }
+ return record;
+ }
+
+ /**
+ *
+ * @param parentId
+ * @return
+ */
+ public final NodeRecord find(final CatalogNodeId parentId) {
+ for (int index = 0; index < this.getNodeDescriptor().getNumRecords(); index++) {
+ NodeRecord record = this.getNodeRecord(index);
+ Key key = record.getKey();
+ if (key instanceof CatalogKey) {
+ if (((CatalogKey) key).getParentId().getId() == parentId.getId()) {
+ return record;
+ }
+ }
+ }
+ return null;
+ }
+
+ /**
+ *
+ * @param key
+ * @return
+ */
+ public NodeRecord find(final CatalogKey key) {
+ NodeRecord largestMatchingRecord = null;
+ for (int index = 0; index < this.getNodeDescriptor().getNumRecords(); index++) {
+ NodeRecord record = this.getNodeRecord(index);
+ if ((record.getKey().compareTo(key) <= 0)
+ && (largestMatchingRecord == null || record.getKey().compareTo(largestMatchingRecord.getKey()) > 0)) {
+ largestMatchingRecord = record;
+ }
+ }
+ return largestMatchingRecord;
+ }
+
+ /**
+ *
+ * @param parentId
+ * @return
+ */
+ public final NodeRecord[] findChilds(final CatalogNodeId parentId) {
+ LinkedList<NodeRecord> result = new LinkedList<NodeRecord>();
+ NodeRecord largestMatchingRecord = null;
+ CatalogKey largestMatchingKey = null;
+ for (int index = 0; index < this.getNodeDescriptor().getNumRecords(); index++) {
+ NodeRecord record = this.getNodeRecord(index);
+ CatalogKey key = (CatalogKey) record.getKey();
+ if (key.getParentId().getId() < parentId.getId()
+ && (largestMatchingKey == null || key.compareTo(largestMatchingKey) > 0)) {
+ largestMatchingKey = key;
+ largestMatchingRecord = record;
+ } else if (key.getParentId().getId() == parentId.getId()) {
+ result.addLast(record);
+ }
+ }
+
+ if (largestMatchingKey != null) {
+ result.addFirst(largestMatchingRecord);
+ }
+ return result.toArray(new NodeRecord[result.size()]);
+ }
+
+ /**
+ *
+ * @param parentId
+ * @return
+ */
+ public final NodeRecord[] findAll(final CatalogNodeId parentId) {
+ List<NodeRecord> list = new LinkedList<NodeRecord>();
+ for (int index = 0; index < this.getNodeDescriptor().getNumRecords(); index++) {
+ NodeRecord record = this.getNodeRecord(index);
+ Key key = record.getKey();
+ if (key instanceof CatalogKey && ((CatalogKey) key).getParentId().getId() == parentId.getId()) {
+ list.add(record);
+ }
+ }
+ return list.toArray(new NodeRecord[list.size()]);
+ }
+
+}
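When descending through index nodes, find(CatalogKey) above keeps the record with the largest key that is still less than or equal to the search key, and findChilds applies the same idea to include the sub-tree that may hold earlier children of the requested parent. The selection rule on its own, sketched over a plain array of integer keys:

    // Sketch of the "largest key <= search key" rule used when choosing which
    // child of an index node to descend into. Integer keys stand in for
    // CatalogKey; in the real code the comparison is CatalogKey.compareTo().
    public class IndexDescentRuleSketch {
        static int findChildIndex(int[] keys, int searchKey) {
            int best = -1;
            for (int i = 0; i < keys.length; i++) {
                if (keys[i] <= searchKey && (best == -1 || keys[i] > keys[best])) {
                    best = i;   // largest matching record so far
                }
            }
            return best;        // -1 means no child can contain the key
        }

        public static void main(String[] args) {
            int[] firstKeys = {2, 16, 40, 90};                  // first key of each child (example)
            System.out.println(findChildIndex(firstKeys, 25));  // prints 1 -> child starting at 16
        }
    }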
Deleted: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentIndexNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,24 +0,0 @@
-package org.jnode.fs.hfsplus.extent;
-
-import org.jnode.fs.hfsplus.tree.IndexNode;
-import org.jnode.fs.hfsplus.tree.IndexRecord;
-import org.jnode.fs.hfsplus.tree.Key;
-import org.jnode.fs.hfsplus.tree.NodeDescriptor;
-
-public class ExtentIndexNode extends IndexNode {
-
- /**
- *
- * @param descriptor
- * @param nodeData
- * @param nodeSize
- */
- public ExtentIndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- super(descriptor, nodeData, nodeSize);
- for (int i = 0; i < records.length; ++i) {
- int currentOffset = getOffset(i);
- Key currentKey = new ExtentKey(nodeData, currentOffset);
- records[i] = new IndexRecord(currentKey, nodeData, currentOffset);
- }
- }
-}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -40,14 +40,14 @@
}
@Override
- public final int getLength() {
- return KEY_LENGTH;
+ public final int compareTo(final Key key) {
+ return 0;
}
@Override
- public final int compareTo(final Key key) {
+ public byte[] getBytes() {
// TODO Auto-generated method stub
- return 0;
+ return null;
}
}
Deleted: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentLeafNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,25 +0,0 @@
-package org.jnode.fs.hfsplus.extent;
-
-import org.jnode.fs.hfsplus.tree.Key;
-import org.jnode.fs.hfsplus.tree.LeafNode;
-import org.jnode.fs.hfsplus.tree.LeafRecord;
-import org.jnode.fs.hfsplus.tree.NodeDescriptor;
-
-public class ExtentLeafNode extends LeafNode {
-
- /**
- *
- * @param descriptor
- * @param nodeData
- * @param nodeSize
- */
- public ExtentLeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- super(descriptor, nodeData, nodeSize);
- for (int i = 0; i < records.length; ++i) {
- int currentOffset = getOffset(i);
- int recordDataSize = getOffset(i + 1) - currentOffset;
- Key currentKey = new ExtentKey(nodeData, currentOffset);
- records[i] = new LeafRecord(currentKey, nodeData, currentOffset, recordDataSize);
- }
- }
-}
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentNode.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -0,0 +1,26 @@
+package org.jnode.fs.hfsplus.extent;
+
+import org.jnode.fs.hfsplus.tree.AbstractNode;
+import org.jnode.fs.hfsplus.tree.NodeDescriptor;
+import org.jnode.fs.hfsplus.tree.NodeRecord;
+
+public class ExtentNode extends AbstractNode {
+
+ public ExtentNode(NodeDescriptor descriptor, final int nodeSize) {
+ this.size = nodeSize;
+ this.datas = new byte[nodeSize];
+ System.arraycopy(descriptor.getBytes(), 0, datas, 0, NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH);
+ }
+
+ public ExtentNode(final byte[] nodeData, final int nodeSize) {
+ this.size = nodeSize;
+ this.datas = nodeData;
+ }
+
+ @Override
+ public NodeRecord getNodeRecord(int index) {
+ // TODO Auto-generated method stub
+ return null;
+ }
+
+}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,9 +1,10 @@
package org.jnode.fs.hfsplus.tree;
public abstract class AbstractKey implements Key {
+
public abstract int getKeyLength();
+
+ public abstract byte[] getBytes();
- public abstract int getLength();
-
public abstract int compareTo(Key key);
}
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -0,0 +1,41 @@
+package org.jnode.fs.hfsplus.tree;
+
+import org.jnode.fs.hfsplus.HfsPlusConstants;
+import org.jnode.util.BigEndian;
+
+public abstract class AbstractNode implements Node {
+
+ protected byte[] datas;
+ protected int size;
+
+ @Override
+ public NodeDescriptor getNodeDescriptor() {
+ return new NodeDescriptor(datas, 0);
+ }
+
+ public boolean isIndexNode(){
+ return this.getNodeDescriptor().getKind() == HfsPlusConstants.BT_INDEX_NODE;
+ }
+
+ public boolean isLeafNode(){
+ return this.getNodeDescriptor().getKind() == HfsPlusConstants.BT_LEAF_NODE;
+ }
+
+ @Override
+ public int getRecordOffset(int index) {
+ return BigEndian.getInt16(datas, size - ((index + 1) * 2));
+ }
+
+ @Override
+ public abstract NodeRecord getNodeRecord(int index);
+
+ @Override
+ public void addNodeRecord(int index, NodeRecord record, int offset) {
+ BigEndian.setInt16(datas, size - ((index + 1) * 2), offset);
+ System.arraycopy(record.getBytes(), 0, datas, offset, record.getSize());
+ }
+
+ public byte[] getBytes(){
+ return datas;
+ }
+}
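AbstractNode.addNodeRecord couples the two layouts seen earlier: the record bytes are copied into the node at the given offset, and that offset is written into the 16-bit table at the node's tail. A small end-to-end sketch of building a node that way (the record contents are placeholder byte arrays, not real catalog records):

    // Sketch of AbstractNode.addNodeRecord(): copy the record bytes at `offset`
    // and remember `offset` in the big-endian table at the end of the node.
    public class AddNodeRecordSketch {
        static void addRecord(byte[] node, int index, byte[] record, int offset) {
            int tablePos = node.length - ((index + 1) * 2);
            node[tablePos] = (byte) ((offset >> 8) & 0xFF);
            node[tablePos + 1] = (byte) (offset & 0xFF);
            System.arraycopy(record, 0, node, offset, record.length);
        }

        public static void main(String[] args) {
            byte[] node = new byte[512];            // assumed node size
            byte[] folderRecord = new byte[94];     // placeholder: key + folder data
            byte[] threadRecord = new byte[60];     // placeholder: key + thread data

            int offset = 14;                        // first record follows the descriptor
            addRecord(node, 0, folderRecord, offset);
            offset += folderRecord.length;
            addRecord(node, 1, threadRecord, offset);

            System.out.println("second record starts at " + offset);
        }
    }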
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -0,0 +1,21 @@
+package org.jnode.fs.hfsplus.tree;
+
+public abstract class AbstractNodeRecord implements NodeRecord {
+
+ protected Key key = null;
+ protected byte[] recordData = null;
+
+ public Key getKey() {
+ return key;
+ }
+
+ public byte[] getData() {
+ return recordData;
+ }
+
+ public int getSize(){
+ return key.getKeyLength() + recordData.length;
+ }
+
+ public abstract byte[] getBytes();
+}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,7 +1,6 @@
package org.jnode.fs.hfsplus.tree;
-public class IndexNode extends Node {
- protected IndexRecord[] records;
+public class IndexNode extends AbstractNode {
/**
*
@@ -9,16 +8,13 @@
* @param nodeData
* @param nodeSize
*/
- public IndexNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- super(descriptor, nodeData, nodeSize);
- records = new IndexRecord[descriptor.getNumRecords()];
+ public IndexNode(final byte[] nodeData, final int nodeSize) {
+ this.size = nodeSize;
+ this.datas = nodeData;
}
- /**
- *
- * @return
- */
- public final IndexRecord getRecord(int index) {
- return records[index];
+ @Override
+ public NodeRecord getNodeRecord(int index) {
+ return null;
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -2,9 +2,7 @@
import org.jnode.util.BigEndian;
-public class IndexRecord {
- private final Key key;
- private final byte[] index;
+public class IndexRecord extends AbstractNodeRecord {
/**
*
@@ -14,15 +12,18 @@
*/
public IndexRecord(final Key key, final byte[] nodeData, final int offset) {
this.key = key;
- index = new byte[4];
- System.arraycopy(nodeData, offset + key.getLength() + 2, index, 0, 4);
+ this.recordData = new byte[4];
+ System.arraycopy(nodeData, offset + key.getKeyLength() + 2, recordData, 0, 4);
}
- public final Key getKey() {
- return key;
+ @Override
+ public byte[] getBytes() {
+ // TODO Auto-generated method stub
+ return null;
}
-
+
public final int getIndex() {
- return BigEndian.getInt32(index, 0);
+ return BigEndian.getInt32(recordData, 0);
}
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Key.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Key.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Key.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,7 +1,11 @@
package org.jnode.fs.hfsplus.tree;
public interface Key extends Comparable<Key> {
- int getKeyLength();
-
- int getLength();
+
+ public int getKeyLength();
+
+ public byte[] getBytes();
+
+ public int compareTo(Key key);
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafNode.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,38 +1,31 @@
package org.jnode.fs.hfsplus.tree;
-public class LeafNode extends Node {
- protected LeafRecord[] records;
+public class LeafNode extends AbstractNode {
+
/**
*
* @param descriptor
+ * @param nodeData
* @param nodeSize
*/
- public LeafNode(final NodeDescriptor descriptor, int nodeSize) {
- super(descriptor, nodeSize);
- records = new LeafRecord[descriptor.getNumRecords()];
+ public LeafNode(final byte[] nodeData, final int nodeSize) {
+ this.size = nodeSize;
+ this.datas = nodeData;
}
-
- /**
- *
- * @param record
- * @param offset
- * @param index
- */
- public void addRecord(LeafRecord record, int offset, int index) {
- records[index] = record;
- this.setOffset(index, offset);
- }
/**
*
* @param descriptor
- * @param nodeData
* @param nodeSize
*/
- public LeafNode(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- super(descriptor, nodeData, nodeSize);
- records = new LeafRecord[descriptor.getNumRecords()];
+ public LeafNode(final NodeDescriptor descriptor, int nodeSize) {
+ this.size = nodeSize;
}
+
+ @Override
+ public NodeRecord getNodeRecord(int index) {
+ return null;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -2,40 +2,34 @@
import org.jnode.util.BigEndian;
-public class LeafRecord {
- private final Key key;
- private final byte[] recordData;
+public class LeafRecord extends AbstractNodeRecord {
- public LeafRecord(final Key key, final byte[] nodeData, final int offset, final int recordDataSize) {
+ public LeafRecord(final Key key, final byte[] recordData){
this.key = key;
- recordData = new byte[recordDataSize];
- System.arraycopy(nodeData, offset + key.getKeyLength() + 2, recordData, 0, recordDataSize);
+ this.recordData = new byte[recordData.length];
+ System.arraycopy(recordData,0, this.recordData, 0, recordData.length);
}
-
- /**
- *
- * @param key
- * @param recordData
- */
- public LeafRecord(final Key key, final byte[] recordData) {
+
+ public LeafRecord(final Key key, final byte[] nodeData, final int offset, final int recordDataSize) {
this.key = key;
- this.recordData = new byte[recordData.length];
- System.arraycopy(recordData, 0, this.recordData, 0, recordData.length);
+ this.recordData = new byte[recordDataSize];
+ System.arraycopy(nodeData, offset + key.getKeyLength() + 2, this.recordData, 0, recordDataSize);
}
- public final Key getKey() {
- return key;
- }
-
public final int getType() {
- return BigEndian.getInt16(recordData, 0);
+ return BigEndian.getInt16(this.recordData, 0);
}
- public final byte[] getRecordData() {
- return recordData;
+ @Override
+ public byte[] getBytes() {
+ byte[] data = new byte[key.getKeyLength() + this.recordData.length];
+ System.arraycopy(data, 0, key.getBytes(), 0, key.getKeyLength());
+ System.arraycopy(data, key.getKeyLength(), this.recordData, 0, this.recordData.length);
+ return data;
}
-
+
public final String toString() {
return "Type : " + getType() + "\nKey : " + getKey().toString() + "\n";
}
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java 2009-01-29 16:26:33 UTC (rev 4928)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/Node.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -1,34 +1,12 @@
package org.jnode.fs.hfsplus.tree;
-import org.jnode.util.BigEndian;
-public class Node {
- protected NodeDescriptor descriptor;
- protected byte[] nodeData;
- protected int nodeSize;
- public Node(NodeDescriptor descriptor, final int nodeSize) {
- this.descriptor = descriptor;
- this.nodeData = new byte[nodeSize];
- this.nodeSize = nodeSize;
- }
-
- public Node(final NodeDescriptor descriptor, final byte[] nodeData, final int nodeSize) {
- this.descriptor = descriptor;
- this.nodeData = nodeData;
- this.nodeSize = nodeSize;
- }
-
- public NodeDescriptor getDescriptor() {
- return descriptor;
- }
-
- public int getOffset(int index) {
- return BigEndian.getInt16(nodeData, nodeSize - ((index + 1) * 2));
- }
-
- public void setOffset(int index, int offsetValue) {
- BigEndian.setInt16(nodeData, nodeSize - ((index + 1) * 2), offsetValue);
- }
-
+public interface Node {
+ public NodeDescriptor getNodeDescriptor();
+ public boolean isIndexNode();
+ public boolean isLeafNode();
+ public int getRecordOffset(int index);
+ public NodeRecord getNodeRecord(int index);
+ public void addNodeRecord(int index, NodeRecord record, int offset);
}
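
The removed getOffset/setOffset code is worth keeping in mind for the new getRecordOffset contract: an HFS+ B-tree node stores its record offsets as 16-bit big-endian values packed at the end of the node, with record i's offset at nodeSize - 2 * (i + 1). A standalone sketch of that layout (names are illustrative, not JNode classes):

    public class NodeOffsetsSketch {
        private final byte[] nodeData;
        private final int nodeSize;

        public NodeOffsetsSketch(int nodeSize) {
            this.nodeSize = nodeSize;
            this.nodeData = new byte[nodeSize];
        }

        // Record 0's offset occupies the last two bytes of the node, record 1's
        // the two bytes before that, and so on (big-endian 16-bit values).
        public int getRecordOffset(int index) {
            int pos = nodeSize - ((index + 1) * 2);
            return ((nodeData[pos] & 0xFF) << 8) | (nodeData[pos + 1] & 0xFF);
        }

        public void setRecordOffset(int index, int offset) {
            int pos = nodeSize - ((index + 1) * 2);
            nodeData[pos] = (byte) (offset >>> 8);
            nodeData[pos + 1] = (byte) offset;
        }

        public static void main(String[] args) {
            NodeOffsetsSketch node = new NodeOffsetsSketch(4096);
            node.setRecordOffset(0, 14); // first record starts after the 14-byte descriptor
            node.setRecordOffset(1, 300);
            System.out.println(node.getRecordOffset(1)); // prints 300
        }
    }
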
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeRecord.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeRecord.java 2009-01-30 23:01:52 UTC (rev 4929)
@@ -0,0 +1,25 @@
+package org.jnode.fs.hfsplus.tree;
+
+public interface NodeRecord {
+
+ /**
+ * The key that identifies the record.
+ */
+ public Key getKey();
+
+ /**
+ * Get record data as byte array.
+ */
+ public byte[] getData();
+
+ /**
+ * Get node record size
+ */
+ public int getSize();
+
+ /**
+ * Get node record as byte array
+ */
+ public byte[] getBytes();
+
+}
|
|
From: <ga...@us...> - 2009-02-01 15:11:36
|
Revision: 4957
http://jnode.svn.sourceforge.net/jnode/?rev=4957&view=rev
Author: galatnm
Date: 2009-02-01 15:11:23 +0000 (Sun, 01 Feb 2009)
Log Message:
-----------
Filesystem creation done.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -9,6 +9,7 @@
import org.apache.log4j.Logger;
import org.jnode.fs.FSEntry;
import org.jnode.fs.ReadOnlyFileSystemException;
+import org.jnode.fs.hfsplus.catalog.Catalog;
import org.jnode.fs.hfsplus.catalog.CatalogFolder;
import org.jnode.fs.hfsplus.catalog.CatalogKey;
import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
@@ -91,19 +92,21 @@
@Override
protected final FSEntryTable readEntries() throws IOException {
List<FSEntry> pathList = new LinkedList<FSEntry>();
- LeafRecord[] records = ((HfsPlusFileSystem) getFileSystem())
- .getCatalog().getRecords(folder.getFolderId());
- for (LeafRecord rec : records) {
- if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER
- || rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
- String name = ((CatalogKey) rec.getKey()).getNodeName()
+ HfsPlusFileSystem fs = (HfsPlusFileSystem)getFileSystem();
+ if(fs.getVolumeHeader().getFolderCount() > 0) {
+ LeafRecord[] records = fs.getCatalog().getRecords(folder.getFolderId());
+ for (LeafRecord rec : records) {
+ if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER
+ || rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
+ String name = ((CatalogKey) rec.getKey()).getNodeName()
.getUnicodeString();
- HFSPlusEntry e = new HFSPlusEntry(
- (HfsPlusFileSystem) getFileSystem(), null, this, name,
- rec);
- pathList.add(e);
+ HFSPlusEntry e = new HFSPlusEntry(
+ (HfsPlusFileSystem) getFileSystem(), null, this, name,
+ rec);
+ pathList.add(e);
+ }
+ }
}
- }
return new FSEntryTable(((HfsPlusFileSystem) getFileSystem()), pathList);
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -15,7 +15,7 @@
*/
public HFSUnicodeString(final byte[] src, final int offset) {
length = BigEndian.getInt16(src, offset);
- byte[] data = new byte[2 + length * 2];
+ byte[] data = new byte[2 + (length * 2)];
System.arraycopy(src, offset, data, 0, 2);
length = BigEndian.getInt16(data, 0);
data = new byte[length * 2];
@@ -45,7 +45,16 @@
}
public final byte[] getBytes() {
- return (length + "" + string).getBytes();
+ char[] result = new char[length];
+ string.getChars(0, length, result, 0);
+ byte[] name = new byte[length * 2];
+ for (int i = 0; i < length; ++i) {
+ BigEndian.setChar(name, i * 2, result[i]);
+ }
+ byte[] data = new byte[(length * 2) + 2];
+ BigEndian.setInt16(data, 0, length);
+ System.arraycopy(name, 0, data, 2, name.length);
+ return data;
}
}
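
The rewritten getBytes() above replaces the bogus string concatenation with the actual on-disk encoding of an HFSUnicodeString: a 2-byte big-endian character count followed by the characters as UTF-16BE code units. A standalone round-trip of that encoding, using only the JDK (names are illustrative):

    import java.nio.charset.Charset;

    public class UnicodeStringSketch {
        private static final Charset UTF_16BE = Charset.forName("UTF-16BE");

        // Encode: UInt16 character count (big-endian) + the characters in UTF-16BE.
        static byte[] encode(String s) {
            byte[] chars = s.getBytes(UTF_16BE);
            byte[] out = new byte[2 + chars.length];
            out[0] = (byte) (s.length() >>> 8);
            out[1] = (byte) s.length();
            System.arraycopy(chars, 0, out, 2, chars.length);
            return out;
        }

        static String decode(byte[] data, int offset) {
            int length = ((data[offset] & 0xFF) << 8) | (data[offset + 1] & 0xFF);
            return new String(data, offset + 2, length * 2, UTF_16BE);
        }

        public static void main(String[] args) {
            System.out.println(decode(encode("Untitled"), 0)); // prints "Untitled"
        }
    }
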
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -43,7 +43,6 @@
public final void read() throws FileSystemException {
sb = new Superblock(this, false);
- log.debug("Superblock informations:\n" + sb.toString());
if (!sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_UNMNT_BIT)) {
log
.info(getDevice().getId()
@@ -149,7 +148,6 @@
Catalog catalog = new Catalog(params);
this.getApi().write(offset, catalog.getBytes());
log.debug("Write volume header to disk.");
- log.debug(sb.toString());
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
} catch (IOException e) {
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -89,14 +89,14 @@
// TODO initialize attributes, max key length and key comparison type.
bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
// Create root node
- int rootNodePosition = bthr.getRootNode() * bthr.getNodeSize();
+ int rootNodePosition = bthr.getRootNode() * nodeSize;
bufferLength += (rootNodePosition - bufferLength);
//Create node descriptor
NodeDescriptor nd = new NodeDescriptor();
nd.setKind(HfsPlusConstants.BT_LEAF_NODE);
nd.setHeight(1);
nd.setRecordCount(params.isJournaled() ? 6 : 2);
- CatalogNode rootNode = new CatalogNode(nd, bthr.getNodeSize());
+ CatalogNode rootNode = new CatalogNode(nd, nodeSize);
int offset = NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
// First record (folder)
HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -6,11 +6,10 @@
import org.jnode.util.BigEndian;
public class CatalogKey extends AbstractKey {
-
+
+ public static final int MINIMUM_KEY_LENGTH = 6;
public static final int MAXIMUM_KEY_LENGTH = 516;
- private int keyLength;
- private CatalogNodeId parentID;
private HFSUnicodeString nodeName;
/**
@@ -19,14 +18,17 @@
* @param offset
*/
public CatalogKey(final byte[] src, final int offset) {
+ int currentOffset = offset;
byte[] ck = new byte[2];
- System.arraycopy(src, offset, ck, 0, 2);
+ System.arraycopy(src, currentOffset, ck, 0, 2);
keyLength = BigEndian.getInt16(ck, 0);
+ currentOffset += 2;
ck = new byte[4];
- System.arraycopy(src, offset + 2, ck, 0, 4);
+ System.arraycopy(src, currentOffset, ck, 0, 4);
parentID = new CatalogNodeId(ck, 0);
- if (keyLength > 6) {
- nodeName = new HFSUnicodeString(src, offset + 6);
+ currentOffset += 4;
+ if (keyLength > MINIMUM_KEY_LENGTH) {
+ nodeName = new HFSUnicodeString(src, currentOffset);
}
}
@@ -38,7 +40,7 @@
public CatalogKey(final CatalogNodeId parentID, final HFSUnicodeString name) {
this.parentID = parentID;
this.nodeName = name;
- this.keyLength = 6 + name.getLength();
+ this.keyLength = MINIMUM_KEY_LENGTH + name.getLength();
}
public final int getKeyLength() {
@@ -79,12 +81,8 @@
public final String toString() {
StringBuffer s = new StringBuffer();
- s.append("Key length: ").append(getKeyLength()).append(" ");
- s.append("Parent ID: ").append(getParentId().getId()).append(" ");
- s.append("Node name: ")
- .append(
- (getNodeName() != null) ? getNodeName()
- .getUnicodeString() : "");
+ s.append("[length, Parent ID, Node name]:").append(getKeyLength()).append(",").append(getParentId().getId())
+ .append(",").append((getNodeName() != null) ? getNodeName().getUnicodeString() : "");
return s.toString();
}
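
For reference, the catalog key being parsed above has a simple fixed front: a 2-byte big-endian key length, a 4-byte parent catalog node ID, and then (when keyLength > 6) the node name as an HFSUnicodeString. A minimal standalone parse of the fixed part, without the JNode helper classes (names are illustrative):

    public class CatalogKeySketch {
        final int keyLength;   // UInt16 at offset 0
        final long parentId;   // UInt32 at offset 2
        final int nameOffset;  // where the node name starts, if keyLength > 6

        CatalogKeySketch(byte[] src, int offset) {
            keyLength = ((src[offset] & 0xFF) << 8) | (src[offset + 1] & 0xFF);
            parentId = ((long) (src[offset + 2] & 0xFF) << 24)
                    | ((src[offset + 3] & 0xFF) << 16)
                    | ((src[offset + 4] & 0xFF) << 8)
                    | (src[offset + 5] & 0xFF);
            nameOffset = offset + 6;
        }

        public static void main(String[] args) {
            // Fabricated key: length 6, parent CNID 2 (the root folder), no name.
            byte[] key = {0, 6, 0, 0, 0, 2};
            CatalogKeySketch k = new CatalogKeySketch(key, 0);
            System.out.println(k.keyLength + " / " + k.parentId); // prints "6 / 2"
        }
    }
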
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -1,7 +1,12 @@
package org.jnode.fs.hfsplus.tree;
+import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
+
public abstract class AbstractKey implements Key {
+ protected int keyLength;
+ protected CatalogNodeId parentID;
+
public abstract int getKeyLength();
public abstract byte[] getBytes();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -17,5 +17,10 @@
return key.getKeyLength() + recordData.length;
}
- public abstract byte[] getBytes();
+ public byte[] getBytes() {
+ byte[] data = new byte[key.getKeyLength() + this.recordData.length];
+ System.arraycopy(data, 0, key.getBytes(), 0, key.getKeyLength());
+ System.arraycopy(data, key.getKeyLength(), this.recordData, 0, this.recordData.length);
+ return data;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -13,14 +13,8 @@
public IndexRecord(final Key key, final byte[] nodeData, final int offset) {
this.key = key;
this.recordData = new byte[4];
- System.arraycopy(nodeData, offset + key.getKeyLength() + 2, recordData, 0, 4);
+ System.arraycopy(nodeData, offset + key.getKeyLength(), recordData, 0, 4);
}
-
- @Override
- public byte[] getBytes() {
- // TODO Auto-generated method stub
- return null;
- }
public final int getIndex() {
return BigEndian.getInt32(recordData, 0);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-02-01 14:49:39 UTC (rev 4956)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-02-01 15:11:23 UTC (rev 4957)
@@ -20,14 +20,6 @@
return BigEndian.getInt16(this.recordData, 0);
}
- @Override
- public byte[] getBytes() {
- byte[] data = new byte[key.getKeyLength() + this.recordData.length];
- System.arraycopy(data, 0, key.getBytes(), 0, key.getKeyLength());
- System.arraycopy(data, key.getKeyLength(), this.recordData, 0, this.recordData.length);
- return data;
- }
-
public final String toString() {
return "Type : " + getType() + "\nKey : " + getKey().toString() + "\n";
}
|
|
From: <ga...@us...> - 2009-02-09 16:04:07
|
Revision: 5015
http://jnode.svn.sourceforge.net/jnode/?rev=5015&view=rev
Author: galatnm
Date: 2009-02-09 16:04:03 +0000 (Mon, 09 Feb 2009)
Log Message:
-----------
Correct creation of filesystem.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-02-09 15:42:45 UTC (rev 5014)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-02-09 16:04:03 UTC (rev 5015)
@@ -65,8 +65,7 @@
Calendar now = Calendar.getInstance();
now.setTime(new Date());
- int macDate = (int) HFSUtils
- .getDate(now.getTimeInMillis() / 1000, true);
+ int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
HFSUnicodeString dirName = new HFSUnicodeString(name);
CatalogThread thread = new CatalogThread(
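
Background on the macDate computation above: HFS+ timestamps are unsigned 32-bit counts of seconds since midnight, January 1, 1904, GMT, whereas Calendar and System.currentTimeMillis() use the 1970 Unix epoch, so the conversion presumably amounts to adding the 2,082,844,800-second difference between the two epochs. HFSUtils.getDate itself is not shown in this patch; the helper below is only a sketch of that assumed conversion, not the JNode implementation:

    public class HfsDateSketch {
        // Seconds between 1904-01-01 and 1970-01-01 (24,107 days).
        private static final long EPOCH_DIFF_SECONDS = 2082844800L;

        static long unixToHfs(long unixSeconds) {
            return unixSeconds + EPOCH_DIFF_SECONDS;
        }

        static long hfsToUnix(long hfsSeconds) {
            return hfsSeconds - EPOCH_DIFF_SECONDS;
        }

        public static void main(String[] args) {
            long now = System.currentTimeMillis() / 1000;
            long mac = unixToHfs(now);
            System.out.println(mac + " -> " + hfsToUnix(mac)); // round-trips to 'now'
        }
    }
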
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-02-09 15:42:45 UTC (rev 5014)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-02-09 16:04:03 UTC (rev 5015)
@@ -52,6 +52,9 @@
public static final byte EK_DATA_FORK = (byte) 0x00;
public static final byte EK_RESOURCE_FORK = (byte) 0xFF;
+
+ public static final int BYTES_PER_SECTOR = 512;
+ public static final int BITS_PER_SECTOR = 4096;
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-02-09 15:42:45 UTC (rev 5014)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-02-09 16:04:03 UTC (rev 5015)
@@ -162,6 +162,7 @@
- ((sb.getBlockSize() == 512) ? 2 : 1);
// ---
log.debug("Write allocation bitmap bits to disk.");
+ writeAllocationFile((int) volumeBlockUsed);
// ---
log.debug("Write Catalog to disk.");
long offset = sb.getCatalogFile().getExtents()[0].getStartBlock() * sb.getBlockSize();
@@ -176,4 +177,9 @@
throw new FileSystemException("Unable to create HFS+ filesystem", e);
}
}
+
+ private void writeAllocationFile(int blockUsed) {
+ int bytes = blockUsed >> 3;
+ int bits = blockUsed & 0x0007;
+ }
}
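
The new writeAllocationFile() stub only splits blockUsed into whole bytes and leftover bits so far. In the HFS+ allocation file each allocation block is one bit, most significant bit first, with 1 meaning "in use", so the body could plausibly continue along the lines of the sketch below; this is a guess at the intent, not code from the patch:

    public class AllocationBitmapSketch {
        // Mark the first 'blockUsed' allocation blocks as used.
        // One bit per block; the most significant bit of byte 0 is block 0.
        static byte[] buildBitmap(int blockUsed, int bitmapLength) {
            byte[] bitmap = new byte[bitmapLength];
            int fullBytes = blockUsed >> 3;      // bytes that are completely used
            int remainingBits = blockUsed & 0x7; // leading bits of the next byte
            for (int i = 0; i < fullBytes; i++) {
                bitmap[i] = (byte) 0xFF;
            }
            if (remainingBits > 0) {
                bitmap[fullBytes] = (byte) (0xFF << (8 - remainingBits)); // e.g. 3 bits -> 1110 0000
            }
            return bitmap;
        }

        public static void main(String[] args) {
            byte[] bm = buildBitmap(11, 4); // 11 blocks in use
            System.out.printf("%02X %02X%n", bm[0], bm[1]); // prints "FF E0"
        }
    }
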
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java 2009-02-09 15:42:45 UTC (rev 5014)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java 2009-02-09 16:04:03 UTC (rev 5015)
@@ -58,4 +58,12 @@
public byte[] getBytes() {
return datas;
}
+
+ public String toString() {
+ StringBuffer b = new StringBuffer();
+ b.append((this.isLeafNode()) ? "Leaf node" : "Index node").append("\n");
+ b.append(this.getNodeDescriptor().toString());
+ return b.toString();
+
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java 2009-02-09 15:42:45 UTC (rev 5014)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNodeRecord.java 2009-02-09 16:04:03 UTC (rev 5015)
@@ -39,8 +39,8 @@
public byte[] getBytes() {
byte[] data = new byte[key.getKeyLength() + this.recordData.length];
- System.arraycopy(data, 0, key.getBytes(), 0, key.getKeyLength());
- System.arraycopy(data, key.getKeyLength(), this.recordData, 0, this.recordData.length);
+ System.arraycopy(key.getBytes(), 0, data, 0, key.getKeyLength());
+ System.arraycopy(this.recordData, 0, data, key.getKeyLength(), this.recordData.length);
return data;
}
}
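
The fix above is purely about System.arraycopy argument order (src, srcPos, dest, destPos, length): the previous version copied from the freshly allocated, all-zero buffer into the key bytes and into recordData, so getBytes() returned zeros and clobbered the record data. In isolation, the corrected key-plus-data concatenation looks like this (names are illustrative):

    public class RecordBytesSketch {
        // Serialize a record as its key bytes followed by its data bytes.
        // System.arraycopy copies FROM the first argument TO the third.
        static byte[] concat(byte[] keyBytes, byte[] recordData) {
            byte[] out = new byte[keyBytes.length + recordData.length];
            System.arraycopy(keyBytes, 0, out, 0, keyBytes.length);
            System.arraycopy(recordData, 0, out, keyBytes.length, recordData.length);
            return out;
        }

        public static void main(String[] args) {
            byte[] out = concat(new byte[] {1, 2}, new byte[] {3, 4, 5});
            System.out.println(java.util.Arrays.toString(out)); // [1, 2, 3, 4, 5]
        }
    }
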
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-02-09 15:42:45 UTC (rev 5014)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-02-09 16:04:03 UTC (rev 5015)
@@ -33,7 +33,7 @@
public LeafRecord(final Key key, final byte[] nodeData, final int offset, final int recordDataSize) {
this.key = key;
this.recordData = new byte[recordDataSize];
- System.arraycopy(nodeData, offset + key.getKeyLength() + 2, this.recordData, 0, recordDataSize);
+ System.arraycopy(nodeData, offset + key.getKeyLength(), this.recordData, 0, recordDataSize);
}
public final int getType() {
|
|
From: <ga...@us...> - 2009-03-05 16:35:21
|
Revision: 5088
http://jnode.svn.sourceforge.net/jnode/?rev=5088&view=rev
Author: galatnm
Date: 2009-03-05 16:31:25 +0000 (Thu, 05 Mar 2009)
Log Message:
-----------
Don't use AbstractFS classes anymore.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-05 11:33:43 UTC (rev 5087)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-05 16:31:25 UTC (rev 5088)
@@ -17,16 +17,18 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import java.io.IOException;
import java.util.Calendar;
import java.util.Date;
+import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import org.apache.log4j.Logger;
+import org.jnode.fs.FSDirectory;
import org.jnode.fs.FSEntry;
import org.jnode.fs.ReadOnlyFileSystemException;
import org.jnode.fs.hfsplus.catalog.CatalogFolder;
@@ -34,93 +36,117 @@
import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
import org.jnode.fs.hfsplus.catalog.CatalogThread;
import org.jnode.fs.hfsplus.tree.LeafRecord;
-import org.jnode.fs.spi.AbstractFSDirectory;
import org.jnode.fs.spi.FSEntryTable;
-public class HFSPlusDirectory extends AbstractFSDirectory {
- private final Logger log = Logger.getLogger(getClass());
+public class HFSPlusDirectory extends HFSPlusEntry implements FSDirectory {
- private LeafRecord record;
+ private static final Logger log = Logger.getLogger(HFSPlusDirectory.class);
+ /** Table of entries of our parent */
+ private FSEntryTable entries;
+
private CatalogFolder folder;
- public HFSPlusDirectory(final HFSPlusEntry e) {
- super((HfsPlusFileSystem) e.getFileSystem());
- this.record = e.getRecord();
+ public HFSPlusDirectory(HfsPlusFileSystem fs, HFSPlusDirectory parent, String name,
+ LeafRecord record) {
+ super(fs, parent, name, record);
this.folder = new CatalogFolder(record.getData());
- log.debug("Associated record:" + record.toString());
- if (record.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER) {
- log.debug("Associated folder : " + folder.toString());
- }
}
+ public FSEntryTable getTable() {
+ return entries;
+ }
+
@Override
- protected final FSEntry createDirectoryEntry(final String name)
- throws IOException {
- if (!canWrite()) {
- throw new ReadOnlyFileSystemException();
- }
- Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem())
- .getVolumeHeader();
+ public FSEntry addDirectory(String name) throws IOException {
+ // TODO Auto-generated method stub
+ return null;
+ }
- Calendar now = Calendar.getInstance();
- now.setTime(new Date());
- int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+ @Override
+ public FSEntry addFile(String name) throws IOException {
+ // TODO Auto-generated method stub
+ return null;
+ }
- HFSUnicodeString dirName = new HFSUnicodeString(name);
- CatalogThread thread = new CatalogThread(
- HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
- .getFolderId(), dirName);
+ @Override
+ public void flush() throws IOException {
+ // TODO Auto-generated method stub
- CatalogFolder newFolder = new CatalogFolder();
- newFolder
- .setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
- newFolder.setCreateDate(macDate);
- newFolder.setContentModDate(macDate);
- newFolder.setAttrModDate(macDate);
- log.debug("New catalog folder :\n" + newFolder.toString());
+ }
- CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
- log.debug("New catalog key :\n" + key.toString());
+ @Override
+ public FSEntry getEntry(String name) throws IOException {
+ // TODO Auto-generated method stub
+ return null;
+ }
- LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
- log.debug("New record folder :\n" + folderRecord.toString());
-
- HFSPlusEntry newEntry = new HFSPlusEntry(
- (HfsPlusFileSystem) getFileSystem(), null, this, name,
- folderRecord);
- volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
- log.debug("New volume header :\n" + volumeHeader.toString());
-
- return newEntry;
+ @Override
+ public Iterator<? extends FSEntry> iterator() throws IOException {
+ // TODO Auto-generated method stub
+ return null;
}
@Override
- protected final FSEntry createFileEntry(final String name)
- throws IOException {
- throw new ReadOnlyFileSystemException();
+ public void remove(String name) throws IOException {
+ // TODO Auto-generated method stub
+
}
- public synchronized void remove(String name) throws IOException {
- if (!canWrite()) {
- throw new ReadOnlyFileSystemException();
+ // Helper methods
+
+ /**
+ * BE CAREFUL: don't call this method from the constructor of this class,
+ * because it calls the readEntries method of child classes that are not
+ * yet initialized (constructed).
+ */
+ protected final void checkEntriesLoaded() {
+ if (!isEntriesLoaded()) {
+ log.debug("checkEntriesLoaded : loading");
+ try {
+ if (rights.canRead()) {
+ entries = readEntries();
+ } else {
+ // the next time, we will call checkEntriesLoaded()
+ // we will retry to load entries
+ entries = FSEntryTable.EMPTY_TABLE;
+ log.debug("checkEntriesLoaded : can't read, using EMPTY_TABLE");
+ }
+ resetDirty();
+ } catch (IOException e) {
+ log.fatal("unable to read directory entries", e);
+ // the next time, we will call checkEntriesLoaded()
+ // we will retry to load entries
+ entries = FSEntryTable.EMPTY_TABLE;
+ }
}
+ log.debug("<<< END checkEntriesLoaded >>>");
}
- @Override
- protected final FSEntryTable readEntries() throws IOException {
+ /**
+ * Have we already loaded our entries from the device?
+ *
+ * @return true if the entries are already loaded from the device
+ */
+ private final boolean isEntriesLoaded() {
+ return (entries != FSEntryTable.EMPTY_TABLE);
+ }
+
+ /**
+ *
+ * @return
+ * @throws IOException
+ */
+ private final FSEntryTable readEntries() throws IOException {
List<FSEntry> pathList = new LinkedList<FSEntry>();
HfsPlusFileSystem fs = (HfsPlusFileSystem) getFileSystem();
if (fs.getVolumeHeader().getFolderCount() > 0) {
LeafRecord[] records = fs.getCatalog().getRecords(folder.getFolderId());
for (LeafRecord rec : records) {
- if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER
- || rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
- String name = ((CatalogKey) rec.getKey()).getNodeName()
- .getUnicodeString();
- HFSPlusEntry e = new HFSPlusEntry(
- (HfsPlusFileSystem) getFileSystem(), null, this, name,
- rec);
+ if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER ||
+ rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
+ String name = ((CatalogKey) rec.getKey()).getNodeName().getUnicodeString();
+ HFSPlusEntry e = new HFSPlusEntry(fs, this, name, rec);
pathList.add(e);
}
}
@@ -128,10 +154,46 @@
return new FSEntryTable(((HfsPlusFileSystem) getFileSystem()), pathList);
}
- @Override
- protected void writeEntries(final FSEntryTable entries) throws IOException {
- // TODO Auto-generated method stub
+ /**
+ *
+ * @param name
+ * @return
+ * @throws IOException
+ */
+ private final FSEntry createDirectoryEntry(final String name) throws IOException {
+ if (fs.isReadOnly()) {
+ throw new ReadOnlyFileSystemException();
+ }
+ Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
+
+ Calendar now = Calendar.getInstance();
+ now.setTime(new Date());
+ int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+
+ HFSUnicodeString dirName = new HFSUnicodeString(name);
+ CatalogThread thread =
+ new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
+ .getFolderId(), dirName);
+
+ CatalogFolder newFolder = new CatalogFolder();
+ newFolder.setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ newFolder.setCreateDate(macDate);
+ newFolder.setContentModDate(macDate);
+ newFolder.setAttrModDate(macDate);
+ log.debug("New catalog folder :\n" + newFolder.toString());
+
+ CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
+ log.debug("New catalog key :\n" + key.toString());
+
+ LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
+ log.debug("New record folder :\n" + folderRecord.toString());
+
+ HFSPlusEntry newEntry = new HFSPlusEntry(fs, this, name, folderRecord);
+ volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
+ log.debug("New volume header :\n" + volumeHeader.toString());
+
+ return newEntry;
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-03-05 11:33:43 UTC (rev 5087)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-03-05 16:31:25 UTC (rev 5088)
@@ -17,24 +17,52 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
+import java.io.IOException;
+
+import org.jnode.fs.FSAccessRights;
+import org.jnode.fs.FSDirectory;
+import org.jnode.fs.FSEntry;
+import org.jnode.fs.FSFile;
+import org.jnode.fs.FileSystem;
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.spi.AbstractFSEntry;
-import org.jnode.fs.spi.FSEntryTable;
+import org.jnode.fs.spi.UnixFSAccessRights;
-public class HFSPlusEntry extends AbstractFSEntry {
+public class HFSPlusEntry implements FSEntry {
- private LeafRecord record;
+ protected HfsPlusFileSystem fs;
+ protected HFSPlusDirectory parent;
+ protected String name;
+ protected LeafRecord record;
+ private int type;
- public HFSPlusEntry(final HfsPlusFileSystem fs, final FSEntryTable table, final HFSPlusDirectory parent,
- final String name, final LeafRecord record) {
- super(fs, table, parent, name, getFSEntryType(name, record));
+ protected boolean valid;
+ protected boolean dirty;
+ protected FSAccessRights rights;
+ private long lastModified;
+
+ /**
+ *
+ * @param fs
+ * @param parent
+ * @param name
+ * @param record
+ */
+ public HFSPlusEntry(HfsPlusFileSystem fs, HFSPlusDirectory parent, String name,
+ LeafRecord record) {
+ this.fs = fs;
+ this.parent = parent;
+ this.name = name;
this.record = record;
+ this.type = getFSEntryType();
+ this.rights = new UnixFSAccessRights(fs);
+ this.lastModified = System.currentTimeMillis();
}
- private static int getFSEntryType(final String name, final LeafRecord record) {
+ private int getFSEntryType() {
int mode = record.getType();
if ("/".equals(name)) {
return AbstractFSEntry.ROOT_ENTRY;
@@ -47,7 +75,91 @@
}
}
- public final LeafRecord getRecord() {
- return record;
+ @Override
+ public FSAccessRights getAccessRights() throws IOException {
+ return rights;
}
+
+ @Override
+ public FSDirectory getDirectory() throws IOException {
+ if (!isDirectory()) {
+ throw new IOException("It is not a directory");
+ }
+ return (HFSPlusDirectory) this;
+ }
+
+ @Override
+ public FSFile getFile() throws IOException {
+ if (!isFile()) {
+ throw new IOException("It is not a file");
+ }
+ return (HFSPlusFile) this;
+ }
+
+ @Override
+ public long getLastModified() throws IOException {
+ // TODO Auto-generated method stub
+ return lastModified;
+ }
+
+ @Override
+ public String getName() {
+ return name;
+ }
+
+ @Override
+ public FSDirectory getParent() {
+ return parent;
+ }
+
+ @Override
+ public boolean isDirectory() {
+ return (type == AbstractFSEntry.DIR_ENTRY || type == AbstractFSEntry.ROOT_ENTRY);
+ }
+
+ @Override
+ public boolean isDirty() throws IOException {
+ return dirty;
+ }
+
+ public void setDirty() {
+ dirty = true;
+ }
+
+ public void resetDirty() {
+ dirty = false;
+ }
+
+ @Override
+ public boolean isFile() {
+ return (type == AbstractFSEntry.FILE_ENTRY);
+ }
+
+ @Override
+ public void setLastModified(long lastModified) throws IOException {
+ this.lastModified = lastModified;
+ }
+
+ @Override
+ public void setName(String newName) throws IOException {
+ if (type == AbstractFSEntry.ROOT_ENTRY) {
+ throw new IOException("Cannot change name of root directory");
+ }
+ if (parent.getTable().rename(name, newName) < 0) {
+ throw new IOException("Cannot change name");
+ }
+
+ this.name = newName;
+ }
+
+ @Override
+ public FileSystem<?> getFileSystem() {
+ return fs;
+ }
+
+ @Override
+ public boolean isValid() {
+ return valid;
+ }
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-05 11:33:43 UTC (rev 5087)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-05 16:31:25 UTC (rev 5088)
@@ -17,24 +17,23 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import java.io.IOException;
import java.nio.ByteBuffer;
+import org.jnode.fs.FSFile;
import org.jnode.fs.hfsplus.catalog.CatalogFile;
import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
import org.jnode.fs.hfsplus.tree.LeafRecord;
-import org.jnode.fs.spi.AbstractFSFile;
-public class HFSPlusFile extends AbstractFSFile {
- private LeafRecord record;
+public class HFSPlusFile extends HFSPlusEntry implements FSFile {
+
private CatalogFile file;
- public HFSPlusFile(final HFSPlusEntry e) {
- super((HfsPlusFileSystem) e.getFileSystem());
- this.record = e.getRecord();
+ public HFSPlusFile(HfsPlusFileSystem fs, HFSPlusDirectory parent, String name, LeafRecord record) {
+ super(fs, parent, name, record);
this.file = new CatalogFile(record.getData());
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-05 11:33:43 UTC (rev 5087)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-05 16:31:25 UTC (rev 5088)
@@ -90,21 +90,19 @@
@Override
protected final FSDirectory createDirectory(final FSEntry entry)
throws IOException {
- HFSPlusEntry e = (HFSPlusEntry) entry;
- return new HFSPlusDirectory(e);
+ return entry.getDirectory();
}
@Override
protected final FSFile createFile(final FSEntry entry) throws IOException {
- HFSPlusEntry e = (HFSPlusEntry) entry;
- return new HFSPlusFile(e);
+ return entry.getFile();
}
@Override
protected final HFSPlusEntry createRootEntry() throws IOException {
LeafRecord record = catalog.getRecord(CatalogNodeId.HFSPLUS_POR_CNID);
if (record != null) {
- return new HFSPlusEntry(this, null, null, "/", record);
+ return new HFSPlusEntry(this, null, "/", record);
}
log.debug("Root entry : No record found.");
return null;
|
|
From: <ga...@us...> - 2009-03-13 19:35:56
|
Revision: 5097
http://jnode.svn.sourceforge.net/jnode/?rev=5097&view=rev
Author: galatnm
Date: 2009-03-13 19:35:50 +0000 (Fri, 13 Mar 2009)
Log Message:
-----------
Rewrite and update javadocs.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -176,11 +176,9 @@
new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
.getFolderId(), dirName);
- CatalogFolder newFolder = new CatalogFolder();
- newFolder.setFolderId(new CatalogNodeId(volumeHeader.getNextCatalogId()));
- newFolder.setCreateDate(macDate);
- newFolder.setContentModDate(macDate);
- newFolder.setAttrModDate(macDate);
+ CatalogFolder newFolder =
+ new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()), macDate,
+ macDate, macDate);
log.debug("New catalog folder :\n" + newFolder.toString());
CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -51,7 +51,7 @@
public final void read(final long fileOffset, final ByteBuffer dest) throws IOException {
HfsPlusFileSystem fs = (HfsPlusFileSystem) getFileSystem();
for (ExtentDescriptor d : file.getDataFork().getExtents()) {
- if (d.getStartBlock() != 0 && d.getBlockCount() != 0) {
+ if (!d.isEmpty()) {
long firstOffset = d.getStartBlock() * fs.getVolumeHeader().getBlockSize();
fs.getApi().read(firstOffset, dest);
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -26,17 +26,31 @@
public class HFSPlusForkData {
public static final int FORK_DATA_LENGTH = 80;
private static final int EXTENT_OFFSET = 16;
+ /** The size in bytes of the valid data in the fork. */
+ private long totalSize;
+ /** The clump size of the fork, in bytes. */
+ private int clumpSize;
+ /** The total number of allocation blocks used by all extents in the fork. */
+ private int totalBlock;
+ /** The first eight extent descriptors for the fork. */
+ private ExtentDescriptor[] extents;
- private byte[] data;
-
/**
+ * Create fork data from existing information.
*
* @param src
* @param offset
*/
public HFSPlusForkData(final byte[] src, final int offset) {
- data = new byte[FORK_DATA_LENGTH];
+ byte[] data = new byte[FORK_DATA_LENGTH];
System.arraycopy(src, offset, data, 0, FORK_DATA_LENGTH);
+ totalSize = BigEndian.getInt64(data, 0);
+ clumpSize = BigEndian.getInt32(data, 8);
+ totalBlock = BigEndian.getInt32(data, 12);
+ extents = new ExtentDescriptor[8];
+ for (int i = 0; i < 8; i++) {
+ extents[i] = new ExtentDescriptor(data, EXTENT_OFFSET + (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
+ }
}
/**
@@ -47,60 +61,65 @@
* @param clumpSize
* @param totalBock
*/
- public HFSPlusForkData() {
- data = new byte[FORK_DATA_LENGTH];
+ public HFSPlusForkData(long totalSize, int clumpSize, int totalBlock) {
+ this.totalSize = totalSize;
+ this.clumpSize = clumpSize;
+ this.totalBlock = totalBlock;
+ this.extents = new ExtentDescriptor[8];
+ }
+
+ public byte[] getBytes() {
+ byte[] data = new byte[FORK_DATA_LENGTH];
+ BigEndian.setInt64(data, 0, totalSize);
+ BigEndian.setInt32(data, 8, clumpSize);
+ BigEndian.setInt32(data, 12, totalBlock);
+ return data;
}
-
- public final long getTotalSize() {
- return BigEndian.getInt64(data, 0);
+
+
+
+ /*
+ * (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ public final String toString() {
+ StringBuffer s = new StringBuffer();
+ s.append("Total size : ").append(totalSize).append("\n");
+ s.append("Clump size : ").append(clumpSize).append("\n");
+ s.append("Total Blocks : ").append(totalBlock).append("\n");
+ for (int i = 0; i < extents.length; i++) {
+ s.append("Extent[" + i + "]: " + extents[i].toString());
+ }
+ return s.toString();
}
- public final void setTotalSize(long totalSize) {
- BigEndian.setInt64(data, 0, totalSize);
- }
+ public long getTotalSize() {
+ return totalSize;
+ }
- public final int getClumpSize() {
- return BigEndian.getInt32(data, 8);
- }
+ public int getClumpSize() {
+ return clumpSize;
+ }
- public final void setClumpSize(int clumpSize) {
- BigEndian.setInt32(data, 8, clumpSize);
+ public int getTotalBlocks() {
+ return totalBlock;
+ }
+
+ public ExtentDescriptor getExtent(int index){
+ return extents[index];
+ }
+
+ /**
+ *
+ * @param index
+ * @param desc
+ */
+ public final void addDescriptor(int index, ExtentDescriptor desc) {
+ extents[index] = desc;
}
- public final int getTotalBlocks() {
- return BigEndian.getInt32(data, 12);
+ public ExtentDescriptor[] getExtents(){
+ return extents;
}
- public final void setTotalBlocks(int totalBlock) {
- BigEndian.setInt32(data, 12, totalBlock);
- }
-
- public final ExtentDescriptor[] getExtents() {
- ExtentDescriptor[] list = new ExtentDescriptor[8];
- for (int i = 0; i < 8; i++) {
- list[i] = new ExtentDescriptor(data, EXTENT_OFFSET + (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
- }
- return list;
- }
-
- public final void setExtentDescriptor(int position, ExtentDescriptor desc) {
- int offset = EXTENT_OFFSET + (position * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
- System.arraycopy(desc.getBytes(), 0, data, offset, ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH);
- }
-
- public byte[] getBytes() {
- return data;
- }
-
- public final String toString() {
- StringBuffer s = new StringBuffer();
- s.append("Total size : ").append(getTotalSize()).append("\n");
- s.append("Clump size : ").append(getClumpSize()).append("\n");
- s.append("Total Blocks : ").append(getTotalBlocks()).append("\n");
- ExtentDescriptor[] list = getExtents();
- for (int i = 0; i < list.length; i++) {
- s.append("Extent[" + i + "]: " + list[i].toString());
- }
- return s.toString();
- }
}
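
Note that the new getBytes() above writes totalSize, clumpSize and totalBlock but not the eight extent descriptors, so a fork round-tripped through getBytes() and the byte constructor would lose its extents. A fuller serialization, assuming the layout used by the constructor (int64 total size at 0, int32 clump size at 8, int32 total blocks at 12, then eight 8-byte extent descriptors from offset 16), might look like the sketch below; ForkDataSketch and its fields are illustrative stand-ins, not JNode classes:

    import java.nio.ByteBuffer;

    public class ForkDataSketch {
        long totalSize;                  // bytes of valid data in the fork
        int clumpSize;
        int totalBlocks;
        int[][] extents = new int[8][2]; // {startBlock, blockCount} pairs

        byte[] getBytes() {
            ByteBuffer buf = ByteBuffer.allocate(80); // big-endian by default
            buf.putLong(totalSize);
            buf.putInt(clumpSize);
            buf.putInt(totalBlocks);
            for (int[] extent : extents) {
                buf.putInt(extent[0]);
                buf.putInt(extent[1]);
            }
            return buf.array();
        }

        public static void main(String[] args) {
            ForkDataSketch fork = new ForkDataSketch();
            fork.totalSize = 4096 * 1024;
            fork.clumpSize = 4096 * 1024;
            fork.totalBlocks = 1024;
            fork.extents[0] = new int[] {1, 1024};
            System.out.println(fork.getBytes().length); // prints 80
        }
    }
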
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -46,10 +46,6 @@
public static final int RECORD_TYPE_FOLDER_THREAD = 0x0003;
public static final int RECORD_TYPE_FILE_THREAD = 0x0004;
- public static final int kJIJournalInFSMask = 0x00000001;
- public static final int kJIJournalOnOtherDeviceMask = 0x00000002;
- public static final int kJIJournalNeedInitMask = 0x00000004;
-
public static final byte EK_DATA_FORK = (byte) 0x00;
public static final byte EK_RESOURCE_FORK = (byte) 0xFF;
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -163,7 +163,7 @@
writeAllocationFile((int) volumeBlockUsed);
// ---
log.debug("Write Catalog to disk.");
- long offset = sb.getCatalogFile().getExtents()[0].getStartBlock() * sb.getBlockSize();
+ long offset = sb.getCatalogFile().getExtent(0).getStartBlock() * sb.getBlockSize();
Catalog catalog = new Catalog(params);
this.getApi().write(offset, catalog.getBytes());
log.debug("Write volume header to disk.");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -17,32 +17,56 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import org.jnode.util.BigEndian;
public class JournalInfoBlock {
- private byte[] data;
+ /** Flag indicating that the journal is located in the volume itself. */
+ public static final int IN_FS_MASK = 0x00000001;
+ /**
+ * Flag indicating that the journal is located on another device. This flag
+ * is not currently supported.
+ */
+ public static final int ION_OTHER_DEVICE_MASK = 0x00000002;
+ /** Flag indicating that the journal header is invalid and must be initialized. */
+ public static final int NEED_INITIALIZATION = 0x00000004;
+ /** One-bit flags; see the constants above. */
+ private int flag;
+ /** Device where the journal is located if it is not in the volume itself. */
+ private int deviceSignature;
+ /** Journal start position on the volume. */
+ private long offset;
+ /** Size of the journal, including header and buffer. */
+ private long size;
- public JournalInfoBlock(final byte[] src) {
- data = new byte[180];
- System.arraycopy(src, 0, data, 0, 180);
- }
+ public JournalInfoBlock(final byte[] src) {
+ byte[] data = new byte[180];
+ System.arraycopy(src, 0, data, 0, 180);
+ flag = BigEndian.getInt32(data, 0);
+ deviceSignature = BigEndian.getInt32(data, 4);
+ offset = BigEndian.getInt64(data, 36);
+ size = BigEndian.getInt64(data, 44);
+ }
- public final int getFlag() {
- return BigEndian.getInt32(data, 0);
- }
+ public final String toString() {
+ return "Journal : " + offset + "::" + size;
+ }
- public final long getOffset() {
- return BigEndian.getInt64(data, 36);
- }
+ public int getFlag() {
+ return flag;
+ }
- public final long getSize() {
- return BigEndian.getInt64(data, 44);
- }
+ public int getDeviceSignature() {
+ return deviceSignature;
+ }
- public final String toString() {
- return "Journal : " + getOffset() + "::" + getSize();
- }
+ public long getOffset() {
+ return offset;
+ }
+
+ public long getSize() {
+ return size;
+ }
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -133,18 +133,16 @@
long allocationClumpSize = getClumpSize(params.getBlockCount());
long bitmapBlocks = allocationClumpSize / blockSize;
long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
- HFSPlusForkData forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(allocationClumpSize);
- forkdata.setClumpSize((int) allocationClumpSize);
- forkdata.setTotalBlocks((int) bitmapBlocks);
- ExtentDescriptor desc = new ExtentDescriptor();
- desc.setStartBlock(1 + burnedBlocksBeforeVH);
- desc.setBlockCount((int) bitmapBlocks);
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 112, forkdata.FORK_DATA_LENGTH);
+
+ int startBlock = 1 + burnedBlocksBeforeVH;
+ int blockCount = (int) bitmapBlocks;
+
+ HFSPlusForkData forkdata = new HFSPlusForkData(allocationClumpSize, (int)allocationClumpSize,(int) bitmapBlocks);
+ ExtentDescriptor desc = new ExtentDescriptor(startBlock, blockCount);
+ forkdata.addDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 112, HFSPlusForkData.FORK_DATA_LENGTH);
// Journal creation
int nextBlock = 0;
-
if (params.isJournaled()) {
this.setFileCount(2);
this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
@@ -156,27 +154,18 @@
nextBlock = desc.getStartBlock() + desc.getBlockCount();
}
// Extent B-Tree initialization
- forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(params.getExtentClumpSize());
- forkdata.setClumpSize(params.getExtentClumpSize());
- forkdata.setTotalBlocks((params.getExtentClumpSize() / blockSize));
- desc = new ExtentDescriptor();
- desc.setStartBlock(nextBlock);
- desc.setBlockCount(forkdata.getTotalBlocks());
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 192, forkdata.FORK_DATA_LENGTH);
+ forkdata = new HFSPlusForkData(params.getExtentClumpSize(),params.getExtentClumpSize(),(params.getExtentClumpSize() / blockSize));
+ desc = new ExtentDescriptor(nextBlock, forkdata.getTotalBlocks());
+ forkdata.addDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 192, HFSPlusForkData.FORK_DATA_LENGTH);
blockUsed += forkdata.getTotalBlocks();
// Catalog B-Tree initialization
- forkdata = new HFSPlusForkData();
- forkdata.setTotalSize(params.getCatalogClumpSize());
- forkdata.setClumpSize(params.getCatalogClumpSize());
- forkdata.setTotalBlocks(params.getCatalogClumpSize() / blockSize);
- desc = new ExtentDescriptor();
- desc.setStartBlock(this.getExtentsFile().getExtents()[0].getStartBlock()
- + this.getExtentsFile().getExtents()[0].getBlockCount());
- desc.setBlockCount(forkdata.getTotalBlocks());
- forkdata.setExtentDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 272, forkdata.FORK_DATA_LENGTH);
+ forkdata = new HFSPlusForkData(params.getCatalogClumpSize(),params.getCatalogClumpSize(),(params.getCatalogClumpSize() / blockSize));
+ startBlock = this.getExtentsFile().getExtent(0).getStartBlock() + this.getExtentsFile().getExtent(0).getBlockCount();
+ blockCount = forkdata.getTotalBlocks();
+ desc = new ExtentDescriptor(startBlock, blockCount);
+ forkdata.addDescriptor(0, desc);
+ System.arraycopy(forkdata.getBytes(), 0, data, 272, HFSPlusForkData.FORK_DATA_LENGTH);
blockUsed += forkdata.getTotalBlocks();
this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -22,12 +22,15 @@
import java.io.IOException;
import java.nio.ByteBuffer;
+import java.util.Calendar;
+import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import org.apache.log4j.Logger;
import org.jnode.fs.hfsplus.HFSPlusParams;
import org.jnode.fs.hfsplus.HFSUnicodeString;
+import org.jnode.fs.hfsplus.HFSUtils;
import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.HfsPlusFileSystem;
import org.jnode.fs.hfsplus.Superblock;
@@ -65,7 +68,7 @@
log.debug("Load B-Tree catalog file.\n");
this.fs = fs;
Superblock sb = fs.getVolumeHeader();
- ExtentDescriptor firstExtent = sb.getCatalogFile().getExtents()[0];
+ ExtentDescriptor firstExtent = sb.getCatalogFile().getExtent(0);
catalogHeaderNodeOffset = firstExtent.getStartBlock() * sb.getBlockSize();
if (firstExtent.getStartBlock() != 0 && firstExtent.getBlockCount() != 0) {
buffer = ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH
@@ -121,9 +124,13 @@
// First record (folder)
HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, name);
- CatalogFolder folder = new CatalogFolder();
- folder.setFolderId(CatalogNodeId.HFSPLUS_ROOT_CNID);
- folder.setValence(params.isJournaled() ? 2 : 0);
+ int valence = params.isJournaled() ? 2 : 0;
+ Calendar now = Calendar.getInstance();
+ now.setTime(new Date());
+ int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+ CatalogFolder folder =
+ new CatalogFolder(valence, CatalogNodeId.HFSPLUS_ROOT_CNID, macDate, macDate,
+ macDate);
LeafRecord record = new LeafRecord(ck, folder.getBytes());
rootNode.addNodeRecord(0, record, offset);
// Second record (thread)
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -27,89 +27,103 @@
public class CatalogFolder {
public static final int CATALOG_FOLDER_SIZE = 88;
-
- private byte[] data;
+ private int recordType;
+ private int valence;
+ private CatalogNodeId folderId;
+ private int createDate;
+ private int contentModDate;
+ private int attrModDate;
+
+ /**
+ *
+ * @param src
+ */
public CatalogFolder(final byte[] src) {
- data = new byte[88];
+ byte[] data = new byte[88];
System.arraycopy(src, 0, data, 0, CATALOG_FOLDER_SIZE);
+ recordType = BigEndian.getInt16(data, 0);
+ valence = BigEndian.getInt32(data, 4);
+ folderId = new CatalogNodeId(data, 8);
+ createDate = BigEndian.getInt32(data, 12);
+ contentModDate = BigEndian.getInt32(data, 16);
+ attrModDate = BigEndian.getInt32(data, 20);
}
/**
- * Create a new catalog folder.
*
- * @param folderId
- *
+ * @param valence
+ * @param folderID
+ * @param createDate
+ * @param contentModDate
+ * @param attrModDate
*/
- public CatalogFolder() {
- data = new byte[88];
- BigEndian.setInt16(data, 0, HfsPlusConstants.RECORD_TYPE_FOLDER);
+ public CatalogFolder(int valence, CatalogNodeId folderID, int createDate,
+ int contentModDate, int attribModDate) {
+ this.recordType = HfsPlusConstants.RECORD_TYPE_FOLDER;
+ this.valence = valence;
+ this.folderId = folderID;
+ this.createDate = createDate;
+ this.contentModDate = contentModDate;
+ this.attrModDate = attribModDate;
}
- public final int getRecordType() {
- return BigEndian.getInt16(data, 0);
- }
-
- public final void setValence(int valence) {
+ /**
+ * Return the byte array representation of the catalog folder.
+ *
+ * @return byte array representation.
+ */
+ public byte[] getBytes() {
+ byte[] data = new byte[88];
+ BigEndian.setInt16(data, 0, recordType);
BigEndian.setInt32(data, 4, valence);
+ System.arraycopy(folderId.getBytes(), 0, data, 8, folderId.getBytes().length);
+ BigEndian.setInt32(data, 12, createDate);
+ BigEndian.setInt32(data, 16, contentModDate);
+ BigEndian.setInt32(data, 20, attrModDate);
+ return data;
}
- public final int getValence() {
- return BigEndian.getInt32(data, 4);
+ /*
+ * (non-Javadoc)
+ * @see java.lang.Object#toString()
+ */
+ public String toString() {
+ StringBuffer s = new StringBuffer();
+ s.append("Record type: ").append(recordType).append("\n");
+ s.append("Valence: ").append(valence).append("\n");
+ s.append("Folder ID: ").append(folderId.getId()).append("\n");
+ s.append("Creation Date :").append(
+ HFSUtils.printDate(createDate, "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Content Mod Date :").append(
+ HFSUtils.printDate(contentModDate, "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Attr Mod Date :").append(
+ HFSUtils.printDate(attrModDate, "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ return s.toString();
}
- public final CatalogNodeId getFolderId() {
- return new CatalogNodeId(data, 8);
+ public int getRecordType() {
+ return recordType;
}
- public final void setFolderId(CatalogNodeId folderId) {
- System.arraycopy(folderId.getBytes(), 0, data, 8,
- folderId.getBytes().length);
+ public int getValence() {
+ return valence;
}
- public final int getCreateDate() {
- return BigEndian.getInt32(data, 12);
+ public CatalogNodeId getFolderId() {
+ return folderId;
}
- public void setCreateDate(int time) {
- BigEndian.setInt32(data, 12, time);
+ public int getCreateDate() {
+ return createDate;
}
- public final int getContentModDate() {
- return BigEndian.getInt32(data, 16);
+ public int getContentModDate() {
+ return contentModDate;
}
- public void setContentModDate(int time) {
- BigEndian.setInt32(data, 16, time);
+ public int getAttrModDate() {
+ return attrModDate;
}
-
- public final int getAttrModDate() {
- return BigEndian.getInt32(data, 20);
- }
-
- public void setAttrModDate(int time) {
- BigEndian.setInt32(data, 20, time);
- }
-
- public byte[] getBytes() {
- return data;
- }
-
- public final String toString() {
- StringBuffer s = new StringBuffer();
- s.append("Record type: ").append(getRecordType()).append("\n");
- s.append("Valence: ").append(getValence()).append("\n");
- s.append("Folder ID: ").append(getFolderId().getId()).append("\n");
- s.append("Creation Date :").append(
- HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy"))
- .append("\n");
- s.append("Content Mod Date :").append(
- HFSUtils.printDate(getContentModDate(),
- "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Attr Mod Date :")
- .append(
- HFSUtils.printDate(getAttrModDate(),
- "EEE MMM d HH:mm:ss yyyy")).append("\n");
- return s.toString();
- }
+
}
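
For reference, the folder record no longer wraps a raw 88-byte buffer; it keeps plain fields and only serializes them in getBytes(). A standalone sketch of that round trip, using java.nio instead of the JNode BigEndian helper, looks like this (the offsets mirror the diff; the record-type value 1 and the folder id are example assumptions, not values taken from HfsPlusConstants):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Standalone sketch of the 88-byte folder record that CatalogFolder.getBytes() now
// builds: record type at offset 0, valence at 4, folder id at 8 and the three dates
// at 12/16/20, all big-endian. The record-type value 1 is an assumed example, not
// necessarily the value of HfsPlusConstants.RECORD_TYPE_FOLDER.
public class CatalogFolderSketch {

    static byte[] encode(int recordType, int valence, int folderId,
                         int createDate, int contentModDate, int attrModDate) {
        ByteBuffer b = ByteBuffer.allocate(88).order(ByteOrder.BIG_ENDIAN);
        b.putShort(0, (short) recordType);
        b.putInt(4, valence);
        b.putInt(8, folderId);
        b.putInt(12, createDate);
        b.putInt(16, contentModDate);
        b.putInt(20, attrModDate);
        return b.array();
    }

    public static void main(String[] args) {
        byte[] record = encode(1, 0, 42, 0, 0, 0);   // empty folder with id 42
        ByteBuffer b = ByteBuffer.wrap(record).order(ByteOrder.BIG_ENDIAN);
        // Decoding reads the same offsets back, exactly like CatalogFolder(byte[] src) does.
        System.out.println("type=" + b.getShort(0) + " valence=" + b.getInt(4) +
                " folderId=" + b.getInt(8));
    }
}

Parsing the produced buffer back yields the same field values, which is the property the new constructor/getBytes() pair relies on.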
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -27,12 +27,19 @@
public class CatalogKey extends AbstractKey {
- public static final int MINIMUM_KEY_LENGTH = 6;
- public static final int MAXIMUM_KEY_LENGTH = 516;
+ public static final int MINIMUM_KEY_LENGTH = 6;
+ public static final int MAXIMUM_KEY_LENGTH = 516;
+ /**
+ * Catalog node id of the folder that contains file or folder represented by
+ * the record. For thread records, contains the catalog node id of the file
+ * or folder itself.
+ */
+ private CatalogNodeId parentId;
+ /** Name of the file or folder, empty for thread records. */
+ private HFSUnicodeString nodeName;
- private HFSUnicodeString nodeName;
-
/**
+ * Create catalog key from existing data.
*
* @param src
* @param offset
@@ -45,7 +52,7 @@
currentOffset += 2;
ck = new byte[4];
System.arraycopy(src, currentOffset, ck, 0, 4);
- parentID = new CatalogNodeId(ck, 0);
+ parentId = new CatalogNodeId(ck, 0);
currentOffset += 4;
if (keyLength > MINIMUM_KEY_LENGTH) {
nodeName = new HFSUnicodeString(src, currentOffset);
@@ -53,24 +60,20 @@
}
/**
- * Create catalog key based on parent CNID and the name of the file or folder.
+ * Create new catalog key based on parent CNID and the name of the file or folder.
*
* @param parentID Parent catalog node identifier.
* @param name Name of the file or folder.
*
*/
public CatalogKey(final CatalogNodeId parentID, final HFSUnicodeString name) {
- this.parentID = parentID;
+ this.parentId = parentID;
this.nodeName = name;
this.keyLength = MINIMUM_KEY_LENGTH + name.getLength();
}
- public final int getKeyLength() {
- return keyLength;
- }
-
public final CatalogNodeId getParentId() {
- return parentID;
+ return parentId;
}
public final HFSUnicodeString getNodeName() {
@@ -104,7 +107,7 @@
public byte[] getBytes() {
byte[] data = new byte[this.getKeyLength()];
BigEndian.setInt16(data, 0, this.getKeyLength());
- System.arraycopy(parentID.getBytes(), 0, data, 2, 4);
+ System.arraycopy(parentId.getBytes(), 0, data, 2, 4);
System.arraycopy(nodeName.getBytes(), 0, data, 6, nodeName.getLength());
return data;
}
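
A catalog key is the search key of the catalog B-tree: the parent folder id followed by the node name. A standalone sketch of the byte layout that getBytes() above produces (the key-length accounting is the one detail the diff leaves open; this sketch follows the usual HFS+ convention of not counting the length field itself, and the parent id 2, the root folder, is just an example value):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;
import java.nio.charset.StandardCharsets;

// Standalone sketch of a catalog key: a 16-bit key length, the 32-bit parent folder
// id at offset 2, then the node name stored as a 16-bit character count followed by
// UTF-16BE characters. Values and the length convention are illustrative assumptions.
public class CatalogKeySketch {
    public static void main(String[] args) {
        int parentId = 2;                 // example: the root folder id
        String name = "jnode";

        byte[] nameBytes = name.getBytes(StandardCharsets.UTF_16BE);
        int keyLength = 4 + 2 + nameBytes.length;   // parent id + name length + characters

        ByteBuffer key = ByteBuffer.allocate(2 + keyLength).order(ByteOrder.BIG_ENDIAN);
        key.putShort((short) keyLength);            // key length (excluding this field)
        key.putInt(parentId);                       // parent folder id
        key.putShort((short) name.length());        // name length in characters
        key.put(nameBytes);                         // name in UTF-16BE

        System.out.println("catalog key: " + key.capacity() + " bytes for parent " + parentId);
    }
}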
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -28,42 +28,50 @@
public class CatalogThread {
public static final int CATALOG_THREAD_SIZE = 512;
+ /**The catalog thread record type. Can be a file or a folder. */
+ private int recordType;
+ /** the catalog node id of the file or folder referenced by the thread record. */
+ private CatalogNodeId parentId;
+ /** the name of the file or folder referenced by the thread record. */
+ private HFSUnicodeString nodeName;
- private byte[] data;
-
+ /**
+ * Create catalog thread from existing data.
+ *
+ * @param src byte array contains catalog thread data.
+ */
public CatalogThread(final byte[] src) {
- data = new byte[512];
+ byte[] data = new byte[512];
System.arraycopy(src, 0, data, 0, CATALOG_THREAD_SIZE);
+ recordType = BigEndian.getInt16(data, 0);
+ parentId = new CatalogNodeId(data, 4);
+ nodeName = new HFSUnicodeString(data, 8);
}
/**
* Create a new catalog thread.
*
- * @param type
- * @param parent
- * @param name
+ * @param type catalog thread record type.
+ * @param parent {@link CatalogNodeId} of the file or folder referenced by the thread record.
+ * @param name {@link HFSUnicodeString} representing the name of the file or folder referenced by the thread record.
*/
public CatalogThread(int type, CatalogNodeId parent, HFSUnicodeString name) {
- data = new byte[512];
- BigEndian.setInt16(data, 0, type);
- BigEndian.setInt32(data, 4, parent.getId());
- System.arraycopy(parent.getBytes(), 0, data, 4, 4);
- System.arraycopy(name.getBytes(), 0, data, 8, name.getBytes().length);
+ this.recordType = type;
+ this.parentId = parent;
+ this.nodeName = name;
}
-
- public final int getRecordType() {
- return BigEndian.getInt16(data, 0);
- }
-
- public final CatalogNodeId getParentId() {
- return new CatalogNodeId(data, 4);
- }
-
- public final HFSUnicodeString getNodeName() {
- return new HFSUnicodeString(data, 8);
- }
+ /**
+ *
+ * @return
+ */
public byte[] getBytes() {
+ byte[] data = new byte[512];
+ BigEndian.setInt16(data, 0, recordType);
+ BigEndian.setInt32(data, 4, parentId.getId());
+ System.arraycopy(parentId.getBytes(), 0, data, 4, 4);
+ System.arraycopy(nodeName.getBytes(), 0, data, 8, nodeName.getBytes().length);
return data;
}
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -23,44 +23,61 @@
import org.jnode.util.BigEndian;
public class ExtentDescriptor {
-
+ /** The size pf the extent descriptor. */
public static final int EXTENT_DESCRIPTOR_LENGTH = 8;
+ /** The first allocation block. */
+ private int startBlock;
+ /** The length in allocation blocks of the extent. */
+ private int blockCount;
- private byte[] data;
-
/**
- * Create empty extent descriptor.
+ * Create a new extent descriptor.
+ *
+ * @param startBlock first allocation block.
+ * @param blockCount number of blocks in the extent.
*/
- public ExtentDescriptor() {
- data = new byte[EXTENT_DESCRIPTOR_LENGTH];
- }
-
- public ExtentDescriptor(final byte[] src, final int offset) {
- data = new byte[EXTENT_DESCRIPTOR_LENGTH];
+ public ExtentDescriptor(int startBlock, int blockCount) {
+ this.startBlock = startBlock;
+ this.blockCount = blockCount;
+ }
+
+ /**
+ * Create extent descriptor from existing data.
+ *
+ * @param src byte array containing the existing extent descriptor information.
+ * @param offset position where the data for the extent descriptor begins.
+ */
+ public ExtentDescriptor(final byte[] src, final int offset) {
+ byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
System.arraycopy(src, offset, data, 0, EXTENT_DESCRIPTOR_LENGTH);
+ startBlock = BigEndian.getInt32(data, 0);
+ blockCount = BigEndian.getInt32(data, 4);
}
+ /**
+ *
+ * @return
+ */
+ public final byte[] getBytes() {
+ byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
+ BigEndian.setInt32(data, 0, startBlock);
+ BigEndian.setInt32(data, 4, blockCount);
+ return data;
+ }
- public final int getStartBlock() {
- return BigEndian.getInt32(data, 0);
+ public final String toString() {
+ return "Start block : " + startBlock + "\tBlock count : " + blockCount + "\n";
}
- public final void setStartBlock(int start) {
- BigEndian.setInt32(data, 0, start);
- }
+ public int getStartBlock() {
+ return startBlock;
+ }
- public final int getBlockCount() {
- return BigEndian.getInt32(data, 4);
- }
+ public int getBlockCount() {
+ return blockCount;
+ }
+
+ public boolean isEmpty(){
+ return (startBlock == 0 || blockCount == 0);
+ }
- public final void setBlockCount(int count) {
- BigEndian.setInt32(data, 4, count);
- }
-
- public final byte[] getBytes() {
- return data;
- }
-
- public final String toString() {
- return "Start block : " + getStartBlock() + "\tBlock count : " + getBlockCount() + "\n";
- }
}
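
The descriptor itself only stores allocation-block coordinates; turning them into a byte range on the device needs the volume block size from the volume header. A minimal sketch, with a 4096-byte block size assumed as an example value:

// Minimal sketch: turning an extent descriptor (startBlock, blockCount) into a byte
// range on the device. The 4096-byte block size is an assumed example; the real value
// comes from the volume header.
public class ExtentRangeSketch {
    public static void main(String[] args) {
        int blockSize = 4096;          // assumed example block size
        int startBlock = 1;            // first allocation block of the extent
        int blockCount = 512;          // length of the extent in allocation blocks

        long byteOffset = (long) startBlock * blockSize;       // where reads begin
        long byteLength = (long) blockCount * blockSize;       // bytes covered by the extent
        boolean empty = (startBlock == 0 || blockCount == 0);  // mirrors isEmpty() above

        System.out.println("offset=" + byteOffset + " length=" + byteLength + " empty=" + empty);
    }
}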
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.extent;
import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
@@ -26,71 +26,94 @@
import org.jnode.util.BigEndian;
public class ExtentKey extends AbstractKey {
-
- public static final byte DATA_FORK = (byte) 0x00;
- public static final byte RESOURCE_FORK = (byte) 0xFF;
- public static final int KEY_LENGTH = 12;
- byte[] ek;
+ public static final byte DATA_FORK = (byte) 0x00;
+ public static final byte RESOURCE_FORK = (byte) 0xFF;
+ public static final int KEY_LENGTH = 12;
- public ExtentKey(final byte[] src, final int offset) {
- ek = new byte[KEY_LENGTH];
- System.arraycopy(src, offset, ek, 0, KEY_LENGTH);
- }
+ private int forkType;
+ private int pad;
+ private CatalogNodeId fileId;
+ private int startBlock;
- @Override
- public final int getKeyLength() {
- return BigEndian.getInt16(ek, 0);
- }
-
- public final int getForkType() {
- return BigEndian.getInt8(ek, 2);
- }
+ /**
+ *
+ * @param src
+ * @param offset
+ */
+ public ExtentKey(final byte[] src, final int offset) {
+ byte[] ek = new byte[KEY_LENGTH];
+ System.arraycopy(src, offset, ek, 0, KEY_LENGTH);
+ keyLength = BigEndian.getInt16(ek, 0);
+ forkType = BigEndian.getInt8(ek, 2);
+ pad = BigEndian.getInt8(ek, 3);
+ fileId = new CatalogNodeId(ek, 4);
+ startBlock = BigEndian.getInt32(ek, 8);
+ }
- public final int getPad() {
- return BigEndian.getInt8(ek, 3);
- }
+ /**
+ *
+ * @param forkType
+ * @param pad
+ * @param fileId
+ * @param startBlock
+ */
+ public ExtentKey(int forkType, int pad, CatalogNodeId fileId, int startBlock) {
+ super();
+ this.forkType = forkType;
+ this.pad = pad;
+ this.fileId = fileId;
+ this.startBlock = startBlock;
+ }
- public final CatalogNodeId getCatalogNodeId() {
- return new CatalogNodeId(ek, 4);
- }
+ @Override
+ public final int compareTo(final Key key) {
+ int res = -1;
+ if (key instanceof ExtentKey) {
+ ExtentKey compareKey = (ExtentKey) key;
+ res = fileId.compareTo(compareKey.getFileId());
+ if (res == 0) {
+ res = compareForkType(compareKey.getForkType());
+ if (res == 0) {
+ return compareStartBlock(compareKey.getStartBlock());
+ }
+ }
+ }
+ return res;
+ }
- public final int getStartBlock() {
- return BigEndian.getInt32(ek, 8);
- }
+ @Override
+ public byte[] getBytes() {
+ byte[] data = new byte[this.getKeyLength()];
+ return data;
+ }
- @Override
- public final int compareTo(final Key key) {
- int res = -1;
- if (key instanceof ExtentKey) {
- ExtentKey compareKey = (ExtentKey) key;
- res = getCatalogNodeId().compareTo(compareKey.getCatalogNodeId());
- if (res == 0) {
- res = compareForkType(compareKey.getForkType());
- if (res == 0) {
- return compareStartBlock(compareKey.getStartBlock());
- }
- }
- }
- return res;
- }
+ private int compareForkType(int fork) {
+ Integer currentForkType = Integer.valueOf(forkType);
+ Integer forkType = Integer.valueOf(fork);
+ return currentForkType.compareTo(forkType);
+ }
- @Override
- public byte[] getBytes() {
- byte[] data = new byte[this.getKeyLength()];
- return data;
- }
-
- private int compareForkType(int fork) {
- Integer currentForkType = Integer.valueOf(this.getForkType());
- Integer forkType = Integer.valueOf(fork);
- return currentForkType.compareTo(forkType);
- }
-
- private int compareStartBlock(int block) {
- Integer currentStartBlock = Integer.valueOf(this.getStartBlock());
- Integer startBlock = Integer.valueOf(block);
- return currentStartBlock.compareTo(startBlock);
- }
+ private int compareStartBlock(int block) {
+ Integer currentStartBlock = Integer.valueOf(startBlock);
+ Integer startBlock = Integer.valueOf(block);
+ return currentStartBlock.compareTo(startBlock);
+ }
+ public int getForkType() {
+ return forkType;
+ }
+
+ public int getPad() {
+ return pad;
+ }
+
+ public CatalogNodeId getFileId() {
+ return fileId;
+ }
+
+ public int getStartBlock() {
+ return startBlock;
+ }
+
}
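
The new compareTo() establishes the ordering of extent records in the extents overflow B-tree: by file id first, then fork type, then start block. A standalone sketch of the same ordering, with plain ints standing in for the JNode classes:

// Standalone sketch of the ordering that ExtentKey.compareTo() implements: keys sort
// by file id, then fork type (data fork 0x00 before resource fork 0xFF), then start
// block. The values below are illustrative only.
public class ExtentKeyOrderSketch {

    static int compare(int fileIdA, int forkA, int startA,
                       int fileIdB, int forkB, int startB) {
        if (fileIdA != fileIdB) {
            return (fileIdA < fileIdB) ? -1 : 1;
        }
        if (forkA != forkB) {
            return (forkA < forkB) ? -1 : 1;
        }
        if (startA != startB) {
            return (startA < startB) ? -1 : 1;
        }
        return 0;
    }

    public static void main(String[] args) {
        // Same file: the data fork sorts before the resource fork.
        System.out.println(compare(25, 0x00, 0, 25, 0xFF, 0));  // negative
        // Same file and fork: the lower start block comes first.
        System.out.println(compare(25, 0x00, 0, 25, 0x00, 8));  // negative
        // Otherwise the file id alone decides.
        System.out.println(compare(16, 0x00, 0, 25, 0x00, 0));  // negative
    }
}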
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java 2009-03-13 11:24:46 UTC (rev 5096)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractKey.java 2009-03-13 19:35:50 UTC (rev 5097)
@@ -20,14 +20,14 @@
package org.jnode.fs.hfsplus.tree;
-import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
public abstract class AbstractKey implements Key {
protected int keyLength;
- protected CatalogNodeId parentID;
- public abstract int getKeyLength();
+ public final int getKeyLength() {
+ return keyLength;
+ }
public abstract byte[] getBytes();
|
|
From: <ga...@us...> - 2009-03-13 20:07:34
|
Revision: 5098
http://jnode.svn.sourceforge.net/jnode/?rev=5098&view=rev
Author: galatnm
Date: 2009-03-13 20:07:21 +0000 (Fri, 13 Mar 2009)
Log Message:
-----------
Fix checkstyle.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
@@ -42,14 +42,16 @@
* @param offset
*/
public HFSPlusForkData(final byte[] src, final int offset) {
- byte[] data = new byte[FORK_DATA_LENGTH];
+ byte[] data = new byte[FORK_DATA_LENGTH];
System.arraycopy(src, offset, data, 0, FORK_DATA_LENGTH);
totalSize = BigEndian.getInt64(data, 0);
clumpSize = BigEndian.getInt32(data, 8);
totalBlock = BigEndian.getInt32(data, 12);
extents = new ExtentDescriptor[8];
for (int i = 0; i < 8; i++) {
- extents[i] = new ExtentDescriptor(data, EXTENT_OFFSET + (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
+ extents[i] =
+ new ExtentDescriptor(data, EXTENT_OFFSET +
+ (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
}
}
@@ -62,24 +64,23 @@
* @param totalBock
*/
public HFSPlusForkData(long totalSize, int clumpSize, int totalBlock) {
- this.totalSize = totalSize;
- this.clumpSize = clumpSize;
- this.totalBlock = totalBlock;
- this.extents = new ExtentDescriptor[8];
- }
-
+ this.totalSize = totalSize;
+ this.clumpSize = clumpSize;
+ this.totalBlock = totalBlock;
+ this.extents = new ExtentDescriptor[8];
+ }
+
public byte[] getBytes() {
- byte[] data = new byte[FORK_DATA_LENGTH];
- BigEndian.setInt64(data, 0, totalSize);
- BigEndian.setInt32(data, 8, clumpSize);
- BigEndian.setInt32(data, 12, totalBlock);
+ byte[] data = new byte[FORK_DATA_LENGTH];
+ BigEndian.setInt64(data, 0, totalSize);
+ BigEndian.setInt32(data, 8, clumpSize);
+ BigEndian.setInt32(data, 12, totalBlock);
return data;
}
-
-
-
+
/*
* (non-Javadoc)
+ *
* @see java.lang.Object#toString()
*/
public final String toString() {
@@ -93,32 +94,32 @@
return s.toString();
}
- public long getTotalSize() {
- return totalSize;
- }
+ public long getTotalSize() {
+ return totalSize;
+ }
- public int getClumpSize() {
- return clumpSize;
- }
+ public int getClumpSize() {
+ return clumpSize;
+ }
- public int getTotalBlocks() {
- return totalBlock;
- }
-
- public ExtentDescriptor getExtent(int index){
- return extents[index];
- }
-
- /**
+ public int getTotalBlocks() {
+ return totalBlock;
+ }
+
+ public ExtentDescriptor getExtent(int index) {
+ return extents[index];
+ }
+
+ /**
*
* @param index
* @param desc
*/
public final void addDescriptor(int index, ExtentDescriptor desc) {
- extents[index] = desc;
+ extents[index] = desc;
}
- public ExtentDescriptor[] getExtents(){
+ public ExtentDescriptor[] getExtents() {
return extents;
}
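
A fork-data record describes one fork of a file: its logical size, clump size, block count, and up to eight extent descriptors. A standalone sketch of that layout (the 80-byte record length and the extent table starting at offset 16 follow the usual HFS+ layout and are assumptions here; the diff itself only shows the field order):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch of the fork-data layout handled above: a 64-bit total size, a 32-bit clump
// size, a 32-bit block count, then eight 8-byte extent descriptors. Sample values
// are invented for illustration.
public class ForkDataSketch {
    public static void main(String[] args) {
        ByteBuffer fork = ByteBuffer.allocate(80).order(ByteOrder.BIG_ENDIAN);
        fork.putLong(0, 1048576L);   // totalSize: 1 MiB
        fork.putInt(8, 65536);       // clumpSize
        fork.putInt(12, 256);        // totalBlocks (4096-byte blocks assumed)
        fork.putInt(16, 10);         // extent 0: startBlock
        fork.putInt(20, 256);        // extent 0: blockCount

        long totalSize = fork.getLong(0);
        int blocksCovered = 0;
        for (int i = 0; i < 8; i++) {
            int start = fork.getInt(16 + i * 8);
            int count = fork.getInt(16 + i * 8 + 4);
            if (start == 0 || count == 0) {
                continue;            // empty descriptor, nothing allocated in this slot
            }
            blocksCovered += count;
        }
        System.out.println(totalSize + " bytes in " + blocksCovered + " allocated blocks");
    }
}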
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/JournalInfoBlock.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -23,50 +23,50 @@
import org.jnode.util.BigEndian;
public class JournalInfoBlock {
- /** Flag indicate that the journal is located in the volume itself. */
- public static final int IN_FS_MASK = 0x00000001;
- /**
- * Flag indicate that the journal located in an other device. This flag is
- * not currently supported.
- */
- public static final int ION_OTHER_DEVICE_MASK = 0x00000002;
- /** Flag indicate that the journal header is invalid and must be initialize. */
- public static final int NEED_INITIALIZATION = 0x00000004;
- /** One-bits flag. See constants */
- private int flag;
- /** Device where the journal is located if it is not in the volume itself. */
- private int deviceSignature;
- /** journal start position on the volume */
- private long offset;
- /** Size of the journal included header and buffer. */
- private long size;
+ /** Flag indicating that the journal is located in the volume itself. */
+ public static final int IN_FS_MASK = 0x00000001;
+ /**
+ * Flag indicating that the journal is located on another device. This flag is
+ * not currently supported.
+ */
+ public static final int ION_OTHER_DEVICE_MASK = 0x00000002;
+ /** Flag indicating that the journal header is invalid and must be initialized. */
+ public static final int NEED_INITIALIZATION = 0x00000004;
+ /** Bit flags; see the constants above. */
+ private int flag;
+ /** Device where the journal is located if it is not in the volume itself. */
+ private int deviceSignature;
+ /** Journal start position on the volume. */
+ private long offset;
+ /** Size of the journal, including header and buffer. */
+ private long size;
- public JournalInfoBlock(final byte[] src) {
- byte[] data = new byte[180];
- System.arraycopy(src, 0, data, 0, 180);
- flag = BigEndian.getInt32(data, 0);
- deviceSignature = BigEndian.getInt32(data, 4);
- offset = BigEndian.getInt64(data, 36);
- size = BigEndian.getInt64(data, 44);
- }
+ public JournalInfoBlock(final byte[] src) {
+ byte[] data = new byte[180];
+ System.arraycopy(src, 0, data, 0, 180);
+ flag = BigEndian.getInt32(data, 0);
+ deviceSignature = BigEndian.getInt32(data, 4);
+ offset = BigEndian.getInt64(data, 36);
+ size = BigEndian.getInt64(data, 44);
+ }
- public final String toString() {
- return "Journal : " + offset + "::" + size;
- }
+ public final String toString() {
+ return "Journal : " + offset + "::" + size;
+ }
- public int getFlag() {
- return flag;
- }
+ public int getFlag() {
+ return flag;
+ }
- public int getDeviceSignature() {
- return deviceSignature;
- }
+ public int getDeviceSignature() {
+ return deviceSignature;
+ }
- public long getOffset() {
- return offset;
- }
+ public long getOffset() {
+ return offset;
+ }
- public long getSize() {
- return size;
- }
+ public long getSize() {
+ return size;
+ }
}
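
The constructor above reads the journal info block fields at fixed offsets: flags at 0, device signature at 4, and the 64-bit journal offset and size at 36 and 44. A small sketch that builds such a block and checks the in-volume flag (offsets and IN_FS_MASK come from the diff; the sample values are invented):

import java.nio.ByteBuffer;
import java.nio.ByteOrder;

// Sketch of the journal info block fields parsed above. Only the field offsets and
// the IN_FS_MASK bit (0x1) are taken from the diff; the values are examples.
public class JournalInfoSketch {
    static final int IN_FS_MASK = 0x00000001;

    public static void main(String[] args) {
        ByteBuffer b = ByteBuffer.allocate(180).order(ByteOrder.BIG_ENDIAN);
        b.putInt(0, IN_FS_MASK);        // journal lives in the volume itself
        b.putLong(36, 1310720L);        // journal offset in bytes (example)
        b.putLong(44, 8388608L);        // journal size in bytes (example)

        int flags = b.getInt(0);
        long offset = b.getLong(36);
        long size = b.getLong(44);
        boolean inVolume = (flags & IN_FS_MASK) != 0;
        System.out.println("in-volume=" + inVolume + " offset=" + offset + " size=" + size);
    }
}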
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import static org.jnode.fs.hfsplus.HfsPlusConstants.HFSPLUS_SUPER_MAGIC;
@@ -58,11 +58,10 @@
* Create the volume header and load information for the file system passed
* as parameter.
*
- * @param fs
- * The file system contains HFS+ partition.
+ * @param fs The file system contains HFS+ partition.
*
- * @throws FileSystemException
- * If magic number (0X482B) is incorrect or not available.
+ * @throws FileSystemException If magic number (0X482B) is incorrect or not
+ * available.
*/
public Superblock(final HfsPlusFileSystem fs, boolean create) throws FileSystemException {
super(fs);
@@ -77,7 +76,8 @@
data = new byte[SUPERBLOCK_LENGTH];
System.arraycopy(b.array(), 0, data, 0, SUPERBLOCK_LENGTH);
if (getMagic() != HFSPLUS_SUPER_MAGIC) {
- throw new FileSystemException("Not hfs+ volume header (" + getMagic() + ": bad magic)");
+ throw new FileSystemException("Not hfs+ volume header (" + getMagic() +
+ ": bad magic)");
}
}
} catch (IOException e) {
@@ -92,7 +92,8 @@
*
* @throws ApiNotFoundException
*/
- public void create(HFSPlusParams params) throws IOException, ApiNotFoundException, FileSystemException {
+ public void create(HFSPlusParams params)
+ throws IOException, ApiNotFoundException, FileSystemException {
int burnedBlocksBeforeVH = 0;
int burnedBlocksAfterAltVH = 0;
/*
@@ -133,11 +134,13 @@
long allocationClumpSize = getClumpSize(params.getBlockCount());
long bitmapBlocks = allocationClumpSize / blockSize;
long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
-
+
int startBlock = 1 + burnedBlocksBeforeVH;
int blockCount = (int) bitmapBlocks;
-
- HFSPlusForkData forkdata = new HFSPlusForkData(allocationClumpSize, (int)allocationClumpSize,(int) bitmapBlocks);
+
+ HFSPlusForkData forkdata =
+ new HFSPlusForkData(allocationClumpSize, (int) allocationClumpSize,
+ (int) bitmapBlocks);
ExtentDescriptor desc = new ExtentDescriptor(startBlock, blockCount);
forkdata.addDescriptor(0, desc);
System.arraycopy(forkdata.getBytes(), 0, data, 112, HFSPlusForkData.FORK_DATA_LENGTH);
@@ -154,14 +157,20 @@
nextBlock = desc.getStartBlock() + desc.getBlockCount();
}
// Extent B-Tree initialization
- forkdata = new HFSPlusForkData(params.getExtentClumpSize(),params.getExtentClumpSize(),(params.getExtentClumpSize() / blockSize));
+ forkdata =
+ new HFSPlusForkData(params.getExtentClumpSize(), params.getExtentClumpSize(),
+ (params.getExtentClumpSize() / blockSize));
desc = new ExtentDescriptor(nextBlock, forkdata.getTotalBlocks());
forkdata.addDescriptor(0, desc);
System.arraycopy(forkdata.getBytes(), 0, data, 192, HFSPlusForkData.FORK_DATA_LENGTH);
blockUsed += forkdata.getTotalBlocks();
// Catalog B-Tree initialization
- forkdata = new HFSPlusForkData(params.getCatalogClumpSize(),params.getCatalogClumpSize(),(params.getCatalogClumpSize() / blockSize));
- startBlock = this.getExtentsFile().getExtent(0).getStartBlock() + this.getExtentsFile().getExtent(0).getBlockCount();
+ forkdata =
+ new HFSPlusForkData(params.getCatalogClumpSize(), params.getCatalogClumpSize(),
+ (params.getCatalogClumpSize() / blockSize));
+ startBlock =
+ this.getExtentsFile().getExtent(0).getStartBlock() +
+ this.getExtentsFile().getExtent(0).getBlockCount();
blockCount = forkdata.getTotalBlocks();
desc = new ExtentDescriptor(startBlock, blockCount);
forkdata.addDescriptor(0, desc);
@@ -169,15 +178,14 @@
blockUsed += forkdata.getTotalBlocks();
this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
- this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10
- * (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
+ this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10 *
+ (this.getCatalogFile().getClumpSize() / this.getBlockSize()));
}
/**
* Calculate the number of blocks needed for bitmap.
*
- * @param totalBlocks
- * Total of blocks found in the device.
+ * @param totalBlocks Total of blocks found in the device.
*
* @return long - Number of blocks.
*
@@ -399,16 +407,15 @@
* @return
*/
public final String getAttributesAsString() {
- return ((isAttribute(HFSPLUS_VOL_UNMNT_BIT)) ? " kHFSVolumeUnmountedBit" : "")
- + ((isAttribute(HFSPLUS_VOL_INCNSTNT_BIT)) ? " kHFSBootVolumeInconsistentBit" : "")
- + ((isAttribute(HFSPLUS_VOL_JOURNALED_BIT)) ? " kHFSVolumeJournaledBit" : "");
+ return ((isAttribute(HFSPLUS_VOL_UNMNT_BIT)) ? " kHFSVolumeUnmountedBit" : "") +
+ ((isAttribute(HFSPLUS_VOL_INCNSTNT_BIT)) ? " kHFSBootVolumeInconsistentBit" : "") +
+ ((isAttribute(HFSPLUS_VOL_JOURNALED_BIT)) ? " kHFSVolumeJournaledBit" : "");
}
/**
* Check if the corresponding attribute corresponding is set.
*
- * @param maskBit
- * Bit position of the attribute. See constants.
+ * @param maskBit Bit position of the attribute. See constants.
*
* @return true if attribute is set.
*/
@@ -429,16 +436,17 @@
StringBuffer buffer = new StringBuffer();
buffer.append("Magic: 0x").append(NumberUtils.hex(getMagic(), 4)).append("\n");
buffer.append("Version: ").append(getVersion()).append("\n").append("\n");
- buffer.append("Attributes: ").append(getAttributesAsString()).append(" (").append(getAttributes()).append(")")
- .append("\n").append("\n");
- buffer.append("Create date: ").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n");
- buffer.append("Modify date: ").append(HFSUtils.printDate(getModifyDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n");
- buffer.append("Backup date: ").append(HFSUtils.printDate(getBackupDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n");
- buffer.append("Checked date: ").append(HFSUtils.printDate(getCheckedDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n").append("\n");
+ buffer.append("Attributes: ").append(getAttributesAsString()).append(" (").append(
+ getAttributes()).append(")").append("\n").append("\n");
+ buffer.append("Create date: ").append(
+ HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ buffer.append("Modify date: ").append(
+ HFSUtils.printDate(getModifyDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ buffer.append("Backup date: ").append(
+ HFSUtils.printDate(getBackupDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ buffer.append("Checked date: ").append(
+ HFSUtils.printDate(getCheckedDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n")
+ .append("\n");
buffer.append("File count: ").append(getFileCount()).append("\n");
buffer.append("Folder count: ").append(getFolderCount()).append("\n").append("\n");
buffer.append("Block size: ").append(getBlockSize()).append("\n");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.catalog;
import java.io.IOException;
@@ -60,7 +60,7 @@
/**
* Create Catalog based on meta-data that exist on the file system.
- *
+ *
* @param fs HFS+ file system that contains catalog informations.
* @throws IOException
*/
@@ -71,8 +71,9 @@
ExtentDescriptor firstExtent = sb.getCatalogFile().getExtent(0);
catalogHeaderNodeOffset = firstExtent.getStartBlock() * sb.getBlockSize();
if (firstExtent.getStartBlock() != 0 && firstExtent.getBlockCount() != 0) {
- buffer = ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH
- + BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
+ buffer =
+ ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH +
+ BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
fs.getApi().read(catalogHeaderNodeOffset, buffer);
buffer.rewind();
byte[] data = ByteBufferUtils.toArray(buffer);
@@ -84,7 +85,7 @@
/**
* Create new Catalog
- *
+ *
* @param params
*/
public Catalog(HFSPlusParams params) {
@@ -111,10 +112,10 @@
bthr.setClumpSize(params.getCatalogClumpSize());
// TODO initialize attributes, max key length and key comparaison type.
bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
- // Create root node
+ // Create root node
int rootNodePosition = bthr.getRootNode() * nodeSize;
bufferLength += (rootNodePosition - bufferLength);
- //Create node descriptor
+ // Create node descriptor
NodeDescriptor nd = new NodeDescriptor();
nd.setKind(HfsPlusConstants.BT_LEAF_NODE);
nd.setHeight(1);
@@ -136,8 +137,9 @@
// Second record (thread)
offset = offset + ck.getKeyLength() + CatalogFolder.CATALOG_FOLDER_SIZE;
CatalogKey tck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, name);
- CatalogThread ct = new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
- CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
+ CatalogThread ct =
+ new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
+ CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
record = new LeafRecord(tck, ct.getBytes());
rootNode.addNodeRecord(1, record, offset);
buffer = ByteBuffer.allocate(bufferLength + bthr.getNodeSize());
@@ -153,13 +155,13 @@
* @return
* @throws IOException
*/
- public final LeafRecord getRecord(final CatalogNodeId parentID)
- throws IOException {
+ public final LeafRecord getRecord(final CatalogNodeId parentID) throws IOException {
int currentOffset = 0;
LeafRecord lr = null;
int nodeSize = getBTHeaderRecord().getNodeSize();
ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
- fs.getApi().read(catalogHeaderNodeOffset + (getBTHeaderRecord().getRootNode() * nodeSize), nodeData);
+ fs.getApi().read(catalogHeaderNodeOffset + (getBTHeaderRecord().getRootNode() * nodeSize),
+ nodeData);
nodeData.rewind();
byte[] data = ByteBufferUtils.toArray(nodeData);
CatalogNode node = new CatalogNode(data, nodeSize);
@@ -180,22 +182,22 @@
}
/**
- * Find leaf records corresponding to parentID. The search begin at the root node of the tree.
- *
+ * Find leaf records corresponding to parentID. The search begins at the root
+ * node of the tree.
+ *
* @param parentID Parent node id
* @return Array of LeafRecord
* @throws IOException
*/
- public final LeafRecord[] getRecords(final CatalogNodeId parentID)
- throws IOException {
+ public final LeafRecord[] getRecords(final CatalogNodeId parentID) throws IOException {
return getRecords(parentID, getBTHeaderRecord().getRootNode());
}
/**
- * Find leaf records corresponding to parentID. The search begin at the node correspding
- * to the index passed as parameter.
- *
- * @param parentID Parent node id
+ * Find leaf records corresponding to parentID. The search begins at the node
+ * corresponding to the index passed as parameter.
+ *
+ * @param parentID Parent node id
* @param nodeNumber Index of node where the search begin.
* @return Array of LeafRecord
* @throws IOException
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogKey.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.catalog;
import org.jnode.fs.hfsplus.HFSUnicodeString;
@@ -26,19 +26,19 @@
import org.jnode.util.BigEndian;
public class CatalogKey extends AbstractKey {
-
- public static final int MINIMUM_KEY_LENGTH = 6;
- public static final int MAXIMUM_KEY_LENGTH = 516;
- /**
- * Catalog node id of the folder that contains file or folder represented by
- * the record. For thread records, contains the catalog node id of the file
- * or folder itself.
- */
- private CatalogNodeId parentId;
- /** Name of the file or folder, empty for thread records. */
- private HFSUnicodeString nodeName;
+ public static final int MINIMUM_KEY_LENGTH = 6;
+ public static final int MAXIMUM_KEY_LENGTH = 516;
/**
+ * Catalog node id of the folder that contains file or folder represented by
+ * the record. For thread records, contains the catalog node id of the file
+ * or folder itself.
+ */
+ private CatalogNodeId parentId;
+ /** Name of the file or folder, empty for thread records. */
+ private HFSUnicodeString nodeName;
+
+ /**
* Create catalog key from existing data.
*
* @param src
@@ -60,7 +60,8 @@
}
/**
- * Create new catalog key based on parent CNID and the name of the file or folder.
+ * Create new catalog key based on parent CNID and the name of the file or
+ * folder.
*
* @param parentID Parent catalog node identifier.
* @param name Name of the file or folder.
@@ -93,8 +94,9 @@
CatalogKey ck = (CatalogKey) key;
res = this.getParentId().compareTo(ck.getParentId());
if (res == 0) {
- res = this.getNodeName().getUnicodeString().compareTo(
- ck.getNodeName().getUnicodeString());
+ res =
+ this.getNodeName().getUnicodeString().compareTo(
+ ck.getNodeName().getUnicodeString());
}
}
return res;
@@ -102,6 +104,7 @@
/*
* (non-Javadoc)
+ *
* @see org.jnode.fs.hfsplus.tree.AbstractKey#getBytes()
*/
public byte[] getBytes() {
@@ -111,16 +114,18 @@
System.arraycopy(nodeName.getBytes(), 0, data, 6, nodeName.getLength());
return data;
}
-
+
/*
* (non-Javadoc)
+ *
* @see java.lang.Object#toString()
*/
public final String toString() {
StringBuffer s = new StringBuffer();
- s.append("[length, Parent ID, Node name]:").append(getKeyLength()).append(",").append(getParentId().getId())
- .append(",").append((getNodeName() != null) ? getNodeName().getUnicodeString() : "");
+ s.append("[length, Parent ID, Node name]:").append(getKeyLength()).append(",").append(
+ getParentId().getId()).append(",").append(
+ (getNodeName() != null) ? getNodeName().getUnicodeString() : "");
return s.toString();
}
-
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogThread.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -17,24 +17,25 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.catalog;
import org.jnode.fs.hfsplus.HFSUnicodeString;
import org.jnode.util.BigEndian;
+public class CatalogThread {
-
-public class CatalogThread {
-
public static final int CATALOG_THREAD_SIZE = 512;
- /**The catalog thread record type. Can be a file or a folder. */
+ /** The catalog thread record type. Can be a file or a folder. */
private int recordType;
- /** the catalog node id of the file or folder referenced by the thread record. */
+ /**
+ * the catalog node id of the file or folder referenced by the thread
+ * record.
+ */
private CatalogNodeId parentId;
/** the name of the file or folder referenced by the thread record. */
private HFSUnicodeString nodeName;
-
+
/**
* Create catalog thread from existing data.
*
@@ -51,27 +52,29 @@
/**
* Create a new catalog thread.
*
- * @param type catalog thread record type.
- * @param parent {@link CatalogNodeId} of the file or folder referenced by the thread record.
- * @param name {@link HFSUnicodeString} representing the name of the file or folder referenced by the thread record.
+ * @param type catalog thread record type.
+ * @param parent {@link CatalogNodeId} of the file or folder referenced by
+ * the thread record.
+ * @param name {@link HFSUnicodeString} representing the name of the file or
+ * folder referenced by the thread record.
*/
public CatalogThread(int type, CatalogNodeId parent, HFSUnicodeString name) {
- this.recordType = type;
- this.parentId = parent;
- this.nodeName = name;
+ this.recordType = type;
+ this.parentId = parent;
+ this.nodeName = name;
}
-
+
/**
*
* @return
*/
public byte[] getBytes() {
- byte[] data = new byte[512];
+ byte[] data = new byte[512];
BigEndian.setInt16(data, 0, recordType);
BigEndian.setInt32(data, 4, parentId.getId());
System.arraycopy(parentId.getBytes(), 0, data, 4, 4);
System.arraycopy(nodeName.getBytes(), 0, data, 8, nodeName.getBytes().length);
return data;
}
-
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -17,13 +17,13 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.extent;
import org.jnode.util.BigEndian;
public class ExtentDescriptor {
- /** The size pf the extent descriptor. */
+ /** The size of the extent descriptor. */
public static final int EXTENT_DESCRIPTOR_LENGTH = 8;
/** The first allocation block. */
private int startBlock;
@@ -37,47 +37,48 @@
* @param blockCount number of blocks in the extent.
*/
public ExtentDescriptor(int startBlock, int blockCount) {
- this.startBlock = startBlock;
- this.blockCount = blockCount;
- }
-
+ this.startBlock = startBlock;
+ this.blockCount = blockCount;
+ }
+
/**
* Create extent descriptor from existing data.
*
* @param src byte array containing the existing extent descriptor information.
* @param offset position where the data for the extent descriptor begins.
*/
- public ExtentDescriptor(final byte[] src, final int offset) {
- byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
+ public ExtentDescriptor(final byte[] src, final int offset) {
+ byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
System.arraycopy(src, offset, data, 0, EXTENT_DESCRIPTOR_LENGTH);
startBlock = BigEndian.getInt32(data, 0);
- blockCount = BigEndian.getInt32(data, 4);
+ blockCount = BigEndian.getInt32(data, 4);
}
- /**
- *
- * @return
- */
+
+ /**
+ *
+ * @return
+ */
public final byte[] getBytes() {
- byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
- BigEndian.setInt32(data, 0, startBlock);
- BigEndian.setInt32(data, 4, blockCount);
- return data;
- }
+ byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
+ BigEndian.setInt32(data, 0, startBlock);
+ BigEndian.setInt32(data, 4, blockCount);
+ return data;
+ }
public final String toString() {
return "Start block : " + startBlock + "\tBlock count : " + blockCount + "\n";
}
- public int getStartBlock() {
- return startBlock;
- }
+ public int getStartBlock() {
+ return startBlock;
+ }
- public int getBlockCount() {
- return blockCount;
- }
-
- public boolean isEmpty(){
- return (startBlock == 0 || blockCount == 0);
- }
+ public int getBlockCount() {
+ return blockCount;
+ }
+ public boolean isEmpty() {
+ return (startBlock == 0 || blockCount == 0);
+ }
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-03-13 19:35:50 UTC (rev 5097)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-03-13 20:07:21 UTC (rev 5098)
@@ -27,93 +27,93 @@
public class ExtentKey extends AbstractKey {
- public static final byte DATA_FORK = (byte) 0x00;
- public static final byte RESOURCE_FORK = (byte) 0xFF;
- public static final int KEY_LENGTH = 12;
+ public static final byte DATA_FORK = (byte) 0x00;
+ public static final byte RESOURCE_FORK = (byte) 0xFF;
+ public static final int KEY_LENGTH = 12;
- private int forkType;
- private int pad;
- private CatalogNodeId fileId;
- private int startBlock;
+ private int forkType;
+ private int pad;
+ private CatalogNodeId fileId;
+ private int startBlock;
- /**
- *
- * @param src
- * @param offset
- */
- public ExtentKey(final byte[] src, final int offset) {
- byte[] ek = new byte[KEY_LENGTH];
- System.arraycopy(src, offset, ek, 0, KEY_LENGTH);
- keyLength = BigEndian.getInt16(ek, 0);
- forkType = BigEndian.getInt8(ek, 2);
- pad = BigEndian.getInt8(ek, 3);
- fileId = new CatalogNodeId(ek, 4);
- startBlock = BigEndian.getInt32(ek, 8);
- }
+ /**
+ *
+ * @param src
+ * @param offset
+ */
+ public ExtentKey(final byte[] src, final int offset) {
+ byte[] ek = new byte[KEY_LENGTH];
+ System.arraycopy(src, offset, ek, 0, KEY_LENGTH);
+ keyLength = BigEndian.getInt16(ek, 0);
+ forkType = BigEndian.getInt8(ek, 2);
+ pad = BigEndian.getInt8(ek, 3);
+ fileId = new CatalogNodeId(ek, 4);
+ startBlock = BigEndian.getInt32(ek, 8);
+ }
- /**
- *
- * @param forkType
- * @param pad
- * @param fileId
- * @param startBlock
- */
- public ExtentKey(int forkType, int pad, CatalogNodeId fileId, int startBlock) {
- super();
- this.forkType = forkType;
- this.pad = pad;
- this.fileId = fileId;
- this.startBlock = startBlock;
- }
+ /**
+ *
+ * @param forkType
+ * @param pad
+ * @param fileId
+ * @param startBlock
+ */
+ public ExtentKey(int forkType, int pad, CatalogNodeId fileId, int startBlock) {
+ super();
+ this.forkType = forkType;
+ this.pad = pad;
+ this.fileId = fileId;
+ this.startBlock = startBlock;
+ }
- @Override
- public final int compareTo(final Key key) {
- int res = -1;
- if (key instanceof ExtentKey) {
- ExtentKey compareKey = (ExtentKey) key;
- res = fileId.compareTo(compareKey.getFileId());
- if (res == 0) {
- res = compareForkType(compareKey.getForkType());
- if (res == 0) {
- return compareStartBlock(compareKey.getStartBlock());
- }
- }
- }
- return res;
- }
+ @Override
+ public final int compareTo(final Key key) {
+ int res = -1;
+ if (key instanceof ExtentKey) {
+ ExtentKey compareKey = (ExtentKey) key;
+ res = fileId.compareTo(compareKey.getFileId());
+ if (res == 0) {
+ res = compareForkType(compareKey.getForkType());
+ if (res == 0) {
+ return compareStartBlock(compareKey.getStartBlock());
+ }
+ }
+ }
+ return res;
+ }
- @Override
- public byte[] getBytes() {
- byte[] data = new byte[this.getKeyLength()];
- return data;
- }
+ @Override
+ public byte[] getBytes() {
+ byte[] data = new byte[this.getKeyLength()];
+ return data;
+ }
- private int compareForkType(int fork) {
- Integer currentForkType = Integer.valueOf(forkType);
- Integer forkType = Integer.valueOf(fork);
- return currentForkType.compareTo(forkType);
- }
+ private int compareForkType(int fork) {
+ Integer currentForkType = Integer.valueOf(forkType);
+ Integer forkType = Integer.valueOf(fork);
+ return currentForkType.compareTo(forkType);
+ }
- private int compareStartBlock(int block) {
- Integer currentStartBlock = Integer.valueOf(startBlock);
- Integer startBlock = Integer.valueOf(block);
- return currentStartBlock.compareTo(startBlock);
- }
+ private int compareStartBlock(int block) {
+ Integer currentStartBlock = Integer.valueOf(startBlock);
+ Integer startBlock = Integer.valueOf(block);
+ return currentStartBlock.compareTo(startBlock);
+ }
- public int getForkType() {
- return forkType;
- }
+ public int getForkType() {
+ return forkType;
+ }
- public int getPad() {
- return pad;
- }
+ public int getPad() {
+ return pad;
+ }
- public CatalogNodeId getFileId() {
- return fileId;
- }
+ public CatalogNodeId getFileId() {
+ return fileId;
+ }
- public int getStartBlock() {
- return startBlock;
- }
+ public int getStartBlock() {
+ return startBlock;
+ }
}
|
|
From: <ga...@us...> - 2009-03-14 19:48:46
|
Revision: 5102
http://jnode.svn.sourceforge.net/jnode/?rev=5102&view=rev
Author: galatnm
Date: 2009-03-14 19:48:39 +0000 (Sat, 14 Mar 2009)
Log Message:
-----------
Rewrite and update javadocs.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
Added Paths:
-----------
trunk/fs/src/fs/org/jnode/fs/hfsplus/ExtendedFileInfo.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/FileInfo.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusBSDInfo.java
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/ExtendedFileInfo.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/ExtendedFileInfo.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/ExtendedFileInfo.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -0,0 +1,5 @@
+package org.jnode.fs.hfsplus;
+
+public class ExtendedFileInfo {
+
+}
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/FileInfo.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/FileInfo.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/FileInfo.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -0,0 +1,5 @@
+package org.jnode.fs.hfsplus;
+
+public class FileInfo {
+
+}
Added: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusBSDInfo.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusBSDInfo.java (rev 0)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusBSDInfo.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -0,0 +1,5 @@
+package org.jnode.fs.hfsplus;
+
+public class HFSPlusBSDInfo {
+
+}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -166,19 +166,12 @@
}
Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
-
- Calendar now = Calendar.getInstance();
- now.setTime(new Date());
- int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
-
HFSUnicodeString dirName = new HFSUnicodeString(name);
CatalogThread thread =
- new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
- .getFolderId(), dirName);
-
+ new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
+ .getFolderId(), dirName);
CatalogFolder newFolder =
- new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()), macDate,
- macDate, macDate);
+ new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
log.debug("New catalog folder :\n" + newFolder.toString());
CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -44,13 +44,13 @@
@Override
public final long getLength() {
- return file.getDataFork().getTotalSize();
+ return file.getDatas().getTotalSize();
}
@Override
public final void read(final long fileOffset, final ByteBuffer dest) throws IOException {
HfsPlusFileSystem fs = (HfsPlusFileSystem) getFileSystem();
- for (ExtentDescriptor d : file.getDataFork().getExtents()) {
+ for (ExtentDescriptor d : file.getDatas().getExtents()) {
if (!d.isEmpty()) {
long firstOffset = d.getStartBlock() * fs.getVolumeHeader().getBlockSize();
fs.getApi().read(firstOffset, dest);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUnicodeString.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -29,9 +29,10 @@
private String string;
/**
+ * Create HFSUnicodeString from existing data.
*
- * @param src
- * @param offset
+ * @param src byte array containing the data.
+ * @param offset start of data in the array.
*/
public HFSUnicodeString(final byte[] src, final int offset) {
length = BigEndian.getInt16(src, offset);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -22,6 +22,7 @@
import java.text.SimpleDateFormat;
import java.util.Calendar;
+import java.util.Date;
public class HFSUtils {
@@ -59,4 +60,14 @@
SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
return sdf.format(cal.getTime());
}
+
+ /**
+ * Returns current date and time in mac format.
+ * @return current date and time.
+ */
+ public static int getNow(){
+ Calendar now = Calendar.getInstance();
+ now.setTime(new Date());
+ return (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+ }
}
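
getNow() feeds the catalog records with the current time in the HFS+ format, which counts seconds since midnight, January 1, 1904, GMT. A standalone sketch of that conversion, assuming getDate() applies the usual 2082844800-second offset between the Mac and Unix epochs (that constant does not appear in the diff above):

// Standalone sketch of the Unix-to-HFS+ time conversion that HFSUtils.getDate()
// presumably performs. The epoch offset is an assumption taken from the common
// HFS+ convention, not from the JNode source shown here.
public class HfsTimeSketch {
    static final long MAC_EPOCH_OFFSET = 2082844800L; // seconds between 1904-01-01 and 1970-01-01

    static long unixToHfs(long unixSeconds) {
        return unixSeconds + MAC_EPOCH_OFFSET;
    }

    static long hfsToUnix(long hfsSeconds) {
        return hfsSeconds - MAC_EPOCH_OFFSET;
    }

    public static void main(String[] args) {
        long nowUnix = System.currentTimeMillis() / 1000;
        long nowHfs = unixToHfs(nowUnix);
        // Round trip back to Unix time for display.
        System.out.println("unix=" + nowUnix + " hfs=" + nowHfs + " back=" + hfsToUnix(nowHfs));
    }
}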
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -125,13 +125,8 @@
// First record (folder)
HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, name);
- int valence = params.isJournaled() ? 2 : 0;
- Calendar now = Calendar.getInstance();
- now.setTime(new Date());
- int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
CatalogFolder folder =
- new CatalogFolder(valence, CatalogNodeId.HFSPLUS_ROOT_CNID, macDate, macDate,
- macDate);
+ new CatalogFolder(params.isJournaled() ? 2 : 0, CatalogNodeId.HFSPLUS_ROOT_CNID);
LeafRecord record = new LeafRecord(ck, folder.getBytes());
rootNode.addNodeRecord(0, record, offset);
// Second record (thread)
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -17,62 +17,174 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.catalog;
+import org.jnode.fs.hfsplus.ExtendedFileInfo;
+import org.jnode.fs.hfsplus.FileInfo;
+import org.jnode.fs.hfsplus.HFSPlusBSDInfo;
import org.jnode.fs.hfsplus.HFSPlusForkData;
import org.jnode.fs.hfsplus.HFSUtils;
+import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
+/**
+ * This class implements the catalog file structure used in the catalog to hold
+ * information about a file on the volume.
+ *
+ * @author Fabien Lesire
+ *
+ */
public class CatalogFile {
- private byte[] data;
+ public static final int CATALOG_FILE_SIZE = 248;
+ /** catalog record type, always RECORD_TYPE_FILE */
+ private int recordType;
+ /** File record flags. */
+ private int flags;
+ /** the catalog node id of the file */
+ private CatalogNodeId fileId;
+ /** The date and time the file was created */
+ private int createDate;
+ /** The date and time the file's contents were last modified. */
+ private int contentModDate;
+ /** The date and time the file's attributes were last modified. */
+ private int attrModDate;
+ /** The date and time the file was last accessed. */
+ private int accessDate;
+ /** The date and time the file was last backed up. */
+ private int backupDate;
+ /** BSD-style permissions and ownership information. */
+ private HFSPlusBSDInfo permissions;
+ /** Finder information for the file. */
+ private FileInfo userInfo;
+ /** Extended Finder information for the file. */
+ private ExtendedFileInfo finderInfo;
+ /** Hint for the text encoding of the file name. */
+ private int textEncoding;
+ /** data fork location and size */
+ private HFSPlusForkData datas;
+ /** resource fork location and size */
+ private HFSPlusForkData resources;
+
+ /**
+ * Create a catalog file record from existing data.
+ *
+ * @param src byte array containing the catalog file data.
+ */
public CatalogFile(final byte[] src) {
- data = new byte[248];
+ byte[] data = new byte[CATALOG_FILE_SIZE];
System.arraycopy(src, 0, data, 0, 248);
+ recordType = BigEndian.getInt16(data, 0);
+ flags = BigEndian.getInt16(data, 2);
+ fileId = new CatalogNodeId(data, 8);
+ createDate = BigEndian.getInt32(data, 12);
+ contentModDate = BigEndian.getInt32(data, 16);
+ attrModDate = BigEndian.getInt32(data, 20);
+ datas = new HFSPlusForkData(data, 88);
+ resources = new HFSPlusForkData(data, 168);
}
- public final int getRecordType() {
- return BigEndian.getInt16(data, 0);
+ /**
+ *
+ * @param flags file record flags.
+ * @param fileId catalog node id of the file.
+ * @param datas data fork location and size.
+ * @param resources resource fork location and size.
+ */
+ public CatalogFile(int flags, CatalogNodeId fileId, HFSPlusForkData datas, HFSPlusForkData resources) {
+ this.recordType = HfsPlusConstants.RECORD_TYPE_FILE;
+ this.flags = flags;
+ this.fileId = fileId;
+ this.createDate = HFSUtils.getNow();
+ this.contentModDate = HFSUtils.getNow();
+ this.attrModDate = HFSUtils.getNow();
+ this.datas = datas;
+ this.resources = resources;
}
- public final int getFlags() {
- return BigEndian.getInt16(data, 2);
+ /**
+ *
+ * @return
+ */
+ public byte[] getBytes() {
+ return null;
}
- public final CatalogNodeId getFileId() {
- return new CatalogNodeId(data, 8);
+ /*
+ * (non-Javadoc)
+ *
+ * @see java.lang.Object#toString()
+ */
+ public final String toString() {
+ StringBuffer s = new StringBuffer();
+ s.append("Record type:").append(recordType).append("\t");
+ s.append("File ID :").append(fileId.getId()).append("\n");
+ s.append("Creation Date :").append(
+ HFSUtils.printDate(createDate, "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Content Mod Date :").append(
+ HFSUtils.printDate(contentModDate, "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ s.append("Attr Mod Date :").append(
+ HFSUtils.printDate(attrModDate, "EEE MMM d HH:mm:ss yyyy")).append("\n");
+ return s.toString();
}
- public final int getCreateDate() {
- return BigEndian.getInt32(data, 12);
+ public int getRecordType() {
+ return recordType;
}
- public final int getContentModDate() {
- return BigEndian.getInt32(data, 16);
+ public int getFlags() {
+ return flags;
}
- public final int getAttrModDate() {
- return BigEndian.getInt32(data, 20);
+ public CatalogNodeId getFileId() {
+ return fileId;
}
- public final HFSPlusForkData getDataFork() {
- return new HFSPlusForkData(data, 88);
+ public int getCreateDate() {
+ return createDate;
}
- public final HFSPlusForkData getResourceFork() {
- return new HFSPlusForkData(data, 168);
+ public int getContentModDate() {
+ return contentModDate;
}
- public final String toString() {
- StringBuffer s = new StringBuffer();
- s.append("Record type:").append(getRecordType()).append("\t");
- s.append("File ID :").append(getFileId().getId()).append("\n");
- s.append("Creation Date :").append(HFSUtils.printDate(getCreateDate(), "EEE MMM d HH:mm:ss yyyy")).append("\n");
- s.append("Content Mod Date :").append(HFSUtils.printDate(getContentModDate(), "EEE MMM d HH:mm:ss yyyy"))
- .append("\n");
- s.append("Attr Mod Date :").append(HFSUtils.printDate(getAttrModDate(), "EEE MMM d HH:mm:ss yyyy")).append(
- "\n");
- return s.toString();
+ public int getAttrModDate() {
+ return attrModDate;
}
+
+ public HFSPlusForkData getDatas() {
+ return datas;
+ }
+
+ public HFSPlusForkData getResources() {
+ return resources;
+ }
+
+ public int getAccessDate() {
+ return accessDate;
+ }
+
+ public int getBackupDate() {
+ return backupDate;
+ }
+
+ public HFSPlusBSDInfo getPermissions() {
+ return permissions;
+ }
+
+ public FileInfo getUserInfo() {
+ return userInfo;
+ }
+
+ public ExtendedFileInfo getFinderInfo() {
+ return finderInfo;
+ }
+
+ public int getTextEncoding() {
+ return textEncoding;
+ }
+
}
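
The new getDatas()/getResources() accessors are what HFSPlusFile.read(), earlier in this revision, walks to locate file contents: each non-empty extent of the data fork contributes a byte range. A standalone sketch of that walk, with plain int pairs standing in for ExtentDescriptor and an assumed 4096-byte block size:

// Sketch of how a data fork read walks its extents: each non-empty descriptor maps
// to the byte range (startBlock * blockSize, blockCount * blockSize). The arrays and
// block size are illustrative; the real code reads through the JNode device API.
public class ForkReadSketch {
    public static void main(String[] args) {
        int blockSize = 4096;                        // assumed volume block size
        // Each row is one extent descriptor: { startBlock, blockCount }.
        int[][] extents = { {100, 8}, {300, 4}, {0, 0}, {0, 0} };

        long remaining = (8 + 4) * (long) blockSize; // total fork size in this example
        for (int[] extent : extents) {
            int startBlock = extent[0];
            int blockCount = extent[1];
            if (startBlock == 0 || blockCount == 0) {
                continue;                            // empty slot, the fork has no more extents
            }
            long offset = (long) startBlock * blockSize;
            long length = Math.min(remaining, (long) blockCount * blockSize);
            remaining -= length;
            System.out.println("read " + length + " bytes at offset " + offset);
        }
    }
}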
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-03-14 10:45:16 UTC (rev 5101)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-03-14 19:48:39 UTC (rev 5102)
@@ -54,18 +54,14 @@
*
* @param valence
* @param folderID
- * @param createDate
- * @param contentModDate
- * @param attrModDate
*/
- public CatalogFolder(int valence, CatalogNodeId folderID, int createDate,
- int contentModDate, int attribModDate) {
+ public CatalogFolder(int valence, CatalogNodeId folderID) {
this.recordType = HfsPlusConstants.RECORD_TYPE_FOLDER;
this.valence = valence;
this.folderId = folderID;
- this.createDate = createDate;
- this.contentModDate = contentModDate;
- this.attrModDate = attribModDate;
+ this.createDate = HFSUtils.getNow();
+ this.contentModDate = HFSUtils.getNow();
+ this.attrModDate = HFSUtils.getNow();
}
/**
|
|
From: <ga...@us...> - 2009-03-18 21:28:02
|
Revision: 5115
http://jnode.svn.sourceforge.net/jnode/?rev=5115&view=rev
Author: galatnm
Date: 2009-03-18 21:27:56 +0000 (Wed, 18 Mar 2009)
Log Message:
-----------
Fix checkstyle.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-18 21:02:34 UTC (rev 5114)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-18 21:27:56 UTC (rev 5115)
@@ -22,8 +22,6 @@
import java.io.FileNotFoundException;
import java.io.IOException;
-import java.util.Calendar;
-import java.util.Date;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
@@ -61,7 +59,7 @@
@Override
public FSEntry addDirectory(String name) throws IOException {
log.debug("<<< BEGIN addDirectory " + name + " >>>");
- if (fs.isReadOnly()){
+ if (fs.isReadOnly()) {
throw new ReadOnlyFileSystemException();
}
@@ -77,7 +75,7 @@
@Override
public FSEntry addFile(String name) throws IOException {
log.debug("<<< BEGIN addFile " + name + " >>>");
- if (fs.isReadOnly()){
+ if (fs.isReadOnly()) {
throw new ReadOnlyFileSystemException();
}
if (getEntry(name) != null) {
@@ -89,10 +87,9 @@
log.debug("<<< END addFile " + name + " >>>");
return newEntry;
}
-
- private final FSEntry createFileEntry(final String name)
- throws IOException {
- //TODO
+
+ private final FSEntry createFileEntry(final String name) throws IOException {
+ // TODO
return null;
}
@@ -105,7 +102,7 @@
boolean flushEntries = isEntriesLoaded() && entries.isDirty();
if (isDirty() || flushEntries) {
writeEntries(entries);
- //entries.resetDirty();
+ // entries.resetDirty();
resetDirty();
}
log.debug("<<< END flush >>>");
@@ -125,7 +122,7 @@
@Override
public void remove(String name) throws IOException {
- if (fs.isReadOnly()){
+ if (fs.isReadOnly()) {
throw new ReadOnlyFileSystemException();
}
if (entries.remove(name) >= 0) {
@@ -197,7 +194,6 @@
}
return new FSEntryTable(((HfsPlusFileSystem) getFileSystem()), pathList);
}
-
private void writeEntries(final FSEntryTable entries) throws IOException {
// TODO Auto-generated method stub
@@ -217,10 +213,10 @@
Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
HFSUnicodeString dirName = new HFSUnicodeString(name);
CatalogThread thread =
- new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
- .getFolderId(), dirName);
+ new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
+ .getFolderId(), dirName);
CatalogFolder newFolder =
- new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
log.debug("New catalog folder :\n" + newFolder.toString());
CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
@@ -235,9 +231,10 @@
return newEntry;
}
-
+
/**
- * Find a free entry and set it with the given entry
+ * Find a free entry and set it with the given entry
+ *
* @param newEntry
* @throws IOException
*/
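For readers following the new createDirectoryEntry() code above, the snippet below gathers the folder-creation flow into one place. It is a sketch, not project code: fs, parentFolder and name stand in for the owning HfsPlusFileSystem, the parent CatalogFolder and the new directory name, and newFolder.getBytes() is assumed to exist as the folder counterpart of CatalogThread.getBytes(); the remaining constructors all appear in the diff above.

    Superblock volumeHeader = fs.getVolumeHeader();
    HFSUnicodeString dirName = new HFSUnicodeString(name);

    // Thread record tying the new folder back to its parent.
    CatalogThread thread = new CatalogThread(
            HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, parentFolder.getFolderId(), dirName);

    // Folder record with a fresh catalog node id; valence (child count) starts at 0.
    CatalogFolder newFolder =
            new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));

    // Both records are keyed by the parent folder id and the new name.
    CatalogKey key = new CatalogKey(parentFolder.getFolderId(), dirName);
    LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
    // folderRecord (plus a matching record for 'thread') would then go into the catalog B-Tree.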
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-03-18 21:02:34 UTC (rev 5114)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSUtils.java 2009-03-18 21:27:56 UTC (rev 5115)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import java.text.SimpleDateFormat;
@@ -35,10 +35,8 @@
/**
* Convert time from/to java time to/from mac time.
*
- * @param time
- * in seconds since reference date.
- * @param encode
- * if set to true, convert from java to mac. If set to false,
+ * @param time in seconds since reference date.
+ * @param encode if set to true, convert from java to mac. If set to false,
* convert from mac to java.
*
* @return
@@ -60,12 +58,13 @@
SimpleDateFormat sdf = new SimpleDateFormat(dateFormat);
return sdf.format(cal.getTime());
}
-
+
/**
* Returns current date and time in mac format.
+ *
* @return current date and time.
*/
- public static int getNow(){
+ public static int getNow() {
Calendar now = Calendar.getInstance();
now.setTime(new Date());
return (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
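As background for getNow() and getDate(): HFS+ timestamps are unsigned 32-bit counts of seconds since midnight, 1 January 1904 (GMT), whereas Java time is milliseconds since 1 January 1970. The sketch below spells out that arithmetic with the epoch offset written as a constant; it only illustrates the conversion and is not a copy of the getDate() implementation, which this diff does not show.

    // Seconds between the HFS+ epoch (1904-01-01) and the Unix epoch (1970-01-01).
    static final long SECONDS_1904_TO_1970 = 2082844800L;

    // Java milliseconds to HFS+ seconds (the encode direction).
    static int toMacTime(long javaMillis) {
        return (int) (javaMillis / 1000L + SECONDS_1904_TO_1970);
    }

    // HFS+ seconds to Java milliseconds (the decode direction); the mask is needed
    // because the on-disk value is an unsigned 32-bit quantity.
    static long toJavaMillis(int macTime) {
        return ((macTime & 0xFFFFFFFFL) - SECONDS_1904_TO_1970) * 1000L;
    }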
|
|
From: <ga...@us...> - 2009-03-18 22:53:32
|
Revision: 5116
http://jnode.svn.sourceforge.net/jnode/?rev=5116&view=rev
Author: galatnm
Date: 2009-03-18 22:53:16 +0000 (Wed, 18 Mar 2009)
Log Message:
-----------
rewrite BTHeaderRecord and add javadocs.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-18 21:27:56 UTC (rev 5115)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-18 22:53:16 UTC (rev 5116)
@@ -22,15 +22,12 @@
import java.io.IOException;
import java.nio.ByteBuffer;
-import java.util.Calendar;
-import java.util.Date;
import java.util.LinkedList;
import java.util.List;
import org.apache.log4j.Logger;
import org.jnode.fs.hfsplus.HFSPlusParams;
import org.jnode.fs.hfsplus.HFSUnicodeString;
-import org.jnode.fs.hfsplus.HFSUtils;
import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.HfsPlusFileSystem;
import org.jnode.fs.hfsplus.Superblock;
@@ -100,17 +97,13 @@
btnd.setRecordCount(3);
bufferLength += NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
//
- bthr = new BTHeaderRecord();
- bthr.setTreeDepth(1);
- bthr.setRootNode(1);
- bthr.settFirstLeafNode(1);
- bthr.setLastLeafNode(1);
- bthr.setLeafRecords(params.isJournaled() ? 6 : 2);
- bthr.setNodeSize(nodeSize);
- bthr.setTotalNodes(params.getCatalogClumpSize() / params.getCatalogNodeSize());
- bthr.setFreeNodes(bthr.getTotalNodes() - 2);
- bthr.setClumpSize(params.getCatalogClumpSize());
- // TODO initialize attributes, max key length and key comparaison type.
+ int leafRecords = params.isJournaled() ? 6 : 2;
+ int totalNodes = params.getCatalogClumpSize() / params.getCatalogNodeSize();
+ int freeNodes = totalNodes - 2;
+ bthr =
+ new BTHeaderRecord(1, 1, leafRecords, 1, 1, nodeSize, 0, totalNodes, freeNodes,
+ params.getCatalogClumpSize(), 0, 0, 0);
+
bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
// Create root node
int rootNodePosition = bthr.getRootNode() * nodeSize;
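To put numbers on the totalNodes and freeNodes arguments passed to the new BTHeaderRecord constructor: the catalog clump is divided into fixed-size nodes, and two of them are consumed immediately, the header node and the root leaf node created just below. The figures here are illustrative defaults, not values taken from HFSPlusParams.

    int catalogClumpSize = 4 * 1024 * 1024;               // assume a 4 MB catalog clump
    int catalogNodeSize  = 8 * 1024;                      // assume 8 KB catalog nodes
    int totalNodes = catalogClumpSize / catalogNodeSize;  // 512 nodes
    int freeNodes  = totalNodes - 2;                      // 510: header node + root leaf in use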
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-18 21:27:56 UTC (rev 5115)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-18 22:53:16 UTC (rev 5116)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.extent;
import org.jnode.fs.hfsplus.HFSPlusParams;
@@ -35,17 +35,11 @@
btnd.setHeight(0);
btnd.setRecordCount(3);
//
- bthr = new BTHeaderRecord();
- bthr.setTreeDepth(0);
- bthr.setRootNode(0);
- bthr.settFirstLeafNode(0);
- bthr.setLastLeafNode(0);
- bthr.setLeafRecords(0);
- bthr.setNodeSize(params.getExtentNodeSize());
- bthr.setTotalNodes(params.getExtentClumpSize()
- / params.getExtentNodeSize());
- bthr.setFreeNodes(bthr.getTotalNodes() - 1);
- bthr.setClumpSize(params.getExtentClumpSize());
- bthr.setMaxKeyLength(ExtentKey.KEY_LENGTH);
+ int totalNodes = params.getExtentClumpSize() / params.getExtentNodeSize();
+ int freeNodes = totalNodes - 1;
+ bthr =
+ new BTHeaderRecord(0, 0, 0, 0, 0, params.getExtentNodeSize(), ExtentKey.KEY_LENGTH,
+ totalNodes, freeNodes, params.getExtentClumpSize(), 0, 0, 0);
+
}
}
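The extents overflow tree uses the same arithmetic but starts out empty, so only the header node is allocated and freeNodes is totalNodes - 1. Again with assumed values for illustration:

    int extentClumpSize = 1024 * 1024;                    // assume a 1 MB extents clump
    int extentNodeSize  = 4 * 1024;                       // assume 4 KB extent nodes
    int totalNodes = extentClumpSize / extentNodeSize;    // 256 nodes
    int freeNodes  = totalNodes - 1;                      // 255: only the header node in use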
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-03-18 21:27:56 UTC (rev 5115)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-03-18 22:53:16 UTC (rev 5116)
@@ -17,135 +17,151 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.tree;
import org.jnode.util.BigEndian;
public class BTHeaderRecord {
public static final int BT_HEADER_RECORD_LENGTH = 106;
- private byte[] data;
+ /** The depth of the current B-Tree. */
+ private int treeDepth;
+ /** The root node number */
+ private int rootNode;
+ /** The number of records contained in all leaf nodes. */
+ private int leafRecords;
+ /** The number of the first leaf node. This may be zero. */
+ private int firstLeafNode;
+ /** The number of the last leaf node. This may be zero. */
+ private int lastLeafNode;
+ /** The size in bytes of a node. */
+ private int nodeSize;
+ /** The maximum length of a key. */
+ private int maxKeyLength;
+ /** The total number of free or used nodes in the B-Tree. */
+ private int totalNodes;
+ /** The number of free nodes in the B-Tree. */
+ private int freeNodes;
+ /**
+ * Ignored for HFS+; the clumpSize field from {@code HFSPlusForkData} is used
+ * instead.
+ */
+ private int clumpSize;
+ /** The type of the B-Tree. */
+ private int treeType;
+ /** Ignored in HFS+, should be treated as reserved. */
+ private int keyCompareType;
+ /** Various attributes of the B-Tree. */
+ private int attributes;
- public BTHeaderRecord() {
- data = new byte[BT_HEADER_RECORD_LENGTH];
+ public BTHeaderRecord(int treeDepth, int rootNode, int leafRecords, int firstLeafNode,
+ int lastLeafNode, int nodeSize, int maxKeyLength, int totalNodes, int freeNodes,
+ int clumpsize, int treeType, int keyCompareType, int attributes) {
+ this.treeDepth = treeDepth;
+ this.rootNode = rootNode;
+ this.leafRecords = leafRecords;
+ this.firstLeafNode = firstLeafNode;
+ this.lastLeafNode = lastLeafNode;
+ this.nodeSize = nodeSize;
+ this.maxKeyLength = maxKeyLength;
+ this.totalNodes = totalNodes;
+ this.freeNodes = freeNodes;
+ this.clumpSize = clumpsize;
+ this.treeType = treeType;
+ this.keyCompareType = keyCompareType;
+ this.attributes = attributes;
}
public BTHeaderRecord(final byte[] src, int offset) {
- data = new byte[BT_HEADER_RECORD_LENGTH];
+ byte[] data = new byte[BT_HEADER_RECORD_LENGTH];
System.arraycopy(src, offset, data, 0, BT_HEADER_RECORD_LENGTH);
+ treeDepth = BigEndian.getInt16(data, 0);
+ rootNode = BigEndian.getInt32(data, 2);
+ leafRecords = BigEndian.getInt32(data, 6);
+ firstLeafNode = BigEndian.getInt32(data, 10);
+ lastLeafNode = BigEndian.getInt32(data, 14);
+ nodeSize = BigEndian.getInt16(data, 18);
+ maxKeyLength = BigEndian.getInt16(data, 20);
+ totalNodes = BigEndian.getInt16(data, 24);
+ freeNodes = BigEndian.getInt16(data, 28);
+ clumpSize = BigEndian.getInt16(data, 32);
+ treeType = BigEndian.getInt16(data, 36);
+ keyCompareType = BigEndian.getInt16(data, 37);
+ attributes = BigEndian.getInt32(data, 39);
}
- public final int getTreeDepth() {
- return BigEndian.getInt16(data, 0);
+ public byte[] getBytes() {
+ byte[] data = new byte[BT_HEADER_RECORD_LENGTH];
+ BigEndian.setInt16(data, 0, treeDepth);
+ BigEndian.setInt32(data, 2, rootNode);
+ BigEndian.setInt32(data, 6, leafRecords);
+ BigEndian.setInt32(data, 10, firstLeafNode);
+ BigEndian.setInt32(data, 14, lastLeafNode);
+ BigEndian.setInt16(data, 18, nodeSize);
+ BigEndian.setInt16(data, 20, maxKeyLength);
+ BigEndian.setInt32(data, 22, totalNodes);
+ BigEndian.setInt32(data, 26, freeNodes);
+ BigEndian.setInt32(data, 32, clumpSize);
+ BigEndian.setInt8(data, 36, treeType);
+ BigEndian.setInt8(data, 38, keyCompareType);
+ BigEndian.setInt32(data, 39, attributes);
+ return data;
}
- public void setTreeDepth(int depth) {
- BigEndian.setInt16(data, 0, depth);
+ public final String toString() {
+ return ("Root node: " + getRootNode() + "\n" + "First leaf: " + getFirstLeafNode() + "\n" +
+ "Last leaf: " + getLastLeafNode() + "\n" + "node size: " + getNodeSize() + "\n");
}
- public final int getRootNode() {
- return BigEndian.getInt32(data, 2);
+ public int getTreeDepth() {
+ return treeDepth;
}
- public void setRootNode(int node) {
- BigEndian.setInt32(data, 2, node);
+ public int getRootNode() {
+ return rootNode;
}
- public final int getLeafRecords() {
- return BigEndian.getInt32(data, 6);
+ public int getLeafRecords() {
+ return leafRecords;
}
- public void setLeafRecords(int count) {
- BigEndian.setInt32(data, 6, count);
+ public int getFirstLeafNode() {
+ return firstLeafNode;
}
- public final int getFirstLeafNode() {
- return BigEndian.getInt32(data, 10);
+ public int getLastLeafNode() {
+ return lastLeafNode;
}
- public void settFirstLeafNode(int node) {
- BigEndian.setInt32(data, 10, node);
+ public int getNodeSize() {
+ return nodeSize;
}
- public final int getLastLeafNode() {
- return BigEndian.getInt32(data, 14);
- }
-
- public void setLastLeafNode(int node) {
- BigEndian.setInt32(data, 14, node);
- }
-
- public final int getNodeSize() {
- return BigEndian.getInt16(data, 18);
- }
-
- public void setNodeSize(int size) {
- BigEndian.setInt16(data, 18, size);
- }
-
public int getMaxKeyLength() {
- return BigEndian.getInt16(data, 20);
+ return maxKeyLength;
}
- public void setMaxKeyLength(int length) {
- BigEndian.setInt16(data, 20, length);
- }
-
public int getTotalNodes() {
- return BigEndian.getInt32(data, 22);
+ return totalNodes;
}
- public void setTotalNodes(int count) {
- BigEndian.setInt32(data, 22, count);
- }
-
public int getFreeNodes() {
- return BigEndian.getInt32(data, 26);
+ return freeNodes;
}
- public void setFreeNodes(int count) {
- BigEndian.setInt32(data, 26, count);
- }
-
public int getClumpSize() {
- return BigEndian.getInt32(data, 32);
+ return clumpSize;
}
- public void setClumpSize(int size) {
- BigEndian.setInt32(data, 32, size);
- }
-
public int getTreeType() {
- return BigEndian.getInt8(data, 36);
+ return treeType;
}
- public void setTreeType(int type) {
- BigEndian.setInt8(data, 36, type);
- }
-
public int getKeyCompareType() {
- return BigEndian.getInt8(data, 37);
+ return keyCompareType;
}
- public void setKeyCompareType(int type) {
- BigEndian.setInt8(data, 38, type);
- }
-
public long getAttributes() {
- return BigEndian.getInt32(data, 39);
+ return attributes;
}
-
- public void setAttributes(int attrs) {
- BigEndian.setInt32(data, 39, attrs);
- }
-
- public byte[] getBytes() {
- return data;
- }
-
- public final String toString() {
- return ("Root node: " + getRootNode() + "\n" + "First leaf: "
- + getFirstLeafNode() + "\n" + "Last leaf: "
- + getLastLeafNode() + "\n" + "node size: " + getNodeSize() + "\n");
- }
}
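Taken together with NodeDescriptor, the two BTHeaderRecord constructors describe the start of every B-Tree file: a header node whose first 14 bytes are the node descriptor, immediately followed by the 106-byte header record. A minimal read-back sketch based on that layout (it is not a copy of the Catalog loading code):

    // Parse the header record out of a buffer holding the tree's header node.
    static BTHeaderRecord readHeaderRecord(byte[] headerNode) {
        // Bytes 0..13 are the NodeDescriptor; the header record follows at byte 14.
        return new BTHeaderRecord(headerNode, NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH);
    }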
|
|
From: <ga...@us...> - 2009-03-19 14:10:52
|
Revision: 5117
http://jnode.svn.sourceforge.net/jnode/?rev=5117&view=rev
Author: galatnm
Date: 2009-03-19 14:10:46 +0000 (Thu, 19 Mar 2009)
Log Message:
-----------
Rewrite and javadocs.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
Removed Paths:
-------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -30,6 +30,7 @@
import org.jnode.fs.FSDirectory;
import org.jnode.fs.FSEntry;
import org.jnode.fs.ReadOnlyFileSystemException;
+import org.jnode.fs.hfsplus.catalog.CatalogFile;
import org.jnode.fs.hfsplus.catalog.CatalogFolder;
import org.jnode.fs.hfsplus.catalog.CatalogKey;
import org.jnode.fs.hfsplus.catalog.CatalogNodeId;
@@ -184,8 +185,8 @@
if (fs.getVolumeHeader().getFolderCount() > 0) {
LeafRecord[] records = fs.getCatalog().getRecords(folder.getFolderId());
for (LeafRecord rec : records) {
- if (rec.getType() == HfsPlusConstants.RECORD_TYPE_FOLDER ||
- rec.getType() == HfsPlusConstants.RECORD_TYPE_FILE) {
+ if (rec.getType() == CatalogFolder.RECORD_TYPE_FOLDER ||
+ rec.getType() == CatalogFile.RECORD_TYPE_FILE) {
String name = ((CatalogKey) rec.getKey()).getNodeName().getUnicodeString();
HFSPlusEntry e = new HFSPlusEntry(fs, this, name, rec);
pathList.add(e);
@@ -213,10 +214,9 @@
Superblock volumeHeader = ((HfsPlusFileSystem) getFileSystem()).getVolumeHeader();
HFSUnicodeString dirName = new HFSUnicodeString(name);
CatalogThread thread =
- new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD, this.folder
+ new CatalogThread(CatalogFolder.RECORD_TYPE_FOLDER_THREAD, this.folder
.getFolderId(), dirName);
- CatalogFolder newFolder =
- new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ CatalogFolder newFolder = new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
log.debug("New catalog folder :\n" + newFolder.toString());
CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -27,6 +27,8 @@
import org.jnode.fs.FSEntry;
import org.jnode.fs.FSFile;
import org.jnode.fs.FileSystem;
+import org.jnode.fs.hfsplus.catalog.CatalogFile;
+import org.jnode.fs.hfsplus.catalog.CatalogFolder;
import org.jnode.fs.hfsplus.tree.LeafRecord;
import org.jnode.fs.spi.AbstractFSEntry;
import org.jnode.fs.spi.UnixFSAccessRights;
@@ -66,9 +68,9 @@
int mode = record.getType();
if ("/".equals(name)) {
return AbstractFSEntry.ROOT_ENTRY;
- } else if (mode == HfsPlusConstants.RECORD_TYPE_FOLDER) {
+ } else if (mode == CatalogFolder.RECORD_TYPE_FOLDER) {
return AbstractFSEntry.DIR_ENTRY;
- } else if (mode == HfsPlusConstants.RECORD_TYPE_FILE) {
+ } else if (mode == CatalogFile.RECORD_TYPE_FILE) {
return AbstractFSEntry.FILE_ENTRY;
} else {
return AbstractFSEntry.OTHER_ENTRY;
Deleted: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusConstants.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -1,57 +0,0 @@
-/*
- * $Id$
- *
- * Copyright (C) 2003-2009 JNode.org
- *
- * This library is free software; you can redistribute it and/or modify it
- * under the terms of the GNU Lesser General Public License as published
- * by the Free Software Foundation; either version 2.1 of the License, or
- * (at your option) any later version.
- *
- * This library is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
- * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
- * License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public License
- * along with this library; If not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-package org.jnode.fs.hfsplus;
-
-public class HfsPlusConstants {
- public static final int HFSPLUS_SUPER_MAGIC = 0x482b;
-
- public static final int HFSPLUS_MIN_VERSION = 0x0004; /* HFS+ */
- public static final int HFSPLUS_CURRENT_VERSION = 5; /* HFSX */
-
- /* HFS+ volume attributes */
- public static final int HFSPLUS_VOL_UNMNT_BIT = 8;
- public static final int HFSPLUS_VOL_SPARE_BLK_BIT = 9;
- public static final int HFSPLUS_VOL_NOCACHE_BIT = 10;
- public static final int HFSPLUS_VOL_INCNSTNT_BIT = 11;
- public static final int HFSPLUS_VOL_NODEID_REUSED_BIT = 12;
- public static final int HFSPLUS_VOL_JOURNALED_BIT = 13;
- public static final int HFSPLUS_VOL_SOFTLOCK_BIT = 15;
-
- public static final int BT_LEAF_NODE = -1;
- public static final int BT_INDEX_NODE = 0;
- public static final int BT_HEADER_NODE = 1;
- public static final int BT_MAP_NODE = 2;
-
- /* Types */
- public static final int RECORD_TYPE_FOLDER = 0x0001;
- public static final int RECORD_TYPE_FILE = 0x0002;
- public static final int RECORD_TYPE_FOLDER_THREAD = 0x0003;
- public static final int RECORD_TYPE_FILE_THREAD = 0x0004;
-
- public static final byte EK_DATA_FORK = (byte) 0x00;
- public static final byte EK_RESOURCE_FORK = (byte) 0xFF;
-
- public static final int BYTES_PER_SECTOR = 512;
- public static final int BITS_PER_SECTOR = 4096;
-
-
-
-}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -63,18 +63,18 @@
public final void read() throws FileSystemException {
sb = new Superblock(this, false);
- if (!sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_UNMNT_BIT)) {
+ if (!sb.isAttribute(Superblock.HFSPLUS_VOL_UNMNT_BIT)) {
log
.info(getDevice().getId()
+ " Filesystem has not been cleanly unmounted, mounting it readonly");
setReadOnly(true);
}
- if (sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_SOFTLOCK_BIT)) {
+ if (sb.isAttribute(Superblock.HFSPLUS_VOL_SOFTLOCK_BIT)) {
log.info(getDevice().getId()
+ " Filesystem is marked locked, mounting it readonly");
setReadOnly(true);
}
- if (sb.isAttribute(HfsPlusConstants.HFSPLUS_VOL_JOURNALED_BIT)) {
+ if (sb.isAttribute(Superblock.HFSPLUS_VOL_JOURNALED_BIT)) {
log
.info(getDevice().getId()
+ " Filesystem is journaled, write access is not supported. Mounting it readonly");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystemType.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -62,7 +62,7 @@
return false;
}
int magicNumber = BigEndian.getInt16(magic.array(), 0);
- return (magicNumber == HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
+ return (magicNumber == Superblock.HFSPLUS_SUPER_MAGIC);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -20,11 +20,6 @@
package org.jnode.fs.hfsplus;
-import static org.jnode.fs.hfsplus.HfsPlusConstants.HFSPLUS_SUPER_MAGIC;
-import static org.jnode.fs.hfsplus.HfsPlusConstants.HFSPLUS_VOL_INCNSTNT_BIT;
-import static org.jnode.fs.hfsplus.HfsPlusConstants.HFSPLUS_VOL_JOURNALED_BIT;
-import static org.jnode.fs.hfsplus.HfsPlusConstants.HFSPLUS_VOL_UNMNT_BIT;
-
import java.io.IOException;
import java.nio.ByteBuffer;
import java.util.Calendar;
@@ -46,6 +41,21 @@
*
*/
public class Superblock extends HFSPlusObject {
+
+ public static final int HFSPLUS_SUPER_MAGIC = 0x482b;
+
+ public static final int HFSPLUS_MIN_VERSION = 0x0004; /* HFS+ */
+ public static final int HFSPLUS_CURRENT_VERSION = 5; /* HFSX */
+
+ /* HFS+ volume attributes */
+ public static final int HFSPLUS_VOL_UNMNT_BIT = 8;
+ public static final int HFSPLUS_VOL_SPARE_BLK_BIT = 9;
+ public static final int HFSPLUS_VOL_NOCACHE_BIT = 10;
+ public static final int HFSPLUS_VOL_INCNSTNT_BIT = 11;
+ public static final int HFSPLUS_VOL_NODEID_REUSED_BIT = 12;
+ public static final int HFSPLUS_VOL_JOURNALED_BIT = 13;
+ public static final int HFSPLUS_VOL_SOFTLOCK_BIT = 15;
+
private final Logger log = Logger.getLogger(getClass());
/** Volume header data length */
@@ -109,8 +119,8 @@
}
// Populate volume header.
- this.setMagic(HfsPlusConstants.HFSPLUS_SUPER_MAGIC);
- this.setVersion(HfsPlusConstants.HFSPLUS_MIN_VERSION);
+ this.setMagic(HFSPLUS_SUPER_MAGIC);
+ this.setVersion(HFSPLUS_MIN_VERSION);
// Set attributes.
this.setAttribute(HFSPLUS_VOL_UNMNT_BIT);
this.setLastMountedVersion(0x446534a);
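The read() changes above test individual volume attribute bits (clean unmount, soft lock, journaling) through Superblock.isAttribute(), whose body is not part of this diff. A plausible sketch of the bit arithmetic, assuming the 32-bit attributes word has already been pulled out of the raw volume-header buffer; the helper names here are illustrative and are not the Superblock API:

    // Bit n of the volume attributes word corresponds to mask (1 << n).
    static boolean isAttributeSet(int attributesWord, int bit) {
        return (attributesWord & (1 << bit)) != 0;
    }

    static int withAttributeSet(int attributesWord, int bit) {
        return attributesWord | (1 << bit);
    }

    // Example: isAttributeSet(word, Superblock.HFSPLUS_VOL_JOURNALED_BIT)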
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -28,7 +28,6 @@
import org.apache.log4j.Logger;
import org.jnode.fs.hfsplus.HFSPlusParams;
import org.jnode.fs.hfsplus.HFSUnicodeString;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.HfsPlusFileSystem;
import org.jnode.fs.hfsplus.Superblock;
import org.jnode.fs.hfsplus.extent.ExtentDescriptor;
@@ -69,13 +68,13 @@
catalogHeaderNodeOffset = firstExtent.getStartBlock() * sb.getBlockSize();
if (firstExtent.getStartBlock() != 0 && firstExtent.getBlockCount() != 0) {
buffer =
- ByteBuffer.allocate(NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH +
+ ByteBuffer.allocate(NodeDescriptor.BT_HEADER_NODE +
BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
fs.getApi().read(catalogHeaderNodeOffset, buffer);
buffer.rewind();
byte[] data = ByteBufferUtils.toArray(buffer);
btnd = new NodeDescriptor(data, 0);
- bthr = new BTHeaderRecord(data, NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH);
+ bthr = new BTHeaderRecord(data, BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
}
}
@@ -91,11 +90,8 @@
int nodeSize = params.getCatalogNodeSize();
int bufferLength = 0;
- btnd = new NodeDescriptor();
- btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
- btnd.setHeight(0);
- btnd.setRecordCount(3);
- bufferLength += NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
+ btnd = new NodeDescriptor(0, 0, NodeDescriptor.BT_HEADER_NODE, 0, 3);
+ bufferLength += NodeDescriptor.BT_HEADER_NODE;
//
int leafRecords = params.isJournaled() ? 6 : 2;
int totalNodes = params.getCatalogClumpSize() / params.getCatalogNodeSize();
@@ -109,12 +105,10 @@
int rootNodePosition = bthr.getRootNode() * nodeSize;
bufferLength += (rootNodePosition - bufferLength);
// Create node descriptor
- NodeDescriptor nd = new NodeDescriptor();
- nd.setKind(HfsPlusConstants.BT_LEAF_NODE);
- nd.setHeight(1);
- nd.setRecordCount(params.isJournaled() ? 6 : 2);
+ int numRecords = params.isJournaled() ? 6 : 2;
+ NodeDescriptor nd = new NodeDescriptor(0, 0, NodeDescriptor.BT_LEAF_NODE, 1, numRecords);
CatalogNode rootNode = new CatalogNode(nd, nodeSize);
- int offset = NodeDescriptor.BT_NODE_DESCRIPTOR_LENGTH;
+ int offset = NodeDescriptor.BT_HEADER_NODE;
// First record (folder)
HFSUnicodeString name = new HFSUnicodeString(params.getVolumeName());
CatalogKey ck = new CatalogKey(CatalogNodeId.HFSPLUS_POR_CNID, name);
@@ -126,7 +120,7 @@
offset = offset + ck.getKeyLength() + CatalogFolder.CATALOG_FOLDER_SIZE;
CatalogKey tck = new CatalogKey(CatalogNodeId.HFSPLUS_ROOT_CNID, name);
CatalogThread ct =
- new CatalogThread(HfsPlusConstants.RECORD_TYPE_FOLDER_THREAD,
+ new CatalogThread(CatalogFolder.RECORD_TYPE_FOLDER_THREAD,
CatalogNodeId.HFSPLUS_ROOT_CNID, new HFSUnicodeString(""));
record = new LeafRecord(tck, ct.getBytes());
rootNode.addNodeRecord(1, record, offset);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFile.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -25,7 +25,6 @@
import org.jnode.fs.hfsplus.HFSPlusBSDInfo;
import org.jnode.fs.hfsplus.HFSPlusForkData;
import org.jnode.fs.hfsplus.HFSUtils;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
/**
@@ -36,6 +35,9 @@
*
*/
public class CatalogFile {
+
+ public static final int RECORD_TYPE_FILE = 0x0002;
+ public static final int RECORD_TYPE_FILE_THREAD = 0x0004;
public static final int CATALOG_FILE_SIZE = 248;
/** catalog record type, always RECORD_TYPE_FILE */
@@ -95,7 +97,7 @@
* @param resources
*/
public CatalogFile(int flags, CatalogNodeId fileId, HFSPlusForkData datas, HFSPlusForkData resources) {
- this.recordType = HfsPlusConstants.RECORD_TYPE_FILE;
+ this.recordType = RECORD_TYPE_FILE;
this.flags = flags;
this.fileId = fileId;
this.createDate = HFSUtils.getNow();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/CatalogFolder.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -21,10 +21,12 @@
package org.jnode.fs.hfsplus.catalog;
import org.jnode.fs.hfsplus.HFSUtils;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
public class CatalogFolder {
+ /* Types */
+ public static final int RECORD_TYPE_FOLDER = 0x0001;
+ public static final int RECORD_TYPE_FOLDER_THREAD = 0x0003;
public static final int CATALOG_FOLDER_SIZE = 88;
@@ -56,7 +58,7 @@
* @param folderID
*/
public CatalogFolder(int valence, CatalogNodeId folderID) {
- this.recordType = HfsPlusConstants.RECORD_TYPE_FOLDER;
+ this.recordType = RECORD_TYPE_FOLDER;
this.valence = valence;
this.folderId = folderID;
this.createDate = HFSUtils.getNow();
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -21,7 +21,6 @@
package org.jnode.fs.hfsplus.extent;
import org.jnode.fs.hfsplus.HFSPlusParams;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.fs.hfsplus.tree.BTHeaderRecord;
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
@@ -30,10 +29,7 @@
private BTHeaderRecord bthr;
public Extent(HFSPlusParams params) {
- btnd = new NodeDescriptor();
- btnd.setKind(HfsPlusConstants.BT_HEADER_NODE);
- btnd.setHeight(0);
- btnd.setRecordCount(3);
+ btnd = new NodeDescriptor(0, 0, NodeDescriptor.BT_HEADER_NODE, 0, 3);
//
int totalNodes = params.getExtentClumpSize() / params.getExtentNodeSize();
int freeNodes = totalNodes - 1;
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/AbstractNode.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -20,7 +20,6 @@
package org.jnode.fs.hfsplus.tree;
-import org.jnode.fs.hfsplus.HfsPlusConstants;
import org.jnode.util.BigEndian;
public abstract class AbstractNode implements Node {
@@ -34,11 +33,11 @@
}
public boolean isIndexNode() {
- return this.getNodeDescriptor().getKind() == HfsPlusConstants.BT_INDEX_NODE;
+ return this.getNodeDescriptor().getKind() == NodeDescriptor.BT_INDEX_NODE;
}
public boolean isLeafNode() {
- return this.getNodeDescriptor().getKind() == HfsPlusConstants.BT_LEAF_NODE;
+ return this.getNodeDescriptor().getKind() == NodeDescriptor.BT_LEAF_NODE;
}
@Override
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -107,7 +107,7 @@
BigEndian.setInt32(data, 39, attributes);
return data;
}
-
+
public final String toString() {
return ("Root node: " + getRootNode() + "\n" + "First leaf: " + getFirstLeafNode() + "\n" +
"Last leaf: " + getLastLeafNode() + "\n" + "node size: " + getNodeSize() + "\n");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/IndexRecord.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -23,6 +23,8 @@
import org.jnode.util.BigEndian;
public class IndexRecord extends AbstractNodeRecord {
+ /** A node number that represents a child node of the index node. */
+ private int index;
/**
*
@@ -34,10 +36,11 @@
this.key = key;
this.recordData = new byte[4];
System.arraycopy(nodeData, offset + key.getKeyLength(), recordData, 0, 4);
+ index = BigEndian.getInt32(recordData, 0);
}
public final int getIndex() {
- return BigEndian.getInt32(recordData, 0);
+ return index;
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/LeafRecord.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -24,24 +24,28 @@
public class LeafRecord extends AbstractNodeRecord {
+ private int type;
+
public LeafRecord(final Key key, final byte[] recordData) {
this.key = key;
this.recordData = new byte[recordData.length];
System.arraycopy(recordData, 0, this.recordData, 0, recordData.length);
+ type = BigEndian.getInt16(this.recordData, 0);
}
public LeafRecord(final Key key, final byte[] nodeData, final int offset, final int recordDataSize) {
this.key = key;
this.recordData = new byte[recordDataSize];
System.arraycopy(nodeData, offset + key.getKeyLength(), this.recordData, 0, recordDataSize);
+ type = BigEndian.getInt16(this.recordData, 0);
}
public final int getType() {
- return BigEndian.getInt16(this.recordData, 0);
+ return type;
}
public final String toString() {
- return "Type : " + getType() + "\nKey : " + getKey().toString() + "\n";
+ return "Type : " + type + "\nKey : " + getKey().toString() + "\n";
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-03-18 22:53:16 UTC (rev 5116)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/NodeDescriptor.java 2009-03-19 14:10:46 UTC (rev 5117)
@@ -17,70 +17,99 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus.tree;
import org.jnode.util.BigEndian;
public class NodeDescriptor {
+ public static final int BT_LEAF_NODE = -1;
+ public static final int BT_INDEX_NODE = 0;
+ public static final int BT_HEADER_NODE = 1;
+ public static final int BT_MAP_NODE = 2;
+ /** The size of the node descriptor. */
public static final int BT_NODE_DESCRIPTOR_LENGTH = 14;
- private byte[] data;
+ /** The number of the next node. */
+ private int fLink;
+ /** The number of the previous node. */
+ private int bLink;
+ /** The type of the node. */
+ private int kind;
+ /** The depth of this node in the B-Tree. */
+ private int height;
+ /** The number of records in this node. */
+ private int numRecords;
- public NodeDescriptor() {
- data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
+ /**
+ * Creates a new node descriptor.
+ *
+ * @param fLink the number of the next node.
+ * @param bLink the number of the previous node.
+ * @param kind the type of the node.
+ * @param height the depth of this node in the B-Tree.
+ * @param numRecords the number of records in this node.
+ */
+ public NodeDescriptor(int fLink, int bLink, int kind, int height, int numRecords) {
+ this.fLink = fLink;
+ this.bLink = bLink;
+ this.kind = kind;
+ this.height = height;
+ this.numRecords = numRecords;
}
+ /**
+ * Creates a node descriptor from existing data.
+ *
+ * @param src byte array containing the node descriptor data.
+ * @param offset start of node descriptor data.
+ */
public NodeDescriptor(final byte[] src, int offset) {
- data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
+ byte[] data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
System.arraycopy(src, offset, data, 0, BT_NODE_DESCRIPTOR_LENGTH);
+ fLink = BigEndian.getInt32(data, 0);
+ bLink = BigEndian.getInt32(data, 4);
+ kind = BigEndian.getInt8(data, 8);
+ height = BigEndian.getInt8(data, 9);
+ numRecords = BigEndian.getInt16(data, 10);
}
- public final int getFLink() {
- return BigEndian.getInt32(data, 0);
+ /**
+ * @return the node descriptor serialized as a byte array of
+ * {@code BT_NODE_DESCRIPTOR_LENGTH} bytes.
+ */
+ public byte[] getBytes() {
+ byte[] data = new byte[BT_NODE_DESCRIPTOR_LENGTH];
+ BigEndian.setInt32(data, 0, fLink);
+ BigEndian.setInt32(data, 4, bLink);
+ BigEndian.setInt8(data, 8, kind);
+ BigEndian.setInt8(data, 9, height);
+ BigEndian.setInt16(data, 10, numRecords);
+ return data;
}
-
- public void setFLink(int link) {
- BigEndian.setInt32(data, 0, link);
- }
- public final int getBLink() {
- return BigEndian.getInt32(data, 4);
+ public final String toString() {
+ return ("FLink: " + getFLink() + "\n" + "BLink: " + getBLink() + "\n" + "Kind: " +
+ getKind() + "\n" + "height: " + getHeight() + "\n" + "#rec: " + getNumRecords() + "\n");
}
-
- public void setBLink(int link) {
- BigEndian.setInt32(data, 4, link);
- }
- public final int getKind() {
- return BigEndian.getInt8(data, 8);
+ public int getFLink() {
+ return fLink;
}
- public void setKind(int kind) {
- BigEndian.setInt8(data, 8, kind);
+ public int getBLink() {
+ return bLink;
}
- public final int getHeight() {
- return BigEndian.getInt8(data, 9);
+ public int getKind() {
+ return kind;
}
- public void setHeight(int height) {
- BigEndian.setInt8(data, 9, height);
+ public int getHeight() {
+ return height;
}
- public final int getNumRecords() {
- return BigEndian.getInt16(data, 10);
+ public int getNumRecords() {
+ return numRecords;
}
- public void setRecordCount(int count) {
- BigEndian.setInt16(data, 10, count);
- }
-
- public byte[] getBytes() {
- return data;
- }
-
- public final String toString() {
- return ("FLink: " + getFLink() + "\n" + "BLink: " + getBLink() + "\n" + "Kind: " + getKind() + "\n"
- + "height: " + getHeight() + "\n" + "#rec: " + getNumRecords() + "\n");
- }
}
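Because the rewritten NodeDescriptor now parses its fields in one constructor and re-serializes them in getBytes(), a quick round trip exercises both paths. A small sketch using the same values a freshly created header node gets:

    // No siblings, kind = header node, height 0, three records.
    NodeDescriptor original = new NodeDescriptor(0, 0, NodeDescriptor.BT_HEADER_NODE, 0, 3);
    byte[] onDisk = original.getBytes();            // 14 bytes, big-endian

    NodeDescriptor parsed = new NodeDescriptor(onDisk, 0);
    assert parsed.getKind() == NodeDescriptor.BT_HEADER_NODE;
    assert parsed.getHeight() == 0;
    assert parsed.getNumRecords() == 3;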
|
|
From: <ga...@us...> - 2009-03-19 16:23:56
|
Revision: 5118
http://jnode.svn.sourceforge.net/jnode/?rev=5118&view=rev
Author: galatnm
Date: 2009-03-19 16:23:45 +0000 (Thu, 19 Mar 2009)
Log Message:
-----------
Rewrite and javadocs. Also fix extent descriptor creation.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusFile.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -52,7 +52,7 @@
HfsPlusFileSystem fs = (HfsPlusFileSystem) getFileSystem();
for (ExtentDescriptor d : file.getDatas().getExtents()) {
if (!d.isEmpty()) {
- long firstOffset = d.getStartBlock() * fs.getVolumeHeader().getBlockSize();
+ long firstOffset = d.getStartOffset(fs.getVolumeHeader().getBlockSize());
fs.getApi().read(firstOffset, dest);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -68,14 +68,21 @@
this.clumpSize = clumpSize;
this.totalBlock = totalBlock;
this.extents = new ExtentDescriptor[8];
+ for (int i = 0; i < extents.length; i++) {
+ extents[i] = new ExtentDescriptor();
+ }
}
-
- public byte[] getBytes() {
+
+ public byte[] write(byte[] dest, int destOffSet){
byte[] data = new byte[FORK_DATA_LENGTH];
BigEndian.setInt64(data, 0, totalSize);
BigEndian.setInt32(data, 8, clumpSize);
BigEndian.setInt32(data, 12, totalBlock);
- return data;
+ for (int i = 0; i < extents.length; i++) {
+ extents[i].write(data, EXTENT_OFFSET + (i * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH));
+ }
+ System.arraycopy(data, 0, dest, destOffSet, FORK_DATA_LENGTH);
+ return dest;
}
/*
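For orientation, write() above serializes the standard fork-data layout: an 8-byte logical size, a 4-byte clump size, a 4-byte block count, then the eight 8-byte extent descriptors. The literal values of the constants are not shown in this diff, but the layout implies them:

    // Offsets and lengths implied by HFSPlusForkData.write():
    int extentOffset   = 8 + 4 + 4;                                                   // 16
    int forkDataLength = 8 + 4 + 4 + 8 * ExtentDescriptor.EXTENT_DESCRIPTOR_LENGTH;   // 80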
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -284,4 +284,9 @@
public int getExtentNodeSize() {
return extentNodeSize;
}
+
+ public int getInitializeNumRecords(){
+ return journaled ? 6 : 2;
+ }
+
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -62,7 +62,6 @@
*/
public final void read() throws FileSystemException {
sb = new Superblock(this, false);
-
if (!sb.isAttribute(Superblock.HFSPLUS_VOL_UNMNT_BIT)) {
log
.info(getDevice().getId()
@@ -100,11 +99,12 @@
@Override
protected final HFSPlusEntry createRootEntry() throws IOException {
+ log.info("Create root entry.");
LeafRecord record = catalog.getRecord(CatalogNodeId.HFSPLUS_POR_CNID);
if (record != null) {
return new HFSPlusEntry(this, null, "/", record);
}
- log.debug("Root entry : No record found.");
+ log.error("Root entry : No record found.");
return null;
}
@@ -155,6 +155,7 @@
params.initializeDefaultsValues(this.getApi().getLength(), this
.getFSApi().getSectorSize());
sb.create(params);
+ log.debug(sb.toString());
// ---
long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks()
- ((sb.getBlockSize() == 512) ? 2 : 1);
@@ -163,7 +164,7 @@
writeAllocationFile((int) volumeBlockUsed);
// ---
log.debug("Write Catalog to disk.");
- long offset = sb.getCatalogFile().getExtent(0).getStartBlock() * sb.getBlockSize();
+ long offset = sb.getCatalogFile().getExtent(0).getStartOffset(sb.getBlockSize());
Catalog catalog = new Catalog(params);
this.getApi().write(offset, catalog.getBytes());
log.debug("Write volume header to disk.");
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -79,6 +79,7 @@
data = new byte[SUPERBLOCK_LENGTH];
try {
if (!create) {
+ log.info("load HFS+ volume header.");
// skip the first 1024 bytes (boot sector) and read the volume
// header.
ByteBuffer b = ByteBuffer.allocate(SUPERBLOCK_LENGTH);
@@ -89,6 +90,7 @@
throw new FileSystemException("Not hfs+ volume header (" + getMagic() +
": bad magic)");
}
+ log.debug(this.toString());
}
} catch (IOException e) {
throw new FileSystemException(e);
@@ -100,10 +102,13 @@
*
* @param params
*
+ * @throws IOException
* @throws ApiNotFoundException
+ * @throws FileSystemException
*/
public void create(HFSPlusParams params)
throws IOException, ApiNotFoundException, FileSystemException {
+ log.info("Create new HFS+ volume header (" + params.getVolumeName() + ") with block size of " + params.getBlockSize() + " bytes.");
int burnedBlocksBeforeVH = 0;
int burnedBlocksAfterAltVH = 0;
/*
@@ -117,7 +122,6 @@
} else if (blockSize == 1024) {
burnedBlocksBeforeVH = 1;
}
-
// Populate volume header.
this.setMagic(HFSPLUS_SUPER_MAGIC);
this.setVersion(HFSPLUS_MIN_VERSION);
@@ -126,7 +130,7 @@
this.setLastMountedVersion(0x446534a);
Calendar now = Calendar.getInstance();
now.setTime(new Date());
- int macDate = (int) HFSUtils.getDate(now.getTimeInMillis() / 1000, true);
+ int macDate = HFSUtils.getNow();
this.setCreateDate(macDate);
this.setModifyDate(macDate);
this.setBackupDate(0);
@@ -141,51 +145,48 @@
this.setDataClumpSize(params.getDataClumpSize());
this.setNextCatalogId(CatalogNodeId.HFSPLUS_FIRSTUSER_CNID.getId());
// Allocation file creation
+ log.info("Init allocation file.");
long allocationClumpSize = getClumpSize(params.getBlockCount());
long bitmapBlocks = allocationClumpSize / blockSize;
long blockUsed = 2 + burnedBlocksBeforeVH + burnedBlocksAfterAltVH + bitmapBlocks;
-
int startBlock = 1 + burnedBlocksBeforeVH;
int blockCount = (int) bitmapBlocks;
-
HFSPlusForkData forkdata =
new HFSPlusForkData(allocationClumpSize, (int) allocationClumpSize,
(int) bitmapBlocks);
ExtentDescriptor desc = new ExtentDescriptor(startBlock, blockCount);
forkdata.addDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 112, HFSPlusForkData.FORK_DATA_LENGTH);
+ forkdata.write(data, 112);
// Journal creation
int nextBlock = 0;
if (params.isJournaled()) {
this.setFileCount(2);
this.setAttribute(HFSPLUS_VOL_JOURNALED_BIT);
this.setNextCatalogId(this.getNextCatalogId() + 2);
- this.setJournalInfoBlock(desc.getStartBlock() + desc.getBlockCount());
+ this.setJournalInfoBlock(desc.getNext());
blockUsed = blockUsed + 1 + (params.getJournalSize() / blockSize);
} else {
this.setJournalInfoBlock(0);
- nextBlock = desc.getStartBlock() + desc.getBlockCount();
+ nextBlock = desc.getNext();
}
// Extent B-Tree initialization
+ log.info("Init extent file.");
forkdata =
new HFSPlusForkData(params.getExtentClumpSize(), params.getExtentClumpSize(),
(params.getExtentClumpSize() / blockSize));
desc = new ExtentDescriptor(nextBlock, forkdata.getTotalBlocks());
forkdata.addDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 192, HFSPlusForkData.FORK_DATA_LENGTH);
+ forkdata.write(data, 192);
blockUsed += forkdata.getTotalBlocks();
+ nextBlock = desc.getNext();
// Catalog B-Tree initialization
- forkdata =
- new HFSPlusForkData(params.getCatalogClumpSize(), params.getCatalogClumpSize(),
- (params.getCatalogClumpSize() / blockSize));
- startBlock =
- this.getExtentsFile().getExtent(0).getStartBlock() +
- this.getExtentsFile().getExtent(0).getBlockCount();
- blockCount = forkdata.getTotalBlocks();
- desc = new ExtentDescriptor(startBlock, blockCount);
+ log.info("Init catalog file.");
+ int totalBlocks = params.getCatalogClumpSize() / blockSize;
+ forkdata = new HFSPlusForkData(params.getCatalogClumpSize(), params.getCatalogClumpSize(), totalBlocks);
+ desc = new ExtentDescriptor(nextBlock, totalBlocks);
forkdata.addDescriptor(0, desc);
- System.arraycopy(forkdata.getBytes(), 0, data, 272, HFSPlusForkData.FORK_DATA_LENGTH);
- blockUsed += forkdata.getTotalBlocks();
+ forkdata.write(data, 272);
+ blockUsed += totalBlocks;
this.setFreeBlocks(this.getFreeBlocks() - (int) blockUsed);
this.setNextAllocation((int) blockUsed - 1 - burnedBlocksAfterAltVH + 10 *
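The rewritten create() lays the on-disk structures out back to back, each extent starting where the previous one ended via desc.getNext(). A worked example of that ordering with assumed figures (a 4096-byte block, so no burned blocks before the volume header, a 16-block allocation bitmap, and 256-block extents and catalog files); none of these numbers come from the diff itself:

    int startBlock   = 1;                                    // 1 + burnedBlocksBeforeVH
    int bitmapBlocks = 16;
    ExtentDescriptor allocation = new ExtentDescriptor(startBlock, bitmapBlocks);      // blocks 1..16

    // Non-journaled case: the extents overflow file starts right after the bitmap.
    ExtentDescriptor extentsFile = new ExtentDescriptor(allocation.getNext(), 256);    // blocks 17..272

    // The catalog file starts right after the extents overflow file.
    ExtentDescriptor catalogFile = new ExtentDescriptor(extentsFile.getNext(), 256);   // blocks 273..528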
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -61,12 +61,12 @@
* @throws IOException
*/
public Catalog(final HfsPlusFileSystem fs) throws IOException {
- log.debug("Load B-Tree catalog file.\n");
+ log.info("Load B-Tree catalog file.");
this.fs = fs;
Superblock sb = fs.getVolumeHeader();
ExtentDescriptor firstExtent = sb.getCatalogFile().getExtent(0);
- catalogHeaderNodeOffset = firstExtent.getStartBlock() * sb.getBlockSize();
- if (firstExtent.getStartBlock() != 0 && firstExtent.getBlockCount() != 0) {
+ catalogHeaderNodeOffset = firstExtent.getSize(sb.getBlockSize());
+ if (!firstExtent.isEmpty()) {
buffer =
ByteBuffer.allocate(NodeDescriptor.BT_HEADER_NODE +
BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
@@ -74,7 +74,7 @@
buffer.rewind();
byte[] data = ByteBufferUtils.toArray(buffer);
btnd = new NodeDescriptor(data, 0);
- bthr = new BTHeaderRecord(data, BTHeaderRecord.BT_HEADER_RECORD_LENGTH);
+ bthr = new BTHeaderRecord(data, NodeDescriptor.BT_HEADER_NODE);
}
}
@@ -85,28 +85,34 @@
* @param params
*/
public Catalog(HFSPlusParams params) {
- log.debug("Create B-Tree catalog file.\n");
-
+ log.info("Create B-Tree catalog file.");
int nodeSize = params.getCatalogNodeSize();
-
int bufferLength = 0;
btnd = new NodeDescriptor(0, 0, NodeDescriptor.BT_HEADER_NODE, 0, 3);
bufferLength += NodeDescriptor.BT_HEADER_NODE;
//
- int leafRecords = params.isJournaled() ? 6 : 2;
int totalNodes = params.getCatalogClumpSize() / params.getCatalogNodeSize();
int freeNodes = totalNodes - 2;
- bthr =
- new BTHeaderRecord(1, 1, leafRecords, 1, 1, nodeSize, 0, totalNodes, freeNodes,
- params.getCatalogClumpSize(), 0, 0, 0);
+ bthr = new BTHeaderRecord(1,
+ 1,
+ params.getInitializeNumRecords(),
+ 1,
+ 1,
+ nodeSize,
+ CatalogKey.MAXIMUM_KEY_LENGTH,
+ totalNodes,
+ freeNodes,
+ params.getCatalogClumpSize(),
+ BTHeaderRecord.BT_TYPE_HFS,
+ BTHeaderRecord.KEY_COMPARE_TYPE_CASE_FOLDING,
+ BTHeaderRecord.BT_VARIABLE_INDEX_KEYS_MASK + BTHeaderRecord.BT_BIG_KEYS_MASK);
bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
// Create root node
int rootNodePosition = bthr.getRootNode() * nodeSize;
bufferLength += (rootNodePosition - bufferLength);
// Create node descriptor
- int numRecords = params.isJournaled() ? 6 : 2;
- NodeDescriptor nd = new NodeDescriptor(0, 0, NodeDescriptor.BT_LEAF_NODE, 1, numRecords);
+ NodeDescriptor nd = new NodeDescriptor(0, 0, NodeDescriptor.BT_LEAF_NODE, 1, params.getInitializeNumRecords());
CatalogNode rootNode = new CatalogNode(nd, nodeSize);
int offset = NodeDescriptor.BT_HEADER_NODE;
// First record (folder)
@@ -140,7 +146,7 @@
public final LeafRecord getRecord(final CatalogNodeId parentID) throws IOException {
int currentOffset = 0;
LeafRecord lr = null;
- int nodeSize = getBTHeaderRecord().getNodeSize();
+ int nodeSize = bthr.getNodeSize();
ByteBuffer nodeData = ByteBuffer.allocate(nodeSize);
fs.getApi().read(catalogHeaderNodeOffset + (getBTHeaderRecord().getRootNode() * nodeSize),
nodeData);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -20,22 +20,26 @@
package org.jnode.fs.hfsplus.extent;
+import org.apache.log4j.Logger;
import org.jnode.fs.hfsplus.HFSPlusParams;
import org.jnode.fs.hfsplus.tree.BTHeaderRecord;
import org.jnode.fs.hfsplus.tree.NodeDescriptor;
public class Extent {
+ private final Logger log = Logger.getLogger(getClass());
+
private NodeDescriptor btnd;
private BTHeaderRecord bthr;
public Extent(HFSPlusParams params) {
+ log.info("Create B-Tree extent file.");
btnd = new NodeDescriptor(0, 0, NodeDescriptor.BT_HEADER_NODE, 0, 3);
//
int totalNodes = params.getExtentClumpSize() / params.getExtentNodeSize();
int freeNodes = totalNodes - 1;
bthr =
- new BTHeaderRecord(0, 0, 0, 0, 0, params.getExtentNodeSize(), ExtentKey.KEY_LENGTH,
- totalNodes, freeNodes, params.getExtentClumpSize(), 0, 0, 0);
+ new BTHeaderRecord(0, 0, 0, 0, 0, params.getExtentNodeSize(), ExtentKey.MAXIMUM_KEY_LENGTH,
+ totalNodes, freeNodes, params.getExtentClumpSize(),BTHeaderRecord.BT_TYPE_HFS, BTHeaderRecord.KEY_COMPARE_TYPE_CASE_FOLDING, BTHeaderRecord.BT_BIG_KEYS_MASK);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -29,6 +29,11 @@
private int startBlock;
/** The length in allocation blocks of the extent. */
private int blockCount;
+
+ public ExtentDescriptor() {
+ this.startBlock = 0;
+ this.blockCount = 0;
+ }
/**
* Create a new extent descriptor.
@@ -64,19 +69,49 @@
BigEndian.setInt32(data, 4, blockCount);
return data;
}
+
+ public byte[] write(byte[] dest, int destOffSet){
+ byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
+ BigEndian.setInt32(data, 0, startBlock);
+ BigEndian.setInt32(data, 4, blockCount);
+ System.arraycopy(data, 0, dest, destOffSet, EXTENT_DESCRIPTOR_LENGTH);
+ return dest;
+ }
public final String toString() {
return "Start block : " + startBlock + "\tBlock count : " + blockCount + "\n";
}
-
- public int getStartBlock() {
- return startBlock;
+
+ /**
+ * Returns the start position in bytes of the extent.
+ * @param nodeSize the size of a node.
+ * @return offset of the extent.
+ */
+ public int getStartOffset(int nodeSize){
+ return startBlock * nodeSize;
}
-
- public int getBlockCount() {
- return blockCount;
+
+ /**
+ * Returns the number of the first block that follows this extent.
+ * @return the block number immediately after this extent.
+ */
+ public int getNext(){
+ return startBlock + blockCount;
}
+
+ /**
+ * Returns the size in bytes of the extent.
+ * @param nodeSize the size of a node.
+ * @return size of the extent.
+ */
+ public int getSize(int nodeSize){
+ return blockCount * nodeSize;
+ }
+ /**
+ * Returns <tt>true</tt> if the extent is empty.
+ * @return <tt>true</tt> if the extent is empty.
+ */
public boolean isEmpty() {
return (startBlock == 0 || blockCount == 0);
}
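A quick numeric pass over the new convenience methods, using an arbitrary extent and a 4096-byte allocation block:

    ExtentDescriptor d = new ExtentDescriptor(5, 3);  // starts at block 5, spans 3 blocks

    int offset    = d.getStartOffset(4096);  // 5 * 4096 = 20480 bytes into the volume
    int size      = d.getSize(4096);         // 3 * 4096 = 12288 bytes
    int next      = d.getNext();             // 5 + 3 = 8, first block after this extent
    boolean empty = d.isEmpty();             // false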
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentKey.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -30,6 +30,7 @@
public static final byte DATA_FORK = (byte) 0x00;
public static final byte RESOURCE_FORK = (byte) 0xFF;
public static final int KEY_LENGTH = 12;
+ public static final int MAXIMUM_KEY_LENGTH = 10;
private int forkType;
private int pad;
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-03-19 14:10:46 UTC (rev 5117)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/tree/BTHeaderRecord.java 2009-03-19 16:23:45 UTC (rev 5118)
@@ -23,6 +23,17 @@
import org.jnode.util.BigEndian;
public class BTHeaderRecord {
+
+ public static final int KEY_COMPARE_TYPE_CASE_FOLDING = 0xCF;
+    /** B-Tree was not closed correctly and needs a consistency check. */
+ public static final int BT_BAD_CLOSE_MASK = 0x00000001;
+ public static final int BT_BIG_KEYS_MASK = 0x00000002;
+ public static final int BT_VARIABLE_INDEX_KEYS_MASK = 0x00000004;
+
+ public static final int BT_TYPE_HFS = 0;
+ public static final int BT_TYPE_USER = 128;
+ public static final int BT_TYPE_RESERVED = 256;
+
public static final int BT_HEADER_RECORD_LENGTH = 106;
/** The depth of the current B-Tree. */
private int treeDepth;
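The three BT_*_MASK constants above are independent bit flags in the header record's attributes field, so they combine with bitwise OR; the additions used elsewhere in this series (BT_VARIABLE_INDEX_KEYS_MASK + BT_BIG_KEYS_MASK) give the same result only because the bits are disjoint. A small standalone sketch; the constant values mirror the diff, everything else is illustrative:

// The extents B-tree built in this checkin sets only the big-keys flag; the catalog
// B-tree (later in the series) additionally sets variable-length index keys.
public final class BTreeAttributes {
    static final int BT_BAD_CLOSE_MASK           = 0x00000001;
    static final int BT_BIG_KEYS_MASK            = 0x00000002;
    static final int BT_VARIABLE_INDEX_KEYS_MASK = 0x00000004;

    public static void main(String[] args) {
        int catalogAttributes = BT_BIG_KEYS_MASK | BT_VARIABLE_INDEX_KEYS_MASK; // 0x6
        int extentAttributes  = BT_BIG_KEYS_MASK;                               // 0x2
        System.out.printf("catalog=0x%X extents=0x%X%n", catalogAttributes, extentAttributes);

        // Checking a flag later is a single AND:
        boolean needsCheck = (catalogAttributes & BT_BAD_CLOSE_MASK) != 0;
        System.out.println("needs consistency check: " + needsCheck);
    }
}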
|
|
From: <ga...@us...> - 2009-03-19 19:10:22
|
Revision: 5121
http://jnode.svn.sourceforge.net/jnode/?rev=5121&view=rev
Author: galatnm
Date: 2009-03-19 19:10:14 +0000 (Thu, 19 Mar 2009)
Log Message:
-----------
Fix checkstyle issues and fix a problem with the creation of the root entry.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusForkData.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -72,8 +72,8 @@
extents[i] = new ExtentDescriptor();
}
}
-
- public byte[] write(byte[] dest, int destOffSet){
+
+ public byte[] write(byte[] dest, int destOffSet) {
byte[] data = new byte[FORK_DATA_LENGTH];
BigEndian.setInt64(data, 0, totalSize);
BigEndian.setInt32(data, 8, clumpSize);
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusParams.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import org.jnode.fs.FileSystemException;
@@ -73,7 +73,8 @@
* @throws FileSystemException
*
*/
- public void initializeDefaultsValues(long blockDeviceSize, long sectorSize) throws FileSystemException {
+ public void initializeDefaultsValues(long blockDeviceSize, long sectorSize)
+ throws FileSystemException {
long clumpSize = 0;
this.blockDeviceSize = blockDeviceSize;
if (resourceClumpBlocks == 0) {
@@ -143,8 +144,9 @@
}
- private int[] extentClumpTable = new int[] {4, 4, 4, 5, 5, 6, 7, 8, 9, 11, 14, 16, 20, 25, 32 };
- private int[] catalogClumpTable = new int[] {4, 6, 8, 11, 14, 19, 25, 34, 45, 60, 80, 107, 144, 192, 256 };
+ private int[] extentClumpTable = new int[] {4, 4, 4, 5, 5, 6, 7, 8, 9, 11, 14, 16, 20, 25, 32};
+ private int[] catalogClumpTable =
+ new int[] {4, 6, 8, 11, 14, 19, 25, 34, 45, 60, 80, 107, 144, 192, 256};
/**
* Get the file clump size for Extent and catalog B-Tree files.
@@ -152,7 +154,8 @@
* @param blockSize
* @param nodeSize
* @param sectors
- * @param catalog If true, calculate catalog clump size. In the other case, calculate extent clump size.
+ * @param catalog If true, calculate catalog clump size. In the other case,
+ * calculate extent clump size.
*
* @return
*/
@@ -178,11 +181,11 @@
clumpSize /= size;
clumpSize *= size;
-
+
if (clumpSize == 0) {
clumpSize = size;
}
-
+
return clumpSize;
}
@@ -195,7 +198,8 @@
private int clumpSizeCalculation(long clumpBlocks) throws FileSystemException {
long clumpSize = clumpBlocks * blockSize;
if ((clumpSize & 0XFFFFFFFF00000000L) == 0) {
- throw new FileSystemException("Too many blocks (" + clumpBlocks + ") for clump size (" + clumpSize + ").");
+ throw new FileSystemException("Too many blocks (" + clumpBlocks + ") for clump size (" +
+ clumpSize + ").");
}
return (int) clumpSize;
}
@@ -284,9 +288,9 @@
public int getExtentNodeSize() {
return extentNodeSize;
}
-
- public int getInitializeNumRecords(){
+
+ public int getInitializeNumRecords() {
return journaled ? 6 : 2;
}
-
+
}
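Two details in the hunks above are worth spelling out. First, getClumpSize rounds the computed clump size down to a whole multiple of the block or node size, falling back to one unit if the rounding reaches zero. Second, the overflow guard in clumpSizeCalculation, as committed, throws when the high 32 bits are zero, which is the case where the value does fit in the volume header's unsigned 32-bit clump-size field; presumably the intended test is the opposite. A standalone sketch under that assumption; names are illustrative, and IllegalArgumentException stands in for FileSystemException to keep it self-contained:

public final class ClumpSizeSketch {

    /** Round clumpSize down to a multiple of size (block or node size), but never to zero. */
    static long roundDownToMultiple(long clumpSize, long size) {
        clumpSize /= size;
        clumpSize *= size;
        return clumpSize == 0 ? size : clumpSize;
    }

    /** Convert a block count to a byte size that must still fit in an unsigned 32-bit field. */
    static int clumpSizeCalculation(long clumpBlocks, int blockSize) {
        long clumpSize = clumpBlocks * blockSize;
        if ((clumpSize & 0xFFFFFFFF00000000L) != 0) {   // high 32 bits set: too large
            throw new IllegalArgumentException("Too many blocks (" + clumpBlocks +
                    ") for clump size (" + clumpSize + ").");
        }
        return (int) clumpSize;
    }

    public static void main(String[] args) {
        System.out.println(roundDownToMultiple(10_000, 4096));   // 8192
        System.out.println(clumpSizeCalculation(1024, 4096));    // 4194304
    }
}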
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -17,7 +17,7 @@
* along with this library; If not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
*/
-
+
package org.jnode.fs.hfsplus;
import java.io.IOException;
@@ -62,21 +62,20 @@
*/
public final void read() throws FileSystemException {
sb = new Superblock(this, false);
+ log.debug(sb.toString());
if (!sb.isAttribute(Superblock.HFSPLUS_VOL_UNMNT_BIT)) {
- log
- .info(getDevice().getId()
- + " Filesystem has not been cleanly unmounted, mounting it readonly");
+ log.info(getDevice().getId() +
+ " Filesystem has not been cleanly unmounted, mounting it readonly");
setReadOnly(true);
}
if (sb.isAttribute(Superblock.HFSPLUS_VOL_SOFTLOCK_BIT)) {
- log.info(getDevice().getId()
- + " Filesystem is marked locked, mounting it readonly");
+ log.info(getDevice().getId() + " Filesystem is marked locked, mounting it readonly");
setReadOnly(true);
}
if (sb.isAttribute(Superblock.HFSPLUS_VOL_JOURNALED_BIT)) {
log
- .info(getDevice().getId()
- + " Filesystem is journaled, write access is not supported. Mounting it readonly");
+ .info(getDevice().getId() +
+ " Filesystem is journaled, write access is not supported. Mounting it readonly");
setReadOnly(true);
}
try {
@@ -87,8 +86,7 @@
}
@Override
- protected final FSDirectory createDirectory(final FSEntry entry)
- throws IOException {
+ protected final FSDirectory createDirectory(final FSEntry entry) throws IOException {
return entry.getDirectory();
}
@@ -152,13 +150,13 @@
public void create(HFSPlusParams params) throws FileSystemException {
sb = new Superblock(this, true);
try {
- params.initializeDefaultsValues(this.getApi().getLength(), this
- .getFSApi().getSectorSize());
+ params.initializeDefaultsValues(this.getApi().getLength(), this.getFSApi()
+ .getSectorSize());
sb.create(params);
log.debug(sb.toString());
// ---
- long volumeBlockUsed = sb.getTotalBlocks() - sb.getFreeBlocks()
- - ((sb.getBlockSize() == 512) ? 2 : 1);
+ long volumeBlockUsed =
+ sb.getTotalBlocks() - sb.getFreeBlocks() - ((sb.getBlockSize() == 512) ? 2 : 1);
// ---
log.debug("Write allocation bitmap bits to disk.");
writeAllocationFile((int) volumeBlockUsed);
@@ -166,7 +164,7 @@
log.debug("Write Catalog to disk.");
long offset = sb.getCatalogFile().getExtent(0).getStartOffset(sb.getBlockSize());
Catalog catalog = new Catalog(params);
- this.getApi().write(offset, catalog.getBytes());
+ this.getApi().write(offset, catalog.getBytes());
log.debug("Write volume header to disk.");
this.getApi().write(1024, ByteBuffer.wrap(sb.getBytes()));
flush();
@@ -176,9 +174,9 @@
throw new FileSystemException("Unable to create HFS+ filesystem", e);
}
}
-
+
private void writeAllocationFile(int blockUsed) {
int bytes = blockUsed >> 3;
- int bits = blockUsed & 0x0007;
+ int bits = blockUsed & 0x0007;
}
}
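The new writeAllocationFile above splits blockUsed into whole bytes and leftover bits but does not yet build or write anything, so create() currently logs the step without emitting a bitmap. A standalone sketch of the buffer it presumably needs to produce (one bit per allocation block, most significant bit first, 1 = in use); the method names and the idea of writing it at the allocation file's first extent are assumptions, not committed code:

import java.util.Arrays;

public final class AllocationBitmapSketch {
    static byte[] buildBitmap(int blocksUsed, int totalBlocks) {
        byte[] bitmap = new byte[(totalBlocks + 7) >> 3];
        int fullBytes = blocksUsed >> 3;       // bytes where all 8 blocks are in use
        int trailingBits = blocksUsed & 0x7;   // used blocks in the next, partial byte
        Arrays.fill(bitmap, 0, fullBytes, (byte) 0xFF);
        if (trailingBits != 0) {
            bitmap[fullBytes] = (byte) (0xFF << (8 - trailingBits));   // MSB-first
        }
        return bitmap;
    }

    public static void main(String[] args) {
        byte[] bm = buildBitmap(11, 32);       // 11 blocks used out of 32
        // Expect FF E0 00 00: 8 bits set, then 3 more from the high end of the next byte.
        for (byte b : bm) System.out.printf("%02X ", b);
        System.out.println();
    }
}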
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/Superblock.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -41,7 +41,7 @@
*
*/
public class Superblock extends HFSPlusObject {
-
+
public static final int HFSPLUS_SUPER_MAGIC = 0x482b;
public static final int HFSPLUS_MIN_VERSION = 0x0004; /* HFS+ */
@@ -55,7 +55,7 @@
public static final int HFSPLUS_VOL_NODEID_REUSED_BIT = 12;
public static final int HFSPLUS_VOL_JOURNALED_BIT = 13;
public static final int HFSPLUS_VOL_SOFTLOCK_BIT = 15;
-
+
private final Logger log = Logger.getLogger(getClass());
/** Volume header data length */
@@ -90,7 +90,7 @@
throw new FileSystemException("Not hfs+ volume header (" + getMagic() +
": bad magic)");
}
- log.debug(this.toString());
+
}
} catch (IOException e) {
throw new FileSystemException(e);
@@ -108,7 +108,8 @@
*/
public void create(HFSPlusParams params)
throws IOException, ApiNotFoundException, FileSystemException {
- log.info("Create new HFS+ volume header (" + params.getVolumeName() + ") with block size of " + params.getBlockSize() + " bytes.");
+ log.info("Create new HFS+ volume header (" + params.getVolumeName() +
+ ") with block size of " + params.getBlockSize() + " bytes.");
int burnedBlocksBeforeVH = 0;
int burnedBlocksAfterAltVH = 0;
/*
@@ -178,11 +179,13 @@
forkdata.addDescriptor(0, desc);
forkdata.write(data, 192);
blockUsed += forkdata.getTotalBlocks();
- nextBlock = desc.getNext();
+ nextBlock = desc.getNext();
// Catalog B-Tree initialization
log.info("Init catalog file.");
- int totalBlocks = params.getCatalogClumpSize() / blockSize;
- forkdata = new HFSPlusForkData(params.getCatalogClumpSize(), params.getCatalogClumpSize(), totalBlocks);
+ int totalBlocks = params.getCatalogClumpSize() / blockSize;
+ forkdata =
+ new HFSPlusForkData(params.getCatalogClumpSize(), params.getCatalogClumpSize(),
+ totalBlocks);
desc = new ExtentDescriptor(nextBlock, totalBlocks);
forkdata.addDescriptor(0, desc);
forkdata.write(data, 272);
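For orientation, the offsets used above match the fixed fork-data slots of the 512-byte HFS+ volume header (allocation file at byte 112, extents overflow at 192, catalog at 272, attributes at 352, startup at 432), and on disk each special file gets one contiguous extent that starts where the previous one ended, which is what nextBlock = desc.getNext() carries forward. A small sketch of that placement with illustrative sizes:

public final class SpecialFileLayout {
    static final int EXTENTS_FORK_OFFSET = 192;   // byte offset inside the volume header
    static final int CATALOG_FORK_OFFSET = 272;

    public static void main(String[] args) {
        int blockSize = 4096;
        int nextBlock = 1;                                    // first usable block after the header (assumption)

        int extentsBlocks = (1024 * 1024) / blockSize;        // 1 MiB extents clump (assumption)
        int extentsStart = nextBlock;
        nextBlock = extentsStart + extentsBlocks;             // == ExtentDescriptor.getNext()

        int catalogBlocks = (4 * 1024 * 1024) / blockSize;    // 4 MiB catalog clump (assumption)
        int catalogStart = nextBlock;

        System.out.printf("extents: start=%d count=%d (fork data at header offset %d)%n",
                extentsStart, extentsBlocks, EXTENTS_FORK_OFFSET);
        System.out.printf("catalog: start=%d count=%d (fork data at header offset %d)%n",
                catalogStart, catalogBlocks, CATALOG_FORK_OFFSET);
    }
}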
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/catalog/Catalog.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -65,7 +65,7 @@
this.fs = fs;
Superblock sb = fs.getVolumeHeader();
ExtentDescriptor firstExtent = sb.getCatalogFile().getExtent(0);
- catalogHeaderNodeOffset = firstExtent.getSize(sb.getBlockSize());
+ catalogHeaderNodeOffset = firstExtent.getStartOffset(sb.getBlockSize());
if (!firstExtent.isEmpty()) {
buffer =
ByteBuffer.allocate(NodeDescriptor.BT_HEADER_NODE +
@@ -93,26 +93,22 @@
//
int totalNodes = params.getCatalogClumpSize() / params.getCatalogNodeSize();
int freeNodes = totalNodes - 2;
- bthr = new BTHeaderRecord(1,
- 1,
- params.getInitializeNumRecords(),
- 1,
- 1,
- nodeSize,
- CatalogKey.MAXIMUM_KEY_LENGTH,
- totalNodes,
- freeNodes,
- params.getCatalogClumpSize(),
- BTHeaderRecord.BT_TYPE_HFS,
- BTHeaderRecord.KEY_COMPARE_TYPE_CASE_FOLDING,
- BTHeaderRecord.BT_VARIABLE_INDEX_KEYS_MASK + BTHeaderRecord.BT_BIG_KEYS_MASK);
+ bthr =
+ new BTHeaderRecord(1, 1, params.getInitializeNumRecords(), 1, 1, nodeSize,
+ CatalogKey.MAXIMUM_KEY_LENGTH, totalNodes, freeNodes, params
+ .getCatalogClumpSize(), BTHeaderRecord.BT_TYPE_HFS,
+ BTHeaderRecord.KEY_COMPARE_TYPE_CASE_FOLDING,
+ BTHeaderRecord.BT_VARIABLE_INDEX_KEYS_MASK +
+ BTHeaderRecord.BT_BIG_KEYS_MASK);
bufferLength += BTHeaderRecord.BT_HEADER_RECORD_LENGTH;
// Create root node
int rootNodePosition = bthr.getRootNode() * nodeSize;
bufferLength += (rootNodePosition - bufferLength);
// Create node descriptor
- NodeDescriptor nd = new NodeDescriptor(0, 0, NodeDescriptor.BT_LEAF_NODE, 1, params.getInitializeNumRecords());
+ NodeDescriptor nd =
+ new NodeDescriptor(0, 0, NodeDescriptor.BT_LEAF_NODE, 1, params
+ .getInitializeNumRecords());
CatalogNode rootNode = new CatalogNode(nd, nodeSize);
int offset = NodeDescriptor.BT_HEADER_NODE;
// First record (folder)
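The first Catalog hunk above is likely the substantive part of the root-entry fix named in the log message: the B-tree header node sits at the start of the catalog file's first extent, so its position on the volume is startBlock * blockSize (getStartOffset), not blockCount * blockSize (getSize), which is only the extent's length. A standalone illustration of locating and reading that node; reading from a raw image via RandomAccessFile is an assumption for the sketch, not the JNode device API, and the numbers are made up:

import java.io.IOException;
import java.io.RandomAccessFile;

public final class CatalogHeaderNodeReader {
    static byte[] readHeaderNode(String imagePath, int startBlock, int blockSize, int nodeSize)
            throws IOException {
        long offset = (long) startBlock * blockSize;   // getStartOffset(blockSize)
        byte[] node = new byte[nodeSize];              // node 0 = node descriptor + header record
        try (RandomAccessFile raf = new RandomAccessFile(imagePath, "r")) {
            raf.seek(offset);
            raf.readFully(node);
        }
        return node;
    }

    public static void main(String[] args) throws IOException {
        // Hypothetical values: a catalog file whose first extent starts at block 260.
        byte[] headerNode = readHeaderNode("hfsplus.img", 260, 4096, 8192);
        System.out.println("read " + headerNode.length + " bytes");
    }
}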
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/Extent.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -27,7 +27,7 @@
public class Extent {
private final Logger log = Logger.getLogger(getClass());
-
+
private NodeDescriptor btnd;
private BTHeaderRecord bthr;
@@ -38,8 +38,11 @@
int totalNodes = params.getExtentClumpSize() / params.getExtentNodeSize();
int freeNodes = totalNodes - 1;
bthr =
- new BTHeaderRecord(0, 0, 0, 0, 0, params.getExtentNodeSize(), ExtentKey.MAXIMUM_KEY_LENGTH,
- totalNodes, freeNodes, params.getExtentClumpSize(),BTHeaderRecord.BT_TYPE_HFS, BTHeaderRecord.KEY_COMPARE_TYPE_CASE_FOLDING, BTHeaderRecord.BT_BIG_KEYS_MASK);
+ new BTHeaderRecord(0, 0, 0, 0, 0, params.getExtentNodeSize(),
+ ExtentKey.MAXIMUM_KEY_LENGTH, totalNodes, freeNodes, params
+ .getExtentClumpSize(), BTHeaderRecord.BT_TYPE_HFS,
+ BTHeaderRecord.KEY_COMPARE_TYPE_CASE_FOLDING,
+ BTHeaderRecord.BT_BIG_KEYS_MASK);
}
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-19 18:15:48 UTC (rev 5120)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/extent/ExtentDescriptor.java 2009-03-19 19:10:14 UTC (rev 5121)
@@ -29,7 +29,7 @@
private int startBlock;
/** The length in allocation blocks of the extent. */
private int blockCount;
-
+
public ExtentDescriptor() {
this.startBlock = 0;
this.blockCount = 0;
@@ -69,8 +69,8 @@
BigEndian.setInt32(data, 4, blockCount);
return data;
}
-
- public byte[] write(byte[] dest, int destOffSet){
+
+ public byte[] write(byte[] dest, int destOffSet) {
byte[] data = new byte[EXTENT_DESCRIPTOR_LENGTH];
BigEndian.setInt32(data, 0, startBlock);
BigEndian.setInt32(data, 4, blockCount);
@@ -81,35 +81,39 @@
public final String toString() {
return "Start block : " + startBlock + "\tBlock count : " + blockCount + "\n";
}
-
+
/**
* Returns start position in bytes of the extent.
+ *
* @param nodeSize the size of a node.
* @return offset of the extent.
*/
- public int getStartOffset(int nodeSize){
+ public int getStartOffset(int nodeSize) {
return startBlock * nodeSize;
}
-
+
/**
* Returns block number of the next extent.
+ *
* @return block number of the next extent.
*/
- public int getNext(){
+ public int getNext() {
return startBlock + blockCount;
}
-
+
/**
* Returns size in byte of the extent.
+ *
* @param nodeSize the size of a node.
* @return size of the extent.
*/
- public int getSize(int nodeSize){
+ public int getSize(int nodeSize) {
return blockCount * nodeSize;
- }
+ }
/**
* Returns <tt>true</tt> if the extent is empty.
+ *
* @return <tt>true</tt> if the extent is empty.
*/
public boolean isEmpty() {
|
|
From: <ga...@us...> - 2009-03-19 22:10:57
|
Revision: 5123
http://jnode.svn.sourceforge.net/jnode/?rev=5123&view=rev
Author: galatnm
Date: 2009-03-19 22:10:41 +0000 (Thu, 19 Mar 2009)
Log Message:
-----------
Fix in-memory creation of directories.
Modified Paths:
--------------
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-19 19:48:22 UTC (rev 5122)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusDirectory.java 2009-03-19 22:10:41 UTC (rev 5123)
@@ -51,12 +51,9 @@
LeafRecord record) {
super(fs, parent, name, record);
this.folder = new CatalogFolder(record.getData());
+ this.entries = FSEntryTable.EMPTY_TABLE;
}
- public FSEntryTable getTable() {
- return entries;
- }
-
@Override
public FSEntry addDirectory(String name) throws IOException {
log.debug("<<< BEGIN addDirectory " + name + " >>>");
@@ -121,6 +118,10 @@
return null;
}
+ public int rename(String oldName, String newName) {
+ return entries.rename(oldName, newName);
+ }
+
@Override
public void remove(String name) throws IOException {
if (fs.isReadOnly()) {
@@ -188,7 +189,7 @@
if (rec.getType() == CatalogFolder.RECORD_TYPE_FOLDER ||
rec.getType() == CatalogFile.RECORD_TYPE_FILE) {
String name = ((CatalogKey) rec.getKey()).getNodeName().getUnicodeString();
- HFSPlusEntry e = new HFSPlusEntry(fs, this, name, rec);
+ HFSPlusEntry e = new HFSPlusDirectory(fs, this, name, rec);
pathList.add(e);
}
}
@@ -216,7 +217,8 @@
CatalogThread thread =
new CatalogThread(CatalogFolder.RECORD_TYPE_FOLDER_THREAD, this.folder
.getFolderId(), dirName);
- CatalogFolder newFolder = new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
+ CatalogFolder newFolder =
+ new CatalogFolder(0, new CatalogNodeId(volumeHeader.getNextCatalogId()));
log.debug("New catalog folder :\n" + newFolder.toString());
CatalogKey key = new CatalogKey(this.folder.getFolderId(), dirName);
@@ -225,7 +227,7 @@
LeafRecord folderRecord = new LeafRecord(key, newFolder.getBytes());
log.debug("New record folder :\n" + folderRecord.toString());
- HFSPlusEntry newEntry = new HFSPlusEntry(fs, this, name, folderRecord);
+ HFSPlusEntry newEntry = new HFSPlusDirectory(fs, this, name, folderRecord);
volumeHeader.setFolderCount(volumeHeader.getFolderCount() + 1);
log.debug("New volume header :\n" + volumeHeader.toString());
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-03-19 19:48:22 UTC (rev 5122)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HFSPlusEntry.java 2009-03-19 22:10:41 UTC (rev 5123)
@@ -84,7 +84,7 @@
@Override
public FSDirectory getDirectory() throws IOException {
- if (!isFile()) {
+ if (!isDirectory()) {
throw new IOException("It is not a Directory");
}
return (HFSPlusDirectory) this;
@@ -123,7 +123,7 @@
public boolean isDirty() throws IOException {
return dirty;
}
-
+
public void setDirty() {
dirty = true;
}
@@ -147,7 +147,7 @@
if (type == AbstractFSEntry.ROOT_ENTRY) {
throw new IOException("Cannot change name of root directory");
}
- if (parent.getTable().rename(name, newName) < 0) {
+ if (parent.rename(name, newName) < 0) {
throw new IOException("Cannot change name");
}
Modified: trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java
===================================================================
--- trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 19:48:22 UTC (rev 5122)
+++ trunk/fs/src/fs/org/jnode/fs/hfsplus/HfsPlusFileSystem.java 2009-03-19 22:10:41 UTC (rev 5123)
@@ -100,7 +100,7 @@
log.info("Create root entry.");
LeafRecord record = catalog.getRecord(CatalogNodeId.HFSPLUS_POR_CNID);
if (record != null) {
- return new HFSPlusEntry(this, null, "/", record);
+ return new HFSPlusDirectory(this, null, "/", record);
}
log.error("Root entry : No record found.");
return null;
|