
Anvil storage writing (Basic storage is working, NO entities except for chests are working! Don't use for real servers)

git-svn-id: http://mc-server.googlecode.com/svn/trunk@475 0a769ca7-a7f5-676a-18bf-c427514a06d6
madmaxoft@gmail.com 2012-04-23 21:20:32 +00:00
parent 60b5f4b66b
commit 9cb8872851
11 changed files with 431 additions and 35 deletions
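
For orientation, the on-disk region format that the new write path targets can be summarized from the constants and SetChunkData() code in this commit. The sketch below is illustrative only: the k-prefixed names are not part of the commit, they merely restate MCA_MAX_CHUNKS, MCA_HEADER_SIZE and MCA_CHUNK_HEADER_LENGTH from the WSSAnvil header diff further down.

    // Rough sketch of the Anvil region (.mca) layout written by this commit (names are illustrative):
    const int kMaxChunks        = 32 * 32;         // chunks per region file (one region per 32x32 chunk area)
    const int kLocationsBytes   = kMaxChunks * 4;  // 4 KiB table: htonl((sectorOffset << 8) | sectorCount) per chunk
    const int kTimestampsBytes  = kMaxChunks * 4;  // 4 KiB table: per-chunk timestamps, unused by MCS
    const int kSectorSize       = 4096;            // chunk payloads are stored in whole 4 KiB sectors
    const int kChunkHeaderBytes = 5;               // 4-byte big-endian length (data size + 1) followed by a 1-byte compression type (2 = zlib)

    // Number of sectors one chunk occupies on disk - the same rounding SetChunkData() uses:
    inline unsigned SectorsForChunk(unsigned a_CompressedSize)
    {
        return (a_CompressedSize + kChunkHeaderBytes + kSectorSize - 1) / kSectorSize;
    }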


@@ -235,9 +235,12 @@ public:
 class cChunkDataCollector :
     public cChunkDataCallback
 {
-protected:
+public:
     BLOCKTYPE m_BlockData[cChunkDef::BlockDataSize];
+
+protected:
     virtual void BlockTypes(const BLOCKTYPE * a_BlockTypes) override
     {
         memcpy(m_BlockData, a_BlockTypes, cChunkDef::NumBlocks);
@@ -266,6 +269,50 @@ protected:
+/** A simple implementation of the cChunkDataCallback interface that collects all block data into separate buffers
+*/
+class cChunkDataSeparateCollector :
+    public cChunkDataCallback
+{
+public:
+    BLOCKTYPE m_BlockTypes[cChunkDef::NumBlocks];
+
+    // TODO: These should be NIBBLETYPE:
+    BLOCKTYPE m_BlockMetas   [cChunkDef::NumBlocks / 2];
+    BLOCKTYPE m_BlockLight   [cChunkDef::NumBlocks / 2];
+    BLOCKTYPE m_BlockSkyLight[cChunkDef::NumBlocks / 2];
+
+protected:
+
+    virtual void BlockTypes(const BLOCKTYPE * a_BlockTypes) override
+    {
+        memcpy(m_BlockTypes, a_BlockTypes, sizeof(m_BlockTypes));
+    }
+
+    virtual void BlockMeta(const BLOCKTYPE * a_BlockMeta) override
+    {
+        memcpy(m_BlockMetas, a_BlockMeta, sizeof(m_BlockMetas));
+    }
+
+    virtual void BlockLight(const BLOCKTYPE * a_BlockLight) override
+    {
+        memcpy(m_BlockLight, a_BlockLight, sizeof(m_BlockLight));
+    }
+
+    virtual void BlockSkyLight(const BLOCKTYPE * a_BlockSkyLight) override
+    {
+        memcpy(m_BlockSkyLight, a_BlockSkyLight, sizeof(m_BlockSkyLight));
+    }
+} ;
+
 /** Interface class used for comparing clients of two chunks.
 Used primarily for entity moving while both chunks are locked.
 */
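
A quick usage note on the new cChunkDataSeparateCollector: it is meant to be handed to the world as a chunk-data callback, which is exactly how the Anvil serializer further down consumes it. A minimal sketch, assuming a valid cWorld * World, chunk coords ChunkX/ChunkY/ChunkZ, and that the collector's remaining callback methods have default implementations:

    cChunkDataSeparateCollector Collector;
    if (World->GetChunkData(ChunkX, ChunkY, ChunkZ, Collector))
    {
        // Collector.m_BlockTypes, m_BlockMetas, m_BlockLight and m_BlockSkyLight
        // now hold the chunk's data in separate contiguous buffers
    }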


@@ -74,7 +74,7 @@ cNBTTag * cNBTTag::CreateTag(cNBTTag * a_Parent, eTagType a_Type, const AString
         case TAG_Double:    return new cNBTDouble   (a_Parent, a_Name);
         case TAG_ByteArray: return new cNBTByteArray(a_Parent, a_Name);
         case TAG_String:    return new cNBTString   (a_Parent, a_Name);
-        case TAG_List:      return new cNBTList     (a_Parent, a_Name);
+        case TAG_List:      return new cNBTList     (a_Parent, a_Name, TAG_End);
         case TAG_Compound:  return new cNBTCompound (a_Parent, a_Name);
         default:
         {
@@ -128,8 +128,8 @@ void cNBTList::Clear(void)
 int cNBTList::Add(cNBTTag * iTag)
 {
     // Catch usage errors while debugging:
-    ASSERT(m_ChildrenType != TAG_End);
-    ASSERT(iTag->GetType() == m_ChildrenType);
+    ASSERT(m_ChildrenType != TAG_End);          // Didn't call SetChildrenType()
+    ASSERT(iTag->GetType() == m_ChildrenType);  // Child of different type
 
     // Catch errors while running:
     if (m_ChildrenType == TAG_End)
@@ -535,12 +535,11 @@ int cNBTParser::ReadTag(const char ** a_Data, int * a_Length, cNBTTag::eTagType
         {
             char ItemType;
             RETURN_INT_IF_FAILED(ReadByte(a_Data, a_Length, ItemType));
-            cNBTList * List = new cNBTList(a_Parent, a_Name);
+            cNBTList * List = new cNBTList(a_Parent, a_Name, (cNBTTag::eTagType)ItemType);
             if (List == NULL)
             {
                 return ERROR_NOT_ENOUGH_MEMORY;
             }
-            RETURN_INT_IF_FAILED(List->SetChildrenType((cNBTTag::eTagType)ItemType));
             RETURN_INT_IF_FAILED(ReadList(a_Data, a_Length, List));
             *a_Tag = List;
             return ERROR_SUCCESS;


@@ -109,8 +109,8 @@ class cNBTList :
     eTagType m_ChildrenType;
 
 public:
-    cNBTList(cNBTTag * a_Parent) : cNBTTag(a_Parent, TAG_List), m_ChildrenType(TAG_End) {}
-    cNBTList(cNBTTag * a_Parent, const AString & a_Name) : cNBTTag(a_Parent, TAG_List, a_Name), m_ChildrenType(TAG_End) {}
+    cNBTList(cNBTTag * a_Parent, eTagType a_ChildrenType) : cNBTTag(a_Parent, TAG_List), m_ChildrenType(a_ChildrenType) {}
+    cNBTList(cNBTTag * a_Parent, const AString & a_Name, eTagType a_ChildrenType) : cNBTTag(a_Parent, TAG_List, a_Name), m_ChildrenType(a_ChildrenType) {}
     virtual ~cNBTList() {Clear(); }
 
     void Clear (void);
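
Note on the cNBTList constructor change above: the children tag type is now fixed at construction instead of being set afterwards through SetChildrenType(). Call sites therefore look like the ones the Anvil serializer adds below, for example:

    // The child tag type is passed up front; Add() still ASSERTs that each child matches it:
    cNBTList * TileEntities = new cNBTList(Level, "TileEntities", cNBTTag::TAG_Compound);
    Level->Add(TileEntities);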


@@ -11,6 +11,10 @@
 #include "BlockID.h"
 #include "cChestEntity.h"
 #include "cItem.h"
+#include "StringCompression.h"
+#include "cEntity.h"
+#include "cBlockEntity.h"
+#include "cMakeDir.h"
@@ -22,16 +26,136 @@ Since only the header is actually in the memory, this number can be high, but st
 #define MAX_MCA_FILES 32
 
 /// The maximum size of an inflated chunk
-#define CHUNK_INFLATE_MAX 128 KiB
+#define CHUNK_INFLATE_MAX 256 KiB
+
+///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
+// cNBTChunkSerializer
+
+class cNBTChunkSerializer :
+    public cChunkDataSeparateCollector
+{
+public:
+    cNBTChunkSerializer(cNBTList * a_Entities, cNBTList * a_TileEntities) :
+        m_Entities(a_Entities),
+        m_TileEntities(a_TileEntities)
+    {
+    }
+
+protected:
+
+    /* From cChunkDataSeparateCollector we inherit:
+    - m_BlockTypes[]
+    - m_BlockMetas[]
+    - m_BlockLight[]
+    - m_BlockSkyLight[]
+    */
+
+    // TODO: Biomes
+
+    // We need to save entities and blockentities into NBT
+    cNBTList * m_Entities;      // Tag where entities will be saved
+    cNBTList * m_TileEntities;  // Tag where block-entities will be saved
+
+    cNBTCompound * AddBasicTileEntity(cBlockEntity * a_Entity, const char * a_EntityTypeID)
+    {
+        cNBTCompound * res = new cNBTCompound(m_TileEntities);
+        res->Add(new cNBTInt   (res, "x",  a_Entity->GetPosX()));
+        res->Add(new cNBTInt   (res, "y",  a_Entity->GetPosY()));
+        res->Add(new cNBTInt   (res, "z",  a_Entity->GetPosZ()));
+        res->Add(new cNBTString(res, "id", a_EntityTypeID));
+        return res;
+    }
+
+    void AddItem(cNBTList * a_Items, cItem * a_Item, int a_Slot)
+    {
+        cNBTCompound * Tag = new cNBTCompound(a_Items);
+        Tag->Add(new cNBTShort(Tag, "id",     a_Item->m_ItemID));
+        Tag->Add(new cNBTShort(Tag, "Damage", a_Item->m_ItemHealth));
+        Tag->Add(new cNBTByte (Tag, "Count",  a_Item->m_ItemCount));
+        Tag->Add(new cNBTByte (Tag, "Slot",   a_Slot));
+    }
+
+    void AddChestEntity(cChestEntity * a_Entity)
+    {
+        cNBTCompound * Base = AddBasicTileEntity(a_Entity, "chest");
+        cNBTList * Items = new cNBTList(Base, "Items", cNBTTag::TAG_Compound);
+        Base->Add(Items);
+        for (int i = 0; i < cChestEntity::c_ChestHeight * cChestEntity::c_ChestWidth; i++)
+        {
+            cItem * Item = a_Entity->GetSlot(i);
+            if ((Item == NULL) || Item->IsEmpty())
+            {
+                continue;
+            }
+            AddItem(Items, Item, i);
+        }
+    }
+
+    virtual void Entity(cEntity * a_Entity) override
+    {
+        // TODO: Add entity into NBT:
+    }
+
+    virtual void BlockEntity(cBlockEntity * a_Entity)
+    {
+        // Add tile-entity into NBT:
+        switch (a_Entity->GetBlockType())
+        {
+            case E_BLOCK_CHEST: AddChestEntity((cChestEntity *)a_Entity); break;
+            default:
+            {
+                ASSERT(!"Unhandled block entity saved into Anvil");
+            }
+        }
+    }
+} ;  // class cNBTChunkSerializer
+
 ///////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
 // cWSSAnvil:
 
+cWSSAnvil::cWSSAnvil(cWorld * a_World) :
+    super(a_World)
+{
+    // Create a level.dat file for mapping tools, if it doesn't already exist:
+    AString fnam;
+    Printf(fnam, "%s/level.dat", a_World->GetName().c_str());
+    if (!cFile::Exists(fnam))
+    {
+        std::auto_ptr<cNBTCompound> Root(new cNBTCompound(NULL));
+        cNBTCompound * Data = new cNBTCompound(Root.get());
+        Root->Add(Data);
+        Data->Add(new cNBTInt(Data, "SpawnX", (int)(a_World->GetSpawnX())));
+        Data->Add(new cNBTInt(Data, "SpawnY", (int)(a_World->GetSpawnY())));
+        Data->Add(new cNBTInt(Data, "SpawnZ", (int)(a_World->GetSpawnZ())));
+        AString Uncompressed;
+        cNBTSerializer::Serialize(Root.get(), Uncompressed);
+        gzFile gz = gzopen(fnam.c_str(), "wb");
+        if (gz != NULL)
+        {
+            gzwrite(gz, Uncompressed.data(), Uncompressed.size());
+        }
+        gzclose(gz);
+    }
+}
+
 cWSSAnvil::~cWSSAnvil()
 {
     cCSLock Lock(m_CS);
@@ -63,8 +187,18 @@ bool cWSSAnvil::LoadChunk(const cChunkCoords & a_Chunk)
 bool cWSSAnvil::SaveChunk(const cChunkCoords & a_Chunk)
 {
-    // TODO: We're read-only for now
-    return false;
+    AString ChunkData;
+    if (!SaveChunkToData(a_Chunk, ChunkData))
+    {
+        return false;
+    }
+    if (!SetChunkData(a_Chunk, ChunkData))
+    {
+        return false;
+    }
+
+    // Everything successful
+    return true;
 }
@@ -86,6 +220,21 @@ bool cWSSAnvil::GetChunkData(const cChunkCoords & a_Chunk, AString & a_Data)
+bool cWSSAnvil::SetChunkData(const cChunkCoords & a_Chunk, const AString & a_Data)
+{
+    cCSLock Lock(m_CS);
+    cMCAFile * File = LoadMCAFile(a_Chunk);
+    if (File == NULL)
+    {
+        return false;
+    }
+    return File->SetChunkData(a_Chunk, a_Data);
+}
+
 cWSSAnvil::cMCAFile * cWSSAnvil::LoadMCAFile(const cChunkCoords & a_Chunk)
 {
     // ASSUME m_CS is locked
@@ -111,7 +260,9 @@ cWSSAnvil::cMCAFile * cWSSAnvil::LoadMCAFile(const cChunkCoords & a_Chunk)
     // Load it anew:
     AString FileName;
-    Printf(FileName, "%s/r.%d.%d.mca", m_World->GetName().c_str(), RegionX, RegionZ);
+    Printf(FileName, "%s/region", m_World->GetName().c_str());
+    cMakeDir::MakeDir(FileName);
+    AppendPrintf(FileName, "/r.%d.%d.mca", RegionX, RegionZ);
     cMCAFile * f = new cMCAFile(FileName, RegionX, RegionZ);
     if (f == NULL)
     {
@@ -167,6 +318,23 @@ bool cWSSAnvil::LoadChunkFromData(const cChunkCoords & a_Chunk, const AString &
+bool cWSSAnvil::SaveChunkToData(const cChunkCoords & a_Chunk, AString & a_Data)
+{
+    std::auto_ptr<cNBTTree> Tree(SaveChunkToNBT(a_Chunk));
+    if (Tree.get() == NULL)
+    {
+        return false;
+    }
+    AString Uncompressed;
+    cNBTSerializer::Serialize(Tree.get(), Uncompressed);
+    CompressString(Uncompressed.data(), Uncompressed.size(), a_Data);
+    return true;
+}
+
 bool cWSSAnvil::LoadChunkFromNBT(const cChunkCoords & a_Chunk, cNBTTag & a_NBT)
 {
     // The data arrays, in MCA-native y/z/x ordering (will be reordered for the final chunk data)
@@ -299,6 +467,50 @@ bool cWSSAnvil::LoadChunkFromNBT(const cChunkCoords & a_Chunk, cNBTTag & a_NBT)
+cNBTTag * cWSSAnvil::SaveChunkToNBT(const cChunkCoords & a_Chunk)
+{
+    std::auto_ptr<cNBTCompound> res(new cNBTCompound(NULL));
+    cNBTCompound * Level = new cNBTCompound(res.get(), "Level");
+    res->Add(Level);
+    cNBTList * Entities = new cNBTList(Level, "Entities", cNBTTag::TAG_Compound);
+    Level->Add(Entities);
+    cNBTList * TileEntities = new cNBTList(Level, "TileEntities", cNBTTag::TAG_Compound);
+    Level->Add(TileEntities);
+    cNBTChunkSerializer Serializer(Entities, TileEntities);
+    if (!m_World->GetChunkData(a_Chunk.m_ChunkX, a_Chunk.m_ChunkY, a_Chunk.m_ChunkZ, Serializer))
+    {
+        return NULL;
+    }
+
+    Level->Add(new cNBTInt(Level, "xPos", a_Chunk.m_ChunkX));
+    Level->Add(new cNBTInt(Level, "zPos", a_Chunk.m_ChunkZ));
+
+    // TODO: Save biomes:
+    // Level->Add(new cNBTByteArray(Level, "Biomes", AString(Serializer.m_Biomes, sizeof(Serializer.m_Biomes));
+
+    // Save blockdata:
+    cNBTList * Sections = new cNBTList(Level, "Sections", cNBTTag::TAG_Compound);
+    Level->Add(Sections);
+    int SliceSizeBlock  = cChunkDef::Width * cChunkDef::Width * 16;
+    int SliceSizeNibble = SliceSizeBlock / 2;
+    for (int Y = 0; Y < 16; Y++)
+    {
+        cNBTCompound * Slice = new cNBTCompound(Sections);
+        Sections->Add(Slice);
+        Slice->Add(new cNBTByteArray(Slice, "Blocks",     AString(Serializer.m_BlockTypes    + Y * SliceSizeBlock,  SliceSizeBlock)));
+        Slice->Add(new cNBTByteArray(Slice, "Data",       AString(Serializer.m_BlockMetas    + Y * SliceSizeNibble, SliceSizeNibble)));
+        Slice->Add(new cNBTByteArray(Slice, "SkyLight",   AString(Serializer.m_BlockSkyLight + Y * SliceSizeNibble, SliceSizeNibble)));
+        Slice->Add(new cNBTByteArray(Slice, "BlockLight", AString(Serializer.m_BlockLight    + Y * SliceSizeNibble, SliceSizeNibble)));
+        Slice->Add(new cNBTByte(Slice, "Y", Y));
+    }
+
+    return res.release();
+}
+
 void cWSSAnvil::LoadEntitiesFromNBT(cEntityList & a_Entitites, const cNBTList * a_NBT)
 {
     // TODO: Load the entities
@@ -423,7 +635,7 @@ bool cWSSAnvil::GetBlockEntityNBTPos(const cNBTCompound * a_NBT, int & a_X, int
 cWSSAnvil::cMCAFile::cMCAFile(const AString & a_FileName, int a_RegionX, int a_RegionZ) :
     m_RegionX(a_RegionX),
     m_RegionZ(a_RegionZ),
-    m_File(a_FileName, cFile::fmRead),
+    m_File(a_FileName, cFile::fmReadWrite),
     m_FileName(a_FileName)
 {
     if (!m_File.IsOpen())
@@ -434,10 +646,19 @@ cWSSAnvil::cMCAFile::cMCAFile(const AString & a_FileName, int a_RegionX, int a_R
     // Load the header:
     if (m_File.Read(m_Header, sizeof(m_Header)) != sizeof(m_Header))
     {
-        LOGWARNING("Cannot read MCA header from file \"%s\", chunks in that file will be lost", m_FileName.c_str());
-        m_File.Close();
-        return;
+        // Cannot read the header - perhaps the file has just been created?
+        // Try writing a NULL header (both chunk offsets and timestamps):
+        memset(m_Header, 0, sizeof(m_Header));
+        if (
+            (m_File.Write(m_Header, sizeof(m_Header)) != sizeof(m_Header)) ||
+            (m_File.Write(m_Header, sizeof(m_Header)) != sizeof(m_Header))
+        )
+        {
+            LOGWARNING("Cannot process MCA header in file \"%s\", chunks in that file will be lost", m_FileName.c_str());
+            m_File.Close();
+            return;
+        }
     }
 }
@@ -492,3 +713,80 @@ bool cWSSAnvil::cMCAFile::GetChunkData(const cChunkCoords & a_Chunk, AString & a
+bool cWSSAnvil::cMCAFile::SetChunkData(const cChunkCoords & a_Chunk, const AString & a_Data)
+{
+    if (!m_File.IsOpen())
+    {
+        return false;
+    }
+    int LocalX = a_Chunk.m_ChunkX % 32;
+    if (LocalX < 0)
+    {
+        LocalX = 32 + LocalX;
+    }
+    int LocalZ = a_Chunk.m_ChunkZ % 32;
+    if (LocalZ < 0)
+    {
+        LocalZ = 32 + LocalZ;
+    }
+    unsigned ChunkSector = FindFreeLocation(LocalX, LocalZ, a_Data);
+
+    // Store the chunk data:
+    m_File.Seek(ChunkSector * 4096);
+    unsigned ChunkSize = htonl(a_Data.size() + 1);
+    if (m_File.Write(&ChunkSize, 4) != 4)
+    {
+        return false;
+    }
+    char CompressionType = 2;
+    if (m_File.Write(&CompressionType, 1) != 1)
+    {
+        return false;
+    }
+    if (m_File.Write(a_Data.data(), a_Data.size()) != a_Data.size())
+    {
+        return false;
+    }
+
+    // Store the header:
+    ChunkSize = (a_Data.size() + MCA_CHUNK_HEADER_LENGTH + 4095) / 4096;  // Round data size *up* to nearest 4KB sector, make it a sector number
+    ASSERT(ChunkSize < 256);
+    m_Header[LocalX + 32 * LocalZ] = htonl((ChunkSector << 8) | ChunkSize);
+    m_File.Seek(0);
+    if (m_File.Write(m_Header, sizeof(m_Header)) != sizeof(m_Header))
+    {
+        return false;
+    }
+
+    return true;
+}
+
+unsigned cWSSAnvil::cMCAFile::FindFreeLocation(int a_LocalX, int a_LocalZ, const AString & a_Data)
+{
+    // See if it fits the current location:
+    unsigned ChunkLocation = ntohl(m_Header[a_LocalX + 32 * a_LocalZ]);
+    unsigned ChunkLen = ChunkLocation & 0xff;
+    if (a_Data.size() + MCA_CHUNK_HEADER_LENGTH <= (ChunkLen * 4096))
+    {
+        return ChunkLocation >> 8;
+    }
+
+    // Doesn't fit, append to the end of file (we're wasting a lot of space, TODO: fix this later)
+    unsigned MaxLocation = 2 << 8;  // Minimum sector is #2 - after the headers
+    for (int i = 0; i < ARRAYCOUNT(m_Header); i++)
+    {
+        ChunkLocation = ntohl(m_Header[i]);
+        ChunkLocation = ChunkLocation + ((ChunkLocation & 0xff) << 8);  // Add the number of sectors used; don't care about the 4th byte
+        if (MaxLocation < ChunkLocation)
+        {
+            MaxLocation = ChunkLocation;
+        }
+    }  // for i - m_Header[]
+    return MaxLocation >> 8;
+}
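
To make the header arithmetic in SetChunkData() and FindFreeLocation() concrete, here is a worked example with made-up sizes:

    // Example: a chunk whose compressed NBT is 10000 bytes, stored starting at sector 7:
    //   sectors needed  = (10000 + MCA_CHUNK_HEADER_LENGTH + 4095) / 4096 = 3
    //   location entry  = htonl((7 << 8) | 3) -> bytes 00 00 07 03,
    //                     stored at header offset 4 * (LocalX + 32 * LocalZ)
    //   chunk bytes     = 4-byte big-endian length (10000 + 1), 1 compression-type byte (2 = zlib),
    //                     then the 10000 data bytes; no explicit padding is written,
    //                     the next chunk simply begins at the following free 4 KiB sector.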


@@ -16,8 +16,14 @@
 enum
 {
-    // The MCA header is 8 KiB
-    MCA_HEADER_SIZE = 8192,
+    /// Maximum number of chunks in an MCA file - also the count of the header items
+    MCA_MAX_CHUNKS = 32 * 32,
+
+    /// The MCA header is 8 KiB
+    MCA_HEADER_SIZE = MCA_MAX_CHUNKS * 8,
+
+    /// There are 5 bytes of header in front of each chunk
+    MCA_CHUNK_HEADER_LENGTH = 5,
 } ;
@@ -40,7 +46,7 @@ class cWSSAnvil :
 public:
 
-    cWSSAnvil(cWorld * a_World) : super(a_World) {}
+    cWSSAnvil(cWorld * a_World);
     virtual ~cWSSAnvil();
 
 protected:
@@ -51,7 +57,9 @@ protected:
         cMCAFile(const AString & a_FileName, int a_RegionX, int a_RegionZ);
 
-        bool GetChunkData(const cChunkCoords & a_Chunk, AString & a_Data);
+        bool GetChunkData  (const cChunkCoords & a_Chunk, AString & a_Data);
+        bool SetChunkData  (const cChunkCoords & a_Chunk, const AString & a_Data);
+        bool EraseChunkData(const cChunkCoords & a_Chunk);
 
         int GetRegionX (void) const {return m_RegionX; }
         int GetRegionZ (void) const {return m_RegionZ; }
@@ -66,23 +74,36 @@ protected:
         // The header, copied from the file so we don't have to seek to it all the time
         // First 1024 entries are chunk locations - the 3 + 1 byte sector-offset and sector-count
-        // The next 1024 entries are chunk timestamps - unused in MCS
-        unsigned m_Header[MCA_HEADER_SIZE / sizeof(unsigned)];
+        unsigned m_Header[MCA_MAX_CHUNKS];
+
+        // Chunk timestamps, following the chunk headers, are unused by MCS
+
+        /// Finds a free location large enough to hold a_Data. Gets a hint of the chunk coords, places the data there if it fits. Returns the sector number.
+        unsigned FindFreeLocation(int a_LocalX, int a_LocalZ, const AString & a_Data);
     } ;
 
     typedef std::list<cMCAFile *> cMCAFiles;
 
     cCriticalSection m_CS;
     cMCAFiles m_Files;  // a MRU cache of MCA files
 
-    /// Gets chunk data from the correct file; locks CS as needed
+    /// Gets chunk data from the correct file; locks file CS as needed
     bool GetChunkData(const cChunkCoords & a_Chunk, AString & a_Data);
 
+    /// Sets chunk data into the correct file; locks file CS as needed
+    bool SetChunkData(const cChunkCoords & a_Chunk, const AString & a_Data);
+
     /// Loads the chunk from the data (no locking needed)
     bool LoadChunkFromData(const cChunkCoords & a_Chunk, const AString & a_Data);
 
+    /// Saves the chunk into datastream (no locking needed)
+    bool SaveChunkToData(const cChunkCoords & a_Chunk, AString & a_Data);
+
     /// Loads the chunk from NBT data (no locking needed)
     bool LoadChunkFromNBT(const cChunkCoords & a_Chunk, cNBTTag & a_NBT);
 
+    /// Saves the chunk into NBT data; returns NULL for failure
+    cNBTTag * SaveChunkToNBT(const cChunkCoords & a_Chunk);
+
     /// Loads the chunk's entities from NBT data (a_NBT is the Level\\Entities list tag; may be NULL)
     void LoadEntitiesFromNBT(cEntityList & a_Entitites, const cNBTList * a_NBT);
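
Taken together with the .cpp changes above, the new save path produces the following on-disk layout for a world (directory and file names come from the code; the world name "world" is just an example):

    world/level.dat             - gzipped NBT: Data { SpawnX, SpawnY, SpawnZ }, written only if the file is missing
    world/region/r.<X>.<Z>.mca  - one region file per 32x32 chunks, created on demand by LoadMCAFile()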


@@ -34,10 +34,10 @@ class cPacket;
 class cBlockEntity
 {
 protected:
-    cBlockEntity(ENUM_BLOCK_ID a_BlockType, int a_X, int a_Y, int a_Z, cWorld * a_World)
-        : m_PosX( a_X )
-        , m_PosY( a_Y )
-        , m_PosZ( a_Z )
+    cBlockEntity(ENUM_BLOCK_ID a_BlockType, int a_BlockX, int a_BlockY, int a_BlockZ, cWorld * a_World)
+        : m_PosX( a_BlockX )
+        , m_PosY( a_BlockY )
+        , m_PosZ( a_BlockZ )
         , m_BlockType( a_BlockType )
         , m_World( a_World )
     {}
@@ -45,6 +45,7 @@ public:
     virtual ~cBlockEntity() {};
     virtual void Destroy() {};
 
+    // Position, in absolute block coordinates:
     int GetPosX() { return m_PosX; }
     int GetPosY() { return m_PosY; }
     int GetPosZ() { return m_PosZ; }
@@ -78,7 +79,7 @@ public:
     virtual cPacket * GetPacket(void) {return NULL; }
 
 protected:
-    int m_PosX;  // Position in block coordinates
+    int m_PosX;  // Position in absolute block coordinates
     int m_PosY;
     int m_PosZ;


@@ -27,7 +27,7 @@ cChestEntity::cChestEntity(int a_X, int a_Y, int a_Z, cWorld * a_World)
     , m_TopChest( false )
     , m_JoinedChest( NULL )
 {
-    m_Content = new cItem[ c_ChestHeight*c_ChestWidth ];
+    m_Content = new cItem[ c_ChestHeight * c_ChestWidth ];
 }


@@ -69,7 +69,7 @@ bool cFile::Open(const AString & iFileName, EMode iMode)
     {
         case fmRead:      Mode = "rb";  break;
         case fmWrite:     Mode = "wb";  break;
-        case fmReadWrite: Mode = "ab+"; break;
+        case fmReadWrite: Mode = "rb+"; break;
         default:
         {
             ASSERT(!"Unhandled file mode");
@@ -77,6 +77,14 @@ bool cFile::Open(const AString & iFileName, EMode iMode)
         }
     }
     m_File = fopen(iFileName.c_str(), Mode);
+    if ((m_File == NULL) && (iMode == fmReadWrite))
+    {
+        // Fix for MS not following C spec, opening "a" mode files for writing at the end only.
+        // The file open operation has been tried with "read update", fails if file not found,
+        // so now we know either the file doesn't exist or we don't have rights, no need to worry about file contents.
+        // Simply re-open for read-writing, erasing existing contents:
+        m_File = fopen(iFileName.c_str(), "wb+");
+    }
     return (m_File != NULL);
 }
@@ -251,3 +259,13 @@ int cFile::ReadRestOfFile(AString & a_Contents)
+bool cFile::Exists(const AString & a_FileName)
+{
+    cFile test(a_FileName, fmRead);
+    return test.IsOpen();
+}
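
The fmReadWrite change above is what lets cMCAFile open region files for update and create them when missing. A minimal usage sketch (the path is illustrative):

    cFile Region("world/region/r.0.0.mca", cFile::fmReadWrite);
    // If the file exists it is opened "rb+" and its contents are kept;
    // if it doesn't, the fallback reopens it "wb+", creating an empty file,
    // which is why the cMCAFile constructor can then write a fresh NULL header into it.
    if (Region.IsOpen())
    {
        // seek / read / write as needed
    }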


@@ -86,6 +86,9 @@ public:
     /// Reads the file from current position till EOF into an AString; returns the number of bytes read or -1 for error
     int ReadRestOfFile(AString & a_Contents);
 
+    /// Returns true if the file specified exists
+    static bool Exists(const AString & a_FileName);
+
 private:
     #ifdef USE_STDIO_FILE
     FILE * m_File;


@@ -7,16 +7,16 @@
-void cMakeDir::MakeDir( const char* a_Directory )
+void cMakeDir::MakeDir(const AString & a_Directory)
 {
 #ifdef _WIN32
     SECURITY_ATTRIBUTES Attrib;
     Attrib.nLength = sizeof(SECURITY_ATTRIBUTES);
     Attrib.lpSecurityDescriptor = NULL;
     Attrib.bInheritHandle = false;
-    ::CreateDirectory(a_Directory, &Attrib);
+    ::CreateDirectory(a_Directory.c_str(), &Attrib);
 #else
-    mkdir(a_Directory, S_IRWXU | S_IRWXG | S_IRWXO);
+    mkdir(a_Directory.c_str(), S_IRWXU | S_IRWXG | S_IRWXO);
 #endif
 }


@@ -1,7 +1,16 @@
 #pragma once
 
 class cMakeDir
 {
 public:
-    static void MakeDir( const char* a_Directory );
+    static void MakeDir(const AString & a_Directory);
 };