
Merge pull request #2400 from cuberite/OffloadBadChunks

Offload bad chunks
Julian Laubstein 2015-07-31 14:18:43 +02:00
commit 41d7119a38
4 changed files with 155 additions and 84 deletions

View File

@@ -147,7 +147,7 @@ bool cFile::IsEOF(void) const
int cFile::Read (void * iBuffer, size_t iNumBytes)
int cFile::Read (void * a_Buffer, size_t a_NumBytes)
{
ASSERT(IsOpen());
@@ -156,14 +156,35 @@ int cFile::Read (void * iBuffer, size_t iNumBytes)
return -1;
}
return static_cast<int>(fread(iBuffer, 1, static_cast<size_t>(iNumBytes), m_File)); // fread() returns the portion of Count parameter actually read, so we need to send iNumBytes as Count
return static_cast<int>(fread(a_Buffer, 1, a_NumBytes, m_File)); // fread() returns the portion of Count parameter actually read, so we need to send a_NumBytes as Count
}
int cFile::Write(const void * iBuffer, size_t iNumBytes)
AString cFile::Read(size_t a_NumBytes)
{
ASSERT(IsOpen());
if (!IsOpen())
{
return AString();
}
// HACK: This depends on the knowledge that AString::data() returns the internal buffer, rather than a copy of it.
AString res;
res.resize(a_NumBytes);
auto newSize = fread(const_cast<char *>(res.data()), 1, a_NumBytes, m_File);
res.resize(newSize);
return res;
}
int cFile::Write(const void * a_Buffer, size_t a_NumBytes)
{
ASSERT(IsOpen());
@@ -172,7 +193,7 @@ int cFile::Write(const void * iBuffer, size_t iNumBytes)
return -1;
}
int res = static_cast<int>(fwrite(iBuffer, 1, static_cast<size_t>(iNumBytes), m_File)); // fwrite() returns the portion of Count parameter actually written, so we need to send iNumBytes as Count
int res = static_cast<int>(fwrite(a_Buffer, 1, a_NumBytes, m_File)); // fwrite() returns the portion of Count parameter actually written, so we need to send a_NumBytes as Count
return res;
}
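
A minimal usage sketch of the two Read overloads shown above: the existing pointer-based one and the AString-returning one added by this commit. The file name, byte counts, and the assumption that Globals.h pulls in the cFile declaration are illustrative only.

// Sketch only: file name and sizes are placeholders, error handling trimmed.
#include "Globals.h"

void ReadExample(void)
{
	cFile f;
	if (!f.Open("example.dat", cFile::fmRead))
	{
		return;
	}

	// Existing overload: the caller supplies the buffer and checks the byte count:
	char Buf[16];
	int NumRead = f.Read(Buf, sizeof(Buf));
	if (NumRead < 0)
	{
		return;
	}

	// New overload: returns the bytes actually read as an AString,
	// which may be shorter than requested (or empty on failure):
	AString Data = f.Read(1024);
	f.Close();
}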

View File

@@ -80,11 +80,14 @@ public:
bool IsOpen(void) const;
bool IsEOF(void) const;
/** Reads up to iNumBytes bytes into iBuffer, returns the number of bytes actually read, or -1 on failure; asserts if not open */
int Read (void * iBuffer, size_t iNumBytes);
/** Reads up to a_NumBytes bytes into a_Buffer, returns the number of bytes actually read, or -1 on failure; asserts if not open */
int Read(void * a_Buffer, size_t a_NumBytes);
/** Writes up to iNumBytes bytes from iBuffer, returns the number of bytes actually written, or -1 on failure; asserts if not open */
int Write(const void * iBuffer, size_t iNumBytes);
/** Reads up to a_NumBytes bytes, returns the bytes actually read, or empty string on failure; asserts if not open */
AString Read(size_t a_NumBytes);
/** Writes up to a_NumBytes bytes from a_Buffer, returns the number of bytes actually written, or -1 on failure; asserts if not open */
int Write(const void * a_Buffer, size_t a_NumBytes);
/** Seeks to iPosition bytes from file start, returns old position or -1 for failure; asserts if not open */
long Seek (int iPosition);

View File

@@ -69,18 +69,6 @@ Since only the header is actually in the memory, this number can be high, but st
*/
#define MAX_MCA_FILES 32
#define LOAD_FAILED(CHX, CHZ) \
{ \
const int RegionX = FAST_FLOOR_DIV(CHX, 32); \
const int RegionZ = FAST_FLOOR_DIV(CHZ, 32); \
LOGERROR("%s (%d): Loading chunk [%d, %d] from file r.%d.%d.mca failed. " \
"The server will now abort in order to avoid further data loss. " \
"Please add the reported file and this message to the issue report.", \
__FUNCTION__, __LINE__, CHX, CHZ, RegionX, RegionZ \
); \
*(reinterpret_cast<volatile int *>(0)) = 0; /* Crash intentionally */ \
}
@@ -109,11 +97,11 @@ cWSSAnvil::cWSSAnvil(cWorld * a_World, int a_CompressionFactor) :
Writer.AddByte("thundering", a_World->IsWeatherStorm() ? 1 : 0);
Writer.AddInt("GameType", static_cast<int>(a_World->GetGameMode()));
Writer.AddInt("generatorVersion", 1);
Writer.AddInt("SpawnX", static_cast<int>(a_World->GetSpawnX()));
Writer.AddInt("SpawnY", static_cast<int>(a_World->GetSpawnY()));
Writer.AddInt("SpawnZ", static_cast<int>(a_World->GetSpawnZ()));
Writer.AddInt("SpawnX", FloorC(a_World->GetSpawnX()));
Writer.AddInt("SpawnY", FloorC(a_World->GetSpawnY()));
Writer.AddInt("SpawnZ", FloorC(a_World->GetSpawnZ()));
Writer.AddInt("version", 19133);
Writer.AddLong("DayTime", static_cast<Int64>(a_World->GetTimeOfDay()));
Writer.AddLong("DayTime", a_World->GetTimeOfDay());
Writer.AddLong("Time", a_World->GetWorldAge());
Writer.AddLong("SizeOnDisk", 0);
Writer.AddString("generatorName", "default");
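
The switch from static_cast<int> to FloorC for the spawn coordinates above changes behaviour only for negative values: a plain cast truncates toward zero, while FloorC floors. A standalone sketch of the difference, with FloorC's assumed floor-then-cast behaviour re-implemented locally for illustration:

#include <cmath>
#include <cassert>

// Assumed behaviour of the FloorC helper: floor first, then cast to int.
static int FloorCSketch(double a_Value)
{
	return static_cast<int>(std::floor(a_Value));
}

int main()
{
	double SpawnX = -0.5;
	assert(static_cast<int>(SpawnX) == 0);  // truncation toward zero
	assert(FloorCSketch(SpawnX) == -1);     // flooring, matching block coordinates
	return 0;
}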
@@ -122,11 +110,6 @@ cWSSAnvil::cWSSAnvil(cWorld * a_World, int a_CompressionFactor) :
Writer.EndCompound();
Writer.Finish();
#ifdef _DEBUG
cParsedNBT TestParse(Writer.GetResult().data(), Writer.GetResult().size());
ASSERT(TestParse.IsValid());
#endif // _DEBUG
gzFile gz = gzopen((FILE_IO_PREFIX + fnam).c_str(), "wb");
if (gz != nullptr)
{
@@ -191,6 +174,56 @@ bool cWSSAnvil::SaveChunk(const cChunkCoords & a_Chunk)
void cWSSAnvil::ChunkLoadFailed(int a_ChunkX, int a_ChunkZ, const AString & a_Reason, const AString & a_ChunkDataToSave)
{
// Construct the filename for offloading:
AString OffloadFileName;
Printf(OffloadFileName, "%s%cregion%cbadchunks", m_World->GetName().c_str(), cFile::PathSeparator, cFile::PathSeparator);
cFile::CreateFolder(FILE_IO_PREFIX + OffloadFileName);
auto t = time(nullptr);
struct tm stm;
#ifdef _MSC_VER
localtime_s(&stm, &t);
#else
localtime_r(&t, &stm);
#endif
AppendPrintf(OffloadFileName, "%cch.%d.%d.%d-%02d-%02d-%02d-%02d-%02d.dat",
cFile::PathSeparator, a_ChunkX, a_ChunkZ,
stm.tm_year + 1900, stm.tm_mon + 1, stm.tm_mday, stm.tm_hour, stm.tm_min, stm.tm_sec
);
// Log the warning to console:
const int RegionX = FAST_FLOOR_DIV(a_ChunkX, 32);
const int RegionZ = FAST_FLOOR_DIV(a_ChunkZ, 32);
AString Info = Printf("Loading chunk [%d, %d] for world %s from file r.%d.%d.mca failed: %s. Offloading old chunk data to file %s and regenerating chunk.",
a_ChunkX, a_ChunkZ, m_World->GetName().c_str(), RegionX, RegionZ, a_Reason.c_str(), OffloadFileName.c_str()
);
LOGWARNING("%s", Info.c_str());
// Write the data:
cFile f;
if (!f.Open(OffloadFileName, cFile::fmWrite))
{
LOGWARNING("Cannot open file %s for writing! Old chunk data is lost.", OffloadFileName.c_str());
return;
}
f.Write(a_ChunkDataToSave.data(), a_ChunkDataToSave.size());
f.Close();
// Write a description file:
if (!f.Open(OffloadFileName + ".info", cFile::fmWrite))
{
LOGWARNING("Cannot open file %s.info for writing! The information about the failed chunk will not be written.", OffloadFileName.c_str());
return;
}
f.Write(Info.c_str(), Info.size());
f.Close();
}
bool cWSSAnvil::GetChunkData(const cChunkCoords & a_Chunk, AString & a_Data)
{
cCSLock Lock(m_CS);
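
The offload file name built in ChunkLoadFailed above combines the chunk coordinates with a local timestamp, using localtime_s on MSVC and localtime_r elsewhere. A self-contained sketch of that naming pattern; the world name, coordinates, and the forward-slash separator are placeholders rather than the cFile::PathSeparator used above:

#include <ctime>
#include <cstdio>
#include <string>

// Builds a badchunks offload name like "world/region/badchunks/ch.3.-7.2015-07-31-14-18-43.dat".
std::string MakeOffloadFileName(const std::string & a_WorldName, int a_ChunkX, int a_ChunkZ)
{
	time_t t = time(nullptr);
	struct tm stm;
	#ifdef _MSC_VER
		localtime_s(&stm, &t);  // MSVC spelling: (dest, src)
	#else
		localtime_r(&t, &stm);  // POSIX spelling: (src, dest)
	#endif
	char buf[256];
	snprintf(buf, sizeof(buf), "%s/region/badchunks/ch.%d.%d.%d-%02d-%02d-%02d-%02d-%02d.dat",
		a_WorldName.c_str(), a_ChunkX, a_ChunkZ,
		stm.tm_year + 1900, stm.tm_mon + 1, stm.tm_mday, stm.tm_hour, stm.tm_min, stm.tm_sec
	);
	return buf;
}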
@@ -254,7 +287,7 @@ cWSSAnvil::cMCAFile * cWSSAnvil::LoadMCAFile(const cChunkCoords & a_Chunk)
Printf(FileName, "%s%cregion", m_World->GetName().c_str(), cFile::PathSeparator);
cFile::CreateFolder(FILE_IO_PREFIX + FileName);
AppendPrintf(FileName, "/r.%d.%d.mca", RegionX, RegionZ);
cMCAFile * f = new cMCAFile(FileName, RegionX, RegionZ);
cMCAFile * f = new cMCAFile(*this, FileName, RegionX, RegionZ);
if (f == nullptr)
{
return nullptr;
@@ -282,7 +315,7 @@ bool cWSSAnvil::LoadChunkFromData(const cChunkCoords & a_Chunk, const AString &
if (res != Z_OK)
{
LOGWARNING("Uncompressing chunk [%d, %d] failed: %d", a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, res);
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "InflateString() failed", a_Data);
return false;
}
@@ -291,12 +324,12 @@ bool cWSSAnvil::LoadChunkFromData(const cChunkCoords & a_Chunk, const AString &
if (!NBT.IsValid())
{
// NBT Parsing failed
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "NBT parsing failed", a_Data);
return false;
}
// Load the data from NBT:
return LoadChunkFromNBT(a_Chunk, NBT);
return LoadChunkFromNBT(a_Chunk, NBT, a_Data);
}
@@ -321,7 +354,7 @@ bool cWSSAnvil::SaveChunkToData(const cChunkCoords & a_Chunk, AString & a_Data)
bool cWSSAnvil::LoadChunkFromNBT(const cChunkCoords & a_Chunk, const cParsedNBT & a_NBT)
bool cWSSAnvil::LoadChunkFromNBT(const cChunkCoords & a_Chunk, const cParsedNBT & a_NBT, const AString & a_RawChunkData)
{
// The data arrays, in MCA-native y / z / x ordering (will be reordered for the final chunk data)
cChunkDef::BlockTypes BlockTypes;
@@ -338,19 +371,19 @@ bool cWSSAnvil::LoadChunkFromNBT(const cChunkCoords & a_Chunk, const cParsedNBT
int Level = a_NBT.FindChildByName(0, "Level");
if (Level < 0)
{
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "Missing NBT tag: Level", a_RawChunkData);
return false;
}
int Sections = a_NBT.FindChildByName(Level, "Sections");
if ((Sections < 0) || (a_NBT.GetType(Sections) != TAG_List))
{
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "Missing NBT tag: Sections", a_RawChunkData);
return false;
}
eTagType SectionsType = a_NBT.GetChildrenType(Sections);
if ((SectionsType != TAG_Compound) && (SectionsType != TAG_End))
{
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "NBT tag has wrong type: Sections", a_RawChunkData);
return false;
}
for (int Child = a_NBT.GetFirstChild(Sections); Child >= 0; Child = a_NBT.GetNextSibling(Child))
@@ -1888,7 +1921,7 @@ void cWSSAnvil::LoadArrowFromNBT(cEntityList & a_Entities, const cParsedNBT & a_
case TAG_Short:
{
// Vanilla uses this
Arrow->SetBlockHit(Vector3i(static_cast<int>(a_NBT.GetShort(InBlockXIdx)), static_cast<int>(a_NBT.GetShort(InBlockYIdx)), static_cast<int>(a_NBT.GetShort(InBlockZIdx))));
Arrow->SetBlockHit(Vector3i(a_NBT.GetShort(InBlockXIdx), a_NBT.GetShort(InBlockYIdx), a_NBT.GetShort(InBlockZIdx)));
break;
}
default:
@@ -3040,7 +3073,8 @@ bool cWSSAnvil::GetBlockEntityNBTPos(const cParsedNBT & a_NBT, int a_TagIdx, int
////////////////////////////////////////////////////////////////////////////////
// cWSSAnvil::cMCAFile:
cWSSAnvil::cMCAFile::cMCAFile(const AString & a_FileName, int a_RegionX, int a_RegionZ) :
cWSSAnvil::cMCAFile::cMCAFile(cWSSAnvil & a_ParentSchema, const AString & a_FileName, int a_RegionX, int a_RegionZ) :
m_ParentSchema(a_ParentSchema),
m_RegionX(a_RegionX),
m_RegionZ(a_RegionZ),
m_FileName(a_FileName)
@@ -3138,38 +3172,45 @@ bool cWSSAnvil::cMCAFile::GetChunkData(const cChunkCoords & a_Chunk, AString & a
return false;
}
m_File.Seek(static_cast<int>(ChunkOffset) * 4096);
m_File.Seek(static_cast<int>(ChunkOffset * 4096));
int ChunkSize = 0;
UInt32 ChunkSize = 0;
if (m_File.Read(&ChunkSize, 4) != 4)
{
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
m_ParentSchema.ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "Cannot read chunk size", "");
return false;
}
ChunkSize = ntohl(static_cast<u_long>(ChunkSize));
ChunkSize = ntohl(ChunkSize);
if (ChunkSize < 1)
{
// Chunk size too small
m_ParentSchema.ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "Chunk size too small", "");
return false;
}
char CompressionType = 0;
if (m_File.Read(&CompressionType, 1) != 1)
{
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
return false;
}
if (CompressionType != 2)
{
// Chunk is in an unknown compression
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
m_ParentSchema.ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "Cannot read chunk compression", "");
return false;
}
ChunkSize--;
// HACK: This depends on the internal knowledge that AString's data() function returns the internal buffer directly
a_Data.assign(static_cast<size_t>(ChunkSize), '\0');
if (static_cast<size_t>(m_File.Read(static_cast<void *>(const_cast<char*>(a_Data.data())), static_cast<size_t>(ChunkSize))) == static_cast<size_t>(ChunkSize))
a_Data = m_File.Read(ChunkSize);
if (a_Data.size() != ChunkSize)
{
m_ParentSchema.ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, "Cannot read entire chunk data", a_Data);
return false;
}
if (CompressionType != 2)
{
// Chunk is in an unknown compression
m_ParentSchema.ChunkLoadFailed(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ, Printf("Unknown chunk compression: %d", CompressionType).c_str(), a_Data);
return false;
}
return true;
}
LOAD_FAILED(a_Chunk.m_ChunkX, a_Chunk.m_ChunkZ);
return false;
}
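
For reference, the chunk record that GetChunkData parses above is laid out as a 4-byte big-endian length (which counts the compression-type byte), one compression-type byte (2 = zlib in vanilla region files), and then length - 1 bytes of compressed chunk data. A minimal standalone parse of that record from an in-memory buffer, independent of the cFile-based code above:

#include <cstdint>
#include <string>

// Parses one MCA chunk record (a_Record starts at the record's 4 KiB sector).
// Returns false if the record is zero-length or truncated.
bool ParseMCAChunkRecord(const std::string & a_Record, char & a_CompressionType, std::string & a_CompressedData)
{
	if (a_Record.size() < 5)
	{
		return false;  // not even a full record header
	}
	// 4-byte big-endian length, which includes the compression-type byte:
	uint32_t Size =
		(static_cast<uint32_t>(static_cast<unsigned char>(a_Record[0])) << 24) |
		(static_cast<uint32_t>(static_cast<unsigned char>(a_Record[1])) << 16) |
		(static_cast<uint32_t>(static_cast<unsigned char>(a_Record[2])) << 8) |
		static_cast<uint32_t>(static_cast<unsigned char>(a_Record[3]));
	if ((Size < 1) || (a_Record.size() - 4 < Size))
	{
		return false;  // zero-length or truncated record
	}
	a_CompressionType = a_Record[4];                   // 2 = zlib in vanilla files
	a_CompressedData = a_Record.substr(5, Size - 1);   // the remaining Size - 1 bytes
	return true;
}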

View File

@@ -29,13 +29,13 @@ class cWolf;
enum
{
/// Maximum number of chunks in an MCA file - also the count of the header items
/** Maximum number of chunks in an MCA file - also the count of the header items */
MCA_MAX_CHUNKS = 32 * 32,
/// The MCA header is 8 KiB
/** The MCA header is 8 KiB */
MCA_HEADER_SIZE = MCA_MAX_CHUNKS * 8,
/// There are 5 bytes of header in front of each chunk
/** There are 5 bytes of header in front of each chunk */
MCA_CHUNK_HEADER_LENGTH = 5,
} ;
@@ -59,7 +59,7 @@ protected:
{
public:
cMCAFile(const AString & a_FileName, int a_RegionX, int a_RegionZ);
cMCAFile(cWSSAnvil & a_ParentSchema, const AString & a_FileName, int a_RegionX, int a_RegionZ);
bool GetChunkData (const cChunkCoords & a_Chunk, AString & a_Data);
bool SetChunkData (const cChunkCoords & a_Chunk, const AString & a_Data);
@@ -70,6 +70,8 @@ protected:
const AString & GetFileName(void) const {return m_FileName; }
protected:
cWSSAnvil & m_ParentSchema;
int m_RegionX;
int m_RegionZ;
@@ -83,10 +85,10 @@ protected:
// Chunk timestamps, following the chunk headers
unsigned m_TimeStamps[MCA_MAX_CHUNKS];
/// Finds a free location large enough to hold a_Data. Gets a hint of the chunk coords, places the data there if it fits. Returns the sector number.
/** Finds a free location large enough to hold a_Data. Gets a hint of the chunk coords, places the data there if it fits. Returns the sector number. */
unsigned FindFreeLocation(int a_LocalX, int a_LocalZ, const AString & a_Data);
/// Opens a MCA file either for a Read operation (fails if doesn't exist) or for a Write operation (creates new if not found)
/** Opens a MCA file either for a Read operation (fails if doesn't exist) or for a Write operation (creates new if not found) */
bool OpenFile(bool a_IsForReading);
} ;
typedef std::list<cMCAFile *> cMCAFiles;
@@ -96,47 +98,51 @@ protected:
int m_CompressionFactor;
/// Gets chunk data from the correct file; locks file CS as needed
/** Reports that the specified chunk failed to load and saves the chunk data to an external file. */
void ChunkLoadFailed(int a_ChunkX, int a_ChunkZ, const AString & a_Reason, const AString & a_ChunkDataToSave);
/** Gets chunk data from the correct file; locks file CS as needed */
bool GetChunkData(const cChunkCoords & a_Chunk, AString & a_Data);
/// Sets chunk data into the correct file; locks file CS as needed
/** Sets chunk data into the correct file; locks file CS as needed */
bool SetChunkData(const cChunkCoords & a_Chunk, const AString & a_Data);
/// Loads the chunk from the data (no locking needed)
/** Loads the chunk from the data (no locking needed) */
bool LoadChunkFromData(const cChunkCoords & a_Chunk, const AString & a_Data);
/// Saves the chunk into datastream (no locking needed)
/** Saves the chunk into datastream (no locking needed) */
bool SaveChunkToData(const cChunkCoords & a_Chunk, AString & a_Data);
/// Loads the chunk from NBT data (no locking needed)
bool LoadChunkFromNBT(const cChunkCoords & a_Chunk, const cParsedNBT & a_NBT);
/** Loads the chunk from NBT data (no locking needed).
a_RawChunkData is the raw (compressed) chunk data, used for offloading when chunk loading fails. */
bool LoadChunkFromNBT(const cChunkCoords & a_Chunk, const cParsedNBT & a_NBT, const AString & a_RawChunkData);
/// Saves the chunk into NBT data using a_Writer; returns true on success
/** Saves the chunk into NBT data using a_Writer; returns true on success */
bool SaveChunkToNBT(const cChunkCoords & a_Chunk, cFastNBTWriter & a_Writer);
/// Loads the chunk's biome map from vanilla-format; returns a_BiomeMap if biomes present and valid, nullptr otherwise
/** Loads the chunk's biome map from vanilla-format; returns a_BiomeMap if biomes present and valid, nullptr otherwise */
cChunkDef::BiomeMap * LoadVanillaBiomeMapFromNBT(cChunkDef::BiomeMap * a_BiomeMap, const cParsedNBT & a_NBT, int a_TagIdx);
/// Loads the chunk's biome map from MCS format; returns a_BiomeMap if biomes present and valid, nullptr otherwise
/** Loads the chunk's biome map from MCS format; returns a_BiomeMap if biomes present and valid, nullptr otherwise */
cChunkDef::BiomeMap * LoadBiomeMapFromNBT(cChunkDef::BiomeMap * a_BiomeMap, const cParsedNBT & a_NBT, int a_TagIdx);
/// Loads the chunk's entities from NBT data (a_Tag is the Level\\Entities list tag; may be -1)
/** Loads the chunk's entities from NBT data (a_Tag is the Level\\Entities list tag; may be -1) */
void LoadEntitiesFromNBT(cEntityList & a_Entitites, const cParsedNBT & a_NBT, int a_Tag);
/// Loads the chunk's BlockEntities from NBT data (a_Tag is the Level\\TileEntities list tag; may be -1)
/** Loads the chunk's BlockEntities from NBT data (a_Tag is the Level\\TileEntities list tag; may be -1) */
void LoadBlockEntitiesFromNBT(cBlockEntityList & a_BlockEntitites, const cParsedNBT & a_NBT, int a_Tag, BLOCKTYPE * a_BlockTypes, NIBBLETYPE * a_BlockMetas);
/** Loads the data for a block entity from the specified NBT tag.
Returns the loaded block entity, or nullptr upon failure. */
cBlockEntity * LoadBlockEntityFromNBT(const cParsedNBT & a_NBT, int a_Tag, int a_BlockX, int a_BlockY, int a_BlockZ, BLOCKTYPE a_BlockType, NIBBLETYPE a_BlockMeta);
/// Loads a cItem contents from the specified NBT tag; returns true if successful. Doesn't load the Slot tag
/** Loads a cItem contents from the specified NBT tag; returns true if successful. Doesn't load the Slot tag */
bool LoadItemFromNBT(cItem & a_Item, const cParsedNBT & a_NBT, int a_TagIdx);
/** Loads contentents of an Items[] list tag into a cItemGrid
ItemGrid begins at the specified slot offset
Slots outside the ItemGrid range are ignored
*/
Slots outside the ItemGrid range are ignored */
void LoadItemGridFromNBT(cItemGrid & a_ItemGrid, const cParsedNBT & a_NBT, int a_ItemsTagIdx, int s_SlotOffset = 0);
/** Returns true iff the "id" child tag inside the specified tag equals the specified expected type. */
@@ -217,28 +223,28 @@ protected:
/** Loads the wolf's owner information from the NBT into the specified wolf entity. */
void LoadWolfOwner(cWolf & a_Wolf, const cParsedNBT & a_NBT, int a_TagIdx);
/// Loads entity common data from the NBT compound; returns true if successful
/** Loads entity common data from the NBT compound; returns true if successful */
bool LoadEntityBaseFromNBT(cEntity & a_Entity, const cParsedNBT & a_NBT, int a_TagIdx);
/// Loads monster common data from the NBT compound; returns true if successful
/** Loads monster common data from the NBT compound; returns true if successful */
bool LoadMonsterBaseFromNBT(cMonster & a_Monster, const cParsedNBT & a_NBT, int a_TagIdx);
/// Loads projectile common data from the NBT compound; returns true if successful
/** Loads projectile common data from the NBT compound; returns true if successful */
bool LoadProjectileBaseFromNBT(cProjectileEntity & a_Entity, const cParsedNBT & a_NBT, int a_TagIx);
/// Loads an array of doubles of the specified length from the specified NBT list tag a_TagIdx; returns true if successful
/** Loads an array of doubles of the specified length from the specified NBT list tag a_TagIdx; returns true if successful */
bool LoadDoublesListFromNBT(double * a_Doubles, int a_NumDoubles, const cParsedNBT & a_NBT, int a_TagIdx);
/// Loads an array of floats of the specified length from the specified NBT list tag a_TagIdx; returns true if successful
/** Loads an array of floats of the specified length from the specified NBT list tag a_TagIdx; returns true if successful */
bool LoadFloatsListFromNBT(float * a_Floats, int a_NumFloats, const cParsedNBT & a_NBT, int a_TagIdx);
/// Helper function for extracting the X, Y, and Z int subtags of a NBT compound; returns true if successful
/** Helper function for extracting the X, Y, and Z int subtags of a NBT compound; returns true if successful */
bool GetBlockEntityNBTPos(const cParsedNBT & a_NBT, int a_TagIdx, int & a_X, int & a_Y, int & a_Z);
/// Gets the correct MCA file either from cache or from disk, manages the m_MCAFiles cache; assumes m_CS is locked
/** Gets the correct MCA file either from cache or from disk, manages the m_MCAFiles cache; assumes m_CS is locked */
cMCAFile * LoadMCAFile(const cChunkCoords & a_Chunk);
/// Copies a_Length bytes of data from the specified NBT Tag's Child into the a_Destination buffer
/** Copies a_Length bytes of data from the specified NBT Tag's Child into the a_Destination buffer */
void CopyNBTData(const cParsedNBT & a_NBT, int a_Tag, const AString & a_ChildName, char * a_Destination, size_t a_Length);
// cWSSchema overrides: