mirror of https://github.com/OpenDiablo2/OpenDiablo2
synced 2025-02-02 14:46:28 -05:00

d2mpq refactored (#1020)

* d2mpq refactor
* d2mpq refactor last standing lint error
* d2mpq refactor: less linter noise
* d2mpq refactor: more linter issues

This commit is contained in:
parent 5cd404e4a5
commit db83814527

d2common/d2fileformats/d2mpq/crypto.go (new file, 131 lines)
@ -0,0 +1,131 @@
package d2mpq

import (
    "encoding/binary"
    "io"
    "strings"
)

var cryptoBuffer [0x500]uint32 //nolint:gochecknoglobals // will fix later..
var cryptoBufferReady bool     //nolint:gochecknoglobals // will fix later..

func cryptoLookup(index uint32) uint32 {
    if !cryptoBufferReady {
        cryptoInitialize()

        cryptoBufferReady = true
    }

    return cryptoBuffer[index]
}

//nolint:gomnd // Decryption magic
func cryptoInitialize() {
    seed := uint32(0x00100001)

    for index1 := 0; index1 < 0x100; index1++ {
        index2 := index1

        for i := 0; i < 5; i++ {
            seed = (seed*125 + 3) % 0x2AAAAB
            temp1 := (seed & 0xFFFF) << 0x10
            seed = (seed*125 + 3) % 0x2AAAAB
            temp2 := seed & 0xFFFF
            cryptoBuffer[index2] = temp1 | temp2
            index2 += 0x100
        }
    }
}

//nolint:gomnd // Decryption magic
func decrypt(data []uint32, seed uint32) {
    seed2 := uint32(0xeeeeeeee)

    for i := 0; i < len(data); i++ {
        seed2 += cryptoLookup(0x400 + (seed & 0xff))
        result := data[i]
        result ^= seed + seed2

        seed = ((^seed << 21) + 0x11111111) | (seed >> 11)
        seed2 = result + seed2 + (seed2 << 5) + 3
        data[i] = result
    }
}

//nolint:gomnd // Decryption magic
func decryptBytes(data []byte, seed uint32) {
    seed2 := uint32(0xEEEEEEEE)
    for i := 0; i < len(data)-3; i += 4 {
        seed2 += cryptoLookup(0x400 + (seed & 0xFF))
        result := binary.LittleEndian.Uint32(data[i : i+4])
        result ^= seed + seed2
        seed = ((^seed << 21) + 0x11111111) | (seed >> 11)
        seed2 = result + seed2 + (seed2 << 5) + 3

        data[i+0] = uint8(result & 0xff)
        data[i+1] = uint8((result >> 8) & 0xff)
        data[i+2] = uint8((result >> 16) & 0xff)
        data[i+3] = uint8((result >> 24) & 0xff)
    }
}

//nolint:gomnd // Decryption magic
func decryptTable(r io.Reader, size uint32, name string) ([]uint32, error) {
    seed := hashString(name, 3)
    seed2 := uint32(0xEEEEEEEE)
    size *= 4

    table := make([]uint32, size)
    buf := make([]byte, 4)

    for i := uint32(0); i < size; i++ {
        seed2 += cryptoBuffer[0x400+(seed&0xff)]

        if _, err := r.Read(buf); err != nil {
            return table, err
        }

        result := binary.LittleEndian.Uint32(buf)
        result ^= seed + seed2

        seed = ((^seed << 21) + 0x11111111) | (seed >> 11)
        seed2 = result + seed2 + (seed2 << 5) + 3
        table[i] = result
    }

    return table, nil
}

func hashFilename(key string) uint64 {
    a, b := hashString(key, 1), hashString(key, 2)
    return uint64(a)<<32 | uint64(b)
}

//nolint:gomnd // Decryption magic
func hashString(key string, hashType uint32) uint32 {
    seed1 := uint32(0x7FED7FED)
    seed2 := uint32(0xEEEEEEEE)

    /* prepare seeds. */
    for _, char := range strings.ToUpper(key) {
        seed1 = cryptoLookup((hashType*0x100)+uint32(char)) ^ (seed1 + seed2)
        seed2 = uint32(char) + seed1 + seed2 + (seed2 << 5) + 3
    }

    return seed1
}

//nolint:unused,deadcode,gomnd // will use this for creating mpq's
func encrypt(data []uint32, seed uint32) {
    seed2 := uint32(0xeeeeeeee)

    for i := 0; i < len(data); i++ {
        seed2 += cryptoLookup(0x400 + (seed & 0xff))
        result := data[i]
        result ^= seed + seed2

        seed = ((^seed << 21) + 0x11111111) | (seed >> 11)
        seed2 = data[i] + seed2 + (seed2 << 5) + 3
        data[i] = result
    }
}
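For orientation, a small sketch (not part of the commit) of how these unexported helpers combine inside the package: hashFilename builds the 64-bit key used for hash-table lookups, and hashString with hash type 3 derives the seed that decryptTable uses for the "(hash table)" and "(block table)" tables. The file path below is a made-up example.

package d2mpq

import "fmt"

// Illustrative only (same package, since the helpers are unexported).
func exampleCryptoHelpers() {
    key := hashFilename(`data\global\excel\armor.txt`) // hashString(path, 1)<<32 | hashString(path, 2)
    seed := hashString("(hash table)", 3)              // seed decryptTable derives for the hash table

    fmt.Printf("map key: %016X, hash table seed: %08X\n", key, seed)
}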
@ -1,32 +0,0 @@
package d2mpq

var cryptoBuffer [0x500]uint32 //nolint:gochecknoglobals // will fix later..
var cryptoBufferReady bool     //nolint:gochecknoglobals // will fix later..

func cryptoLookup(index uint32) uint32 {
    if !cryptoBufferReady {
        cryptoInitialize()

        cryptoBufferReady = true
    }

    return cryptoBuffer[index]
}

//nolint:gomnd // magic cryptographic stuff here...
func cryptoInitialize() {
    seed := uint32(0x00100001)

    for index1 := 0; index1 < 0x100; index1++ {
        index2 := index1

        for i := 0; i < 5; i++ {
            seed = (seed*125 + 3) % 0x2AAAAB
            temp1 := (seed & 0xFFFF) << 0x10
            seed = (seed*125 + 3) % 0x2AAAAB
            temp2 := seed & 0xFFFF
            cryptoBuffer[index2] = temp1 | temp2
            index2 += 0x100
        }
    }
}
@ -1,35 +0,0 @@
package d2mpq

// HashEntryMap represents a hash entry map
type HashEntryMap struct {
    entries map[uint64]HashTableEntry
}

// Insert inserts a hash entry into the table
func (hem *HashEntryMap) Insert(entry *HashTableEntry) {
    if hem.entries == nil {
        hem.entries = make(map[uint64]HashTableEntry)
    }

    hem.entries[uint64(entry.NamePartA)<<32|uint64(entry.NamePartB)] = *entry
}

// Find finds a hash entry
func (hem *HashEntryMap) Find(fileName string) (*HashTableEntry, bool) {
    if hem.entries == nil {
        return nil, false
    }

    hashA := hashString(fileName, 1)
    hashB := hashString(fileName, 2)

    entry, found := hem.entries[uint64(hashA)<<32|uint64(hashB)]

    return &entry, found
}

// Contains returns true if the hash entry contains the values
func (hem *HashEntryMap) Contains(fileName string) bool {
    _, found := hem.Find(fileName)
    return found
}
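For context: the HashEntryMap removed above is superseded by a plain map[uint64]*Hash keyed by hashFilename, as introduced in mpq.go and mpq_hash.go below. A minimal sketch (not from the commit) of the equivalent lookup under that assumption:

package d2mpq

// Rough in-package equivalent of the old HashEntryMap.Find,
// using the hashes map added later in this commit.
func (mpq *MPQ) findHash(fileName string) (*Hash, bool) {
    h, ok := mpq.hashes[hashFilename(fileName)]
    return h, ok
}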
@ -2,10 +2,9 @@ package d2mpq
 import (
     "bufio"
-    "encoding/binary"
     "errors"
+    "fmt"
     "io/ioutil"
-    "log"
     "os"
     "path"
     "path/filepath"
@ -19,33 +18,11 @@ var _ d2interface.Archive = &MPQ{} // Static check to confirm struct conforms to
 // MPQ represents an MPQ archive
 type MPQ struct {
     filePath string
     file     *os.File
-    hashEntryMap      HashEntryMap
-    blockTableEntries []BlockTableEntry
-    data              Data
-}
-
-// Data Represents a MPQ file
-type Data struct {
-    Magic             [4]byte
-    HeaderSize        uint32
-    ArchiveSize       uint32
-    FormatVersion     uint16
-    BlockSize         uint16
-    HashTableOffset   uint32
-    BlockTableOffset  uint32
-    HashTableEntries  uint32
-    BlockTableEntries uint32
-}
-
-// HashTableEntry represents a hashed file entry in the MPQ file
-type HashTableEntry struct { // 16 bytes
-    NamePartA  uint32
-    NamePartB  uint32
-    Locale     uint16
-    Platform   uint16
-    BlockIndex uint32
+    hashes   map[uint64]*Hash
+    blocks   []*Block
+    header   Header
 }
 
 // PatchInfo represents patch info for the MPQ.
@ -53,71 +30,153 @@ type PatchInfo struct {
     Length   uint32   // Length of patch info header, in bytes
     Flags    uint32   // Flags. 0x80000000 = MD5 (?)
     DataSize uint32   // Uncompressed size of the patch file
-    Md5      [16]byte // MD5 of the entire patch file after decompression
+    MD5      [16]byte // MD5 of the entire patch file after decompression
 }
 
-// FileFlag represents flags for a file record in the MPQ archive
-type FileFlag uint32
-
-const (
-    // FileImplode - File is compressed using PKWARE Data compression library
-    FileImplode FileFlag = 0x00000100
-    // FileCompress - File is compressed using combination of compression methods
-    FileCompress FileFlag = 0x00000200
-    // FileEncrypted - The file is encrypted
-    FileEncrypted FileFlag = 0x00010000
-    // FileFixKey - The decryption key for the file is altered according to the position of the file in the archive
-    FileFixKey FileFlag = 0x00020000
-    // FilePatchFile - The file contains incremental patch for an existing file in base MPQ
-    FilePatchFile FileFlag = 0x00100000
-    // FileSingleUnit - Instead of being divided to 0x1000-bytes blocks, the file is stored as single unit
-    FileSingleUnit FileFlag = 0x01000000
-    // FileDeleteMarker - File is a deletion marker, indicating that the file no longer exists. This is used to allow patch
-    // archives to delete files present in lower-priority archives in the search chain. The file usually
-    // has length of 0 or 1 byte and its name is a hash
-    FileDeleteMarker FileFlag = 0x02000000
-    // FileSectorCrc - File has checksums for each sector. Ignored if file is not compressed or imploded.
-    FileSectorCrc FileFlag = 0x04000000
-    // FileExists - Set if file exists, reset when the file was deleted
-    FileExists FileFlag = 0x80000000
-)
-
-// BlockTableEntry represents an entry in the block table
-type BlockTableEntry struct { // 16 bytes
-    FilePosition         uint32
-    CompressedFileSize   uint32
-    UncompressedFileSize uint32
-    Flags                FileFlag
-    // Local Stuff...
-    FileName       string
-    EncryptionSeed uint32
-}
-
-// HasFlag returns true if the specified flag is present
-func (v BlockTableEntry) HasFlag(flag FileFlag) bool {
-    return (v.Flags & flag) != 0
-}
-
-// Load loads an MPQ file and returns a MPQ structure
-func Load(fileName string) (d2interface.Archive, error) {
-    result := &MPQ{filePath: fileName}
+// New loads an MPQ file and only reads the header
+func New(fileName string) (*MPQ, error) {
+    mpq := &MPQ{filePath: fileName}
 
     var err error
     if runtime.GOOS == "linux" {
-        result.file, err = openIgnoreCase(fileName)
+        mpq.file, err = openIgnoreCase(fileName)
     } else {
-        result.file, err = os.Open(fileName) //nolint:gosec // Will fix later
+        mpq.file, err = os.Open(fileName) //nolint:gosec // Will fix later
     }
 
     if err != nil {
         return nil, err
     }
 
-    if err := result.readHeader(); err != nil {
-        return nil, err
-    }
-
-    return result, nil
+    if err := mpq.readHeader(); err != nil {
+        return nil, fmt.Errorf("failed to read reader: %v", err)
+    }
+
+    return mpq, nil
+}
+
+// FromFile loads an MPQ file and returns a MPQ structure
+func FromFile(fileName string) (*MPQ, error) {
+    mpq, err := New(fileName)
+    if err != nil {
+        return nil, err
+    }
+
+    if err := mpq.readHashTable(); err != nil {
+        return nil, fmt.Errorf("failed to read hash table: %v", err)
+    }
+
+    if err := mpq.readBlockTable(); err != nil {
+        return nil, fmt.Errorf("failed to read block table: %v", err)
+    }
+
+    return mpq, nil
+}
+
+// getFileBlockData gets a block table entry
+func (mpq *MPQ) getFileBlockData(fileName string) (*Block, error) {
+    fileEntry, ok := mpq.hashes[hashFilename(fileName)]
+    if !ok {
+        return nil, errors.New("file not found")
+    }
+
+    if fileEntry.BlockIndex >= uint32(len(mpq.blocks)) {
+        return nil, errors.New("invalid block index")
+    }
+
+    return mpq.blocks[fileEntry.BlockIndex], nil
+}
+
+// Close closes the MPQ file
+func (mpq *MPQ) Close() error {
+    return mpq.file.Close()
+}
+
+// ReadFile reads a file from the MPQ and returns a memory stream
+func (mpq *MPQ) ReadFile(fileName string) ([]byte, error) {
+    fileBlockData, err := mpq.getFileBlockData(fileName)
+    if err != nil {
+        return []byte{}, err
+    }
+
+    fileBlockData.FileName = strings.ToLower(fileName)
+
+    stream, err := CreateStream(mpq, fileBlockData, fileName)
+    if err != nil {
+        return []byte{}, err
+    }
+
+    buffer := make([]byte, fileBlockData.UncompressedFileSize)
+    if _, err := stream.Read(buffer, 0, fileBlockData.UncompressedFileSize); err != nil {
+        return []byte{}, err
+    }
+
+    return buffer, nil
+}
+
+// ReadFileStream reads the mpq file data and returns a stream
+func (mpq *MPQ) ReadFileStream(fileName string) (d2interface.DataStream, error) {
+    fileBlockData, err := mpq.getFileBlockData(fileName)
+    if err != nil {
+        return nil, err
+    }
+
+    fileBlockData.FileName = strings.ToLower(fileName)
+
+    stream, err := CreateStream(mpq, fileBlockData, fileName)
+    if err != nil {
+        return nil, err
+    }
+
+    return &MpqDataStream{stream: stream}, nil
+}
+
+// ReadTextFile reads a file and returns it as a string
+func (mpq *MPQ) ReadTextFile(fileName string) (string, error) {
+    data, err := mpq.ReadFile(fileName)
+
+    if err != nil {
+        return "", err
+    }
+
+    return string(data), nil
+}
+
+// Listfile returns the list of files in this MPQ
+func (mpq *MPQ) Listfile() ([]string, error) {
+    data, err := mpq.ReadFile("(listfile)")
+
+    if err != nil {
+        return nil, err
+    }
+
+    raw := strings.TrimRight(string(data), "\x00")
+    s := bufio.NewScanner(strings.NewReader(raw))
+
+    var filePaths []string
+
+    for s.Scan() {
+        filePath := s.Text()
+        filePaths = append(filePaths, filePath)
+    }
+
+    return filePaths, nil
+}
+
+// Path returns the MPQ file path
+func (mpq *MPQ) Path() string {
+    return mpq.filePath
+}
+
+// Contains returns bool for whether the given filename exists in the mpq
+func (mpq *MPQ) Contains(filename string) bool {
+    _, ok := mpq.hashes[hashFilename(filename)]
+    return ok
+}
+
+// Size returns the size of the mpq in bytes
+func (mpq *MPQ) Size() uint32 {
+    return mpq.header.ArchiveSize
 }
 
 func openIgnoreCase(mpqPath string) (*os.File, error) {
@ -142,258 +201,5 @@ func openIgnoreCase(mpqPath string) (*os.File, error) {
         }
     }
 
-    file, err := os.Open(path.Join(mpqDir, mpqName)) //nolint:gosec // Will fix later
-
-    return file, err
-}
-
-func (v *MPQ) readHeader() error {
-    err := binary.Read(v.file, binary.LittleEndian, &v.data)
-
-    if err != nil {
-        return err
-    }
-
-    if string(v.data.Magic[:]) != "MPQ\x1A" {
-        return errors.New("invalid mpq header")
-    }
-
-    err = v.loadHashTable()
-    if err != nil {
-        return err
-    }
-
-    v.loadBlockTable()
-
-    return nil
-}
-
-func (v *MPQ) loadHashTable() error {
-    _, err := v.file.Seek(int64(v.data.HashTableOffset), 0)
-    if err != nil {
-        log.Panic(err)
-    }
-
-    hashData := make([]uint32, v.data.HashTableEntries*4) //nolint:gomnd // // Decryption magic
-    hash := make([]byte, 4)
-
-    for i := range hashData {
-        _, err := v.file.Read(hash)
-        if err != nil {
-            log.Print(err)
-        }
-
-        hashData[i] = binary.LittleEndian.Uint32(hash)
-    }
-
-    decrypt(hashData, hashString("(hash table)", 3))
-
-    for i := uint32(0); i < v.data.HashTableEntries; i++ {
-        v.hashEntryMap.Insert(&HashTableEntry{
-            NamePartA: hashData[i*4],
-            NamePartB: hashData[(i*4)+1],
-            // https://github.com/OpenDiablo2/OpenDiablo2/issues/812
-            Locale:     uint16(hashData[(i*4)+2] >> 16),    //nolint:gomnd // // binary data
-            Platform:   uint16(hashData[(i*4)+2] & 0xFFFF), //nolint:gomnd // // binary data
-            BlockIndex: hashData[(i*4)+3],
-        })
-    }
-
-    return nil
-}
-
-func (v *MPQ) loadBlockTable() {
-    _, err := v.file.Seek(int64(v.data.BlockTableOffset), 0)
-    if err != nil {
-        log.Panic(err)
-    }
-
-    blockData := make([]uint32, v.data.BlockTableEntries*4) //nolint:gomnd // // binary data
-    hash := make([]byte, 4)
-
-    for i := range blockData {
-        _, err = v.file.Read(hash) //nolint:errcheck // Will fix later
-        if err != nil {
-            log.Print(err)
-        }
-
-        blockData[i] = binary.LittleEndian.Uint32(hash)
-    }
-
-    decrypt(blockData, hashString("(block table)", 3))
-
-    for i := uint32(0); i < v.data.BlockTableEntries; i++ {
-        v.blockTableEntries = append(v.blockTableEntries, BlockTableEntry{
-            FilePosition:         blockData[(i * 4)],
-            CompressedFileSize:   blockData[(i*4)+1],
-            UncompressedFileSize: blockData[(i*4)+2],
-            Flags:                FileFlag(blockData[(i*4)+3]),
-        })
-    }
-}
-
-func decrypt(data []uint32, seed uint32) {
-    seed2 := uint32(0xeeeeeeee) //nolint:gomnd // Decryption magic
-
-    for i := 0; i < len(data); i++ {
-        seed2 += cryptoLookup(0x400 + (seed & 0xff)) //nolint:gomnd // Decryption magic
-        result := data[i]
-        result ^= seed + seed2
-
-        seed = ((^seed << 21) + 0x11111111) | (seed >> 11)
-        seed2 = result + seed2 + (seed2 << 5) + 3 //nolint:gomnd // Decryption magic
-        data[i] = result
-    }
-}
-
-func decryptBytes(data []byte, seed uint32) {
-    seed2 := uint32(0xEEEEEEEE) //nolint:gomnd // Decryption magic
-    for i := 0; i < len(data)-3; i += 4 {
-        seed2 += cryptoLookup(0x400 + (seed & 0xFF)) //nolint:gomnd // Decryption magic
-        result := binary.LittleEndian.Uint32(data[i : i+4])
-        result ^= seed + seed2
-        seed = ((^seed << 21) + 0x11111111) | (seed >> 11)
-        seed2 = result + seed2 + (seed2 << 5) + 3 //nolint:gomnd // Decryption magic
-
-        data[i+0] = uint8(result & 0xff)         //nolint:gomnd // Decryption magic
-        data[i+1] = uint8((result >> 8) & 0xff)  //nolint:gomnd // Decryption magic
-        data[i+2] = uint8((result >> 16) & 0xff) //nolint:gomnd // Decryption magic
-        data[i+3] = uint8((result >> 24) & 0xff) //nolint:gomnd // Decryption magic
-    }
-}
-
-func hashString(key string, hashType uint32) uint32 {
-    seed1 := uint32(0x7FED7FED) //nolint:gomnd // Decryption magic
-    seed2 := uint32(0xEEEEEEEE) //nolint:gomnd // Decryption magic
-
-    /* prepare seeds. */
-    for _, char := range strings.ToUpper(key) {
-        seed1 = cryptoLookup((hashType*0x100)+uint32(char)) ^ (seed1 + seed2)
-        seed2 = uint32(char) + seed1 + seed2 + (seed2 << 5) + 3 //nolint:gomnd // Decryption magic
-    }
-
-    return seed1
-}
-
-// GetFileBlockData gets a block table entry
-func (v *MPQ) getFileBlockData(fileName string) (BlockTableEntry, error) {
-    fileEntry, found := v.hashEntryMap.Find(fileName)
-
-    if !found || fileEntry.BlockIndex >= uint32(len(v.blockTableEntries)) {
-        return BlockTableEntry{}, errors.New("file not found")
-    }
-
-    return v.blockTableEntries[fileEntry.BlockIndex], nil
-}
-
-// Close closes the MPQ file
-func (v *MPQ) Close() {
-    err := v.file.Close()
-    if err != nil {
-        log.Panic(err)
-    }
-}
-
-// FileExists checks the mpq to see if the file exists
-func (v *MPQ) FileExists(fileName string) bool {
-    return v.hashEntryMap.Contains(fileName)
-}
-
-// ReadFile reads a file from the MPQ and returns a memory stream
-func (v *MPQ) ReadFile(fileName string) ([]byte, error) {
-    fileBlockData, err := v.getFileBlockData(fileName)
-    if err != nil {
-        return []byte{}, err
-    }
-
-    fileBlockData.FileName = strings.ToLower(fileName)
-
-    fileBlockData.calculateEncryptionSeed()
-    mpqStream, err := CreateStream(v, fileBlockData, fileName)
-
-    if err != nil {
-        return []byte{}, err
-    }
-
-    buffer := make([]byte, fileBlockData.UncompressedFileSize)
-    mpqStream.Read(buffer, 0, fileBlockData.UncompressedFileSize)
-
-    return buffer, nil
-}
-
-// ReadFileStream reads the mpq file data and returns a stream
-func (v *MPQ) ReadFileStream(fileName string) (d2interface.DataStream, error) {
-    fileBlockData, err := v.getFileBlockData(fileName)
-
-    if err != nil {
-        return nil, err
-    }
-
-    fileBlockData.FileName = strings.ToLower(fileName)
-    fileBlockData.calculateEncryptionSeed()
-
-    mpqStream, err := CreateStream(v, fileBlockData, fileName)
-    if err != nil {
-        return nil, err
-    }
-
-    return &MpqDataStream{stream: mpqStream}, nil
-}
-
-// ReadTextFile reads a file and returns it as a string
-func (v *MPQ) ReadTextFile(fileName string) (string, error) {
-    data, err := v.ReadFile(fileName)
-
-    if err != nil {
-        return "", err
-    }
-
-    return string(data), nil
-}
-
-func (v *BlockTableEntry) calculateEncryptionSeed() {
-    fileName := path.Base(v.FileName)
-    v.EncryptionSeed = hashString(fileName, 3)
-
-    if !v.HasFlag(FileFixKey) {
-        return
-    }
-
-    v.EncryptionSeed = (v.EncryptionSeed + v.FilePosition) ^ v.UncompressedFileSize
-}
-
-// GetFileList returns the list of files in this MPQ
-func (v *MPQ) GetFileList() ([]string, error) {
-    data, err := v.ReadFile("(listfile)")
-
-    if err != nil {
-        return nil, err
-    }
-
-    raw := strings.TrimRight(string(data), "\x00")
-    s := bufio.NewScanner(strings.NewReader(raw))
-
-    var filePaths []string
-
-    for s.Scan() {
-        filePath := s.Text()
-        filePaths = append(filePaths, filePath)
-    }
-
-    return filePaths, nil
-}
-
-// Path returns the MPQ file path
-func (v *MPQ) Path() string {
-    return v.filePath
-}
-
-// Contains returns bool for whether the given filename exists in the mpq
-func (v *MPQ) Contains(filename string) bool {
-    return v.hashEntryMap.Contains(filename)
-}
-
-// Size returns the size of the mpq in bytes
-func (v *MPQ) Size() uint32 {
-    return v.data.ArchiveSize
+    return os.Open(path.Join(mpqDir, mpqName)) //nolint:gosec // Will fix later
 }
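A minimal caller-side sketch (not part of the commit) of the refactored API shown above: New only reads the header, FromFile also loads the hash and block tables. The archive path is a made-up example.

package main

import (
    "log"

    "github.com/OpenDiablo2/OpenDiablo2/d2common/d2fileformats/d2mpq"
)

func main() {
    mpq, err := d2mpq.FromFile("d2data.mpq") // hypothetical path
    if err != nil {
        log.Fatal(err)
    }
    defer mpq.Close()

    files, err := mpq.Listfile()
    if err != nil {
        log.Fatal(err)
    }

    for _, f := range files {
        data, err := mpq.ReadFile(f)
        if err != nil {
            log.Printf("read %s: %v", f, err)
            continue
        }

        log.Printf("%s: %d bytes", f, len(data))
    }
}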
d2common/d2fileformats/d2mpq/mpq_block.go (new file, 77 lines)
@ -0,0 +1,77 @@
package d2mpq

import (
    "io"
    "strings"
)

// FileFlag represents flags for a file record in the MPQ archive
type FileFlag uint32

const (
    // FileImplode - File is compressed using PKWARE Data compression library
    FileImplode FileFlag = 0x00000100
    // FileCompress - File is compressed using combination of compression methods
    FileCompress FileFlag = 0x00000200
    // FileEncrypted - The file is encrypted
    FileEncrypted FileFlag = 0x00010000
    // FileFixKey - The decryption key for the file is altered according to the position of the file in the archive
    FileFixKey FileFlag = 0x00020000
    // FilePatchFile - The file contains incremental patch for an existing file in base MPQ
    FilePatchFile FileFlag = 0x00100000
    // FileSingleUnit - Instead of being divided to 0x1000-bytes blocks, the file is stored as single unit
    FileSingleUnit FileFlag = 0x01000000
    // FileDeleteMarker - File is a deletion marker, indicating that the file no longer exists. This is used to allow patch
    // archives to delete files present in lower-priority archives in the search chain. The file usually
    // has length of 0 or 1 byte and its name is a hash
    FileDeleteMarker FileFlag = 0x02000000
    // FileSectorCrc - File has checksums for each sector. Ignored if file is not compressed or imploded.
    FileSectorCrc FileFlag = 0x04000000
    // FileExists - Set if file exists, reset when the file was deleted
    FileExists FileFlag = 0x80000000
)

// Block represents an entry in the block table
type Block struct { // 16 bytes
    FilePosition         uint32
    CompressedFileSize   uint32
    UncompressedFileSize uint32
    Flags                FileFlag
    // Local Stuff...
    FileName       string
    EncryptionSeed uint32
}

// HasFlag returns true if the specified flag is present
func (b *Block) HasFlag(flag FileFlag) bool {
    return (b.Flags & flag) != 0
}

func (b *Block) calculateEncryptionSeed(fileName string) {
    fileName = fileName[strings.LastIndex(fileName, `\`)+1:]
    seed := hashString(fileName, 3)
    b.EncryptionSeed = (seed + b.FilePosition) ^ b.UncompressedFileSize
}

//nolint:gomnd // number
func (mpq *MPQ) readBlockTable() error {
    if _, err := mpq.file.Seek(int64(mpq.header.BlockTableOffset), io.SeekStart); err != nil {
        return err
    }

    blockData, err := decryptTable(mpq.file, mpq.header.BlockTableEntries, "(block table)")
    if err != nil {
        return err
    }

    for n, i := uint32(0), uint32(0); i < mpq.header.BlockTableEntries; n, i = n+4, i+1 {
        mpq.blocks = append(mpq.blocks, &Block{
            FilePosition:         blockData[n],
            CompressedFileSize:   blockData[n+1],
            UncompressedFileSize: blockData[n+2],
            Flags:                FileFlag(blockData[n+3]),
        })
    }

    return nil
}
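A quick illustration (not from the commit) of how the Flags field and HasFlag are meant to be read; the flag combination below is invented.

package d2mpq

import "fmt"

// Hypothetical block; real values come from readBlockTable above.
func exampleFlags() {
    b := &Block{Flags: FileCompress | FileEncrypted | FileExists}

    fmt.Println(b.HasFlag(FileEncrypted))  // true
    fmt.Println(b.HasFlag(FileSingleUnit)) // false
}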
@ -11,14 +11,14 @@ type MpqDataStream struct {
 
 // Read reads data from the data stream
 func (m *MpqDataStream) Read(p []byte) (n int, err error) {
-    totalRead := m.stream.Read(p, 0, uint32(len(p)))
-    return int(totalRead), nil
+    totalRead, err := m.stream.Read(p, 0, uint32(len(p)))
+    return int(totalRead), err
 }
 
 // Seek sets the position of the data stream
 func (m *MpqDataStream) Seek(offset int64, whence int) (int64, error) {
-    m.stream.CurrentPosition = uint32(offset + int64(whence))
-    return int64(m.stream.CurrentPosition), nil
+    m.stream.Position = uint32(offset + int64(whence))
+    return int64(m.stream.Position), nil
 }
 
 // Close closes the data stream
d2common/d2fileformats/d2mpq/mpq_hash.go (new file, 45 lines)
@ -0,0 +1,45 @@
package d2mpq

import "io"

// Hash represents a hashed file entry in the MPQ file
type Hash struct { // 16 bytes
    A          uint32
    B          uint32
    Locale     uint16
    Platform   uint16
    BlockIndex uint32
}

// Name64 returns part A and B as uint64
func (h *Hash) Name64() uint64 {
    return uint64(h.A)<<32 | uint64(h.B)
}

//nolint:gomnd // number
func (mpq *MPQ) readHashTable() error {
    if _, err := mpq.file.Seek(int64(mpq.header.HashTableOffset), io.SeekStart); err != nil {
        return err
    }

    hashData, err := decryptTable(mpq.file, mpq.header.HashTableEntries, "(hash table)")
    if err != nil {
        return err
    }

    mpq.hashes = make(map[uint64]*Hash)

    for n, i := uint32(0), uint32(0); i < mpq.header.HashTableEntries; n, i = n+4, i+1 {
        e := &Hash{
            A: hashData[n],
            B: hashData[n+1],
            // https://github.com/OpenDiablo2/OpenDiablo2/issues/812
            Locale:     uint16(hashData[n+2] >> 16),    //nolint:gomnd // // binary data
            Platform:   uint16(hashData[n+2] & 0xFFFF), //nolint:gomnd // // binary data
            BlockIndex: hashData[n+3],
        }
        mpq.hashes[e.Name64()] = e
    }

    return nil
}
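As readHashTable shows, the third dword of each hash entry is split into two 16-bit halves (the code links issue #812 about that field); a worked example of that unpacking with an invented raw value:

package d2mpq

import "fmt"

// Worked example of the unpacking used in readHashTable; the raw dword is invented.
func exampleUnpackHashEntry() {
    raw := uint32(0x04090000)

    locale := uint16(raw >> 16)      // high 16 bits -> 0x0409
    platform := uint16(raw & 0xFFFF) // low 16 bits  -> 0x0000

    fmt.Printf("locale=%04X platform=%04X\n", locale, platform)
}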
d2common/d2fileformats/d2mpq/mpq_header.go (new file, 36 lines)
@ -0,0 +1,36 @@
package d2mpq

import (
    "encoding/binary"
    "errors"
    "io"
)

// Header Represents a MPQ file
type Header struct {
    Magic             [4]byte
    HeaderSize        uint32
    ArchiveSize       uint32
    FormatVersion     uint16
    BlockSize         uint16
    HashTableOffset   uint32
    BlockTableOffset  uint32
    HashTableEntries  uint32
    BlockTableEntries uint32
}

func (mpq *MPQ) readHeader() error {
    if _, err := mpq.file.Seek(0, io.SeekStart); err != nil {
        return err
    }

    if err := binary.Read(mpq.file, binary.LittleEndian, &mpq.header); err != nil {
        return err
    }

    if string(mpq.header.Magic[:]) != "MPQ\x1A" {
        return errors.New("invalid mpq header")
    }

    return nil
}
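Because binary.Read fills the struct field by field in little-endian order, Header has to line up with the 32-byte classic MPQ header; a quick sanity check (not part of the commit):

package d2mpq

import (
    "encoding/binary"
    "fmt"
)

// binary.Size sums the packed field sizes: 4 + 4 + 4 + 2 + 2 + 4 + 4 + 4 + 4 = 32.
func exampleHeaderSize() {
    fmt.Println(binary.Size(Header{})) // 32
}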
@ -6,8 +6,7 @@ import (
     "encoding/binary"
     "errors"
     "fmt"
-    "log"
-    "strings"
+    "io"
 
     "github.com/JoshVarga/blast"
@ -17,80 +16,63 @@ import (
 // Stream represents a stream of data in an MPQ archive
 type Stream struct {
-    BlockTableEntry   BlockTableEntry
-    BlockPositions    []uint32
-    CurrentData       []byte
-    FileName          string
-    MPQData           *MPQ
-    EncryptionSeed    uint32
-    CurrentPosition   uint32
-    CurrentBlockIndex uint32
-    BlockSize         uint32
+    Data      []byte
+    Positions []uint32
+    MPQ       *MPQ
+    Block     *Block
+    Index     uint32
+    Size      uint32
+    Position  uint32
 }
 
 // CreateStream creates an MPQ stream
-func CreateStream(mpq *MPQ, blockTableEntry BlockTableEntry, fileName string) (*Stream, error) {
-    result := &Stream{
-        MPQData:           mpq,
-        BlockTableEntry:   blockTableEntry,
-        CurrentBlockIndex: 0xFFFFFFFF, //nolint:gomnd // MPQ magic
-    }
-    fileSegs := strings.Split(fileName, `\`)
-    result.EncryptionSeed = hashString(fileSegs[len(fileSegs)-1], 3)
-
-    if result.BlockTableEntry.HasFlag(FileFixKey) {
-        result.EncryptionSeed = (result.EncryptionSeed + result.BlockTableEntry.FilePosition) ^ result.BlockTableEntry.UncompressedFileSize
-    }
-
-    result.BlockSize = 0x200 << result.MPQData.data.BlockSize //nolint:gomnd // MPQ magic
-
-    if result.BlockTableEntry.HasFlag(FilePatchFile) {
-        log.Fatal("Patching is not supported")
-    }
-
-    var err error
-
-    if (result.BlockTableEntry.HasFlag(FileCompress) || result.BlockTableEntry.HasFlag(FileImplode)) &&
-        !result.BlockTableEntry.HasFlag(FileSingleUnit) {
-        err = result.loadBlockOffsets()
-    }
-
-    return result, err
+func CreateStream(mpq *MPQ, block *Block, fileName string) (*Stream, error) {
+    s := &Stream{
+        MPQ:   mpq,
+        Block: block,
+        Index: 0xFFFFFFFF, //nolint:gomnd // MPQ magic
+    }
+
+    if s.Block.HasFlag(FileFixKey) {
+        s.Block.calculateEncryptionSeed(fileName)
+    }
+
+    s.Size = 0x200 << s.MPQ.header.BlockSize //nolint:gomnd // MPQ magic
+
+    if s.Block.HasFlag(FilePatchFile) {
+        return nil, errors.New("patching is not supported")
+    }
+
+    if (s.Block.HasFlag(FileCompress) || s.Block.HasFlag(FileImplode)) && !s.Block.HasFlag(FileSingleUnit) {
+        if err := s.loadBlockOffsets(); err != nil {
+            return nil, err
+        }
+    }
+
+    return s, nil
 }
 
 func (v *Stream) loadBlockOffsets() error {
-    blockPositionCount := ((v.BlockTableEntry.UncompressedFileSize + v.BlockSize - 1) / v.BlockSize) + 1
-    v.BlockPositions = make([]uint32, blockPositionCount)
-
-    _, err := v.MPQData.file.Seek(int64(v.BlockTableEntry.FilePosition), 0)
-    if err != nil {
-        return err
-    }
-
-    mpqBytes := make([]byte, blockPositionCount*4) //nolint:gomnd // MPQ magic
-
-    _, err = v.MPQData.file.Read(mpqBytes)
-    if err != nil {
-        return err
-    }
-
-    for i := range v.BlockPositions {
-        idx := i * 4 //nolint:gomnd // MPQ magic
-        v.BlockPositions[i] = binary.LittleEndian.Uint32(mpqBytes[idx : idx+4])
-    }
-
-    blockPosSize := blockPositionCount << 2 //nolint:gomnd // MPQ magic
-
-    if v.BlockTableEntry.HasFlag(FileEncrypted) {
-        decrypt(v.BlockPositions, v.EncryptionSeed-1)
-
-        if v.BlockPositions[0] != blockPosSize {
-            log.Println("Decryption of MPQ failed!")
-            return errors.New("decryption of MPQ failed")
-        }
-
-        if v.BlockPositions[1] > v.BlockSize+blockPosSize {
-            log.Println("Decryption of MPQ failed!")
-            return errors.New("decryption of MPQ failed")
-        }
-    }
+    if _, err := v.MPQ.file.Seek(int64(v.Block.FilePosition), io.SeekStart); err != nil {
+        return err
+    }
+
+    blockPositionCount := ((v.Block.UncompressedFileSize + v.Size - 1) / v.Size) + 1
+    v.Positions = make([]uint32, blockPositionCount)
+
+    if err := binary.Read(v.MPQ.file, binary.LittleEndian, &v.Positions); err != nil {
+        return err
+    }
+
+    if v.Block.HasFlag(FileEncrypted) {
+        decrypt(v.Positions, v.Block.EncryptionSeed-1)
+
+        blockPosSize := blockPositionCount << 2 //nolint:gomnd // MPQ magic
+        if v.Positions[0] != blockPosSize {
+            return errors.New("decryption of MPQ failed")
+        }
+
+        if v.Positions[1] > v.Size+blockPosSize {
+            return errors.New("decryption of MPQ failed")
+        }
+    }
@ -98,16 +80,18 @@ func (v *Stream) loadBlockOffsets() error {
     return nil
 }
 
-func (v *Stream) Read(buffer []byte, offset, count uint32) uint32 {
-    if v.BlockTableEntry.HasFlag(FileSingleUnit) {
+func (v *Stream) Read(buffer []byte, offset, count uint32) (readTotal uint32, err error) {
+    if v.Block.HasFlag(FileSingleUnit) {
         return v.readInternalSingleUnit(buffer, offset, count)
     }
 
-    toRead := count
-    readTotal := uint32(0)
+    var read uint32
 
+    toRead := count
     for toRead > 0 {
-        read := v.readInternal(buffer, offset, toRead)
+        if read, err = v.readInternal(buffer, offset, toRead); err != nil {
+            return 0, err
+        }
 
         if read == 0 {
             break
@ -118,149 +102,153 @@ func (v *Stream) Read(buffer []byte, offset, count uint32) uint32 {
         toRead -= read
     }
 
-    return readTotal
+    return readTotal, nil
 }
 
-func (v *Stream) readInternalSingleUnit(buffer []byte, offset, count uint32) uint32 {
-    if len(v.CurrentData) == 0 {
-        v.loadSingleUnit()
-    }
-
-    bytesToCopy := d2math.Min(uint32(len(v.CurrentData))-v.CurrentPosition, count)
-
-    copy(buffer[offset:offset+bytesToCopy], v.CurrentData[v.CurrentPosition:v.CurrentPosition+bytesToCopy])
-
-    v.CurrentPosition += bytesToCopy
-
-    return bytesToCopy
+func (v *Stream) readInternalSingleUnit(buffer []byte, offset, count uint32) (uint32, error) {
+    if len(v.Data) == 0 {
+        if err := v.loadSingleUnit(); err != nil {
+            return 0, err
+        }
+    }
+
+    return v.copy(buffer, offset, v.Position, count)
 }
 
-func (v *Stream) readInternal(buffer []byte, offset, count uint32) uint32 {
-    v.bufferData()
-
-    localPosition := v.CurrentPosition % v.BlockSize
-    bytesToCopy := d2math.MinInt32(int32(len(v.CurrentData))-int32(localPosition), int32(count))
-
+func (v *Stream) readInternal(buffer []byte, offset, count uint32) (uint32, error) {
+    if err := v.bufferData(); err != nil {
+        return 0, err
+    }
+
+    localPosition := v.Position % v.Size
+
+    return v.copy(buffer, offset, localPosition, count)
+}
+
+func (v *Stream) copy(buffer []byte, offset, pos, count uint32) (uint32, error) {
+    bytesToCopy := d2math.Min(uint32(len(v.Data))-pos, count)
     if bytesToCopy <= 0 {
-        return 0
+        return 0, nil
     }
 
-    copy(buffer[offset:offset+uint32(bytesToCopy)], v.CurrentData[localPosition:localPosition+uint32(bytesToCopy)])
-
-    v.CurrentPosition += uint32(bytesToCopy)
-
-    return uint32(bytesToCopy)
+    copy(buffer[offset:offset+bytesToCopy], v.Data[pos:pos+bytesToCopy])
+    v.Position += bytesToCopy
+
+    return bytesToCopy, nil
 }
 
-func (v *Stream) bufferData() {
-    requiredBlock := v.CurrentPosition / v.BlockSize
-
-    if requiredBlock == v.CurrentBlockIndex {
-        return
-    }
-
-    expectedLength := d2math.Min(v.BlockTableEntry.UncompressedFileSize-(requiredBlock*v.BlockSize), v.BlockSize)
-    v.CurrentData = v.loadBlock(requiredBlock, expectedLength)
-    v.CurrentBlockIndex = requiredBlock
+func (v *Stream) bufferData() (err error) {
+    blockIndex := v.Position / v.Size
+
+    if blockIndex == v.Index {
+        return nil
+    }
+
+    expectedLength := d2math.Min(v.Block.UncompressedFileSize-(blockIndex*v.Size), v.Size)
+    if v.Data, err = v.loadBlock(blockIndex, expectedLength); err != nil {
+        return err
+    }
+
+    v.Index = blockIndex
+
+    return nil
 }
 
-func (v *Stream) loadSingleUnit() {
-    fileData := make([]byte, v.BlockSize)
-
-    _, err := v.MPQData.file.Seek(int64(v.MPQData.data.HeaderSize), 0)
-    if err != nil {
-        log.Print(err)
-    }
-
-    _, err = v.MPQData.file.Read(fileData)
-    if err != nil {
-        log.Print(err)
-    }
-
-    if v.BlockSize == v.BlockTableEntry.UncompressedFileSize {
-        v.CurrentData = fileData
-        return
-    }
-
-    v.CurrentData = decompressMulti(fileData, v.BlockTableEntry.UncompressedFileSize)
+func (v *Stream) loadSingleUnit() (err error) {
+    if _, err = v.MPQ.file.Seek(int64(v.MPQ.header.HeaderSize), io.SeekStart); err != nil {
+        return err
+    }
+
+    fileData := make([]byte, v.Size)
+    if _, err = v.MPQ.file.Read(fileData); err != nil {
+        return err
+    }
+
+    if v.Size == v.Block.UncompressedFileSize {
+        v.Data = fileData
+        return nil
+    }
+
+    v.Data, err = decompressMulti(fileData, v.Block.UncompressedFileSize)
+
+    return err
 }
 
-func (v *Stream) loadBlock(blockIndex, expectedLength uint32) []byte {
+func (v *Stream) loadBlock(blockIndex, expectedLength uint32) ([]byte, error) {
     var (
         offset uint32
         toRead uint32
     )
 
-    if v.BlockTableEntry.HasFlag(FileCompress) || v.BlockTableEntry.HasFlag(FileImplode) {
-        offset = v.BlockPositions[blockIndex]
-        toRead = v.BlockPositions[blockIndex+1] - offset
+    if v.Block.HasFlag(FileCompress) || v.Block.HasFlag(FileImplode) {
+        offset = v.Positions[blockIndex]
+        toRead = v.Positions[blockIndex+1] - offset
     } else {
-        offset = blockIndex * v.BlockSize
+        offset = blockIndex * v.Size
         toRead = expectedLength
     }
 
-    offset += v.BlockTableEntry.FilePosition
+    offset += v.Block.FilePosition
     data := make([]byte, toRead)
 
-    _, err := v.MPQData.file.Seek(int64(offset), 0)
-    if err != nil {
-        log.Print(err)
-    }
-
-    _, err = v.MPQData.file.Read(data)
-    if err != nil {
-        log.Print(err)
-    }
-
-    if v.BlockTableEntry.HasFlag(FileEncrypted) && v.BlockTableEntry.UncompressedFileSize > 3 {
-        if v.EncryptionSeed == 0 {
-            panic("Unable to determine encryption key")
+    if _, err := v.MPQ.file.Seek(int64(offset), io.SeekStart); err != nil {
+        return []byte{}, err
+    }
+
+    if _, err := v.MPQ.file.Read(data); err != nil {
+        return []byte{}, err
+    }
+
+    if v.Block.HasFlag(FileEncrypted) && v.Block.UncompressedFileSize > 3 {
+        if v.Block.EncryptionSeed == 0 {
+            return []byte{}, errors.New("unable to determine encryption key")
         }
 
-        decryptBytes(data, blockIndex+v.EncryptionSeed)
+        decryptBytes(data, blockIndex+v.Block.EncryptionSeed)
     }
 
-    if v.BlockTableEntry.HasFlag(FileCompress) && (toRead != expectedLength) {
-        if !v.BlockTableEntry.HasFlag(FileSingleUnit) {
-            data = decompressMulti(data, expectedLength)
-        } else {
-            data = pkDecompress(data)
-        }
-    }
-
-    if v.BlockTableEntry.HasFlag(FileImplode) && (toRead != expectedLength) {
-        data = pkDecompress(data)
-    }
-
-    return data
+    if v.Block.HasFlag(FileCompress) && (toRead != expectedLength) {
+        if !v.Block.HasFlag(FileSingleUnit) {
+            return decompressMulti(data, expectedLength)
+        }
+
+        return pkDecompress(data)
+    }
+
+    if v.Block.HasFlag(FileImplode) && (toRead != expectedLength) {
+        return pkDecompress(data)
+    }
+
+    return data, nil
 }
 
 //nolint:gomnd // Will fix enum values later
-func decompressMulti(data []byte /*expectedLength*/, _ uint32) []byte {
+func decompressMulti(data []byte /*expectedLength*/, _ uint32) ([]byte, error) {
     compressionType := data[0]
 
     switch compressionType {
     case 1: // Huffman
-        panic("huffman decompression not supported")
+        return []byte{}, errors.New("huffman decompression not supported")
     case 2: // ZLib/Deflate
         return deflate(data[1:])
     case 8: // PKLib/Impode
         return pkDecompress(data[1:])
     case 0x10: // BZip2
-        panic("bzip2 decompression not supported")
+        return []byte{}, errors.New("bzip2 decompression not supported")
     case 0x80: // IMA ADPCM Stereo
-        return d2compression.WavDecompress(data[1:], 2)
+        return d2compression.WavDecompress(data[1:], 2), nil
     case 0x40: // IMA ADPCM Mono
-        return d2compression.WavDecompress(data[1:], 1)
+        return d2compression.WavDecompress(data[1:], 1), nil
     case 0x12:
-        panic("lzma decompression not supported")
+        return []byte{}, errors.New("lzma decompression not supported")
     // Combos
     case 0x22:
         // sparse then zlib
-        panic("sparse decompression + deflate decompression not supported")
+        return []byte{}, errors.New("sparse decompression + deflate decompression not supported")
     case 0x30:
         // sparse then bzip2
-        panic("sparse decompression + bzip2 decompression not supported")
+        return []byte{}, errors.New("sparse decompression + bzip2 decompression not supported")
     case 0x41:
         sinput := d2compression.HuffmanDecompress(data[1:])
         sinput = d2compression.WavDecompress(sinput, 1)
@ -268,69 +256,68 @@ func decompressMulti(data []byte /*expectedLength*/, _ uint32) []byte {
         copy(tmp, sinput)
 
-        return tmp
+        return tmp, nil
     case 0x48:
         // byte[] result = PKDecompress(sinput, outputLength);
         // return MpqWavCompression.Decompress(new MemoryStream(result), 1);
-        panic("pk + mpqwav decompression not supported")
+        return []byte{}, errors.New("pk + mpqwav decompression not supported")
     case 0x81:
         sinput := d2compression.HuffmanDecompress(data[1:])
         sinput = d2compression.WavDecompress(sinput, 2)
         tmp := make([]byte, len(sinput))
         copy(tmp, sinput)
 
-        return tmp
+        return tmp, nil
     case 0x88:
         // byte[] result = PKDecompress(sinput, outputLength);
         // return MpqWavCompression.Decompress(new MemoryStream(result), 2);
-        panic("pk + wav decompression not supported")
-    default:
-        panic(fmt.Sprintf("decompression not supported for unknown compression type %X", compressionType))
+        return []byte{}, errors.New("pk + wav decompression not supported")
     }
 
+    return []byte{}, fmt.Errorf("decompression not supported for unknown compression type %X", compressionType)
 }
 
-func deflate(data []byte) []byte {
+func deflate(data []byte) ([]byte, error) {
     b := bytes.NewReader(data)

     r, err := zlib.NewReader(b)

     if err != nil {
-        panic(err)
+        return []byte{}, err
     }
 
     buffer := new(bytes.Buffer)
 
     _, err = buffer.ReadFrom(r)
     if err != nil {
-        log.Panic(err)
+        return []byte{}, err
     }
 
     err = r.Close()
     if err != nil {
-        log.Panic(err)
+        return []byte{}, err
     }
 
-    return buffer.Bytes()
+    return buffer.Bytes(), nil
 }
 
-func pkDecompress(data []byte) []byte {
+func pkDecompress(data []byte) ([]byte, error) {
     b := bytes.NewReader(data)
-    r, err := blast.NewReader(b)
 
+    r, err := blast.NewReader(b)
     if err != nil {
-        panic(err)
+        return []byte{}, err
     }
 
     buffer := new(bytes.Buffer)
 
-    _, err = buffer.ReadFrom(r)
-    if err != nil {
-        panic(err)
+    if _, err = buffer.ReadFrom(r); err != nil {
+        return []byte{}, err
     }
 
     err = r.Close()
     if err != nil {
-        panic(err)
+        return []byte{}, err
     }
 
-    return buffer.Bytes()
+    return buffer.Bytes(), nil
 }
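With Read, readInternal, bufferData and the decompressors now returning errors instead of panicking, a caller drains a whole file through a Stream roughly the way ReadFile in mpq.go does; a condensed sketch of that flow (mirrors the commit's own code, not an additional API):

package d2mpq

import "strings"

// Condensed version of the ReadFile flow shown earlier in this commit.
func readWholeFile(mpq *MPQ, fileName string) ([]byte, error) {
    block, err := mpq.getFileBlockData(fileName)
    if err != nil {
        return nil, err
    }

    block.FileName = strings.ToLower(fileName)

    stream, err := CreateStream(mpq, block, fileName)
    if err != nil {
        return nil, err
    }

    buf := make([]byte, block.UncompressedFileSize)
    if _, err := stream.Read(buf, 0, block.UncompressedFileSize); err != nil {
        return nil, err
    }

    return buf, nil
}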
@ -8,10 +8,9 @@ type Archive interface {
 	Path() string
 	Contains(string) bool
 	Size() uint32
-	Close()
-	FileExists(fileName string) bool
+	Close() error
 	ReadFile(fileName string) ([]byte, error)
 	ReadFileStream(fileName string) (DataStream, error)
 	ReadTextFile(fileName string) (string, error)
-	GetFileList() ([]string, error)
+	Listfile() ([]string, error)
 }
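Since Close now reports an error, FileExists is folded into Contains, and GetFileList became Listfile, a consumer of the updated interface looks roughly like this (sketch; the d2interface import path is inferred from the repository layout shown in this diff):

package example

import (
    "log"

    "github.com/OpenDiablo2/OpenDiablo2/d2common/d2interface"
)

// Sketch of a caller that only depends on the updated Archive interface.
func dumpArchive(archive d2interface.Archive) {
    defer func() {
        if err := archive.Close(); err != nil {
            log.Print(err)
        }
    }()

    names, err := archive.Listfile()
    if err != nil {
        log.Print(err)
        return
    }

    for _, name := range names {
        if archive.Contains(name) {
            log.Println(name)
        }
    }
}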
@ -37,7 +37,8 @@ func Ext2SourceType(ext string) SourceType {
 func CheckSourceType(path string) SourceType {
 	// on MacOS, the MPQ's from blizzard don't have file extensions
 	// so we just attempt to init the file as an mpq
-	if _, err := d2mpq.Load(path); err == nil {
+	if mpq, err := d2mpq.New(path); err == nil {
+		_ = mpq.Close()
 		return AssetSourceMPQ
 	}
@ -14,7 +14,7 @@ var _ asset.Source = &Source{}
 
 // NewSource creates a new MPQ Source
 func NewSource(sourcePath string) (asset.Source, error) {
-	loaded, err := d2mpq.Load(sourcePath)
+	loaded, err := d2mpq.FromFile(sourcePath)
 	if err != nil {
 		return nil, err
 	}
@ -33,13 +33,13 @@ func main() {
 	}
 
 	filename := flag.Arg(0)
-	mpq, err := d2mpq.Load(filename)
+
+	mpq, err := d2mpq.FromFile(filename)
 	if err != nil {
 		log.Fatal(err)
 	}
 
-	list, err := mpq.GetFileList()
+	list, err := mpq.Listfile()
 	if err != nil {
 		log.Fatal(err)
 	}
 