add mft read method
parent 46a0efe741
commit b957619ac3
@@ -0,0 +1,218 @@
package wincmd

import (
    "b612.me/win32api"
    "b612.me/wincmd/ntfs/binutil"
    "b612.me/wincmd/ntfs/mft"
    "b612.me/wincmd/ntfs/utf16"
    "encoding/binary"
    "errors"
    "io"
    "os"
    "reflect"
    "runtime"
    "time"
    "unsafe"
)

// MFTFile describes a single file or directory entry read from the MFT.
type MFTFile struct {
    Name    string
    Path    string
    ModTime time.Time
    Size    uint64
    IsDir   bool
    Node    uint64
}

// GetFileListsByMftFn reads the MFT of the given volume and returns the entries for which fn
// returns true. fn receives each entry's name and whether the entry is a directory.
func GetFileListsByMftFn(driver string, fn func(string, bool) bool) ([]MFTFile, error) {
    var result []MFTFile
    fileMap := make(map[win32api.DWORDLONG]FileEntry)
    f, size, err := mft.GetMFTFile(driver)
    if err != nil {
        return []MFTFile{}, err
    }
    recordSize := int64(1024)
    alreadyGot := int64(0)
    maxRecordSize := size / recordSize
    if maxRecordSize > 1024 {
        maxRecordSize = 1024
    }
    for {
        for {
            if (size - alreadyGot) < maxRecordSize*recordSize {
                maxRecordSize--
            } else {
                break
            }
        }
        if maxRecordSize < 10 {
            maxRecordSize = 1
        }
        buf := make([]byte, maxRecordSize*recordSize)
        got, err := io.ReadFull(f, buf)
        if err != nil {
            if errors.Is(err, io.EOF) {
                break
            }
            return []MFTFile{}, err
        }
        alreadyGot += int64(got)
        for j := int64(0); j < 1024*maxRecordSize; j += 1024 {
            record, err := mft.ParseRecord(buf[j : j+1024])
            if err != nil {
                continue
            }
            if record.Flags&mft.RecordFlagInUse == 1 && record.Flags&mft.RecordFlagIsIndex == 0 {
                var file MFTFile
                file.IsDir = record.Flags&mft.RecordFlagIsDirectory != 0
                file.Node = record.FileReference.ToUint64()
                parent := uint64(0)
                for _, v := range record.Attributes {
                    if v.Type == mft.AttributeTypeFileName {
                        if file.Name != "" {
                            continue
                        }
                        file.Name = utf16.DecodeString(v.Data[66:], binary.LittleEndian)
                        if file.Name != "" {
                            parent = binutil.NewLittleEndianReader(v.Data[:8]).Uint64(0)
                        }
                    }
                    if v.Type == mft.AttributeTypeData {
                        file.Size = v.ActualSize
                    }
                    if v.Type == mft.AttributeTypeStandardInformation {
                        if len(v.Data) < 48 {
                            continue
                        }
                        r := binutil.NewLittleEndianReader(v.Data)
                        file.ModTime = mft.ConvertFileTime(r.Uint64(0x08))
                    }
                }
                if file.Name != "" {
                    canAdd := fn(file.Name, file.IsDir)
                    if canAdd {
                        result = append(result, file)
                    }
                    if canAdd || file.IsDir {
                        fileMap[win32api.DWORDLONG(file.Node)] = FileEntry{
                            Name:   file.Name,
                            Parent: win32api.DWORDLONG(parent),
                            Type:   0,
                        }
                    }
                }
            }
        }
    }

    (*reflect.SliceHeader)(unsafe.Pointer(&result)).Cap = len(result)
    for k, v := range result {
        result[k].Path = GetFullUsnPath(driver, fileMap, win32api.DWORDLONG(v.Node))
    }
    fileMap = nil
    runtime.GC()
    return result, nil
}

// GetFileListsByMft reads the MFT of the given volume and returns all entries.
func GetFileListsByMft(driver string) ([]MFTFile, error) {
    return GetFileListsByMftFn(driver, func(string, bool) bool { return true })
}

// GetFileListsFromMftFileFn parses a previously dumped MFT file and returns the entries for
// which fn returns true.
func GetFileListsFromMftFileFn(filepath string, fn func(string, bool) bool) ([]MFTFile, error) {
    var result []MFTFile
    fileMap := make(map[win32api.DWORDLONG]FileEntry)
    f, err := os.Open(filepath)
    if err != nil {
        return []MFTFile{}, err
    }
    defer f.Close()
    stat, err := f.Stat()
    if err != nil {
        return []MFTFile{}, err
    }
    size := stat.Size()
    recordSize := int64(1024)
    alreadyGot := int64(0)
    maxRecordSize := size / recordSize
    if maxRecordSize > 1024 {
        maxRecordSize = 1024
    }
    for {
        for {
            if (size - alreadyGot) < maxRecordSize*recordSize {
                maxRecordSize--
            } else {
                break
            }
        }
        if maxRecordSize < 10 {
            maxRecordSize = 1
        }
        buf := make([]byte, maxRecordSize*recordSize)
        got, err := io.ReadFull(f, buf)
        if err != nil {
            if errors.Is(err, io.EOF) {
                break
            }
            return []MFTFile{}, err
        }
        alreadyGot += int64(got)
        for j := int64(0); j < 1024*maxRecordSize; j += 1024 {
            record, err := mft.ParseRecord(buf[j : j+1024])
            if err != nil {
                continue
            }
            if record.Flags&mft.RecordFlagInUse == 1 && record.Flags&mft.RecordFlagIsIndex == 0 {
                var file MFTFile
                file.IsDir = record.Flags&mft.RecordFlagIsDirectory != 0
                file.Node = record.FileReference.ToUint64()
                parent := uint64(0)
                for _, v := range record.Attributes {
                    if v.Type == mft.AttributeTypeFileName {
                        if file.Name != "" {
                            continue
                        }
                        file.Name = utf16.DecodeString(v.Data[66:], binary.LittleEndian)
                        if file.Name != "" {
                            parent = binutil.NewLittleEndianReader(v.Data[:8]).Uint64(0)
                        }
                    }
                    if v.Type == mft.AttributeTypeData {
                        file.Size = v.ActualSize
                    }
                    if v.Type == mft.AttributeTypeStandardInformation {
                        if len(v.Data) < 48 {
                            continue
                        }
                        r := binutil.NewLittleEndianReader(v.Data)
                        file.ModTime = mft.ConvertFileTime(r.Uint64(0x08))
                    }
                }
                if file.Name != "" {
                    canAdd := fn(file.Name, file.IsDir)
                    if canAdd {
                        result = append(result, file)
                    }
                    if canAdd || file.IsDir {
                        fileMap[win32api.DWORDLONG(file.Node)] = FileEntry{
                            Name:   file.Name,
                            Parent: win32api.DWORDLONG(parent),
                            Type:   0,
                        }
                    }
                }
            }
        }
    }

    (*reflect.SliceHeader)(unsafe.Pointer(&result)).Cap = len(result)
    for k, v := range result {
        result[k].Path = GetFullUsnPath(" ", fileMap, win32api.DWORDLONG(v.Node))
    }
    fileMap = nil
    runtime.GC()
    return result, nil
}

// GetFileListsFromMftFile parses a previously dumped MFT file and returns all entries.
func GetFileListsFromMftFile(filepath string) ([]MFTFile, error) {
    return GetFileListsFromMftFileFn(filepath, func(string, bool) bool { return true })
}
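A minimal usage sketch for the callback variant above; the import path (b612.me/wincmd) and the form of the driver string (a drive letter such as "C:") are assumptions not confirmed by this commit, and reading the MFT requires administrator rights:

    package main

    import (
        "fmt"

        "b612.me/wincmd" // assumed import path for package wincmd
    )

    func main() {
        // Collect only directories; the callback decides per entry.
        dirs, err := wincmd.GetFileListsByMftFn("C:", func(name string, isDir bool) bool {
            return isDir
        })
        if err != nil {
            panic(err)
        }
        fmt.Println("directories found:", len(dirs))
    }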
@@ -0,0 +1,111 @@
// Package binutil contains some helpful utilities for reading binary data from byte slices.
package binutil

import "encoding/binary"

// Duplicate creates a full copy of the input byte slice.
func Duplicate(in []byte) []byte {
    out := make([]byte, len(in))
    copy(out, in)
    return out
}

// IsOnlyZeroes returns true when the input data consists only of zero-value bytes and false if
// any byte has a nonzero value.
func IsOnlyZeroes(data []byte) bool {
    for _, b := range data {
        if b != 0 {
            return false
        }
    }
    return true
}

// BinReader helps to read data from a byte slice using an offset and a data length (instead of two
// offsets as in a slice expression). For example b[2:4] yields the same as Read(2, 2) using a
// BinReader over b. Also some convenient methods are provided to read integer values using a
// binary.ByteOrder from the slice directly.
//
// Note that methods that return a []byte may not necessarily copy the data, so modifying the returned
// slice may also affect the data in the BinReader.
//
// Methods will panic when any offset or length is outside of the bounds of the original data.
type BinReader struct {
    data []byte
    bo   binary.ByteOrder
}

// NewBinReader creates a BinReader over data using the specified binary.ByteOrder. The data slice is
// stored directly, no copy is made, so modifying the original slice will also affect the returned BinReader.
func NewBinReader(data []byte, bo binary.ByteOrder) *BinReader {
    return &BinReader{data: data, bo: bo}
}

// NewLittleEndianReader creates a BinReader over data using binary.LittleEndian. The data slice is stored
// directly, no copy is made, so modifying the original slice will also affect the returned BinReader.
func NewLittleEndianReader(data []byte) *BinReader {
    return NewBinReader(data, binary.LittleEndian)
}

// NewBigEndianReader creates a BinReader over data using binary.BigEndian. The data slice is stored
// directly, no copy is made, so modifying the original slice will also affect the returned BinReader.
func NewBigEndianReader(data []byte) *BinReader {
    return NewBinReader(data, binary.BigEndian)
}

// Data returns all data inside this BinReader.
func (r *BinReader) Data() []byte {
    return r.data
}

// ByteOrder returns the ByteOrder for this BinReader.
func (r *BinReader) ByteOrder() binary.ByteOrder {
    return r.bo
}

// Length returns the length of the contained data.
func (r *BinReader) Length() int {
    return len(r.data)
}

// Read reads an amount of bytes as specified by length from the provided offset. The returned slice's
// length is the same as the specified length.
func (r *BinReader) Read(offset int, length int) []byte {
    return r.data[offset : offset+length]
}

// Reader returns a new BinReader over the data read by Read(offset, length) using the same ByteOrder as
// this reader. There is no guarantee a copy of the data is made, so modifying the new reader's data may
// affect the original.
func (r *BinReader) Reader(offset int, length int) *BinReader {
    return &BinReader{data: r.data[offset : offset+length], bo: r.bo}
}

// Byte returns the byte at the position indicated by the offset.
func (r *BinReader) Byte(offset int) byte {
    return r.Read(offset, 1)[0]
}

// ReadFrom returns all data starting at the specified offset.
func (r *BinReader) ReadFrom(offset int) []byte {
    return r.data[offset:]
}

// ReaderFrom returns a BinReader over the data read by ReadFrom(offset) using the same ByteOrder as this
// reader. There is no guarantee a copy of the data is made, so modifying the new reader's data may affect
// the original.
func (r *BinReader) ReaderFrom(offset int) *BinReader {
    return &BinReader{data: r.data[offset:], bo: r.bo}
}

// Uint16 reads 2 bytes from the provided offset and parses them into a uint16 using the provided ByteOrder.
func (r *BinReader) Uint16(offset int) uint16 {
    return r.bo.Uint16(r.Read(offset, 2))
}

// Uint32 reads 4 bytes from the provided offset and parses them into a uint32 using the provided ByteOrder.
func (r *BinReader) Uint32(offset int) uint32 {
    return r.bo.Uint32(r.Read(offset, 4))
}

// Uint64 reads 8 bytes from the provided offset and parses them into a uint64 using the provided ByteOrder.
func (r *BinReader) Uint64(offset int) uint64 {
    return r.bo.Uint64(r.Read(offset, 8))
}
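A short sketch of the offset/length reading style described in the BinReader comment (b[2:4] versus Read(2, 2)):

    package main

    import (
        "fmt"

        "b612.me/wincmd/ntfs/binutil"
    )

    func main() {
        b := []byte{0x01, 0x02, 0x03, 0x04, 0x05}
        r := binutil.NewLittleEndianReader(b)
        fmt.Printf("% x\n", r.Read(2, 2)) // same bytes as b[2:4]: 03 04
        fmt.Println(r.Uint16(0))          // little endian 0x0201 = 513
    }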
@@ -0,0 +1,72 @@
/*
Package bootsect provides functions to parse the boot sector (also sometimes called Volume Boot Record, VBR, or
$Boot file) of an NTFS volume.
*/
package bootsect

import (
    "fmt"

    "b612.me/wincmd/ntfs/binutil"
)

// BootSector represents the parsed data of an NTFS boot sector. The OemId should typically be "NTFS    "
// ("NTFS" followed by 4 trailing spaces) for a valid NTFS boot sector.
type BootSector struct {
    OemId                        string
    BytesPerSector               int
    SectorsPerCluster            int
    MediaDescriptor              byte
    SectorsPerTrack              int
    NumberofHeads                int
    HiddenSectors                int
    TotalSectors                 uint64
    MftClusterNumber             uint64
    MftMirrorClusterNumber       uint64
    FileRecordSegmentSizeInBytes int
    IndexBufferSizeInBytes       int
    VolumeSerialNumber           []byte
}

// Parse parses the data of an NTFS boot sector into a BootSector structure.
func Parse(data []byte) (BootSector, error) {
    if len(data) < 80 {
        return BootSector{}, fmt.Errorf("boot sector data should be at least 80 bytes but is %d", len(data))
    }
    r := binutil.NewLittleEndianReader(data)
    bytesPerSector := int(r.Uint16(0x0B))
    sectorsPerCluster := int(int8(r.Byte(0x0D)))
    if sectorsPerCluster < 0 {
        // Quoth Wikipedia: The number of sectors in a cluster. If the value is negative, the amount of
        // sectors is 2 to the power of the absolute value of this field.
        sectorsPerCluster = 1 << -sectorsPerCluster
    }
    bytesPerCluster := bytesPerSector * sectorsPerCluster
    return BootSector{
        OemId:                        string(r.Read(0x03, 8)),
        BytesPerSector:               bytesPerSector,
        SectorsPerCluster:            sectorsPerCluster,
        MediaDescriptor:              r.Byte(0x15),
        SectorsPerTrack:              int(r.Uint16(0x18)),
        NumberofHeads:                int(r.Uint16(0x1A)),
        HiddenSectors:                int(r.Uint16(0x1C)),
        TotalSectors:                 r.Uint64(0x28),
        MftClusterNumber:             r.Uint64(0x30),
        MftMirrorClusterNumber:       r.Uint64(0x38),
        FileRecordSegmentSizeInBytes: bytesOrClustersToBytes(r.Byte(0x40), bytesPerCluster),
        IndexBufferSizeInBytes:       bytesOrClustersToBytes(r.Byte(0x44), bytesPerCluster),
        VolumeSerialNumber:           binutil.Duplicate(r.Read(0x48, 8)),
    }, nil
}

func bytesOrClustersToBytes(b byte, bytesPerCluster int) int {
    // From Wikipedia:
    // A positive value denotes the number of clusters in a File Record Segment. A negative value denotes
    // the amount of bytes in a File Record Segment, in which case the size is 2 to the power of the
    // absolute value (0xF6 = -10 → 2^10 = 1024).
    i := int(int8(b))
    if i < 0 {
        return 1 << -i
    }
    return i * bytesPerCluster
}
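A sketch of parsing a live boot sector with this package, mirroring the first steps of the dump command in this commit; the `\\.\C:` volume path applies to Windows and requires administrator rights:

    package main

    import (
        "fmt"
        "io"
        "os"

        "b612.me/wincmd/ntfs/bootsect"
    )

    func main() {
        in, err := os.Open(`\\.\C:`)
        if err != nil {
            panic(err)
        }
        defer in.Close()

        buf := make([]byte, 512)
        if _, err := io.ReadFull(in, buf); err != nil {
            panic(err)
        }
        bs, err := bootsect.Parse(buf)
        if err != nil {
            panic(err)
        }
        fmt.Println(bs.OemId, bs.BytesPerSector, bs.SectorsPerCluster, bs.MftClusterNumber)
    }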
@@ -0,0 +1,251 @@
package main

import (
    "b612.me/wincmd/ntfs/bootsect"
    "b612.me/wincmd/ntfs/fragment"
    "b612.me/wincmd/ntfs/mft"
    "flag"
    "fmt"
    "io"
    "os"
    "path/filepath"
    "runtime"
    "strings"
    "time"
)

const supportedOemId = "NTFS    "

const (
    exitCodeUserError int = iota + 2
    exitCodeFunctionalError
    exitCodeTechnicalError
)

const isWin = runtime.GOOS == "windows"

var (
    // flags
    verbose                 = false
    overwriteOutputIfExists = false
    showProgress            = false
)

func main() {
    start := time.Now()
    verboseFlag := flag.Bool("v", false, "verbose; print details about what's going on")
    forceFlag := flag.Bool("f", false, "force; overwrite the output file if it already exists")
    progressFlag := flag.Bool("p", false, "progress; show progress during dumping")

    flag.Usage = printUsage
    flag.Parse()

    verbose = *verboseFlag
    overwriteOutputIfExists = *forceFlag
    showProgress = *progressFlag
    args := flag.Args()

    if len(args) != 2 {
        printUsage()
        os.Exit(exitCodeUserError)
        return
    }

    volume := args[0]
    if isWin {
        volume = `\\.\` + volume
    }
    //fmt.Println(volume)
    outfile := args[1]

    in, err := os.Open(volume)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to open volume using path %s: %v\n", volume, err)
    }
    defer in.Close()

    printVerbose("Reading boot sector\n")
    bootSectorData := make([]byte, 512)
    _, err = io.ReadFull(in, bootSectorData)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to read boot sector: %v\n", err)
    }

    printVerbose("Read %d bytes of boot sector, parsing boot sector\n", len(bootSectorData))
    bootSector, err := bootsect.Parse(bootSectorData)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to parse boot sector data: %v\n", err)
    }

    if bootSector.OemId != supportedOemId {
        fatalf(exitCodeFunctionalError, "Unknown OemId (file system type) %q (expected %q)\n", bootSector.OemId, supportedOemId)
    }

    bytesPerCluster := bootSector.BytesPerSector * bootSector.SectorsPerCluster
    mftPosInBytes := int64(bootSector.MftClusterNumber) * int64(bytesPerCluster)

    _, err = in.Seek(mftPosInBytes, 0)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to seek to MFT position: %v\n", err)
    }

    mftSizeInBytes := bootSector.FileRecordSegmentSizeInBytes
    printVerbose("Reading $MFT file record at position %d (size: %d bytes)\n", mftPosInBytes, mftSizeInBytes)
    mftData := make([]byte, mftSizeInBytes)
    _, err = io.ReadFull(in, mftData)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to read $MFT record: %v\n", err)
    }

    printVerbose("Parsing $MFT file record\n")
    record, err := mft.ParseRecord(mftData)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to parse $MFT record: %v\n", err)
    }

    printVerbose("Reading $DATA attribute in $MFT file record\n")
    dataAttributes := record.FindAttributes(mft.AttributeTypeData)
    if len(dataAttributes) == 0 {
        fatalf(exitCodeTechnicalError, "No $DATA attribute found in $MFT record\n")
    }

    if len(dataAttributes) > 1 {
        fatalf(exitCodeTechnicalError, "More than 1 $DATA attribute found in $MFT record\n")
    }

    dataAttribute := dataAttributes[0]
    if dataAttribute.Resident {
        fatalf(exitCodeTechnicalError, "Don't know how to handle resident $DATA attribute in $MFT record\n")
    }

    dataRuns, err := mft.ParseDataRuns(dataAttribute.Data)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Unable to parse dataruns in $MFT $DATA record: %v\n", err)
    }

    if len(dataRuns) == 0 {
        fatalf(exitCodeTechnicalError, "No dataruns found in $MFT $DATA record\n")
    }

    fragments := mft.DataRunsToFragments(dataRuns, bytesPerCluster)
    totalLength := int64(0)
    for _, frag := range fragments {
        totalLength += int64(frag.Length)
    }

    out, err := openOutputFile(outfile)
    if err != nil {
        fatalf(exitCodeFunctionalError, "Unable to open output file: %v\n", err)
    }
    defer out.Close()

    printVerbose("Copying %d bytes (%s) of data to %s\n", totalLength, formatBytes(totalLength), outfile)
    n, err := copy(out, fragment.NewReader(in, fragments), totalLength)
    if err != nil {
        fatalf(exitCodeTechnicalError, "Error copying data to output file: %v\n", err)
    }

    if n != totalLength {
        fatalf(exitCodeTechnicalError, "Expected to copy %d bytes, but copied only %d\n", totalLength, n)
    }
    end := time.Now()
    dur := end.Sub(start)
    printVerbose("Finished in %v\n", dur)
}

func copy(dst io.Writer, src io.Reader, totalLength int64) (written int64, err error) {
    buf := make([]byte, 1024*1024)
    if !showProgress {
        return io.CopyBuffer(dst, src, buf)
    }

    onePercent := float64(totalLength) / float64(100.0)
    totalSize := formatBytes(totalLength)

    // Below copied from io.copyBuffer (https://golang.org/src/io/io.go?s=12796:12856#L380)
    for {
        printProgress(written, totalSize, onePercent)

        nr, er := src.Read(buf)
        if nr > 0 {
            nw, ew := dst.Write(buf[0:nr])
            if nw > 0 {
                written += int64(nw)
            }
            if ew != nil {
                err = ew
                break
            }
            if nr != nw {
                err = io.ErrShortWrite
                break
            }
        }
        if er != nil {
            if er != io.EOF {
                err = er
            }
            break
        }
    }
    printProgress(written, totalSize, onePercent)
    fmt.Println()
    return written, err
}

func printProgress(n int64, totalSize string, onePercent float64) {
    percentage := float64(n) / onePercent
    barCount := int(percentage / 2.0)
    spaceCount := 50 - barCount
    fmt.Printf("\r[%s%s] %.2f%% (%s / %s) ", strings.Repeat("|", barCount), strings.Repeat(" ", spaceCount), percentage, formatBytes(n), totalSize)
}

func openOutputFile(outfile string) (*os.File, error) {
    if overwriteOutputIfExists {
        return os.Create(outfile)
    } else {
        return os.OpenFile(outfile, os.O_WRONLY|os.O_CREATE|os.O_EXCL, 0666)
    }
}

func printUsage() {
    out := os.Stderr
    exe := filepath.Base(os.Args[0])
    fmt.Fprintf(out, "\nusage: %s [flags] <volume> <output file>\n\n", exe)
    fmt.Fprintln(out, "Dump the MFT of a volume to a file. The volume should be NTFS formatted.")
    fmt.Fprintln(out, "\nFlags:")

    flag.PrintDefaults()

    fmt.Fprintf(out, "\nFor example: ")
    if isWin {
        fmt.Fprintf(out, "%s -v -f C: D:\\c.mft\n", exe)
    } else {
        fmt.Fprintf(out, "%s -v -f /dev/sdb1 ~/sdb1.mft\n", exe)
    }
}

func fatalf(exitCode int, format string, v ...interface{}) {
    fmt.Printf(format, v...)
    os.Exit(exitCode)
}

func printVerbose(format string, v ...interface{}) {
    if verbose {
        fmt.Printf(format, v...)
    }
}

func formatBytes(b int64) string {
    if b < 1024 {
        return fmt.Sprintf("%dB", b)
    }
    if b < 1048576 {
        return fmt.Sprintf("%.2fKiB", float32(b)/float32(1024))
    }
    if b < 1073741824 {
        return fmt.Sprintf("%.2fMiB", float32(b)/float32(1048576))
    }
    return fmt.Sprintf("%.2fGiB", float32(b)/float32(1073741824))
}
@@ -0,0 +1,83 @@
/*
Package fragment contains a Reader which can read Fragments which may be scattered around a volume (and perhaps even
not in sequence). Typically these could be translated from MFT attribute DataRuns. To convert MFT attribute DataRuns
to Fragments for use in the fragment Reader, use mft.DataRunsToFragments().

Implementation notes

When the fragment Reader is near the end of a fragment and a Read() call requests more data than what is left in
the current fragment, the Reader will exhaust only the current fragment and return that data (which could be less
than len(p)). A next Read() call will then seek to the next fragment and continue reading there. When the last
fragment is exhausted by a Read(), it will return the remaining bytes read and a nil error. Any subsequent Read()
calls after that will return 0, io.EOF.

When accessing a new fragment, the Reader will seek to the fragment's absolute Offset from the start
of the contained io.ReadSeeker (using io.SeekStart).
*/
package fragment

import (
    "fmt"
    "io"
    "os"
)

// Fragment contains an absolute Offset in bytes from the start of a volume and a Length of the fragment, also in bytes.
type Fragment struct {
    Offset int64
    Length int64
}

// A fragment Reader will read data from the fragments in order. When one fragment is depleted, it will seek to the
// position of the next fragment and continue reading from there, until all fragments have been exhausted. When the last
// fragment has been exhausted, each subsequent Read() will return io.EOF.
type Reader struct {
    src       io.ReadSeeker
    fragments []Fragment
    idx       int
    remaining int64
    file      *os.File
}

// NewReader initializes a new Reader from the io.ReadSeeker and fragments and returns a pointer to it. Note that
// fragments may not be sequential in order, so the io.ReadSeeker should support seeking backwards (or rather, from the
// start).
func NewReader(src io.ReadSeeker, fragments []Fragment) *Reader {
    return &Reader{src: src, fragments: fragments, idx: -1, remaining: 0}
}

func (r *Reader) Read(p []byte) (n int, err error) {
    if r.idx >= len(r.fragments) {
        // Close the underlying file, if any, once all fragments have been exhausted.
        if f, ok := r.src.(*os.File); ok {
            f.Close()
        }
        return 0, io.EOF
    }

    if len(p) == 0 {
        return 0, nil
    }

    if r.remaining == 0 {
        r.idx++
        if r.idx >= len(r.fragments) {
            return 0, io.EOF
        }
        next := r.fragments[r.idx]
        r.remaining = next.Length
        seeked, err := r.src.Seek(next.Offset, io.SeekStart)
        if err != nil {
            return 0, fmt.Errorf("unable to seek to next offset %d: %v", next.Offset, err)
        }
        if seeked != next.Offset {
            return 0, fmt.Errorf("wanted to seek to %d but reached %d", next.Offset, seeked)
        }
    }

    target := p
    if int64(len(p)) > r.remaining {
        target = p[:r.remaining]
    }

    n, err = io.ReadFull(r.src, target)
    r.remaining -= int64(n)
    return n, err
}
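A self-contained sketch of the Reader semantics described above, using an in-memory bytes.Reader instead of a volume handle; the fragment offsets are made up for illustration:

    package main

    import (
        "bytes"
        "fmt"
        "io"

        "b612.me/wincmd/ntfs/fragment"
    )

    func main() {
        src := bytes.NewReader([]byte("....ABCD....EFGH"))
        frags := []fragment.Fragment{
            {Offset: 12, Length: 4}, // "EFGH"; fragments need not be in on-disk order
            {Offset: 4, Length: 4},  // "ABCD"
        }
        data, err := io.ReadAll(fragment.NewReader(src, frags))
        if err != nil {
            panic(err)
        }
        fmt.Println(string(data)) // EFGHABCD
    }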
@@ -0,0 +1,343 @@
package mft

import (
    "encoding/binary"
    "fmt"
    "time"

    "b612.me/wincmd/ntfs/binutil"
    "b612.me/wincmd/ntfs/utf16"
)

var (
    reallyStrangeEpoch = time.Date(1601, time.January, 1, 0, 0, 0, 0, time.UTC)
)

// StandardInformation represents the data contained in a $STANDARD_INFORMATION attribute.
type StandardInformation struct {
    Creation                time.Time
    FileLastModified        time.Time
    MftLastModified         time.Time
    LastAccess              time.Time
    FileAttributes          FileAttribute
    MaximumNumberOfVersions uint32
    VersionNumber           uint32
    ClassId                 uint32
    OwnerId                 uint32
    SecurityId              uint32
    QuotaCharged            uint64
    UpdateSequenceNumber    uint64
}

// ParseStandardInformation parses the data of a $STANDARD_INFORMATION attribute's data (type
// AttributeTypeStandardInformation) into StandardInformation. Note that no additional correctness checks are done, so
// it's up to the caller to ensure the passed data actually represents a $STANDARD_INFORMATION attribute's data.
func ParseStandardInformation(b []byte) (StandardInformation, error) {
    if len(b) < 48 {
        return StandardInformation{}, fmt.Errorf("expected at least %d bytes but got %d", 48, len(b))
    }

    r := binutil.NewLittleEndianReader(b)
    ownerId := uint32(0)
    securityId := uint32(0)
    quotaCharged := uint64(0)
    updateSequenceNumber := uint64(0)
    if len(b) >= 0x30+4 {
        ownerId = r.Uint32(0x30)
    }
    if len(b) >= 0x34+4 {
        securityId = r.Uint32(0x34)
    }
    if len(b) >= 0x38+8 {
        quotaCharged = r.Uint64(0x38)
    }
    if len(b) >= 0x40+8 {
        updateSequenceNumber = r.Uint64(0x40)
    }
    return StandardInformation{
        Creation:                ConvertFileTime(r.Uint64(0x00)),
        FileLastModified:        ConvertFileTime(r.Uint64(0x08)),
        MftLastModified:         ConvertFileTime(r.Uint64(0x10)),
        LastAccess:              ConvertFileTime(r.Uint64(0x18)),
        FileAttributes:          FileAttribute(r.Uint32(0x20)),
        MaximumNumberOfVersions: r.Uint32(0x24),
        VersionNumber:           r.Uint32(0x28),
        ClassId:                 r.Uint32(0x2C),
        OwnerId:                 ownerId,
        SecurityId:              securityId,
        QuotaCharged:            quotaCharged,
        UpdateSequenceNumber:    updateSequenceNumber,
    }, nil
}

// FileAttribute represents a bit mask of various file attributes.
type FileAttribute uint32

// Bit values for FileAttribute. For example, a normal, hidden file has value 0x0082.
const (
    FileAttributeReadOnly          FileAttribute = 0x0001
    FileAttributeHidden            FileAttribute = 0x0002
    FileAttributeSystem            FileAttribute = 0x0004
    FileAttributeArchive           FileAttribute = 0x0020
    FileAttributeDevice            FileAttribute = 0x0040
    FileAttributeNormal            FileAttribute = 0x0080
    FileAttributeTemporary         FileAttribute = 0x0100
    FileAttributeSparseFile        FileAttribute = 0x0200
    FileAttributeReparsePoint      FileAttribute = 0x0400
    FileAttributeCompressed        FileAttribute = 0x0800
    FileAttributeOffline           FileAttribute = 0x1000
    FileAttributeNotContentIndexed FileAttribute = 0x2000
    FileAttributeEncrypted         FileAttribute = 0x4000
)

// Is checks if this FileAttribute's bit mask contains the specified attribute value.
func (a *FileAttribute) Is(c FileAttribute) bool {
    return *a&c == c
}

// FileNameNamespace indicates the namespace of a $FILE_NAME attribute's file name.
type FileNameNamespace byte

const (
    FileNameNamespacePosix    FileNameNamespace = 0
    FileNameNamespaceWin32    FileNameNamespace = 1
    FileNameNamespaceDos      FileNameNamespace = 2
    FileNameNamespaceWin32Dos FileNameNamespace = 3
)

// FileName represents the data of a $FILE_NAME attribute. ParentFileReference points to the MFT record that is the
// parent (ie. containing directory of this file). The AllocatedSize and ActualSize may be zero, in which case the file
// size may be found in a $DATA attribute instead (it could also be the ActualSize is zero, while the AllocatedSize does
// contain a value).
type FileName struct {
    ParentFileReference FileReference
    Creation            time.Time
    FileLastModified    time.Time
    MftLastModified     time.Time
    LastAccess          time.Time
    AllocatedSize       uint64
    ActualSize          uint64
    Flags               FileAttribute
    ExtendedData        uint32
    Namespace           FileNameNamespace
    Name                string
}

// ParseFileName parses the data of a $FILE_NAME attribute's data (type AttributeTypeFileName) into FileName. Note that
// no additional correctness checks are done, so it's up to the caller to ensure the passed data actually represents a
// $FILE_NAME attribute's data.
func ParseFileName(b []byte) (FileName, error) {
    if len(b) < 66 {
        return FileName{}, fmt.Errorf("expected at least %d bytes but got %d", 66, len(b))
    }

    fileNameLength := int(b[0x40]) * 2
    minExpectedSize := 66 + fileNameLength
    if len(b) < minExpectedSize {
        return FileName{}, fmt.Errorf("expected at least %d bytes but got %d", minExpectedSize, len(b))
    }

    r := binutil.NewLittleEndianReader(b)
    parentRef, err := ParseFileReference(r.Read(0x00, 8))
    if err != nil {
        return FileName{}, fmt.Errorf("unable to parse file reference: %v", err)
    }
    return FileName{
        ParentFileReference: parentRef,
        Creation:            ConvertFileTime(r.Uint64(0x08)),
        FileLastModified:    ConvertFileTime(r.Uint64(0x10)),
        MftLastModified:     ConvertFileTime(r.Uint64(0x18)),
        LastAccess:          ConvertFileTime(r.Uint64(0x20)),
        AllocatedSize:       r.Uint64(0x28),
        ActualSize:          r.Uint64(0x30),
        Flags:               FileAttribute(r.Uint32(0x38)),
        ExtendedData:        r.Uint32(0x3c),
        Namespace:           FileNameNamespace(r.Byte(0x41)),
        Name:                utf16.DecodeString(r.Read(0x42, fileNameLength), binary.LittleEndian),
    }, nil
}

// AttributeListEntry represents an entry in an $ATTRIBUTE_LIST attribute. The Type indicates the attribute type, while
// the BaseRecordReference indicates which MFT record the attribute is located in (ie. an "extension record", if it is
// not the same as the one where the $ATTRIBUTE_LIST is located).
type AttributeListEntry struct {
    Type                AttributeType
    Name                string
    StartingVCN         uint64
    BaseRecordReference FileReference
    AttributeId         uint16
}

// ParseAttributeList parses the data of a $ATTRIBUTE_LIST attribute's data (type AttributeTypeAttributeList) into a
// list of AttributeListEntry. Note that no additional correctness checks are done, so it's up to the caller to ensure
// the passed data actually represents a $ATTRIBUTE_LIST attribute's data.
func ParseAttributeList(b []byte) ([]AttributeListEntry, error) {
    if len(b) < 26 {
        return []AttributeListEntry{}, fmt.Errorf("expected at least %d bytes but got %d", 26, len(b))
    }

    entries := make([]AttributeListEntry, 0)

    for len(b) > 0 {
        r := binutil.NewLittleEndianReader(b)
        entryLength := int(r.Uint16(0x04))
        if len(b) < entryLength {
            return entries, fmt.Errorf("expected at least %d bytes remaining for AttributeList entry but is %d", entryLength, len(b))
        }
        nameLength := int(r.Byte(0x06))
        name := ""
        if nameLength != 0 {
            nameOffset := int(r.Byte(0x07))
            name = utf16.DecodeString(r.Read(nameOffset, nameLength*2), binary.LittleEndian)
        }
        baseRef, err := ParseFileReference(r.Read(0x10, 8))
        if err != nil {
            return entries, fmt.Errorf("unable to parse base record reference: %v", err)
        }
        entry := AttributeListEntry{
            Type:                AttributeType(r.Uint32(0)),
            Name:                name,
            StartingVCN:         r.Uint64(0x08),
            BaseRecordReference: baseRef,
            AttributeId:         r.Uint16(0x18),
        }
        entries = append(entries, entry)
        b = r.ReadFrom(entryLength)
    }
    return entries, nil
}

// CollationType indicates how the entries in an index should be ordered.
type CollationType uint32

const (
    CollationTypeBinary            CollationType = 0x00000000
    CollationTypeFileName          CollationType = 0x00000001
    CollationTypeUnicodeString     CollationType = 0x00000002
    CollationTypeNtofsULong        CollationType = 0x00000010
    CollationTypeNtofsSid          CollationType = 0x00000011
    CollationTypeNtofsSecurityHash CollationType = 0x00000012
    CollationTypeNtofsUlongs       CollationType = 0x00000013
)

// IndexRoot represents the data (header and entries) of an $INDEX_ROOT attribute, which typically is the root of a
// directory's B+tree index containing file names of the directory (but could be used for other types of indices, too).
// The AttributeType is the type of attributes that are contained in the entries (currently only $FILE_NAME attributes
// are supported).
type IndexRoot struct {
    AttributeType     AttributeType
    CollationType     CollationType
    BytesPerRecord    uint32
    ClustersPerRecord uint32
    Flags             uint32
    Entries           []IndexEntry
}

// IndexEntry represents an entry in a B+tree index. Currently only $FILE_NAME attribute entries are supported. The
// FileReference points to the MFT record of the indexed file.
type IndexEntry struct {
    FileReference FileReference
    Flags         uint32
    FileName      FileName
    SubNodeVCN    uint64
}

// ParseIndexRoot parses the data of a $INDEX_ROOT attribute's data (type AttributeTypeIndexRoot) into
// IndexRoot. Note that no additional correctness checks are done, so it's up to the caller to ensure the passed data
// actually represents a $INDEX_ROOT attribute's data.
func ParseIndexRoot(b []byte) (IndexRoot, error) {
    if len(b) < 32 {
        return IndexRoot{}, fmt.Errorf("expected at least %d bytes but got %d", 32, len(b))
    }
    r := binutil.NewLittleEndianReader(b)
    attributeType := AttributeType(r.Uint32(0x00))
    if attributeType != AttributeTypeFileName {
        return IndexRoot{}, fmt.Errorf("unable to handle attribute type %d (%s) in $INDEX_ROOT", attributeType, attributeType.Name())
    }

    uTotalSize := r.Uint32(0x14)
    if int64(uTotalSize) > maxInt {
        return IndexRoot{}, fmt.Errorf("index root size %d overflows maximum int value %d", uTotalSize, maxInt)
    }
    totalSize := int(uTotalSize)
    expectedSize := totalSize + 16
    if len(b) < expectedSize {
        return IndexRoot{}, fmt.Errorf("expected %d bytes in $INDEX_ROOT but is %d", expectedSize, len(b))
    }
    entries := []IndexEntry{}
    if totalSize >= 16 {
        parsed, err := parseIndexEntries(r.Read(0x20, totalSize-16))
        if err != nil {
            return IndexRoot{}, fmt.Errorf("error parsing index entries: %v", err)
        }
        entries = parsed
    }

    return IndexRoot{
        AttributeType:     attributeType,
        CollationType:     CollationType(r.Uint32(0x04)),
        BytesPerRecord:    r.Uint32(0x08),
        ClustersPerRecord: r.Uint32(0x0C),
        Flags:             r.Uint32(0x1C),
        Entries:           entries,
    }, nil
}

func parseIndexEntries(b []byte) ([]IndexEntry, error) {
    if len(b) < 13 {
        return []IndexEntry{}, fmt.Errorf("expected at least %d bytes but got %d", 13, len(b))
    }
    entries := make([]IndexEntry, 0)
    for len(b) > 0 {
        r := binutil.NewLittleEndianReader(b)
        entryLength := int(r.Uint16(0x08))

        if len(b) < entryLength {
            return entries, fmt.Errorf("index entry length indicates %d bytes but got %d", entryLength, len(b))
        }

        flags := r.Uint32(0x0C)
        pointsToSubNode := flags&0b1 != 0
        isLastEntryInNode := flags&0b10 != 0
        contentLength := int(r.Uint16(0x0A))

        fileName := FileName{}
        if contentLength != 0 && !isLastEntryInNode {
            parsedFileName, err := ParseFileName(r.Read(0x10, contentLength))
            if err != nil {
                return entries, fmt.Errorf("error parsing $FILE_NAME record in index entry: %v", err)
            }
            fileName = parsedFileName
        }
        subNodeVcn := uint64(0)
        if pointsToSubNode {
            subNodeVcn = r.Uint64(entryLength - 8)
        }

        fileReference, err := ParseFileReference(r.Read(0x00, 8))
        if err != nil {
            return entries, fmt.Errorf("unable to parse file reference: %v", err)
        }
        entry := IndexEntry{
            FileReference: fileReference,
            Flags:         flags,
            FileName:      fileName,
            SubNodeVCN:    subNodeVcn,
        }
        entries = append(entries, entry)
        b = r.ReadFrom(entryLength)
    }
    return entries, nil
}

// ConvertFileTime converts a Windows "file time" to a time.Time. A "file time" is a 64-bit value that represents the
// number of 100-nanosecond intervals that have elapsed since 12:00 A.M. January 1, 1601 Coordinated Universal Time
// (UTC). See also: https://docs.microsoft.com/en-us/windows/win32/sysinfo/file-times
func ConvertFileTime(timeValue uint64) time.Time {
    dur := time.Duration(int64(timeValue))
    r := reallyStrangeEpoch
    // Adding dur a hundred times converts the 100-nanosecond intervals into nanoseconds without
    // overflowing a single time.Duration value.
    for i := 0; i < 100; i++ {
        r = r.Add(dur)
    }
    return r
}
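A worked example of the file-time conversion above: 11644473600 seconds separate 1601-01-01 and 1970-01-01, so the Unix epoch expressed as a Windows file time is 11644473600 * 10^7 = 116444736000000000 intervals of 100 ns:

    package main

    import (
        "fmt"

        "b612.me/wincmd/ntfs/mft"
    )

    func main() {
        t := mft.ConvertFileTime(116444736000000000)
        fmt.Println(t) // 1970-01-01 00:00:00 +0000 UTC
    }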
@@ -0,0 +1,462 @@
/*
Package mft provides functions to parse records and their attributes in an NTFS Master File Table ("MFT" for short).

Basic usage

First parse a record using mft.ParseRecord(), which parses the record header and the attribute headers. Then parse
each attribute's data individually using the various mft.Parse...() functions.

    // Error handling left out for brevity; data contains the raw bytes of a single MFT record
    record, err := mft.ParseRecord(data)
    attrs := record.FindAttributes(mft.AttributeTypeFileName)
    fileName, err := mft.ParseFileName(attrs[0].Data)
*/
package mft

import (
    "bytes"
    "encoding/binary"
    "fmt"

    "b612.me/wincmd/ntfs/binutil"
    "b612.me/wincmd/ntfs/fragment"
    "b612.me/wincmd/ntfs/utf16"
)

var (
    fileSignature = []byte{0x46, 0x49, 0x4c, 0x45}
)

const maxInt = int64(^uint(0) >> 1)

// A Record represents an MFT entry, excluding all technical data (such as "offset to first attribute"). The Attributes
// list only contains the attribute headers and raw data; the attribute data has to be parsed separately. When this is a
// base record, the BaseRecordReference will be zero. When it is an extension record, the BaseRecordReference points to
// the record's base record.
type Record struct {
    Signature             []byte
    FileReference         FileReference
    BaseRecordReference   FileReference
    LogFileSequenceNumber uint64
    HardLinkCount         int
    Flags                 RecordFlag
    ActualSize            uint32
    AllocatedSize         uint32
    NextAttributeId       int
    Attributes            []Attribute
}

// ParseRecord parses bytes into a Record after applying fixup. The data is assumed to be in Little Endian order. Only
// the attribute headers are parsed, not the actual attribute data.
func ParseRecord(b []byte) (Record, error) {
    if len(b) < 42 {
        return Record{}, fmt.Errorf("record data length should be at least 42 but is %d", len(b))
    }
    sig := b[:4]
    if bytes.Compare(sig, fileSignature) != 0 {
        return Record{}, fmt.Errorf("unknown record signature: %# x", sig)
    }

    b = binutil.Duplicate(b)
    r := binutil.NewLittleEndianReader(b)
    baseRecordRef, err := ParseFileReference(r.Read(0x20, 8))
    if err != nil {
        return Record{}, fmt.Errorf("unable to parse base record reference: %v", err)
    }

    firstAttributeOffset := int(r.Uint16(0x14))
    if firstAttributeOffset < 0 || firstAttributeOffset >= len(b) {
        return Record{}, fmt.Errorf("invalid first attribute offset %d (data length: %d)", firstAttributeOffset, len(b))
    }

    updateSequenceOffset := int(r.Uint16(0x04))
    updateSequenceSize := int(r.Uint16(0x06))
    b, err = applyFixUp(b, updateSequenceOffset, updateSequenceSize)
    if err != nil {
        return Record{}, fmt.Errorf("unable to apply fixup: %v", err)
    }

    attributes, err := ParseAttributes(b[firstAttributeOffset:])
    if err != nil {
        return Record{}, err
    }
    return Record{
        Signature:             binutil.Duplicate(sig),
        FileReference:         FileReference{RecordNumber: uint64(r.Uint32(0x2C)), SequenceNumber: r.Uint16(0x10)},
        BaseRecordReference:   baseRecordRef,
        LogFileSequenceNumber: r.Uint64(0x08),
        HardLinkCount:         int(r.Uint16(0x12)),
        Flags:                 RecordFlag(r.Uint16(0x16)),
        ActualSize:            r.Uint32(0x18),
        AllocatedSize:         r.Uint32(0x1C),
        NextAttributeId:       int(r.Uint16(0x28)),
        Attributes:            attributes,
    }, nil
}

// A FileReference represents a reference to an MFT record. Since the FileReference in a Record is only 4 bytes, the
// RecordNumber will probably not exceed 32 bits.
type FileReference struct {
    RecordNumber   uint64
    SequenceNumber uint16
}

// ToUint64 packs the FileReference back into its on-disk 8-byte form (6 bytes record number, 2 bytes sequence
// number) interpreted as a little-endian uint64.
func (f FileReference) ToUint64() uint64 {
    origin := make([]byte, 8)
    binary.LittleEndian.PutUint16(origin, f.SequenceNumber)
    origin[6] = origin[0]
    origin[7] = origin[1]
    binary.LittleEndian.PutUint32(origin, uint32(f.RecordNumber))
    return binary.LittleEndian.Uint64(origin)
}

// ParseFileReference parses a Little Endian ordered 8-byte slice into a FileReference. The first 6 bytes indicate the
// record number, while the final 2 bytes indicate the sequence number.
func ParseFileReference(b []byte) (FileReference, error) {
    if len(b) != 8 {
        return FileReference{}, fmt.Errorf("expected 8 bytes but got %d", len(b))
    }

    return FileReference{
        RecordNumber:   binary.LittleEndian.Uint64(padTo(b[:6], 8)),
        SequenceNumber: binary.LittleEndian.Uint16(b[6:]),
    }, nil
}
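A small sketch of the 6 + 2 byte layout handled by ParseFileReference and ToUint64; it assumes the unexported padTo helper (defined elsewhere in the package) zero-pads on the right, which is what the record-number interpretation above implies:

    package main

    import (
        "fmt"

        "b612.me/wincmd/ntfs/mft"
    )

    func main() {
        // Record number 5 in the low 6 bytes, sequence number 2 in the high 2 bytes.
        raw := []byte{0x05, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0x00}
        ref, err := mft.ParseFileReference(raw)
        if err != nil {
            panic(err)
        }
        fmt.Println(ref.RecordNumber, ref.SequenceNumber) // 5 2
        fmt.Printf("%#x\n", ref.ToUint64())               // 0x2000000000005
    }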
||||||
|
|
||||||
|
// RecordFlag represents a bit mask flag indicating the status of the MFT record.
|
||||||
|
type RecordFlag uint16
|
||||||
|
|
||||||
|
// Bit values for the RecordFlag. For example, an in-use directory has value 0x0003.
|
||||||
|
const (
|
||||||
|
RecordFlagInUse RecordFlag = 0x0001
|
||||||
|
RecordFlagIsDirectory RecordFlag = 0x0002
|
||||||
|
RecordFlagInExtend RecordFlag = 0x0004
|
||||||
|
RecordFlagIsIndex RecordFlag = 0x0008
|
||||||
|
)
|
||||||
|
|
||||||
|
// Is checks if this RecordFlag's bit mask contains the specified flag.
|
||||||
|
func (f *RecordFlag) Is(c RecordFlag) bool {
|
||||||
|
return *f&c == c
|
||||||
|
}
|
||||||
|
|
||||||
|
func applyFixUp(b []byte, offset int, length int) ([]byte, error) {
|
||||||
|
r := binutil.NewLittleEndianReader(b)
|
||||||
|
|
||||||
|
updateSequence := r.Read(offset, length*2) // length is in pairs, not bytes
|
||||||
|
updateSequenceNumber := updateSequence[:2]
|
||||||
|
updateSequenceArray := updateSequence[2:]
|
||||||
|
|
||||||
|
sectorCount := len(updateSequenceArray) / 2
|
||||||
|
sectorSize := len(b) / sectorCount
|
||||||
|
|
||||||
|
for i := 1; i <= sectorCount; i++ {
|
||||||
|
offset := sectorSize*i - 2
|
||||||
|
if bytes.Compare(updateSequenceNumber, b[offset:offset+2]) != 0 {
|
||||||
|
return nil, fmt.Errorf("update sequence mismatch at pos %d", offset)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
for i := 0; i < sectorCount; i++ {
|
||||||
|
offset := sectorSize*(i+1) - 2
|
||||||
|
num := i * 2
|
||||||
|
copy(b[offset:offset+2], updateSequenceArray[num:num+2])
|
||||||
|
}
|
||||||
|
|
||||||
|
return b, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
// FindAttributes returns all attributes of the specified type contained in this record. When no matches are found an
|
||||||
|
// empty slice is returned.
|
||||||
|
func (r *Record) FindAttributes(attrType AttributeType) []Attribute {
|
||||||
|
ret := make([]Attribute, 0)
|
||||||
|
for _, a := range r.Attributes {
|
||||||
|
if a.Type == attrType {
|
||||||
|
ret = append(ret, a)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return ret
|
||||||
|
}
|
||||||
|
|
||||||
|
// Attribute represents an MFT record attribute header and its corresponding raw attribute Data (excluding header data).
|
||||||
|
// When the attribute is Resident, the Data contains the actual attribute's data. When the attribute is non-resident,
|
||||||
|
// the Data contains DataRuns pointing to the actual data. DataRun data can be parsed using ParseDataRuns().
|
||||||
|
type Attribute struct {
|
||||||
|
Type AttributeType
|
||||||
|
Resident bool
|
||||||
|
Name string
|
||||||
|
Flags AttributeFlags
|
||||||
|
AttributeId int
|
||||||
|
AllocatedSize uint64
|
||||||
|
ActualSize uint64
|
||||||
|
Data []byte
|
||||||
|
}
|
||||||
|
|
||||||
|
// AttributeType represents the type of an Attribute. Use Name() to get the attribute type's name.
|
||||||
|
type AttributeType uint32
|
||||||
|
|
||||||
|
// Known values for AttributeType. Note that other values might occur too.
|
||||||
|
const (
|
||||||
|
AttributeTypeStandardInformation AttributeType = 0x10 // $STANDARD_INFORMATION; always resident
|
||||||
|
AttributeTypeAttributeList AttributeType = 0x20 // $ATTRIBUTE_LIST; mixed residency
|
||||||
|
AttributeTypeFileName AttributeType = 0x30 // $FILE_NAME; always resident
|
||||||
|
AttributeTypeObjectId AttributeType = 0x40 // $OBJECT_ID; always resident
|
||||||
|
AttributeTypeSecurityDescriptor AttributeType = 0x50 // $SECURITY_DESCRIPTOR; always resident?
|
||||||
|
AttributeTypeVolumeName AttributeType = 0x60 // $VOLUME_NAME; always resident?
|
||||||
|
AttributeTypeVolumeInformation AttributeType = 0x70 // $VOLUME_INFORMATION; never resident?
|
||||||
|
AttributeTypeData AttributeType = 0x80 // $DATA; mixed residency
|
||||||
|
AttributeTypeIndexRoot AttributeType = 0x90 // $INDEX_ROOT; always resident
|
||||||
|
AttributeTypeIndexAllocation AttributeType = 0xa0 // $INDEX_ALLOCATION; never resident?
|
||||||
|
AttributeTypeBitmap AttributeType = 0xb0 // $BITMAP; nearly always resident?
|
||||||
|
AttributeTypeReparsePoint AttributeType = 0xc0 // $REPARSE_POINT; always resident?
|
||||||
|
AttributeTypeEAInformation AttributeType = 0xd0 // $EA_INFORMATION; always resident
|
||||||
|
AttributeTypeEA AttributeType = 0xe0 // $EA; nearly always resident?
|
||||||
|
AttributeTypePropertySet AttributeType = 0xf0 // $PROPERTY_SET
|
||||||
|
AttributeTypeLoggedUtilityStream AttributeType = 0x100 // $LOGGED_UTILITY_STREAM; always resident
|
||||||
|
AttributeTypeTerminator AttributeType = 0xFFFFFFFF // Indicates the last attribute in a list; will not actually be returned by ParseAttributes
|
||||||
|
)
|
||||||
|
|
||||||
|
// AttributeFlags represents a bit mask flag indicating various properties of an attribute's data.
|
||||||
|
type AttributeFlags uint16
|
||||||
|
|
||||||
|
// Bit values for the AttributeFlags. For example, an encrypted, compressed attribute has value 0x4001.
|
||||||
|
const (
|
||||||
|
AttributeFlagsCompressed AttributeFlags = 0x0001
|
||||||
|
AttributeFlagsEncrypted AttributeFlags = 0x4000
|
||||||
|
AttributeFlagsSparse AttributeFlags = 0x8000
|
||||||
|
)
|
||||||
|
|
||||||
|
// Is checks if this AttributeFlags's bit mask contains the specified flag.
|
||||||
|
func (f *AttributeFlags) Is(c AttributeFlags) bool {
|
||||||
|
return *f&c == c
|
||||||
|
}
|
||||||
|
|
||||||
|
// ParseAttributes parses bytes into Attributes. The data is assumed to be in Little Endian order. Only the attribute
|
||||||
|
// headers are parsed, not the actual attribute data.
|
||||||
|
func ParseAttributes(b []byte) ([]Attribute, error) {
|
||||||
|
if len(b) == 0 {
|
||||||
|
return []Attribute{}, nil
|
||||||
|
}
|
||||||
|
attributes := make([]Attribute, 0)
|
||||||
|
for len(b) > 0 {
|
||||||
|
if len(b) < 4 {
|
||||||
|
return nil, fmt.Errorf("attribute header data should be at least 4 bytes but is %d", len(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
r := binutil.NewLittleEndianReader(b)
|
||||||
|
attrType := r.Uint32(0)
|
||||||
|
if attrType == uint32(AttributeTypeTerminator) {
|
||||||
|
break
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(b) < 8 {
|
||||||
|
return nil, fmt.Errorf("cannot read attribute header record length, data should be at least 8 bytes but is %d", len(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
uRecordLength := r.Uint32(0x04)
|
||||||
|
if int64(uRecordLength) > maxInt {
|
||||||
|
return nil, fmt.Errorf("record length %d overflows maximum int value %d", uRecordLength, maxInt)
|
||||||
|
}
|
||||||
|
recordLength := int(uRecordLength)
|
||||||
|
if recordLength <= 0 {
|
||||||
|
return nil, fmt.Errorf("cannot handle attribute with zero or negative record length %d", recordLength)
|
||||||
|
}
|
||||||
|
|
||||||
|
if recordLength > len(b) {
|
||||||
|
return nil, fmt.Errorf("attribute record length %d exceeds data length %d", recordLength, len(b))
|
||||||
|
}
|
||||||
|
|
||||||
|
recordData := r.Read(0, recordLength)
|
||||||
|
attribute, err := ParseAttribute(recordData)
|
||||||
|
if err != nil {
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
attributes = append(attributes, attribute)
|
||||||
|
b = r.ReadFrom(recordLength)
|
||||||
|
}
|
||||||
|
return attributes, nil
|
||||||
|
}

// ParseAttribute parses bytes into an Attribute. The data is assumed to be in Little Endian order. Only the attribute
// headers are parsed, not the actual attribute data.
func ParseAttribute(b []byte) (Attribute, error) {
    if len(b) < 22 {
        return Attribute{}, fmt.Errorf("attribute data should be at least 22 bytes but is %d", len(b))
    }

    r := binutil.NewLittleEndianReader(b)

    nameLength := r.Byte(0x09)
    nameOffset := r.Uint16(0x0A)

    name := ""
    if nameLength != 0 {
        nameBytes := r.Read(int(nameOffset), int(nameLength)*2)
        name = utf16.DecodeString(nameBytes, binary.LittleEndian)
    }

    resident := r.Byte(0x08) == 0x00
    var attributeData []byte
    actualSize := uint64(0)
    allocatedSize := uint64(0)
    if resident {
        dataOffset := int(r.Uint16(0x14))
        uDataLength := r.Uint32(0x10)
        if int64(uDataLength) > maxInt {
            return Attribute{}, fmt.Errorf("attribute data length %d overflows maximum int value %d", uDataLength, maxInt)
        }
        dataLength := int(uDataLength)
        expectedDataLength := dataOffset + dataLength

        if len(b) < expectedDataLength {
            return Attribute{}, fmt.Errorf("expected attribute data length to be at least %d but is %d", expectedDataLength, len(b))
        }

        attributeData = r.Read(dataOffset, dataLength)
    } else {
        dataOffset := int(r.Uint16(0x20))
        if len(b) < dataOffset {
            return Attribute{}, fmt.Errorf("expected attribute data length to be at least %d but is %d", dataOffset, len(b))
        }
        allocatedSize = r.Uint64(0x28)
        actualSize = r.Uint64(0x30)
        attributeData = r.ReadFrom(int(dataOffset))
    }

    return Attribute{
        Type:          AttributeType(r.Uint32(0)),
        Resident:      resident,
        Name:          name,
        Flags:         AttributeFlags(r.Uint16(0x0C)),
        AttributeId:   int(r.Uint16(0x0E)),
        AllocatedSize: allocatedSize,
        ActualSize:    actualSize,
        Data:          binutil.Duplicate(attributeData),
    }, nil
}
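
// For a non-resident attribute, Data holds the raw run-list bytes rather than the attribute's
// contents; those bytes can be decoded with ParseDataRuns (GetMFTFile does exactly this for the
// $MFT's non-resident $DATA attribute).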

// A DataRun represents a fragment of data somewhere on a volume. The OffsetCluster, which can be negative, is relative
// to a previous DataRun's offset. The OffsetCluster of the first DataRun in a list is relative to the beginning of the
// volume.
type DataRun struct {
    OffsetCluster    int64
    LengthInClusters uint64
}

// ParseDataRuns parses bytes into a list of DataRuns. Each DataRun's OffsetCluster is relative to the DataRun before
// it. The first element's OffsetCluster is relative to the beginning of the volume.
func ParseDataRuns(b []byte) ([]DataRun, error) {
    if len(b) == 0 {
        return []DataRun{}, nil
    }

    runs := make([]DataRun, 0)
    for len(b) > 0 {
        r := binutil.NewLittleEndianReader(b)
        header := r.Byte(0)
        if header == 0 {
            break
        }

        lengthLength := int(header &^ 0xF0)
        offsetLength := int(header >> 4)

        dataRunDataLength := offsetLength + lengthLength

        headerAndDataLength := dataRunDataLength + 1
        if len(b) < headerAndDataLength {
            return nil, fmt.Errorf("expected at least %d bytes of datarun data but is %d", headerAndDataLength, len(b))
        }

        dataRunData := r.Reader(1, dataRunDataLength)

        lengthBytes := dataRunData.Read(0, lengthLength)
        dataLength := binary.LittleEndian.Uint64(padTo(lengthBytes, 8))

        offsetBytes := dataRunData.Read(lengthLength, offsetLength)
        dataOffset := int64(binary.LittleEndian.Uint64(padTo(offsetBytes, 8)))

        runs = append(runs, DataRun{OffsetCluster: dataOffset, LengthInClusters: dataLength})

        b = r.ReadFrom(headerAndDataLength)
    }

    return runs, nil
}
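
// Illustrative decoding (standard NTFS run-list encoding; example values): a header byte of
// 0x21 means a 1-byte length field followed by a 2-byte offset field, so the run 21 18 34 56
// yields LengthInClusters = 0x18 (24 clusters) and OffsetCluster = 0x5634. padTo sign-extends
// the offset bytes, so a negative relative offset survives the conversion to int64.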

// DataRunsToFragments transforms a list of DataRuns with relative offsets and lengths specified in clusters into a
// list of fragment.Fragment elements with absolute offsets and lengths specified in bytes (for example for use in a
// fragment.Reader). Note that data will probably not align to a cluster exactly, so there could be some padding at the
// end. It is up to the user of the Fragments to limit reads to the actual data size (e.g. by using an io.LimitedReader
// or modifying the last element in the list to limit its length).
func DataRunsToFragments(runs []DataRun, bytesPerCluster int) []fragment.Fragment {
    frags := make([]fragment.Fragment, len(runs))
    previousOffsetCluster := int64(0)
    for i, run := range runs {
        exactClusterOffset := previousOffsetCluster + run.OffsetCluster
        frags[i] = fragment.Fragment{
            Offset: exactClusterOffset * int64(bytesPerCluster),
            Length: int64(run.LengthInClusters) * int64(bytesPerCluster),
        }
        previousOffsetCluster = exactClusterOffset
    }
    return frags
}
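
// Worked example (illustrative numbers): with 4096-byte clusters, the runs
// {OffsetCluster: 4, LengthInClusters: 2} and {OffsetCluster: -1, LengthInClusters: 3} become
// {Offset: 16384, Length: 8192} and {Offset: 12288, Length: 12288}, because the second run's
// offset is relative to the first run's absolute cluster (4 - 1 = 3).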
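// padTo extends little-endian integer data to the requested length, zero-filling by default and
// filling with 0xFF when the most significant bit of the last input byte is set, so that signed
// values keep their sign. For example, padTo([]byte{0x34, 0xF6}, 8) sign-extends to bytes that
// decode as the int64 value -2508.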
func padTo(data []byte, length int) []byte {
    if len(data) > length {
        return data
    }
    if len(data) == length {
        return data
    }
    result := make([]byte, length)
    if len(data) == 0 {
        return result
    }
    copy(result, data)
    if data[len(data)-1]&0b10000000 == 0b10000000 {
        for i := len(data); i < length; i++ {
            result[i] = 0xFF
        }
    }
    return result
}

// Name returns a string representation of the attribute type. For example "$STANDARD_INFORMATION" or "$FILE_NAME". For
// any attribute type which is unknown, Name will return "unknown".
func (at AttributeType) Name() string {
    switch at {
    case AttributeTypeStandardInformation:
        return "$STANDARD_INFORMATION"
    case AttributeTypeAttributeList:
        return "$ATTRIBUTE_LIST"
    case AttributeTypeFileName:
        return "$FILE_NAME"
    case AttributeTypeObjectId:
        return "$OBJECT_ID"
    case AttributeTypeSecurityDescriptor:
        return "$SECURITY_DESCRIPTOR"
    case AttributeTypeVolumeName:
        return "$VOLUME_NAME"
    case AttributeTypeVolumeInformation:
        return "$VOLUME_INFORMATION"
    case AttributeTypeData:
        return "$DATA"
    case AttributeTypeIndexRoot:
        return "$INDEX_ROOT"
    case AttributeTypeIndexAllocation:
        return "$INDEX_ALLOCATION"
    case AttributeTypeBitmap:
        return "$BITMAP"
    case AttributeTypeReparsePoint:
        return "$REPARSE_POINT"
    case AttributeTypeEAInformation:
        return "$EA_INFORMATION"
    case AttributeTypeEA:
        return "$EA"
    case AttributeTypePropertySet:
        return "$PROPERTY_SET"
    case AttributeTypeLoggedUtilityStream:
        return "$LOGGED_UTILITY_STREAM"
    }
    return "unknown"
}

@ -0,0 +1,187 @@
package mft

import (
    "b612.me/wincmd/ntfs/bootsect"
    "b612.me/wincmd/ntfs/fragment"
    "bytes"
    "fmt"
    "github.com/t9t/gomft/mft"
    "io"
    "os"
    "runtime"
)

const supportedOemId = "NTFS    "

const isWin = runtime.GOOS == "windows"

func GetMFTFileBytes(volume string) ([]byte, error) {
    reader, length, err := GetMFTFile(volume)
    if err != nil {
        return nil, err
    }
    // Start with an empty buffer that has capacity for the full table, so that
    // copyBytes appends exactly `length` bytes.
    bfio := bytes.NewBuffer(make([]byte, 0, length))
    written, err := copyBytes(bfio, reader, length)
    if err != nil {
        return nil, err
    }
    if written != length {
        return nil, fmt.Errorf("write incomplete: expected %d bytes but wrote %d", length, written)
    }
    return bfio.Bytes(), nil
}

func DumpMFTFile(volume, filepath string, fn func(int64, int64, float64)) error {
    reader, length, err := GetMFTFile(volume)
    if err != nil {
        return err
    }
    out, err := os.Create(filepath)
    if err != nil {
        return err
    }
    defer out.Close()
    written, err := copyFiles(out, reader, length, fn)
    if err != nil {
        return err
    }
    if written != length {
        return fmt.Errorf("write incomplete: expected %d bytes but wrote %d", length, written)
    }
    return nil
}
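
// Minimal usage sketch (paths are placeholders): dump the live $MFT of drive C while reporting
// progress through the callback:
//
//     err := DumpMFTFile(`C:\`, `C:\temp\mft.bin`, func(done, total int64, pct float64) {
//         fmt.Printf("\r%d / %d bytes (%.1f%%)", done, total, pct)
//     })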

func GetMFTFile(volume string) (io.Reader, int64, error) {
    if isWin {
        volume = `\\.\` + volume[:len(volume)-1]
    }
    in, err := os.Open(volume)
    if err != nil {
        return nil, 0, err
    }
    bootSectorData := make([]byte, 512)
    _, err = io.ReadFull(in, bootSectorData)
    if err != nil {
        return nil, 0, fmt.Errorf("Unable to read boot sector: %v\n", err)
    }

    bootSector, err := bootsect.Parse(bootSectorData)
    if err != nil {
        return nil, 0, fmt.Errorf("Unable to parse boot sector data: %v\n", err)
    }

    if bootSector.OemId != supportedOemId {
        return nil, 0, fmt.Errorf("Unknown OemId (file system type) %q (expected %q)\n", bootSector.OemId, supportedOemId)
    }

    bytesPerCluster := bootSector.BytesPerSector * bootSector.SectorsPerCluster
    mftPosInBytes := int64(bootSector.MftClusterNumber) * int64(bytesPerCluster)

    _, err = in.Seek(mftPosInBytes, 0)
    if err != nil {
        return nil, 0, fmt.Errorf("Unable to seek to MFT position: %v\n", err)
    }

    mftSizeInBytes := bootSector.FileRecordSegmentSizeInBytes
    mftData := make([]byte, mftSizeInBytes)
    _, err = io.ReadFull(in, mftData)
    if err != nil {
        return nil, 0, fmt.Errorf("Unable to read $MFT record: %v\n", err)
    }

    record, err := mft.ParseRecord(mftData)
    if err != nil {
        return nil, 0, fmt.Errorf("Unable to parse $MFT record: %v\n", err)
    }

    dataAttributes := record.FindAttributes(mft.AttributeTypeData)
    if len(dataAttributes) == 0 {
        return nil, 0, fmt.Errorf("No $DATA attribute found in $MFT record\n")
    }

    if len(dataAttributes) > 1 {
        return nil, 0, fmt.Errorf("More than 1 $DATA attribute found in $MFT record\n")
    }

    dataAttribute := dataAttributes[0]
    if dataAttribute.Resident {
        return nil, 0, fmt.Errorf("Don't know how to handle resident $DATA attribute in $MFT record\n")
    }

    dataRuns, err := ParseDataRuns(dataAttribute.Data)
    if err != nil {
        return nil, 0, fmt.Errorf("Unable to parse dataruns in $MFT $DATA record: %v\n", err)
    }

    if len(dataRuns) == 0 {
        return nil, 0, fmt.Errorf("No dataruns found in $MFT $DATA record\n")
    }

    fragments := DataRunsToFragments(dataRuns, bytesPerCluster)
    totalLength := int64(0)
    for _, frag := range fragments {
        totalLength += int64(frag.Length)
    }

    return fragment.NewReader(in, fragments), totalLength, nil
}
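
// On Windows the volume argument is expected in drive-letter form such as `C:\`; it is rewritten
// above to the raw device path `\\.\C:` before being opened. The returned io.Reader stitches the
// $MFT's fragments together, so callers can stream the table without loading it whole, e.g.
//
//     r, size, err := GetMFTFile(`C:\`)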

func copyBytes(dst io.Writer, src io.Reader, totalLength int64) (written int64, err error) {
    buf := make([]byte, 1024*1024)

    // Below copied from io.copyBuffer (https://golang.org/src/io/io.go?s=12796:12856#L380)
    for {
        nr, er := src.Read(buf)
        if nr > 0 {
            nw, ew := dst.Write(buf[0:nr])
            if nw > 0 {
                written += int64(nw)
            }
            if ew != nil {
                err = ew
                break
            }
            if nr != nw {
                err = io.ErrShortWrite
                break
            }
        }
        if er != nil {
            if er != io.EOF {
                err = er
            }
            break
        }
    }
    return written, err
}

func copyFiles(dst io.Writer, src io.Reader, totalLength int64, fn func(int64, int64, float64)) (written int64, err error) {
    buf := make([]byte, 1024*1024)

    // Below copied from io.copyBuffer (https://golang.org/src/io/io.go?s=12796:12856#L380),
    // with a progress callback that reports the percentage copied so far.
    for {
        percent := float64(written) / float64(totalLength) * 100.0
        fn(written, totalLength, percent)
        nr, er := src.Read(buf)
        if nr > 0 {
            nw, ew := dst.Write(buf[0:nr])
            if nw > 0 {
                written += int64(nw)
            }
            if ew != nil {
                err = ew
                break
            }
            if nr != nw {
                err = io.ErrShortWrite
                break
            }
        }
        if er != nil {
            if er != io.EOF {
                err = er
            }
            break
        }
    }
    fn(written, totalLength, float64(written)/float64(totalLength)*100.0)
    return written, err
}

@ -0,0 +1,18 @@
package utf16

import (
    "encoding/binary"
    "unicode/utf16"
)

// DecodeString decodes the input data as UTF-16 using the provided byte order and converts the result to a string.
// The input length is expected to be a multiple of 2; a trailing odd byte is ignored.
func DecodeString(b []byte, bo binary.ByteOrder) string {
    slen := len(b) / 2
    shorts := make([]uint16, slen)
    for i := 0; i < slen; i++ {
        bi := i * 2
        shorts[i] = bo.Uint16(b[bi : bi+2])
    }
    return string(utf16.Decode(shorts))
}
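
// For example, DecodeString([]byte{0x41, 0x00, 0x42, 0x00}, binary.LittleEndian) returns "AB",
// matching how NTFS stores attribute and file names on disk.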