First upload version 0.0.1
node_modules/node-llama-cpp/dist/gguf/consts.d.ts | +4 lines (new normal file, generated, vendored)
@@ -0,0 +1,4 @@
import retry from "async-retry";
export declare const ggufDefaultFetchRetryOptions: retry.Options;
export declare const defaultExtraAllocationSize: number;
export declare const noDirectSubNestingGGufMetadataKeys: readonly string[];
node_modules/node-llama-cpp/dist/gguf/consts.js | +12 lines (new normal file, generated, vendored)
@@ -0,0 +1,12 @@
export const ggufDefaultFetchRetryOptions = {
    retries: 10,
    factor: 2,
    minTimeout: 1000,
    maxTimeout: 1000 * 16
};
export const defaultExtraAllocationSize = 1024 * 1024 * 4; // 4MB
export const noDirectSubNestingGGufMetadataKeys = [
    "general.license",
    "tokenizer.chat_template"
];
//# sourceMappingURL=consts.js.map
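Note: assuming `async-retry` / `node-retry` semantics (delay before attempt n = minTimeout * factor^n, capped at maxTimeout), the options above yield waits of roughly 1s, 2s, 4s, 8s, then 16s for the remaining attempts. A minimal sketch of that formula; the helper is illustrative and not part of the package:

function backoffDelays(retries = 10, factor = 2, minTimeout = 1000, maxTimeout = 16000) {
    // delay before retry n (0-based), per the node-retry formula (randomization off)
    return Array.from({length: retries}, (_, n) => Math.min(maxTimeout, minTimeout * factor ** n));
}
// backoffDelays() -> [1000, 2000, 4000, 8000, 16000, 16000, ...]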
node_modules/node-llama-cpp/dist/gguf/consts.js.map | +1 line (new normal file, generated, vendored)
@@ -0,0 +1 @@
{"version":3,"file":"consts.js","sourceRoot":"","sources":["../../src/gguf/consts.ts"],"names":[],"mappings":"AAEA,MAAM,CAAC,MAAM,4BAA4B,GAAkB;IACvD,OAAO,EAAE,EAAE;IACX,MAAM,EAAE,CAAC;IACT,UAAU,EAAE,IAAI;IAChB,UAAU,EAAE,IAAI,GAAG,EAAE;CACf,CAAC;AAEX,MAAM,CAAC,MAAM,0BAA0B,GAAG,IAAI,GAAG,IAAI,GAAG,CAAC,CAAC,CAAC,MAAM;AAEjE,MAAM,CAAC,MAAM,kCAAkC,GAAsB;IACjE,iBAAiB;IACjB,yBAAyB;CAC5B,CAAC"}
node_modules/node-llama-cpp/dist/gguf/errors/InvalidGgufMagicError.d.ts | +3 lines (new normal file, generated, vendored)
@@ -0,0 +1,3 @@
export declare class InvalidGgufMagicError extends Error {
    constructor(expectedGgufMagic: string, actualGgufMagic: string);
}
node_modules/node-llama-cpp/dist/gguf/errors/InvalidGgufMagicError.js | +6 lines (new normal file, generated, vendored)
@@ -0,0 +1,6 @@
export class InvalidGgufMagicError extends Error {
    constructor(expectedGgufMagic, actualGgufMagic) {
        super(`Invalid GGUF magic. Expected "${expectedGgufMagic}" but got "${actualGgufMagic}".`);
    }
}
//# sourceMappingURL=InvalidGgufMagicError.js.map
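Note: a minimal sketch of the check this error backs, assuming a `GgufFileReader` instance (see the fileReaders below) and the 4-byte "GGUF" magic from the GGUF spec; the wiring here is illustrative, not the library's exact parser:

// `reader` is any GgufFileReader subclass instance
const expectedGgufMagic = "GGUF";
const actualGgufMagic = await reader.readStringWithLength(0, 4);
if (actualGgufMagic !== expectedGgufMagic)
    throw new InvalidGgufMagicError(expectedGgufMagic, actualGgufMagic);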
node_modules/node-llama-cpp/dist/gguf/errors/InvalidGgufMagicError.js.map | +1 line (new normal file, generated, vendored)
@@ -0,0 +1 @@
{"version":3,"file":"InvalidGgufMagicError.js","sourceRoot":"","sources":["../../../src/gguf/errors/InvalidGgufMagicError.ts"],"names":[],"mappings":"AAAA,MAAM,OAAO,qBAAsB,SAAQ,KAAK;IAC5C,YAAmB,iBAAyB,EAAE,eAAuB;QACjE,KAAK,CAAC,iCAAiC,iBAAiB,cAAc,eAAe,IAAI,CAAC,CAAC;IAC/F,CAAC;CACJ"}
node_modules/node-llama-cpp/dist/gguf/errors/UnsupportedGgufValueTypeError.d.ts | +4 lines (new normal file, generated, vendored)
@@ -0,0 +1,4 @@
export declare class UnsupportedGgufValueTypeError extends Error {
    readonly ggufValueType: number;
    constructor(ggufValueType: number);
}
node_modules/node-llama-cpp/dist/gguf/errors/UnsupportedGgufValueTypeError.js | +9 lines (new normal file, generated, vendored)
@@ -0,0 +1,9 @@
export class UnsupportedGgufValueTypeError extends Error {
    ggufValueType;
    constructor(ggufValueType) {
        super(`Unsupported GGUF value type "${ggufValueType}"`);
        Object.defineProperty(this, "ggufValueType", { enumerable: false });
        this.ggufValueType = ggufValueType;
    }
}
//# sourceMappingURL=UnsupportedGgufValueTypeError.js.map
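Note: a hedged sketch of where this error fits. The GGUF spec defines metadata value type ids 0 through 12 (uint8 up to float64), so a parser can guard unknown ids like this; the guard function is illustrative, not the library's code:

function assertKnownGgufValueType(ggufValueType) {
    // value type ids 0..12 are the ones defined by the GGUF spec as of writing
    if (!Number.isInteger(ggufValueType) || ggufValueType < 0 || ggufValueType > 12)
        throw new UnsupportedGgufValueTypeError(ggufValueType);
}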
node_modules/node-llama-cpp/dist/gguf/errors/UnsupportedGgufValueTypeError.js.map | +1 line (new normal file, generated, vendored)
@@ -0,0 +1 @@
{"version":3,"file":"UnsupportedGgufValueTypeError.js","sourceRoot":"","sources":["../../../src/gguf/errors/UnsupportedGgufValueTypeError.ts"],"names":[],"mappings":"AAAA,MAAM,OAAO,6BAA8B,SAAQ,KAAK;IACpC,aAAa,CAAS;IAEtC,YAAmB,aAAqB;QACpC,KAAK,CAAC,gCAAgC,aAAa,GAAG,CAAC,CAAC;QAExD,MAAM,CAAC,cAAc,CAAC,IAAI,EAAE,eAAoC,EAAE,EAAC,UAAU,EAAE,KAAK,EAAC,CAAC,CAAC;QAEvF,IAAI,CAAC,aAAa,GAAG,aAAa,CAAC;IACvC,CAAC;CACJ"}
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufFileReader.d.ts | +36 lines (new normal file, generated, vendored)
@@ -0,0 +1,36 @@
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { Promisable } from "../../utils/transformPromisable.js";
export declare const valueTypeToBytesToRead: {
    readonly uint8: 1;
    readonly uint16: 2;
    readonly uint32: 4;
    readonly uint64: 8;
    readonly int8: 1;
    readonly int16: 2;
    readonly int32: 4;
    readonly int64: 8;
    readonly float32: 4;
    readonly float64: 8;
    readonly bool: 1;
};
export declare abstract class GgufFileReader {
    protected _buffer: Buffer<ArrayBuffer>;
    abstract readByteRange(offset: number | GgufReadOffset, length: number): Promisable<Buffer>;
    protected abstract ensureHasByteRange(offset: number | GgufReadOffset, length: number): Promisable<void>;
    readUint8(offset: number | GgufReadOffset): Promisable<number>;
    readUint16(offset: number | GgufReadOffset): Promisable<number>;
    readUint32(offset: number | GgufReadOffset): Promisable<number>;
    readUint64(offset: number | GgufReadOffset): Promisable<bigint>;
    readInt8(offset: number | GgufReadOffset): Promisable<number>;
    readInt16(offset: number | GgufReadOffset): Promisable<number>;
    readInt32(offset: number | GgufReadOffset): Promisable<number>;
    readInt64(offset: number | GgufReadOffset): Promisable<bigint>;
    readFloat32(offset: number | GgufReadOffset): Promisable<number>;
    readFloat64(offset: number | GgufReadOffset): Promisable<number>;
    readBool(offset: number | GgufReadOffset): Promisable<boolean>;
    readString(offset: number | GgufReadOffset): Promisable<string>;
    readStringWithLength(offset: number | GgufReadOffset, length: number): Promisable<string>;
    protected _addToBuffer(buffer: Buffer): void;
    private _withBufferRead;
    static castNumberIfSafe(value: bigint): number | bigint;
}
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufFileReader.js | +106 lines (new normal file, generated, vendored)
@@ -0,0 +1,106 @@
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { transformPromisable } from "../../utils/transformPromisable.js";
export const valueTypeToBytesToRead = {
    uint8: 1,
    uint16: 2,
    uint32: 4,
    uint64: 8,
    int8: 1,
    int16: 2,
    int32: 4,
    int64: 8,
    float32: 4,
    float64: 8,
    bool: 1
};
export class GgufFileReader {
    _buffer = Buffer.alloc(0);
    readUint8(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.uint8, (resolvedOffset) => {
            return this._buffer.readUInt8(resolvedOffset);
        });
    }
    readUint16(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.uint16, (resolvedOffset) => {
            return this._buffer.readUInt16LE(resolvedOffset);
        });
    }
    readUint32(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.uint32, (resolvedOffset) => {
            return this._buffer.readUInt32LE(resolvedOffset);
        });
    }
    readUint64(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.uint64, (resolvedOffset) => {
            return this._buffer.readBigUInt64LE(resolvedOffset);
        });
    }
    readInt8(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.int8, (resolvedOffset) => {
            return this._buffer.readInt8(resolvedOffset);
        });
    }
    readInt16(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.int16, (resolvedOffset) => {
            return this._buffer.readInt16LE(resolvedOffset);
        });
    }
    readInt32(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.int32, (resolvedOffset) => {
            return this._buffer.readInt32LE(resolvedOffset);
        });
    }
    readInt64(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.int64, (resolvedOffset) => {
            return this._buffer.readBigInt64LE(resolvedOffset);
        });
    }
    readFloat32(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.float32, (resolvedOffset) => {
            return this._buffer.readFloatLE(resolvedOffset);
        });
    }
    readFloat64(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.float64, (resolvedOffset) => {
            return this._buffer.readDoubleLE(resolvedOffset);
        });
    }
    readBool(offset) {
        return this._withBufferRead(offset, valueTypeToBytesToRead.uint8, (resolvedOffset) => {
            return this._buffer.readUInt8(resolvedOffset) === 1;
        });
    }
    readString(offset) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        return transformPromisable(this.readUint64(readOffset), (length) => {
            return this.readStringWithLength(readOffset, Number(length));
        });
    }
    readStringWithLength(offset, length) {
        const readLength = valueTypeToBytesToRead.uint8 * length;
        return this._withBufferRead(offset, readLength, (resolvedOffset) => {
            return this._buffer.toString("utf8", resolvedOffset, Math.min(resolvedOffset + readLength, this._buffer.length));
        });
    }
    _addToBuffer(buffer) {
        const newBuffer = Buffer.alloc(this._buffer.byteLength + buffer.byteLength);
        this._buffer.copy(newBuffer);
        buffer.copy(newBuffer, this._buffer.byteLength);
        this._buffer = newBuffer;
    }
    _withBufferRead(offset, length, reader) {
        return transformPromisable(this.ensureHasByteRange(offset, length), () => {
            const resolvedOffset = GgufReadOffset.resolveReadOffset(offset);
            return transformPromisable(reader(resolvedOffset.offset), (res) => {
                resolvedOffset.moveBy(Math.min(length, this._buffer.length - resolvedOffset.offset));
                return res;
            });
        });
    }
    static castNumberIfSafe(value) {
        if (value > Number.MAX_SAFE_INTEGER)
            return value;
        return Number(value);
    }
}
//# sourceMappingURL=GgufFileReader.js.map
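Note: a minimal in-memory subclass, to illustrate the abstract contract above. It is illustrative only; the real subclasses (GgufFsFileReader and GgufNetworkFetchFileReader below) page data in lazily instead of holding it all up front:

class InMemoryGgufFileReader extends GgufFileReader {
    constructor(data) {
        super();
        this._addToBuffer(data); // the whole file is already in memory
    }
    readByteRange(offset, length) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        const res = this._buffer.subarray(readOffset.offset, readOffset.offset + length);
        readOffset.moveBy(length);
        return res;
    }
    ensureHasByteRange() {
        // nothing to do: all bytes are present from construction
    }
}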
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufFileReader.js.map | +1 line (new normal file, generated, vendored)
@@ -0,0 +1 @@
{"version":3,"file":"GgufFileReader.js","sourceRoot":"","sources":["../../../src/gguf/fileReaders/GgufFileReader.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,cAAc,EAAC,MAAM,4BAA4B,CAAC;AAC1D,OAAO,EAAa,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AAEnF,MAAM,CAAC,MAAM,sBAAsB,GAAG;IAClC,KAAK,EAAE,CAAC;IACR,MAAM,EAAE,CAAC;IACT,MAAM,EAAE,CAAC;IACT,MAAM,EAAE,CAAC;IACT,IAAI,EAAE,CAAC;IACP,KAAK,EAAE,CAAC;IACR,KAAK,EAAE,CAAC;IACR,KAAK,EAAE,CAAC;IACR,OAAO,EAAE,CAAC;IACV,OAAO,EAAE,CAAC;IACV,IAAI,EAAE,CAAC;CACD,CAAC;AAEX,MAAM,OAAgB,cAAc;IACtB,OAAO,GAAG,MAAM,CAAC,KAAK,CAAC,CAAC,CAAC,CAAC;IAK7B,SAAS,CAAC,MAA+B;QAC5C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,KAAK,EAAE,CAAC,cAAc,EAAE,EAAE;YACjF,OAAO,IAAI,CAAC,OAAO,CAAC,SAAS,CAAC,cAAc,CAAC,CAAC;QAClD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,UAAU,CAAC,MAA+B;QAC7C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,MAAM,EAAE,CAAC,cAAc,EAAE,EAAE;YAClF,OAAO,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,cAAc,CAAC,CAAC;QACrD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,UAAU,CAAC,MAA+B;QAC7C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,MAAM,EAAE,CAAC,cAAc,EAAE,EAAE;YAClF,OAAO,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,cAAc,CAAC,CAAC;QACrD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,UAAU,CAAC,MAA+B;QAC7C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,MAAM,EAAE,CAAC,cAAc,EAAE,EAAE;YAClF,OAAO,IAAI,CAAC,OAAO,CAAC,eAAe,CAAC,cAAc,CAAC,CAAC;QACxD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,QAAQ,CAAC,MAA+B;QAC3C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,IAAI,EAAE,CAAC,cAAc,EAAE,EAAE;YAChF,OAAO,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,cAAc,CAAC,CAAC;QACjD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,SAAS,CAAC,MAA+B;QAC5C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,KAAK,EAAE,CAAC,cAAc,EAAE,EAAE;YACjF,OAAO,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,CAAC;QACpD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,SAAS,CAAC,MAA+B;QAC5C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,KAAK,EAAE,CAAC,cAAc,EAAE,EAAE;YACjF,OAAO,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,CAAC;QACpD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,SAAS,CAAC,MAA+B;QAC5C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,KAAK,EAAE,CAAC,cAAc,EAAE,EAAE;YACjF,OAAO,IAAI,CAAC,OAAO,CAAC,cAAc,CAAC,cAAc,CAAC,CAAC;QACvD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,WAAW,CAAC,MAA+B;QAC9C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,OAAO,EAAE,CAAC,cAAc,EAAE,EAAE;YACnF,OAAO,IAAI,CAAC,OAAO,CAAC,WAAW,CAAC,cAAc,CAAC,CAAC;QACpD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,WAAW,CAAC,MAA+B;QAC9C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,OAAO,EAAE,CAAC,cAAc,EAAE,EAAE;YACnF,OAAO,IAAI,CAAC,OAAO,CAAC,YAAY,CAAC,cAAc,CAAC,CAAC;QACrD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,QAAQ,CAAC,MAA+B;QAC3C,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,sBAAsB,CAAC,KAAK,EAAE,CAAC,cAAc,EAAE,EAAE;YACjF,OAAO,IAAI,CAAC,OAAO,CAAC,SAAS,CAAC,cAAc,CAAC,KAAK,CAAC,CAAC;QACxD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,UAAU,CAAC,MAA+B;QAC7C,MAAM,UAAU,GAAG,cAAc,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAE5D,OAAO,mBAAmB,CAAC,IAAI,CAAC,UAAU,CAAC,UAAU,CAAC,EAAE,CAAC,MAAM,EAAE,EAAE;YAC/D,OAAO,IAAI,CAAC,oBAAoB,CAAC,UAAU,EAAE,MAAM,CAAC,MAAM,CAAC,CAAC,CAAC;QACjE,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,oBAAoB,CAAC,MAA+B,EAAE,MAAc;QACvE,MAAM,UAAU,GAAG,sBAAsB,CAAC,KAAK,GAAG,MAAM,CAAC;QAEzD,OAAO,IAAI,CAAC,eAAe,CAAC,MAAM,EAAE,UAAU,EAAE,CAAC,cAAc,EAAE,EAAE;YAC/D,OAAO,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,MAAM,EAAE,cAAc,EAAE,IAAI,CAAC,GAAG,CAAC,cAAc,GAAG,UAAU,EAAE,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC,CAAC;QACrH,CAAC,CAAC,CAAC;IACP,CAAC;IAES,YAAY,CAAC,MAAc;QACjC,MAAM,SAAS,GAAG,MAAM,CAAC,KAAK,CAAC,IAAI,CAAC,OAAO,CAAC,UAAU,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC;QAC5E,IAAI,CAAC,OAAO,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;QAC7B,MAAM,CAAC,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,OAAO,CAAC,UAAU,CAAC,CAAC;QAEhD,IAAI,CAAC,OAAO,GAAG,SAAS,CAAC;IAC7B,CAAC;IAEO
,eAAe,CAAI,MAA+B,EAAE,MAAc,EAAE,MAAqC;QAC7G,OAAO,mBAAmB,CAAC,IAAI,CAAC,kBAAkB,CAAC,MAAM,EAAE,MAAM,CAAC,EAAE,GAAG,EAAE;YACrE,MAAM,cAAc,GAAG,cAAc,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;YAEhE,OAAO,mBAAmB,CAAC,MAAM,CAAC,cAAc,CAAC,MAAM,CAAC,EAAE,CAAC,GAAG,EAAE,EAAE;gBAC9D,cAAc,CAAC,MAAM,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,EAAE,IAAI,CAAC,OAAO,CAAC,MAAM,GAAG,cAAc,CAAC,MAAM,CAAC,CAAC,CAAC;gBAErF,OAAO,GAAG,CAAC;YACf,CAAC,CAAC,CAAC;QACP,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,MAAM,CAAC,gBAAgB,CAAC,KAAa;QACxC,IAAI,KAAK,GAAG,MAAM,CAAC,gBAAgB;YAC/B,OAAO,KAAK,CAAC;QAEjB,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC;IACzB,CAAC;CACJ"}
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufFsFileReader.d.ts | +16 lines (new normal file, generated, vendored)
@@ -0,0 +1,16 @@
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { GgufFileReader } from "./GgufFileReader.js";
type GgufFsFileReaderOptions = {
    filePath: string;
    signal?: AbortSignal;
};
export declare class GgufFsFileReader extends GgufFileReader {
    readonly filePath: string;
    private readonly _signal?;
    constructor({ filePath, signal }: GgufFsFileReaderOptions);
    readByteRange(offset: number | GgufReadOffset, length: number): Buffer<ArrayBuffer> | Promise<Buffer<ArrayBuffer>>;
    protected ensureHasByteRange(offset: number | GgufReadOffset, length: number): Promise<void> | undefined;
    private _readToExpandBufferUpToOffset;
    private _readByteRange;
}
export {};
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufFsFileReader.js | +62 lines (new normal file, generated, vendored)
@@ -0,0 +1,62 @@
import fs from "node:fs/promises";
import path from "node:path";
import { withLock } from "lifecycle-utils";
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { defaultExtraAllocationSize } from "../consts.js";
import { GgufFileReader } from "./GgufFileReader.js";
export class GgufFsFileReader extends GgufFileReader {
    filePath;
    _signal;
    constructor({ filePath, signal }) {
        super();
        this.filePath = path.resolve(process.cwd(), filePath);
        this._signal = signal;
    }
    readByteRange(offset, length) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        const endOffset = readOffset.offset + length;
        if (endOffset >= this._buffer.length)
            return this._readToExpandBufferUpToOffset(endOffset)
                .then(() => {
                    const res = this._buffer.subarray(readOffset.offset, endOffset);
                    readOffset.moveBy(length);
                    return res;
                });
        const res = this._buffer.subarray(readOffset.offset, endOffset);
        readOffset.moveBy(length);
        return res;
    }
    ensureHasByteRange(offset, length) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        const endOffset = readOffset.offset + length;
        if (endOffset >= this._buffer.length)
            return this._readToExpandBufferUpToOffset(endOffset)
                .then(() => {
                    if (endOffset >= this._buffer.length)
                        throw new Error("Expected buffer to be long enough for the requested byte range");
                });
        return undefined;
    }
    async _readToExpandBufferUpToOffset(endOffset, extraAllocationSize = defaultExtraAllocationSize) {
        return await withLock([this, "modifyBuffer"], this._signal, async () => {
            if (endOffset < this._buffer.length)
                return;
            const missingBytesBuffer = await this._readByteRange(this._buffer.length, endOffset + extraAllocationSize - this._buffer.length);
            this._addToBuffer(missingBytesBuffer);
        });
    }
    async _readByteRange(start, length) {
        const fd = await fs.open(this.filePath, "r");
        try {
            if (this._signal?.aborted)
                throw this._signal.reason;
            const buffer = Buffer.alloc(length);
            await fd.read(buffer, 0, length, start);
            return buffer;
        }
        finally {
            await fd.close();
        }
    }
}
//# sourceMappingURL=GgufFsFileReader.js.map
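Note: a usage sketch tying the readers together. The path is a placeholder; per the GGUF spec a file starts with the "GGUF" magic followed by a uint32 format version (typically 3 for current files):

const reader = new GgufFsFileReader({ filePath: "./model.gguf" }); // placeholder path
const magic = await reader.readStringWithLength(0, 4); // "GGUF"
const version = await reader.readUint32(4);
console.log(magic, version);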
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufFsFileReader.js.map | +1 line (new normal file, generated, vendored)
@@ -0,0 +1 @@
{"version":3,"file":"GgufFsFileReader.js","sourceRoot":"","sources":["../../../src/gguf/fileReaders/GgufFsFileReader.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,kBAAkB,CAAC;AAClC,OAAO,IAAI,MAAM,WAAW,CAAC;AAC7B,OAAO,EAAC,QAAQ,EAAC,MAAM,iBAAiB,CAAC;AACzC,OAAO,EAAC,cAAc,EAAC,MAAM,4BAA4B,CAAC;AAC1D,OAAO,EAAC,0BAA0B,EAAC,MAAM,cAAc,CAAC;AACxD,OAAO,EAAC,cAAc,EAAC,MAAM,qBAAqB,CAAC;AAOnD,MAAM,OAAO,gBAAiB,SAAQ,cAAc;IAChC,QAAQ,CAAS;IAChB,OAAO,CAAe;IAEvC,YAAmB,EAAC,QAAQ,EAAE,MAAM,EAA0B;QAC1D,KAAK,EAAE,CAAC;QACR,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC,OAAO,CAAC,OAAO,CAAC,GAAG,EAAE,EAAE,QAAQ,CAAC,CAAC;QACtD,IAAI,CAAC,OAAO,GAAG,MAAM,CAAC;IAC1B,CAAC;IAEM,aAAa,CAAC,MAA+B,EAAE,MAAc;QAChE,MAAM,UAAU,GAAG,cAAc,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC5D,MAAM,SAAS,GAAG,UAAU,CAAC,MAAM,GAAG,MAAM,CAAC;QAE7C,IAAI,SAAS,IAAI,IAAI,CAAC,OAAO,CAAC,MAAM;YAChC,OAAO,IAAI,CAAC,6BAA6B,CAAC,SAAS,CAAC;iBAC/C,IAAI,CAAC,GAAG,EAAE;gBACP,MAAM,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,EAAE,SAAS,CAAC,CAAC;gBAChE,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;gBAC1B,OAAO,GAAG,CAAC;YACf,CAAC,CAAC,CAAC;QAEX,MAAM,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,EAAE,SAAS,CAAC,CAAC;QAChE,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAC1B,OAAO,GAAG,CAAC;IACf,CAAC;IAES,kBAAkB,CAAC,MAA+B,EAAE,MAAc;QACxE,MAAM,UAAU,GAAG,cAAc,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC5D,MAAM,SAAS,GAAG,UAAU,CAAC,MAAM,GAAG,MAAM,CAAC;QAE7C,IAAI,SAAS,IAAI,IAAI,CAAC,OAAO,CAAC,MAAM;YAChC,OAAO,IAAI,CAAC,6BAA6B,CAAC,SAAS,CAAC;iBAC/C,IAAI,CAAC,GAAG,EAAE;gBACP,IAAI,SAAS,IAAI,IAAI,CAAC,OAAO,CAAC,MAAM;oBAChC,MAAM,IAAI,KAAK,CAAC,gEAAgE,CAAC,CAAC;YAC1F,CAAC,CAAC,CAAC;QAEX,OAAO,SAAS,CAAC;IACrB,CAAC;IAEO,KAAK,CAAC,6BAA6B,CAAC,SAAiB,EAAE,sBAA8B,0BAA0B;QACnH,OAAO,MAAM,QAAQ,CAAC,CAAC,IAAwB,EAAE,cAAc,CAAC,EAAE,IAAI,CAAC,OAAO,EAAE,KAAK,IAAI,EAAE;YACvF,IAAI,SAAS,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM;gBAC/B,OAAO;YAEX,MAAM,kBAAkB,GAAG,MAAM,IAAI,CAAC,cAAc,CAChD,IAAI,CAAC,OAAO,CAAC,MAAM,EACnB,SAAS,GAAG,mBAAmB,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM,CACxD,CAAC;YAEF,IAAI,CAAC,YAAY,CAAC,kBAAkB,CAAC,CAAC;QAC1C,CAAC,CAAC,CAAC;IACP,CAAC;IAEO,KAAK,CAAC,cAAc,CAAC,KAAa,EAAE,MAAc;QACtD,MAAM,EAAE,GAAG,MAAM,EAAE,CAAC,IAAI,CAAC,IAAI,CAAC,QAAQ,EAAE,GAAG,CAAC,CAAC;QAC7C,IAAI,CAAC;YACD,IAAI,IAAI,CAAC,OAAO,EAAE,OAAO;gBACrB,MAAM,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC;YAE9B,MAAM,MAAM,GAAG,MAAM,CAAC,KAAK,CAAC,MAAM,CAAC,CAAC;YACpC,MAAM,EAAE,CAAC,IAAI,CAAC,MAAM,EAAE,CAAC,EAAE,MAAM,EAAE,KAAK,CAAC,CAAC;YACxC,OAAO,MAAM,CAAC;QAClB,CAAC;gBAAS,CAAC;YACP,MAAM,EAAE,CAAC,KAAK,EAAE,CAAC;QACrB,CAAC;IACL,CAAC;CACJ"}
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufNetworkFetchFileReader.d.ts | +28 lines (new normal file, generated, vendored)
@@ -0,0 +1,28 @@
import retry from "async-retry";
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { ModelFileAccessTokens } from "../../utils/modelFileAccessTokens.js";
import { ModelDownloadEndpoints } from "../../utils/modelDownloadEndpoints.js";
import { GgufFileReader } from "./GgufFileReader.js";
type GgufFetchFileReaderOptions = {
    url: string;
    retryOptions?: retry.Options;
    headers?: Record<string, string>;
    signal?: AbortSignal;
    tokens?: ModelFileAccessTokens;
    endpoints?: ModelDownloadEndpoints;
};
export declare class GgufNetworkFetchFileReader extends GgufFileReader {
    readonly url: string;
    readonly retryOptions: retry.Options;
    readonly headers: Record<string, string>;
    readonly tokens?: ModelFileAccessTokens;
    readonly endpoints?: ModelDownloadEndpoints;
    private readonly _signal?;
    private _tryHeaders;
    constructor({ url, retryOptions, headers, tokens, endpoints, signal }: GgufFetchFileReaderOptions);
    readByteRange(offset: number | GgufReadOffset, length: number): Buffer<ArrayBuffer> | Promise<Buffer<ArrayBuffer>>;
    protected ensureHasByteRange(offset: number | GgufReadOffset, length: number): Promise<void> | undefined;
    private _fetchToExpandBufferUpToOffset;
    private _fetchByteRange;
}
export {};
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufNetworkFetchFileReader.js | +94 lines (new normal file, generated, vendored)
@@ -0,0 +1,94 @@
import retry from "async-retry";
import { withLock } from "lifecycle-utils";
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { defaultExtraAllocationSize, ggufDefaultFetchRetryOptions } from "../consts.js";
import { resolveModelFileAccessTokensTryHeaders } from "../../utils/modelFileAccessTokens.js";
import { GgufFileReader } from "./GgufFileReader.js";
export class GgufNetworkFetchFileReader extends GgufFileReader {
    url;
    retryOptions;
    headers;
    tokens;
    endpoints;
    _signal;
    _tryHeaders = undefined;
    constructor({ url, retryOptions = ggufDefaultFetchRetryOptions, headers, tokens, endpoints, signal }) {
        super();
        this.url = url;
        this.retryOptions = retryOptions;
        this.headers = headers ?? {};
        this.tokens = tokens;
        this.endpoints = endpoints;
        this._signal = signal;
    }
    readByteRange(offset, length) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        const endOffset = readOffset.offset + length;
        if (endOffset >= this._buffer.length)
            return this._fetchToExpandBufferUpToOffset(endOffset)
                .then(() => {
                    const res = this._buffer.subarray(readOffset.offset, endOffset);
                    readOffset.moveBy(length);
                    return res;
                });
        const res = this._buffer.subarray(readOffset.offset, endOffset);
        readOffset.moveBy(length);
        return res;
    }
    ensureHasByteRange(offset, length) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        const endOffset = readOffset.offset + length;
        if (endOffset >= this._buffer.length)
            return this._fetchToExpandBufferUpToOffset(endOffset)
                .then(() => {
                    if (endOffset >= this._buffer.length)
                        throw new Error("Expected buffer to be long enough for the requested byte range");
                });
        return undefined;
    }
    async _fetchToExpandBufferUpToOffset(endOffset, extraAllocationSize = defaultExtraAllocationSize) {
        await withLock([this, "modifyBuffer"], this._signal, async () => {
            if (endOffset < this._buffer.length)
                return;
            const missingBytesBuffer = await retry(async (bail) => {
                try {
                    return await this._fetchByteRange(this._buffer.length, endOffset + extraAllocationSize - this._buffer.length);
                }
                catch (err) {
                    if (this._signal?.aborted) {
                        bail(this._signal.reason);
                        throw this._signal.reason;
                    }
                    throw err;
                }
            }, this.retryOptions);
            if (this._signal?.aborted)
                throw this._signal.reason;
            this._addToBuffer(missingBytesBuffer);
        });
    }
    async _fetchByteRange(start, length) {
        if (this._tryHeaders == null)
            this._tryHeaders = await resolveModelFileAccessTokensTryHeaders(this.url, this.tokens, this.endpoints, this.headers);
        const headersToTry = [this.headers, ...this._tryHeaders];
        while (headersToTry.length > 0) {
            const headers = headersToTry.shift();
            const response = await fetch(this.url, {
                headers: {
                    ...headers,
                    Range: `bytes=${start}-${start + length}`,
                    accept: "*/*"
                },
                signal: this._signal
            });
            if ((response.status >= 500 || response.status === 429 || response.status === 401) && headersToTry.length > 0)
                continue;
            if (!response.ok)
                throw new Error(`Failed to fetch byte range: ${response.status} ${response.statusText}`);
            const arrayBuffer = await response.arrayBuffer();
            return Buffer.from(arrayBuffer);
        }
        throw new Error("Failed to fetch byte range: no more headers to try");
    }
}
//# sourceMappingURL=GgufNetworkFetchFileReader.js.map
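Note: the same sketch over HTTP; only the byte ranges actually read are fetched, via HTTP Range requests. The URL is a placeholder and the server must honor Range headers:

const reader = new GgufNetworkFetchFileReader({ url: "https://example.com/model.gguf" });
const magic = await reader.readStringWithLength(0, 4); // triggers one ranged fetch, returns "GGUF"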
node_modules/node-llama-cpp/dist/gguf/fileReaders/GgufNetworkFetchFileReader.js.map | +1 line (new normal file, generated, vendored)
@@ -0,0 +1 @@
{"version":3,"file":"GgufNetworkFetchFileReader.js","sourceRoot":"","sources":["../../../src/gguf/fileReaders/GgufNetworkFetchFileReader.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,MAAM,aAAa,CAAC;AAChC,OAAO,EAAC,QAAQ,EAAC,MAAM,iBAAiB,CAAC;AACzC,OAAO,EAAC,cAAc,EAAC,MAAM,4BAA4B,CAAC;AAC1D,OAAO,EAAC,0BAA0B,EAAE,4BAA4B,EAAC,MAAM,cAAc,CAAC;AACtF,OAAO,EAAwB,sCAAsC,EAAC,MAAM,sCAAsC,CAAC;AAEnH,OAAO,EAAC,cAAc,EAAC,MAAM,qBAAqB,CAAC;AAWnD,MAAM,OAAO,0BAA2B,SAAQ,cAAc;IAC1C,GAAG,CAAS;IACZ,YAAY,CAAgB;IAC5B,OAAO,CAAyB;IAChC,MAAM,CAAyB;IAC/B,SAAS,CAA0B;IAClC,OAAO,CAAe;IAC/B,WAAW,GAAyC,SAAS,CAAC;IAEtE,YAAmB,EAAC,GAAG,EAAE,YAAY,GAAG,4BAA4B,EAAE,OAAO,EAAE,MAAM,EAAE,SAAS,EAAE,MAAM,EAA6B;QACjI,KAAK,EAAE,CAAC;QACR,IAAI,CAAC,GAAG,GAAG,GAAG,CAAC;QACf,IAAI,CAAC,YAAY,GAAG,YAAY,CAAC;QACjC,IAAI,CAAC,OAAO,GAAG,OAAO,IAAI,EAAE,CAAC;QAC7B,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;QACrB,IAAI,CAAC,SAAS,GAAG,SAAS,CAAC;QAC3B,IAAI,CAAC,OAAO,GAAG,MAAM,CAAC;IAC1B,CAAC;IAEM,aAAa,CAAC,MAA+B,EAAE,MAAc;QAChE,MAAM,UAAU,GAAG,cAAc,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC5D,MAAM,SAAS,GAAG,UAAU,CAAC,MAAM,GAAG,MAAM,CAAC;QAE7C,IAAI,SAAS,IAAI,IAAI,CAAC,OAAO,CAAC,MAAM;YAChC,OAAO,IAAI,CAAC,8BAA8B,CAAC,SAAS,CAAC;iBAChD,IAAI,CAAC,GAAG,EAAE;gBACP,MAAM,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,EAAE,SAAS,CAAC,CAAC;gBAChE,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;gBAC1B,OAAO,GAAG,CAAC;YACf,CAAC,CAAC,CAAC;QAEX,MAAM,GAAG,GAAG,IAAI,CAAC,OAAO,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,EAAE,SAAS,CAAC,CAAC;QAChE,UAAU,CAAC,MAAM,CAAC,MAAM,CAAC,CAAC;QAC1B,OAAO,GAAG,CAAC;IACf,CAAC;IAES,kBAAkB,CAAC,MAA+B,EAAE,MAAc;QACxE,MAAM,UAAU,GAAG,cAAc,CAAC,iBAAiB,CAAC,MAAM,CAAC,CAAC;QAC5D,MAAM,SAAS,GAAG,UAAU,CAAC,MAAM,GAAG,MAAM,CAAC;QAE7C,IAAI,SAAS,IAAI,IAAI,CAAC,OAAO,CAAC,MAAM;YAChC,OAAO,IAAI,CAAC,8BAA8B,CAAC,SAAS,CAAC;iBAChD,IAAI,CAAC,GAAG,EAAE;gBACP,IAAI,SAAS,IAAI,IAAI,CAAC,OAAO,CAAC,MAAM;oBAChC,MAAM,IAAI,KAAK,CAAC,gEAAgE,CAAC,CAAC;YAC1F,CAAC,CAAC,CAAC;QAEX,OAAO,SAAS,CAAC;IACrB,CAAC;IAEO,KAAK,CAAC,8BAA8B,CAAC,SAAiB,EAAE,sBAA8B,0BAA0B;QACpH,MAAM,QAAQ,CAAC,CAAC,IAAkC,EAAE,cAAc,CAAC,EAAE,IAAI,CAAC,OAAO,EAAE,KAAK,IAAI,EAAE;YAC1F,IAAI,SAAS,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM;gBAC/B,OAAO;YAEX,MAAM,kBAAkB,GAAG,MAAM,KAAK,CAAC,KAAK,EAAE,IAAI,EAAE,EAAE;gBAClD,IAAI,CAAC;oBACD,OAAO,MAAM,IAAI,CAAC,eAAe,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,EAAE,SAAS,GAAG,mBAAmB,GAAG,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;gBAClH,CAAC;gBAAC,OAAO,GAAG,EAAE,CAAC;oBACX,IAAI,IAAI,CAAC,OAAO,EAAE,OAAO,EAAE,CAAC;wBACxB,IAAI,CAAC,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;wBAC1B,MAAM,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC;oBAC9B,CAAC;oBAED,MAAM,GAAG,CAAC;gBACd,CAAC;YACL,CAAC,EAAE,IAAI,CAAC,YAAY,CAAC,CAAC;YAEtB,IAAI,IAAI,CAAC,OAAO,EAAE,OAAO;gBACrB,MAAM,IAAI,CAAC,OAAO,CAAC,MAAM,CAAC;YAE9B,IAAI,CAAC,YAAY,CAAC,kBAAkB,CAAC,CAAC;QAC1C,CAAC,CAAC,CAAC;IACP,CAAC;IAEO,KAAK,CAAC,eAAe,CAAC,KAAa,EAAE,MAAc;QACvD,IAAI,IAAI,CAAC,WAAW,IAAI,IAAI;YACxB,IAAI,CAAC,WAAW,GAAG,MAAM,sCAAsC,CAAC,IAAI,CAAC,GAAG,EAAE,IAAI,CAAC,MAAM,EAAE,IAAI,CAAC,SAAS,EAAE,IAAI,CAAC,OAAO,CAAC,CAAC;QAEzH,MAAM,YAAY,GAAG,CAAC,IAAI,CAAC,OAAO,EAAE,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC;QAEzD,OAAO,YAAY,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YAC7B,MAAM,OAAO,GAAG,YAAY,CAAC,KAAK,EAAE,CAAC;YAErC,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,IAAI,CAAC,GAAG,EAAE;gBACnC,OAAO,EAAE;oBACL,GAAG,OAAO;oBACV,KAAK,EAAE,SAAS,KAAK,IAAI,KAAK,GAAG,MAAM,EAAE;oBACzC,MAAM,EAAE,KAAK;iBAChB;gBACD,MAAM,EAAE,IAAI,CAAC,OAAO;aACvB,CAAC,CAAC;YAEH,IAAI,CAAC,QAAQ,CAAC,MAAM,IAAI,GAAG,IAAI,QAAQ,CAAC,MAAM,KAAK,GAAG,IAAI,QAAQ,CAAC,MAAM,KAAK,GAAG,CAAC,IAAI,YAAY,CAAC,MAAM,GAAG,CAAC;gBACzG,SAAS;YAEb,IAAI,CAAC,QAAQ,CAAC,EAAE;gBACZ,MAAM,IAAI,KAAK,CAAC,+BAA+B,QAAQ,CAAC,MAAM,IAAI,QAAQ,CAAC,U
AAU,EAAE,CAAC,CAAC;YAE7F,MAAM,WAAW,GAAG,MAAM,QAAQ,CAAC,WAAW,EAAE,CAAC;YACjD,OAAO,MAAM,CAAC,IAAI,CAAC,WAAW,CAAC,CAAC;QACpC,CAAC;QAED,MAAM,IAAI,KAAK,CAAC,oDAAoD,CAAC,CAAC;IAC1E,CAAC;CACJ"}
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsights.d.ts | +67 lines (new normal file, generated, vendored)
@@ -0,0 +1,67 @@
import { Llama } from "../../bindings/Llama.js";
import { GgufFileInfo } from "../types/GgufFileInfoTypes.js";
import { GgufInsightsConfigurationResolver } from "./GgufInsightsConfigurationResolver.js";
import { GgufInsightsTokens } from "./GgufInsightsTokens.js";
export type GgufInsightsResourceRequirements = {
    cpuRam: number;
    gpuVram: number;
};
export declare class GgufInsights {
    private constructor();
    /**
     * Get warnings about the model file that would affect its usage.
     *
     * Most of these warnings are also generated by `llama.cpp`
     */
    getWarnings(modelFilePath?: string): string[];
    get ggufFileInfo(): GgufFileInfo;
    get configurationResolver(): GgufInsightsConfigurationResolver;
    get tokens(): GgufInsightsTokens;
    /** The context size the model was trained on */
    get trainContextSize(): number | undefined;
    /** The size of an embedding vector the model can produce */
    get embeddingVectorSize(): number | undefined;
    get totalLayers(): number;
    get modelSize(): number;
    get flashAttentionSupported(): boolean;
    get hasEncoder(): boolean;
    get hasDecoder(): boolean;
    get isRecurrent(): boolean;
    get supportsRanking(): boolean;
    /**
     * The size of the SWA (Sliding Window Attention).
     *
     * When `undefined`, the model does not use sliding window attention.
     */
    get swaSize(): number | undefined;
    estimateModelResourceRequirements({ gpuLayers, useMmap, gpuSupportsMmap }: {
        gpuLayers: number;
        useMmap?: boolean;
        gpuSupportsMmap?: boolean;
    }): GgufInsightsResourceRequirements;
    /**
     * Estimates the memory required to create a context of the given parameters based on the implementation details of `llama.cpp`.
     * The calculation doesn't include a precise estimation of the graph overhead memory, so it uses a rough estimate for that.
     * The estimation for the graph overhead memory will be improved in the future to be more precise, but it's good enough for now.
     */
    estimateContextResourceRequirements({ contextSize, modelGpuLayers, batchSize, sequences, isEmbeddingContext, includeGraphOverhead, flashAttention, swaFullCache }: {
        contextSize: number;
        modelGpuLayers: number;
        batchSize?: number;
        sequences?: number;
        isEmbeddingContext?: boolean;
        flashAttention?: boolean;
        includeGraphOverhead?: boolean;
        swaFullCache?: boolean;
    }): GgufInsightsResourceRequirements;
    /**
     * @param ggufFileInfo
     * @param llama - If you already have a `Llama` instance, pass it to reuse it for the `GgufInsights` instance.
     * If you don't pass a `Llama` instance, a basic `Llama` instance is created as a fallback - it's a slim instance that
     * doesn't instantiate a `llama.cpp` backend, so it won't utilize the GPU at all, and be shared with other `GgufInsights` instances
     * that need a fallback `Llama` instance.
     */
    static from(ggufFileInfo: GgufFileInfo, llama?: Llama): Promise<GgufInsights>;
}
export declare function parseRankingTemplate(template: string | undefined | null): string | undefined;
export declare function isRankingTemplateValid(template: string | undefined | null): boolean;
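Note: a hedged usage sketch. readGgufFileInfo is the package's GGUF parsing entry point (exported from "node-llama-cpp", though not part of this diff); the import specifiers and path are illustrative, and the returned numbers are estimates, not measurements:

import { readGgufFileInfo } from "node-llama-cpp";

const ggufFileInfo = await readGgufFileInfo("./model.gguf"); // placeholder path
const insights = await GgufInsights.from(ggufFileInfo); // no Llama passed: uses the slim fallback instance
console.log(insights.trainContextSize, insights.totalLayers);
console.log(insights.estimateContextResourceRequirements({
    contextSize: 4096,
    modelGpuLayers: 0 // CPU-only estimate
}));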
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsights.js | +653 lines (new normal file, generated, vendored)
@@ -0,0 +1,653 @@
import { getLlamaWithoutBackend } from "../../bindings/utils/getLlamaWithoutBackend.js";
import { getDefaultContextBatchSize, getDefaultContextSequences } from "../../evaluator/LlamaContext/LlamaContext.js";
import { GgufArchitectureType } from "../types/GgufMetadataTypes.js";
import { getReadablePath } from "../../cli/utils/getReadablePath.js";
import { padSafeContextSize } from "../../evaluator/LlamaContext/utils/padSafeContextSize.js";
import { GgufInsightsConfigurationResolver } from "./GgufInsightsConfigurationResolver.js";
import { GgufInsightsTokens } from "./GgufInsightsTokens.js";
export class GgufInsights {
    /** @internal */ _llama;
    /** @internal */ _modelSize;
    /** @internal */ _totalFileLayers = null;
    /** @internal */ _supportsRanking;
    /** @internal */ _ggufFileInfo;
    /** @internal */ _configurationResolver;
    /** @internal */ _tokens;
    constructor(ggufFileInfo, llama) {
        this._llama = llama;
        this._ggufFileInfo = ggufFileInfo;
        this._modelSize = calculateTensorsSize(ggufFileInfo.fullTensorInfo ?? [], llama, true, true);
        this._configurationResolver = GgufInsightsConfigurationResolver._create(this);
        this._tokens = GgufInsightsTokens._create(this);
    }
    /**
     * Get warnings about the model file that would affect its usage.
     *
     * Most of these warnings are also generated by `llama.cpp`
     */
    getWarnings(modelFilePath) {
        const warnings = [];
        const modelFilePathText = (modelFilePath != null && modelFilePath !== "")
            ? ` ("${getReadablePath(modelFilePath)}")`
            : "";
        if (this._ggufFileInfo?.metadata?.tokenizer?.ggml?.model === "gpt2" &&
            this._ggufFileInfo?.metadata?.tokenizer?.ggml?.pre == null) {
|
||||
// equivalent to the warning in `llama.cpp` under `llm_load_vocab`: "missing pre-tokenizer type, using: 'default'"
|
||||
warnings.push(`This model file${modelFilePathText} is missing a pre-tokenizer configuration. ` +
|
||||
"This may cause incorrect tokenization and thus degrade the generation quality. " +
|
||||
"Consider using a newer model or regenerating this GGUF model file");
|
||||
}
|
||||
return warnings;
|
||||
}
|
||||
get ggufFileInfo() {
|
||||
return this._ggufFileInfo;
|
||||
}
|
||||
get configurationResolver() {
|
||||
return this._configurationResolver;
|
||||
}
|
||||
get tokens() {
|
||||
return this._tokens;
|
||||
}
|
||||
/** The context size the model was trained on */
|
||||
get trainContextSize() {
|
||||
return this._ggufFileInfo.architectureMetadata.context_length;
|
||||
}
|
||||
/** The size of an embedding vector the model can produce */
|
||||
get embeddingVectorSize() {
|
||||
return this._ggufFileInfo.architectureMetadata.embedding_length;
|
||||
}
|
||||
get totalLayers() {
|
||||
const outputLayers = 1;
|
||||
return this._getTotalFileLayers() + outputLayers;
|
||||
}
|
||||
get modelSize() {
|
||||
return this._modelSize;
|
||||
}
|
||||
get flashAttentionSupported() {
|
||||
// source: `llama_new_context_with_model` in `llama.cpp`
|
||||
if (this._ggufFileInfo.metadata?.general?.architecture === GgufArchitectureType.grok)
|
||||
return false;
|
||||
else if (this._ggufFileInfo.metadata?.general?.architecture === GgufArchitectureType.gemma2)
|
||||
return false;
|
||||
else {
|
||||
const nHead = this._ggufFileInfo.architectureMetadata?.attention?.head_count ?? 0;
|
||||
const nEmbd = this._ggufFileInfo.architectureMetadata?.embedding_length ?? 0;
|
||||
const nEmbdHeadK = this._ggufFileInfo.architectureMetadata?.attention?.key_length ?? ((nHead == 0) ? 0 : (nEmbd / nHead));
|
||||
const nEmbdHeadV = this._ggufFileInfo.architectureMetadata?.attention?.value_length ?? ((nHead == 0) ? 0 : nEmbd / nHead);
|
||||
if (nEmbdHeadK !== nEmbdHeadV)
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
get hasEncoder() {
|
||||
switch (this._ggufFileInfo.metadata?.general?.architecture) {
|
||||
case GgufArchitectureType.t5:
|
||||
case GgufArchitectureType.t5encoder:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
get hasDecoder() {
|
||||
switch (this._ggufFileInfo.metadata?.general?.architecture) {
|
||||
case GgufArchitectureType.t5encoder:
|
||||
return false;
|
||||
}
|
||||
return true;
|
||||
}
|
||||
get isRecurrent() {
|
||||
switch (this._ggufFileInfo.metadata?.general?.architecture) {
|
||||
case GgufArchitectureType.mamba:
|
||||
case GgufArchitectureType.mamba2:
|
||||
case GgufArchitectureType.rwkv6:
|
||||
case GgufArchitectureType.rwkv6qwen2:
|
||||
case GgufArchitectureType.rwkv7:
|
||||
case GgufArchitectureType.arwkv7:
|
||||
return true;
|
||||
}
|
||||
return false;
|
||||
}
|
||||
get supportsRanking() {
|
||||
if (this._supportsRanking != null)
|
||||
return this._supportsRanking;
|
||||
const layers = this._ggufFileInfo.fullTensorInfo ?? [];
|
||||
for (let i = layers.length - 1; i >= 0; i--) {
|
||||
const tensor = layers[i];
|
||||
if (tensor == null)
|
||||
continue;
|
||||
if (tensor.name === "cls.weight" || tensor.name === "cls.output.weight") {
|
||||
this._supportsRanking = this.tokens.sepToken != null || this.tokens.eosToken != null ||
|
||||
isRankingTemplateValid(parseRankingTemplate(this._ggufFileInfo.metadata?.tokenizer?.["chat_template.rerank"]));
|
||||
this._supportsRanking &&= !(this.hasEncoder && this.hasDecoder); // encoder-decoder models are not supported
|
||||
return this._supportsRanking;
|
||||
}
|
||||
}
|
||||
this._supportsRanking = false;
|
||||
return this._supportsRanking;
|
||||
}
|
||||
/**
|
||||
* The size of the SWA (Sliding Window Attention).
|
||||
*
|
||||
* When `undefined`, the model does not use sliding window attention.
|
||||
*/
|
||||
get swaSize() {
|
||||
const slidingWindow = this._ggufFileInfo?.architectureMetadata?.attention?.sliding_window;
|
||||
if (slidingWindow == null || slidingWindow <= 0)
|
||||
return undefined;
|
||||
const trainContextSize = this.trainContextSize;
|
||||
if (trainContextSize != null && slidingWindow >= trainContextSize)
|
||||
return undefined;
|
||||
return slidingWindow;
|
||||
}
|
||||
estimateModelResourceRequirements({ gpuLayers, useMmap = this._llama.supportsMmap, gpuSupportsMmap = this._llama.gpuSupportsMmap }) {
|
||||
const { cpu, gpu } = this._getTensorResourceSplit(gpuLayers);
|
||||
return {
|
||||
cpuRam: calculateTensorsSize(cpu, this._llama, false),
|
||||
gpuVram: calculateTensorsSize(gpu, this._llama, useMmap && gpuSupportsMmap)
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Estimates the memory required to create a context of the given parameters based on the implementation details of `llama.cpp`.
|
||||
* The calculation doesn't include a precise estimation of the graph overhead memory, so it uses a rough estimate for that.
|
||||
* The estimation for the graph overhead memory will be improved in the future to be more precise, but it's good enough for now.
|
||||
*/
|
||||
estimateContextResourceRequirements({ contextSize, modelGpuLayers, batchSize, sequences, isEmbeddingContext = false, includeGraphOverhead = true, flashAttention = false, swaFullCache = false }) {
|
||||
if (sequences == null)
|
||||
sequences = getDefaultContextSequences();
|
||||
if (batchSize == null)
|
||||
batchSize = getDefaultContextBatchSize({ contextSize, sequences });
|
||||
const llmData = this._ggufFileInfo.architectureMetadata;
|
||||
const tensorInfo = this._ggufFileInfo.fullTensorInfo ?? [];
|
||||
const slidingWindow = this.swaSize ?? 0;
|
||||
const kvUnified = false;
|
||||
const usingSWA = !swaFullCache && slidingWindow > 0 && slidingWindow < contextSize &&
|
||||
(this.trainContextSize == null || slidingWindow < this.trainContextSize);
|
||||
const swaPattern = getSwaPatternForArchitecture(this._ggufFileInfo.metadata?.general?.architecture);
|
||||
const nonSwaPercent = swaPattern <= 1
|
||||
? 1
|
||||
: (1 / (swaPattern + (flashAttention ? -0.5 : -1)));
|
||||
// source: `llama_kv_cache_unified::get_padding` in `llama-kv-cache.cpp`
|
||||
const kvCachePadding = 1;
|
||||
const actualContextSize = kvUnified
|
||||
? padSafeContextSize(sequences * contextSize, "up")
|
||||
: sequences * padSafeContextSize(contextSize, "up");
|
||||
const kvSize = usingSWA
|
||||
? ((1 - nonSwaPercent) * Math.min(actualContextSize, ggmlPad(sequences * slidingWindow + batchSize, kvCachePadding)) +
|
||||
nonSwaPercent * actualContextSize)
|
||||
: actualContextSize;
|
||||
const totalFileLayers = this._getTotalFileLayers();
|
||||
const finalGpuLayers = Math.max(0, Math.min(modelGpuLayers ?? totalFileLayers, totalFileLayers));
|
||||
const finalCpuLayers = totalFileLayers - finalGpuLayers;
|
||||
const usingGpu = finalGpuLayers !== 0;
|
||||
const vocabularySize = llmData.vocab_size ?? this._ggufFileInfo.metadata.tokenizer?.ggml?.tokens?.length ?? 0;
|
||||
const embeddingSize = llmData.embedding_length ?? 0;
|
||||
const floatBytes = 4; // sizeof(float)
|
||||
const int32TBytes = 4; // sizeof(int32_t)
|
||||
const estimateOutput = (nOutputs) => {
|
||||
// source: `llama_context::output_reserve` in `llama-context.cpp`
|
||||
const nOutputsMax = Math.max(batchSize, nOutputs);
|
||||
const isT5 = this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.t5;
|
||||
const hasLogits = isT5 || !isEmbeddingContext;
|
||||
const hasEmbd = isT5 || isEmbeddingContext;
|
||||
const logitsSize = hasLogits
|
||||
? (vocabularySize * nOutputsMax)
|
||||
: 0;
|
||||
const embdSize = hasEmbd
|
||||
? (embeddingSize * nOutputsMax)
|
||||
: 0;
|
||||
const outputBufferSize = (logitsSize + embdSize) * floatBytes;
|
||||
const outputIdsArr = int32TBytes * batchSize;
|
||||
return outputBufferSize + outputIdsArr;
|
||||
};
|
||||
const estimateGraphOverheadMemory = () => {
|
||||
const s1MB = Math.pow(1024, 2);
|
||||
const tensorInfo = this._ggufFileInfo.fullTensorInfo ?? [];
|
||||
const expertCount = llmData?.expert_count ?? 0;
|
||||
const headCount = llmData?.attention?.head_count ?? 0;
|
||||
const embeddingLength = llmData?.embedding_length ?? 0;
|
||||
let defaultCalculationAdjustment = 0;
|
||||
if (batchSize == null)
|
||||
return 0;
|
||||
if (this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.llama) {
|
||||
if (expertCount > 0) {
|
||||
const expertsUsedCount = this._ggufFileInfo.architectureMetadata.expert_used_count ?? 2;
|
||||
return int32TBytes * batchSize * (((expertsUsedCount + 1) * embeddingLength) + (kvSize * headCount));
|
||||
}
|
||||
return int32TBytes * batchSize * (embeddingLength + (kvSize * headCount));
|
||||
}
|
||||
else if (this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.qwen2) {
|
||||
if (modelGpuLayers === this.totalLayers) {
|
||||
defaultCalculationAdjustment -= (s1MB * 340) * (this.trainContextSize == null
|
||||
? 1
|
||||
: kvSize / this.trainContextSize);
|
||||
}
|
||||
else {
|
||||
defaultCalculationAdjustment -= (s1MB * 250) + ((s1MB * 50) * (this.trainContextSize == null
|
||||
? 1
|
||||
: kvSize / this.trainContextSize));
|
||||
}
|
||||
}
|
||||
else if (this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.gemma) {
|
||||
// only works properly when all layers are on the GPU, which is why it's commented out:
|
||||
// return int32TBytes * batchSize * ((llmData.embedding_length ?? 0));
|
||||
if (modelGpuLayers === this.totalLayers) {
|
||||
defaultCalculationAdjustment += (s1MB * 40) - ((s1MB * 270) * (this.trainContextSize == null
|
||||
? 1
|
||||
: kvSize / this.trainContextSize));
|
||||
}
|
||||
else {
|
||||
defaultCalculationAdjustment += -(s1MB * 550) + ((s1MB * 150) * (this.trainContextSize == null
|
||||
? 1
|
||||
: Math.max(0, (1 - (kvSize / this.trainContextSize)))));
|
||||
}
|
||||
}
|
||||
else if (this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.stablelm) {
|
||||
const headCount = this._ggufFileInfo.architectureMetadata.attention?.head_count ?? 0;
|
||||
return (int32TBytes * batchSize * kvSize * headCount) - (50 * s1MB);
|
||||
// if (modelGpuLayers === this.totalLayers) {
|
||||
// defaultCalculationAdjustment += -(s1MB * 20) + (
|
||||
// (s1MB * 250) * (
|
||||
// this.trainContextSize == null
|
||||
// ? 1
|
||||
// : kvSize / this.trainContextSize
|
||||
// )
|
||||
// );
|
||||
// } else {
|
||||
// defaultCalculationAdjustment += -(s1MB * 40) + (
|
||||
// (s1MB * 300) * (
|
||||
// this.trainContextSize == null
|
||||
// ? 1
|
||||
// : kvSize / this.trainContextSize
|
||||
// )
|
||||
// );
|
||||
// }
|
||||
}
|
||||
else if (this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.qwen3) {
|
||||
return int32TBytes * batchSize * (embeddingLength + (kvSize * headCount));
|
||||
}
|
||||
else if (expertCount > 0) {
|
||||
const expertsUsedCount = this._ggufFileInfo.architectureMetadata.expert_used_count ?? 2;
|
||||
return int32TBytes * batchSize * (((expertsUsedCount + 1) * embeddingLength) + (kvSize * headCount));
|
||||
}
|
||||
const totalElements = tensorInfo.length === 0
|
||||
? this.totalLayers * (((llmData.embedding_length ?? 0) +
|
||||
(llmData.feed_forward_length ?? 0)) / 2)
|
||||
: tensorInfo.reduce((res, tensor) => {
|
||||
return res + tensor.dimensions.reduce((res, dim) => res + Number(dim), 0);
|
||||
}, 0);
|
||||
if (this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.phi3) {
|
||||
// magic numbers for estimation. will be improved in the future
|
||||
return (totalElements * 123 * (kvSize / 4096)) + defaultCalculationAdjustment;
|
||||
}
|
||||
// magic numbers for estimation. will be improved in the future
|
||||
return (totalElements * 77.655 * (kvSize / 4096)) + defaultCalculationAdjustment;
|
||||
};
|
||||
const gpuKVCacheSize = usingGpu
|
||||
? this._estimateKvMemorySizeInBytes(kvSize, finalGpuLayers < totalFileLayers
|
||||
? (finalGpuLayers + 1)
|
||||
: finalGpuLayers)
|
||||
: 0;
|
||||
const cpuKVCacheSize = this._estimateKvMemorySizeInBytes(kvSize, finalCpuLayers);
|
||||
// source: `llama_context::graph_max_nodes` in `llama-context.cpp`
|
||||
const getMaxNodesMultiplier = (arch, nTokens) => {
|
||||
if (arch === GgufArchitectureType.qwen3next)
|
||||
return {
|
||||
min: nTokens * 40,
|
||||
multiplier: 32
|
||||
};
|
||||
return {
|
||||
min: 1024,
|
||||
multiplier: 8
|
||||
};
|
||||
};
|
||||
const maxNodesMultiplier = getMaxNodesMultiplier(this._ggufFileInfo.metadata?.general?.architecture, Math.min(actualContextSize, batchSize));
|
||||
const maxNodes = Math.max(maxNodesMultiplier.min, maxNodesMultiplier.multiplier * tensorInfo.length);
|
||||
const cpuNodes = maxNodesMultiplier.multiplier * (tensorInfo.length * (finalCpuLayers / totalFileLayers));
|
||||
const gpuNodes = maxNodes - cpuNodes;
|
||||
const gpuComputeBufferSize = (this._llama._consts.ggmlTensorOverhead * gpuNodes) +
|
||||
this._llama._bindings.getGgmlGraphOverheadCustom(gpuNodes, false);
|
||||
const cpuComputeBufferSize = (this._llama._consts.ggmlTensorOverhead * cpuNodes) +
|
||||
this._llama._bindings.getGgmlGraphOverheadCustom(cpuNodes, false);
|
||||
const graphOverheadMemory = (flashAttention || !includeGraphOverhead)
|
||||
? 0
|
||||
: estimateGraphOverheadMemory();
|
||||
const graphOverheadGpuSize = usingGpu
|
||||
? Math.round(graphOverheadMemory * (finalGpuLayers / totalFileLayers))
|
||||
: 0;
|
||||
const graphOverheadCpuSize = graphOverheadMemory - graphOverheadGpuSize;
|
||||
const outputBufferSize = estimateOutput(sequences);
|
||||
const gpuVram = gpuKVCacheSize + gpuComputeBufferSize + graphOverheadGpuSize + outputBufferSize;
|
||||
const cpuRam = cpuKVCacheSize + cpuComputeBufferSize + graphOverheadCpuSize + outputBufferSize;
|
||||
return {
|
||||
cpuRam,
|
||||
gpuVram: usingGpu
|
||||
? gpuVram
|
||||
: 0
|
||||
};
|
||||
}
|
||||
/**
|
||||
* Get the split tensor resources for CPU and GPU based on the number of GPU layers
|
||||
* @internal
|
||||
*/
|
||||
_getTensorResourceSplit(gpuLayers) {
|
||||
const tensorInfo = this._ggufFileInfo.fullTensorInfo ?? [];
|
||||
const architecture = this._ggufFileInfo.metadata?.general?.architecture;
|
||||
if (gpuLayers === 0) {
|
||||
return {
|
||||
cpu: tensorInfo,
|
||||
gpu: []
|
||||
};
|
||||
}
|
||||
const fileLayers = this._getFileLayers();
|
||||
const startGpuLayer = Math.max(0, fileLayers - gpuLayers);
|
||||
const gpuTensors = [];
|
||||
const cpuTensors = [];
|
||||
let tokenEmbedLayer;
|
||||
let mainOutputLayer;
|
||||
for (const singleTensorInfo of tensorInfo) {
|
||||
if (isMainOutputLayer(singleTensorInfo.name))
|
||||
mainOutputLayer = singleTensorInfo;
|
||||
else if (isTokenEmbedLayer(singleTensorInfo.name))
|
||||
tokenEmbedLayer = singleTensorInfo;
|
||||
// in the implementation of `llm_load_tensors`, layers with `LLM_TENSOR_LAYER_INPUT` are always
|
||||
// loaded with `model.dev_input`, which is always set to the CPU
|
||||
if (isInputLayer(singleTensorInfo.name)) {
|
||||
cpuTensors.push(singleTensorInfo);
|
||||
continue;
|
||||
// in the implementation of `llm_load_tensors`, layers with `LLM_TENSOR_LAYER_OUTPUT` are always
|
||||
// loaded with `model.dev_output`, which is set to the GPU only if all the layers are on the GPU
|
||||
}
|
||||
else if (isOutputLayer(singleTensorInfo.name)) {
|
||||
if (gpuLayers === this.totalLayers) {
|
||||
gpuTensors.push(singleTensorInfo);
|
||||
continue;
|
||||
}
|
||||
else {
|
||||
cpuTensors.push(singleTensorInfo);
|
||||
continue;
|
||||
}
|
||||
}
|
||||
const { layerNumber } = parseTensorName(singleTensorInfo.name);
if (gpuLayers !== this.totalLayers) {
if (architecture === GgufArchitectureType.qwen2 || architecture === GgufArchitectureType.gemma) {
if (layerNumber != null && layerNumber >= startGpuLayer)
gpuTensors.push(singleTensorInfo);
else
cpuTensors.push(singleTensorInfo);
continue;
}
}
if (layerNumber == null || layerNumber >= startGpuLayer)
gpuTensors.push(singleTensorInfo);
else
cpuTensors.push(singleTensorInfo);
}
if (mainOutputLayer == null && tokenEmbedLayer != null && gpuLayers === this.totalLayers && !gpuTensors.includes(tokenEmbedLayer))
gpuTensors.push(tokenEmbedLayer);
return {
cpu: cpuTensors,
gpu: gpuTensors
};
}
/** @internal */
_determineNumberOfLayersFromTensorInfo() {
const layerNumbers = new Set();
for (const singleTensorInfo of (this._ggufFileInfo.fullTensorInfo ?? [])) {
const { layerNumber } = parseTensorName(singleTensorInfo.name);
if (layerNumber != null)
layerNumbers.add(layerNumber);
}
return layerNumbers.size;
}
/** @internal */
_getFileLayers() {
return this._ggufFileInfo.architectureMetadata.block_count ?? this._determineNumberOfLayersFromTensorInfo();
}
/** @internal */
_estimateKvMemorySizeInBytes(kvSize, layers) {
// source: `llama_kv_cache_init` in `llama.cpp`
const nHead = this._ggufFileInfo.architectureMetadata.attention?.head_count ?? 0;
const nEmbd = this._ggufFileInfo.architectureMetadata.embedding_length ?? 0;
const nEmbdHeadK = this._ggufFileInfo.architectureMetadata.attention?.key_length ?? ((nHead == 0) ? 0 : (nEmbd / nHead));
const nHeadKv = this._ggufFileInfo.architectureMetadata.attention?.head_count_kv ?? nHead;
const nEmbdHeadV = this._ggufFileInfo.architectureMetadata.attention?.value_length ?? ((nHead == 0) ? 0 : nEmbd / nHead);
const ssmDConv = this._ggufFileInfo.architectureMetadata.ssm?.conv_kernel ?? 0;
const ssmDInner = this._ggufFileInfo.architectureMetadata.ssm?.inner_size ?? 0;
const modelNEmbdKS = (this._ggufFileInfo.architectureMetadata.wkv?.head_size ?? 0) !== 0
? (this._ggufFileInfo.architectureMetadata.token_shift_count ?? 0) * nEmbd
: (ssmDConv > 0 ? (ssmDConv - 1) : 0) * ssmDInner;
const ssmDState = this._ggufFileInfo.architectureMetadata.ssm?.state_size ?? 0;
const modelNEmbdVS = (this._ggufFileInfo.architectureMetadata.wkv?.head_size ?? 0) !== 0
? nEmbd * (this._ggufFileInfo.architectureMetadata.wkv?.head_size ?? 0)
: ssmDState * ssmDInner;
let totalElementsK = 0;
let totalElementsV = 0;
for (let i = 0; i < layers; i++) {
const nHeadKvArrayItem = (typeof nHeadKv === "number")
? nHeadKv
: nHeadKv[i] !== 0
? nHeadKv[i]
: nHead;
const nEmbdKGqa = nEmbdHeadK * nHeadKvArrayItem;
const nEmbdVGqa = nEmbdHeadV * nHeadKvArrayItem;
const totalNEmbdKGqa = nEmbdKGqa + modelNEmbdKS;
const totalNEmbdVGqa = nEmbdVGqa + modelNEmbdVS;
totalElementsK += totalNEmbdKGqa * kvSize;
totalElementsV += totalNEmbdVGqa * kvSize;
}
const keyTypeSize = this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.mamba
// if `type_k` of `llama_context_params` changes to be configurable in `LlamaContext`,
// this would have to depend on that value
? this._llama._consts.ggmlTypeF32Size
: this._llama._consts.ggmlTypeF16Size;
const valueTypeSize = this._ggufFileInfo.metadata.general?.architecture === GgufArchitectureType.mamba
// if `type_v` of `llama_context_params` changes to be configurable in `LlamaContext`,
// this would have to depend on that value
? this._llama._consts.ggmlTypeF32Size
: this._llama._consts.ggmlTypeF16Size;
return ((totalElementsK * keyTypeSize) +
(totalElementsV * valueTypeSize));
}
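// Editor's note - a worked example of the KV-cache estimate above (a sketch, not part of the original file).
// Assume a GQA model with nHead = 32, nEmbd = 4096, head_count_kv = 8 and no SSM/WKV metadata:
//   nEmbdHeadK = nEmbdHeadV = 4096 / 32 = 128, so nEmbdKGqa = nEmbdVGqa = 128 * 8 = 1024
//   with kvSize = 4096 and layers = 32: totalElementsK = totalElementsV = 1024 * 4096 * 32 = 134,217,728
//   at F16 (2 bytes per element): (134,217,728 * 2) * 2 = 536,870,912 bytes = 512 MiB for the whole KV cache.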
/** @internal */
_getTotalFileLayers() {
if (this._totalFileLayers != null)
return this._totalFileLayers;
this._totalFileLayers = this._getFileLayers();
return this._totalFileLayers;
}
/**
* @param ggufFileInfo
* @param llama - If you already have a `Llama` instance, pass it to reuse it for the `GgufInsights` instance.
* If you don't pass a `Llama` instance, a basic `Llama` instance is created as a fallback - it's a slim instance that
* doesn't instantiate a `llama.cpp` backend, so it won't utilize the GPU at all, and it will be shared with other
* `GgufInsights` instances that need a fallback `Llama` instance.
*/
static async from(ggufFileInfo, llama) {
let resolvedLlama = llama;
if (resolvedLlama == null)
resolvedLlama = await getLlamaWithoutBackend();
return new GgufInsights(ggufFileInfo, resolvedLlama);
}
}
function parseTensorName(tensorName) {
if (tensorName == null)
return { layerNumber: undefined };
const layerTensorPrefix = "blk.";
if (!tensorName.startsWith(layerTensorPrefix))
return { layerNumber: undefined };
const dotIndex = tensorName.indexOf(".", layerTensorPrefix.length);
const layerNumberString = tensorName.slice(layerTensorPrefix.length, dotIndex < 0
? tensorName.length
: dotIndex);
const layerNumber = parseInt(layerNumberString);
if (Number.isFinite(layerNumber))
return { layerNumber };
return { layerNumber: undefined };
}
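// Editor's note - example behavior of parseTensorName (a sketch, not part of the original file):
//   parseTensorName("blk.12.attn_q.weight") -> { layerNumber: 12 }   ("blk." prefix, number up to the next ".")
//   parseTensorName("output_norm.weight")   -> { layerNumber: undefined }   (no "blk." prefix)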
function calculateTensorsSize(tensorsInfo, llama, useMmap, startFromTensorDataOffset = false) {
if (!useMmap) {
let size = 0;
for (const tensorInfo of tensorsInfo)
size += calculateTensorSize(tensorInfo, llama);
return size;
}
const fileStats = new Map();
for (const tensorInfo of tensorsInfo) {
let stats = fileStats.get(tensorInfo.filePart);
if (stats == null) {
stats = {
tensorsSize: 0
};
fileStats.set(tensorInfo.filePart, stats);
}
const tensorSize = calculateTensorSize(tensorInfo, llama);
stats.tensorsSize += tensorSize;
const startOffset = tensorInfo.offset;
const endOffset = typeof startOffset === "number"
? startOffset + tensorSize
: startOffset + BigInt(tensorSize);
if (startFromTensorDataOffset)
stats.startOffset = Number(BigInt(tensorInfo.fileOffset) - BigInt(tensorInfo.offset));
else if (stats.startOffset == null || startOffset < stats.startOffset)
stats.startOffset = startOffset;
if (stats.endOffset == null || endOffset > stats.endOffset)
stats.endOffset = endOffset;
}
let size = 0;
for (const [, stats] of fileStats) {
const offsetSize = (stats.endOffset == null || stats.startOffset == null)
? 0
: Number(BigInt(stats.endOffset) - BigInt(stats.startOffset));
const tensorsSize = stats.tensorsSize;
size += Math.max(offsetSize, tensorsSize);
}
return size;
}
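// Editor's note - a worked example for the mmap branch above (a sketch with made-up numbers, not part of the original file).
// Suppose one file part holds two tensors: A at offset 0 with size 96, and B at offset 128 with size 64
// (32 bytes of alignment padding sit between them). Then:
//   tensorsSize = 96 + 64 = 160, offsetSize = (128 + 64) - 0 = 192
//   size += Math.max(192, 160) = 192   // the mapped span, padding included
// Without mmap the same tensors would be counted as just 160 bytes.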
function calculateTensorSize(tensor, llama) {
const typeSize = llama._bindings.getTypeSizeForGgmlType(tensor.ggmlType);
const blockSize = llama._bindings.getBlockSizeForGgmlType(tensor.ggmlType);
const ggmlMaxDims = llama._consts.ggmlMaxDims;
if (typeSize == null || blockSize == null)
throw new Error("Invalid type or block size");
const { ne, nb } = getTensorNeAndNb(tensor, { typeSize, blockSize, ggmlMaxDims });
if (blockSize === 1) {
let totalBytes = typeSize;
for (let i = 0; i < ggmlMaxDims; i++) {
totalBytes += (ne[i] - 1) * nb[i];
}
return totalBytes;
}
else {
let totalBytes = Math.floor((ne[0] * nb[0]) / blockSize);
for (let i = 1; i < ggmlMaxDims; i++) {
totalBytes += (ne[i] - 1) * nb[i];
}
return totalBytes;
}
}
function getTensorNeAndNb(tensor, { typeSize, blockSize, ggmlMaxDims }) {
// number of elements
// source: `ggml_new_tensor_impl` in `ggml.c`
const ne = [
...tensor.dimensions,
...(Array(Math.max(0, ggmlMaxDims - tensor.dimensions.length)).fill(1))
].slice(0, ggmlMaxDims);
// number of bytes
// source: `ggml_new_tensor_impl` in `ggml.c`
const nb = [
typeSize,
Math.floor(typeSize * (ne[0] / blockSize)),
...Array(ggmlMaxDims - 2).fill(0)
];
for (let i = 2; i < ggmlMaxDims; i++) {
nb[i] = nb[i - 1] * ne[i - 1];
}
return {
ne,
nb
};
}
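// Editor's note - a worked example for the two functions above (a sketch, not part of the original file).
// Take an F32 tensor with dimensions [4096, 32000] (typeSize = 4, blockSize = 1, ggmlMaxDims = 4):
//   ne = [4096, 32000, 1, 1]
//   nb = [4, 16384, 524288000, 524288000]   // nb[1] = 4 * 4096; nb[i] = nb[i - 1] * ne[i - 1] for i >= 2
// calculateTensorSize then yields 4 + 4095*4 + 31999*16384 + 0 + 0 = 524,288,000 bytes,
// which matches the expected 4096 * 32000 * 4.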
function isInputLayer(layerName) {
const [firstPart] = layerName.split(".");
if (firstPart == null)
return false;
// source: in `llama.cpp`, all tensor names from `LLM_TENSOR_NAMES` that,
// in `llm_tensor_info_mapping`, have a mapping to `LLM_TENSOR_LAYER_INPUT`
switch (firstPart) {
case "token_embd":
case "token_embd_norm":
case "token_types":
case "position_embd":
return true;
}
return false;
}
function isOutputLayer(layerName) {
const [firstPart, secondPart] = layerName.split(".");
if (firstPart == null)
return false;
// source: in `llama.cpp`, all tensor names from `LLM_TENSOR_NAMES` that,
// in `llm_tensor_info_mapping`, have a mapping to `LLM_TENSOR_LAYER_OUTPUT`
switch (firstPart) {
case "output":
case "output_norm":
case "cls":
return true;
}
if (secondPart == null)
return false;
// source: in `llama.cpp`, all tensor names from `LLM_TENSOR_NAMES` that,
// in `llm_tensor_info_mapping`, have a mapping to `LLM_TENSOR_LAYER_OUTPUT`
switch (firstPart + "." + secondPart) {
case "cls.output":
case "dec.output_norm":
case "enc.output_norm":
return true;
}
return false;
}
function isMainOutputLayer(layerName) {
const [firstPart] = layerName.split(".");
return firstPart === "output";
}
function isTokenEmbedLayer(layerName) {
const [firstPart] = layerName.split(".");
return firstPart === "token_embd";
}
function ggmlPad(value, padding) {
return ((value + padding - 1) & ~(padding - 1));
}
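// Editor's note - ggmlPad rounds up to the next multiple of `padding` (assumed to be a power of two),
// e.g. (a sketch, not part of the original file):
//   ggmlPad(10, 8) === 16, ggmlPad(16, 8) === 16, ggmlPad(17, 8) === 24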
function getSwaPatternForArchitecture(architecture) {
// source: `llama_model::load_hparams` in `llama-model.cpp` - calls to `hparams.set_swa_pattern`
switch (architecture) {
case GgufArchitectureType.llama4:
return 4;
case GgufArchitectureType.phi3:
return 1;
case GgufArchitectureType.gemma2:
return 2;
case GgufArchitectureType.gemma3:
return 6;
case GgufArchitectureType.gemma3n:
return 5;
case GgufArchitectureType.cohere2:
return 4;
case GgufArchitectureType.exaone4:
return 4;
case GgufArchitectureType.gptOss:
return 2;
case GgufArchitectureType.smallthinker:
return 4;
}
return 1;
}
export function parseRankingTemplate(template) {
if (template == null)
return undefined;
return template
.replaceAll("{query}", "{{query}}")
.replaceAll("{document}", "{{document}}");
}
export function isRankingTemplateValid(template) {
return template != null && template.includes("{{query}}") && template.includes("{{document}}");
}
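// Editor's note - example usage of the two template helpers above (a sketch, not part of the original file):
//   parseRankingTemplate("query: {query} document: {document}")
//     -> "query: {{query}} document: {{document}}"
//   isRankingTemplateValid(parseRankingTemplate("query: {query} document: {document}")) === true
//   isRankingTemplateValid("no placeholders here") === false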
//# sourceMappingURL=GgufInsights.js.map
1
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsights.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsights.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
194
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsConfigurationResolver.d.ts
generated
vendored
Normal file
194
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsConfigurationResolver.d.ts
generated
vendored
Normal file
@@ -0,0 +1,194 @@
import { BuildGpu } from "../../bindings/types.js";
import { LlamaModelOptions } from "../../evaluator/LlamaModel/LlamaModel.js";
import { LlamaContextOptions } from "../../evaluator/LlamaContext/types.js";
import type { GgufInsights } from "./GgufInsights.js";
export declare const defaultTrainContextSizeForEstimationPurposes = 4096;
export declare class GgufInsightsConfigurationResolver {
private constructor();
get ggufInsights(): GgufInsights;
/**
* Resolve the best configuration for loading a model and creating a context using the current hardware.
*
* Specifying a `targetGpuLayers` and/or `targetContextSize` will ensure the resolved configuration matches those values,
* but note it can lower the compatibility score if the hardware doesn't support it.
*
* Overriding hardware values is possible by configuring `hardwareOverrides`.
* @param options
* @param hardwareOverrides
*/
resolveAndScoreConfig({ targetGpuLayers, targetContextSize, embeddingContext, flashAttention, swaFullCache, useMmap }?: {
targetGpuLayers?: number | "max";
targetContextSize?: number;
embeddingContext?: boolean;
flashAttention?: boolean;
swaFullCache?: boolean;
useMmap?: boolean;
}, { getVramState, getRamState, getSwapState, llamaVramPaddingSize, llamaGpu, llamaSupportsGpuOffloading }?: {
getVramState?(): Promise<{
total: number;
free: number;
unifiedSize: number;
}>;
getRamState?(): Promise<{
total: number;
free: number;
}>;
getSwapState?(): Promise<{
total: number;
free: number;
}>;
llamaVramPaddingSize?: number;
llamaGpu?: BuildGpu;
llamaSupportsGpuOffloading?: boolean;
}): Promise<{
/**
* A number between `0` (inclusive) and `1` (inclusive) representing the compatibility score.
*/
compatibilityScore: number;
/**
* A number starting at `0` with no upper limit representing the bonus score.
* For each multiplier of the specified `contextSize` that the resolved context size is larger by, 1 bonus point is given.
*/
bonusScore: number;
/**
* The total score, which is the sum of the compatibility and bonus scores.
*/
totalScore: number;
/**
* The resolved values used to calculate the scores.
*/
resolvedValues: {
gpuLayers: number;
contextSize: number;
modelRamUsage: number;
contextRamUsage: number;
totalRamUsage: number;
modelVramUsage: number;
contextVramUsage: number;
totalVramUsage: number;
};
}>;
/**
* Score the compatibility of the model configuration with the current GPU and VRAM state.
* Assumes a model is loaded with the default `"auto"` configurations.
* Scored based on the following criteria:
* - The number of GPU layers that can be offloaded to the GPU (only if there's a GPU. If there's no GPU then by how small the model is)
* - Whether all layers can be offloaded to the GPU (gives additional points)
* - Whether the resolved context size is at least as large as the specified `contextSize`
*
* If the resolved context size is larger than the specified context size, for each multiplier of the specified `contextSize`
* that the resolved context size is larger by, 1 bonus point is given in the `bonusScore`.
*
* `maximumFittedContextSizeMultiplier` is used to improve the proportionality of the bonus score between models.
* Set this to any value higher than `<max compared model context size> / contextSize`.
* Defaults to `100`.
*
* `maximumUnfitConfigurationResourceMultiplier` is used to improve the proportionality of the bonus score between unfit models.
* Set this to any value higher than `<max compared model resource usage> / <total available resources>`.
* Defaults to `100`.
*
* `contextSize` defaults to `4096` (if the model train context size is lower than this, the model train context size is used instead).
*/
scoreModelConfigurationCompatibility({ contextSize, embeddingContext, flashAttention, swaFullCache, maximumFittedContextSizeMultiplier, maximumUnfitConfigurationResourceMultiplier, forceStrictContextSize, forceGpuLayers, useMmap }?: {
contextSize?: number;
embeddingContext?: boolean;
flashAttention?: boolean;
swaFullCache?: boolean;
maximumFittedContextSizeMultiplier?: number;
maximumUnfitConfigurationResourceMultiplier?: number;
/**
* Do not resolve a context size larger than the specified `contextSize`.
*
* Defaults to `false`.
*/
forceStrictContextSize?: boolean;
forceGpuLayers?: number | "max";
useMmap?: boolean;
}, { getVramState, getRamState, getSwapState, llamaVramPaddingSize, llamaGpu, llamaSupportsGpuOffloading }?: {
getVramState?(): Promise<{
total: number;
free: number;
unifiedSize: number;
}>;
getRamState?(): Promise<{
total: number;
free: number;
}>;
getSwapState?(): Promise<{
total: number;
free: number;
}>;
llamaVramPaddingSize?: number;
llamaGpu?: BuildGpu;
llamaSupportsGpuOffloading?: boolean;
}): Promise<{
/**
* A number between `0` (inclusive) and `1` (inclusive) representing the compatibility score.
*/
compatibilityScore: number;
/**
* A number starting at `0` with no upper limit representing the bonus score.
* For each multiplier of the specified `contextSize` that the resolved context size is larger by, 1 bonus point is given.
*/
bonusScore: number;
/**
* The total score, which is the sum of the compatibility and bonus scores.
*/
totalScore: number;
/**
* The resolved values used to calculate the scores.
*/
resolvedValues: {
gpuLayers: number;
contextSize: number;
modelRamUsage: number;
contextRamUsage: number;
totalRamUsage: number;
modelVramUsage: number;
contextVramUsage: number;
totalVramUsage: number;
};
}>;
resolveModelGpuLayers(gpuLayers?: LlamaModelOptions["gpuLayers"], { ignoreMemorySafetyChecks, getVramState, llamaVramPaddingSize, llamaGpu, llamaSupportsGpuOffloading, defaultContextFlashAttention, defaultContextSwaFullCache, useMmap }?: {
ignoreMemorySafetyChecks?: boolean;
getVramState?(): Promise<{
total: number;
free: number;
}>;
llamaVramPaddingSize?: number;
llamaGpu?: BuildGpu;
llamaSupportsGpuOffloading?: boolean;
defaultContextFlashAttention?: boolean;
defaultContextSwaFullCache?: boolean;
useMmap?: boolean;
}): Promise<number>;
/**
* Resolve a context size option for the given options and constraints.
*
* If there's no context size that can fit the available resources, an `InsufficientMemoryError` is thrown.
*/
resolveContextContextSize(contextSize: LlamaContextOptions["contextSize"], { modelGpuLayers, batchSize, modelTrainContextSize, flashAttention, swaFullCache, getVramState, getRamState, getSwapState, llamaGpu, ignoreMemorySafetyChecks, isEmbeddingContext, sequences }: {
modelGpuLayers: number;
modelTrainContextSize: number;
flashAttention?: boolean;
swaFullCache?: boolean;
batchSize?: LlamaContextOptions["batchSize"];
sequences?: number;
getVramState?(): Promise<{
total: number;
free: number;
unifiedSize: number;
}>;
getRamState?(): Promise<{
total: number;
free: number;
}>;
getSwapState?(): Promise<{
total: number;
free: number;
}>;
llamaGpu?: BuildGpu;
ignoreMemorySafetyChecks?: boolean;
isEmbeddingContext?: boolean;
}): Promise<number>;
}
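// Editor's note - a hypothetical usage sketch for this resolver (not part of the original file).
// It assumes `GgufInsights` and `readGgufFileInfo` are importable from the package root and that
// `GgufInsights` exposes the resolver via a `configurationResolver` property; verify against the package docs.
//
// import {getLlama, readGgufFileInfo, GgufInsights} from "node-llama-cpp";
//
// const llama = await getLlama();
// const ggufFileInfo = await readGgufFileInfo("path/to/model.gguf");
// const insights = await GgufInsights.from(ggufFileInfo, llama);
// const {compatibilityScore, totalScore, resolvedValues} =
//     await insights.configurationResolver.resolveAndScoreConfig({targetContextSize: 8192});
// console.log(compatibilityScore, resolvedValues.gpuLayers, resolvedValues.contextSize);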
272
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsConfigurationResolver.js
generated
vendored
Normal file
272
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsConfigurationResolver.js
generated
vendored
Normal file
@@ -0,0 +1,272 @@
import { getDefaultContextSequences } from "../../evaluator/LlamaContext/LlamaContext.js";
import { InsufficientMemoryError } from "../../utils/InsufficientMemoryError.js";
import { resolveModelGpuLayersOption } from "./utils/resolveModelGpuLayersOption.js";
import { resolveContextContextSizeOption } from "./utils/resolveContextContextSizeOption.js";
import { scoreLevels } from "./utils/scoreLevels.js";
import { getRamUsageFromUnifiedVram } from "./utils/getRamUsageFromUnifiedVram.js";
export const defaultTrainContextSizeForEstimationPurposes = 4096;
const defaultContextSizeForUnfitContextSizeConfiguration = 2048;
export class GgufInsightsConfigurationResolver {
/** @internal */ _ggufInsights;
constructor(ggufInsights) {
this._ggufInsights = ggufInsights;
}
get ggufInsights() {
return this._ggufInsights;
}
/**
* Resolve the best configuration for loading a model and creating a context using the current hardware.
*
* Specifying a `targetGpuLayers` and/or `targetContextSize` will ensure the resolved configuration matches those values,
* but note it can lower the compatibility score if the hardware doesn't support it.
*
* Overriding hardware values is possible by configuring `hardwareOverrides`.
* @param options
* @param hardwareOverrides
*/
async resolveAndScoreConfig({ targetGpuLayers, targetContextSize, embeddingContext = false, flashAttention = false, swaFullCache = false, useMmap = this._ggufInsights._llama.supportsMmap } = {}, { getVramState = (() => this._ggufInsights._llama._vramOrchestrator.getMemoryState()), getRamState = (async () => this._ggufInsights._llama._ramOrchestrator.getMemoryState()), getSwapState = (() => this._ggufInsights._llama._swapOrchestrator.getMemoryState()), llamaVramPaddingSize = this._ggufInsights._llama.vramPaddingSize, llamaGpu = this._ggufInsights._llama.gpu, llamaSupportsGpuOffloading = this._ggufInsights._llama.supportsGpuOffloading } = {}) {
const compatibilityScore = await this.scoreModelConfigurationCompatibility({
flashAttention,
swaFullCache,
contextSize: targetContextSize,
embeddingContext,
forceGpuLayers: targetGpuLayers,
forceStrictContextSize: targetContextSize != null,
useMmap
}, {
getVramState,
getRamState,
getSwapState,
llamaVramPaddingSize,
llamaGpu,
llamaSupportsGpuOffloading
});
return compatibilityScore;
}
/**
* Score the compatibility of the model configuration with the current GPU and VRAM state.
* Assumes a model is loaded with the default `"auto"` configurations.
* Scored based on the following criteria:
* - The number of GPU layers that can be offloaded to the GPU (only if there's a GPU. If there's no GPU then by how small the model is)
* - Whether all layers can be offloaded to the GPU (gives additional points)
* - Whether the resolved context size is at least as large as the specified `contextSize`
*
* If the resolved context size is larger than the specified context size, for each multiplier of the specified `contextSize`
* that the resolved context size is larger by, 1 bonus point is given in the `bonusScore`.
*
* `maximumFittedContextSizeMultiplier` is used to improve the proportionality of the bonus score between models.
* Set this to any value higher than `<max compared model context size> / contextSize`.
* Defaults to `100`.
*
* `maximumUnfitConfigurationResourceMultiplier` is used to improve the proportionality of the bonus score between unfit models.
* Set this to any value higher than `<max compared model resource usage> / <total available resources>`.
* Defaults to `100`.
*
* `contextSize` defaults to `4096` (if the model train context size is lower than this, the model train context size is used instead).
*/
async scoreModelConfigurationCompatibility({ contextSize = Math.min(4096, this._ggufInsights.trainContextSize ?? 4096), embeddingContext = false, flashAttention = false, swaFullCache = false, maximumFittedContextSizeMultiplier = 100, maximumUnfitConfigurationResourceMultiplier = 100, forceStrictContextSize = false, forceGpuLayers, useMmap = this._ggufInsights._llama.supportsMmap } = {}, { getVramState = (() => this._ggufInsights._llama._vramOrchestrator.getMemoryState()), getRamState = (async () => this._ggufInsights._llama._ramOrchestrator.getMemoryState()), getSwapState = (() => this._ggufInsights._llama._swapOrchestrator.getMemoryState()), llamaVramPaddingSize = this._ggufInsights._llama.vramPaddingSize, llamaGpu = this._ggufInsights._llama.gpu, llamaSupportsGpuOffloading = this._ggufInsights._llama.supportsGpuOffloading } = {}) {
const [vramState, ramState, swapState] = await Promise.all([
getVramState(),
getRamState(),
getSwapState()
]);
let resolvedGpuLayers = (forceGpuLayers == null || forceGpuLayers == "max")
? this.ggufInsights.totalLayers
: forceGpuLayers;
let gpuLayersFitMemory = false;
try {
resolvedGpuLayers = await this.resolveModelGpuLayers(forceGpuLayers != null
? forceGpuLayers
: embeddingContext
? {
fitContext: {
embeddingContext: true,
contextSize: forceStrictContextSize
? contextSize
: undefined
}
}
: forceStrictContextSize != null
? { fitContext: { contextSize } }
: "auto", {
getVramState: async () => vramState,
llamaVramPaddingSize,
llamaGpu,
llamaSupportsGpuOffloading,
defaultContextFlashAttention: flashAttention,
defaultContextSwaFullCache: swaFullCache,
ignoreMemorySafetyChecks: forceGpuLayers != null,
useMmap
});
gpuLayersFitMemory = true;
}
catch (err) {
if (!(err instanceof InsufficientMemoryError))
throw err;
}
const canUseGpu = llamaSupportsGpuOffloading && llamaGpu !== false;
const estimatedModelResourceUsage = this._ggufInsights.estimateModelResourceRequirements({
gpuLayers: resolvedGpuLayers,
useMmap
});
let resolvedContextSize = forceStrictContextSize
? contextSize
: Math.min(this.ggufInsights.trainContextSize ?? defaultContextSizeForUnfitContextSizeConfiguration, defaultContextSizeForUnfitContextSizeConfiguration);
let contextFitsMemory = false;
try {
resolvedContextSize = await this.resolveContextContextSize("auto", {
getVramState: async () => ({
total: vramState.total,
free: Math.max(0, vramState.free - estimatedModelResourceUsage.gpuVram),
unifiedSize: vramState.unifiedSize
}),
getRamState: async () => ({
total: ramState.total,
free: Math.max(0, ramState.free - estimatedModelResourceUsage.cpuRam +
(-getRamUsageFromUnifiedVram(estimatedModelResourceUsage.gpuVram, vramState)))
}),
getSwapState: async () => ({
total: swapState.total,
free: Math.max(0, swapState.free - Math.max(0, estimatedModelResourceUsage.cpuRam +
(-getRamUsageFromUnifiedVram(estimatedModelResourceUsage.gpuVram, vramState)) +
(-ramState.free)))
}),
llamaGpu,
isEmbeddingContext: embeddingContext,
modelGpuLayers: resolvedGpuLayers,
modelTrainContextSize: this._ggufInsights.trainContextSize ?? defaultTrainContextSizeForEstimationPurposes,
ignoreMemorySafetyChecks: forceStrictContextSize,
flashAttention,
swaFullCache
});
contextFitsMemory = true;
if (forceStrictContextSize && resolvedContextSize < contextSize) {
contextFitsMemory = false;
resolvedContextSize = contextSize;
}
else if (forceStrictContextSize && resolvedContextSize > contextSize) {
resolvedContextSize = contextSize;
}
}
catch (err) {
if (!(err instanceof InsufficientMemoryError))
throw err;
}
const estimatedContextResourceUsage = this._ggufInsights.estimateContextResourceRequirements({
contextSize: resolvedContextSize,
isEmbeddingContext: embeddingContext,
modelGpuLayers: resolvedGpuLayers,
flashAttention,
swaFullCache
});
const rankPoints = {
gpuLayers: 60,
allLayersAreOffloaded: 10,
contextSize: 30,
ramUsageFitsInRam: 10,
cpuOnlySmallModelSize: 70, // also defined inside `scoreModelSizeForCpuOnlyUsage`
bonusContextSize: 10
};
const gpuLayersPoints = rankPoints.gpuLayers * Math.min(1, resolvedGpuLayers / this._ggufInsights.totalLayers);
const allLayersAreOffloadedPoints = rankPoints.allLayersAreOffloaded * (resolvedGpuLayers === this._ggufInsights.totalLayers ? 1 : 0);
const contextSizePoints = contextFitsMemory
? rankPoints.contextSize * Math.min(1, resolvedContextSize / contextSize)
: 0;
const ramUsageFitsInRamPoints = rankPoints.ramUsageFitsInRam * (estimatedModelResourceUsage.cpuRam <= ramState.free
? 1
: estimatedModelResourceUsage.cpuRam <= ramState.free + swapState.free
? 0.8
: estimatedModelResourceUsage.cpuRam <= ramState.total
? 0.5
: (0.5 - Math.min(0.5, 0.5 * ((estimatedModelResourceUsage.cpuRam - ramState.total) / ramState.total))));
const bonusContextSizePoints = contextFitsMemory
? (10 * Math.min(1, (Math.max(0, resolvedContextSize - contextSize) / contextSize) / maximumFittedContextSizeMultiplier))
: 0;
let compatibilityScore = canUseGpu
? ((gpuLayersPoints + allLayersAreOffloadedPoints + contextSizePoints + ramUsageFitsInRamPoints) /
(rankPoints.gpuLayers + rankPoints.allLayersAreOffloaded + rankPoints.contextSize + rankPoints.ramUsageFitsInRam))
: ((contextSizePoints + ramUsageFitsInRamPoints + scoreModelSizeForCpuOnlyUsage(this._ggufInsights.modelSize)) /
(rankPoints.contextSize + rankPoints.ramUsageFitsInRam + rankPoints.cpuOnlySmallModelSize));
let bonusScore = bonusContextSizePoints / rankPoints.bonusContextSize;
if (!gpuLayersFitMemory || !contextFitsMemory ||
estimatedModelResourceUsage.gpuVram + estimatedContextResourceUsage.gpuVram > vramState.total ||
estimatedModelResourceUsage.cpuRam + estimatedContextResourceUsage.cpuRam > ramState.total + swapState.total) {
const totalVramRequirement = estimatedModelResourceUsage.gpuVram + estimatedContextResourceUsage.gpuVram;
const totalRamRequirement = estimatedModelResourceUsage.cpuRam + estimatedContextResourceUsage.cpuRam;
compatibilityScore = 0;
bonusScore = ((1 - (totalVramRequirement / (vramState.total * maximumUnfitConfigurationResourceMultiplier))) +
(1 - (totalRamRequirement / ((ramState.total + swapState.total) * maximumUnfitConfigurationResourceMultiplier)))) / 2;
}
return {
compatibilityScore,
bonusScore,
totalScore: compatibilityScore + bonusScore,
resolvedValues: {
gpuLayers: resolvedGpuLayers,
contextSize: resolvedContextSize,
modelRamUsage: estimatedModelResourceUsage.cpuRam,
contextRamUsage: estimatedContextResourceUsage.cpuRam,
totalRamUsage: estimatedModelResourceUsage.cpuRam + estimatedContextResourceUsage.cpuRam,
modelVramUsage: estimatedModelResourceUsage.gpuVram,
contextVramUsage: estimatedContextResourceUsage.gpuVram,
totalVramUsage: estimatedModelResourceUsage.gpuVram + estimatedContextResourceUsage.gpuVram
}
};
}
async resolveModelGpuLayers(gpuLayers, { ignoreMemorySafetyChecks = false, getVramState = (() => this._ggufInsights._llama._vramOrchestrator.getMemoryState()), llamaVramPaddingSize = this._ggufInsights._llama.vramPaddingSize, llamaGpu = this._ggufInsights._llama.gpu, llamaSupportsGpuOffloading = this._ggufInsights._llama.supportsGpuOffloading, defaultContextFlashAttention = false, defaultContextSwaFullCache = false, useMmap = this._ggufInsights._llama.supportsMmap } = {}) {
return resolveModelGpuLayersOption(gpuLayers, {
ggufInsights: this._ggufInsights,
ignoreMemorySafetyChecks,
getVramState,
llamaVramPaddingSize,
llamaGpu,
llamaSupportsGpuOffloading,
defaultContextFlashAttention,
defaultContextSwaFullCache,
useMmap
});
}
/**
* Resolve a context size option for the given options and constraints.
*
* If there's no context size that can fit the available resources, an `InsufficientMemoryError` is thrown.
*/
async resolveContextContextSize(contextSize, { modelGpuLayers, batchSize, modelTrainContextSize, flashAttention = false, swaFullCache = false, getVramState = (() => this._ggufInsights._llama._vramOrchestrator.getMemoryState()), getRamState = (async () => this._ggufInsights._llama._ramOrchestrator.getMemoryState()), getSwapState = (() => this._ggufInsights._llama._swapOrchestrator.getMemoryState()), llamaGpu = this._ggufInsights._llama.gpu, ignoreMemorySafetyChecks = false, isEmbeddingContext = false, sequences = getDefaultContextSequences() }) {
return await resolveContextContextSizeOption({
contextSize,
batchSize,
sequences,
modelFileInsights: this._ggufInsights,
modelGpuLayers,
modelTrainContextSize,
flashAttention,
swaFullCache,
getVramState,
getRamState,
getSwapState,
llamaGpu,
ignoreMemorySafetyChecks,
isEmbeddingContext
});
}
/** @internal */
static _create(ggufInsights) {
return new GgufInsightsConfigurationResolver(ggufInsights);
}
}
function scoreModelSizeForCpuOnlyUsage(modelSize) {
const s1GB = Math.pow(1024, 3);
return 70 - scoreLevels(modelSize, [{
start: s1GB,
end: s1GB * 2.5,
points: 46
}, {
start: s1GB * 2.5,
end: s1GB * 4,
points: 17
}, {
start: s1GB * 4,
points: 7
}]);
}
//# sourceMappingURL=GgufInsightsConfigurationResolver.js.map
1
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsConfigurationResolver.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsConfigurationResolver.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
5
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsTokens.d.ts
generated
vendored
Normal file
5
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsTokens.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
export declare class GgufInsightsTokens {
private constructor();
get sepToken(): number | null;
get eosToken(): number | null;
}
40
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsTokens.js
generated
vendored
Normal file
40
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsTokens.js
generated
vendored
Normal file
@@ -0,0 +1,40 @@
export class GgufInsightsTokens {
/** @internal */ _ggufInsights;
constructor(ggufInsights) {
this._ggufInsights = ggufInsights;
}
get sepToken() {
const tokenizerModel = this._ggufInsights._ggufFileInfo?.metadata?.tokenizer?.ggml?.model;
const totalTokens = this._ggufInsights._ggufFileInfo?.metadata?.tokenizer?.ggml?.tokens?.length;
let sepTokenId = this._ggufInsights._ggufFileInfo?.metadata?.tokenizer?.ggml?.["seperator_token_id"];
if (sepTokenId == null && tokenizerModel === "bert") {
sepTokenId = 102; // source: `llama_vocab::impl::load` in `llama-vocab.cpp`
}
if (totalTokens != null && sepTokenId != null && sepTokenId >= totalTokens)
return null;
return sepTokenId ?? null;
}
get eosToken() {
const tokenizerModel = this._ggufInsights._ggufFileInfo?.metadata?.tokenizer?.ggml?.model;
const totalTokens = this._ggufInsights._ggufFileInfo?.metadata?.tokenizer?.ggml?.tokens?.length;
const eosTokenId = this._ggufInsights._ggufFileInfo?.metadata?.tokenizer?.ggml?.["eos_token_id"];
if (eosTokenId != null && totalTokens != null && eosTokenId < totalTokens)
return eosTokenId;
switch (tokenizerModel) {
case "no_vocab": return null;
case "none": return null;
case "bert": return null;
case "rwkv": return null;
case "llama": return 2;
case "gpt2": return 11;
case "t5": return 1;
case "plamo2": return 2;
}
return 2; // source: `llama_vocab::impl::load` in `llama-vocab.cpp`
}
/** @internal */
static _create(ggufInsights) {
return new GgufInsightsTokens(ggufInsights);
}
}
//# sourceMappingURL=GgufInsightsTokens.js.map
1
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsTokens.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/GgufInsightsTokens.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"GgufInsightsTokens.js","sourceRoot":"","sources":["../../../src/gguf/insights/GgufInsightsTokens.ts"],"names":[],"mappings":"AAGA,MAAM,OAAO,kBAAkB;IAC3B,gBAAgB,CAAkB,aAAa,CAAe;IAE9D,YAAoB,YAA0B;QAC1C,IAAI,CAAC,aAAa,GAAG,YAAY,CAAC;IACtC,CAAC;IAED,IAAW,QAAQ;QACf,MAAM,cAAc,GAAG,IAAI,CAAC,aAAa,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,EAAE,KAAK,CAAC;QAC1F,MAAM,WAAW,GAAG,IAAI,CAAC,aAAa,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,CAAC;QAEhG,IAAI,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,oBAAoB,CAAC,CAAC;QACrG,IAAI,UAAU,IAAI,IAAI,IAAI,cAAc,KAAK,MAAM,EAAE,CAAC;YAClD,UAAU,GAAG,GAAG,CAAC,CAAC,yDAAyD;QAC/E,CAAC;QAED,IAAI,WAAW,IAAI,IAAI,IAAI,UAAU,IAAI,IAAI,IAAI,UAAU,IAAI,WAAW;YACtE,OAAO,IAAI,CAAC;QAEhB,OAAO,UAAU,IAAI,IAAI,CAAC;IAC9B,CAAC;IAED,IAAW,QAAQ;QACf,MAAM,cAAc,GAAG,IAAI,CAAC,aAAa,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,EAAE,KAAK,CAAC;QAC1F,MAAM,WAAW,GAAG,IAAI,CAAC,aAAa,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,EAAE,MAAM,EAAE,MAAM,CAAC;QAEhG,MAAM,UAAU,GAAG,IAAI,CAAC,aAAa,CAAC,aAAa,EAAE,QAAQ,EAAE,SAAS,EAAE,IAAI,EAAE,CAAC,cAAc,CAAC,CAAC;QACjG,IAAI,UAAU,IAAI,IAAI,IAAI,WAAW,IAAI,IAAI,IAAI,UAAU,GAAG,WAAW;YACrE,OAAO,UAAU,CAAC;QAEtB,QAAQ,cAAc,EAAE,CAAC;YACrB,KAAK,UAAU,CAAC,CAAC,OAAO,IAAI,CAAC;YAC7B,KAAK,MAAM,CAAC,CAAC,OAAO,IAAI,CAAC;YACzB,KAAK,MAAM,CAAC,CAAC,OAAO,IAAI,CAAC;YACzB,KAAK,MAAM,CAAC,CAAC,OAAO,IAAI,CAAC;YACzB,KAAK,OAAO,CAAC,CAAC,OAAO,CAAC,CAAC;YACvB,KAAK,MAAM,CAAC,CAAC,OAAO,EAAE,CAAC;YACvB,KAAK,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC;YACpB,KAAK,QAAQ,CAAC,CAAC,OAAO,CAAC,CAAC;QAC5B,CAAC;QACD,OAAO,CAAC,CAAC,CAAC,yDAAyD;IACvE,CAAC;IAED,gBAAgB;IACT,MAAM,CAAC,OAAO,CAAC,YAA0B;QAC5C,OAAO,IAAI,kBAAkB,CAAC,YAAY,CAAC,CAAC;IAChD,CAAC;CACJ"}
5
node_modules/node-llama-cpp/dist/gguf/insights/utils/getRamUsageFromUnifiedVram.d.ts
generated
vendored
Normal file
5
node_modules/node-llama-cpp/dist/gguf/insights/utils/getRamUsageFromUnifiedVram.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
export declare function getRamUsageFromUnifiedVram(vramUsage: number, vramState: {
total: number;
free: number;
unifiedSize: number;
}): number;
7
node_modules/node-llama-cpp/dist/gguf/insights/utils/getRamUsageFromUnifiedVram.js
generated
vendored
Normal file
7
node_modules/node-llama-cpp/dist/gguf/insights/utils/getRamUsageFromUnifiedVram.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
export function getRamUsageFromUnifiedVram(vramUsage, vramState) {
const onlyVramSize = vramState.total - vramState.unifiedSize;
const existingUsage = Math.max(0, vramState.total - vramState.free);
const unifiedRamUsage = Math.min(vramState.unifiedSize, Math.max(0, vramUsage - Math.max(0, onlyVramSize - existingUsage)));
return unifiedRamUsage;
}
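// Editor's note - worked examples with made-up numbers in GiB (a sketch, not part of the original file).
// Fully unified memory (e.g. Apple Silicon), vramState = {total: 16, free: 10, unifiedSize: 16}:
//   onlyVramSize = 0, existingUsage = 6, so getRamUsageFromUnifiedVram(4, vramState) = min(16, max(0, 4 - 0)) = 4
//   - on unified memory, VRAM usage also counts as RAM usage.
// Discrete GPU, vramState = {total: 24, free: 20, unifiedSize: 0}:
//   onlyVramSize = 24, existingUsage = 4, so getRamUsageFromUnifiedVram(8, vramState) = min(0, 0) = 0.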
//# sourceMappingURL=getRamUsageFromUnifiedVram.js.map
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/getRamUsageFromUnifiedVram.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/getRamUsageFromUnifiedVram.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getRamUsageFromUnifiedVram.js","sourceRoot":"","sources":["../../../../src/gguf/insights/utils/getRamUsageFromUnifiedVram.ts"],"names":[],"mappings":"AAAA,MAAM,UAAU,0BAA0B,CAAC,SAAiB,EAAE,SAA6D;IACvH,MAAM,YAAY,GAAG,SAAS,CAAC,KAAK,GAAG,SAAS,CAAC,WAAW,CAAC;IAC7D,MAAM,aAAa,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,SAAS,CAAC,KAAK,GAAG,SAAS,CAAC,IAAI,CAAC,CAAC;IAEpE,MAAM,eAAe,GAAG,IAAI,CAAC,GAAG,CAAC,SAAS,CAAC,WAAW,EAAE,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,SAAS,GAAG,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,YAAY,GAAG,aAAa,CAAC,CAAC,CAAC,CAAC;IAE5H,OAAO,eAAe,CAAC;AAC3B,CAAC"}
30
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveContextContextSizeOption.d.ts
generated
vendored
Normal file
30
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveContextContextSizeOption.d.ts
generated
vendored
Normal file
@@ -0,0 +1,30 @@
import { LlamaContextOptions } from "../../../evaluator/LlamaContext/types.js";
import { GgufInsights } from "../GgufInsights.js";
import { BuildGpu } from "../../../bindings/types.js";
export declare function resolveContextContextSizeOption({ contextSize, batchSize, sequences, modelFileInsights, modelGpuLayers, modelTrainContextSize, flashAttention, swaFullCache, getVramState, getRamState, getSwapState, ignoreMemorySafetyChecks, isEmbeddingContext, maxContextSizeSwapUse }: {
contextSize?: LlamaContextOptions["contextSize"];
batchSize?: LlamaContextOptions["batchSize"];
sequences: number;
modelFileInsights: GgufInsights;
modelGpuLayers: number;
modelTrainContextSize: number;
flashAttention: boolean;
swaFullCache: boolean;
getVramState(): Promise<{
total: number;
free: number;
unifiedSize: number;
}>;
getRamState(): Promise<{
total: number;
free: number;
}>;
getSwapState(): Promise<{
total: number;
free: number;
}>;
llamaGpu: BuildGpu;
ignoreMemorySafetyChecks?: boolean;
isEmbeddingContext?: boolean;
maxContextSizeSwapUse?: number;
}): Promise<number>;
111
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveContextContextSizeOption.js
generated
vendored
Normal file
111
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveContextContextSizeOption.js
generated
vendored
Normal file
@@ -0,0 +1,111 @@
import { minAllowedContextSizeInCalculations } from "../../../config.js";
import { getDefaultContextBatchSize, getDefaultModelContextSize } from "../../../evaluator/LlamaContext/LlamaContext.js";
import { InsufficientMemoryError } from "../../../utils/InsufficientMemoryError.js";
import { getRamUsageFromUnifiedVram } from "./getRamUsageFromUnifiedVram.js";
const defaultMaxContextSizeSwapUse = 2048;
export async function resolveContextContextSizeOption({ contextSize, batchSize, sequences, modelFileInsights, modelGpuLayers, modelTrainContextSize, flashAttention, swaFullCache, getVramState, getRamState, getSwapState, ignoreMemorySafetyChecks = false, isEmbeddingContext = false, maxContextSizeSwapUse = defaultMaxContextSizeSwapUse }) {
if (contextSize == null)
contextSize = "auto";
if (typeof contextSize === "number") {
const resolvedContextSize = Math.max(1, Math.floor(contextSize));
if (ignoreMemorySafetyChecks)
return resolvedContextSize;
const [vramState, ramState, swapState] = await Promise.all([
getVramState(),
getRamState(),
getSwapState()
]);
const contextResourceRequirements = modelFileInsights.estimateContextResourceRequirements({
contextSize: resolvedContextSize,
batchSize: batchSize ?? getDefaultContextBatchSize({ contextSize: resolvedContextSize, sequences }),
modelGpuLayers: modelGpuLayers,
sequences,
flashAttention,
swaFullCache,
isEmbeddingContext
});
if (contextResourceRequirements.gpuVram > vramState.free)
throw new InsufficientMemoryError(`A context size of ${resolvedContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available VRAM`);
else if (contextResourceRequirements.cpuRam > (ramState.free + swapState.free - getRamUsageFromUnifiedVram(contextResourceRequirements.gpuVram, vramState)))
throw new InsufficientMemoryError(`A context size of ${resolvedContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available RAM${swapState.total > 0 ? " (including swap)" : ""}`);
return resolvedContextSize;
}
else if (contextSize === "auto" || typeof contextSize === "object") {
const [vramState, ramState, swapState] = await Promise.all([
getVramState(),
getRamState(),
getSwapState()
]);
const maxContextSize = contextSize === "auto"
? getDefaultModelContextSize({ trainContextSize: modelTrainContextSize })
: Math.min(contextSize.max ?? getDefaultModelContextSize({ trainContextSize: modelTrainContextSize }), getDefaultModelContextSize({ trainContextSize: modelTrainContextSize }));
const minContextSize = contextSize === "auto"
? minAllowedContextSizeInCalculations
: Math.max(contextSize.min ?? minAllowedContextSizeInCalculations, minAllowedContextSizeInCalculations);
let highestCompatibleContextSize = null;
let step = -Math.max(1, Math.floor((maxContextSize - minContextSize) / 4));
for (let testContextSize = maxContextSize; testContextSize >= minContextSize && testContextSize <= maxContextSize;) {
const contextResourceRequirements = modelFileInsights.estimateContextResourceRequirements({
contextSize: testContextSize,
batchSize: batchSize ?? getDefaultContextBatchSize({ contextSize: testContextSize, sequences }),
modelGpuLayers: modelGpuLayers,
sequences,
flashAttention,
swaFullCache,
isEmbeddingContext
});
if (contextResourceRequirements.gpuVram <= vramState.free &&
contextResourceRequirements.cpuRam <= (ramState.free - getRamUsageFromUnifiedVram(contextResourceRequirements.gpuVram, vramState) + (testContextSize <= maxContextSizeSwapUse
? swapState.free
: 0))) {
if (highestCompatibleContextSize == null || testContextSize >= highestCompatibleContextSize) {
highestCompatibleContextSize = testContextSize;
if (step === -1)
break;
else if (step < 0)
step = Math.max(1, Math.floor(-step / 2));
}
}
else if (step > 0)
step = -Math.max(1, Math.floor(step / 2));
if (testContextSize == minContextSize && step === -1)
break;
testContextSize += step;
if (testContextSize < minContextSize) {
testContextSize = minContextSize;
step = Math.max(1, Math.floor(Math.abs(step) / 2));
}
else if (testContextSize > maxContextSize) {
testContextSize = maxContextSize;
step = -Math.max(1, Math.floor(Math.abs(step) / 2));
}
}
if (highestCompatibleContextSize != null)
return highestCompatibleContextSize;
if (ignoreMemorySafetyChecks)
return minContextSize;
const minContextSizeResourceRequirements = modelFileInsights.estimateContextResourceRequirements({
contextSize: minContextSize,
batchSize: batchSize ?? getDefaultContextBatchSize({ contextSize: minContextSize, sequences }),
modelGpuLayers: modelGpuLayers,
sequences,
flashAttention,
swaFullCache,
isEmbeddingContext
});
const unifiedRamUsage = getRamUsageFromUnifiedVram(minContextSizeResourceRequirements.gpuVram, vramState);
if (minContextSizeResourceRequirements.gpuVram > vramState.free &&
minContextSizeResourceRequirements.cpuRam > ramState.free + swapState.free - unifiedRamUsage)
throw new InsufficientMemoryError(`A context size of ${minContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available VRAM and RAM${swapState.total > 0 ? " (including swap)" : ""}`);
else if (minContextSizeResourceRequirements.gpuVram > vramState.free)
throw new InsufficientMemoryError(`A context size of ${minContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available VRAM`);
else if (minContextSizeResourceRequirements.cpuRam > ramState.free + swapState.free - unifiedRamUsage)
throw new InsufficientMemoryError(`A context size of ${minContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available RAM${swapState.total > 0 ? " (including swap)" : ""}`);
else if (minContextSizeResourceRequirements.cpuRam > ramState.free - unifiedRamUsage)
throw new InsufficientMemoryError(`A context size of ${minContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available RAM`);
else
throw new InsufficientMemoryError(`A context size of ${minContextSize}${sequences > 1 ? ` with ${sequences} sequences` : ""} is too large for the available resources`);
}
throw new Error(`Invalid context size: "${contextSize}"`);
}
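// Editor's note - a sketch of the adaptive search above (not part of the original file).
// The loop probes context sizes with a step that starts at -(max - min) / 4 and halves on each
// direction change, homing in on the largest size whose estimated resources still fit. E.g. with
// min = 256 and max = 8192 the step starts at -1984: if 8192 doesn't fit it tries 6208, 4224, ...
// until one fits, then walks back up with halved positive steps until the step reaches 1.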
//# sourceMappingURL=resolveContextContextSizeOption.js.map
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveContextContextSizeOption.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveContextContextSizeOption.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
17
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveModelGpuLayersOption.d.ts
generated
vendored
Normal file
17
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveModelGpuLayersOption.d.ts
generated
vendored
Normal file
@@ -0,0 +1,17 @@
import { LlamaModelOptions } from "../../../evaluator/LlamaModel/LlamaModel.js";
import { BuildGpu } from "../../../bindings/types.js";
import type { GgufInsights } from "../GgufInsights.js";
export declare function resolveModelGpuLayersOption(gpuLayers: LlamaModelOptions["gpuLayers"], { ggufInsights, ignoreMemorySafetyChecks, getVramState, llamaVramPaddingSize, llamaGpu, llamaSupportsGpuOffloading, defaultContextFlashAttention, defaultContextSwaFullCache, useMmap }: {
ggufInsights: GgufInsights;
ignoreMemorySafetyChecks?: boolean;
getVramState(): Promise<{
total: number;
free: number;
}>;
llamaVramPaddingSize: number;
llamaGpu: BuildGpu;
llamaSupportsGpuOffloading: boolean;
defaultContextFlashAttention: boolean;
defaultContextSwaFullCache: boolean;
useMmap?: boolean;
}): Promise<number>;
239
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveModelGpuLayersOption.js
generated
vendored
Normal file
239
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveModelGpuLayersOption.js
generated
vendored
Normal file
@@ -0,0 +1,239 @@
import { InsufficientMemoryError } from "../../../utils/InsufficientMemoryError.js";
import { findBestOption } from "../../../utils/findBestOption.js";
import { getDefaultContextBatchSize, getDefaultModelContextSize } from "../../../evaluator/LlamaContext/LlamaContext.js";
import { minAllowedContextSizeInCalculations } from "../../../config.js";
import { scoreLevels } from "./scoreLevels.js";
const fitContextExtraMemoryPaddingPercentage = 0.5;
export async function resolveModelGpuLayersOption(gpuLayers, { ggufInsights, ignoreMemorySafetyChecks = false, getVramState, llamaVramPaddingSize, llamaGpu, llamaSupportsGpuOffloading, defaultContextFlashAttention, defaultContextSwaFullCache, useMmap }) {
if (gpuLayers == null)
gpuLayers = "auto";
if (!llamaSupportsGpuOffloading)
return 0;
if (gpuLayers === "max" || typeof gpuLayers === "number") {
const resolvedGpuLayers = typeof gpuLayers === "number"
? Math.max(0, Math.min(ggufInsights.totalLayers, gpuLayers))
: ggufInsights.totalLayers;
if (ignoreMemorySafetyChecks)
return resolvedGpuLayers;
const vramState = await getVramState();
const maxLayersRequirements = getVramRequiredForGpuLayers({
gpuLayers: resolvedGpuLayers,
ggufInsights,
currentVram: vramState.free,
defaultContextFlashAttention,
defaultContextSwaFullCache,
useMmap
});
if (maxLayersRequirements == null)
throw new InsufficientMemoryError("Not enough VRAM to fit the model with the specified settings");
return resolvedGpuLayers;
}
else if (gpuLayers === "auto" || typeof gpuLayers === "object") {
if (llamaGpu === false)
return 0;
const vramState = await getVramState();
if (vramState.total === 0)
return 0;
let freeVram = vramState.free;
if (typeof gpuLayers === "object" && gpuLayers.fitContext?.contextSize != null) {
freeVram -= llamaVramPaddingSize * fitContextExtraMemoryPaddingPercentage;
if (freeVram < 0)
freeVram = 0;
}
const bestGpuLayersOption = getBestGpuLayersForFreeVram({
ggufInsights,
freeVram,
fitContext: typeof gpuLayers === "object"
? gpuLayers.fitContext
: undefined,
minGpuLayers: typeof gpuLayers === "object"
? gpuLayers.min
: undefined,
maxGpuLayers: typeof gpuLayers === "object"
? gpuLayers.max
: undefined,
defaultContextFlashAttention,
defaultContextSwaFullCache,
useMmap
});
const hasGpuLayersRequirements = typeof gpuLayers === "object" &&
(gpuLayers.min != null || gpuLayers.max != null || gpuLayers.fitContext?.contextSize != null);
if (!ignoreMemorySafetyChecks && bestGpuLayersOption == null && hasGpuLayersRequirements)
throw new InsufficientMemoryError("Not enough VRAM to fit the model with the specified settings");
return bestGpuLayersOption ?? 0;
}
throw new Error(`Invalid gpuLayers value: ${gpuLayers}`);
}
function getBestGpuLayersForFreeVram({ ggufInsights, freeVram, fitContext, minGpuLayers, maxGpuLayers, defaultContextFlashAttention, defaultContextSwaFullCache, useMmap }) {
return findBestOption({
*generator() {
const minLayers = Math.floor(Math.max(0, minGpuLayers ?? 0));
const maxLayers = Math.floor(Math.min(ggufInsights.totalLayers, maxGpuLayers ?? ggufInsights.totalLayers));
for (let layers = maxLayers; layers >= minLayers; layers--) {
yield {
gpuLayers: layers
};
}
},
score(option) {
const layersRequirements = getVramRequiredForGpuLayers({
gpuLayers: option.gpuLayers,
ggufInsights,
currentVram: freeVram,
fitContext,
defaultContextFlashAttention,
defaultContextSwaFullCache,
useMmap
});
if (layersRequirements == null)
return null;
return scoreGpuLayersAndContextCombination({ gpuLayers: option.gpuLayers, contextSize: layersRequirements.contextSize }, {
totalGpuLayers: ggufInsights.totalLayers,
trainContextSize: getDefaultModelContextSize({ trainContextSize: ggufInsights.trainContextSize })
});
}
})?.gpuLayers ?? null;
}
function scoreGpuLayersAndContextCombination({ gpuLayers, contextSize }, { totalGpuLayers, trainContextSize }) {
function scoreGpuLayers() {
return scoreLevels(gpuLayers, [{
start: 0,
points: 4
}, {
start: 1,
points: 26
}, {
start: totalGpuLayers,
points: 14,
end: totalGpuLayers
}]);
}
function scoreContextSize() {
const gpuLayersPercentage = gpuLayers / totalGpuLayers;
return scoreLevels(contextSize, [{
start: 0,
points: 2
}, {
start: 1024,
points: 4
}, {
start: 2048,
points: gpuLayersPercentage < 0.1 ? 1 : 8
}, {
start: 4096,
points: gpuLayersPercentage < 0.3 ? 4 : 16
}, {
start: 8192,
points: gpuLayersPercentage < 0.6 ? 1 : 8,
end: Math.max(trainContextSize, 16384)
}]);
}
return scoreGpuLayers() + scoreContextSize();
}
function getVramRequiredForGpuLayers({ gpuLayers, ggufInsights, currentVram, fitContext, defaultContextFlashAttention = false, defaultContextSwaFullCache = false, useMmap }) {
const modelVram = ggufInsights.estimateModelResourceRequirements({
gpuLayers,
useMmap
}).gpuVram;
if (modelVram > currentVram)
return null;
if (fitContext != null && fitContext.contextSize != null) {
const contextVram = ggufInsights.estimateContextResourceRequirements({
contextSize: fitContext.contextSize,
batchSize: getDefaultContextBatchSize({ contextSize: fitContext.contextSize, sequences: 1 }),
modelGpuLayers: gpuLayers,
sequences: 1,
isEmbeddingContext: fitContext.embeddingContext ?? false,
flashAttention: defaultContextFlashAttention,
swaFullCache: defaultContextSwaFullCache
}).gpuVram;
const totalVram = modelVram + contextVram;
if (totalVram > currentVram)
return null;
return {
contextSize: fitContext.contextSize,
contextVram,
totalVram
};
}
const maxContext = findMaxPossibleContextSizeForVram({
gpuLayers,
ggufInsights,
vram: currentVram - modelVram,
isEmbeddingContext: fitContext?.embeddingContext ?? false,
flashAttention: defaultContextFlashAttention,
swaFullCache: defaultContextSwaFullCache
});
if (maxContext == null || modelVram + maxContext.vram > currentVram)
return null;
return {
contextSize: maxContext.contextSize,
contextVram: maxContext.vram,
totalVram: modelVram + maxContext.vram
};
}
function findMaxPossibleContextSizeForVram({ gpuLayers, ggufInsights, vram, isEmbeddingContext, flashAttention, swaFullCache }) {
const maxContextSize = getDefaultModelContextSize({ trainContextSize: ggufInsights.trainContextSize });
return findMaxValidValue({
maxValue: maxContextSize,
minValue: minAllowedContextSizeInCalculations,
minStep: 1,
test(contextSize) {
const contextVram = ggufInsights.estimateContextResourceRequirements({
contextSize,
batchSize: getDefaultContextBatchSize({ contextSize, sequences: 1 }),
modelGpuLayers: gpuLayers,
sequences: 1,
isEmbeddingContext,
flashAttention,
swaFullCache
}).gpuVram;
if (contextVram <= vram)
return {
contextSize,
vram: contextVram
};
return null;
}
});
}
function findMaxValidValue({ maxValue, minValue, minStep = 1, test }) {
let step = -Math.max(minStep, Math.floor((maxValue - minValue) / 4));
let bestValue = null;
for (let value = maxValue; value >= minValue;) {
const result = (bestValue != null && value === bestValue.value)
? bestValue.result
: test(value);
if (result != null) {
if (bestValue == null || value >= bestValue.value) {
bestValue = { value: value, result: result };
if (step === -minStep)
break;
else if (step < 0)
step = Math.max(minStep, Math.floor(-step / 2));
}
}
else if (bestValue != null && value < bestValue.value) {
value = bestValue.value;
step = Math.max(minStep, Math.floor(Math.abs(step) / 2));
continue;
}
else if (step > 0)
step = -Math.max(minStep, Math.floor(step / 2));
if (value === minValue && step === -minStep)
break;
value += step;
if (value < minValue) {
value = minValue;
step = Math.max(minStep, Math.floor(Math.abs(step) / 2));
}
else if (value > maxValue) {
value = maxValue;
step = -Math.max(minStep, Math.floor(Math.abs(step) / 2));
}
}
if (bestValue != null)
return bestValue.result;
return null;
}
|
||||
//# sourceMappingURL=resolveModelGpuLayersOption.js.map
|
||||
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveModelGpuLayersOption.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/resolveModelGpuLayersOption.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
5
node_modules/node-llama-cpp/dist/gguf/insights/utils/scoreLevels.d.ts
generated
vendored
Normal file
5
node_modules/node-llama-cpp/dist/gguf/insights/utils/scoreLevels.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
export declare function scoreLevels(num: number, levels: {
    start: number;
    end?: number;
    points: number;
}[]): number;
16
node_modules/node-llama-cpp/dist/gguf/insights/utils/scoreLevels.js
generated
vendored
Normal file
16
node_modules/node-llama-cpp/dist/gguf/insights/utils/scoreLevels.js
generated
vendored
Normal file
@@ -0,0 +1,16 @@
export function scoreLevels(num, levels) {
    let res = 0;
    for (let i = 0; i < levels.length; i++) {
        const level = levels[i];
        const start = level.start;
        const end = level.end ?? levels[i + 1]?.start ?? Math.max(start, num);
        if (num < start)
            break;
        else if (num >= end)
            res += level.points;
        else
            res += level.points * ((num - start) / (end - start));
    }
    return res;
}
//# sourceMappingURL=scoreLevels.js.map
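A quick worked sketch (not part of the package) of how scoreLevels awards points: a level's end defaults to the next level's start, points are linearly interpolated between start and end, and an open-ended final level pays out in full once num reaches its start.

scoreLevels(512, [{ start: 0, points: 2 }, { start: 1024, points: 4 }]);
// → 1: the first level's end defaults to 1024, so 512 earns 2 * (512 / 1024),
//   and the second level is never reached (512 < 1024)
scoreLevels(1024, [{ start: 0, points: 2 }, { start: 1024, points: 4 }]);
// → 6: the first level pays its full 2 points, and the open-ended second
//   level pays its full 4 points as soon as num reaches its start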
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/scoreLevels.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/insights/utils/scoreLevels.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"scoreLevels.js","sourceRoot":"","sources":["../../../../src/gguf/insights/utils/scoreLevels.ts"],"names":[],"mappings":"AAAA,MAAM,UAAU,WAAW,CAAC,GAAW,EAAE,MAAuD;IAC5F,IAAI,GAAG,GAAG,CAAC,CAAC;IAEZ,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,MAAM,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QACrC,MAAM,KAAK,GAAG,MAAM,CAAC,CAAC,CAAE,CAAC;QACzB,MAAM,KAAK,GAAG,KAAK,CAAC,KAAK,CAAC;QAC1B,MAAM,GAAG,GAAG,KAAK,CAAC,GAAG,IAAI,MAAM,CAAC,CAAC,GAAG,CAAC,CAAC,EAAE,KAAK,IAAI,IAAI,CAAC,GAAG,CAAC,KAAK,EAAE,GAAG,CAAC,CAAC;QAEtE,IAAI,GAAG,GAAG,KAAK;YACX,MAAM;aACL,IAAI,GAAG,IAAI,GAAG;YACf,GAAG,IAAI,KAAK,CAAC,MAAM,CAAC;;YAEpB,GAAG,IAAI,KAAK,CAAC,MAAM,GAAG,CAAC,CAAC,GAAG,GAAG,KAAK,CAAC,GAAG,CAAC,GAAG,GAAG,KAAK,CAAC,CAAC,CAAC;IAC9D,CAAC;IAED,OAAO,GAAG,CAAC;AACf,CAAC"}
20
node_modules/node-llama-cpp/dist/gguf/parser/GgufV2Parser.d.ts
generated
vendored
Normal file
20
node_modules/node-llama-cpp/dist/gguf/parser/GgufV2Parser.d.ts
generated
vendored
Normal file
@@ -0,0 +1,20 @@
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { GgufValueType, GgufVersionParserOptions, GgufVersionParserResult, MetadataKeyValueRecord, MetadataValue } from "../types/GgufFileInfoTypes.js";
import { Promisable } from "../../utils/transformPromisable.js";
export declare class GgufV2Parser {
    private readonly _fileReader;
    private readonly _shouldReadTensorInfo;
    private readonly _ignoreKeys;
    private readonly _readOffset;
    private readonly _logWarnings;
    constructor({ fileReader, readTensorInfo, ignoreKeys, readOffset, logWarnings }: GgufVersionParserOptions);
    parse(): Promise<GgufVersionParserResult>;
    protected _readGgufValue(type: GgufValueType, offset: number | GgufReadOffset): Promisable<MetadataValue>;
    protected _readStringValue(offset: number | GgufReadOffset): Promisable<string>;
    protected _readRawHeader(readOffset: GgufReadOffset): Promise<{
        tensorCount: number | bigint;
        metadata: MetadataKeyValueRecord;
        headerSize: number;
    }>;
    private _readTensorInfo;
}
184
node_modules/node-llama-cpp/dist/gguf/parser/GgufV2Parser.js
generated
vendored
Normal file
184
node_modules/node-llama-cpp/dist/gguf/parser/GgufV2Parser.js
generated
vendored
Normal file
@@ -0,0 +1,184 @@
import { GgufFileReader } from "../fileReaders/GgufFileReader.js";
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { UnsupportedGgufValueTypeError } from "../errors/UnsupportedGgufValueTypeError.js";
import { GgufValueType } from "../types/GgufFileInfoTypes.js";
import { convertMetadataKeyValueRecordToNestedObject } from "../utils/convertMetadataKeyValueRecordToNestedObject.js";
import { promisableLoop, transformPromisable, transformPromisablesInOrder } from "../../utils/transformPromisable.js";
import { noDirectSubNestingGGufMetadataKeys } from "../consts.js";
const ggufDefaultAlignment = 32;
export class GgufV2Parser {
    _fileReader;
    _shouldReadTensorInfo;
    _ignoreKeys;
    _readOffset;
    _logWarnings;
    constructor({ fileReader, readTensorInfo = true, ignoreKeys = [], readOffset, logWarnings }) {
        this._fileReader = fileReader;
        this._shouldReadTensorInfo = readTensorInfo;
        this._ignoreKeys = ignoreKeys;
        this._readOffset = readOffset;
        this._logWarnings = logWarnings;
    }
    async parse() {
        const readOffset = this._readOffset;
        const initialOffset = readOffset.offset;
        const headerReadResultPromisable = this._readRawHeader(readOffset);
        const headerReadResult = headerReadResultPromisable instanceof Promise
            ? await headerReadResultPromisable
            : headerReadResultPromisable;
        const alignmentHeader = headerReadResult.metadata["general.alignment"];
        const ggufAlignment = (alignmentHeader != null &&
            (typeof alignmentHeader === "number" || typeof alignmentHeader === "bigint") &&
            Number.isFinite(Number(alignmentHeader)))
            ? Number(alignmentHeader)
            : ggufDefaultAlignment;
        const tensorReadResultPromisable = this._shouldReadTensorInfo
            ? await this._readTensorInfo(headerReadResult.tensorCount, readOffset, ggufAlignment)
            : null;
        const tensorReadResult = tensorReadResultPromisable instanceof Promise
            ? await tensorReadResultPromisable
            : tensorReadResultPromisable;
        const metadata = convertMetadataKeyValueRecordToNestedObject(headerReadResult.metadata, {
            logOverrideWarnings: this._logWarnings,
            ignoreKeys: this._ignoreKeys,
            noDirectSubNestingKeys: noDirectSubNestingGGufMetadataKeys
        });
        return {
            tensorCount: headerReadResult.tensorCount,
            metadata: metadata,
            tensorInfo: tensorReadResult?.tensorInfo,
            metadataSize: headerReadResult.headerSize + initialOffset,
            tensorInfoSize: tensorReadResult?.tensorInfoSize,
            tensorDataOffset: tensorReadResult?.tensorDataOffset
        };
    }
    _readGgufValue(type, offset) {
        const readOffset = GgufReadOffset.resolveReadOffset(offset);
        switch (type) {
            case GgufValueType.Uint8: return this._fileReader.readUint8(readOffset);
            case GgufValueType.Int8: return this._fileReader.readInt8(readOffset);
            case GgufValueType.Uint16: return this._fileReader.readUint16(readOffset);
            case GgufValueType.Int16: return this._fileReader.readInt16(readOffset);
            case GgufValueType.Uint32: return this._fileReader.readUint32(readOffset);
            case GgufValueType.Int32: return this._fileReader.readInt32(readOffset);
            case GgufValueType.Float32: return this._fileReader.readFloat32(readOffset);
            case GgufValueType.Bool: return this._fileReader.readBool(readOffset);
            case GgufValueType.String: return this._readStringValue(readOffset);
            case GgufValueType.Uint64: return this._fileReader.readUint64(readOffset);
            case GgufValueType.Int64: return this._fileReader.readInt64(readOffset);
            case GgufValueType.Float64: return this._fileReader.readFloat64(readOffset);
        }
        if (type === GgufValueType.Array) {
            return transformPromisablesInOrder([
                () => this._fileReader.readUint32(readOffset),
                () => this._fileReader.readUint64(readOffset)
            ], ([arrayType, arrayLength]) => {
                const arrayValues = [];
                let i = 0;
                return promisableLoop({
                    condition: () => i < arrayLength,
                    callback: () => {
                        return transformPromisable(this._readGgufValue(arrayType, readOffset), (value) => {
                            arrayValues.push(value);
                        });
                    },
                    afterthought: () => void i++,
                    returnValue: () => arrayValues
                });
            });
        }
        throw new UnsupportedGgufValueTypeError(type);
    }
    _readStringValue(offset) {
        return this._fileReader.readString(offset);
    }
    async _readRawHeader(readOffset) {
        const initialOffset = readOffset.offset;
        const tensorCountAndMetadataKVCountPromisable = transformPromisablesInOrder([
            () => this._fileReader.readUint64(readOffset),
            () => transformPromisable(this._fileReader.readUint64(readOffset), Number)
        ]);
        const [tensorCount, metadataKVCount] = tensorCountAndMetadataKVCountPromisable instanceof Promise
            ? await tensorCountAndMetadataKVCountPromisable
            : tensorCountAndMetadataKVCountPromisable;
        const metadata = {};
        let i = 0;
        return promisableLoop({
            condition: () => i < metadataKVCount,
            callback: () => {
                return transformPromisablesInOrder([
                    () => this._readStringValue(readOffset),
                    () => this._fileReader.readUint32(readOffset)
                ], ([keyResult, valueType]) => {
                    return transformPromisable(this._readGgufValue(valueType, readOffset), (value) => {
                        metadata[keyResult] = value;
                    });
                });
            },
            afterthought: () => void i++,
            returnValue: () => ({
                tensorCount: GgufFileReader.castNumberIfSafe(tensorCount),
                metadata: metadata,
                headerSize: readOffset.offset - initialOffset
            })
        });
    }
    _readTensorInfo(tensorCount, readOffset, ggufAlignment) {
        const initialOffset = readOffset.offset;
        const tensorInfo = [];
        let i = 0n;
        return promisableLoop({
            condition: () => i < BigInt(tensorCount),
            callback: () => {
                const dimensions = [];
                return transformPromisablesInOrder([
                    () => this._readStringValue(readOffset),
                    () => this._fileReader.readUint32(readOffset)
                ], ([name, dimensionsNumber]) => {
                    let d = 0;
                    return promisableLoop({
                        condition: () => d < dimensionsNumber,
                        callback: () => {
                            return transformPromisable(this._fileReader.readUint64(readOffset), (dimension) => {
                                dimensions.push(GgufFileReader.castNumberIfSafe(dimension));
                            });
                        },
                        afterthought: () => void d++,
                        returnValue: () => {
                            return transformPromisablesInOrder([
                                () => this._fileReader.readUint32(readOffset),
                                () => this._fileReader.readUint64(readOffset)
                            ], ([ggmlType, offset]) => {
                                tensorInfo.push({
                                    name,
                                    dimensions,
                                    ggmlType: ggmlType,
                                    offset: GgufFileReader.castNumberIfSafe(offset),
                                    fileOffset: 0, // will be set later
                                    filePart: 1 // will be updated later if needed
                                });
                            });
                        }
                    });
                });
            },
            afterthought: () => void i++,
            returnValue: () => {
                const fileTensorDataOffset = alignOffset(readOffset.offset, ggufAlignment);
                for (const tensor of tensorInfo)
                    tensor.fileOffset = typeof tensor.offset === "bigint"
                        ? BigInt(fileTensorDataOffset) + tensor.offset
                        : fileTensorDataOffset + tensor.offset;
                return {
                    tensorInfo,
                    tensorInfoSize: readOffset.offset - initialOffset,
                    tensorDataOffset: fileTensorDataOffset
                };
            }
        });
    }
}
function alignOffset(offset, alignment) {
    return offset + (alignment - (offset % alignment)) % alignment;
}
//# sourceMappingURL=GgufV2Parser.js.map
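For reference, the alignOffset helper above rounds a file offset up to the next multiple of the alignment (32 bytes unless general.alignment says otherwise); a small sketch of the arithmetic:

alignOffset(64, 32); // → 64: already aligned, (32 - 64 % 32) % 32 === 0
alignOffset(65, 32); // → 96: 65 + (32 - 65 % 32) % 32 === 65 + 31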
1
node_modules/node-llama-cpp/dist/gguf/parser/GgufV2Parser.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/parser/GgufV2Parser.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
3
node_modules/node-llama-cpp/dist/gguf/parser/GgufV3Parser.d.ts
generated
vendored
Normal file
3
node_modules/node-llama-cpp/dist/gguf/parser/GgufV3Parser.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
import { GgufV2Parser } from "./GgufV2Parser.js";
export declare class GgufV3Parser extends GgufV2Parser {
}
4
node_modules/node-llama-cpp/dist/gguf/parser/GgufV3Parser.js
generated
vendored
Normal file
4
node_modules/node-llama-cpp/dist/gguf/parser/GgufV3Parser.js
generated
vendored
Normal file
@@ -0,0 +1,4 @@
import { GgufV2Parser } from "./GgufV2Parser.js";
export class GgufV3Parser extends GgufV2Parser {
}
//# sourceMappingURL=GgufV3Parser.js.map
1
node_modules/node-llama-cpp/dist/gguf/parser/GgufV3Parser.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/parser/GgufV3Parser.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"GgufV3Parser.js","sourceRoot":"","sources":["../../../src/gguf/parser/GgufV3Parser.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,YAAY,EAAC,MAAM,mBAAmB,CAAC;AAE/C,MAAM,OAAO,YAAa,SAAQ,YAAY;CAE7C"}
8
node_modules/node-llama-cpp/dist/gguf/parser/parseGguf.d.ts
generated
vendored
Normal file
8
node_modules/node-llama-cpp/dist/gguf/parser/parseGguf.d.ts
generated
vendored
Normal file
@@ -0,0 +1,8 @@
import { GgufFileReader } from "../fileReaders/GgufFileReader.js";
import { GgufFileInfo } from "../types/GgufFileInfoTypes.js";
export declare function parseGguf({ fileReader, readTensorInfo, ignoreKeys, logWarnings }: {
    fileReader: GgufFileReader;
    readTensorInfo?: boolean;
    ignoreKeys?: string[];
    logWarnings?: boolean;
}): Promise<GgufFileInfo>;
61
node_modules/node-llama-cpp/dist/gguf/parser/parseGguf.js
generated
vendored
Normal file
61
node_modules/node-llama-cpp/dist/gguf/parser/parseGguf.js
generated
vendored
Normal file
@@ -0,0 +1,61 @@
import { InvalidGgufMagicError } from "../errors/InvalidGgufMagicError.js";
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
import { UnsupportedError } from "../../utils/UnsupportedError.js";
import { GgufReadOffset } from "../utils/GgufReadOffset.js";
import { getGgufMetadataArchitectureData } from "../utils/getGgufMetadataArchitectureData.js";
import { GgufV2Parser } from "./GgufV2Parser.js";
import { GgufV3Parser } from "./GgufV3Parser.js";
const ggufMagic = "GGUF";
export async function parseGguf({ fileReader, readTensorInfo = true, ignoreKeys = [], logWarnings = true }) {
    const readOffset = new GgufReadOffset(0);
    const magicAndVersion = await parseMagicAndVersion(fileReader, readOffset);
    const ggufInfo = await parseGgufUsingASpecificVersionParser({
        fileReader,
        readTensorInfo,
        ignoreKeys,
        version: magicAndVersion.version,
        readOffset,
        logWarnings
    });
    const architectureMetadata = getGgufMetadataArchitectureData(ggufInfo.metadata);
    return {
        version: magicAndVersion.version,
        tensorCount: ggufInfo.tensorCount,
        metadata: ggufInfo.metadata,
        architectureMetadata: architectureMetadata,
        tensorInfo: ggufInfo.tensorInfo,
        metadataSize: ggufInfo.metadataSize,
        splicedParts: 1,
        totalTensorInfoSize: ggufInfo.tensorInfoSize,
        totalTensorCount: ggufInfo.tensorCount,
        totalMetadataSize: ggufInfo.metadataSize,
        fullTensorInfo: ggufInfo.tensorInfo,
        tensorInfoSize: ggufInfo.tensorInfoSize
    };
}
async function parseMagicAndVersion(fileReader, readOffset) {
    const fileMagicText = await fileReader.readStringWithLength(readOffset, ggufMagic.length);
    if (fileMagicText !== ggufMagic)
        throw new InvalidGgufMagicError(ggufMagic, fileMagicText);
    const version = await fileReader.readUint32(readOffset);
    return {
        magic: ggufMagic,
        version
    };
}
async function parseGgufUsingASpecificVersionParser(specificVersionParserOptions) {
    switch (specificVersionParserOptions.version) {
        case 1:
            throw new UnsupportedError("GGUF version 1 is not supported by llama.cpp anymore");
        case 2:
            return await (new GgufV2Parser(specificVersionParserOptions)).parse();
        case 3:
            return await (new GgufV3Parser(specificVersionParserOptions)).parse();
        default:
            if (specificVersionParserOptions.logWarnings)
                console.warn(getConsoleLogPrefix() +
                    `Unsupported GGUF version "${specificVersionParserOptions.version}". Reading the file as GGUF version 3`);
            return await (new GgufV3Parser(specificVersionParserOptions)).parse();
    }
}
//# sourceMappingURL=parseGguf.js.map
1
node_modules/node-llama-cpp/dist/gguf/parser/parseGguf.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/parser/parseGguf.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"parseGguf.js","sourceRoot":"","sources":["../../../src/gguf/parser/parseGguf.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,qBAAqB,EAAC,MAAM,oCAAoC,CAAC;AACzE,OAAO,EAAC,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AACvE,OAAO,EAAC,gBAAgB,EAAC,MAAM,iCAAiC,CAAC;AACjE,OAAO,EAAC,cAAc,EAAC,MAAM,4BAA4B,CAAC;AAG1D,OAAO,EAAC,+BAA+B,EAAC,MAAM,6CAA6C,CAAC;AAC5F,OAAO,EAAC,YAAY,EAAC,MAAM,mBAAmB,CAAC;AAC/C,OAAO,EAAC,YAAY,EAAC,MAAM,mBAAmB,CAAC;AAE/C,MAAM,SAAS,GAAG,MAAM,CAAC;AAEzB,MAAM,CAAC,KAAK,UAAU,SAAS,CAAC,EAC5B,UAAU,EACV,cAAc,GAAG,IAAI,EACrB,UAAU,GAAG,EAAE,EACf,WAAW,GAAG,IAAI,EAMrB;IACG,MAAM,UAAU,GAAG,IAAI,cAAc,CAAC,CAAC,CAAC,CAAC;IACzC,MAAM,eAAe,GAAG,MAAM,oBAAoB,CAAC,UAAU,EAAE,UAAU,CAAC,CAAC;IAC3E,MAAM,QAAQ,GAAG,MAAM,oCAAoC,CAAC;QACxD,UAAU;QACV,cAAc;QACd,UAAU;QAEV,OAAO,EAAE,eAAe,CAAC,OAAO;QAChC,UAAU;QACV,WAAW;KACd,CAAC,CAAC;IACH,MAAM,oBAAoB,GAAG,+BAA+B,CAAC,QAAQ,CAAC,QAAQ,CAAC,CAAC;IAEhF,OAAO;QACH,OAAO,EAAE,eAAe,CAAC,OAAO;QAChC,WAAW,EAAE,QAAQ,CAAC,WAAW;QACjC,QAAQ,EAAE,QAAQ,CAAC,QAAQ;QAC3B,oBAAoB,EAAE,oBAAoB;QAC1C,UAAU,EAAE,QAAQ,CAAC,UAAU;QAC/B,YAAY,EAAE,QAAQ,CAAC,YAAY;QACnC,YAAY,EAAE,CAAC;QACf,mBAAmB,EAAE,QAAQ,CAAC,cAAc;QAC5C,gBAAgB,EAAE,QAAQ,CAAC,WAAW;QACtC,iBAAiB,EAAE,QAAQ,CAAC,YAAY;QACxC,cAAc,EAAE,QAAQ,CAAC,UAAU;QACnC,cAAc,EAAE,QAAQ,CAAC,cAAc;KAC1C,CAAC;AACN,CAAC;AAED,KAAK,UAAU,oBAAoB,CAAC,UAA0B,EAAE,UAA0B;IACtF,MAAM,aAAa,GAAG,MAAM,UAAU,CAAC,oBAAoB,CAAC,UAAU,EAAE,SAAS,CAAC,MAAM,CAAC,CAAC;IAE1F,IAAI,aAAa,KAAK,SAAS;QAC3B,MAAM,IAAI,qBAAqB,CAAC,SAAS,EAAE,aAAa,CAAC,CAAC;IAE9D,MAAM,OAAO,GAAG,MAAM,UAAU,CAAC,UAAU,CAAC,UAAU,CAAC,CAAC;IAExD,OAAO;QACH,KAAK,EAAE,SAAS;QAChB,OAAO;KACV,CAAC;AACN,CAAC;AAED,KAAK,UAAU,oCAAoC,CAC/C,4BAAsD;IAEtD,QAAQ,4BAA4B,CAAC,OAAO,EAAE,CAAC;QAC3C,KAAK,CAAC;YACF,MAAM,IAAI,gBAAgB,CAAC,sDAAsD,CAAC,CAAC;QAEvF,KAAK,CAAC;YACF,OAAO,MAAM,CAAC,IAAI,YAAY,CAAC,4BAA4B,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC;QAE1E,KAAK,CAAC;YACF,OAAO,MAAM,CAAC,IAAI,YAAY,CAAC,4BAA4B,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC;QAE1E;YACI,IAAI,4BAA4B,CAAC,WAAW;gBACxC,OAAO,CAAC,IAAI,CACR,mBAAmB,EAAE;oBACrB,6BAA6B,4BAA4B,CAAC,OAAO,uCAAuC,CAC3G,CAAC;YAEN,OAAO,MAAM,CAAC,IAAI,YAAY,CAAC,4BAA4B,CAAC,CAAC,CAAC,KAAK,EAAE,CAAC;IAC9E,CAAC;AACL,CAAC"}
54
node_modules/node-llama-cpp/dist/gguf/readGgufFileInfo.d.ts
generated
vendored
Normal file
54
node_modules/node-llama-cpp/dist/gguf/readGgufFileInfo.d.ts
generated
vendored
Normal file
@@ -0,0 +1,54 @@
import retry from "async-retry";
import { ModelFileAccessTokens } from "../utils/modelFileAccessTokens.js";
import { ModelDownloadEndpoints } from "../utils/modelDownloadEndpoints.js";
import { GgufFileInfo } from "./types/GgufFileInfoTypes.js";
/**
 * Read a GGUF file and return its metadata and tensor info (unless `readTensorInfo` is set to `false`).
 * Only the parts of the file required for the metadata and tensor info are read.
 * @param pathOrUri
 * @param options
 */
export declare function readGgufFileInfo(pathOrUri: string, { readTensorInfo, sourceType, ignoreKeys, logWarnings, fetchRetryOptions, fetchHeaders, spliceSplitFiles, signal, tokens, endpoints }?: {
    /**
     * Whether to read the tensor info from the file's header.
     *
     * Defaults to `true`.
     */
    readTensorInfo?: boolean;
    /**
     * Set to a specific value to force it to only use that source type.
     * By default, it detects whether the path is a network URL or a filesystem path and uses the appropriate reader accordingly.
     */
    sourceType?: "network" | "filesystem";
    /**
     * Metadata keys to ignore when parsing the metadata.
     * For example, `["tokenizer.ggml.tokens"]`
     */
    ignoreKeys?: string[];
    /**
     * Whether to log warnings
     *
     * Defaults to `true`.
     */
    logWarnings?: boolean;
    /** Relevant only when fetching from a network */
    fetchRetryOptions?: retry.Options;
    /** Relevant only when fetching from a network */
    fetchHeaders?: Record<string, string>;
    /**
     * When split files are detected, read the metadata of the first file and splice the tensor info from all the parts.
     *
     * Defaults to `true`.
     */
    spliceSplitFiles?: boolean;
    signal?: AbortSignal;
    /**
     * Tokens to use to access the remote model file.
     */
    tokens?: ModelFileAccessTokens;
    /**
     * Configure the URLs used for resolving model URIs.
     * @see [Model URIs](https://node-llama-cpp.withcat.ai/guide/downloading-models#model-uris)
     */
    endpoints?: ModelDownloadEndpoints;
}): Promise<GgufFileInfo>;
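A minimal usage sketch for this function, assuming the readGgufFileInfo export from the package root and a placeholder local model path:

import { readGgufFileInfo } from "node-llama-cpp";

// "./model.gguf" is a hypothetical path; skipping tensor info and the token
// list keeps the read small when only general metadata is needed
const fileInfo = await readGgufFileInfo("./model.gguf", {
    readTensorInfo: false,
    ignoreKeys: ["tokenizer.ggml.tokens"]
});
console.log(fileInfo.metadata.general.architecture);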
82
node_modules/node-llama-cpp/dist/gguf/readGgufFileInfo.js
generated
vendored
Normal file
82
node_modules/node-llama-cpp/dist/gguf/readGgufFileInfo.js
generated
vendored
Normal file
@@ -0,0 +1,82 @@
import { isUrl } from "../utils/isUrl.js";
import { getAuthorizationHeader, isModelUri, parseModelUri, resolveParsedModelUri } from "../utils/parseModelUri.js";
import { parseGguf } from "./parser/parseGguf.js";
import { GgufNetworkFetchFileReader } from "./fileReaders/GgufNetworkFetchFileReader.js";
import { GgufFsFileReader } from "./fileReaders/GgufFsFileReader.js";
import { ggufDefaultFetchRetryOptions } from "./consts.js";
import { normalizeGgufDownloadUrl } from "./utils/normalizeGgufDownloadUrl.js";
import { resolveSplitGgufParts } from "./utils/resolveSplitGgufParts.js";
/**
 * Read a GGUF file and return its metadata and tensor info (unless `readTensorInfo` is set to `false`).
 * Only the parts of the file required for the metadata and tensor info are read.
 * @param pathOrUri
 * @param options
 */
export async function readGgufFileInfo(pathOrUri, { readTensorInfo = true, sourceType, ignoreKeys = [], logWarnings = true, fetchRetryOptions = ggufDefaultFetchRetryOptions, fetchHeaders = {}, spliceSplitFiles = true, signal, tokens, endpoints } = {}) {
    const useNetworkReader = sourceType === "network" || (sourceType == null && (isUrl(pathOrUri) || isModelUri(pathOrUri)));
    async function createFileReader(pathOrUri) {
        if (useNetworkReader) {
            const parsedModelUri = await resolveParsedModelUri(parseModelUri(pathOrUri, undefined, endpoints), {
                tokens, endpoints, signal,
                authorizationHeader: getAuthorizationHeader(fetchHeaders)
            });
            return new GgufNetworkFetchFileReader({
                url: parsedModelUri?.resolvedUrl ?? normalizeGgufDownloadUrl(pathOrUri, endpoints),
                retryOptions: fetchRetryOptions,
                headers: fetchHeaders,
                signal,
                tokens,
                endpoints
            });
        }
        else if (sourceType === "filesystem" || sourceType == null) {
            return new GgufFsFileReader({
                filePath: pathOrUri,
                signal
            });
        }
        void sourceType;
        throw new Error(`Unsupported sourceType: ${sourceType}`);
    }
    async function readSingleFile(pathOrUri, splitPartNumber = 1) {
        const fileReader = await createFileReader(pathOrUri);
        const res = await parseGguf({
            fileReader,
            ignoreKeys,
            readTensorInfo,
            logWarnings
        });
        if (splitPartNumber > 1) {
            for (const tensor of res.tensorInfo ?? [])
                tensor.filePart = splitPartNumber;
        }
        return res;
    }
    if (!spliceSplitFiles)
        return await readSingleFile(pathOrUri);
    const allSplitPartPaths = resolveSplitGgufParts(pathOrUri);
    if (allSplitPartPaths.length === 1)
        return await readSingleFile(allSplitPartPaths[0]);
    const [first, ...rest] = await Promise.all(allSplitPartPaths.map((partPath, index) => readSingleFile(partPath, index + 1)));
    if (first == null)
        throw new Error("First part of the split GGUF file is missing");
    return {
        version: first.version,
        tensorCount: first.tensorCount,
        metadata: first.metadata,
        architectureMetadata: first.architectureMetadata,
        tensorInfo: first.tensorInfo,
        metadataSize: first.metadataSize,
        splicedParts: allSplitPartPaths.length,
        totalTensorInfoSize: first.totalTensorInfoSize == null
            ? undefined
            : (first.totalTensorInfoSize + rest.reduce((acc, part) => (acc + (part.totalTensorInfoSize ?? 0)), 0)),
        totalTensorCount: Number(first.totalTensorCount) + rest.reduce((acc, part) => acc + Number(part.totalTensorCount), 0),
        totalMetadataSize: first.totalMetadataSize + rest.reduce((acc, part) => acc + part.totalMetadataSize, 0),
        fullTensorInfo: first.fullTensorInfo == null
            ? undefined
            : [first, ...rest].flatMap((part) => (part.fullTensorInfo ?? [])),
        tensorInfoSize: first.tensorInfoSize
    };
}
//# sourceMappingURL=readGgufFileInfo.js.map
1
node_modules/node-llama-cpp/dist/gguf/readGgufFileInfo.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/readGgufFileInfo.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"readGgufFileInfo.js","sourceRoot":"","sources":["../../src/gguf/readGgufFileInfo.ts"],"names":[],"mappings":"AACA,OAAO,EAAC,KAAK,EAAC,MAAM,mBAAmB,CAAC;AAExC,OAAO,EAAC,sBAAsB,EAAE,UAAU,EAAE,aAAa,EAAE,qBAAqB,EAAC,MAAM,2BAA2B,CAAC;AAGnH,OAAO,EAAC,SAAS,EAAC,MAAM,uBAAuB,CAAC;AAChD,OAAO,EAAC,0BAA0B,EAAC,MAAM,6CAA6C,CAAC;AACvF,OAAO,EAAC,gBAAgB,EAAC,MAAM,mCAAmC,CAAC;AACnE,OAAO,EAAC,4BAA4B,EAAC,MAAM,aAAa,CAAC;AACzD,OAAO,EAAC,wBAAwB,EAAC,MAAM,qCAAqC,CAAC;AAC7E,OAAO,EAAC,qBAAqB,EAAC,MAAM,kCAAkC,CAAC;AAKvE;;;;;GAKG;AACH,MAAM,CAAC,KAAK,UAAU,gBAAgB,CAAC,SAAiB,EAAE,EACtD,cAAc,GAAG,IAAI,EACrB,UAAU,EACV,UAAU,GAAG,EAAE,EACf,WAAW,GAAG,IAAI,EAClB,iBAAiB,GAAG,4BAA4B,EAChD,YAAY,GAAG,EAAE,EACjB,gBAAgB,GAAG,IAAI,EACvB,MAAM,EACN,MAAM,EACN,SAAS,KAqDT,EAAE;IACF,MAAM,gBAAgB,GAAG,UAAU,KAAK,SAAS,IAAI,CAAC,UAAU,IAAI,IAAI,IAAI,CAAC,KAAK,CAAC,SAAS,CAAC,IAAI,UAAU,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC;IAEzH,KAAK,UAAU,gBAAgB,CAAC,SAAiB;QAC7C,IAAI,gBAAgB,EAAE,CAAC;YACnB,MAAM,cAAc,GAAG,MAAM,qBAAqB,CAAC,aAAa,CAAC,SAAS,EAAE,SAAS,EAAE,SAAS,CAAC,EAAE;gBAC/F,MAAM,EAAE,SAAS,EAAE,MAAM;gBACzB,mBAAmB,EAAE,sBAAsB,CAAC,YAAY,CAAC;aAC5D,CAAC,CAAC;YACH,OAAO,IAAI,0BAA0B,CAAC;gBAClC,GAAG,EAAE,cAAc,EAAE,WAAW,IAAI,wBAAwB,CAAC,SAAS,EAAE,SAAS,CAAC;gBAClF,YAAY,EAAE,iBAAiB;gBAC/B,OAAO,EAAE,YAAY;gBACrB,MAAM;gBACN,MAAM;gBACN,SAAS;aACZ,CAAC,CAAC;QACP,CAAC;aAAM,IAAI,UAAU,KAAK,YAAY,IAAI,UAAU,IAAI,IAAI,EAAE,CAAC;YAC3D,OAAO,IAAI,gBAAgB,CAAC;gBACxB,QAAQ,EAAE,SAAS;gBACnB,MAAM;aACT,CAAC,CAAC;QACP,CAAC;QAED,KAAM,UAA2B,CAAC;QAClC,MAAM,IAAI,KAAK,CAAC,2BAA2B,UAAU,EAAE,CAAC,CAAC;IAC7D,CAAC;IAED,KAAK,UAAU,cAAc,CAAC,SAAiB,EAAE,kBAA0B,CAAC;QACxE,MAAM,UAAU,GAAG,MAAM,gBAAgB,CAAC,SAAS,CAAC,CAAC;QACrD,MAAM,GAAG,GAAG,MAAM,SAAS,CAAC;YACxB,UAAU;YACV,UAAU;YACV,cAAc;YACd,WAAW;SACd,CAAC,CAAC;QAEH,IAAI,eAAe,GAAG,CAAC,EAAE,CAAC;YACtB,KAAK,MAAM,MAAM,IAAI,GAAG,CAAC,UAAU,IAAI,EAAE;gBACpC,MAAmC,CAAC,QAAQ,GAAG,eAAe,CAAC;QACxE,CAAC;QAED,OAAO,GAAG,CAAC;IACf,CAAC;IAED,IAAI,CAAC,gBAAgB;QACjB,OAAO,MAAM,cAAc,CAAC,SAAS,CAAC,CAAC;IAE3C,MAAM,iBAAiB,GAAG,qBAAqB,CAAC,SAAS,CAAC,CAAC;IAE3D,IAAI,iBAAiB,CAAC,MAAM,KAAK,CAAC;QAC9B,OAAO,MAAM,cAAc,CAAC,iBAAiB,CAAC,CAAC,CAAE,CAAC,CAAC;IAEvD,MAAM,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,GAAG,MAAM,OAAO,CAAC,GAAG,CACtC,iBAAiB,CAAC,GAAG,CAAC,CAAC,QAAQ,EAAE,KAAK,EAAE,EAAE,CAAC,cAAc,CAAC,QAAQ,EAAE,KAAK,GAAG,CAAC,CAAC,CAAC,CAClF,CAAC;IAEF,IAAI,KAAK,IAAI,IAAI;QACb,MAAM,IAAI,KAAK,CAAC,8CAA8C,CAAC,CAAC;IAEpE,OAAO;QACH,OAAO,EAAE,KAAK,CAAC,OAAO;QACtB,WAAW,EAAE,KAAK,CAAC,WAAW;QAC9B,QAAQ,EAAE,KAAK,CAAC,QAAQ;QACxB,oBAAoB,EAAE,KAAK,CAAC,oBAAoB;QAChD,UAAU,EAAE,KAAK,CAAC,UAAU;QAC5B,YAAY,EAAE,KAAK,CAAC,YAAY;QAChC,YAAY,EAAE,iBAAiB,CAAC,MAAM;QACtC,mBAAmB,EAAE,KAAK,CAAC,mBAAmB,IAAI,IAAI;YAClD,CAAC,CAAC,SAAS;YACX,CAAC,CAAC,CAAC,KAAK,CAAC,mBAAmB,GAAG,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,IAAI,EAAE,EAAE,CAAC,CAAC,GAAG,GAAG,CAAC,IAAI,CAAC,mBAAmB,IAAI,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QAC1G,gBAAgB,EAAE,MAAM,CAAC,KAAK,CAAC,gBAAgB,CAAC,GAAG,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,IAAI,EAAE,EAAE,CAAC,GAAG,GAAG,MAAM,CAAC,IAAI,CAAC,gBAAgB,CAAC,EAAE,CAAC,CAAC;QACrH,iBAAiB,EAAE,KAAK,CAAC,iBAAiB,GAAG,IAAI,CAAC,MAAM,CAAC,CAAC,GAAG,EAAE,IAAI,EAAE,EAAE,CAAC,GAAG,GAAG,IAAI,CAAC,iBAAiB,EAAE,CAAC,CAAC;QACxG,cAAc,EAAE,KAAK,CAAC,cAAc,IAAI,IAAI;YACxC,CAAC,CAAC,SAAS;YACX,CAAC,CAAC,CAAC,KAAK,EAAE,GAAG,IAAI,CAAC,CAAC,OAAO,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC,IAAI,CAAC,cAAc,IAAI,EAAE,CAAC,CAAC;QACrE,cAAc,EAAE,KAAK,CAAC,cAAc;KAChB,CAAC;AAC7B,CAAC"}
85
node_modules/node-llama-cpp/dist/gguf/types/GgufFileInfoTypes.d.ts
generated
vendored
Normal file
85
node_modules/node-llama-cpp/dist/gguf/types/GgufFileInfoTypes.d.ts
generated
vendored
Normal file
@@ -0,0 +1,85 @@
import type { GgufReadOffset } from "../utils/GgufReadOffset.js";
import type { GgufFileReader } from "../fileReaders/GgufFileReader.js";
import type { MergeOptionalUnionTypes } from "../../utils/mergeUnionTypes.js";
import type { GgufArchitectureType, GgufMetadata } from "./GgufMetadataTypes.js";
import type { GgufTensorInfo } from "./GgufTensorInfoTypes.js";
export type MetadataValue = string | number | bigint | boolean | MetadataValue[];
export type MetadataKeyValueRecord = Record<string, MetadataValue>;
export type MetadataNestedObject = {
    [key: string]: MetadataValue | MetadataNestedObject;
};
export type GgufFileInfo = {
    readonly version: 2 | 3 | number;
    readonly tensorCount: number | bigint;
    readonly metadata: GgufMetadata;
    readonly metadataSize: number;
    /** Same value as `metadata[metadata.general.architecture]`, but with merged types for convenience */
    readonly architectureMetadata: MergeOptionalUnionTypes<Exclude<GgufMetadata[GgufArchitectureType], undefined>>;
    /** can be null if `readTensorInfo` is set to `false` */
    readonly tensorInfo?: GgufTensorInfo[];
    /** can be null if `readTensorInfo` is set to `false` */
    readonly tensorInfoSize?: number;
    /**
     * For spliced metadata of multiple file parts,
     * this will be the number of file parts read and spliced into this metadata.
     *
     * When no splicing is done, this will be `1`.
     */
    readonly splicedParts: number;
    /**
     * For spliced metadata of multiple file parts, this will be the total tensor count from all the parts
     *
     * When no splicing is done, this will be the same as `tensorCount`.
     */
    readonly totalTensorCount: number | bigint;
    /**
     * For spliced metadata of multiple file parts, this will be the total metadata size from all the parts
     *
     * When no splicing is done, this will be the same as `metadataSize`.
     */
    readonly totalMetadataSize: number;
    /**
     * For spliced metadata of multiple file parts, this will be the spliced tensorInfo from all the parts.
     * Can be null if `readTensorInfo` is set to `false`
     *
     * When no splicing is done, this will be the same as `tensorInfo`.
     */
    readonly fullTensorInfo?: GgufTensorInfo[];
    /**
     * For spliced metadata of multiple file parts, this will be the total tensor info size from all the parts
     *
     * When no splicing is done, this will be the same as `tensorInfoSize`.
     */
    readonly totalTensorInfoSize?: number;
};
export declare const enum GgufValueType {
    Uint8 = 0,
    Int8 = 1,
    Uint16 = 2,
    Int16 = 3,
    Uint32 = 4,
    Int32 = 5,
    Float32 = 6,
    Bool = 7,
    String = 8,
    Array = 9,
    Uint64 = 10,
    Int64 = 11,
    Float64 = 12
}
export type GgufVersionParserOptions = {
    fileReader: GgufFileReader;
    readTensorInfo?: boolean;
    ignoreKeys?: string[];
    version: number;
    readOffset: GgufReadOffset;
    logWarnings: boolean;
};
export type GgufVersionParserResult = {
    tensorCount: number | bigint;
    metadata: GgufMetadata;
    tensorInfo?: GgufTensorInfo[];
    metadataSize: number;
    tensorInfoSize?: number;
    tensorDataOffset?: number;
};
18
node_modules/node-llama-cpp/dist/gguf/types/GgufFileInfoTypes.js
generated
vendored
Normal file
18
node_modules/node-llama-cpp/dist/gguf/types/GgufFileInfoTypes.js
generated
vendored
Normal file
@@ -0,0 +1,18 @@
// source: `enum gguf_type` in `ggml.h` in the `llama.cpp` source code
export var GgufValueType;
(function (GgufValueType) {
    GgufValueType[GgufValueType["Uint8"] = 0] = "Uint8";
    GgufValueType[GgufValueType["Int8"] = 1] = "Int8";
    GgufValueType[GgufValueType["Uint16"] = 2] = "Uint16";
    GgufValueType[GgufValueType["Int16"] = 3] = "Int16";
    GgufValueType[GgufValueType["Uint32"] = 4] = "Uint32";
    GgufValueType[GgufValueType["Int32"] = 5] = "Int32";
    GgufValueType[GgufValueType["Float32"] = 6] = "Float32";
    GgufValueType[GgufValueType["Bool"] = 7] = "Bool";
    GgufValueType[GgufValueType["String"] = 8] = "String";
    GgufValueType[GgufValueType["Array"] = 9] = "Array";
    GgufValueType[GgufValueType["Uint64"] = 10] = "Uint64";
    GgufValueType[GgufValueType["Int64"] = 11] = "Int64";
    GgufValueType[GgufValueType["Float64"] = 12] = "Float64";
})(GgufValueType || (GgufValueType = {}));
//# sourceMappingURL=GgufFileInfoTypes.js.map
1
node_modules/node-llama-cpp/dist/gguf/types/GgufFileInfoTypes.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/gguf/types/GgufFileInfoTypes.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"GgufFileInfoTypes.js","sourceRoot":"","sources":["../../../src/gguf/types/GgufFileInfoTypes.ts"],"names":[],"mappings":"AAkEA,sEAAsE;AACtE,MAAM,CAAN,IAAkB,aAcjB;AAdD,WAAkB,aAAa;IAC3B,mDAAS,CAAA;IACT,iDAAQ,CAAA;IACR,qDAAU,CAAA;IACV,mDAAS,CAAA;IACT,qDAAU,CAAA;IACV,mDAAS,CAAA;IACT,uDAAW,CAAA;IACX,iDAAQ,CAAA;IACR,qDAAU,CAAA;IACV,mDAAS,CAAA;IACT,sDAAW,CAAA;IACX,oDAAU,CAAA;IACV,wDAAY,CAAA;AAChB,CAAC,EAdiB,aAAa,KAAb,aAAa,QAc9B"}
459
node_modules/node-llama-cpp/dist/gguf/types/GgufMetadataTypes.d.ts
generated
vendored
Normal file
459
node_modules/node-llama-cpp/dist/gguf/types/GgufMetadataTypes.d.ts
generated
vendored
Normal file
@@ -0,0 +1,459 @@
export declare const enum GgufArchitectureType {
    llama = "llama",
    llama4 = "llama4",
    deci = "deci",
    falcon = "falcon",
    grok = "grok",
    gpt2 = "gpt2",
    gptj = "gptj",
    gptneox = "gptneox",
    mpt = "mpt",
    baichuan = "baichuan",
    starcoder = "starcoder",
    refact = "refact",
    bert = "bert",
    modernBert = "modern-bert",
    nomicBert = "nomic-bert",
    nomicBertMoe = "nomic-bert-moe",
    neoBert = "neo-bert",
    jinaBertV2 = "jina-bert-v2",
    jinaBertV3 = "jina-bert-v3",
    bloom = "bloom",
    stablelm = "stablelm",
    qwen = "qwen",
    qwen2 = "qwen2",
    qwen2moe = "qwen2moe",
    qwen2vl = "qwen2vl",
    qwen3 = "qwen3",
    qwen3moe = "qwen3moe",
    qwen3next = "qwen3next",
    qwen3vl = "qwen3vl",
    qwen3vlmoe = "qwen3vlmoe",
    phi2 = "phi2",
    phi3 = "phi3",
    phimoe = "phimoe",
    plamo = "plamo",
    plamo2 = "plamo2",
    plamo3 = "plamo3",
    codeshell = "codeshell",
    orion = "orion",
    internlm2 = "internlm2",
    minicpm = "minicpm",
    minicpm3 = "minicpm3",
    gemma = "gemma",
    gemma2 = "gemma2",
    gemma3 = "gemma3",
    gemma3n = "gemma3n",
    gemmaEmbedding = "gemma-embedding",
    starcoder2 = "starcoder2",
    mamba = "mamba",
    mamba2 = "mamba2",
    jamba = "jamba",
    falconH1 = "falcon-h1",
    xverse = "xverse",
    commandR = "command-r",
    cohere2 = "cohere2",
    dbrx = "dbrx",
    olmo = "olmo",
    olmo2 = "olmo2",
    olmoe = "olmoe",
    openelm = "openelm",
    arctic = "arctic",
    deepseek = "deepseek",
    deepseek2 = "deepseek2",
    chatglm = "chatglm",
    glm4 = "glm4",
    glm4moe = "glm4moe",
    bitnet = "bitnet",
    t5 = "t5",
    t5encoder = "t5encoder",
    jais = "jais",
    nemotron = "nemotron",
    nemotronH = "nemotron_h",
    nemotronHMoe = "nemotron_h_moe",
    exaone = "exaone",
    exaone4 = "exaone4",
    rwkv6 = "rwkv6",
    rwkv6qwen2 = "rwkv6qwen2",
    rwkv7 = "rwkv7",
    arwkv7 = "arwkv7",
    granite = "granite",
    granitemoe = "granitemoe",
    granitehybrid = "granitehybrid",
    chameleon = "chameleon",
    wavtokenizerDec = "wavtokenizer-dec",
    plm = "plm",
    bailingmoe = "bailingmoe",
    bailingmoe2 = "bailingmoe2",
    dots1 = "dots1",
    arcee = "arcee",
    afmoe = "afmoe",
    ernie4_5 = "ernie4_5",
    ernie4_5Moe = "ernie4_5-moe",
    hunyuanMoe = "hunyuan-moe",
    hunyuanDense = "hunyuan-dense",
    smollm3 = "smollm3",
    gptOss = "gpt-oss",
    lfm2 = "lfm2",
    lfm2moe = "lfm2moe",
    dream = "dream",
    smallthinker = "smallthinker",
    llada = "llada",
    lladaMoe = "llada-moe",
    seedOss = "seed_oss",
    grovemoe = "grovemoe",
    apertus = "apertus",
    minimaxM2 = "minimax-m2",
    cogvlm = "cogvlm",
    rnd1 = "rnd1",
    panguEmbedded = "pangu-embedded",
    mistral3 = "mistral3",
    mimo2 = "mimo2",
    llamaEmbed = "llama-embed",
    maincoder = "maincoder",
    clip = "clip",
    unknown = "(unknown)"
}
export type GgufMetadata<A extends GgufArchitectureType = GgufArchitectureType> = {
    readonly general: GgufMetadataGeneral<A>;
    readonly tokenizer: GgufMetadataTokenizer;
} & (GgufArchitectureType extends A ? {
    readonly [key in GgufArchitectureType]?: key extends keyof GgufMetadataLlmToType ? GgufMetadataLlmToType[key] : GgufMetadataDefaultArchitectureType;
} : {
    readonly [key in A]: key extends keyof GgufMetadataLlmToType ? GgufMetadataLlmToType[key] : GgufMetadataDefaultArchitectureType;
});
export type GgufMetadataLlmToType = {
    [GgufArchitectureType.llama]: GgufMetadataLlmLLaMA;
    [GgufArchitectureType.mpt]: GgufMetadataMPT;
    [GgufArchitectureType.gptneox]: GgufMetadataGPTNeoX;
    [GgufArchitectureType.gptj]: GgufMetadataGPTJ;
    [GgufArchitectureType.gpt2]: GgufMetadataGPT2;
    [GgufArchitectureType.bloom]: GgufMetadataBloom;
    [GgufArchitectureType.falcon]: GgufMetadataFalcon;
    [GgufArchitectureType.mamba]: GgufMetadataMamba;
};
export declare enum GgufFileType {
    ALL_F32 = 0,
    MOSTLY_F16 = 1,
    MOSTLY_Q4_0 = 2,
    MOSTLY_Q4_1 = 3,
    MOSTLY_Q4_1_SOME_F16 = 4,// deprecated
    MOSTLY_Q4_2 = 5,// deprecated
    MOSTLY_Q4_3 = 6,// deprecated
    MOSTLY_Q8_0 = 7,
    MOSTLY_Q5_0 = 8,
    MOSTLY_Q5_1 = 9,
    MOSTLY_Q2_K = 10,
    MOSTLY_Q3_K_S = 11,
    MOSTLY_Q3_K_M = 12,
    MOSTLY_Q3_K_L = 13,
    MOSTLY_Q4_K_S = 14,
    MOSTLY_Q4_K_M = 15,
    MOSTLY_Q5_K_S = 16,
    MOSTLY_Q5_K_M = 17,
    MOSTLY_Q6_K = 18,
    MOSTLY_IQ2_XXS = 19,
    MOSTLY_IQ2_XS = 20,
    MOSTLY_Q2_K_S = 21,
    MOSTLY_IQ3_XS = 22,
    MOSTLY_IQ3_XXS = 23,
    MOSTLY_IQ1_S = 24,
    MOSTLY_IQ4_NL = 25,
    MOSTLY_IQ3_S = 26,
    MOSTLY_IQ3_M = 27,
    MOSTLY_IQ2_S = 28,
    MOSTLY_IQ2_M = 29,
    MOSTLY_IQ4_XS = 30,
    MOSTLY_IQ1_M = 31,
    MOSTLY_BF16 = 32,
    MOSTLY_Q4_0_4_4 = 33,// deprecated
    MOSTLY_Q4_0_4_8 = 34,// deprecated
    MOSTLY_Q4_0_8_8 = 35,// deprecated
    MOSTLY_TQ1_0 = 36,
    MOSTLY_TQ2_0 = 37,
    MOSTLY_MXFP4_MOE = 38
}
export type GgufMetadataGeneral<A extends GgufArchitectureType = GgufArchitectureType> = {
    readonly architecture: A;
    /**
     * The version of the quantization format. Not required if the model is not
     * quantized (i.e. no tensors are quantized). If any tensors are quantized,
     * this must be present. This is separate to the quantization scheme of the
     * tensors itself; the quantization version may change without changing the
     * scheme's name (e.g. the quantization scheme is Q5_K, and the quantization
     * version is 4).
     */
    readonly quantization_version: string;
    /**
     * the global alignment to use, as described above. This can vary to allow
     * for different alignment schemes, but it must be a multiple of 8. Some
     * writers may not write the alignment. If the alignment is not specified,
     * assume it is `32`.
     */
    readonly alignment?: number;
    /**
     * The name of the model. This should be a human-readable name that can be
     * used to identify the model. It should be unique within the community
     * that the model is defined in.
     */
    readonly name?: string;
    readonly basename?: string;
    readonly size_label?: string;
    readonly author?: string;
    /**
     * URL to the model's homepage. This can be a GitHub repo, a paper, etc.
     */
    readonly url?: string;
    /**
     * free-form description of the model including anything that isn't
     * covered by the other fields
     */
    readonly description?: string;
    /**
     * License of the model, expressed as a SPDX license expression
     * (e.g. `MIT OR Apache-2.0`). *Should not* include any other information,
     * such as the license text or the URL to the license.
     */
    readonly license?: string;
    readonly "license.name"?: string;
    readonly "license.link"?: string;
    /**
     * Information about where this model came from. This is useful for tracking
     * the provenance of the model, and for finding the original source if the
     * model is modified. For a model that was converted from GGML, for
     * example, these keys would point to the model that was converted from.
     */
    readonly source?: {
        /**
         * URL to the source of the model. Can be a GitHub repo, a paper, etc.
         */
        readonly url?: string;
        readonly huggingface?: {
            readonly repository?: string;
        };
    };
    /**
     * An enumerated value describing the type of the majority of the tensors
     * in the file. Optional; can be inferred from the tensor types.
     */
    readonly file_type?: GgufFileType | undefined;
    readonly base_model?: {
        readonly count: number;
        readonly [key: `${bigint}`]: {
            readonly name?: string;
            readonly author?: string;
            readonly version?: string;
            readonly organization?: string;
            readonly url?: string;
            readonly doi?: string;
            readonly uuid?: string;
            readonly repo_url?: string;
        };
    };
};
export declare const enum GgufMetadataTokenizerTokenType {
    undefined = 0,
    normal = 1,
    unknown = 2,
    control = 3,
    userDefined = 4,
    unused = 5,
    byte = 6
}
export type GgufMetadataTokenizer = {
    readonly ggml: {
        readonly model: "no_vocab" | "none" | "llama" | "gpt2" | "bert" | "rwkv" | "t5" | "plamo2" | string;
        readonly pre?: "default" | "llama3" | "llama-v3" | "llama-bpe" | "deepseek-llm" | "deepseek-coder" | "falcon" | "falcon3" | "pixtral" | "mpt" | "starcoder" | "gpt-2" | "phi-2" | "jina-es" | "jina-de" | "jina-v1-en" | "jina-v2-es" | "jina-v2-de" | "jina-v2-code" | "refact" | "command-r" | "qwen2" | "stablelm2" | "olmo" | "dbrx" | "smaug-bpe" | "poro-chat" | "chatglm-bpe" | "viking" | "jais" | "tekken" | "smollm" | "codeshell" | "bloom" | "gpt3-finnish" | "exaone" | "exaone4" | "chameleon" | "minerva-7b" | "megrez" | "gpt-4o" | "superbpe" | "trillion" | "bailingmoe" | "a.x-4.0" | "mellum" | string;
        readonly tokens: readonly string[];
        readonly token_type: GgufMetadataTokenizerTokenType[];
        readonly token_type_count?: number;
        readonly scores?: readonly number[];
        readonly merges?: readonly string[];
        readonly bos_token_id?: number;
        readonly eos_token_id?: number;
        readonly eot_token_id?: number;
        readonly eom_token_id?: number;
        readonly unknown_token_id?: number;
        readonly seperator_token_id?: number;
        readonly padding_token_id?: number;
        readonly cls_token_id?: number;
        readonly mask_token_id?: number;
        readonly add_bos_token?: boolean;
        readonly add_eos_token?: boolean;
        readonly add_space_prefix?: boolean;
        readonly added_tokens?: readonly string[];
        readonly fim_pre_token_id?: number;
        readonly fim_suf_token_id?: number;
        readonly fim_mid_token_id?: number;
        readonly fim_pad_token_id?: number;
        readonly fim_rep_token_id?: number;
        readonly fim_sep_token_id?: number;
        /** @deprecated */
        readonly prefix_token_id?: number;
        /** @deprecated */
        readonly suffix_token_id?: number;
        /** @deprecated */
        readonly middle_token_id?: number;
    };
    readonly huggingface?: {
        readonly json?: string;
    };
    readonly chat_template?: string;
    readonly "chat_template.rerank"?: string;
};
export declare const enum GgufMetadataArchitecturePoolingType {
    unspecified = -1,
    none = 0,
    mean = 1,
    cls = 2,
    last = 3,
    rank = 4
}
export type GgufMetadataDefaultArchitectureType = {
    readonly vocab_size?: number;
    readonly context_length?: number;
    readonly embedding_length?: number;
    readonly block_count?: number;
    readonly feed_forward_length?: number;
    readonly use_parallel_residual?: boolean;
    readonly tensor_data_layout?: string;
    readonly expert_count?: number;
    readonly expert_used_count?: number;
    readonly pooling_type?: GgufMetadataArchitecturePoolingType;
    readonly logit_scale?: number;
    readonly token_shift_count?: number;
    readonly attention?: {
        readonly head_count?: number;
        readonly head_count_kv?: number | number[];
        readonly max_alibi_bias?: number;
        readonly clamp_kqv?: number;
        readonly layer_norm_epsilon?: number;
        readonly layer_norm_rms_epsilon?: number;
        readonly key_length?: number;
        readonly value_length?: number;
        readonly sliding_window?: number;
        readonly causal?: boolean;
    };
    readonly rope?: {
        readonly dimension_count?: number;
        readonly freq_base?: number;
        readonly scale_linear?: number;
        readonly scaling?: {
            readonly type?: "none" | "linear" | "yarn" | string;
            readonly factor?: number;
            readonly original_context_length?: number;
            readonly finetuned?: boolean;
        };
    };
    readonly ssm?: {
        readonly conv_kernel?: number;
        readonly inner_size?: number;
        readonly state_size?: number;
        readonly time_step_rank?: number;
    };
    readonly wkv?: {
        readonly head_size?: number;
    };
};
export type GgufMetadataLlmLLaMA = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly feed_forward_length: number;
    readonly attention: {
        readonly head_count: number;
        readonly layer_norm_rms_epsilon: number;
        readonly head_count_kv?: number;
    };
    readonly rope: {
        readonly dimension_count: number;
        readonly scale?: number;
    };
    readonly expert_count?: number;
    readonly expert_used_count?: number;
    readonly tensor_data_layout?: string;
};
export type GgufMetadataMPT = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly attention: {
        readonly head_count: number;
        readonly alibi_bias_max: number;
        readonly clip_kqv: number;
        readonly layer_norm_epsilon: number;
    };
};
export type GgufMetadataGPTNeoX = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly use_parallel_residual: boolean;
    readonly rope: {
        readonly dimension_count: number;
        readonly scale?: number;
    };
    readonly attention: {
        readonly head_count: number;
        readonly layer_norm_epsilon: number;
    };
};
export type GgufMetadataGPTJ = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly rope: {
        readonly dimension_count: number;
        readonly scale?: number;
    };
    readonly attention: {
        readonly head_count: number;
        readonly layer_norm_epsilon: number;
    };
};
export type GgufMetadataGPT2 = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly attention: {
        readonly head_count: number;
        readonly layer_norm_epsilon: number;
    };
};
export type GgufMetadataBloom = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly feed_forward_length: number;
    readonly attention: {
        readonly head_count: number;
        readonly layer_norm_epsilon: number;
    };
};
export type GgufMetadataFalcon = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly attention: {
        readonly head_count: number;
        readonly head_count_kv: number;
        readonly use_norm: boolean;
        readonly layer_norm_epsilon: number;
    };
    readonly tensor_data_layout?: string;
};
export type GgufMetadataMamba = {
    readonly context_length: number;
    readonly embedding_length: number;
    readonly block_count: number;
    readonly ssm: {
        readonly conv_kernel: number;
        readonly inner_size: number;
        readonly state_size: number;
        readonly time_step_rank: number;
    };
    readonly attention: {
        readonly layer_norm_rms_epsilon: number;
    };
};
export declare function isGgufMetadataOfArchitectureType<A extends GgufArchitectureType>(metadata: GgufMetadata, type: A): metadata is GgufMetadata<A>;
183
node_modules/node-llama-cpp/dist/gguf/types/GgufMetadataTypes.js
generated
vendored
Normal file
183
node_modules/node-llama-cpp/dist/gguf/types/GgufMetadataTypes.js
generated
vendored
Normal file
@@ -0,0 +1,183 @@
|
||||
export var GgufArchitectureType;
(function (GgufArchitectureType) {
    GgufArchitectureType["llama"] = "llama";
    GgufArchitectureType["llama4"] = "llama4";
    GgufArchitectureType["deci"] = "deci";
    GgufArchitectureType["falcon"] = "falcon";
    GgufArchitectureType["grok"] = "grok";
    GgufArchitectureType["gpt2"] = "gpt2";
    GgufArchitectureType["gptj"] = "gptj";
    GgufArchitectureType["gptneox"] = "gptneox";
    GgufArchitectureType["mpt"] = "mpt";
    GgufArchitectureType["baichuan"] = "baichuan";
    GgufArchitectureType["starcoder"] = "starcoder";
    GgufArchitectureType["refact"] = "refact";
    GgufArchitectureType["bert"] = "bert";
    GgufArchitectureType["modernBert"] = "modern-bert";
    GgufArchitectureType["nomicBert"] = "nomic-bert";
    GgufArchitectureType["nomicBertMoe"] = "nomic-bert-moe";
    GgufArchitectureType["neoBert"] = "neo-bert";
    GgufArchitectureType["jinaBertV2"] = "jina-bert-v2";
    GgufArchitectureType["jinaBertV3"] = "jina-bert-v3";
    GgufArchitectureType["bloom"] = "bloom";
    GgufArchitectureType["stablelm"] = "stablelm";
    GgufArchitectureType["qwen"] = "qwen";
    GgufArchitectureType["qwen2"] = "qwen2";
    GgufArchitectureType["qwen2moe"] = "qwen2moe";
    GgufArchitectureType["qwen2vl"] = "qwen2vl";
    GgufArchitectureType["qwen3"] = "qwen3";
    GgufArchitectureType["qwen3moe"] = "qwen3moe";
    GgufArchitectureType["qwen3next"] = "qwen3next";
    GgufArchitectureType["qwen3vl"] = "qwen3vl";
    GgufArchitectureType["qwen3vlmoe"] = "qwen3vlmoe";
    GgufArchitectureType["phi2"] = "phi2";
    GgufArchitectureType["phi3"] = "phi3";
    GgufArchitectureType["phimoe"] = "phimoe";
    GgufArchitectureType["plamo"] = "plamo";
    GgufArchitectureType["plamo2"] = "plamo2";
    GgufArchitectureType["plamo3"] = "plamo3";
    GgufArchitectureType["codeshell"] = "codeshell";
    GgufArchitectureType["orion"] = "orion";
    GgufArchitectureType["internlm2"] = "internlm2";
    GgufArchitectureType["minicpm"] = "minicpm";
    GgufArchitectureType["minicpm3"] = "minicpm3";
    GgufArchitectureType["gemma"] = "gemma";
    GgufArchitectureType["gemma2"] = "gemma2";
    GgufArchitectureType["gemma3"] = "gemma3";
    GgufArchitectureType["gemma3n"] = "gemma3n";
    GgufArchitectureType["gemmaEmbedding"] = "gemma-embedding";
    GgufArchitectureType["starcoder2"] = "starcoder2";
    GgufArchitectureType["mamba"] = "mamba";
    GgufArchitectureType["mamba2"] = "mamba2";
    GgufArchitectureType["jamba"] = "jamba";
    GgufArchitectureType["falconH1"] = "falcon-h1";
    GgufArchitectureType["xverse"] = "xverse";
    GgufArchitectureType["commandR"] = "command-r";
    GgufArchitectureType["cohere2"] = "cohere2";
    GgufArchitectureType["dbrx"] = "dbrx";
    GgufArchitectureType["olmo"] = "olmo";
    GgufArchitectureType["olmo2"] = "olmo2";
    GgufArchitectureType["olmoe"] = "olmoe";
    GgufArchitectureType["openelm"] = "openelm";
    GgufArchitectureType["arctic"] = "arctic";
    GgufArchitectureType["deepseek"] = "deepseek";
    GgufArchitectureType["deepseek2"] = "deepseek2";
    GgufArchitectureType["chatglm"] = "chatglm";
    GgufArchitectureType["glm4"] = "glm4";
    GgufArchitectureType["glm4moe"] = "glm4moe";
    GgufArchitectureType["bitnet"] = "bitnet";
    GgufArchitectureType["t5"] = "t5";
    GgufArchitectureType["t5encoder"] = "t5encoder";
    GgufArchitectureType["jais"] = "jais";
    GgufArchitectureType["nemotron"] = "nemotron";
    GgufArchitectureType["nemotronH"] = "nemotron_h";
    GgufArchitectureType["nemotronHMoe"] = "nemotron_h_moe";
    GgufArchitectureType["exaone"] = "exaone";
    GgufArchitectureType["exaone4"] = "exaone4";
    GgufArchitectureType["rwkv6"] = "rwkv6";
    GgufArchitectureType["rwkv6qwen2"] = "rwkv6qwen2";
    GgufArchitectureType["rwkv7"] = "rwkv7";
    GgufArchitectureType["arwkv7"] = "arwkv7";
    GgufArchitectureType["granite"] = "granite";
    GgufArchitectureType["granitemoe"] = "granitemoe";
    GgufArchitectureType["granitehybrid"] = "granitehybrid";
    GgufArchitectureType["chameleon"] = "chameleon";
    GgufArchitectureType["wavtokenizerDec"] = "wavtokenizer-dec";
    GgufArchitectureType["plm"] = "plm";
    GgufArchitectureType["bailingmoe"] = "bailingmoe";
    GgufArchitectureType["bailingmoe2"] = "bailingmoe2";
    GgufArchitectureType["dots1"] = "dots1";
    GgufArchitectureType["arcee"] = "arcee";
    GgufArchitectureType["afmoe"] = "afmoe";
    GgufArchitectureType["ernie4_5"] = "ernie4_5";
    GgufArchitectureType["ernie4_5Moe"] = "ernie4_5-moe";
    GgufArchitectureType["hunyuanMoe"] = "hunyuan-moe";
    GgufArchitectureType["hunyuanDense"] = "hunyuan-dense";
    GgufArchitectureType["smollm3"] = "smollm3";
    GgufArchitectureType["gptOss"] = "gpt-oss";
    GgufArchitectureType["lfm2"] = "lfm2";
    GgufArchitectureType["lfm2moe"] = "lfm2moe";
    GgufArchitectureType["dream"] = "dream";
    GgufArchitectureType["smallthinker"] = "smallthinker";
    GgufArchitectureType["llada"] = "llada";
    GgufArchitectureType["lladaMoe"] = "llada-moe";
    GgufArchitectureType["seedOss"] = "seed_oss";
    GgufArchitectureType["grovemoe"] = "grovemoe";
    GgufArchitectureType["apertus"] = "apertus";
    GgufArchitectureType["minimaxM2"] = "minimax-m2";
    GgufArchitectureType["cogvlm"] = "cogvlm";
    GgufArchitectureType["rnd1"] = "rnd1";
    GgufArchitectureType["panguEmbedded"] = "pangu-embedded";
    GgufArchitectureType["mistral3"] = "mistral3";
    GgufArchitectureType["mimo2"] = "mimo2";
    GgufArchitectureType["llamaEmbed"] = "llama-embed";
    GgufArchitectureType["maincoder"] = "maincoder";
    GgufArchitectureType["clip"] = "clip";
    GgufArchitectureType["unknown"] = "(unknown)";
})(GgufArchitectureType || (GgufArchitectureType = {}));
// source: `enum llama_ftype` in `llama.h` in the `llama.cpp` source code
export var GgufFileType;
(function (GgufFileType) {
    GgufFileType[GgufFileType["ALL_F32"] = 0] = "ALL_F32";
    GgufFileType[GgufFileType["MOSTLY_F16"] = 1] = "MOSTLY_F16";
    GgufFileType[GgufFileType["MOSTLY_Q4_0"] = 2] = "MOSTLY_Q4_0";
    GgufFileType[GgufFileType["MOSTLY_Q4_1"] = 3] = "MOSTLY_Q4_1";
    GgufFileType[GgufFileType["MOSTLY_Q4_1_SOME_F16"] = 4] = "MOSTLY_Q4_1_SOME_F16";
    GgufFileType[GgufFileType["MOSTLY_Q4_2"] = 5] = "MOSTLY_Q4_2";
    GgufFileType[GgufFileType["MOSTLY_Q4_3"] = 6] = "MOSTLY_Q4_3";
    GgufFileType[GgufFileType["MOSTLY_Q8_0"] = 7] = "MOSTLY_Q8_0";
    GgufFileType[GgufFileType["MOSTLY_Q5_0"] = 8] = "MOSTLY_Q5_0";
    GgufFileType[GgufFileType["MOSTLY_Q5_1"] = 9] = "MOSTLY_Q5_1";
    GgufFileType[GgufFileType["MOSTLY_Q2_K"] = 10] = "MOSTLY_Q2_K";
    GgufFileType[GgufFileType["MOSTLY_Q3_K_S"] = 11] = "MOSTLY_Q3_K_S";
    GgufFileType[GgufFileType["MOSTLY_Q3_K_M"] = 12] = "MOSTLY_Q3_K_M";
    GgufFileType[GgufFileType["MOSTLY_Q3_K_L"] = 13] = "MOSTLY_Q3_K_L";
    GgufFileType[GgufFileType["MOSTLY_Q4_K_S"] = 14] = "MOSTLY_Q4_K_S";
    GgufFileType[GgufFileType["MOSTLY_Q4_K_M"] = 15] = "MOSTLY_Q4_K_M";
    GgufFileType[GgufFileType["MOSTLY_Q5_K_S"] = 16] = "MOSTLY_Q5_K_S";
    GgufFileType[GgufFileType["MOSTLY_Q5_K_M"] = 17] = "MOSTLY_Q5_K_M";
    GgufFileType[GgufFileType["MOSTLY_Q6_K"] = 18] = "MOSTLY_Q6_K";
    GgufFileType[GgufFileType["MOSTLY_IQ2_XXS"] = 19] = "MOSTLY_IQ2_XXS";
    GgufFileType[GgufFileType["MOSTLY_IQ2_XS"] = 20] = "MOSTLY_IQ2_XS";
    GgufFileType[GgufFileType["MOSTLY_Q2_K_S"] = 21] = "MOSTLY_Q2_K_S";
    GgufFileType[GgufFileType["MOSTLY_IQ3_XS"] = 22] = "MOSTLY_IQ3_XS";
    GgufFileType[GgufFileType["MOSTLY_IQ3_XXS"] = 23] = "MOSTLY_IQ3_XXS";
    GgufFileType[GgufFileType["MOSTLY_IQ1_S"] = 24] = "MOSTLY_IQ1_S";
    GgufFileType[GgufFileType["MOSTLY_IQ4_NL"] = 25] = "MOSTLY_IQ4_NL";
    GgufFileType[GgufFileType["MOSTLY_IQ3_S"] = 26] = "MOSTLY_IQ3_S";
    GgufFileType[GgufFileType["MOSTLY_IQ3_M"] = 27] = "MOSTLY_IQ3_M";
    GgufFileType[GgufFileType["MOSTLY_IQ2_S"] = 28] = "MOSTLY_IQ2_S";
    GgufFileType[GgufFileType["MOSTLY_IQ2_M"] = 29] = "MOSTLY_IQ2_M";
    GgufFileType[GgufFileType["MOSTLY_IQ4_XS"] = 30] = "MOSTLY_IQ4_XS";
    GgufFileType[GgufFileType["MOSTLY_IQ1_M"] = 31] = "MOSTLY_IQ1_M";
    GgufFileType[GgufFileType["MOSTLY_BF16"] = 32] = "MOSTLY_BF16";
    GgufFileType[GgufFileType["MOSTLY_Q4_0_4_4"] = 33] = "MOSTLY_Q4_0_4_4";
    GgufFileType[GgufFileType["MOSTLY_Q4_0_4_8"] = 34] = "MOSTLY_Q4_0_4_8";
    GgufFileType[GgufFileType["MOSTLY_Q4_0_8_8"] = 35] = "MOSTLY_Q4_0_8_8";
    GgufFileType[GgufFileType["MOSTLY_TQ1_0"] = 36] = "MOSTLY_TQ1_0";
    GgufFileType[GgufFileType["MOSTLY_TQ2_0"] = 37] = "MOSTLY_TQ2_0";
    GgufFileType[GgufFileType["MOSTLY_MXFP4_MOE"] = 38] = "MOSTLY_MXFP4_MOE";
})(GgufFileType || (GgufFileType = {}));
export var GgufMetadataTokenizerTokenType;
(function (GgufMetadataTokenizerTokenType) {
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["undefined"] = 0] = "undefined";
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["normal"] = 1] = "normal";
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["unknown"] = 2] = "unknown";
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["control"] = 3] = "control";
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["userDefined"] = 4] = "userDefined";
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["unused"] = 5] = "unused";
    GgufMetadataTokenizerTokenType[GgufMetadataTokenizerTokenType["byte"] = 6] = "byte";
})(GgufMetadataTokenizerTokenType || (GgufMetadataTokenizerTokenType = {}));
export var GgufMetadataArchitecturePoolingType;
(function (GgufMetadataArchitecturePoolingType) {
    GgufMetadataArchitecturePoolingType[GgufMetadataArchitecturePoolingType["unspecified"] = -1] = "unspecified";
    GgufMetadataArchitecturePoolingType[GgufMetadataArchitecturePoolingType["none"] = 0] = "none";
    GgufMetadataArchitecturePoolingType[GgufMetadataArchitecturePoolingType["mean"] = 1] = "mean";
    GgufMetadataArchitecturePoolingType[GgufMetadataArchitecturePoolingType["cls"] = 2] = "cls";
    GgufMetadataArchitecturePoolingType[GgufMetadataArchitecturePoolingType["last"] = 3] = "last";
    GgufMetadataArchitecturePoolingType[GgufMetadataArchitecturePoolingType["rank"] = 4] = "rank";
})(GgufMetadataArchitecturePoolingType || (GgufMetadataArchitecturePoolingType = {}));
export function isGgufMetadataOfArchitectureType(metadata, type) {
    return metadata?.general?.architecture === type;
}
//# sourceMappingURL=GgufMetadataTypes.js.map
1
node_modules/node-llama-cpp/dist/gguf/types/GgufMetadataTypes.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"GgufMetadataTypes.js","sourceRoot":"","sources":["../../../src/gguf/types/GgufMetadataTypes.ts"],"names":[],"mappings":"AAAA,MAAM,CAAN,IAAkB,oBAmHjB;AAnHD,WAAkB,oBAAoB;IAClC,uCAAe,CAAA;IACf,yCAAiB,CAAA;IACjB,qCAAa,CAAA;IACb,yCAAiB,CAAA;IACjB,qCAAa,CAAA;IACb,qCAAa,CAAA;IACb,qCAAa,CAAA;IACb,2CAAmB,CAAA;IACnB,mCAAW,CAAA;IACX,6CAAqB,CAAA;IACrB,+CAAuB,CAAA;IACvB,yCAAiB,CAAA;IACjB,qCAAa,CAAA;IACb,kDAA0B,CAAA;IAC1B,gDAAwB,CAAA;IACxB,uDAA+B,CAAA;IAC/B,4CAAoB,CAAA;IACpB,mDAA2B,CAAA;IAC3B,mDAA2B,CAAA;IAC3B,uCAAe,CAAA;IACf,6CAAqB,CAAA;IACrB,qCAAa,CAAA;IACb,uCAAe,CAAA;IACf,6CAAqB,CAAA;IACrB,2CAAmB,CAAA;IACnB,uCAAe,CAAA;IACf,6CAAqB,CAAA;IACrB,+CAAuB,CAAA;IACvB,2CAAmB,CAAA;IACnB,iDAAyB,CAAA;IACzB,qCAAa,CAAA;IACb,qCAAa,CAAA;IACb,yCAAiB,CAAA;IACjB,uCAAe,CAAA;IACf,yCAAiB,CAAA;IACjB,yCAAiB,CAAA;IACjB,+CAAuB,CAAA;IACvB,uCAAe,CAAA;IACf,+CAAuB,CAAA;IACvB,2CAAmB,CAAA;IACnB,6CAAqB,CAAA;IACrB,uCAAe,CAAA;IACf,yCAAiB,CAAA;IACjB,yCAAiB,CAAA;IACjB,2CAAmB,CAAA;IACnB,0DAAkC,CAAA;IAClC,iDAAyB,CAAA;IACzB,uCAAe,CAAA;IACf,yCAAiB,CAAA;IACjB,uCAAe,CAAA;IACf,8CAAsB,CAAA;IACtB,yCAAiB,CAAA;IACjB,8CAAsB,CAAA;IACtB,2CAAmB,CAAA;IACnB,qCAAa,CAAA;IACb,qCAAa,CAAA;IACb,uCAAe,CAAA;IACf,uCAAe,CAAA;IACf,2CAAmB,CAAA;IACnB,yCAAiB,CAAA;IACjB,6CAAqB,CAAA;IACrB,+CAAuB,CAAA;IACvB,2CAAmB,CAAA;IACnB,qCAAa,CAAA;IACb,2CAAmB,CAAA;IACnB,yCAAiB,CAAA;IACjB,iCAAS,CAAA;IACT,+CAAuB,CAAA;IACvB,qCAAa,CAAA;IACb,6CAAqB,CAAA;IACrB,gDAAwB,CAAA;IACxB,uDAA+B,CAAA;IAC/B,yCAAiB,CAAA;IACjB,2CAAmB,CAAA;IACnB,uCAAe,CAAA;IACf,iDAAyB,CAAA;IACzB,uCAAe,CAAA;IACf,yCAAiB,CAAA;IACjB,2CAAmB,CAAA;IACnB,iDAAyB,CAAA;IACzB,uDAA+B,CAAA;IAC/B,+CAAuB,CAAA;IACvB,4DAAoC,CAAA;IACpC,mCAAW,CAAA;IACX,iDAAyB,CAAA;IACzB,mDAA2B,CAAA;IAC3B,uCAAe,CAAA;IACf,uCAAe,CAAA;IACf,uCAAe,CAAA;IACf,6CAAqB,CAAA;IACrB,oDAA4B,CAAA;IAC5B,kDAA0B,CAAA;IAC1B,sDAA8B,CAAA;IAC9B,2CAAmB,CAAA;IACnB,0CAAkB,CAAA;IAClB,qCAAa,CAAA;IACb,2CAAmB,CAAA;IACnB,uCAAe,CAAA;IACf,qDAA6B,CAAA;IAC7B,uCAAe,CAAA;IACf,8CAAsB,CAAA;IACtB,4CAAoB,CAAA;IACpB,6CAAqB,CAAA;IACrB,2CAAmB,CAAA;IACnB,gDAAwB,CAAA;IACxB,yCAAiB,CAAA;IACjB,qCAAa,CAAA;IACb,wDAAgC,CAAA;IAChC,6CAAqB,CAAA;IACrB,uCAAe,CAAA;IACf,kDAA0B,CAAA;IAC1B,+CAAuB,CAAA;IACvB,qCAAa,CAAA;IACb,6CAAqB,CAAA;AACzB,CAAC,EAnHiB,oBAAoB,KAApB,oBAAoB,QAmHrC;AA8BD,yEAAyE;AACzE,MAAM,CAAN,IAAY,YAwCX;AAxCD,WAAY,YAAY;IACpB,qDAAW,CAAA;IACX,2DAAc,CAAA;IACd,6DAAe,CAAA;IACf,6DAAe,CAAA;IACf,+EAAwB,CAAA;IACxB,6DAAe,CAAA;IACf,6DAAe,CAAA;IACf,6DAAe,CAAA;IACf,6DAAe,CAAA;IACf,6DAAe,CAAA;IACf,8DAAgB,CAAA;IAChB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,8DAAgB,CAAA;IAChB,oEAAmB,CAAA;IACnB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,kEAAkB,CAAA;IAClB,oEAAmB,CAAA;IACnB,gEAAiB,CAAA;IACjB,kEAAkB,CAAA;IAClB,gEAAiB,CAAA;IACjB,gEAAiB,CAAA;IACjB,gEAAiB,CAAA;IACjB,gEAAiB,CAAA;IACjB,kEAAkB,CAAA;IAClB,gEAAiB,CAAA;IACjB,8DAAgB,CAAA;IAChB,sEAAoB,CAAA;IACpB,sEAAoB,CAAA;IACpB,sEAAoB,CAAA;IACpB,gEAAiB,CAAA;IACjB,gEAAiB,CAAA;IACjB,wEAAqB,CAAA;AACzB,CAAC,EAxCW,YAAY,KAAZ,YAAY,QAwCvB;AA2FD,MAAM,CAAN,IAAkB,8BAQjB;AARD,WAAkB,8BAA8B;IAC5C,6FAAa,CAAA;IACb,uFAAU,CAAA;IACV,yFAAW,CAAA;IACX,yFAAW,CAAA;IACX,iGAAe,CAAA;IACf,uFAAU,CAAA;IACV,mFAAQ,CAAA;AACZ,CAAC,EARiB,8BAA8B,KAA9B,8BAA8B,QAQ/C;AAiDD,MAAM,CAAN,IAAkB,mCAOjB;AAPD,WAAkB,mCAAmC;IACjD,4GAAgB,CAAA;IAChB,6FAAQ,CAAA;IACR,6FAAQ,CAAA;IACR,2FAAO,CAAA;IACP,6FAAQ,CAAA;IACR,6FAAQ,CAAA;AACZ,CAAC,EAPiB,mCAAmC,KAAnC,mCAAmC,QAOpD;AAoND,MAAM,UAAU,gCAAgC,CAC5C,QAAsB,EAAE,IAAO;IAE/B,OAAO,QAAQ,EAAE,OAAO,EAAE,YAAY,KAAK,IAAI,CAAC;AACpD,CAAC"}
61
node_modules/node-llama-cpp/dist/gguf/types/GgufTensorInfoTypes.d.ts
generated
vendored
Normal file
@@ -0,0 +1,61 @@
export type GgufTensorInfo = {
    readonly name: string;
    readonly dimensions: readonly (number | bigint)[];
    readonly ggmlType: GgmlType;
    readonly offset: number | bigint;
    /**
     * Adjusted offset relative to the file.
     *
     * Added by the GGUF parser - not part of the file's metadata.
     */
    readonly fileOffset: number | bigint;
    /**
     * For spliced metadata of multiple file parts, this will be the file part number.
     * Starts from `1`.
     *
     * Added by the GGUF parser - not part of the file's metadata.
     */
    readonly filePart: number;
};
export declare const enum GgmlType {
    F32 = 0,
    F16 = 1,
    Q4_0 = 2,
    Q4_1 = 3,
    Q4_2 = 4,
    Q4_3 = 5,
    Q5_0 = 6,
    Q5_1 = 7,
    Q8_0 = 8,
    Q8_1 = 9,
    Q2_K = 10,
    Q3_K = 11,
    Q4_K = 12,
    Q5_K = 13,
    Q6_K = 14,
    Q8_K = 15,
    IQ2_XXS = 16,
    IQ2_XS = 17,
    IQ3_XXS = 18,
    IQ1_S = 19,
    IQ4_NL = 20,
    IQ3_S = 21,
    IQ2_S = 22,
    IQ4_XS = 23,
    I8 = 24,
    I16 = 25,
    I32 = 26,
    I64 = 27,
    F64 = 28,
    IQ1_M = 29,
    BF16 = 30,
    Q4_0_4_4 = 31,
    Q4_0_4_8 = 32,
    Q4_0_8_8 = 33,
    TQ1_0 = 34,
    TQ2_0 = 35,
    IQ4_NL_4_4 = 36,
    IQ4_NL_4_8 = 37,
    IQ4_NL_8_8 = 38,
    MXFP4 = 39
}
44
node_modules/node-llama-cpp/dist/gguf/types/GgufTensorInfoTypes.js
generated
vendored
Normal file
@@ -0,0 +1,44 @@
export var GgmlType;
(function (GgmlType) {
    GgmlType[GgmlType["F32"] = 0] = "F32";
    GgmlType[GgmlType["F16"] = 1] = "F16";
    GgmlType[GgmlType["Q4_0"] = 2] = "Q4_0";
    GgmlType[GgmlType["Q4_1"] = 3] = "Q4_1";
    GgmlType[GgmlType["Q4_2"] = 4] = "Q4_2";
    GgmlType[GgmlType["Q4_3"] = 5] = "Q4_3";
    GgmlType[GgmlType["Q5_0"] = 6] = "Q5_0";
    GgmlType[GgmlType["Q5_1"] = 7] = "Q5_1";
    GgmlType[GgmlType["Q8_0"] = 8] = "Q8_0";
    GgmlType[GgmlType["Q8_1"] = 9] = "Q8_1";
    GgmlType[GgmlType["Q2_K"] = 10] = "Q2_K";
    GgmlType[GgmlType["Q3_K"] = 11] = "Q3_K";
    GgmlType[GgmlType["Q4_K"] = 12] = "Q4_K";
    GgmlType[GgmlType["Q5_K"] = 13] = "Q5_K";
    GgmlType[GgmlType["Q6_K"] = 14] = "Q6_K";
    GgmlType[GgmlType["Q8_K"] = 15] = "Q8_K";
    GgmlType[GgmlType["IQ2_XXS"] = 16] = "IQ2_XXS";
    GgmlType[GgmlType["IQ2_XS"] = 17] = "IQ2_XS";
    GgmlType[GgmlType["IQ3_XXS"] = 18] = "IQ3_XXS";
    GgmlType[GgmlType["IQ1_S"] = 19] = "IQ1_S";
    GgmlType[GgmlType["IQ4_NL"] = 20] = "IQ4_NL";
    GgmlType[GgmlType["IQ3_S"] = 21] = "IQ3_S";
    GgmlType[GgmlType["IQ2_S"] = 22] = "IQ2_S";
    GgmlType[GgmlType["IQ4_XS"] = 23] = "IQ4_XS";
    GgmlType[GgmlType["I8"] = 24] = "I8";
    GgmlType[GgmlType["I16"] = 25] = "I16";
    GgmlType[GgmlType["I32"] = 26] = "I32";
    GgmlType[GgmlType["I64"] = 27] = "I64";
    GgmlType[GgmlType["F64"] = 28] = "F64";
    GgmlType[GgmlType["IQ1_M"] = 29] = "IQ1_M";
    GgmlType[GgmlType["BF16"] = 30] = "BF16";
    GgmlType[GgmlType["Q4_0_4_4"] = 31] = "Q4_0_4_4";
    GgmlType[GgmlType["Q4_0_4_8"] = 32] = "Q4_0_4_8";
    GgmlType[GgmlType["Q4_0_8_8"] = 33] = "Q4_0_8_8";
    GgmlType[GgmlType["TQ1_0"] = 34] = "TQ1_0";
    GgmlType[GgmlType["TQ2_0"] = 35] = "TQ2_0";
    GgmlType[GgmlType["IQ4_NL_4_4"] = 36] = "IQ4_NL_4_4";
    GgmlType[GgmlType["IQ4_NL_4_8"] = 37] = "IQ4_NL_4_8";
    GgmlType[GgmlType["IQ4_NL_8_8"] = 38] = "IQ4_NL_8_8";
    GgmlType[GgmlType["MXFP4"] = 39] = "MXFP4"; // MXFP4 (1 block)
})(GgmlType || (GgmlType = {}));
//# sourceMappingURL=GgufTensorInfoTypes.js.map
1
node_modules/node-llama-cpp/dist/gguf/types/GgufTensorInfoTypes.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"GgufTensorInfoTypes.js","sourceRoot":"","sources":["../../../src/gguf/types/GgufTensorInfoTypes.ts"],"names":[],"mappings":"AAsBA,MAAM,CAAN,IAAkB,QAyCjB;AAzCD,WAAkB,QAAQ;IACtB,qCAAO,CAAA;IACP,qCAAO,CAAA;IACP,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,uCAAQ,CAAA;IACR,wCAAS,CAAA;IACT,wCAAS,CAAA;IACT,wCAAS,CAAA;IACT,wCAAS,CAAA;IACT,wCAAS,CAAA;IACT,wCAAS,CAAA;IACT,8CAAY,CAAA;IACZ,4CAAW,CAAA;IACX,8CAAY,CAAA;IACZ,0CAAU,CAAA;IACV,4CAAW,CAAA;IACX,0CAAU,CAAA;IACV,0CAAU,CAAA;IACV,4CAAW,CAAA;IACX,oCAAO,CAAA;IACP,sCAAQ,CAAA;IACR,sCAAQ,CAAA;IACR,sCAAQ,CAAA;IACR,sCAAQ,CAAA;IACR,0CAAU,CAAA;IACV,wCAAS,CAAA;IACT,gDAAa,CAAA;IACb,gDAAa,CAAA;IACb,gDAAa,CAAA;IACb,0CAAU,CAAA;IACV,0CAAU,CAAA;IACV,oDAAe,CAAA;IACf,oDAAe,CAAA;IACf,oDAAe,CAAA;IACf,0CAAU,CAAA,CAAC,kBAAkB;AACjC,CAAC,EAzCiB,QAAQ,KAAR,QAAQ,QAyCzB"}
6
node_modules/node-llama-cpp/dist/gguf/utils/GgufReadOffset.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
export declare class GgufReadOffset {
    offset: number;
    constructor(offset: number | GgufReadOffset);
    moveBy(amount: number): void;
    static resolveReadOffset(offset: number | GgufReadOffset): GgufReadOffset;
}
18
node_modules/node-llama-cpp/dist/gguf/utils/GgufReadOffset.js
generated
vendored
Normal file
@@ -0,0 +1,18 @@
export class GgufReadOffset {
    offset;
    constructor(offset) {
        if (offset instanceof GgufReadOffset)
            this.offset = offset.offset;
        else
            this.offset = offset;
    }
    moveBy(amount) {
        this.offset += amount;
    }
    static resolveReadOffset(offset) {
        if (offset instanceof GgufReadOffset)
            return offset;
        return new GgufReadOffset(offset);
    }
}
//# sourceMappingURL=GgufReadOffset.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/GgufReadOffset.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"GgufReadOffset.js","sourceRoot":"","sources":["../../../src/gguf/utils/GgufReadOffset.ts"],"names":[],"mappings":"AAAA,MAAM,OAAO,cAAc;IAChB,MAAM,CAAS;IAEtB,YAAmB,MAA+B;QAC9C,IAAI,MAAM,YAAY,cAAc;YAChC,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC,MAAM,CAAC;;YAE5B,IAAI,CAAC,MAAM,GAAG,MAAM,CAAC;IAC7B,CAAC;IAEM,MAAM,CAAC,MAAc;QACxB,IAAI,CAAC,MAAM,IAAI,MAAM,CAAC;IAC1B,CAAC;IAEM,MAAM,CAAC,iBAAiB,CAAC,MAA+B;QAC3D,IAAI,MAAM,YAAY,cAAc;YAChC,OAAO,MAAM,CAAC;QAElB,OAAO,IAAI,cAAc,CAAC,MAAM,CAAC,CAAC;IACtC,CAAC;CACJ"}
6
node_modules/node-llama-cpp/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
import { MetadataKeyValueRecord, MetadataValue } from "../types/GgufFileInfoTypes.js";
export declare function convertMetadataKeyValueRecordToNestedObject(keyValueRecord: MetadataKeyValueRecord, { logOverrideWarnings, ignoreKeys, noDirectSubNestingKeys }?: {
    logOverrideWarnings?: boolean;
    ignoreKeys?: readonly string[];
    noDirectSubNestingKeys?: readonly string[];
}): Record<string, MetadataValue>;
76
node_modules/node-llama-cpp/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.js
generated
vendored
Normal file
@@ -0,0 +1,76 @@
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
export function convertMetadataKeyValueRecordToNestedObject(keyValueRecord, { logOverrideWarnings = true, ignoreKeys = [], noDirectSubNestingKeys } = {}) {
    const nestedObject = {};
    const ignoreKeySet = new Set(ignoreKeys);
    const noDirectSubNestingKeysSet = new Set(noDirectSubNestingKeys);
    for (const [key, value] of Object.entries(keyValueRecord)) {
        if (ignoreKeySet.has(key))
            continue;
        const { lastObject, lastKey } = getNestedObject(key, nestedObject, noDirectSubNestingKeysSet);
        if (Object.hasOwn(lastObject, lastKey)) {
            const currentValue = lastObject[lastKey];
            delete lastObject[lastKey];
            flattenNestedKeys(lastObject, lastKey, currentValue, logOverrideWarnings);
            if (Object.hasOwn(lastObject, lastKey) && logOverrideWarnings)
                console.warn(getConsoleLogPrefix() + `Metadata key "${key}" is already occupied by a value. Overwriting it.`);
        }
        lastObject[lastKey] = value;
    }
    return nestedObject;
}
function getNestedObject(key, nestedObject, noDirectSubNestingKeysSet) {
    const nestedKey = key.split(".");
    let lastKey = "";
    let currentObject = nestedObject;
    const previousKeys = [];
    while (nestedKey.length > 0) {
        let currentKey = nestedKey.shift();
        while (noDirectSubNestingKeysSet.has([...previousKeys, currentKey].join(".")) && nestedKey.length > 0)
            currentKey += "." + nestedKey.shift();
        if (nestedKey.length === 0) {
            lastKey = currentKey;
            break;
        }
        if (!Object.hasOwn(currentObject, currentKey)) {
            const nextCurrentObject = {};
            currentObject[currentKey] = nextCurrentObject;
            currentObject = nextCurrentObject;
        }
        else {
            const value = currentObject[currentKey];
            if (value instanceof Array || value == null || typeof value !== "object") {
                if (nestedKey.length > 0) {
                    nestedKey.unshift(currentKey + "." + nestedKey.shift());
                    continue;
                }
                throw new Error(`Cannot create nested object for key "${key}". The key "${currentKey}" is already occupied by a non-object value.`);
            }
            currentObject = value;
        }
        previousKeys.push(currentKey);
    }
    return {
        lastObject: currentObject,
        lastKey
    };
}
function flattenNestedKeys(parent, newParentKey, keyValue, logOverrideWarnings = false) {
    if (keyValue === undefined)
        return;
    if (typeof keyValue !== "object" || keyValue instanceof Array) {
        parent[newParentKey] = keyValue;
        return;
    }
    for (const [key, subValue] of Object.entries(keyValue)) {
        const newKey = newParentKey + "." + key;
        if (Object.hasOwn(parent, newKey)) {
            const currentValue = parent[newKey];
            delete parent[newKey];
            flattenNestedKeys(parent, newKey, currentValue, logOverrideWarnings);
            if (Object.hasOwn(parent, newKey) && logOverrideWarnings)
                console.warn(getConsoleLogPrefix() + `Metadata key "${newKey}" is already occupied by a value. Overwriting it.`);
        }
        parent[newKey] = subValue;
    }
}
//# sourceMappingURL=convertMetadataKeyValueRecordToNestedObject.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/convertMetadataKeyValueRecordToNestedObject.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"convertMetadataKeyValueRecordToNestedObject.js","sourceRoot":"","sources":["../../../src/gguf/utils/convertMetadataKeyValueRecordToNestedObject.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AAGvE,MAAM,UAAU,2CAA2C,CACvD,cAAsC,EACtC,EACI,mBAAmB,GAAG,IAAI,EAC1B,UAAU,GAAG,EAAE,EACf,sBAAsB,KAKtB,EAAE;IAEN,MAAM,YAAY,GAAkC,EAAE,CAAC;IACvD,MAAM,YAAY,GAAG,IAAI,GAAG,CAAC,UAAU,CAAC,CAAC;IACzC,MAAM,yBAAyB,GAAG,IAAI,GAAG,CAAC,sBAAsB,CAAC,CAAC;IAElE,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,cAAc,CAAC,EAAE,CAAC;QACxD,IAAI,YAAY,CAAC,GAAG,CAAC,GAAG,CAAC;YACrB,SAAS;QAEb,MAAM,EAAC,UAAU,EAAE,OAAO,EAAC,GAAG,eAAe,CAAC,GAAG,EAAE,YAAY,EAAE,yBAAyB,CAAC,CAAC;QAC5F,IAAI,MAAM,CAAC,MAAM,CAAC,UAAU,EAAE,OAAO,CAAC,EAAE,CAAC;YACrC,MAAM,YAAY,GAAG,UAAU,CAAC,OAAO,CAAC,CAAC;YACzC,OAAO,UAAU,CAAC,OAAO,CAAC,CAAC;YAC3B,iBAAiB,CAAC,UAAU,EAAE,OAAO,EAAE,YAAY,EAAE,mBAAmB,CAAC,CAAC;YAE1E,IAAI,MAAM,CAAC,MAAM,CAAC,UAAU,EAAE,OAAO,CAAC,IAAI,mBAAmB;gBACzD,OAAO,CAAC,IAAI,CAAC,mBAAmB,EAAE,GAAG,iBAAiB,GAAG,mDAAmD,CAAC,CAAC;QACtH,CAAC;QAED,UAAU,CAAC,OAAO,CAAC,GAAG,KAAK,CAAC;IAChC,CAAC;IAED,OAAO,YAAY,CAAC;AACxB,CAAC;AAED,SAAS,eAAe,CAAC,GAAW,EAAE,YAAkC,EAAE,yBAAsC;IAC5G,MAAM,SAAS,GAAG,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;IACjC,IAAI,OAAO,GAAG,EAAE,CAAC;IAEjB,IAAI,aAAa,GAAG,YAAY,CAAC;IAEjC,MAAM,YAAY,GAAG,EAAE,CAAC;IACxB,OAAO,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;QAC1B,IAAI,UAAU,GAAG,SAAS,CAAC,KAAK,EAAG,CAAC;QAEpC,OAAO,yBAAyB,CAAC,GAAG,CAAC,CAAC,GAAG,YAAY,EAAE,UAAU,CAAC,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC;YACjG,UAAU,IAAI,GAAG,GAAG,SAAS,CAAC,KAAK,EAAG,CAAC;QAE3C,IAAI,SAAS,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;YACzB,OAAO,GAAG,UAAU,CAAC;YACrB,MAAM;QACV,CAAC;QAED,IAAI,CAAC,MAAM,CAAC,MAAM,CAAC,aAAa,EAAE,UAAU,CAAC,EAAE,CAAC;YAC5C,MAAM,iBAAiB,GAAG,EAAE,CAAC;YAC7B,aAAa,CAAC,UAAU,CAAC,GAAG,iBAAiB,CAAC;YAE9C,aAAa,GAAG,iBAAiB,CAAC;QACtC,CAAC;aAAM,CAAC;YACJ,MAAM,KAAK,GAAG,aAAa,CAAC,UAAU,CAAC,CAAC;YACxC,IAAI,KAAK,YAAY,KAAK,IAAI,KAAK,IAAI,IAAI,IAAI,OAAO,KAAK,KAAK,QAAQ,EAAE,CAAC;gBACvE,IAAI,SAAS,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;oBACvB,SAAS,CAAC,OAAO,CAAC,UAAU,GAAG,GAAG,GAAG,SAAS,CAAC,KAAK,EAAG,CAAC,CAAC;oBACzD,SAAS;gBACb,CAAC;gBAED,MAAM,IAAI,KAAK,CACX,wCAAwC,GAAG,eAAe,UAAU,8CAA8C,CACrH,CAAC;YACN,CAAC;YAED,aAAa,GAAG,KAAK,CAAC;QAC1B,CAAC;QAED,YAAY,CAAC,IAAI,CAAC,UAAU,CAAC,CAAC;IAClC,CAAC;IAED,OAAO;QACH,UAAU,EAAE,aAAa;QACzB,OAAO;KACV,CAAC;AACN,CAAC;AAED,SAAS,iBAAiB,CACtB,MAA4B,EAC5B,YAAoB,EACpB,QAA0D,EAC1D,sBAA+B,KAAK;IAEpC,IAAI,QAAQ,KAAK,SAAS;QACtB,OAAO;IAEX,IAAI,OAAO,QAAQ,KAAK,QAAQ,IAAI,QAAQ,YAAY,KAAK,EAAE,CAAC;QAC5D,MAAM,CAAC,YAAY,CAAC,GAAG,QAAQ,CAAC;QAChC,OAAO;IACX,CAAC;IAED,KAAK,MAAM,CAAC,GAAG,EAAE,QAAQ,CAAC,IAAK,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAsD,EAAE,CAAC;QAC3G,MAAM,MAAM,GAAG,YAAY,GAAG,GAAG,GAAG,GAAG,CAAC;QAExC,IAAI,MAAM,CAAC,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,EAAE,CAAC;YAChC,MAAM,YAAY,GAAG,MAAM,CAAC,MAAM,CAAC,CAAC;YACpC,OAAO,MAAM,CAAC,MAAM,CAAC,CAAC;YACtB,iBAAiB,CAAC,MAAM,EAAE,MAAM,EAAE,YAAY,EAAE,mBAAmB,CAAC,CAAC;YAErE,IAAI,MAAM,CAAC,MAAM,CAAC,MAAM,EAAE,MAAM,CAAC,IAAI,mBAAmB;gBACpD,OAAO,CAAC,IAAI,CAAC,mBAAmB,EAAE,GAAG,iBAAiB,MAAM,mDAAmD,CAAC,CAAC;QACzH,CAAC;QAED,MAAM,CAAC,MAAM,CAAC,GAAG,QAAQ,CAAC;IAC9B,CAAC;AACL,CAAC"}
4
node_modules/node-llama-cpp/dist/gguf/utils/getGgufFileTypeName.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
/**
 * Convert a GGUF file type number to its corresponding type name
 */
export declare function getGgufFileTypeName(fileType?: number): "ALL_F32" | "MOSTLY_F16" | "MOSTLY_Q4_0" | "MOSTLY_Q4_1" | "MOSTLY_Q4_1_SOME_F16" | "MOSTLY_Q4_2" | "MOSTLY_Q4_3" | "MOSTLY_Q8_0" | "MOSTLY_Q5_0" | "MOSTLY_Q5_1" | "MOSTLY_Q2_K" | "MOSTLY_Q3_K_S" | "MOSTLY_Q3_K_M" | "MOSTLY_Q3_K_L" | "MOSTLY_Q4_K_S" | "MOSTLY_Q4_K_M" | "MOSTLY_Q5_K_S" | "MOSTLY_Q5_K_M" | "MOSTLY_Q6_K" | "MOSTLY_IQ2_XXS" | "MOSTLY_IQ2_XS" | "MOSTLY_Q2_K_S" | "MOSTLY_IQ3_XS" | "MOSTLY_IQ3_XXS" | "MOSTLY_IQ1_S" | "MOSTLY_IQ4_NL" | "MOSTLY_IQ3_S" | "MOSTLY_IQ3_M" | "MOSTLY_IQ2_S" | "MOSTLY_IQ2_M" | "MOSTLY_IQ4_XS" | "MOSTLY_IQ1_M" | "MOSTLY_BF16" | "MOSTLY_Q4_0_4_4" | "MOSTLY_Q4_0_4_8" | "MOSTLY_Q4_0_8_8" | "MOSTLY_TQ1_0" | "MOSTLY_TQ2_0" | "MOSTLY_MXFP4_MOE" | undefined;
13
node_modules/node-llama-cpp/dist/gguf/utils/getGgufFileTypeName.js
generated
vendored
Normal file
@@ -0,0 +1,13 @@
import { GgufFileType } from "../types/GgufMetadataTypes.js";
const fileTypeNumberToNameMap = new Map();
for (const [key, value] of Object.entries(GgufFileType)) {
    if (typeof value === "number")
        fileTypeNumberToNameMap.set(value, key);
}
/**
 * Convert a GGUF file type number to its corresponding type name
 */
export function getGgufFileTypeName(fileType) {
    return fileTypeNumberToNameMap.get(fileType) ?? undefined;
}
//# sourceMappingURL=getGgufFileTypeName.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/getGgufFileTypeName.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getGgufFileTypeName.js","sourceRoot":"","sources":["../../../src/gguf/utils/getGgufFileTypeName.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,YAAY,EAAC,MAAM,+BAA+B,CAAC;AAE3D,MAAM,uBAAuB,GAAG,IAAI,GAAG,EAAqC,CAAC;AAC7E,KAAK,MAAM,CAAC,GAAG,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,YAAY,CAAC,EAAE,CAAC;IACtD,IAAI,OAAO,KAAK,KAAK,QAAQ;QACzB,uBAAuB,CAAC,GAAG,CAAC,KAAK,EAAE,GAAgC,CAAC,CAAC;AAC7E,CAAC;AAED;;GAEG;AACH,MAAM,UAAU,mBAAmB,CAAC,QAAiB;IACjD,OAAO,uBAAuB,CAAC,GAAG,CAAC,QAAS,CAAC,IAAI,SAAS,CAAC;AAC/D,CAAC"}
3
node_modules/node-llama-cpp/dist/gguf/utils/getGgufMetadataArchitectureData.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
import { GgufArchitectureType, GgufMetadata } from "../types/GgufMetadataTypes.js";
import { MergeOptionalUnionTypes } from "../../utils/mergeUnionTypes.js";
export declare function getGgufMetadataArchitectureData<const T extends GgufArchitectureType>(ggufMetadata: GgufMetadata<T>): (GgufArchitectureType extends T ? MergeOptionalUnionTypes<Exclude<GgufMetadata[T], undefined>> : GgufMetadata<T>[T]);
4
node_modules/node-llama-cpp/dist/gguf/utils/getGgufMetadataArchitectureData.js
generated
vendored
Normal file
@@ -0,0 +1,4 @@
export function getGgufMetadataArchitectureData(ggufMetadata) {
    return ggufMetadata[ggufMetadata.general?.architecture] ?? {};
}
//# sourceMappingURL=getGgufMetadataArchitectureData.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/getGgufMetadataArchitectureData.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getGgufMetadataArchitectureData.js","sourceRoot":"","sources":["../../../src/gguf/utils/getGgufMetadataArchitectureData.ts"],"names":[],"mappings":"AAGA,MAAM,UAAU,+BAA+B,CAAuC,YAA6B;IAK/G,OAAO,YAAY,CAAC,YAAY,CAAC,OAAO,EAAE,YAAY,CAAC,IAAI,EAAS,CAAC;AACzE,CAAC"}
1
node_modules/node-llama-cpp/dist/gguf/utils/getGgufMetadataKeyValue.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
export declare function getGgufMetadataKeyValue(metadata: Record<string, any>, key: string): any;
27
node_modules/node-llama-cpp/dist/gguf/utils/getGgufMetadataKeyValue.js
generated
vendored
Normal file
@@ -0,0 +1,27 @@
export function getGgufMetadataKeyValue(metadata, key) {
    return readMetadataKey(metadata, key.split("."));
}
function readMetadataKey(metadata, keyParts) {
    for (const [metadataKey, value] of Object.entries(metadata)) {
        const matchLength = checkMatchLength(metadataKey, keyParts);
        if (matchLength === 0)
            continue;
        if (matchLength === keyParts.length)
            return value;
        const res = readMetadataKey(value, keyParts.slice(matchLength));
        if (res !== undefined)
            return res;
    }
    return undefined;
}
function checkMatchLength(metadataKey, keyParts) {
    const metadataKeyParts = metadataKey.split(".");
    if (metadataKeyParts.length > keyParts.length)
        return 0;
    for (let i = 0; i < metadataKeyParts.length; i++) {
        if (metadataKeyParts[i] !== keyParts[i])
            return 0;
    }
    return metadataKeyParts.length;
}
//# sourceMappingURL=getGgufMetadataKeyValue.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/getGgufMetadataKeyValue.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getGgufMetadataKeyValue.js","sourceRoot":"","sources":["../../../src/gguf/utils/getGgufMetadataKeyValue.ts"],"names":[],"mappings":"AAAA,MAAM,UAAU,uBAAuB,CAAC,QAA6B,EAAE,GAAW;IAC9E,OAAO,aAAa,CAAC,QAAQ,EAAE,GAAG,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,CAAC;AACnD,CAAC;AAED,SAAS,aAAa,CAAC,QAA6B,EAAE,QAAkB;IACpE,KAAK,MAAM,CAAC,WAAW,EAAE,KAAK,CAAC,IAAI,MAAM,CAAC,OAAO,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC1D,MAAM,WAAW,GAAG,gBAAgB,CAAC,WAAW,EAAE,QAAQ,CAAC,CAAC;QAC5D,IAAI,WAAW,KAAK,CAAC;YACjB,SAAS;QAEb,IAAI,WAAW,KAAK,QAAQ,CAAC,MAAM;YAC/B,OAAO,KAAK,CAAC;QAEjB,MAAM,GAAG,GAAG,aAAa,CAAC,KAAK,EAAE,QAAQ,CAAC,KAAK,CAAC,WAAW,CAAC,CAAC,CAAC;QAC9D,IAAI,GAAG,KAAK,SAAS;YACjB,OAAO,GAAG,CAAC;IACnB,CAAC;IAED,OAAO,SAAS,CAAC;AACrB,CAAC;AAED,SAAS,gBAAgB,CAAC,WAAmB,EAAE,QAAkB;IAC7D,MAAM,gBAAgB,GAAG,WAAW,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;IAEhD,IAAI,gBAAgB,CAAC,MAAM,GAAG,QAAQ,CAAC,MAAM;QACzC,OAAO,CAAC,CAAC;IAEb,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,GAAG,gBAAgB,CAAC,MAAM,EAAE,CAAC,EAAE,EAAE,CAAC;QAC/C,IAAI,gBAAgB,CAAC,CAAC,CAAC,KAAK,QAAQ,CAAC,CAAC,CAAC;YACnC,OAAO,CAAC,CAAC;IACjB,CAAC;IAED,OAAO,gBAAgB,CAAC,MAAM,CAAC;AACnC,CAAC"}
2
node_modules/node-llama-cpp/dist/gguf/utils/ggufQuantNames.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
import { GgufFileType } from "../types/GgufMetadataTypes.js";
export declare const ggufQuantNames: Map<string, GgufFileType>;
41
node_modules/node-llama-cpp/dist/gguf/utils/ggufQuantNames.js
generated
vendored
Normal file
@@ -0,0 +1,41 @@
import { GgufFileType } from "../types/GgufMetadataTypes.js";
export const ggufQuantNames = new Map([
    ["Q4_0", GgufFileType.MOSTLY_Q4_0],
    ["Q4_1", GgufFileType.MOSTLY_Q4_1],
    ["MXFP4", GgufFileType.MOSTLY_MXFP4_MOE],
    ["Q5_0", GgufFileType.MOSTLY_Q5_0],
    ["Q5_1", GgufFileType.MOSTLY_Q5_1],
    ["IQ2_XXS", GgufFileType.MOSTLY_IQ2_XXS],
    ["IQ2_XS", GgufFileType.MOSTLY_IQ2_XS],
    ["IQ2_S", GgufFileType.MOSTLY_IQ2_S],
    ["IQ2_M", GgufFileType.MOSTLY_IQ2_M],
    ["IQ1_S", GgufFileType.MOSTLY_IQ1_S],
    ["IQ1_M", GgufFileType.MOSTLY_IQ1_M],
    ["TQ1_0", GgufFileType.MOSTLY_TQ1_0],
    ["TQ2_0", GgufFileType.MOSTLY_TQ2_0],
    ["Q2_K", GgufFileType.MOSTLY_Q2_K],
    ["Q2_K_S", GgufFileType.MOSTLY_Q2_K_S],
    ["IQ3_XXS", GgufFileType.MOSTLY_IQ3_XXS],
    ["IQ3_S", GgufFileType.MOSTLY_IQ3_S],
    ["IQ3_M", GgufFileType.MOSTLY_IQ3_M],
    ["Q3_K", GgufFileType.MOSTLY_Q3_K_M],
    ["IQ3_XS", GgufFileType.MOSTLY_IQ3_XS],
    ["Q3_K_S", GgufFileType.MOSTLY_Q3_K_S],
    ["Q3_K_M", GgufFileType.MOSTLY_Q3_K_M],
    ["Q3_K_L", GgufFileType.MOSTLY_Q3_K_L],
    ["IQ4_NL", GgufFileType.MOSTLY_IQ4_NL],
    ["IQ4_XS", GgufFileType.MOSTLY_IQ4_XS],
    ["Q4_K", GgufFileType.MOSTLY_Q4_K_M],
    ["Q4_K_S", GgufFileType.MOSTLY_Q4_K_S],
    ["Q4_K_M", GgufFileType.MOSTLY_Q4_K_M],
    ["Q5_K", GgufFileType.MOSTLY_Q5_K_M],
    ["Q5_K_S", GgufFileType.MOSTLY_Q5_K_S],
    ["Q5_K_M", GgufFileType.MOSTLY_Q5_K_M],
    ["Q6_K", GgufFileType.MOSTLY_Q6_K],
    ["Q8_0", GgufFileType.MOSTLY_Q8_0],
    ["F16", GgufFileType.MOSTLY_F16],
    ["BF16", GgufFileType.MOSTLY_BF16],
    ["F32", GgufFileType.ALL_F32],
    ["COPY", GgufFileType.ALL_F32]
]);
//# sourceMappingURL=ggufQuantNames.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/ggufQuantNames.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"ggufQuantNames.js","sourceRoot":"","sources":["../../../src/gguf/utils/ggufQuantNames.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,YAAY,EAAC,MAAM,+BAA+B,CAAC;AAE3D,MAAM,CAAC,MAAM,cAAc,GAAG,IAAI,GAAG,CAAuB;IACxD,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,OAAO,EAAE,YAAY,CAAC,gBAAgB,CAAC;IACxC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,SAAS,EAAE,YAAY,CAAC,cAAc,CAAC;IACxC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,SAAS,EAAE,YAAY,CAAC,cAAc,CAAC;IACxC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,OAAO,EAAE,YAAY,CAAC,YAAY,CAAC;IACpC,CAAC,MAAM,EAAE,YAAY,CAAC,aAAa,CAAC;IACpC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,MAAM,EAAE,YAAY,CAAC,aAAa,CAAC;IACpC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,MAAM,EAAE,YAAY,CAAC,aAAa,CAAC;IACpC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,QAAQ,EAAE,YAAY,CAAC,aAAa,CAAC;IACtC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,KAAK,EAAE,YAAY,CAAC,UAAU,CAAC;IAChC,CAAC,MAAM,EAAE,YAAY,CAAC,WAAW,CAAC;IAClC,CAAC,KAAK,EAAE,YAAY,CAAC,OAAO,CAAC;IAC7B,CAAC,MAAM,EAAE,YAAY,CAAC,OAAO,CAAC;CACjC,CAAC,CAAC"}
2
node_modules/node-llama-cpp/dist/gguf/utils/normalizeGgufDownloadUrl.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
import { ModelDownloadEndpoints } from "../../utils/modelDownloadEndpoints.js";
export declare function normalizeGgufDownloadUrl(url: string, endpoints?: ModelDownloadEndpoints): string;
18
node_modules/node-llama-cpp/dist/gguf/utils/normalizeGgufDownloadUrl.js
generated
vendored
Normal file
@@ -0,0 +1,18 @@
import { resolveHuggingFaceEndpoint } from "../../utils/modelDownloadEndpoints.js";
export function normalizeGgufDownloadUrl(url, endpoints) {
    const parsedUrl = new URL(url);
    if (parsedUrl.hostname === "huggingface.co" || parsedUrl.hostname === "hf.co" ||
        parsedUrl.hostname === (new URL(resolveHuggingFaceEndpoint(endpoints))).hostname) {
        const pathnameParts = parsedUrl.pathname.split("/");
        if (pathnameParts.length > 3 && pathnameParts[3] === "blob") {
            const newUrl = new URL(url);
            pathnameParts[3] = "resolve";
            newUrl.pathname = pathnameParts.join("/");
            if (newUrl.searchParams.get("download") !== "true")
                newUrl.searchParams.set("download", "true");
            return newUrl.href;
        }
    }
    return url;
}
//# sourceMappingURL=normalizeGgufDownloadUrl.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/normalizeGgufDownloadUrl.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"normalizeGgufDownloadUrl.js","sourceRoot":"","sources":["../../../src/gguf/utils/normalizeGgufDownloadUrl.ts"],"names":[],"mappings":"AAAA,OAAO,EAAyB,0BAA0B,EAAC,MAAM,uCAAuC,CAAC;AAEzG,MAAM,UAAU,wBAAwB,CAAC,GAAW,EAAE,SAAkC;IACpF,MAAM,SAAS,GAAG,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC;IAE/B,IAAI,SAAS,CAAC,QAAQ,KAAK,gBAAgB,IAAI,SAAS,CAAC,QAAQ,KAAK,OAAO;QACzE,SAAS,CAAC,QAAQ,KAAK,CAAC,IAAI,GAAG,CAAC,0BAA0B,CAAC,SAAS,CAAC,CAAC,CAAC,CAAC,QAAQ,EAClF,CAAC;QACC,MAAM,aAAa,GAAG,SAAS,CAAC,QAAQ,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAEpD,IAAI,aAAa,CAAC,MAAM,GAAG,CAAC,IAAI,aAAa,CAAC,CAAC,CAAC,KAAK,MAAM,EAAE,CAAC;YAC1D,MAAM,MAAM,GAAG,IAAI,GAAG,CAAC,GAAG,CAAC,CAAC;YAC5B,aAAa,CAAC,CAAC,CAAC,GAAG,SAAS,CAAC;YAC7B,MAAM,CAAC,QAAQ,GAAG,aAAa,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;YAE1C,IAAI,MAAM,CAAC,YAAY,CAAC,GAAG,CAAC,UAAU,CAAC,KAAK,MAAM;gBAC9C,MAAM,CAAC,YAAY,CAAC,GAAG,CAAC,UAAU,EAAE,MAAM,CAAC,CAAC;YAEhD,OAAO,MAAM,CAAC,IAAI,CAAC;QACvB,CAAC;IACL,CAAC;IAED,OAAO,GAAG,CAAC;AACf,CAAC"}
2
node_modules/node-llama-cpp/dist/gguf/utils/resolveBinarySplitGgufPartUrls.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export declare function resolveBinarySplitGgufPartUrls(ggufUrl: string): string | string[];
export declare function getFilenameForBinarySplitGgufPartUrls(urls: string[]): string | undefined;
38
node_modules/node-llama-cpp/dist/gguf/utils/resolveBinarySplitGgufPartUrls.js
generated
vendored
Normal file
@@ -0,0 +1,38 @@
import filenamify from "filenamify";
const binarySplitGgufPartsRegex = /\.gguf\.part(?<part>\d+)of(?<parts>\d+)$/;
export function resolveBinarySplitGgufPartUrls(ggufUrl) {
    const parsedGgufUrl = new URL(ggufUrl);
    const binaryPartsMatch = parsedGgufUrl.pathname.match(binarySplitGgufPartsRegex);
    if (binaryPartsMatch != null) {
        const partString = binaryPartsMatch.groups?.part;
        const part = Number(partString);
        const partsString = binaryPartsMatch.groups?.parts;
        const parts = Number(partsString);
        if (partString == null || !Number.isFinite(part) || partsString == null || !Number.isFinite(parts) || part > parts || part === 0 ||
            parts === 0)
            return ggufUrl;
        const ggufIndex = parsedGgufUrl.pathname.indexOf(".gguf");
        const pathnameWithoutPart = parsedGgufUrl.pathname.slice(0, ggufIndex + ".gguf".length);
        const res = [];
        for (let i = 1; i <= parts; i++) {
            const url = new URL(parsedGgufUrl.href);
            url.pathname = pathnameWithoutPart + `.part${String(i).padStart(partString.length, "0")}of${partsString}`;
            res.push(url.href);
        }
        return res;
    }
    return ggufUrl;
}
export function getFilenameForBinarySplitGgufPartUrls(urls) {
    if (urls.length === 0)
        return undefined;
    const firstParsedUrl = new URL(urls[0]);
    if (binarySplitGgufPartsRegex.test(firstParsedUrl.pathname)) {
        const ggufIndex = firstParsedUrl.pathname.toLowerCase().indexOf(".gguf");
        const urlWithoutPart = firstParsedUrl.pathname.slice(0, ggufIndex + ".gguf".length);
        const filename = decodeURIComponent(urlWithoutPart.split("/").pop());
        return filenamify(filename);
    }
    return undefined;
}
//# sourceMappingURL=resolveBinarySplitGgufPartUrls.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/resolveBinarySplitGgufPartUrls.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"resolveBinarySplitGgufPartUrls.js","sourceRoot":"","sources":["../../../src/gguf/utils/resolveBinarySplitGgufPartUrls.ts"],"names":[],"mappings":"AAAA,OAAO,UAAU,MAAM,YAAY,CAAC;AAEpC,MAAM,yBAAyB,GAAG,0CAA0C,CAAC;AAE7E,MAAM,UAAU,8BAA8B,CAAC,OAAe;IAC1D,MAAM,aAAa,GAAG,IAAI,GAAG,CAAC,OAAO,CAAC,CAAC;IACvC,MAAM,gBAAgB,GAAG,aAAa,CAAC,QAAQ,CAAC,KAAK,CAAC,yBAAyB,CAAC,CAAC;IACjF,IAAI,gBAAgB,IAAI,IAAI,EAAE,CAAC;QAC3B,MAAM,UAAU,GAAG,gBAAgB,CAAC,MAAM,EAAE,IAAI,CAAC;QACjD,MAAM,IAAI,GAAG,MAAM,CAAC,UAAU,CAAC,CAAC;QAChC,MAAM,WAAW,GAAG,gBAAgB,CAAC,MAAM,EAAE,KAAK,CAAC;QACnD,MAAM,KAAK,GAAG,MAAM,CAAC,WAAW,CAAC,CAAC;QAElC,IAAI,UAAU,IAAI,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,IAAI,CAAC,IAAI,WAAW,IAAI,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,IAAI,IAAI,GAAG,KAAK,IAAI,IAAI,KAAK,CAAC;YAC5H,KAAK,KAAK,CAAC;YAEX,OAAO,OAAO,CAAC;QAEnB,MAAM,SAAS,GAAG,aAAa,CAAC,QAAQ,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;QAC1D,MAAM,mBAAmB,GAAG,aAAa,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,SAAS,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC;QAExF,MAAM,GAAG,GAAa,EAAE,CAAC;QACzB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,KAAK,EAAE,CAAC,EAAE,EAAE,CAAC;YAC9B,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,aAAa,CAAC,IAAI,CAAC,CAAC;YACxC,GAAG,CAAC,QAAQ,GAAG,mBAAmB,GAAG,QAAQ,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,UAAU,CAAC,MAAM,EAAE,GAAG,CAAC,KAAK,WAAW,EAAE,CAAC;YAC1G,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,IAAI,CAAC,CAAC;QACvB,CAAC;QAED,OAAO,GAAG,CAAC;IACf,CAAC;IAED,OAAO,OAAO,CAAC;AACnB,CAAC;AAED,MAAM,UAAU,qCAAqC,CAAC,IAAc;IAChE,IAAI,IAAI,CAAC,MAAM,KAAK,CAAC;QACjB,OAAO,SAAS,CAAC;IAErB,MAAM,cAAc,GAAG,IAAI,GAAG,CAAC,IAAI,CAAC,CAAC,CAAE,CAAC,CAAC;IAEzC,IAAI,yBAAyB,CAAC,IAAI,CAAC,cAAc,CAAC,QAAQ,CAAC,EAAE,CAAC;QAC1D,MAAM,SAAS,GAAG,cAAc,CAAC,QAAQ,CAAC,WAAW,EAAE,CAAC,OAAO,CAAC,OAAO,CAAC,CAAC;QACzE,MAAM,cAAc,GAAG,cAAc,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,SAAS,GAAG,OAAO,CAAC,MAAM,CAAC,CAAC;QAEpF,MAAM,QAAQ,GAAG,kBAAkB,CAAC,cAAc,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC,GAAG,EAAG,CAAC,CAAC;QACtE,OAAO,UAAU,CAAC,QAAQ,CAAC,CAAC;IAChC,CAAC;IAED,OAAO,SAAS,CAAC;AACrB,CAAC"}
7
node_modules/node-llama-cpp/dist/gguf/utils/resolveSplitGgufParts.d.ts
generated
vendored
Normal file
@@ -0,0 +1,7 @@
export declare function resolveSplitGgufParts(ggufPathOrUri: string): string[];
export declare function getGgufSplitPartsInfo(ggufPath: string): {
    part: number;
    parts: number;
    matchLength: number;
} | null;
export declare function createSplitPartFilename(filename: string, part: number, parts: number): string;
64
node_modules/node-llama-cpp/dist/gguf/utils/resolveSplitGgufParts.js
generated
vendored
Normal file
@@ -0,0 +1,64 @@
import { isUrl } from "../../utils/isUrl.js";
const splitGgufPartRegex = /-(?<part>\d{5})-of-(?<parts>\d{5})\.gguf$/;
export function resolveSplitGgufParts(ggufPathOrUri) {
    if (isUrl(ggufPathOrUri)) {
        const parsedUrl = new URL(ggufPathOrUri);
        return resolveParts(parsedUrl.pathname).map((part) => {
            const url = new URL(ggufPathOrUri);
            url.pathname = part;
            return url.href;
        });
    }
    return resolveParts(ggufPathOrUri);
}
function resolveParts(ggufPath) {
    const splitPartMatch = ggufPath.match(splitGgufPartRegex);
    if (splitPartMatch != null) {
        const partsInfo = getGgufSplitPartsInfo(ggufPath);
        if (partsInfo == null)
            return [ggufPath];
        const { parts, matchLength } = partsInfo;
        const commonPath = ggufPath.slice(0, ggufPath.length - matchLength);
        const res = [];
        for (let i = 1; i <= parts; i++)
            res.push(commonPath + `-${String(i).padStart(5, "0")}-of-${String(parts).padStart(5, "0")}.gguf`);
        return res;
    }
    return [ggufPath];
}
export function getGgufSplitPartsInfo(ggufPath) {
    let checkPath = ggufPath;
    if (isUrl(checkPath)) {
        const parsedUrl = new URL(checkPath);
        checkPath = parsedUrl.pathname;
    }
    const splitPartMatch = checkPath.match(splitGgufPartRegex);
    if (splitPartMatch != null) {
        const part = Number(splitPartMatch.groups?.part);
        const parts = Number(splitPartMatch.groups?.parts);
        const matchLength = splitPartMatch[0]?.length;
        if (matchLength == null || !Number.isFinite(part) || !Number.isFinite(parts) || part > parts || part === 0 || parts === 0)
            return null;
        return {
            part,
            parts,
            matchLength
        };
    }
    return null;
}
export function createSplitPartFilename(filename, part, parts) {
    const splitPartMatch = filename.match(splitGgufPartRegex);
    if (splitPartMatch != null) {
        const partsInfo = getGgufSplitPartsInfo(filename);
        if (partsInfo != null) {
            const { matchLength } = partsInfo;
            const commonPath = filename.slice(0, filename.length - matchLength);
            filename = commonPath + ".gguf";
        }
    }
    if (filename.toLowerCase().endsWith(".gguf"))
        filename = filename.slice(0, -".gguf".length);
    return `${filename}-${String(part).padStart(5, "0")}-of-${String(parts).padStart(5, "0")}.gguf`;
}
//# sourceMappingURL=resolveSplitGgufParts.js.map
1
node_modules/node-llama-cpp/dist/gguf/utils/resolveSplitGgufParts.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"resolveSplitGgufParts.js","sourceRoot":"","sources":["../../../src/gguf/utils/resolveSplitGgufParts.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,KAAK,EAAC,MAAM,sBAAsB,CAAC;AAE3C,MAAM,kBAAkB,GAAG,2CAA2C,CAAC;AAEvE,MAAM,UAAU,qBAAqB,CAAC,aAAqB;IACvD,IAAI,KAAK,CAAC,aAAa,CAAC,EAAE,CAAC;QACvB,MAAM,SAAS,GAAG,IAAI,GAAG,CAAC,aAAa,CAAC,CAAC;QAEzC,OAAO,YAAY,CAAC,SAAS,CAAC,QAAQ,CAAC,CAAC,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE;YACjD,MAAM,GAAG,GAAG,IAAI,GAAG,CAAC,aAAa,CAAC,CAAC;YACnC,GAAG,CAAC,QAAQ,GAAG,IAAI,CAAC;YACpB,OAAO,GAAG,CAAC,IAAI,CAAC;QACpB,CAAC,CAAC,CAAC;IACP,CAAC;IAED,OAAO,YAAY,CAAC,aAAa,CAAC,CAAC;AACvC,CAAC;AAED,SAAS,YAAY,CAAC,QAAgB;IAClC,MAAM,cAAc,GAAG,QAAQ,CAAC,KAAK,CAAC,kBAAkB,CAAC,CAAC;IAE1D,IAAI,cAAc,IAAI,IAAI,EAAE,CAAC;QACzB,MAAM,SAAS,GAAG,qBAAqB,CAAC,QAAQ,CAAC,CAAC;QAElD,IAAI,SAAS,IAAI,IAAI;YACjB,OAAO,CAAC,QAAQ,CAAC,CAAC;QAEtB,MAAM,EAAC,KAAK,EAAE,WAAW,EAAC,GAAG,SAAS,CAAC;QAEvC,MAAM,UAAU,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,CAAC,MAAM,GAAG,WAAW,CAAC,CAAC;QAEpE,MAAM,GAAG,GAAa,EAAE,CAAC;QACzB,KAAK,IAAI,CAAC,GAAG,CAAC,EAAE,CAAC,IAAI,KAAK,EAAE,CAAC,EAAE;YAC3B,GAAG,CAAC,IAAI,CAAC,UAAU,GAAG,IAAI,MAAM,CAAC,CAAC,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,GAAG,CAAC,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,GAAG,CAAC,OAAO,CAAC,CAAC;QAEtG,OAAO,GAAG,CAAC;IACf,CAAC;IAED,OAAO,CAAC,QAAQ,CAAC,CAAC;AACtB,CAAC;AAED,MAAM,UAAU,qBAAqB,CAAC,QAAgB;IAClD,IAAI,SAAS,GAAG,QAAQ,CAAC;IAEzB,IAAI,KAAK,CAAC,SAAS,CAAC,EAAE,CAAC;QACnB,MAAM,SAAS,GAAG,IAAI,GAAG,CAAC,SAAS,CAAC,CAAC;QACrC,SAAS,GAAG,SAAS,CAAC,QAAQ,CAAC;IACnC,CAAC;IAED,MAAM,cAAc,GAAG,SAAS,CAAC,KAAK,CAAC,kBAAkB,CAAC,CAAC;IAE3D,IAAI,cAAc,IAAI,IAAI,EAAE,CAAC;QACzB,MAAM,IAAI,GAAG,MAAM,CAAC,cAAc,CAAC,MAAM,EAAE,IAAI,CAAC,CAAC;QACjD,MAAM,KAAK,GAAG,MAAM,CAAC,cAAc,CAAC,MAAM,EAAE,KAAK,CAAC,CAAC;QACnD,MAAM,WAAW,GAAG,cAAc,CAAC,CAAC,CAAC,EAAE,MAAM,CAAC;QAE9C,IAAI,WAAW,IAAI,IAAI,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,IAAI,CAAC,IAAI,CAAC,MAAM,CAAC,QAAQ,CAAC,KAAK,CAAC,IAAI,IAAI,GAAG,KAAK,IAAI,IAAI,KAAK,CAAC,IAAI,KAAK,KAAK,CAAC;YACrH,OAAO,IAAI,CAAC;QAEhB,OAAO;YACH,IAAI;YACJ,KAAK;YACL,WAAW;SACd,CAAC;IACN,CAAC;IAED,OAAO,IAAI,CAAC;AAChB,CAAC;AAED,MAAM,UAAU,uBAAuB,CAAC,QAAgB,EAAE,IAAY,EAAE,KAAa;IACjF,MAAM,cAAc,GAAG,QAAQ,CAAC,KAAK,CAAC,kBAAkB,CAAC,CAAC;IAC1D,IAAI,cAAc,IAAI,IAAI,EAAE,CAAC;QACzB,MAAM,SAAS,GAAG,qBAAqB,CAAC,QAAQ,CAAC,CAAC;QAClD,IAAI,SAAS,IAAI,IAAI,EAAE,CAAC;YACpB,MAAM,EAAC,WAAW,EAAC,GAAG,SAAS,CAAC;YAChC,MAAM,UAAU,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,QAAQ,CAAC,MAAM,GAAG,WAAW,CAAC,CAAC;YAEpE,QAAQ,GAAG,UAAU,GAAG,OAAO,CAAC;QACpC,CAAC;IACL,CAAC;IAED,IAAI,QAAQ,CAAC,WAAW,EAAE,CAAC,QAAQ,CAAC,OAAO,CAAC;QACxC,QAAQ,GAAG,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC,CAAC;IAElD,OAAO,GAAG,QAAQ,IAAI,MAAM,CAAC,IAAI,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,GAAG,CAAC,OAAO,MAAM,CAAC,KAAK,CAAC,CAAC,QAAQ,CAAC,CAAC,EAAE,GAAG,CAAC,OAAO,CAAC;AACpG,CAAC"}