First upload version 0.0.1
27  node_modules/node-llama-cpp/dist/ChatWrapper.d.ts  generated  vendored  Normal file
@@ -0,0 +1,27 @@
import { ChatHistoryItem, ChatModelFunctionCall, ChatModelFunctions, ChatModelResponse, ChatWrapperGenerateContextStateOptions, ChatWrapperGeneratedContextState, ChatWrapperGenerateInitialHistoryOptions, ChatWrapperSettings } from "./types.js";
import { LlamaText } from "./utils/LlamaText.js";
import type { JinjaTemplateChatWrapperOptions } from "./chatWrappers/generic/JinjaTemplateChatWrapper.js";
export declare abstract class ChatWrapper {
    static defaultSettings: ChatWrapperSettings;
    abstract readonly wrapperName: string;
    readonly settings: ChatWrapperSettings;
    generateContextState({ chatHistory, availableFunctions, documentFunctionParams }: ChatWrapperGenerateContextStateOptions): ChatWrapperGeneratedContextState;
    generateFunctionCallsAndResults(functionCalls: ChatModelFunctionCall[], useRawCall?: boolean): import("./utils/LlamaText.js")._LlamaText;
    generateFunctionCall(name: string, params: any): LlamaText;
    generateFunctionCallResult(functionName: string, functionParams: any, result: any): LlamaText;
    generateModelResponseText(modelResponse: ChatModelResponse["response"], useRawValues?: boolean): LlamaText;
    generateAvailableFunctionsSystemText(availableFunctions: ChatModelFunctions, { documentParams }: {
        documentParams?: boolean;
    }): LlamaText;
    addAvailableFunctionsSystemMessageToHistory(history: readonly ChatHistoryItem[], availableFunctions?: ChatModelFunctions, { documentParams }?: {
        documentParams?: boolean;
    }): readonly ChatHistoryItem[];
    generateInitialChatHistory({ systemPrompt }?: ChatWrapperGenerateInitialHistoryOptions): ChatHistoryItem[];
}
type FirstItemOfTupleOrFallback<T extends any[], Fallback> = T extends [infer U, ...any[]] ? U : Fallback;
export type ChatWrapperJinjaMatchConfiguration<T extends typeof ChatWrapper> = Array<FirstItemOfTupleOrFallback<ConstructorParameters<T>, object> | [
    testConfig: FirstItemOfTupleOrFallback<ConstructorParameters<T>, object>,
    applyConfig: FirstItemOfTupleOrFallback<ConstructorParameters<T>, object>,
    testJinjaChatWrapperOptions?: JinjaTemplateChatWrapperOptions
]>;
export {};
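These declarations are the extension point for custom chat formats: only `wrapperName` is abstract, and the base class (see the `ChatWrapper.js` diff below) ships working defaults for everything else. A minimal sketch of a subclass, assuming the package's root export re-exposes `ChatWrapper` the way the upstream package does:

import { ChatWrapper } from "node-llama-cpp";

// Inherits the base "system:/user:/model:" context formatting from
// ChatWrapper.js below; only the wrapper's name must be provided.
class PlainChatWrapper extends ChatWrapper {
    readonly wrapperName = "plain";
}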
233  node_modules/node-llama-cpp/dist/ChatWrapper.js  generated  vendored  Normal file
@@ -0,0 +1,233 @@
import { isChatModelResponseSegment } from "./types.js";
import { LlamaText, SpecialTokensText } from "./utils/LlamaText.js";
import { ChatModelFunctionsDocumentationGenerator } from "./chatWrappers/utils/ChatModelFunctionsDocumentationGenerator.js";
import { jsonDumps } from "./chatWrappers/utils/jsonDumps.js";
import { defaultChatSystemPrompt } from "./config.js";
import { getChatWrapperSegmentDefinition } from "./utils/getChatWrapperSegmentDefinition.js";
export class ChatWrapper {
    static defaultSettings = {
        supportsSystemMessages: true,
        functions: {
            call: {
                optionalPrefixSpace: true,
                prefix: "||call: ",
                paramsPrefix: LlamaText(new SpecialTokensText("(")),
                suffix: LlamaText(new SpecialTokensText(")")),
                emptyCallParamsPlaceholder: ""
            },
            result: {
                prefix: LlamaText(new SpecialTokensText("\n"), "||result: "),
                suffix: LlamaText(new SpecialTokensText("\n"))
            }
        },
        segments: {}
    };
    settings = ChatWrapper.defaultSettings;
    generateContextState({ chatHistory, availableFunctions, documentFunctionParams }) {
        const historyWithFunctions = this.addAvailableFunctionsSystemMessageToHistory(chatHistory, availableFunctions, {
            documentParams: documentFunctionParams
        });
        const texts = historyWithFunctions
            .map((item) => {
            if (item.type === "system")
                return LlamaText(["system: ", LlamaText.fromJSON(item.text)]);
            else if (item.type === "user")
                return LlamaText(["user: ", item.text]);
            else if (item.type === "model")
                return LlamaText(["model: ", this.generateModelResponseText(item.response)]);
            return item;
        });
        return {
            contextText: LlamaText.joinValues("\n", texts),
            stopGenerationTriggers: []
        };
    }
    generateFunctionCallsAndResults(functionCalls, useRawCall = true) {
        const calls = [];
        const results = [];
        const res = [];
        if (functionCalls.length === 0)
            return LlamaText([]);
        for (const functionCall of functionCalls) {
            if (useRawCall && functionCall.rawCall != null)
                calls.push(LlamaText.fromJSON(functionCall.rawCall));
            else
                calls.push(this.generateFunctionCall(functionCall.name, functionCall.params));
            results.push(this.generateFunctionCallResult(functionCall.name, functionCall.params, functionCall.result));
        }
        if (this.settings.functions.parallelism == null) {
            for (let i = 0; i < calls.length; i++) {
                res.push(calls[i]);
                res.push(results[i]);
            }
            return LlamaText(res);
        }
        res.push(LlamaText(this.settings.functions.parallelism.call.sectionPrefix ?? ""));
        for (let i = 0; i < calls.length; i++) {
            if (i > 0)
                res.push(LlamaText(this.settings.functions.parallelism.call.betweenCalls ?? ""));
            res.push(calls[i]);
        }
        res.push(LlamaText(this.settings.functions.parallelism.call.sectionSuffix ?? ""));
        res.push(LlamaText(this.settings.functions.parallelism.result?.sectionPrefix ?? ""));
        for (let i = 0; i < results.length; i++) {
            if (i > 0)
                res.push(LlamaText(this.settings.functions.parallelism.result?.betweenResults ?? ""));
            res.push(results[i]);
        }
        res.push(LlamaText(this.settings.functions.parallelism.result?.sectionSuffix ?? ""));
        return LlamaText(res);
    }
    generateFunctionCall(name, params) {
        const emptyCallParamsPlaceholder = this.settings.functions.call.emptyCallParamsPlaceholder;
        return LlamaText([
            this.settings.functions.call.prefix,
            name,
            this.settings.functions.call.paramsPrefix,
            (params === undefined
                ? (emptyCallParamsPlaceholder === undefined || emptyCallParamsPlaceholder === "")
                    ? ""
                    : jsonDumps(emptyCallParamsPlaceholder)
                : jsonDumps(params)),
            this.settings.functions.call.suffix
        ]);
    }
    generateFunctionCallResult(functionName, functionParams, result) {
        return this._generateFunctionCallResult(functionName, functionParams, result === undefined
            ? "void"
            : jsonDumps(result));
    }
    /** @internal */
    _generateFunctionCallResult(functionName, functionParams, rawResult) {
        function resolveParameters(text) {
            return LlamaText(text)
                .mapValues((value) => {
                if (typeof value !== "string")
                    return value;
                return value
                    .replaceAll("{{functionName}}", functionName)
                    .replaceAll("{{functionParams}}", functionParams === undefined ? "" : jsonDumps(functionParams));
            });
        }
        return LlamaText([
            resolveParameters(this.settings.functions.result.prefix),
            rawResult,
            resolveParameters(this.settings.functions.result.suffix)
        ]);
    }
    generateModelResponseText(modelResponse, useRawValues = true) {
        const res = [];
        const pendingFunctionCalls = [];
        const segmentStack = [];
        let lastSegmentEndedWithoutSuffix = false;
        let needsToAddSegmentReminder = false;
        const addFunctionCalls = () => {
            if (pendingFunctionCalls.length === 0)
                return;
            res.push(this.generateFunctionCallsAndResults(pendingFunctionCalls, useRawValues));
            pendingFunctionCalls.length = 0;
            needsToAddSegmentReminder = true;
        };
        const addSegmentReminderIfNeeded = () => {
            if (lastSegmentEndedWithoutSuffix && segmentStack.length === 0 && this.settings.segments?.closeAllSegments != null) {
                lastSegmentEndedWithoutSuffix = false;
                res.push(LlamaText(this.settings.segments.closeAllSegments));
            }
            else if (needsToAddSegmentReminder && segmentStack.length > 0 && this.settings.segments?.reiterateStackAfterFunctionCalls) {
                for (const segmentType of segmentStack) {
                    const segmentDefinition = getChatWrapperSegmentDefinition(this.settings, segmentType);
                    if (segmentDefinition == null)
                        continue;
                    res.push(LlamaText(segmentDefinition.prefix));
                }
            }
        };
        for (const response of modelResponse) {
            if (typeof response === "string") {
                addFunctionCalls();
                addSegmentReminderIfNeeded();
                res.push(LlamaText(response));
                continue;
            }
            else if (isChatModelResponseSegment(response)) {
                addFunctionCalls();
                const segmentDefinition = getChatWrapperSegmentDefinition(this.settings, response.segmentType);
                if (response.raw != null && useRawValues)
                    res.push(LlamaText.fromJSON(response.raw));
                else
                    res.push(LlamaText([
                        (segmentStack.length > 0 && segmentStack.at(-1) === response.segmentType)
                            ? ""
                            : segmentDefinition?.prefix ?? "",
                        response.text,
                        response.ended
                            ? (segmentDefinition?.suffix ?? "")
                            : ""
                    ]));
                lastSegmentEndedWithoutSuffix = response.ended && segmentDefinition?.suffix == null;
                if (!response.ended && segmentStack.at(-1) !== response.segmentType)
                    segmentStack.push(response.segmentType);
                else if (response.ended && segmentStack.at(-1) === response.segmentType) {
                    segmentStack.pop();
                    if (segmentStack.length === 0 && segmentDefinition?.suffix == null && this.settings.segments?.closeAllSegments != null)
                        res.push(LlamaText(this.settings.segments.closeAllSegments));
                }
                continue;
            }
            if (response.startsNewChunk)
                addFunctionCalls();
            pendingFunctionCalls.push(response);
        }
        addFunctionCalls();
        addSegmentReminderIfNeeded();
        return LlamaText(res);
    }
    generateAvailableFunctionsSystemText(availableFunctions, { documentParams = true }) {
        const functionsDocumentationGenerator = new ChatModelFunctionsDocumentationGenerator(availableFunctions);
        if (!functionsDocumentationGenerator.hasAnyFunctions)
            return LlamaText([]);
        return LlamaText.joinValues("\n", [
            "The assistant calls the provided functions as needed to retrieve information instead of relying on existing knowledge.",
            "To fulfill a request, the assistant calls relevant functions in advance when needed before responding to the request, and does not tell the user prior to calling a function.",
            "Provided functions:",
            "```typescript",
            functionsDocumentationGenerator.getTypeScriptFunctionSignatures({ documentParams }),
            "```",
            "",
            "Calling any of the provided functions can be done like this:",
            this.generateFunctionCall("getSomeInfo", { someKey: "someValue" }),
            "",
            "Note that the || prefix is mandatory.",
            "The assistant does not inform the user about using functions and does not explain anything before calling a function.",
            "After calling a function, the raw result appears afterwards and is not part of the conversation.",
            "To make information be part of the conversation, the assistant paraphrases and repeats the information without the function syntax."
        ]);
    }
    addAvailableFunctionsSystemMessageToHistory(history, availableFunctions, { documentParams = true } = {}) {
        const availableFunctionNames = Object.keys(availableFunctions ?? {});
        if (availableFunctions == null || availableFunctionNames.length === 0)
            return history;
        const res = history.slice();
        const firstNonSystemMessageIndex = res.findIndex((item) => item.type !== "system");
        res.splice(Math.max(0, firstNonSystemMessageIndex), 0, {
            type: "system",
            text: this.generateAvailableFunctionsSystemText(availableFunctions, { documentParams }).toJSON()
        });
        return res;
    }
    generateInitialChatHistory({ systemPrompt = defaultChatSystemPrompt } = {}) {
        return [{
                type: "system",
                text: LlamaText(systemPrompt ?? defaultChatSystemPrompt).toJSON()
            }];
    }
    /** @internal */
    static _getOptionConfigurationsToTestIfCanSupersedeJinjaTemplate() {
        return [{}];
    }
    /** @internal */
    static _checkModelCompatibility(options) {
        return true;
    }
}
//# sourceMappingURL=ChatWrapper.js.map
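Given the `defaultSettings` above, `generateFunctionCall` concatenates the call prefix, the function name, the `jsonDumps`-serialized params, and the special-token parentheses. A sketch of the flattened output, repeating the `PlainChatWrapper` subclass from the earlier sketch:

import { ChatWrapper } from "node-llama-cpp";

class PlainChatWrapper extends ChatWrapper {
    readonly wrapperName = "plain";
}

const call = new PlainChatWrapper().generateFunctionCall("getSomeInfo", { someKey: "someValue" });
// Flattened to plain text, `call` reads roughly:
//   ||call: getSomeInfo({"someKey": "someValue"})
// where the parentheses come from SpecialTokensText in defaultSettings above.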
1  node_modules/node-llama-cpp/dist/ChatWrapper.js.map  generated  vendored  Normal file
File diff suppressed because one or more lines are too long
1  node_modules/node-llama-cpp/dist/apiDocsIndex.d.ts  generated  vendored  Normal file
@@ -0,0 +1 @@
export {};
7  node_modules/node-llama-cpp/dist/apiDocsIndex.js  generated  vendored  Normal file
@@ -0,0 +1,7 @@
/** @internal */
import { _LlamaText } from "./utils/LlamaText.js";
/** @internal */
export * from "./index.js";
/** @internal */
export { _LlamaText as LlamaText };
//# sourceMappingURL=apiDocsIndex.js.map
1  node_modules/node-llama-cpp/dist/apiDocsIndex.js.map  generated  vendored  Normal file
@@ -0,0 +1 @@
{"version":3,"file":"apiDocsIndex.js","sourceRoot":"","sources":["../src/apiDocsIndex.ts"],"names":[],"mappings":"AAAA,gBAAgB;AAChB,OAAO,EACH,UAAU,EACb,MAAM,sBAAsB,CAAC;AAE9B,gBAAgB;AAChB,cAAc,YAAY,CAAC;AAE3B,gBAAgB;AAChB,OAAO,EACH,UAAU,IAAI,SAAS,EAC1B,CAAC"}
183  node_modules/node-llama-cpp/dist/bindings/AddonTypes.d.ts  generated  vendored  Normal file
@@ -0,0 +1,183 @@
import { Token } from "../types.js";
import { LlamaNuma } from "./types.js";
export type BindingModule = {
    AddonModel: {
        new (modelPath: string, params: {
            addonExports?: BindingModule;
            gpuLayers?: number;
            vocabOnly?: boolean;
            useMmap?: boolean;
            useDirectIo?: boolean;
            useMlock?: boolean;
            checkTensors?: boolean;
            onLoadProgress?(loadPercentage: number): void;
            hasLoadAbortSignal?: boolean;
            overridesList?: Array<[key: string, value: number | bigint | boolean | string, type: 0 | 1 | undefined]>;
        }): AddonModel;
    };
    AddonModelLora: {
        new (model: AddonModel, filePath: string): AddonModelLora;
    };
    AddonContext: {
        new (model: AddonModel, params: {
            contextSize?: number;
            batchSize?: number;
            sequences?: number;
            flashAttention?: boolean;
            logitsAll?: boolean;
            embeddings?: boolean;
            ranking?: boolean;
            threads?: number;
            performanceTracking?: boolean;
            swaFullCache?: boolean;
        }): AddonContext;
    };
    AddonGrammar: {
        new (grammarPath: string, params?: {
            addonExports?: BindingModule;
            rootRuleName?: string;
        }): AddonGrammar;
    };
    AddonGrammarEvaluationState: {
        new (model: AddonModel, grammar: AddonGrammar): AddonGrammarEvaluationState;
        new (existingState: AddonGrammarEvaluationState): AddonGrammarEvaluationState;
    };
    AddonSampler: {
        new (model: AddonModel): AddonSampler;
        acceptGrammarEvaluationStateToken(grammarEvaluationState: AddonGrammarEvaluationState, token: Token): void;
        canBeNextTokenForGrammarEvaluationState(grammarEvaluationState: AddonGrammarEvaluationState, token: Token): boolean;
    };
    markLoaded(): boolean;
    systemInfo(): string;
    getSupportsGpuOffloading(): boolean;
    getSupportsMmap(): boolean;
    getGpuSupportsMmap(): boolean;
    getSupportsMlock(): boolean;
    getMathCores(): number;
    getBlockSizeForGgmlType(ggmlType: number): number | undefined;
    getTypeSizeForGgmlType(ggmlType: number): number | undefined;
    getGgmlGraphOverheadCustom(size: number, grads: boolean): number;
    getConsts(): {
        ggmlMaxDims: number;
        ggmlTypeF16Size: number;
        ggmlTypeF32Size: number;
        ggmlTensorOverhead: number;
        llamaPosSize: number;
        llamaSeqIdSize: number;
    };
    setLogger(logger: (level: number, message: string) => void): void;
    setLoggerLogLevel(level: number): void;
    getGpuVramInfo(): {
        total: number;
        used: number;
        unifiedSize: number;
    };
    getGpuDeviceInfo(): {
        deviceNames: string[];
    };
    getGpuType(): "cuda" | "vulkan" | "metal" | false | undefined;
    ensureGpuDeviceIsSupported(): void;
    getSwapInfo(): {
        total: number;
        maxSize: number;
        free: number;
    };
    getMemoryInfo(): {
        total: number;
    };
    init(): Promise<void>;
    setNuma(numa?: LlamaNuma): void;
    loadBackends(forceLoadLibrariesSearchPath?: string): void;
    dispose(): Promise<void>;
};
export type AddonModel = {
    init(): Promise<boolean>;
    loadLora(lora: AddonModelLora): Promise<void>;
    abortActiveModelLoad(): void;
    dispose(): Promise<void>;
    tokenize(text: string, specialTokens: boolean): Uint32Array;
    detokenize(tokens: Uint32Array, specialTokens?: boolean): string;
    getTrainContextSize(): number;
    getEmbeddingVectorSize(): number;
    getTotalSize(): number;
    getTotalParameters(): number;
    getModelDescription(): ModelTypeDescription;
    tokenBos(): Token;
    tokenEos(): Token;
    tokenNl(): Token;
    prefixToken(): Token;
    middleToken(): Token;
    suffixToken(): Token;
    eotToken(): Token;
    sepToken(): Token;
    getTokenString(token: number): string;
    getTokenAttributes(token: Token): number;
    isEogToken(token: Token): boolean;
    getVocabularyType(): number;
    shouldPrependBosToken(): boolean;
    shouldAppendEosToken(): boolean;
    getModelSize(): number;
};
export type AddonContext = {
    init(): Promise<boolean>;
    dispose(): Promise<void>;
    getContextSize(): number;
    initBatch(size: number): void;
    addToBatch(sequenceId: number, firstTokenSequenceIndex: number, tokens: Uint32Array, logitIndexes: Uint32Array): Uint32Array;
    decodeBatch(): Promise<void>;
    sampleToken(batchLogitIndex: BatchLogitIndex, sampler: AddonSampler): Promise<Token | -1>;
    sampleToken(batchLogitIndex: BatchLogitIndex, sampler: AddonSampler, probabilities: boolean, confidence?: boolean): Promise<[token: Token | -1, probabilities: (Token | number)[] | undefined, confidence: number | undefined]>;
    disposeSequence(sequenceId: number): void;
    removeTokenCellsFromSequence(sequenceId: number, startPos: number, endPos: number): boolean;
    shiftSequenceTokenCells(sequenceId: number, startPos: number, endPos: number, shiftDelta: number): void;
    getSequenceKvCacheMinPosition(sequenceId: number): number;
    getSequenceKvCacheMaxPosition(sequenceId: number): number;
    getEmbedding(inputTokensLength: number, maxVectorSize?: number): Float64Array;
    getStateSize(): number;
    getThreads(): number;
    setThreads(threads: number): void;
    printTimings(): void;
    ensureDraftContextIsCompatibleForSpeculative(draftContext: AddonContext): void;
    saveSequenceStateToFile(filePath: string, sequenceId: number, tokens: Uint32Array): Promise<number>;
    loadSequenceStateFromFile(filePath: string, sequenceId: number, maxContextSize: number): Promise<Uint32Array>;
    setLora(lora: AddonModelLora, scale: number): void;
};
export type BatchLogitIndex = number & {
    readonly __batchLogitIndex: never;
};
export type AddonGrammar = {
    isTextCompatible(testText: string): boolean;
};
export type AddonGrammarEvaluationState = "AddonGrammarEvaluationState" & {
    readonly __brand: never;
};
export type AddonSampler = {
    dispose(): void;
    applyConfig(config: {
        temperature?: number;
        minP?: number;
        topK?: number;
        topP?: number;
        seed?: number;
        repeatPenalty?: number;
        repeatPenaltyMaxTokens?: number;
        repeatPenaltyTokens?: Uint32Array;
        repeatPenaltyPresencePenalty?: number;
        repeatPenaltyFrequencyPenalty?: number;
        grammarEvaluationState?: AddonGrammarEvaluationState;
        tokenBiasKeys?: Uint32Array;
        tokenBiasValues?: Float32Array;
    }): void;
};
export type AddonModelLora = {
    usages: number;
    readonly filePath: string;
    readonly disposed: boolean;
    dispose(): Promise<void>;
};
export type ModelTypeDescription = `${AddonModelArchName} ${AddonModelTypeName} ${AddonModelFileTypeName}`;
export type AddonModelArchName = "unknown" | "llama" | "falcon" | "gpt2" | "gptj" | "gptneox" | "mpt" | "baichuan" | "starcoder" | "persimmon" | "refact" | "bloom" | "stablelm";
export type AddonModelTypeName = "1B" | "3B" | "7B" | "8B" | "13B" | "15B" | "30B" | "34B" | "40B" | "65B" | "70B" | "?B";
export type AddonModelFileTypeName = _AddonModelFileTypeName | `${_AddonModelFileTypeName} (guessed)`;
type _AddonModelFileTypeName = "all F32" | "mostly F16" | "mostly Q4_0" | "mostly Q4_1" | "mostly Q4_1, some F16" | "mostly Q5_0" | "mostly Q5_1" | "mostly Q8_0" | "mostly Q2_K" | "mostly Q3_K - Small" | "mostly Q3_K - Medium" | "mostly Q3_K - Large" | "mostly Q4_K - Small" | "mostly Q4_K - Medium" | "mostly Q5_K - Small" | "mostly Q5_K - Medium" | "mostly Q6_K" | "unknown, may not work";
export {};
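Note the branding pattern used throughout these typings: `BatchLogitIndex` and `AddonGrammarEvaluationState` intersect a base type with a `never`-typed marker property, so only the native addon can produce valid values. A sketch of the effect; the deep import path is an assumption, as these types are internal and not part of the public API:

import type { BatchLogitIndex } from "node-llama-cpp/dist/bindings/AddonTypes.js";

// @ts-expect-error - a plain number lacks the `__batchLogitIndex` brand,
// so it cannot be assigned to the branded type.
const index: BatchLogitIndex = 3;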
2  node_modules/node-llama-cpp/dist/bindings/AddonTypes.js  generated  vendored  Normal file
@@ -0,0 +1,2 @@
export {};
//# sourceMappingURL=AddonTypes.js.map
1  node_modules/node-llama-cpp/dist/bindings/AddonTypes.js.map  generated  vendored  Normal file
@@ -0,0 +1 @@
{"version":3,"file":"AddonTypes.js","sourceRoot":"","sources":["../../src/bindings/AddonTypes.ts"],"names":[],"mappings":""}
104  node_modules/node-llama-cpp/dist/bindings/Llama.d.ts  generated  vendored  Normal file
@@ -0,0 +1,104 @@
import { EventRelay } from "lifecycle-utils";
import { LlamaModel, LlamaModelOptions } from "../evaluator/LlamaModel/LlamaModel.js";
import { GbnfJsonDefList, GbnfJsonSchema } from "../utils/gbnfJson/types.js";
import { LlamaJsonSchemaGrammar } from "../evaluator/LlamaJsonSchemaGrammar.js";
import { LlamaGrammar, LlamaGrammarOptions } from "../evaluator/LlamaGrammar.js";
import { LlamaClasses } from "../utils/getLlamaClasses.js";
import { LlamaGpuType, LlamaLogLevel, LlamaNuma } from "./types.js";
export declare const LlamaLogLevelToAddonLogLevel: ReadonlyMap<LlamaLogLevel, number>;
export declare class Llama {
    private _classes?;
    readonly onDispose: EventRelay<void>;
    private constructor();
    dispose(): Promise<void>;
    /** @hidden */
    [Symbol.asyncDispose](): Promise<void>;
    get disposed(): boolean;
    get classes(): LlamaClasses;
    get gpu(): LlamaGpuType;
    get supportsGpuOffloading(): boolean;
    get supportsMmap(): boolean;
    get gpuSupportsMmap(): boolean;
    get supportsMlock(): boolean;
    /** The number of CPU cores that are useful for math */
    get cpuMathCores(): number;
    /**
     * The maximum number of threads that can be used by the Llama instance.
     *
     * If set to `0`, the Llama instance will have no limit on the number of threads.
     *
     * See the `maxThreads` option of `getLlama` for more information.
     */
    get maxThreads(): number;
    set maxThreads(value: number);
    /**
     * See the `numa` option of `getLlama` for more information
     */
    get numa(): LlamaNuma;
    get logLevel(): LlamaLogLevel;
    set logLevel(value: LlamaLogLevel);
    get logger(): (level: LlamaLogLevel, message: string) => void;
    set logger(value: (level: LlamaLogLevel, message: string) => void);
    get buildType(): "localBuild" | "prebuilt";
    get cmakeOptions(): Readonly<Record<string, string>>;
    get llamaCppRelease(): {
        readonly repo: string;
        readonly release: string;
    };
    get systemInfo(): string;
    /**
     * VRAM padding used for memory size calculations, as these calculations are not always accurate.
     * This is set by default to ensure stability, but can be configured when you call `getLlama`.
     *
     * See `vramPadding` on `getLlama` for more information.
     */
    get vramPaddingSize(): number;
    /**
     * The total amount of VRAM that is currently being used.
     *
     * `unifiedSize` represents the amount of VRAM that is shared between the CPU and GPU.
     * On SoC devices, this is usually the same as `total`.
     */
    getVramState(): Promise<{
        total: number;
        used: number;
        free: number;
        unifiedSize: number;
    }>;
    /**
     * Get the state of the swap memory.
     *
     * **`maxSize`** - The maximum size of the swap memory that the system can allocate.
     * If the swap size is dynamic (like on macOS), this will be `Infinity`.
     *
     * **`allocated`** - The total size allocated by the system for swap memory.
     *
     * **`used`** - The amount of swap memory that is currently being used from the `allocated` size.
     *
     * On Windows, this will return the info for the page file.
     */
    getSwapState(): Promise<{
        /**
         * The maximum size of the swap memory that the system can allocate.
         * If the swap size is dynamic (like on macOS), this will be `Infinity`
         */
        maxSize: number;
        /** The total size allocated by the system for swap memory */
        allocated: number;
        /** The amount of swap memory that is currently being used from the `allocated` size */
        used: number;
    }>;
    getGpuDeviceNames(): Promise<string[]>;
    loadModel(options: LlamaModelOptions): Promise<LlamaModel>;
    /**
     * @see [Using a JSON Schema Grammar](https://node-llama-cpp.withcat.ai/guide/grammar#json-schema) tutorial
     * @see [Reducing Hallucinations When Using JSON Schema Grammar](https://node-llama-cpp.withcat.ai/guide/grammar#reducing-json-schema-hallucinations) tutorial
     */
    createGrammarForJsonSchema<const T extends GbnfJsonSchema<Defs>, const Defs extends GbnfJsonDefList<Defs> = Record<any, any>>(schema: Readonly<T> & GbnfJsonSchema<Defs>): Promise<LlamaJsonSchemaGrammar<T, Defs>>;
    getGrammarFor(type: Parameters<typeof LlamaGrammar.getFor>[1]): Promise<LlamaGrammar>;
    /**
     * @see [Using Grammar](https://node-llama-cpp.withcat.ai/guide/grammar) tutorial
     */
    createGrammar(options: LlamaGrammarOptions): Promise<LlamaGrammar>;
    static defaultConsoleLogger(level: LlamaLogLevel, message: string): void;
}
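`Llama` has a private constructor, so instances come from `getLlama` (declared in `getLlama.d.ts` further below). A short usage sketch of the API declared here; the model path is a hypothetical placeholder:

import { getLlama } from "node-llama-cpp";

const llama = await getLlama();
console.log(llama.gpu, await llama.getGpuDeviceNames());

// Hypothetical model path - replace with a real .gguf file on disk
const model = await llama.loadModel({ modelPath: "./model.gguf" });
const grammar = await llama.createGrammarForJsonSchema({
    type: "object",
    properties: { answer: { type: "string" } }
});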
546  node_modules/node-llama-cpp/dist/bindings/Llama.js  generated  vendored  Normal file
@@ -0,0 +1,546 @@
import os from "os";
import path from "path";
import chalk from "chalk";
import { DisposedError, EventRelay, withLock } from "lifecycle-utils";
import { getConsoleLogPrefix } from "../utils/getConsoleLogPrefix.js";
import { LlamaModel } from "../evaluator/LlamaModel/LlamaModel.js";
import { DisposeGuard } from "../utils/DisposeGuard.js";
import { LlamaJsonSchemaGrammar } from "../evaluator/LlamaJsonSchemaGrammar.js";
import { LlamaGrammar } from "../evaluator/LlamaGrammar.js";
import { ThreadsSplitter } from "../utils/ThreadsSplitter.js";
import { getLlamaClasses } from "../utils/getLlamaClasses.js";
import { LlamaLocks, LlamaLogLevel, LlamaLogLevelGreaterThan, LlamaLogLevelGreaterThanOrEqual } from "./types.js";
import { MemoryOrchestrator } from "./utils/MemoryOrchestrator.js";
export const LlamaLogLevelToAddonLogLevel = new Map([
    [LlamaLogLevel.disabled, 0],
    [LlamaLogLevel.fatal, 1],
    [LlamaLogLevel.error, 2],
    [LlamaLogLevel.warn, 3],
    [LlamaLogLevel.info, 4],
    [LlamaLogLevel.log, 5],
    [LlamaLogLevel.debug, 6]
]);
const addonLogLevelToLlamaLogLevel = new Map([...LlamaLogLevelToAddonLogLevel.entries()].map(([key, value]) => [value, key]));
const defaultLogLevel = 5;
const defaultCPUMinThreadSplitterThreads = 4;
export class Llama {
    /** @internal */ _bindings;
    /** @internal */ _backendDisposeGuard = new DisposeGuard();
    /** @internal */ _memoryLock = {};
    /** @internal */ _consts;
    /** @internal */ _vramOrchestrator;
    /** @internal */ _vramPadding;
    /** @internal */ _ramOrchestrator;
    /** @internal */ _ramPadding;
    /** @internal */ _swapOrchestrator;
    /** @internal */ _debug;
    /** @internal */ _threadsSplitter;
    /** @internal */ _hadErrorLogs = false;
    /** @internal */ _gpu;
    /** @internal */ _numa;
    /** @internal */ _buildType;
    /** @internal */ _cmakeOptions;
    /** @internal */ _supportsGpuOffloading;
    /** @internal */ _supportsMmap;
    /** @internal */ _gpuSupportsMmap;
    /** @internal */ _supportsMlock;
    /** @internal */ _mathCores;
    /** @internal */ _llamaCppRelease;
    /** @internal */ _logger;
    /** @internal */ _logLevel;
    /** @internal */ _pendingLog = null;
    /** @internal */ _pendingLogLevel = null;
    /** @internal */ _logDispatchQueuedMicrotasks = 0;
    /** @internal */ _previousLog = null;
    /** @internal */ _previousLogLevel = null;
    /** @internal */ _nextLogNeedNewLine = false;
    /** @internal */ _disposed = false;
    _classes;
    onDispose = new EventRelay();
    constructor({ bindings, bindingPath, extBackendsPath, logLevel, logger, buildType, cmakeOptions, llamaCppRelease, debug, numa, buildGpu, maxThreads, vramOrchestrator, vramPadding, ramOrchestrator, ramPadding, swapOrchestrator, skipLlamaInit }) {
        this._dispatchPendingLogMicrotask = this._dispatchPendingLogMicrotask.bind(this);
        this._onAddonLog = this._onAddonLog.bind(this);
        this._bindings = bindings;
        this._debug = debug;
        this._numa = numa ?? false;
        this._logLevel = this._debug
            ? LlamaLogLevel.debug
            : (logLevel ?? LlamaLogLevel.debug);
        const previouslyLoaded = bindings.markLoaded();
        if (!this._debug && (!skipLlamaInit || !previouslyLoaded)) {
            this._bindings.setLogger(this._onAddonLog);
            this._bindings.setLoggerLogLevel(LlamaLogLevelToAddonLogLevel.get(this._logLevel) ?? defaultLogLevel);
        }
        bindings.loadBackends();
        let loadedGpu = bindings.getGpuType();
        if (loadedGpu == null || (loadedGpu === false && buildGpu !== false)) {
            const backendsPath = path.dirname(bindingPath);
            const fallbackBackendsDir = path.join(extBackendsPath ?? backendsPath, "fallback");
            bindings.loadBackends(backendsPath);
            loadedGpu = bindings.getGpuType();
            if (loadedGpu == null || (loadedGpu === false && buildGpu !== false))
                bindings.loadBackends(fallbackBackendsDir);
        }
        bindings.ensureGpuDeviceIsSupported();
        if (this._numa !== false)
            bindings.setNuma(numa);
        this._gpu = bindings.getGpuType() ?? false;
        this._supportsGpuOffloading = bindings.getSupportsGpuOffloading();
        this._supportsMmap = bindings.getSupportsMmap();
        this._gpuSupportsMmap = bindings.getGpuSupportsMmap();
        this._supportsMlock = bindings.getSupportsMlock();
        this._mathCores = Math.floor(bindings.getMathCores());
        this._consts = bindings.getConsts();
        this._vramOrchestrator = vramOrchestrator;
        this._vramPadding = vramPadding;
        this._ramOrchestrator = ramOrchestrator;
        this._ramPadding = ramPadding;
        this._swapOrchestrator = swapOrchestrator;
        this._threadsSplitter = new ThreadsSplitter(maxThreads ?? (this._gpu === false
            ? Math.max(defaultCPUMinThreadSplitterThreads, this._mathCores)
            : 0));
        this._logger = logger;
        this._buildType = buildType;
        this._cmakeOptions = Object.freeze({ ...cmakeOptions });
        this._llamaCppRelease = Object.freeze({
            repo: llamaCppRelease.repo,
            release: llamaCppRelease.release
        });
        this._onExit = this._onExit.bind(this);
        process.on("exit", this._onExit);
    }
    async dispose() {
        if (this._disposed)
            return;
        this._disposed = true;
        this.onDispose.dispatchEvent();
        await this._backendDisposeGuard.acquireDisposeLock();
        await this._bindings.dispose();
    }
    /** @hidden */
    async [Symbol.asyncDispose]() {
        await this.dispose();
    }
    get disposed() {
        return this._disposed;
    }
    get classes() {
        if (this._classes == null)
            this._classes = getLlamaClasses();
        return this._classes;
    }
    get gpu() {
        return this._gpu;
    }
    get supportsGpuOffloading() {
        return this._supportsGpuOffloading;
    }
    get supportsMmap() {
        return this._supportsMmap;
    }
    get gpuSupportsMmap() {
        return this._gpuSupportsMmap;
    }
    get supportsMlock() {
        return this._supportsMlock;
    }
    /** The number of CPU cores that are useful for math */
    get cpuMathCores() {
        return this._mathCores;
    }
    /**
     * The maximum number of threads that can be used by the Llama instance.
     *
     * If set to `0`, the Llama instance will have no limit on the number of threads.
     *
     * See the `maxThreads` option of `getLlama` for more information.
     */
    get maxThreads() {
        return this._threadsSplitter.maxThreads;
    }
    set maxThreads(value) {
        this._threadsSplitter.maxThreads = Math.floor(Math.max(0, value));
    }
    /**
     * See the `numa` option of `getLlama` for more information
     */
    get numa() {
        return this._numa;
    }
    get logLevel() {
        return this._logLevel;
    }
    set logLevel(value) {
        this._ensureNotDisposed();
        if (value === this._logLevel || this._debug)
            return;
        this._bindings.setLoggerLogLevel(LlamaLogLevelToAddonLogLevel.get(value) ?? defaultLogLevel);
        this._logLevel = value;
    }
    get logger() {
        return this._logger;
    }
    set logger(value) {
        this._logger = value;
        if (value !== Llama.defaultConsoleLogger)
            this._nextLogNeedNewLine = false;
    }
    get buildType() {
        return this._buildType;
    }
    get cmakeOptions() {
        return this._cmakeOptions;
    }
    get llamaCppRelease() {
        return this._llamaCppRelease;
    }
    get systemInfo() {
        this._ensureNotDisposed();
        return this._bindings.systemInfo();
    }
    /**
     * VRAM padding used for memory size calculations, as these calculations are not always accurate.
     * This is set by default to ensure stability, but can be configured when you call `getLlama`.
     *
     * See `vramPadding` on `getLlama` for more information.
     */
    get vramPaddingSize() {
        return this._vramPadding.size;
    }
    /**
     * The total amount of VRAM that is currently being used.
     *
     * `unifiedSize` represents the amount of VRAM that is shared between the CPU and GPU.
     * On SoC devices, this is usually the same as `total`.
     */
    async getVramState() {
        this._ensureNotDisposed();
        const { total, used, unifiedSize } = this._bindings.getGpuVramInfo();
        return {
            total,
            used,
            free: Math.max(0, total - used),
            unifiedSize
        };
    }
    /**
     * Get the state of the swap memory.
     *
     * **`maxSize`** - The maximum size of the swap memory that the system can allocate.
     * If the swap size is dynamic (like on macOS), this will be `Infinity`.
     *
     * **`allocated`** - The total size allocated by the system for swap memory.
     *
     * **`used`** - The amount of swap memory that is currently being used from the `allocated` size.
     *
     * On Windows, this will return the info for the page file.
     */
    async getSwapState() {
        this._ensureNotDisposed();
        const { total, maxSize, free } = this._bindings.getSwapInfo();
        return {
            maxSize: maxSize === -1
                ? Infinity
                : maxSize,
            allocated: total,
            used: total - free
        };
    }
    async getGpuDeviceNames() {
        this._ensureNotDisposed();
        const { deviceNames } = this._bindings.getGpuDeviceInfo();
        return deviceNames;
    }
    async loadModel(options) {
        this._ensureNotDisposed();
        return await withLock([this._memoryLock, LlamaLocks.loadToMemory], options.loadSignal, async () => {
            this._ensureNotDisposed();
            const preventDisposalHandle = this._backendDisposeGuard.createPreventDisposalHandle();
            try {
                return await LlamaModel._create(options, { _llama: this });
            }
            finally {
                preventDisposalHandle.dispose();
            }
        });
    }
    /* eslint-disable @stylistic/max-len */
    /**
     * @see [Using a JSON Schema Grammar](https://node-llama-cpp.withcat.ai/guide/grammar#json-schema) tutorial
     * @see [Reducing Hallucinations When Using JSON Schema Grammar](https://node-llama-cpp.withcat.ai/guide/grammar#reducing-json-schema-hallucinations) tutorial
     */
    async createGrammarForJsonSchema(schema) {
        return new LlamaJsonSchemaGrammar(this, schema);
    }
    /* eslint-enable @stylistic/max-len */
    async getGrammarFor(type) {
        return await LlamaGrammar.getFor(this, type);
    }
    /**
     * @see [Using Grammar](https://node-llama-cpp.withcat.ai/guide/grammar) tutorial
     */
    async createGrammar(options) {
        return new LlamaGrammar(this, options);
    }
    /** @internal */
    async _init() {
        await this._bindings.init();
    }
    /**
     * Log messages related to the Llama instance
     * @internal
     */
    _log(level, message) {
        this._onAddonLog(LlamaLogLevelToAddonLogLevel.get(level) ?? defaultLogLevel, message + "\n");
    }
    /** @internal */
    _onAddonLog(level, message) {
        const llamaLogLevel = addonLogLevelToLlamaLogLevel.get(level) ?? LlamaLogLevel.fatal;
        if (this._pendingLog != null && this._pendingLogLevel != null && this._pendingLogLevel != llamaLogLevel) {
            this._callLogger(this._pendingLogLevel, this._pendingLog);
            this._pendingLog = null;
        }
        const sourceMessage = (this._pendingLog ?? "") + message;
        const lastNewLineIndex = sourceMessage.lastIndexOf("\n");
        const currentLog = lastNewLineIndex < 0
            ? sourceMessage
            : sourceMessage.slice(0, lastNewLineIndex);
        const nextLog = lastNewLineIndex < 0
            ? ""
            : sourceMessage.slice(lastNewLineIndex + 1);
        if (currentLog !== "")
            this._callLogger(llamaLogLevel, currentLog);
        if (nextLog !== "") {
            this._pendingLog = nextLog;
            this._pendingLogLevel = llamaLogLevel;
            queueMicrotask(this._dispatchPendingLogMicrotask);
            this._logDispatchQueuedMicrotasks++;
        }
        else
            this._pendingLog = null;
    }
    /** @internal */
    _dispatchPendingLogMicrotask() {
        this._logDispatchQueuedMicrotasks--;
        if (this._logDispatchQueuedMicrotasks !== 0)
            return;
        if (this._pendingLog != null && this._pendingLogLevel != null) {
            this._callLogger(this._pendingLogLevel, this._pendingLog);
            this._pendingLog = null;
        }
    }
    /** @internal */
    _callLogger(level, message) {
        // llama.cpp uses dots to indicate progress, so we don't want to print them as different lines,
        // and instead, append to the same log line
        if (logMessageIsOnlyDots(message) && this._logger === Llama.defaultConsoleLogger) {
            if (logMessageIsOnlyDots(this._previousLog) && level === this._previousLogLevel) {
                process.stdout.write(message);
            }
            else {
                this._nextLogNeedNewLine = true;
                process.stdout.write(prefixAndColorMessage(message, getColorForLogLevel(level)));
            }
        }
        else {
            if (this._nextLogNeedNewLine) {
                process.stdout.write("\n");
                this._nextLogNeedNewLine = false;
            }
            try {
                const transformedLogLevel = getTransformedLogLevel(level, message, this.gpu);
                if (LlamaLogLevelGreaterThanOrEqual(transformedLogLevel, this._logLevel))
                    this._logger(transformedLogLevel, message);
            }
            catch (err) {
                // the native addon code calls this function, so there's no use to throw an error here
            }
        }
        this._previousLog = message;
        this._previousLogLevel = level;
        if (!this._hadErrorLogs && LlamaLogLevelGreaterThan(level, LlamaLogLevel.error))
            this._hadErrorLogs = true;
    }
    /** @internal */
    _onExit() {
        if (this._pendingLog != null && this._pendingLogLevel != null) {
            this._callLogger(this._pendingLogLevel, this._pendingLog);
            this._pendingLog = null;
        }
    }
    /** @internal */
    _ensureNotDisposed() {
        if (this._disposed)
            throw new DisposedError();
    }
    /** @internal */
    static async _create({ bindings, bindingPath, extBackendsPath, buildType, buildMetadata, logLevel, logger, vramPadding, ramPadding, maxThreads, skipLlamaInit = false, debug, numa }) {
        const vramOrchestrator = new MemoryOrchestrator(() => {
            const { total, used, unifiedSize } = bindings.getGpuVramInfo();
            return {
                total,
                free: Math.max(0, total - used),
                unifiedSize
            };
        });
        const ramOrchestrator = new MemoryOrchestrator(() => {
            const used = process.memoryUsage().rss;
            const total = os.totalmem();
            return {
                total,
                free: Math.max(0, total - used),
                unifiedSize: total
            };
        });
        const swapOrchestrator = new MemoryOrchestrator(() => {
            const { total, maxSize, free } = bindings.getSwapInfo();
            const used = total - free;
            if (maxSize === -1)
                return {
                    total: Infinity,
                    free: Infinity,
                    unifiedSize: Infinity
                };
            return {
                total: maxSize,
                free: maxSize - used,
                unifiedSize: maxSize
            };
        });
        let resolvedRamPadding;
        if (ramPadding instanceof Function)
            resolvedRamPadding = ramOrchestrator.reserveMemory(ramPadding((await ramOrchestrator.getMemoryState()).total));
        else
            resolvedRamPadding = ramOrchestrator.reserveMemory(ramPadding);
        const llama = new Llama({
            bindings,
            bindingPath,
            extBackendsPath,
            buildType,
            cmakeOptions: buildMetadata.buildOptions.customCmakeOptions,
            llamaCppRelease: {
                repo: buildMetadata.buildOptions.llamaCpp.repo,
                release: buildMetadata.buildOptions.llamaCpp.release
            },
            logLevel,
            logger,
            debug,
            numa,
            buildGpu: buildMetadata.buildOptions.gpu,
            vramOrchestrator,
            maxThreads,
            vramPadding: vramOrchestrator.reserveMemory(0),
            ramOrchestrator,
            ramPadding: resolvedRamPadding,
            swapOrchestrator,
            skipLlamaInit
        });
        if (llama.gpu === false || vramPadding === 0) {
            // do nothing since `llama._vramPadding` is already set to 0
        }
        else if (vramPadding instanceof Function) {
            const currentVramPadding = llama._vramPadding;
            llama._vramPadding = vramOrchestrator.reserveMemory(vramPadding((await vramOrchestrator.getMemoryState()).total));
            currentVramPadding.dispose();
        }
        else {
            const currentVramPadding = llama._vramPadding;
            llama._vramPadding = vramOrchestrator.reserveMemory(vramPadding);
            currentVramPadding.dispose();
        }
        if (!skipLlamaInit)
            await llama._init();
        return llama;
    }
    static defaultConsoleLogger(level, message) {
        switch (level) {
            case LlamaLogLevel.disabled:
                break;
            case LlamaLogLevel.fatal:
                // we don't use console.error here because it prints the stack trace
                console.warn(prefixAndColorMessage(message, getColorForLogLevel(level)));
                break;
            case LlamaLogLevel.error:
                // we don't use console.error here because it prints the stack trace
                console.warn(prefixAndColorMessage(message, getColorForLogLevel(level)));
                break;
            case LlamaLogLevel.warn:
                console.warn(prefixAndColorMessage(message, getColorForLogLevel(level)));
                break;
            case LlamaLogLevel.info:
                console.info(prefixAndColorMessage(message, getColorForLogLevel(level)));
                break;
            case LlamaLogLevel.log:
                console.info(prefixAndColorMessage(message, getColorForLogLevel(level)));
                break;
            case LlamaLogLevel.debug:
                console.debug(prefixAndColorMessage(message, getColorForLogLevel(level)));
                break;
            default:
                void level;
                console.warn(getConsoleLogPrefix() + getColorForLogLevel(LlamaLogLevel.warn)(`Unknown log level: ${level}`));
                console.log(prefixAndColorMessage(message, getColorForLogLevel(level)));
        }
    }
}
function getColorForLogLevel(level) {
    switch (level) {
        case LlamaLogLevel.disabled: return chalk.whiteBright;
        case LlamaLogLevel.fatal: return chalk.redBright;
        case LlamaLogLevel.error: return chalk.red;
        case LlamaLogLevel.warn: return chalk.yellow;
        case LlamaLogLevel.info: return chalk.whiteBright;
        case LlamaLogLevel.log: return chalk.white;
        case LlamaLogLevel.debug: return chalk.gray;
        default:
            void level;
            return chalk.whiteBright;
    }
}
function prefixAndColorMessage(message, color) {
    return getConsoleLogPrefix() + (message
        .split("\n")
        .map((line) => color(line))
        .join("\n" + getConsoleLogPrefix()));
}
function logMessageIsOnlyDots(message) {
    if (message == null)
        return false;
    for (let i = 0; i < message.length; i++) {
        if (message[i] !== ".")
            return false;
    }
    return true;
}
function getTransformedLogLevel(level, message, gpu) {
    if (level === LlamaLogLevel.warn && message.endsWith("the full capacity of the model will not be utilized"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("ggml_metal_init: skipping kernel_") && message.endsWith("(not supported)"))
        return LlamaLogLevel.log;
    else if (level === LlamaLogLevel.warn && message.startsWith("ggml_cuda_init: GGML_CUDA_FORCE_") && message.endsWith(" no"))
        return LlamaLogLevel.log;
    else if (level === LlamaLogLevel.info && message.startsWith("load_backend: loaded "))
        return LlamaLogLevel.log;
    else if (level === LlamaLogLevel.warn && message.startsWith("make_cpu_buft_list: disabling extra buffer types"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("llama_context: non-unified KV cache requires ggml_set_rows() - forcing unified KV cache"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("llama_kv_cache_unified: LLAMA_SET_ROWS=0, using old ggml_cpy() method for backwards compatibility"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("init: embeddings required but some input tokens were not marked as outputs -> overriding"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("load: special_eog_ids contains both '<|return|>' and '<|call|>' tokens, removing '<|end|>' token from EOG list"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("llama_init_from_model: model default pooling_type is [0], but [-1] was specified"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("llama_model_loader: direct I/O is enabled, disabling mmap"))
        return LlamaLogLevel.info;
    else if (level === LlamaLogLevel.warn && message.startsWith("llama_model_loader: direct I/O is not available, using mmap"))
        return LlamaLogLevel.info;
    else if (gpu === false && level === LlamaLogLevel.warn && message.startsWith("llama_adapter_lora_init_impl: lora for '") && message.endsWith("' cannot use buft 'CPU_REPACK', fallback to CPU"))
        return LlamaLogLevel.info;
    else if (gpu === "metal" && level === LlamaLogLevel.warn && message.startsWith("ggml_metal_device_init: tensor API disabled for"))
        return LlamaLogLevel.info;
    return level;
}
//# sourceMappingURL=Llama.js.map
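One detail worth noting in the implementation above: `_onAddonLog` buffers text from the native addon until a newline arrives (flushing leftovers via a queued microtask and on process exit), so a logger configured through `getLlama` only ever receives complete lines. A sketch of wiring in a custom logger:

import { getLlama, LlamaLogLevel } from "node-llama-cpp";

const llama = await getLlama({
    logLevel: LlamaLogLevel.info,
    logger(level, message) {
        // `message` is always a complete line; partial writes from the
        // native addon are buffered by _onAddonLog until a "\n" arrives.
        console.log(`[llama:${level}] ${message}`);
    }
});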
1  node_modules/node-llama-cpp/dist/bindings/Llama.js.map  generated  vendored  Normal file
File diff suppressed because one or more lines are too long
2  node_modules/node-llama-cpp/dist/bindings/consts.d.ts  generated  vendored  Normal file
@@ -0,0 +1,2 @@
import { BuildGpu } from "./types.js";
export declare function getPrettyBuildGpuName(gpu: BuildGpu | undefined): string;
13  node_modules/node-llama-cpp/dist/bindings/consts.js  generated  vendored  Normal file
@@ -0,0 +1,13 @@
const prettyBuildGpuNames = {
    metal: "Metal",
    cuda: "CUDA",
    vulkan: "Vulkan"
};
export function getPrettyBuildGpuName(gpu) {
    if (gpu == null)
        return "unknown GPU";
    if (gpu == false)
        return "no GPU";
    return prettyBuildGpuNames[gpu] ?? ('"' + gpu + '"');
}
//# sourceMappingURL=consts.js.map
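`getPrettyBuildGpuName` maps the internal `BuildGpu` values to display names, with `false` meaning a CPU-only build and unknown values echoed back in quotes. For example:

getPrettyBuildGpuName("metal");   // "Metal"
getPrettyBuildGpuName(false);     // "no GPU"
getPrettyBuildGpuName(undefined); // "unknown GPU"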
1  node_modules/node-llama-cpp/dist/bindings/consts.js.map  generated  vendored  Normal file
@@ -0,0 +1 @@
{"version":3,"file":"consts.js","sourceRoot":"","sources":["../../src/bindings/consts.ts"],"names":[],"mappings":"AAEA,MAAM,mBAAmB,GAA6C;IAClE,KAAK,EAAE,OAAO;IACd,IAAI,EAAE,MAAM;IACZ,MAAM,EAAE,QAAQ;CACnB,CAAC;AAEF,MAAM,UAAU,qBAAqB,CAAC,GAAyB;IAC3D,IAAI,GAAG,IAAI,IAAI;QACX,OAAO,aAAa,CAAC;IAEzB,IAAI,GAAG,IAAI,KAAK;QACZ,OAAO,QAAQ,CAAC;IAEpB,OAAO,mBAAmB,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,GAAG,GAAG,GAAG,GAAG,CAAC,CAAC;AACzD,CAAC"}
280  node_modules/node-llama-cpp/dist/bindings/getLlama.d.ts  generated  vendored  Normal file
@@ -0,0 +1,280 @@
|
||||
import { LlamaGpuType, LlamaLogLevel, LlamaNuma } from "./types.js";
|
||||
import { Llama } from "./Llama.js";
|
||||
export type LlamaOptions = {
|
||||
/**
|
||||
* The compute layer implementation type to use for llama.cpp.
|
||||
* - **`"auto"`**: Automatically detect and use the best GPU available (Metal on macOS, and CUDA or Vulkan on Windows and Linux)
|
||||
* - **`"metal"`**: Use Metal.
|
||||
* Only supported on macOS.
|
||||
* Enabled by default on Apple Silicon Macs.
|
||||
* - **`"cuda"`**: Use CUDA.
|
||||
* - **`"vulkan"`**: Use Vulkan.
|
||||
* - **`false`**: Disable any GPU support and only use the CPU.
*
* `"auto"` by default.
* @see Use the `getLlamaGpuTypes` function to get the available GPU types (from the above list) for the current machine at runtime.
*/
gpu?: "auto" | LlamaGpuType | {
type: "auto";
exclude?: LlamaGpuType[];
};
/**
* Set the minimum log level for llama.cpp.
* Defaults to `"warn"`.
*/
logLevel?: LlamaLogLevel;
/**
* Set a custom logger for llama.cpp logs.
*/
logger?: (level: LlamaLogLevel, message: string) => void;
/**
* Set what build method to use.
* - **`"auto"`**: If a local build is found, use it.
* Otherwise, if a prebuilt binary is found, use it.
* Otherwise, build from source.
* - **`"never"`**: If a local build is found, use it.
* Otherwise, if a prebuilt binary is found, use it.
* Otherwise, throw a `NoBinaryFoundError`.
* - **`"forceRebuild"`**: Always build from source.
* Be cautious with this option, as it will cause the build to fail on Windows when the binaries are in use by another process.
* - **`"try"`**: If a local build is found, use it.
* Otherwise, try to build from source and use the resulting binary.
* If building from source fails, use a prebuilt binary if one is found.
*
* When running from inside an Asar archive in Electron, building from source is not possible, so it'll never build from source.
* To allow building from source in Electron apps, make sure you ship `node-llama-cpp` as an unpacked module.
*
* Defaults to `"auto"`.
* On Electron, defaults to `"never"`.
*/
build?: "auto" | "never" | "forceRebuild" | "try";
/**
* Set custom CMake options for llama.cpp.
*/
cmakeOptions?: Record<string, string>;
/**
* When a prebuilt binary is found, only use it if it was built with the same build options as the ones specified in `buildOptions`.
* Disabled by default.
*/
existingPrebuiltBinaryMustMatchBuildOptions?: boolean;
/**
* Use prebuilt binaries if they match the build options.
* Enabled by default.
*/
usePrebuiltBinaries?: boolean;
/**
* Print binary compilation progress logs.
* Enabled by default.
*/
progressLogs?: boolean;
/**
* Don't download the llama.cpp source if it's not found.
* When set to `true` and the llama.cpp source is not found, a `NoBinaryFoundError` will be thrown.
* Disabled by default.
*/
skipDownload?: boolean;
/**
* The maximum number of threads to use for the Llama instance.
*
* Set to `0` to have no thread limit.
*
* When not using a GPU, defaults to the number of CPU cores that are useful for math (`.cpuMathCores`), or `4`, whichever is higher.
*
* When using a GPU, there's no limit by default.
*/
maxThreads?: number;
/**
* Pad the available VRAM for the memory size calculations, as these calculations are not always accurate.
* Recommended to ensure stability.
* This only affects the calculations of `"auto"` in function options and is not reflected in the `getVramState` function.
*
* Defaults to `6%` of the total VRAM or 1GB, whichever is lower.
* Set to `0` to disable.
*/
vramPadding?: number | ((totalVram: number) => number);
/**
* Pad the available RAM for the memory size calculations, as these calculations are not always accurate.
* Recommended to ensure stability.
*
* Defaults to `25%` of the total RAM or 6GB (1GB on Linux), whichever is lower.
* Set to `0` to disable.
*
* > Since the OS also needs RAM to function, the default value can get up to 6GB on Windows and macOS, and 1GB on Linux.
*/
ramPadding?: number | ((totalRam: number) => number);
/**
* Enable debug mode to find issues with llama.cpp.
* Makes logs print directly to the console from `llama.cpp` and not through the provided logger.
*
* Defaults to `false`.
*
* The default can be set using the `NODE_LLAMA_CPP_DEBUG` environment variable.
*/
debug?: boolean;
/**
* Loads existing binaries without loading the `llama.cpp` backend,
* and then disposes the returned `Llama` instance right away before returning it.
*
* Useful for performing a fast and efficient test to check whether the given configuration can be loaded.
* Can be used for determining which GPU types the current machine supports before actually using them.
*
* Enabling this option implies `build: "never"` and `skipDownload: true`.
*
* The returned `Llama` instance will be disposed and cannot be used.
*
* Defaults to `false`.
*/
dryRun?: boolean;
/**
* NUMA (Non-Uniform Memory Access) allocation policy.
*
* On multi-socket or multi-cluster machines, each CPU "socket" (or node) has its own local memory.
* Accessing memory on your own socket is fast, but accessing another socket's memory is slower.
* Setting a NUMA allocation policy can dramatically improve performance by keeping data local and "close" to the socket.
*
* These are the available NUMA options:
* - **`false`**: Don't set any NUMA policy - let the OS decide.
* - **`"distribute"`**: Distribute the memory across all available NUMA nodes.
* - **`"isolate"`**: Pin both threads and their memory to a single NUMA node to avoid cross-node traffic.
* - **`"numactl"`**: Delegate NUMA management to the external `numactl` command (or `libnuma` library) to set the NUMA policy.
* - **`"mirror"`**: Allocate memory on all NUMA nodes, and copy the data to all of them.
* This ensures minimal traffic between nodes, but uses more memory.
*
* Defaults to `false` (no NUMA policy).
*/
numa?: LlamaNuma;
};
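A minimal sketch of how the options above compose when calling `getLlama` (the option names come from the `LlamaOptions` type above; the specific values are arbitrary examples):

```ts
import {getLlama, LlamaLogLevel} from "node-llama-cpp";

// Prefer any supported GPU type except Vulkan, never build from source,
// and spread the memory across NUMA nodes on multi-socket machines.
const llama = await getLlama({
    gpu: {type: "auto", exclude: ["vulkan"]},
    build: "never",
    numa: "distribute",
    logLevel: LlamaLogLevel.warn
});
```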
export type LastBuildOptions = {
/**
* Set the minimum log level for llama.cpp.
* Defaults to `"warn"`.
*/
logLevel?: LlamaLogLevel;
/**
* Set a custom logger for llama.cpp logs.
*/
logger?: (level: LlamaLogLevel, message: string) => void;
/**
* If a local build is not found, use prebuilt binaries.
* Enabled by default.
*/
usePrebuiltBinaries?: boolean;
/**
* If a local build is not found and prebuilt binaries are not found, print binary compilation progress logs while building from source.
* Enabled by default.
*/
progressLogs?: boolean;
/**
* If a local build is not found and prebuilt binaries are not found, don't download the llama.cpp source.
* When set to `true` and the llama.cpp source is needed but not found, a `NoBinaryFoundError` will be thrown.
* Disabled by default.
*/
skipDownload?: boolean;
/**
* The maximum number of threads to use for the Llama instance.
*
* Set to `0` to have no thread limit.
*
* When not using a GPU, defaults to the number of CPU cores that are useful for math (`.cpuMathCores`), or `4`, whichever is higher.
*
* When using a GPU, there's no limit by default.
*/
maxThreads?: number;
/**
* Pad the available VRAM for the memory size calculations, as these calculations are not always accurate.
* Recommended to ensure stability.
* This only affects the calculations of `"auto"` in function options and is not reflected in the `getVramState` function.
*
* Defaults to `6%` of the total VRAM or 1GB, whichever is lower.
* Set to `0` to disable.
*/
vramPadding?: number | ((totalVram: number) => number);
/**
* Pad the available RAM for the memory size calculations, as these calculations are not always accurate.
* Recommended to ensure stability.
*
* Defaults to `25%` of the total RAM or 6GB (1GB on Linux), whichever is lower.
* Set to `0` to disable.
*
* > Since the OS also needs RAM to function, the default value can get up to 6GB on Windows and macOS, and 1GB on Linux.
*/
ramPadding?: number | ((totalRam: number) => number);
/**
* Enable debug mode to find issues with llama.cpp.
* Makes logs print directly to the console from `llama.cpp` and not through the provided logger.
*
* Defaults to `false`.
*
* The default can be set using the `NODE_LLAMA_CPP_DEBUG` environment variable.
*/
debug?: boolean;
/**
* Loads existing binaries without loading the `llama.cpp` backend,
* and then disposes the returned `Llama` instance right away before returning it.
*
* Useful for performing a fast and efficient test to check whether the given configuration can be loaded.
* Can be used for determining which GPU types the current machine supports before actually using them.
*
* Enabling this option implies `build: "never"` and `skipDownload: true`.
*
* The returned `Llama` instance will be disposed and cannot be used.
*
* Defaults to `false`.
*/
dryRun?: boolean;
/**
* NUMA (Non-Uniform Memory Access) allocation policy.
*
* On multi-socket or multi-cluster machines, each CPU "socket" (or node) has its own local memory.
* Accessing memory on your own socket is fast, but accessing another socket's memory is slower.
* Setting a NUMA allocation policy can dramatically improve performance by keeping data local and "close" to the socket.
*
* These are the available NUMA options:
* - **`false`**: Don't set any NUMA policy - let the OS decide.
* - **`"distribute"`**: Distribute the memory across all available NUMA nodes.
* - **`"isolate"`**: Pin both threads and their memory to a single NUMA node to avoid cross-node traffic.
* - **`"numactl"`**: Delegate NUMA management to the external `numactl` command (or `libnuma` library) to set the NUMA policy.
* - **`"mirror"`**: Allocate memory on all NUMA nodes, and copy the data to all of them.
* This ensures minimal traffic between nodes, but uses more memory.
*
* Defaults to `false` (no NUMA policy).
*/
numa?: LlamaNuma;
};
export declare const getLlamaFunctionName = "getLlama";
export declare const defaultLlamaVramPadding: (totalVram: number) => number;
export declare const defaultLlamaRamPadding: (totalRam: number) => number;
/**
* Get a `llama.cpp` binding.
*
* Defaults to using a local binary built using the `source download` or `source build` CLI commands if one exists,
* otherwise uses a prebuilt binary, and falls back to building from source if a prebuilt binary is not found.
*
* Pass `"lastBuild"` to use the last successful build created
* using the `source download` or `source build` CLI commands, if one exists.
*
* The difference between using `"lastBuild"` and not using it is that `"lastBuild"` will use the binary built using a CLI command
* with the configuration used to build that binary (like using its GPU type),
* while not using `"lastBuild"` will only attempt to use a binary that complies with the given options.
*
* For example, if your machine supports both CUDA and Vulkan, and you run the `source download --gpu vulkan` command,
* calling `getLlama("lastBuild")` will return the binary you built with Vulkan,
* while calling `getLlama()` will return a binding from a prebuilt binary with CUDA,
* since CUDA is preferable on systems that support it.
*
* As another example, if your machine supports CUDA, and you run the `source download --gpu cuda` command,
* calling `getLlama("lastBuild")` will return the binary you built with CUDA,
* and calling `getLlama()` will also return that same binary you built with CUDA.
*
* You should prefer to use `getLlama()` without `"lastBuild"` unless you have a specific reason to use the last build.
*/
export declare function getLlama(options?: LlamaOptions): Promise<Llama>;
export declare function getLlama(type: "lastBuild", lastBuildOptions?: LastBuildOptions): Promise<Llama>;
export declare function getLlamaForOptions({ gpu, logLevel, logger, build, cmakeOptions, existingPrebuiltBinaryMustMatchBuildOptions, usePrebuiltBinaries, progressLogs, skipDownload, maxThreads, vramPadding, ramPadding, debug, numa, dryRun }: LlamaOptions, { updateLastBuildInfoOnCompile, skipLlamaInit, pipeBinaryTestErrorLogs }?: {
updateLastBuildInfoOnCompile?: boolean;
skipLlamaInit?: boolean;
pipeBinaryTestErrorLogs?: boolean;
}): Promise<Llama>;
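To make the two `getLlama` overloads above concrete, a short usage sketch (it assumes `node-llama-cpp` is installed and, for the second call, that a CLI build was run beforehand):

```ts
import {getLlama, LlamaLogLevel} from "node-llama-cpp";

// Default resolution order: local build -> prebuilt binary -> build from source.
const llama = await getLlama();

// Reuse the last successful `source download` / `source build` CLI build,
// with a quieter log level.
const lastBuildLlama = await getLlama("lastBuild", {
    logLevel: LlamaLogLevel.error
});
```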
517
node_modules/node-llama-cpp/dist/bindings/getLlama.js
generated
vendored
Normal file
517
node_modules/node-llama-cpp/dist/bindings/getLlama.js
generated
vendored
Normal file
@@ -0,0 +1,517 @@
import process from "process";
import path from "path";
import console from "console";
import { createRequire } from "module";
import { builtinLlamaCppGitHubRepo, builtinLlamaCppRelease, defaultLlamaCppLogLevel, defaultLlamaCppGitHubRepo, defaultLlamaCppGpuSupport, defaultLlamaCppRelease, defaultSkipDownload, llamaLocalBuildBinsDirectory, recommendedBaseDockerImage, defaultLlamaCppDebugMode } from "../config.js";
import { getConsoleLogPrefix } from "../utils/getConsoleLogPrefix.js";
import { waitForLockfileRelease } from "../utils/waitForLockfileRelease.js";
import { isGithubReleaseNeedsResolving, resolveGithubRelease } from "../utils/resolveGithubRelease.js";
import { runningInsideAsar, runningInElectron } from "../utils/runtime.js";
import { compileLlamaCpp, getLocalBuildBinaryBuildMetadata, getLocalBuildBinaryPath, getPrebuiltBinaryBuildMetadata, getPrebuiltBinaryPath } from "./utils/compileLLamaCpp.js";
import { getLastBuildInfo } from "./utils/lastBuildInfo.js";
import { getClonedLlamaCppRepoReleaseInfo, isLlamaCppRepoCloned } from "./utils/cloneLlamaCppRepo.js";
import { getPlatform } from "./utils/getPlatform.js";
import { getBuildFolderNameForBuildOptions } from "./utils/getBuildFolderNameForBuildOptions.js";
import { resolveCustomCmakeOptions } from "./utils/resolveCustomCmakeOptions.js";
import { getCanUsePrebuiltBinaries } from "./utils/getCanUsePrebuiltBinaries.js";
import { NoBinaryFoundError } from "./utils/NoBinaryFoundError.js";
import { Llama } from "./Llama.js";
import { getGpuTypesToUseForOption } from "./utils/getGpuTypesToUseForOption.js";
import { getPrettyBuildGpuName } from "./consts.js";
import { detectGlibc } from "./utils/detectGlibc.js";
import { getLinuxDistroInfo, isDistroAlpineLinux } from "./utils/getLinuxDistroInfo.js";
import { testBindingBinary } from "./utils/testBindingBinary.js";
import { getPlatformInfo } from "./utils/getPlatformInfo.js";
import { hasBuildingFromSourceDependenciesInstalled } from "./utils/hasBuildingFromSourceDependenciesInstalled.js";
import { resolveActualBindingBinaryPath } from "./utils/resolveActualBindingBinaryPath.js";
const require = createRequire(import.meta.url);
export const getLlamaFunctionName = "getLlama";
export const defaultLlamaVramPadding = (totalVram) => Math.floor(Math.min(totalVram * 0.06, 1024 * 1024 * 1024));
export const defaultLlamaRamPadding = (totalRam) => {
const platform = getPlatform();
if (platform === "linux")
return Math.floor(Math.min(totalRam * 0.25, 1024 * 1024 * 1024));
return Math.floor(Math.min(totalRam * 0.25, 1024 * 1024 * 1024 * 6));
};
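To make the two padding defaults above concrete, a worked sketch of what they evaluate to (the values follow directly from the `Math.min` expressions):

```ts
const GiB = 1024 ** 3;

defaultLlamaVramPadding(8 * GiB);  // min(6% of 8GiB, 1GiB)  -> ~0.48GiB
defaultLlamaVramPadding(24 * GiB); // min(6% of 24GiB, 1GiB) -> 1GiB

// defaultLlamaRamPadding checks the platform at runtime:
defaultLlamaRamPadding(16 * GiB);  // Linux:     min(25% of 16GiB, 1GiB) -> 1GiB
                                   // elsewhere: min(25% of 16GiB, 6GiB) -> 4GiB
```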
const defaultBuildOption = runningInElectron
? "never"
: "auto";
export async function getLlama(options, lastBuildOptions) {
if (options === "lastBuild") {
const lastBuildInfo = await getLastBuildInfo();
const dryRun = lastBuildOptions?.dryRun ?? false;
const getLlamaOptions = {
logLevel: lastBuildOptions?.logLevel ?? defaultLlamaCppLogLevel,
logger: lastBuildOptions?.logger ?? Llama.defaultConsoleLogger,
usePrebuiltBinaries: lastBuildOptions?.usePrebuiltBinaries ?? true,
progressLogs: lastBuildOptions?.progressLogs ?? true,
skipDownload: lastBuildOptions?.skipDownload ?? defaultSkipDownload,
maxThreads: lastBuildOptions?.maxThreads,
vramPadding: lastBuildOptions?.vramPadding ?? defaultLlamaVramPadding,
ramPadding: lastBuildOptions?.ramPadding ?? defaultLlamaRamPadding,
debug: lastBuildOptions?.debug ?? defaultLlamaCppDebugMode,
numa: lastBuildOptions?.numa,
dryRun
};
if (lastBuildInfo == null)
return getLlamaForOptions(getLlamaOptions);
const localBuildFolder = path.join(llamaLocalBuildBinsDirectory, lastBuildInfo.folderName);
const localBuildBinPath = await getLocalBuildBinaryPath(lastBuildInfo.folderName);
await waitForLockfileRelease({ resourcePath: localBuildFolder });
if (localBuildBinPath != null) {
try {
const resolvedBindingPath = await resolveActualBindingBinaryPath(localBuildBinPath);
const binding = loadBindingModule(resolvedBindingPath);
const buildMetadata = await getLocalBuildBinaryBuildMetadata(lastBuildInfo.folderName);
const res = await Llama._create({
bindings: binding,
bindingPath: resolvedBindingPath,
buildType: "localBuild",
buildMetadata,
logger: lastBuildOptions?.logger ?? Llama.defaultConsoleLogger,
logLevel: lastBuildOptions?.logLevel ?? defaultLlamaCppLogLevel,
maxThreads: lastBuildOptions?.maxThreads,
vramPadding: lastBuildOptions?.vramPadding ?? defaultLlamaVramPadding,
ramPadding: lastBuildOptions?.ramPadding ?? defaultLlamaRamPadding,
debug: lastBuildOptions?.debug ?? defaultLlamaCppDebugMode,
numa: lastBuildOptions?.numa,
skipLlamaInit: dryRun
});
if (dryRun)
await res.dispose();
return res;
}
catch (err) {
console.error(getConsoleLogPrefix() + "Failed to load last build. Error:", err);
console.info(getConsoleLogPrefix() + "Falling back to default binaries");
}
}
return getLlamaForOptions(getLlamaOptions);
}
return getLlamaForOptions(options ?? {});
}
// internal
export async function getLlamaForOptions({ gpu = defaultLlamaCppGpuSupport, logLevel = defaultLlamaCppLogLevel, logger = Llama.defaultConsoleLogger, build = defaultBuildOption, cmakeOptions = {}, existingPrebuiltBinaryMustMatchBuildOptions = false, usePrebuiltBinaries = true, progressLogs = true, skipDownload = defaultSkipDownload, maxThreads, vramPadding = defaultLlamaVramPadding, ramPadding = defaultLlamaRamPadding, debug = defaultLlamaCppDebugMode, numa = false, dryRun = false }, { updateLastBuildInfoOnCompile = false, skipLlamaInit = false, pipeBinaryTestErrorLogs = false } = {}) {
const platform = getPlatform();
const arch = process.arch;
if (logLevel == null)
logLevel = defaultLlamaCppLogLevel;
if (logger == null)
logger = Llama.defaultConsoleLogger;
if (build == null)
build = defaultBuildOption;
if (cmakeOptions == null)
cmakeOptions = {};
if (existingPrebuiltBinaryMustMatchBuildOptions == null)
existingPrebuiltBinaryMustMatchBuildOptions = false;
if (usePrebuiltBinaries == null)
usePrebuiltBinaries = true;
if (progressLogs == null)
progressLogs = true;
if (skipDownload == null)
skipDownload = defaultSkipDownload;
if (vramPadding == null)
vramPadding = defaultLlamaVramPadding;
if (ramPadding == null)
ramPadding = defaultLlamaRamPadding;
if (debug == null)
debug = defaultLlamaCppDebugMode;
if (dryRun == null)
dryRun = false;
if (dryRun) {
build = "never";
skipDownload = true;
skipLlamaInit = true;
}
const clonedLlamaCppRepoReleaseInfo = await getClonedLlamaCppRepoReleaseInfo();
let canUsePrebuiltBinaries = (build === "forceRebuild" || !usePrebuiltBinaries)
? false
: await getCanUsePrebuiltBinaries();
const buildGpusToTry = await getGpuTypesToUseForOption(gpu, { platform, arch });
const platformInfo = await getPlatformInfo();
const llamaCppInfo = {
repo: clonedLlamaCppRepoReleaseInfo?.llamaCppGithubRepo ?? builtinLlamaCppGitHubRepo,
release: clonedLlamaCppRepoReleaseInfo?.tag ?? builtinLlamaCppRelease
};
let shouldLogNoGlibcWarningIfNoBuildIsAvailable = false;
const canBuild = build !== "never" && !runningInsideAsar &&
(!runningInElectron || await hasBuildingFromSourceDependenciesInstalled());
if (canUsePrebuiltBinaries && platform === "linux") {
if (!(await detectGlibc({ platform }))) {
canUsePrebuiltBinaries = false;
shouldLogNoGlibcWarningIfNoBuildIsAvailable = true;
}
}
if (buildGpusToTry.length === 0)
throw new Error("No GPU types available to try building with");
if (build === "try") {
if (canUsePrebuiltBinaries) {
try {
return await getLlamaForOptions({
gpu,
logLevel,
logger,
build: "auto",
cmakeOptions,
existingPrebuiltBinaryMustMatchBuildOptions,
usePrebuiltBinaries: false,
progressLogs,
skipDownload,
maxThreads,
vramPadding,
ramPadding,
debug,
numa,
dryRun
});
}
catch (err) {
return await getLlamaForOptions({
gpu,
logLevel,
logger,
build: "never",
cmakeOptions,
existingPrebuiltBinaryMustMatchBuildOptions,
usePrebuiltBinaries,
progressLogs,
skipDownload,
maxThreads,
vramPadding,
ramPadding,
debug,
numa,
dryRun
});
}
}
else
build = "auto";
}
if (build === "auto" || build === "never") {
for (let i = 0; i < buildGpusToTry.length; i++) {
const gpu = buildGpusToTry[i];
const isLastItem = i === buildGpusToTry.length - 1;
if (gpu == null)
continue;
const buildOptions = {
customCmakeOptions: resolveCustomCmakeOptions(cmakeOptions),
progressLogs,
platform,
platformInfo,
arch,
gpu,
llamaCpp: llamaCppInfo
};
const llama = await loadExistingLlamaBinary({
buildOptions,
canUsePrebuiltBinaries,
logLevel,
logger,
existingPrebuiltBinaryMustMatchBuildOptions,
progressLogs,
platform,
platformInfo,
skipLlamaInit,
maxThreads,
vramPadding,
ramPadding,
fallbackMessage: !isLastItem
? `falling back to using ${getPrettyBuildGpuName(buildGpusToTry[i + 1])}`
: (canBuild
? "falling back to building from source"
: null),
debug,
numa,
pipeBinaryTestErrorLogs
});
if (llama != null) {
if (dryRun)
await llama.dispose();
return llama;
}
}
}
if (shouldLogNoGlibcWarningIfNoBuildIsAvailable && progressLogs)
await logNoGlibcWarning();
if (!canBuild)
throw new NoBinaryFoundError();
const llamaCppRepoCloned = await isLlamaCppRepoCloned();
if (!llamaCppRepoCloned) {
if (skipDownload)
throw new NoBinaryFoundError("No prebuilt binaries found, no llama.cpp source found and `skipDownload` or NODE_LLAMA_CPP_SKIP_DOWNLOAD env var is set to true, so llama.cpp cannot be built from source");
llamaCppInfo.repo = defaultLlamaCppGitHubRepo;
llamaCppInfo.release = defaultLlamaCppRelease;
if (isGithubReleaseNeedsResolving(llamaCppInfo.release)) {
const [owner, name] = defaultLlamaCppGitHubRepo.split("/");
llamaCppInfo.release = await resolveGithubRelease(owner, name, llamaCppInfo.release);
}
}
for (let i = 0; i < buildGpusToTry.length; i++) {
const gpu = buildGpusToTry[i];
const isLastItem = i === buildGpusToTry.length - 1;
if (gpu == null)
continue;
const buildOptions = {
customCmakeOptions: resolveCustomCmakeOptions(cmakeOptions),
progressLogs,
platform,
platformInfo,
arch,
gpu,
llamaCpp: llamaCppInfo
};
let llama = undefined;
try {
llama = await buildAndLoadLlamaBinary({
buildOptions,
skipDownload,
logLevel,
logger,
updateLastBuildInfoOnCompile,
maxThreads,
vramPadding,
ramPadding,
skipLlamaInit,
debug,
numa
});
}
catch (err) {
console.error(getConsoleLogPrefix() +
`Failed to build llama.cpp with ${getPrettyBuildGpuName(gpu)} support. ` +
(!isLastItem
? `falling back to building llama.cpp with ${getPrettyBuildGpuName(buildGpusToTry[i + 1])} support. `
: "") +
"Error:", err);
if (isLastItem)
throw err;
}
if (llama != null) {
if (dryRun)
await llama.dispose();
return llama;
}
}
throw new Error("Failed to build llama.cpp");
}
async function loadExistingLlamaBinary({ buildOptions, canUsePrebuiltBinaries, logLevel, logger, existingPrebuiltBinaryMustMatchBuildOptions, progressLogs, platform, platformInfo, skipLlamaInit, maxThreads, vramPadding, ramPadding, fallbackMessage, debug, numa, pipeBinaryTestErrorLogs }) {
const buildFolderName = await getBuildFolderNameForBuildOptions(buildOptions);
const localBuildFolder = path.join(llamaLocalBuildBinsDirectory, buildFolderName.withCustomCmakeOptions);
const localBuildBinPath = await getLocalBuildBinaryPath(buildFolderName.withCustomCmakeOptions);
await waitForLockfileRelease({ resourcePath: localBuildFolder });
if (localBuildBinPath != null) {
try {
const buildMetadata = await getLocalBuildBinaryBuildMetadata(buildFolderName.withCustomCmakeOptions);
const shouldTestBinaryBeforeLoading = getShouldTestBinaryBeforeLoading({
isPrebuiltBinary: false,
platform,
platformInfo,
buildMetadata
});
const resolvedBindingPath = await resolveActualBindingBinaryPath(localBuildBinPath);
const binaryCompatible = shouldTestBinaryBeforeLoading
? await testBindingBinary(resolvedBindingPath, undefined, buildOptions.gpu, undefined, pipeBinaryTestErrorLogs)
: true;
if (binaryCompatible) {
const binding = loadBindingModule(resolvedBindingPath);
return await Llama._create({
bindings: binding,
bindingPath: resolvedBindingPath,
buildType: "localBuild",
buildMetadata,
logLevel,
logger,
maxThreads,
vramPadding,
ramPadding,
skipLlamaInit,
debug,
numa
});
}
else if (progressLogs) {
console.warn(getConsoleLogPrefix() + "The local build binary was not built in the current system and is incompatible with it");
if (canUsePrebuiltBinaries)
console.info(getConsoleLogPrefix() + "Falling back to prebuilt binaries");
else if (fallbackMessage != null)
console.info(getConsoleLogPrefix() + fallbackMessage);
}
}
catch (err) {
const binaryDescription = describeBinary(buildOptions);
console.error(getConsoleLogPrefix() + `Failed to load a local build ${binaryDescription}. Error:`, err);
if (canUsePrebuiltBinaries)
console.info(getConsoleLogPrefix() + "Falling back to prebuilt binaries");
else if (fallbackMessage != null)
console.info(getConsoleLogPrefix() + fallbackMessage);
}
}
if (canUsePrebuiltBinaries) {
const prebuiltBinDetails = await getPrebuiltBinaryPath(buildOptions, existingPrebuiltBinaryMustMatchBuildOptions
? buildFolderName.withCustomCmakeOptions
: buildFolderName.withoutCustomCmakeOptions);
if (prebuiltBinDetails != null) {
try {
const buildMetadata = await getPrebuiltBinaryBuildMetadata(prebuiltBinDetails.folderPath, prebuiltBinDetails.folderName);
const shouldTestBinaryBeforeLoading = getShouldTestBinaryBeforeLoading({
isPrebuiltBinary: true,
platform,
platformInfo,
buildMetadata
});
const resolvedBindingPath = await resolveActualBindingBinaryPath(prebuiltBinDetails.binaryPath);
const resolvedExtBackendsPath = prebuiltBinDetails.extBackendsPath == null
? undefined
: await resolveActualBindingBinaryPath(prebuiltBinDetails.extBackendsPath);
const binaryCompatible = shouldTestBinaryBeforeLoading
? await testBindingBinary(resolvedBindingPath, resolvedExtBackendsPath, buildOptions.gpu, undefined, pipeBinaryTestErrorLogs)
: true;
if (binaryCompatible) {
const binding = loadBindingModule(resolvedBindingPath);
return await Llama._create({
bindings: binding,
bindingPath: resolvedBindingPath,
extBackendsPath: resolvedExtBackendsPath,
buildType: "prebuilt",
buildMetadata,
logLevel,
logger,
maxThreads,
vramPadding,
ramPadding,
skipLlamaInit,
debug,
numa
});
}
else if (progressLogs) {
const binaryDescription = describeBinary({
...buildOptions,
customCmakeOptions: existingPrebuiltBinaryMustMatchBuildOptions
? buildOptions.customCmakeOptions
: new Map()
});
console.warn(getConsoleLogPrefix() +
`The prebuilt ${binaryDescription} is not compatible with the current system` + (fallbackMessage != null
? ", " + fallbackMessage
: ""));
}
}
catch (err) {
const binaryDescription = describeBinary({
...buildOptions,
customCmakeOptions: existingPrebuiltBinaryMustMatchBuildOptions
? buildOptions.customCmakeOptions
: new Map()
});
console.error(getConsoleLogPrefix() + `Failed to load a prebuilt ${binaryDescription}` + (fallbackMessage != null
? ", " + fallbackMessage
: "") + ". Error:", err);
}
}
else if (progressLogs)
console.warn(getConsoleLogPrefix() + "A prebuilt binary was not found" + (fallbackMessage != null
? ", " + fallbackMessage
: ""));
}
return null;
}
async function buildAndLoadLlamaBinary({ buildOptions, skipDownload, logLevel, logger, updateLastBuildInfoOnCompile, maxThreads, vramPadding, ramPadding, skipLlamaInit, debug, numa }) {
const buildFolderName = await getBuildFolderNameForBuildOptions(buildOptions);
await compileLlamaCpp(buildOptions, {
ensureLlamaCppRepoIsCloned: !skipDownload,
downloadCmakeIfNeeded: true,
updateLastBuildInfo: updateLastBuildInfoOnCompile
});
const localBuildFolder = path.join(llamaLocalBuildBinsDirectory, buildFolderName.withCustomCmakeOptions);
await waitForLockfileRelease({ resourcePath: localBuildFolder });
const localBuildBinPath = await getLocalBuildBinaryPath(buildFolderName.withCustomCmakeOptions);
if (localBuildBinPath == null) {
throw new Error("Failed to build llama.cpp");
}
const resolvedBindingPath = await resolveActualBindingBinaryPath(localBuildBinPath);
const binding = loadBindingModule(resolvedBindingPath);
const buildMetadata = await getLocalBuildBinaryBuildMetadata(buildFolderName.withCustomCmakeOptions);
return await Llama._create({
bindings: binding,
bindingPath: resolvedBindingPath,
buildType: "localBuild",
buildMetadata,
logLevel,
logger,
maxThreads,
vramPadding,
ramPadding,
skipLlamaInit,
debug,
numa
});
}
async function logNoGlibcWarning() {
console.warn(getConsoleLogPrefix() +
"The prebuilt binaries cannot be used in this Linux distro, as `glibc` is not detected");
const linuxDistroInfo = await getLinuxDistroInfo();
const isAlpineLinux = await isDistroAlpineLinux(linuxDistroInfo);
if (isAlpineLinux) {
console.warn(getConsoleLogPrefix() +
"Using Alpine Linux is not recommended for running LLMs, " +
"as using GPU drivers is complicated and suboptimal in this distro at the moment.\n" +
getConsoleLogPrefix() +
"Consider using a different Linux distro, such as Debian or Ubuntu.\n" +
getConsoleLogPrefix() +
`If you're trying to run this inside of a Docker container, consider using "${recommendedBaseDockerImage}" image`);
}
}
function describeBinary(binaryOptions) {
let res = `binary for platform "${binaryOptions.platform}" "${binaryOptions.arch}"`;
const additions = [];
if (binaryOptions.gpu != false)
additions.push(`with ${getPrettyBuildGpuName(binaryOptions.gpu)} support`);
if (binaryOptions.customCmakeOptions.size > 0)
additions.push("with custom build options");
res += additions
.map((addition, index) => {
if (index === 0)
return " " + addition;
if (additions.length === 2)
return " and " + addition;
if (index === additions.length - 1)
return " and " + addition;
return ", " + addition;
})
.join("");
return res;
}
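As a quick illustration of the strings `describeBinary` produces (a sketch; it assumes `getPrettyBuildGpuName("cuda")` renders as "CUDA", and the CMake option shown is an arbitrary example):

```ts
describeBinary({
    platform: "linux",
    arch: "x64",
    gpu: "cuda",
    customCmakeOptions: new Map([["GGML_CUDA_FORCE_MMQ", "ON"]]) // arbitrary example option
});
// -> 'binary for platform "linux" "x64" with CUDA support and with custom build options'
```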
function loadBindingModule(bindingModulePath) {
// each llama instance has its own settings, such as a different logger, so we have to make sure we load a new instance every time
try {
delete require.cache[require.resolve(bindingModulePath)];
}
catch (err) { }
try {
const binding = require(bindingModulePath);
return binding;
}
finally {
try {
delete require.cache[require.resolve(bindingModulePath)];
}
catch (err) { }
}
}
function getShouldTestBinaryBeforeLoading({ isPrebuiltBinary, platform, platformInfo, buildMetadata }) {
if (platform === "linux") {
if (isPrebuiltBinary)
return true;
if (platformInfo.name !== buildMetadata.buildOptions.platformInfo.name ||
platformInfo.version !== buildMetadata.buildOptions.platformInfo.version)
return true;
}
else if (platform === "win") {
if (buildMetadata.buildOptions.gpu !== false)
return true;
}
return false;
}
//# sourceMappingURL=getLlama.js.map
1
node_modules/node-llama-cpp/dist/bindings/getLlama.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/getLlama.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
72
node_modules/node-llama-cpp/dist/bindings/types.d.ts
generated
vendored
Normal file
72
node_modules/node-llama-cpp/dist/bindings/types.d.ts
generated
vendored
Normal file
@@ -0,0 +1,72 @@
import process from "process";
import { BinaryPlatform } from "./utils/getPlatform.js";
import { BinaryPlatformInfo } from "./utils/getPlatformInfo.js";
export declare const buildGpuOptions: readonly ["metal", "cuda", "vulkan", false];
export type LlamaGpuType = "metal" | "cuda" | "vulkan" | false;
export declare const nodeLlamaCppGpuOptions: readonly ["auto", "metal", "cuda", "vulkan", false];
export declare const nodeLlamaCppGpuOffStringOptions: readonly ["false", "off", "none", "disable", "disabled"];
export type BuildGpu = (typeof buildGpuOptions)[number];
export type BuildOptions = {
customCmakeOptions: Map<string, string>;
progressLogs: boolean;
platform: BinaryPlatform;
platformInfo: BinaryPlatformInfo;
arch: typeof process.arch;
gpu: BuildGpu;
llamaCpp: {
repo: string;
release: string;
};
};
export declare const llamaNumaOptions: ["distribute", "isolate", "numactl", "mirror", false];
export type LlamaNuma = false | "distribute" | "isolate" | "numactl" | "mirror";
export type BuildOptionsJSON = Omit<BuildOptions, "customCmakeOptions"> & {
customCmakeOptions: Record<string, string>;
};
export declare function parseNodeLlamaCppGpuOption(option: (typeof nodeLlamaCppGpuOptions)[number] | (typeof nodeLlamaCppGpuOffStringOptions)[number]): BuildGpu | "auto";
export declare function parseNumaOption(option: (typeof llamaNumaOptions)[number] | (typeof nodeLlamaCppGpuOffStringOptions)[number]): LlamaNuma;
export declare function convertBuildOptionsJSONToBuildOptions(buildOptionsJSON: BuildOptionsJSON): BuildOptions;
export declare function convertBuildOptionsToBuildOptionsJSON(buildOptions: BuildOptions): BuildOptionsJSON;
export type BuildMetadataFile = {
buildOptions: BuildOptionsJSON;
};
export declare enum LlamaLogLevel {
disabled = "disabled",
fatal = "fatal",
error = "error",
warn = "warn",
info = "info",
log = "log",
debug = "debug"
}
export declare const LlamaLogLevelValues: readonly [LlamaLogLevel.disabled, LlamaLogLevel.fatal, LlamaLogLevel.error, LlamaLogLevel.warn, LlamaLogLevel.info, LlamaLogLevel.log, LlamaLogLevel.debug];
export declare enum LlamaVocabularyType {
none = "none",
spm = "spm",
bpe = "bpe",
wpm = "wpm",
ugm = "ugm",
rwkv = "rwkv",
plamo2 = "plamo2"
}
export declare const LlamaVocabularyTypeValues: readonly [LlamaVocabularyType.none, LlamaVocabularyType.spm, LlamaVocabularyType.bpe, LlamaVocabularyType.wpm, LlamaVocabularyType.ugm, LlamaVocabularyType.rwkv, LlamaVocabularyType.plamo2];
/**
 * Check if a log level is higher than another log level
 * @example
 * ```ts
 * LlamaLogLevelGreaterThan(LlamaLogLevel.error, LlamaLogLevel.info); // true
 * ```
 */
export declare function LlamaLogLevelGreaterThan(a: LlamaLogLevel, b: LlamaLogLevel): boolean;
/**
 * Check if a log level is higher than or equal to another log level
 * @example
 * ```ts
 * LlamaLogLevelGreaterThanOrEqual(LlamaLogLevel.error, LlamaLogLevel.info); // true
 * LlamaLogLevelGreaterThanOrEqual(LlamaLogLevel.error, LlamaLogLevel.error); // true
 * ```
 */
export declare function LlamaLogLevelGreaterThanOrEqual(a: LlamaLogLevel, b: LlamaLogLevel): boolean;
export declare const enum LlamaLocks {
loadToMemory = "loadToMemory"
}
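A short sketch of how the parsing helpers and log-level comparison declared above behave, based on the implementations in `types.js` below:

```ts
parseNodeLlamaCppGpuOption("off");  // false ("off" is a GPU-off string option)
parseNodeLlamaCppGpuOption("cuda"); // "cuda"
parseNumaOption("disabled");        // false
parseNumaOption("mirror");          // "mirror"

// "Higher" means more severe: error outranks info.
LlamaLogLevelGreaterThan(LlamaLogLevel.error, LlamaLogLevel.info); // true
```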
105
node_modules/node-llama-cpp/dist/bindings/types.js
generated
vendored
Normal file
105
node_modules/node-llama-cpp/dist/bindings/types.js
generated
vendored
Normal file
@@ -0,0 +1,105 @@
export const buildGpuOptions = ["metal", "cuda", "vulkan", false];
export const nodeLlamaCppGpuOptions = [
"auto",
...buildGpuOptions
];
export const nodeLlamaCppGpuOffStringOptions = ["false", "off", "none", "disable", "disabled"];
export const llamaNumaOptions = ["distribute", "isolate", "numactl", "mirror", false];
export function parseNodeLlamaCppGpuOption(option) {
function optionIsGpuOff(opt) {
return nodeLlamaCppGpuOffStringOptions.includes(opt);
}
if (optionIsGpuOff(option))
return false;
else if (option === "auto")
return "auto";
if (buildGpuOptions.includes(option))
return option;
return "auto";
}
export function parseNumaOption(option) {
function optionIsGpuOff(opt) {
return nodeLlamaCppGpuOffStringOptions.includes(opt);
}
if (optionIsGpuOff(option))
return false;
if (llamaNumaOptions.includes(option))
return option;
return false;
}
export function convertBuildOptionsJSONToBuildOptions(buildOptionsJSON) {
return {
...buildOptionsJSON,
customCmakeOptions: new Map(Object.entries(buildOptionsJSON.customCmakeOptions))
};
}
export function convertBuildOptionsToBuildOptionsJSON(buildOptions) {
return {
...buildOptions,
customCmakeOptions: Object.fromEntries(buildOptions.customCmakeOptions)
};
}
export var LlamaLogLevel;
(function (LlamaLogLevel) {
LlamaLogLevel["disabled"] = "disabled";
LlamaLogLevel["fatal"] = "fatal";
LlamaLogLevel["error"] = "error";
LlamaLogLevel["warn"] = "warn";
LlamaLogLevel["info"] = "info";
LlamaLogLevel["log"] = "log";
LlamaLogLevel["debug"] = "debug";
})(LlamaLogLevel || (LlamaLogLevel = {}));
export const LlamaLogLevelValues = Object.freeze([
LlamaLogLevel.disabled,
LlamaLogLevel.fatal,
LlamaLogLevel.error,
LlamaLogLevel.warn,
LlamaLogLevel.info,
LlamaLogLevel.log,
LlamaLogLevel.debug
]);
export var LlamaVocabularyType;
(function (LlamaVocabularyType) {
LlamaVocabularyType["none"] = "none";
LlamaVocabularyType["spm"] = "spm";
LlamaVocabularyType["bpe"] = "bpe";
LlamaVocabularyType["wpm"] = "wpm";
LlamaVocabularyType["ugm"] = "ugm";
LlamaVocabularyType["rwkv"] = "rwkv";
LlamaVocabularyType["plamo2"] = "plamo2";
})(LlamaVocabularyType || (LlamaVocabularyType = {}));
export const LlamaVocabularyTypeValues = Object.freeze([
LlamaVocabularyType.none,
LlamaVocabularyType.spm,
LlamaVocabularyType.bpe,
LlamaVocabularyType.wpm,
LlamaVocabularyType.ugm,
LlamaVocabularyType.rwkv,
LlamaVocabularyType.plamo2
]);
/**
 * Check if a log level is higher than another log level
 * @example
 * ```ts
 * LlamaLogLevelGreaterThan(LlamaLogLevel.error, LlamaLogLevel.info); // true
 * ```
 */
export function LlamaLogLevelGreaterThan(a, b) {
return LlamaLogLevelValues.indexOf(a) < LlamaLogLevelValues.indexOf(b);
}
/**
 * Check if a log level is higher than or equal to another log level
 * @example
 * ```ts
 * LlamaLogLevelGreaterThanOrEqual(LlamaLogLevel.error, LlamaLogLevel.info); // true
 * LlamaLogLevelGreaterThanOrEqual(LlamaLogLevel.error, LlamaLogLevel.error); // true
 * ```
 */
export function LlamaLogLevelGreaterThanOrEqual(a, b) {
return LlamaLogLevelValues.indexOf(a) <= LlamaLogLevelValues.indexOf(b);
}
export var LlamaLocks;
(function (LlamaLocks) {
LlamaLocks["loadToMemory"] = "loadToMemory";
})(LlamaLocks || (LlamaLocks = {}));
//# sourceMappingURL=types.js.map
1
node_modules/node-llama-cpp/dist/bindings/types.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/types.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"types.js","sourceRoot":"","sources":["../../src/bindings/types.ts"],"names":[],"mappings":"AAIA,MAAM,CAAC,MAAM,eAAe,GAAG,CAAC,OAAO,EAAE,MAAM,EAAE,QAAQ,EAAE,KAAK,CAAU,CAAC;AAE3E,MAAM,CAAC,MAAM,sBAAsB,GAAG;IAClC,MAAM;IACN,GAAG,eAAe;CACZ,CAAC;AACX,MAAM,CAAC,MAAM,+BAA+B,GAAG,CAAC,OAAO,EAAE,KAAK,EAAE,MAAM,EAAE,SAAS,EAAE,UAAU,CAAU,CAAC;AAcxG,MAAM,CAAC,MAAM,gBAAgB,GAAG,CAAC,YAAY,EAAE,SAAS,EAAE,SAAS,EAAE,QAAQ,EAAE,KAAK,CAAgC,CAAC;AAOrH,MAAM,UAAU,0BAA0B,CAAC,MAAkG;IACzI,SAAS,cAAc,CAAC,GAAkB;QACtC,OAAO,+BAA+B,CAAC,QAAQ,CAAC,GAAuD,CAAC,CAAC;IAC7G,CAAC;IAED,IAAI,cAAc,CAAC,MAAM,CAAC;QACtB,OAAO,KAAK,CAAC;SACZ,IAAI,MAAM,KAAK,MAAM;QACtB,OAAO,MAAM,CAAC;IAElB,IAAI,eAAe,CAAC,QAAQ,CAAC,MAAM,CAAC;QAChC,OAAO,MAAM,CAAC;IAElB,OAAO,MAAM,CAAC;AAClB,CAAC;AAED,MAAM,UAAU,eAAe,CAAC,MAA4F;IACxH,SAAS,cAAc,CAAC,GAAkB;QACtC,OAAO,+BAA+B,CAAC,QAAQ,CAAC,GAAuD,CAAC,CAAC;IAC7G,CAAC;IAED,IAAI,cAAc,CAAC,MAAM,CAAC;QACtB,OAAO,KAAK,CAAC;IAEjB,IAAI,gBAAgB,CAAC,QAAQ,CAAC,MAAM,CAAC;QACjC,OAAO,MAAM,CAAC;IAElB,OAAO,KAAK,CAAC;AACjB,CAAC;AAGD,MAAM,UAAU,qCAAqC,CAAC,gBAAkC;IACpF,OAAO;QACH,GAAG,gBAAgB;QACnB,kBAAkB,EAAE,IAAI,GAAG,CAAC,MAAM,CAAC,OAAO,CAAC,gBAAgB,CAAC,kBAAkB,CAAC,CAAC;KACnF,CAAC;AACN,CAAC;AAED,MAAM,UAAU,qCAAqC,CAAC,YAA0B;IAC5E,OAAO;QACH,GAAG,YAAY;QACf,kBAAkB,EAAE,MAAM,CAAC,WAAW,CAAC,YAAY,CAAC,kBAAkB,CAAC;KAC1E,CAAC;AACN,CAAC;AAMD,MAAM,CAAN,IAAY,aAQX;AARD,WAAY,aAAa;IACrB,sCAAqB,CAAA;IACrB,gCAAe,CAAA;IACf,gCAAe,CAAA;IACf,8BAAa,CAAA;IACb,8BAAa,CAAA;IACb,4BAAW,CAAA;IACX,gCAAe,CAAA;AACnB,CAAC,EARW,aAAa,KAAb,aAAa,QAQxB;AACD,MAAM,CAAC,MAAM,mBAAmB,GAAG,MAAM,CAAC,MAAM,CAAC;IAC7C,aAAa,CAAC,QAAQ;IACtB,aAAa,CAAC,KAAK;IACnB,aAAa,CAAC,KAAK;IACnB,aAAa,CAAC,IAAI;IAClB,aAAa,CAAC,IAAI;IAClB,aAAa,CAAC,GAAG;IACjB,aAAa,CAAC,KAAK;CACb,CAAC,CAAC;AAEZ,MAAM,CAAN,IAAY,mBAQX;AARD,WAAY,mBAAmB;IAC3B,oCAAa,CAAA;IACb,kCAAW,CAAA;IACX,kCAAW,CAAA;IACX,kCAAW,CAAA;IACX,kCAAW,CAAA;IACX,oCAAa,CAAA;IACb,wCAAiB,CAAA;AACrB,CAAC,EARW,mBAAmB,KAAnB,mBAAmB,QAQ9B;AACD,MAAM,CAAC,MAAM,yBAAyB,GAAG,MAAM,CAAC,MAAM,CAAC;IACnD,mBAAmB,CAAC,IAAI;IACxB,mBAAmB,CAAC,GAAG;IACvB,mBAAmB,CAAC,GAAG;IACvB,mBAAmB,CAAC,GAAG;IACvB,mBAAmB,CAAC,GAAG;IACvB,mBAAmB,CAAC,IAAI;IACxB,mBAAmB,CAAC,MAAM;CACpB,CAAC,CAAC;AAEZ;;;;;;GAMG;AACH,MAAM,UAAU,wBAAwB,CAAC,CAAgB,EAAE,CAAgB;IACvE,OAAO,mBAAmB,CAAC,OAAO,CAAC,CAAC,CAAC,GAAG,mBAAmB,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;AAC3E,CAAC;AAED;;;;;;;GAOG;AACH,MAAM,UAAU,+BAA+B,CAAC,CAAgB,EAAE,CAAgB;IAC9E,OAAO,mBAAmB,CAAC,OAAO,CAAC,CAAC,CAAC,IAAI,mBAAmB,CAAC,OAAO,CAAC,CAAC,CAAC,CAAC;AAC5E,CAAC;AAED,MAAM,CAAN,IAAkB,UAEjB;AAFD,WAAkB,UAAU;IACxB,2CAA6B,CAAA;AACjC,CAAC,EAFiB,UAAU,KAAV,UAAU,QAE3B"}
23
node_modules/node-llama-cpp/dist/bindings/utils/MemoryOrchestrator.d.ts
generated
vendored
Normal file
23
node_modules/node-llama-cpp/dist/bindings/utils/MemoryOrchestrator.d.ts
generated
vendored
Normal file
@@ -0,0 +1,23 @@
import { EventRelay } from "lifecycle-utils";
export declare class MemoryOrchestrator {
readonly onMemoryReservationRelease: EventRelay<void>;
constructor(getMemoryState: () => {
free: number;
total: number;
unifiedSize: number;
});
reserveMemory(bytes: number): MemoryReservation;
getMemoryState(): Promise<{
free: number;
total: number;
unifiedSize: number;
}>;
}
export declare class MemoryReservation {
private constructor();
get size(): number;
get disposed(): boolean;
[Symbol.dispose](): void;
dispose(): void;
static _create(bytes: number, dispose: () => void): MemoryReservation;
}
50
node_modules/node-llama-cpp/dist/bindings/utils/MemoryOrchestrator.js
generated
vendored
Normal file
50
node_modules/node-llama-cpp/dist/bindings/utils/MemoryOrchestrator.js
generated
vendored
Normal file
@@ -0,0 +1,50 @@
import { EventRelay } from "lifecycle-utils";
export class MemoryOrchestrator {
/** @internal */ _getMemoryState;
/** @internal */ _reservedMemory = 0;
onMemoryReservationRelease = new EventRelay();
constructor(getMemoryState) {
this._getMemoryState = getMemoryState;
}
reserveMemory(bytes) {
this._reservedMemory += bytes;
return MemoryReservation._create(bytes, () => {
this._reservedMemory -= bytes;
this.onMemoryReservationRelease.dispatchEvent();
});
}
async getMemoryState() {
const { free, total, unifiedSize } = this._getMemoryState();
return {
free: Math.max(0, free - this._reservedMemory),
total,
unifiedSize
};
}
}
export class MemoryReservation {
/** @internal */ _size;
/** @internal */ _dispose;
constructor(size, dispose) {
this._size = size;
this._dispose = dispose;
}
get size() {
return this._size;
}
get disposed() {
return this._dispose == null;
}
[Symbol.dispose]() {
this.dispose();
}
dispose() {
if (this._dispose != null)
this._dispose();
this._dispose = null;
}
static _create(bytes, dispose) {
return new MemoryReservation(bytes, dispose);
}
}
//# sourceMappingURL=MemoryOrchestrator.js.map
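A usage sketch for `MemoryOrchestrator` (the memory-state numbers are made up for illustration; in the library the callback reads real values from the bindings):

```ts
const GiB = 1024 ** 3;
const orchestrator = new MemoryOrchestrator(() => ({
    free: 8 * GiB, // hypothetical values
    total: 16 * GiB,
    unifiedSize: 0
}));

const reservation = orchestrator.reserveMemory(2 * GiB);
(await orchestrator.getMemoryState()).free; // 6GiB: free minus the reservation

reservation.dispose(); // releases the reservation and fires onMemoryReservationRelease
(await orchestrator.getMemoryState()).free; // back to 8GiB
```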
1
node_modules/node-llama-cpp/dist/bindings/utils/MemoryOrchestrator.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/utils/MemoryOrchestrator.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"MemoryOrchestrator.js","sourceRoot":"","sources":["../../../src/bindings/utils/MemoryOrchestrator.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAE3C,MAAM,OAAO,kBAAkB;IAC3B,gBAAgB,CAAkB,eAAe,CAA2D;IAC5G,gBAAgB,CAAS,eAAe,GAAW,CAAC,CAAC;IAErC,0BAA0B,GAAG,IAAI,UAAU,EAAQ,CAAC;IAEpE,YAAmB,cAAwE;QACvF,IAAI,CAAC,eAAe,GAAG,cAAc,CAAC;IAC1C,CAAC;IAEM,aAAa,CAAC,KAAa;QAC9B,IAAI,CAAC,eAAe,IAAI,KAAK,CAAC;QAE9B,OAAO,iBAAiB,CAAC,OAAO,CAAC,KAAK,EAAE,GAAG,EAAE;YACzC,IAAI,CAAC,eAAe,IAAI,KAAK,CAAC;YAC9B,IAAI,CAAC,0BAA0B,CAAC,aAAa,EAAE,CAAC;QACpD,CAAC,CAAC,CAAC;IACP,CAAC;IAEM,KAAK,CAAC,cAAc;QACvB,MAAM,EAAC,IAAI,EAAE,KAAK,EAAE,WAAW,EAAC,GAAG,IAAI,CAAC,eAAe,EAAE,CAAC;QAE1D,OAAO;YACH,IAAI,EAAE,IAAI,CAAC,GAAG,CAAC,CAAC,EAAE,IAAI,GAAG,IAAI,CAAC,eAAe,CAAC;YAC9C,KAAK;YACL,WAAW;SACd,CAAC;IACN,CAAC;CACJ;AAED,MAAM,OAAO,iBAAiB;IAC1B,gBAAgB,CAAkB,KAAK,CAAS;IAChD,gBAAgB,CAAS,QAAQ,CAAsB;IAEvD,YAAoB,IAAY,EAAE,OAAmB;QACjD,IAAI,CAAC,KAAK,GAAG,IAAI,CAAC;QAClB,IAAI,CAAC,QAAQ,GAAG,OAAO,CAAC;IAC5B,CAAC;IAED,IAAW,IAAI;QACX,OAAO,IAAI,CAAC,KAAK,CAAC;IACtB,CAAC;IAED,IAAW,QAAQ;QACf,OAAO,IAAI,CAAC,QAAQ,IAAI,IAAI,CAAC;IACjC,CAAC;IAEM,CAAC,MAAM,CAAC,OAAO,CAAC;QACnB,IAAI,CAAC,OAAO,EAAE,CAAC;IACnB,CAAC;IAEM,OAAO;QACV,IAAI,IAAI,CAAC,QAAQ,IAAI,IAAI;YACrB,IAAI,CAAC,QAAQ,EAAE,CAAC;QAEpB,IAAI,CAAC,QAAQ,GAAG,IAAI,CAAC;IACzB,CAAC;IAEM,MAAM,CAAC,OAAO,CAAC,KAAa,EAAE,OAAmB;QACpD,OAAO,IAAI,iBAAiB,CAAC,KAAK,EAAE,OAAO,CAAC,CAAC;IACjD,CAAC;CACJ"}
2
node_modules/node-llama-cpp/dist/bindings/utils/NoBinaryFoundError.d.ts
generated
vendored
Normal file
2
node_modules/node-llama-cpp/dist/bindings/utils/NoBinaryFoundError.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export declare class NoBinaryFoundError extends Error {
}
7
node_modules/node-llama-cpp/dist/bindings/utils/NoBinaryFoundError.js
generated
vendored
Normal file
7
node_modules/node-llama-cpp/dist/bindings/utils/NoBinaryFoundError.js
generated
vendored
Normal file
@@ -0,0 +1,7 @@
export class NoBinaryFoundError extends Error {
/** @internal */
constructor(message = "NoBinaryFoundError") {
super(message);
}
}
//# sourceMappingURL=NoBinaryFoundError.js.map
1
node_modules/node-llama-cpp/dist/bindings/utils/NoBinaryFoundError.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/utils/NoBinaryFoundError.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"NoBinaryFoundError.js","sourceRoot":"","sources":["../../../src/bindings/utils/NoBinaryFoundError.ts"],"names":[],"mappings":"AAAA,MAAM,OAAO,kBAAmB,SAAQ,KAAK;IACzC,gBAAgB;IAChB,YAAmB,UAAkB,oBAAoB;QACrD,KAAK,CAAC,OAAO,CAAC,CAAC;IACnB,CAAC;CACJ"}
5
node_modules/node-llama-cpp/dist/bindings/utils/asyncEvery.d.ts
generated
vendored
Normal file
5
node_modules/node-llama-cpp/dist/bindings/utils/asyncEvery.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
/**
 * Returns a promise that resolves to true if every promise in the array resolves to true, otherwise false.
 * Note that this function will not throw on error and instead will log the error to the console.
 */
export declare function asyncEvery(promises: Promise<boolean>[]): Promise<boolean>;
15
node_modules/node-llama-cpp/dist/bindings/utils/asyncEvery.js
generated
vendored
Normal file
15
node_modules/node-llama-cpp/dist/bindings/utils/asyncEvery.js
generated
vendored
Normal file
@@ -0,0 +1,15 @@
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
/**
 * Returns a promise that resolves to true if every promise in the array resolves to true, otherwise false.
 * Note that this function will not throw on error and instead will log the error to the console.
 */
export async function asyncEvery(promises) {
try {
return (await Promise.all(promises)).every(Boolean);
}
catch (err) {
console.error(getConsoleLogPrefix(false, false), err);
return false;
}
}
//# sourceMappingURL=asyncEvery.js.map
1
node_modules/node-llama-cpp/dist/bindings/utils/asyncEvery.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/utils/asyncEvery.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"asyncEvery.js","sourceRoot":"","sources":["../../../src/bindings/utils/asyncEvery.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AAEvE;;;GAGG;AACH,MAAM,CAAC,KAAK,UAAU,UAAU,CAAC,QAA4B;IACzD,IAAI,CAAC;QACD,OAAO,CAAC,MAAM,OAAO,CAAC,GAAG,CAAC,QAAQ,CAAC,CAAC,CAAC,KAAK,CAAC,OAAO,CAAC,CAAC;IACxD,CAAC;IAAC,OAAO,GAAG,EAAE,CAAC;QACX,OAAO,CAAC,KAAK,CAAC,mBAAmB,CAAC,KAAK,EAAE,KAAK,CAAC,EAAE,GAAG,CAAC,CAAC;QAEtD,OAAO,KAAK,CAAC;IACjB,CAAC;AACL,CAAC"}
5
node_modules/node-llama-cpp/dist/bindings/utils/asyncSome.d.ts
generated
vendored
Normal file
5
node_modules/node-llama-cpp/dist/bindings/utils/asyncSome.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
/**
 * Returns a promise that fulfills as soon as any of the promises resolves to `true`.
 * Note that this function will not throw on error and instead will log the error to the console.
 */
export declare function asyncSome(promises: Promise<boolean>[]): Promise<boolean>;
29
node_modules/node-llama-cpp/dist/bindings/utils/asyncSome.js
generated
vendored
Normal file
29
node_modules/node-llama-cpp/dist/bindings/utils/asyncSome.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
/**
 * Returns a promise that fulfills as soon as any of the promises resolves to `true`.
 * Note that this function will not throw on error and instead will log the error to the console.
 */
export async function asyncSome(promises) {
if (promises.length === 0)
return Promise.resolve(false);
return new Promise((resolve) => {
let fulfilled = 0;
for (const promise of promises) {
promise
.then((result) => {
if (result)
return void resolve(true);
fulfilled++;
if (fulfilled === promises.length)
resolve(false);
})
.catch((err) => {
console.error(getConsoleLogPrefix(false, false), err);
fulfilled++;
if (fulfilled === promises.length)
resolve(false);
});
}
});
}
//# sourceMappingURL=asyncSome.js.map
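A sketch contrasting `asyncSome` with `asyncEvery` from the sibling module:

```ts
// Resolves to true as soon as the first promise fulfills with true,
// without waiting for the slower one.
await asyncSome([
    Promise.resolve(true),
    new Promise<boolean>((resolve) => setTimeout(() => resolve(false), 1000))
]); // true

// asyncEvery waits for all of the promises and ANDs the results;
// a rejection is logged and yields false instead of throwing.
await asyncEvery([Promise.resolve(true), Promise.reject(new Error("boom"))]); // false
```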
1
node_modules/node-llama-cpp/dist/bindings/utils/asyncSome.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/utils/asyncSome.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"asyncSome.js","sourceRoot":"","sources":["../../../src/bindings/utils/asyncSome.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AAEvE;;;GAGG;AACH,MAAM,CAAC,KAAK,UAAU,SAAS,CAAC,QAA4B;IACxD,IAAI,QAAQ,CAAC,MAAM,KAAK,CAAC;QACrB,OAAO,OAAO,CAAC,OAAO,CAAC,KAAK,CAAC,CAAC;IAElC,OAAO,IAAI,OAAO,CAAC,CAAC,OAAO,EAAE,EAAE;QAC3B,IAAI,SAAS,GAAG,CAAC,CAAC;QAElB,KAAK,MAAM,OAAO,IAAI,QAAQ,EAAE,CAAC;YAC7B,OAAO;iBACF,IAAI,CAAC,CAAC,MAAM,EAAE,EAAE;gBACb,IAAI,MAAM;oBACN,OAAO,KAAK,OAAO,CAAC,IAAI,CAAC,CAAC;gBAE9B,SAAS,EAAE,CAAC;gBACZ,IAAI,SAAS,KAAK,QAAQ,CAAC,MAAM;oBAC7B,OAAO,CAAC,KAAK,CAAC,CAAC;YACvB,CAAC,CAAC;iBACD,KAAK,CAAC,CAAC,GAAG,EAAE,EAAE;gBACX,OAAO,CAAC,KAAK,CAAC,mBAAmB,CAAC,KAAK,EAAE,KAAK,CAAC,EAAE,GAAG,CAAC,CAAC;gBAEtD,SAAS,EAAE,CAAC;gBACZ,IAAI,SAAS,KAAK,QAAQ,CAAC,MAAM;oBAC7B,OAAO,CAAC,KAAK,CAAC,CAAC;YACvB,CAAC,CAAC,CAAC;QACX,CAAC;IACL,CAAC,CAAC,CAAC;AACP,CAAC"}
6
node_modules/node-llama-cpp/dist/bindings/utils/binariesGithubRelease.d.ts
generated
vendored
Normal file
6
node_modules/node-llama-cpp/dist/bindings/utils/binariesGithubRelease.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
type BinariesGithubReleaseFile = {
release: "latest" | string;
};
export declare function getBinariesGithubRelease(): Promise<string>;
export declare function setBinariesGithubRelease(release: BinariesGithubReleaseFile["release"]): Promise<void>;
export {};
15
node_modules/node-llama-cpp/dist/bindings/utils/binariesGithubRelease.js
generated
vendored
Normal file
15
node_modules/node-llama-cpp/dist/bindings/utils/binariesGithubRelease.js
generated
vendored
Normal file
@@ -0,0 +1,15 @@
import fs from "fs-extra";
import { binariesGithubReleasePath } from "../../config.js";
export async function getBinariesGithubRelease() {
const binariesGithubRelease = await fs.readJson(binariesGithubReleasePath);
return binariesGithubRelease.release;
}
export async function setBinariesGithubRelease(release) {
const binariesGithubReleaseJson = {
release: release
};
await fs.writeJson(binariesGithubReleasePath, binariesGithubReleaseJson, {
spaces: 4
});
}
//# sourceMappingURL=binariesGithubRelease.js.map
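The round trip these two functions implement over the JSON file at `binariesGithubReleasePath` looks like this (a sketch):

```ts
await setBinariesGithubRelease("latest");         // writes { "release": "latest" }
const release = await getBinariesGithubRelease(); // "latest"
```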
1
node_modules/node-llama-cpp/dist/bindings/utils/binariesGithubRelease.js.map
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/utils/binariesGithubRelease.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"binariesGithubRelease.js","sourceRoot":"","sources":["../../../src/bindings/utils/binariesGithubRelease.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,UAAU,CAAC;AAC1B,OAAO,EAAC,yBAAyB,EAAC,MAAM,iBAAiB,CAAC;AAM1D,MAAM,CAAC,KAAK,UAAU,wBAAwB;IAC1C,MAAM,qBAAqB,GAA8B,MAAM,EAAE,CAAC,QAAQ,CAAC,yBAAyB,CAAC,CAAC;IAEtG,OAAO,qBAAqB,CAAC,OAAO,CAAC;AACzC,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,wBAAwB,CAAC,OAA6C;IACxF,MAAM,yBAAyB,GAA8B;QACzD,OAAO,EAAE,OAAO;KACnB,CAAC;IAEF,MAAM,EAAE,CAAC,SAAS,CAAC,yBAAyB,EAAE,yBAAyB,EAAE;QACrE,MAAM,EAAE,CAAC;KACZ,CAAC,CAAC;AACP,CAAC"}
1
node_modules/node-llama-cpp/dist/bindings/utils/clearAllLocalBuilds.d.ts
generated
vendored
Normal file
1
node_modules/node-llama-cpp/dist/bindings/utils/clearAllLocalBuilds.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
export declare function clearAllLocalBuilds(waitForLocks?: boolean): Promise<void>;
47
node_modules/node-llama-cpp/dist/bindings/utils/clearAllLocalBuilds.js
generated
vendored
Normal file
47
node_modules/node-llama-cpp/dist/bindings/utils/clearAllLocalBuilds.js
generated
vendored
Normal file
@@ -0,0 +1,47 @@
import path from "path";
import fs from "fs-extra";
import { lastBuildInfoJsonPath, llamaLocalBuildBinsDirectory } from "../../config.js";
import { clearTempFolder } from "../../utils/clearTempFolder.js";
import { withLockfile } from "../../utils/withLockfile.js";
import { isLockfileActive } from "../../utils/isLockfileActive.js";
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
export async function clearAllLocalBuilds(waitForLocks = false) {
    async function removeBuilds() {
        const itemsToRemove = Array.from(new Set((await fs.readdir(llamaLocalBuildBinsDirectory))
            .map((item) => (item.endsWith(".lock")
                ? item.slice(0, -".lock".length)
                : item))
            .filter((item) => !item.startsWith("."))));
        let hasLocks = false;
        const buildRemovals = itemsToRemove.map(async (item) => {
            const absolutePath = path.join(llamaLocalBuildBinsDirectory, item);
            const pathIsLocked = await isLockfileActive({ resourcePath: absolutePath });
            hasLocks ||= pathIsLocked;
            if (waitForLocks)
                await withLockfile({
                    resourcePath: absolutePath
                }, async () => {
                    await fs.remove(absolutePath);
                });
            else if (!pathIsLocked)
                await fs.remove(absolutePath);
        });
        return {
            buildRemovals,
            hasLocks
        };
    }
    if (await fs.pathExists(llamaLocalBuildBinsDirectory)) {
        const { hasLocks, buildRemovals } = await removeBuilds();
        if (hasLocks) {
            if (waitForLocks)
                console.log(getConsoleLogPrefix() + "Some builds are in progress. Waiting for those builds to finish before removing them.");
            else
                console.log(getConsoleLogPrefix() + "Some builds are in progress. Skipping the removal of those builds.");
        }
        await Promise.all(buildRemovals);
    }
    await fs.remove(lastBuildInfoJsonPath);
    await clearTempFolder();
}
//# sourceMappingURL=clearAllLocalBuilds.js.map
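// Usage sketch (illustrative, not part of the commit): the waitForLocks flag controls
// how builds with an active lockfile are treated - skipped by default, or awaited
// and then removed.
import { clearAllLocalBuilds } from "./clearAllLocalBuilds.js";

await clearAllLocalBuilds();     // remove only unlocked local builds
await clearAllLocalBuilds(true); // wait for in-progress builds, then remove everything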
1
node_modules/node-llama-cpp/dist/bindings/utils/clearAllLocalBuilds.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"clearAllLocalBuilds.js","sourceRoot":"","sources":["../../../src/bindings/utils/clearAllLocalBuilds.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,MAAM,CAAC;AACxB,OAAO,EAAE,MAAM,UAAU,CAAC;AAC1B,OAAO,EAAC,qBAAqB,EAAE,4BAA4B,EAAC,MAAM,iBAAiB,CAAC;AACpF,OAAO,EAAC,eAAe,EAAC,MAAM,gCAAgC,CAAC;AAC/D,OAAO,EAAC,YAAY,EAAC,MAAM,6BAA6B,CAAC;AACzD,OAAO,EAAC,gBAAgB,EAAC,MAAM,iCAAiC,CAAC;AACjE,OAAO,EAAC,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AAEvE,MAAM,CAAC,KAAK,UAAU,mBAAmB,CAAC,YAAY,GAAG,KAAK;IAC1D,KAAK,UAAU,YAAY;QACvB,MAAM,aAAa,GAAG,KAAK,CAAC,IAAI,CAC5B,IAAI,GAAG,CACH,CAAC,MAAM,EAAE,CAAC,OAAO,CAAC,4BAA4B,CAAC,CAAC;aAC3C,GAAG,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CACX,IAAI,CAAC,QAAQ,CAAC,OAAO,CAAC;YAClB,CAAC,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,OAAO,CAAC,MAAM,CAAC;YAChC,CAAC,CAAC,IAAI,CACb,CAAC;aACD,MAAM,CAAC,CAAC,IAAI,EAAE,EAAE,CAAC,CAAC,IAAI,CAAC,UAAU,CAAC,GAAG,CAAC,CAAC,CAC/C,CACJ,CAAC;QAEF,IAAI,QAAQ,GAAG,KAAK,CAAC;QACrB,MAAM,aAAa,GAAG,aAAa,CAAC,GAAG,CAAC,KAAK,EAAE,IAAI,EAAE,EAAE;YACnD,MAAM,YAAY,GAAG,IAAI,CAAC,IAAI,CAAC,4BAA4B,EAAE,IAAI,CAAC,CAAC;YACnE,MAAM,YAAY,GAAG,MAAM,gBAAgB,CAAC,EAAC,YAAY,EAAE,YAAY,EAAC,CAAC,CAAC;YAE1E,QAAQ,KAAK,YAAY,CAAC;YAE1B,IAAI,YAAY;gBACZ,MAAM,YAAY,CAAC;oBACf,YAAY,EAAE,YAAY;iBAC7B,EAAE,KAAK,IAAI,EAAE;oBACV,MAAM,EAAE,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;gBAClC,CAAC,CAAC,CAAC;iBACF,IAAI,CAAC,YAAY;gBAClB,MAAM,EAAE,CAAC,MAAM,CAAC,YAAY,CAAC,CAAC;QACtC,CAAC,CAAC,CAAC;QAEH,OAAO;YACH,aAAa;YACb,QAAQ;SACX,CAAC;IACN,CAAC;IAED,IAAI,MAAM,EAAE,CAAC,UAAU,CAAC,4BAA4B,CAAC,EAAE,CAAC;QACpD,MAAM,EAAC,QAAQ,EAAE,aAAa,EAAC,GAAG,MAAM,YAAY,EAAE,CAAC;QAEvD,IAAI,QAAQ,EAAE,CAAC;YACX,IAAI,YAAY;gBACZ,OAAO,CAAC,GAAG,CAAC,mBAAmB,EAAE,GAAG,uFAAuF,CAAC,CAAC;;gBAE7H,OAAO,CAAC,GAAG,CAAC,mBAAmB,EAAE,GAAG,oEAAoE,CAAC,CAAC;QAClH,CAAC;QAED,MAAM,OAAO,CAAC,GAAG,CAAC,aAAa,CAAC,CAAC;IACrC,CAAC;IAED,MAAM,EAAE,CAAC,MAAM,CAAC,qBAAqB,CAAC,CAAC;IACvC,MAAM,eAAe,EAAE,CAAC;AAC5B,CAAC"}
11
node_modules/node-llama-cpp/dist/bindings/utils/cloneLlamaCppRepo.d.ts
generated
vendored
Normal file
@@ -0,0 +1,11 @@
type ClonedLlamaCppRepoTagFile = {
    tag: string;
    llamaCppGithubRepo: string;
};
export declare function cloneLlamaCppRepo(githubOwner: string, githubRepo: string, tag: string, useBundles?: boolean, progressLogs?: boolean, recursive?: boolean): Promise<void>;
export declare function getClonedLlamaCppRepoReleaseInfo(): Promise<ClonedLlamaCppRepoTagFile | null>;
export declare function isLlamaCppRepoCloned(waitForLock?: boolean): Promise<boolean>;
export declare function ensureLlamaCppRepoIsCloned({ progressLogs }?: {
    progressLogs?: boolean;
}): Promise<void>;
export {};
166
node_modules/node-llama-cpp/dist/bindings/utils/cloneLlamaCppRepo.js
generated
vendored
Normal file
@@ -0,0 +1,166 @@
import path from "path";
import simpleGit from "simple-git";
import chalk from "chalk";
import fs from "fs-extra";
import which from "which";
import { defaultLlamaCppGitHubRepo, defaultLlamaCppRelease, enableRecursiveClone, llamaCppDirectory, llamaCppDirectoryInfoFilePath } from "../../config.js";
import { getGitBundlePathForRelease } from "../../utils/gitReleaseBundles.js";
import { withLockfile } from "../../utils/withLockfile.js";
import { waitForLockfileRelease } from "../../utils/waitForLockfileRelease.js";
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
import { isLockfileActive } from "../../utils/isLockfileActive.js";
import { isGithubReleaseNeedsResolving, resolveGithubRelease } from "../../utils/resolveGithubRelease.js";
import withStatusLogs from "../../utils/withStatusLogs.js";
import { withProgressLog } from "../../utils/withProgressLog.js";
import { logDistroInstallInstruction } from "./logDistroInstallInstruction.js";
export async function cloneLlamaCppRepo(githubOwner, githubRepo, tag, useBundles = true, progressLogs = true, recursive = enableRecursiveClone) {
    const gitBundleForTag = !useBundles ? null : await getGitBundlePathForRelease(githubOwner, githubRepo, tag);
    const remoteGitUrl = `https://github.com/${githubOwner}/${githubRepo}.git`;
    async function withGitCloneProgress(cloneName, callback) {
        if (!progressLogs)
            return await callback(simpleGit({}));
        const repoText = `${githubOwner}/${githubRepo} (${cloneName})`;
        let lastProgress = 0;
        let stages = 1;
        return await withProgressLog({
            loadingText: chalk.bold("Cloning " + repoText),
            successText: chalk.blue("Cloned " + repoText),
            failText: chalk.blue("Failed to clone " + repoText),
            progressFractionDigits: false
        }, async (progressUpdater) => {
            const gitWithCloneProgress = simpleGit({
                progress({ progress }) {
                    const currentProgress = progress / 100;
                    if (currentProgress < lastProgress)
                        stages++;
                    lastProgress = currentProgress;
                    progressUpdater.setProgress(currentProgress, stages > 1
                        ? `(Stage ${stages})`
                        : undefined);
                }
            });
            const res = await callback(gitWithCloneProgress);
            progressUpdater.setProgress(1);
            return res;
        });
    }
    await withLockfile({
        resourcePath: llamaCppDirectory
    }, async () => {
        await fs.remove(llamaCppDirectory);
        await fs.remove(llamaCppDirectoryInfoFilePath);
        if (gitBundleForTag != null) {
            try {
                await withGitCloneProgress("local bundle", async (gitWithCloneProgress) => {
                    await gitWithCloneProgress.clone(gitBundleForTag, llamaCppDirectory, {
                        "--quiet": null
                    });
                    await simpleGit(llamaCppDirectory).removeRemote("origin");
                });
                await updateClonedLlamaCppRepoTagFile(githubOwner, githubRepo, tag);
                return;
            }
            catch (err) {
                await fs.remove(llamaCppDirectory);
                await fs.remove(llamaCppDirectoryInfoFilePath);
                if (progressLogs)
                    console.error(getConsoleLogPrefix() + "Failed to clone git bundle, cloning from GitHub instead", err);
                await printCloneErrorHelp(String(err));
            }
        }
        try {
            await withGitCloneProgress("GitHub", async (gitWithCloneProgress) => {
                await gitWithCloneProgress.clone(remoteGitUrl, llamaCppDirectory, {
                    "--depth": 1,
                    "--branch": tag,
                    ...(recursive ? { "--recursive": null } : {}),
                    "--quiet": null
                });
            });
            await updateClonedLlamaCppRepoTagFile(githubOwner, githubRepo, tag);
        }
        catch (err) {
            await printCloneErrorHelp(String(err));
            throw err;
        }
    });
}
async function printCloneErrorHelp(error) {
    // This error happens with some docker images where the current user is different
    // from the owner of the files due to mounting a volume.
    // In such cases, print a helpful message to help the user resolve the issue.
    if (error.toLowerCase().includes("detected dubious ownership in repository"))
        console.info("\n" +
            getConsoleLogPrefix(true) + chalk.yellow("To fix this issue, try running this command to fix it for the current module directory:") + "\n" +
            'git config --global --add safe.directory "' + llamaCppDirectory + '"\n\n' +
            chalk.yellow("Or run this command to fix it everywhere:") + "\n" +
            'git config --global --add safe.directory "*"');
    else if (await which("git", { nothrow: true }) == null) {
        console.info("\n" +
            getConsoleLogPrefix(true) + chalk.yellow("Git is not installed, please install it first to build llama.cpp"));
        await logDistroInstallInstruction("To install git, ", {
            linuxPackages: { apt: ["git"], apk: ["git"] },
            macOsPackages: { brew: ["git", "git-lfs"] }
        });
    }
}
export async function getClonedLlamaCppRepoReleaseInfo() {
    if (!(await isLlamaCppRepoCloned(false)))
        return null;
    if (!(await fs.pathExists(llamaCppDirectoryInfoFilePath)))
        return null;
    try {
        const clonedLlamaCppRepoTagJson = await fs.readJson(llamaCppDirectoryInfoFilePath);
        return clonedLlamaCppRepoTagJson;
    }
    catch (err) {
        console.error(getConsoleLogPrefix() + "Failed to read llama.cpp tag file", err);
        return null;
    }
}
export async function isLlamaCppRepoCloned(waitForLock = true) {
    if (waitForLock)
        await waitForLockfileRelease({ resourcePath: llamaCppDirectory });
    else if (await isLockfileActive({ resourcePath: llamaCppDirectory }))
        return false;
    const [repoGitExists, releaseInfoFileExists] = await Promise.all([
        fs.pathExists(path.join(llamaCppDirectory, ".git")),
        fs.pathExists(llamaCppDirectoryInfoFilePath)
    ]);
    return repoGitExists && releaseInfoFileExists;
}
export async function ensureLlamaCppRepoIsCloned({ progressLogs = true } = {}) {
    if (await isLlamaCppRepoCloned(true))
        return;
    const [githubOwner, githubRepo] = defaultLlamaCppGitHubRepo.split("/");
    if (progressLogs)
        console.log(getConsoleLogPrefix() + chalk.blue("Cloning llama.cpp"));
    let releaseTag = defaultLlamaCppRelease;
    if (isGithubReleaseNeedsResolving(releaseTag)) {
        await withStatusLogs({
            loading: chalk.blue("Fetching llama.cpp info"),
            success: chalk.blue("Fetched llama.cpp info"),
            fail: chalk.blue("Failed to fetch llama.cpp info"),
            disableLogs: !progressLogs
        }, async () => {
            releaseTag = await resolveGithubRelease(githubOwner, githubRepo, releaseTag);
        });
    }
    await cloneLlamaCppRepo(githubOwner, githubRepo, releaseTag, true, progressLogs);
}
async function updateClonedLlamaCppRepoTagFile(githubOwner, githubRepo, tag) {
    try {
        const clonedLlamaCppRepoTagJson = {
            tag,
            llamaCppGithubRepo: githubOwner + "/" + githubRepo
        };
        await fs.writeJson(llamaCppDirectoryInfoFilePath, clonedLlamaCppRepoTagJson, {
            spaces: 4
        });
    }
    catch (err) {
        console.error(getConsoleLogPrefix() + "Failed to write llama.cpp tag file", err);
        throw err;
    }
}
//# sourceMappingURL=cloneLlamaCppRepo.js.map
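// Usage sketch (illustrative, not part of the commit): ensureLlamaCppRepoIsCloned
// resolves the configured default release tag if needed and clones it; clones
// prefer a local git bundle and fall back to a shallow (--depth 1) GitHub clone.
import { ensureLlamaCppRepoIsCloned, getClonedLlamaCppRepoReleaseInfo } from "./cloneLlamaCppRepo.js";

await ensureLlamaCppRepoIsCloned({ progressLogs: false });
const releaseInfo = await getClonedLlamaCppRepoReleaseInfo();
console.log(releaseInfo?.tag, releaseInfo?.llamaCppGithubRepo);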
1
node_modules/node-llama-cpp/dist/bindings/utils/cloneLlamaCppRepo.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
21
node_modules/node-llama-cpp/dist/bindings/utils/compileLLamaCpp.d.ts
generated
vendored
Normal file
@@ -0,0 +1,21 @@
import process from "process";
import { BuildMetadataFile, BuildOptions } from "../types.js";
export declare function compileLlamaCpp(buildOptions: BuildOptions, compileOptions: {
    nodeTarget?: string;
    updateLastBuildInfo?: boolean;
    includeBuildOptionsInBinaryFolderName?: boolean;
    ensureLlamaCppRepoIsCloned?: boolean;
    downloadCmakeIfNeeded?: boolean;
    ignoreWorkarounds?: ("cudaArchitecture" | "reduceParallelBuildThreads" | "singleBuildThread" | "avoidWindowsLlvm")[];
    envVars?: typeof process.env;
    ciMode?: boolean;
}): Promise<void>;
export declare function getLocalBuildBinaryPath(folderName: string): Promise<string | null>;
export declare function getLocalBuildBinaryBuildMetadata(folderName: string): Promise<BuildMetadataFile>;
export declare function getPrebuiltBinaryPath(buildOptions: BuildOptions, folderName: string): Promise<{
    binaryPath: string;
    folderName: string;
    folderPath: string;
    extBackendsPath: string | undefined;
} | null>;
export declare function getPrebuiltBinaryBuildMetadata(folderPath: string, folderName: string): Promise<BuildMetadataFile>;
516
node_modules/node-llama-cpp/dist/bindings/utils/compileLLamaCpp.js
generated
vendored
Normal file
@@ -0,0 +1,516 @@
import path from "path";
import { fileURLToPath } from "url";
import process from "process";
import os from "os";
import fs from "fs-extra";
import chalk from "chalk";
import which from "which";
import { buildMetadataFileName, documentationPageUrls, llamaCppDirectory, llamaDirectory, llamaLocalBuildBinsDirectory, llamaPrebuiltBinsDirectory, llamaToolchainsDirectory } from "../../config.js";
import { convertBuildOptionsToBuildOptionsJSON } from "../types.js";
import { spawnCommand, SpawnError } from "../../utils/spawnCommand.js";
import { downloadCmakeIfNeeded, fixXpackPermissions, getCmakePath, hasBuiltinCmake } from "../../utils/cmake.js";
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
import { withLockfile } from "../../utils/withLockfile.js";
import { getModuleVersion } from "../../utils/getModuleVersion.js";
import { ensureLlamaCppRepoIsCloned, isLlamaCppRepoCloned } from "./cloneLlamaCppRepo.js";
import { getBuildFolderNameForBuildOptions } from "./getBuildFolderNameForBuildOptions.js";
import { setLastBuildInfo } from "./lastBuildInfo.js";
import { getPlatform } from "./getPlatform.js";
import { logDistroInstallInstruction } from "./logDistroInstallInstruction.js";
import { testCmakeBinary } from "./testCmakeBinary.js";
import { getCudaNvccPaths } from "./detectAvailableComputeLayers.js";
import { detectWindowsBuildTools } from "./detectBuildTools.js";
import { asyncSome } from "./asyncSome.js";
const __dirname = path.dirname(fileURLToPath(import.meta.url));
const buildConfigType = "Release";
const requiresMsvcOnWindowsFlags = ["blas", "cann", "cuda", "hip", "kompute", "musa", "sycl", "opencl"]
    .map((backend) => ("GGML_" + backend.toUpperCase()));
export async function compileLlamaCpp(buildOptions, compileOptions) {
    const { nodeTarget = process.version, updateLastBuildInfo: updateLastBuildInfoArg = true, includeBuildOptionsInBinaryFolderName = true, ensureLlamaCppRepoIsCloned: ensureLlamaCppRepoIsClonedArg = false, downloadCmakeIfNeeded: downloadCmakeIfNeededArg = false, ignoreWorkarounds = [], envVars = process.env, ciMode = false } = compileOptions;
    const platform = getPlatform();
    const buildFolderName = await getBuildFolderNameForBuildOptions(buildOptions);
    const finalBuildFolderName = includeBuildOptionsInBinaryFolderName
        ? buildFolderName.withCustomCmakeOptions
        : buildFolderName.withoutCustomCmakeOptions;
    const useWindowsLlvm = (platform === "win" &&
        (buildOptions.gpu === false ||
            buildOptions.gpu === "vulkan") &&
        !ignoreWorkarounds.includes("avoidWindowsLlvm") &&
        !buildOptions.customCmakeOptions.has("CMAKE_TOOLCHAIN_FILE") &&
        !requiresMsvcOnWindowsFlags.some((flag) => buildOptions.customCmakeOptions.has(flag)))
        ? areWindowsBuildToolsCapableForLlvmBuild(await detectWindowsBuildTools())
        : false;
    const outDirectory = path.join(llamaLocalBuildBinsDirectory, finalBuildFolderName);
    let parallelBuildThreads = getParallelBuildThreadsToUse(platform, buildOptions.gpu, ciMode);
    if (ignoreWorkarounds.includes("singleBuildThread"))
        parallelBuildThreads = 1;
    else if (ignoreWorkarounds.includes("reduceParallelBuildThreads"))
        parallelBuildThreads = reduceParallelBuildThreads(parallelBuildThreads);
    await fs.mkdirp(llamaLocalBuildBinsDirectory);
    try {
        await withLockfile({
            resourcePath: outDirectory
        }, async () => {
            try {
                if (ensureLlamaCppRepoIsClonedArg)
                    await ensureLlamaCppRepoIsCloned({ progressLogs: buildOptions.progressLogs });
                else if (!(await isLlamaCppRepoCloned()))
                    throw new Error(`"${llamaCppDirectory}" directory does not exist`);
                if (downloadCmakeIfNeededArg)
                    await downloadCmakeIfNeeded(buildOptions.progressLogs);
                const cmakePathArgs = await getCmakePathArgs();
                const cmakeGeneratorArgs = getCmakeGeneratorArgs(buildOptions.platform, buildOptions.arch, useWindowsLlvm);
                const toolchainFile = await getToolchainFileForArch(buildOptions.arch, useWindowsLlvm);
                const runtimeVersion = nodeTarget.startsWith("v") ? nodeTarget.slice("v".length) : nodeTarget;
                const cmakeCustomOptions = new Map(buildOptions.customCmakeOptions);
                const cmakeToolchainOptions = new Map();
                if (!cmakeCustomOptions.has("GGML_BUILD_NUMBER"))
                    cmakeCustomOptions.set("GGML_BUILD_NUMBER", "1");
                cmakeCustomOptions.set("CMAKE_CONFIGURATION_TYPES", buildConfigType);
                cmakeCustomOptions.set("NLC_CURRENT_PLATFORM", platform + "-" + process.arch);
                cmakeCustomOptions.set("NLC_TARGET_PLATFORM", buildOptions.platform + "-" + buildOptions.arch);
                cmakeCustomOptions.set("NLC_VARIANT", buildFolderName.binVariant);
                if (toolchainFile != null && !cmakeCustomOptions.has("CMAKE_TOOLCHAIN_FILE"))
                    cmakeToolchainOptions.set("CMAKE_TOOLCHAIN_FILE", toolchainFile);
                if (toolchainFile != null &&
                    buildOptions.gpu === "vulkan" &&
                    (useWindowsLlvm || (platform === "win" && buildOptions.arch === "arm64")) &&
                    !cmakeCustomOptions.has("GGML_VULKAN_SHADERS_GEN_TOOLCHAIN"))
                    cmakeToolchainOptions.set("GGML_VULKAN_SHADERS_GEN_TOOLCHAIN", toolchainFile);
                if (buildOptions.gpu === "metal" && process.platform === "darwin" && !cmakeCustomOptions.has("GGML_METAL"))
                    cmakeCustomOptions.set("GGML_METAL", "1");
                else if (!cmakeCustomOptions.has("GGML_METAL"))
                    cmakeCustomOptions.set("GGML_METAL", "OFF");
                if (buildOptions.gpu === "cuda" && !cmakeCustomOptions.has("GGML_CUDA"))
                    cmakeCustomOptions.set("GGML_CUDA", "1");
                if (buildOptions.gpu === "vulkan" && !cmakeCustomOptions.has("GGML_VULKAN"))
                    cmakeCustomOptions.set("GGML_VULKAN", "1");
                if (!cmakeCustomOptions.has("GGML_CCACHE"))
                    cmakeCustomOptions.set("GGML_CCACHE", "OFF");
                // avoid linking to extra libraries that we don't use
                {
                    if (!cmakeCustomOptions.has("LLAMA_CURL") || isCmakeValueOff(cmakeCustomOptions.get("LLAMA_CURL")))
                        cmakeCustomOptions.set("LLAMA_CURL", "OFF");
                    if (!cmakeCustomOptions.has("LLAMA_HTTPLIB") || isCmakeValueOff(cmakeCustomOptions.get("LLAMA_HTTPLIB"))) {
                        cmakeCustomOptions.set("LLAMA_HTTPLIB", "OFF");
                        if (!cmakeCustomOptions.has("LLAMA_BUILD_BORINGSSL"))
                            cmakeCustomOptions.set("LLAMA_BUILD_BORINGSSL", "OFF");
                        if (!cmakeCustomOptions.has("LLAMA_OPENSSL"))
                            cmakeCustomOptions.set("LLAMA_OPENSSL", "OFF");
                    }
                }
                if (buildOptions.platform === "win" && buildOptions.arch === "arm64" && !cmakeCustomOptions.has("GGML_OPENMP"))
                    cmakeCustomOptions.set("GGML_OPENMP", "OFF");
                if (useWindowsLlvm)
                    cmakeCustomOptions.set("GGML_OPENMP", "OFF");
                if (ciMode) {
                    if (!cmakeCustomOptions.has("GGML_OPENMP"))
                        cmakeCustomOptions.set("GGML_OPENMP", "OFF");
                    if (!cmakeCustomOptions.has("GGML_NATIVE") || isCmakeValueOff(cmakeCustomOptions.get("GGML_NATIVE"))) {
                        cmakeCustomOptions.set("GGML_NATIVE", "OFF");
                        if (buildOptions.arch === "x64" && !cmakeCustomOptions.has("GGML_CPU_ALL_VARIANTS")) {
                            cmakeCustomOptions.set("GGML_CPU_ALL_VARIANTS", "ON");
                            cmakeCustomOptions.set("GGML_BACKEND_DL", "ON");
                        }
                        else if (!cmakeCustomOptions.has("GGML_BACKEND_DL"))
                            cmakeCustomOptions.set("GGML_BACKEND_DL", "ON");
                    }
                }
                await fs.remove(outDirectory);
                await spawnCommand("npm", [
                    "run", "-s", "cmake-js-llama", "--", "clean",
                    "--log-level", "warn",
                    "--out", path.relative(llamaDirectory, outDirectory),
                    ...cmakePathArgs
                ], __dirname, envVars, buildOptions.progressLogs);
                await spawnCommand("npm", [
                    "run", "-s", "cmake-js-llama", "--", "compile",
                    "--log-level", "warn",
                    "--config", buildConfigType,
                    "--arch=" + buildOptions.arch,
                    "--out", path.relative(llamaDirectory, outDirectory),
                    "--runtime-version=" + runtimeVersion,
                    "--parallel=" + parallelBuildThreads,
                    ...cmakeGeneratorArgs,
                    ...cmakePathArgs,
                    ...([
                        ...cmakeCustomOptions,
                        ...cmakeToolchainOptions
                    ].map(([key, value]) => "--CD" + key + "=" + value))
                ], __dirname, envVars, buildOptions.progressLogs);
                const compiledResultDirPath = await moveBuildFilesToResultDir(outDirectory);
                await fs.writeFile(path.join(compiledResultDirPath, buildMetadataFileName), JSON.stringify({
                    buildOptions: convertBuildOptionsToBuildOptionsJSON(buildOptions)
                }), "utf8");
                await fs.writeFile(path.join(outDirectory, "buildDone.status"), "", "utf8");
                if (updateLastBuildInfoArg) {
                    await setLastBuildInfo({
                        folderName: finalBuildFolderName
                    });
                }
            }
            finally {
                await fixXpackPermissions();
            }
        });
    }
    catch (err) {
        if (platform === "linux" && await which("make", { nothrow: true }) == null) {
            console.info("\n" +
                getConsoleLogPrefix(true) +
                chalk.yellow('It seems that "make" is not installed in your system. Install it to resolve build issues'));
            await logDistroInstallInstruction('To install "make", ', {
                linuxPackages: { apt: ["make"], apk: ["make"] },
                macOsPackages: { brew: ["make"] }
            });
        }
        else if (platform === "linux" && !(await testCmakeBinary(await getCmakePath()))) {
            console.info("\n" +
                getConsoleLogPrefix(true) +
                chalk.yellow('It seems that the used "cmake" doesn\'t work properly. Install it on your system to resolve build issues'));
            await logDistroInstallInstruction('To install "cmake", ', {
                linuxPackages: { apt: ["cmake"], apk: ["cmake"] },
                macOsPackages: { brew: ["cmake"] }
            });
        }
        else if (platform === "mac" && ((await which("clang", { nothrow: true })) == null || (err instanceof SpawnError &&
            err.combinedStd.toLowerCase().includes('"/usr/bin/cc" is not able to compile a simple test program'))))
            console.info("\n" +
                getConsoleLogPrefix(true) +
                chalk.yellow("It seems that Xcode command line tools are not installed in your system. Install it to resolve build issues\n") +
                getConsoleLogPrefix(true) +
                chalk.yellow('To install Xcode command line tools, run "xcode-select --install"'));
        else if (buildOptions.gpu === "cuda") {
            if (!ignoreWorkarounds.includes("cudaArchitecture") && (platform === "win" || platform === "linux") &&
                err instanceof SpawnError && (err.combinedStd.toLowerCase().includes("CUDA Toolkit not found".toLowerCase()) ||
                err.combinedStd.toLowerCase().includes("Failed to detect a default CUDA architecture".toLowerCase()) ||
                err.combinedStd.toLowerCase().includes("CMAKE_CUDA_COMPILER-NOTFOUND".toLowerCase()) || (err.combinedStd.toLowerCase().includes("Tell CMake where to find the compiler by setting either the environment".toLowerCase()) &&
                err.combinedStd.toLowerCase().includes('variable "CUDACXX" or the CMake cache entry CMAKE_CUDA_COMPILER to the full'.toLowerCase())) || (err.combinedStd.toLowerCase().includes("The CUDA compiler".toLowerCase()) &&
                err.combinedStd.toLowerCase().includes("is not able to compile a simple test program".toLowerCase()) &&
                err.combinedStd.toLowerCase().includes("nvcc fatal".toLowerCase())))) {
                for (const { nvccPath, cudaHomePath } of await getCudaNvccPaths()) {
                    if (buildOptions.progressLogs)
                        console.info(getConsoleLogPrefix(true) +
                            `Trying to compile again with "CUDACXX=${nvccPath}" and "CUDA_PATH=${cudaHomePath}" environment variables`);
                    try {
                        return await compileLlamaCpp(buildOptions, {
                            ...compileOptions,
                            envVars: {
                                ...envVars,
                                CUDACXX: nvccPath,
                                "CUDA_PATH": cudaHomePath
                            },
                            ignoreWorkarounds: [...ignoreWorkarounds, "cudaArchitecture"]
                        });
                    }
                    catch (err) {
                        if (buildOptions.progressLogs)
                            console.error(getConsoleLogPrefix(true, false), err);
                    }
                }
            }
            else if ((!ignoreWorkarounds.includes("reduceParallelBuildThreads") || !ignoreWorkarounds.includes("singleBuildThread")) &&
                (platform === "win" || platform === "linux") &&
                err instanceof SpawnError &&
                reduceParallelBuildThreads(parallelBuildThreads) !== parallelBuildThreads && (err.combinedStd.toLowerCase().includes("LLVM error : out of memory".toLowerCase()) ||
                err.combinedStd.toLowerCase().includes("compiler is out of heap space".toLowerCase()))) {
                if (buildOptions.progressLogs) {
                    if (ignoreWorkarounds.includes("reduceParallelBuildThreads"))
                        console.info(getConsoleLogPrefix(true) + "Trying to compile again with a single build thread");
                    else
                        console.info(getConsoleLogPrefix(true) + "Trying to compile again with reduced parallel build threads");
                }
                try {
                    return await compileLlamaCpp(buildOptions, {
                        ...compileOptions,
                        ignoreWorkarounds: [
                            ...ignoreWorkarounds,
                            ignoreWorkarounds.includes("reduceParallelBuildThreads")
                                ? "singleBuildThread"
                                : "reduceParallelBuildThreads"
                        ]
                    });
                }
                catch (err) {
                    if (buildOptions.progressLogs)
                        console.error(getConsoleLogPrefix(true, false), err);
                }
            }
            console.info("\n" +
                getConsoleLogPrefix(true) +
                chalk.yellow("To resolve errors related to CUDA compilation, see the CUDA guide: ") +
                documentationPageUrls.CUDA);
        }
        else if (buildOptions.gpu === "vulkan")
            console.info("\n" +
                getConsoleLogPrefix(true) +
                chalk.yellow("To resolve errors related to Vulkan compilation, see the Vulkan guide: ") +
                documentationPageUrls.Vulkan);
        else if (useWindowsLlvm && !ciMode) {
            if (buildOptions.progressLogs)
                console.info(getConsoleLogPrefix(true) + "Trying to compile again without LLVM");
            try {
                return await compileLlamaCpp(buildOptions, {
                    ...compileOptions,
                    ignoreWorkarounds: [...ignoreWorkarounds, "avoidWindowsLlvm"]
                });
            }
            catch (err) {
                if (buildOptions.progressLogs)
                    console.error(getConsoleLogPrefix(true, false), err);
            }
        }
        throw err;
    }
}
export async function getLocalBuildBinaryPath(folderName) {
    const binaryPath = path.join(llamaLocalBuildBinsDirectory, folderName, buildConfigType, "llama-addon.node");
    const buildMetadataFilePath = path.join(llamaLocalBuildBinsDirectory, folderName, buildConfigType, buildMetadataFileName);
    const buildDoneStatusPath = path.join(llamaLocalBuildBinsDirectory, folderName, "buildDone.status");
    const [binaryExists, buildMetadataExists, buildDoneStatusExists] = await Promise.all([
        fs.pathExists(binaryPath),
        fs.pathExists(buildMetadataFilePath),
        fs.pathExists(buildDoneStatusPath)
    ]);
    if (binaryExists && buildMetadataExists && buildDoneStatusExists)
        return binaryPath;
    return null;
}
export async function getLocalBuildBinaryBuildMetadata(folderName) {
    const buildMetadataFilePath = path.join(llamaLocalBuildBinsDirectory, folderName, buildConfigType, buildMetadataFileName);
    if (!(await fs.pathExists(buildMetadataFilePath)))
        throw new Error(`Could not find build metadata file for local build "${folderName}"`);
    const buildMetadata = await fs.readJson(buildMetadataFilePath);
    return buildMetadata;
}
export async function getPrebuiltBinaryPath(buildOptions, folderName) {
    const localPrebuiltBinaryDirectoryPath = path.join(llamaPrebuiltBinsDirectory, folderName);
    const binaryPath = await resolvePrebuiltBinaryPath(localPrebuiltBinaryDirectoryPath);
    if (binaryPath != null)
        return {
            binaryPath,
            folderName,
            folderPath: localPrebuiltBinaryDirectoryPath,
            extBackendsPath: undefined
        };
    const packagePrebuiltBinariesDirectoryPath = await getPrebuiltBinariesPackageDirectoryForBuildOptions(buildOptions);
    if (packagePrebuiltBinariesDirectoryPath == null)
        return null;
    const prebuiltBinariesDirPath = typeof packagePrebuiltBinariesDirectoryPath === "string"
        ? packagePrebuiltBinariesDirectoryPath
        : packagePrebuiltBinariesDirectoryPath.binsDir;
    const prebuiltBinariesExtDirPath = typeof packagePrebuiltBinariesDirectoryPath === "string"
        ? undefined
        : packagePrebuiltBinariesDirectoryPath.extBinsDir;
    const packagePrebuiltBinaryDirectoryPath = path.join(prebuiltBinariesDirPath, folderName);
    const extPackagePrebuiltBinaryDirectoryPath = prebuiltBinariesExtDirPath == null
        ? undefined
        : path.join(prebuiltBinariesExtDirPath, folderName);
    const binaryPathFromPackage = await resolvePrebuiltBinaryPath(packagePrebuiltBinaryDirectoryPath);
    if (binaryPathFromPackage != null)
        return {
            binaryPath: binaryPathFromPackage,
            folderName,
            folderPath: packagePrebuiltBinaryDirectoryPath,
            extBackendsPath: extPackagePrebuiltBinaryDirectoryPath
        };
    return null;
}
export async function getPrebuiltBinaryBuildMetadata(folderPath, folderName) {
    const buildMetadataFilePath = path.join(folderPath, buildMetadataFileName);
    if (!(await fs.pathExists(buildMetadataFilePath)))
        throw new Error(`Could not find build metadata file for prebuilt build "${folderName}"`);
    const buildMetadata = await fs.readJson(buildMetadataFilePath);
    return buildMetadata;
}
async function moveBuildFilesToResultDir(outDirectory, canCreateReleaseDir = false) {
    const binFilesDirPaths = [
        path.join(outDirectory, "bin"),
        path.join(outDirectory, "llama.cpp", "bin")
    ];
    const compiledResultDirPath = path.join(outDirectory, buildConfigType);
    if (!await fs.pathExists(compiledResultDirPath)) {
        if (canCreateReleaseDir) {
            if (await asyncSome(binFilesDirPaths.map((dirPath) => fs.pathExists(dirPath))))
                await fs.ensureDir(compiledResultDirPath);
            else
                throw new Error(`Could not find ${buildConfigType} directory or any other output directory`);
        }
        else
            throw new Error(`Could not find ${buildConfigType} directory`);
    }
    for (const binFilesDirPath of binFilesDirPaths) {
        if (await fs.pathExists(binFilesDirPath)) {
            const itemNames = await fs.readdir(binFilesDirPath);
            await Promise.all(itemNames.map((itemName) => (fs.copy(path.join(binFilesDirPath, itemName), path.join(compiledResultDirPath, itemName), {
                overwrite: false
            }))));
        }
    }
    await applyResultDirFixes(compiledResultDirPath, path.join(outDirectory, "_temp"));
    return compiledResultDirPath;
}
async function applyResultDirFixes(resultDirPath, tempDirPath) {
    const releaseDirPath = path.join(resultDirPath, buildConfigType);
    if (await fs.pathExists(releaseDirPath)) {
        await fs.remove(tempDirPath);
        await fs.move(releaseDirPath, tempDirPath);
        const itemNames = await fs.readdir(tempDirPath);
        await Promise.all(itemNames.map((itemName) => (fs.move(path.join(tempDirPath, itemName), path.join(resultDirPath, itemName), {
            overwrite: true
        }))));
        await fs.remove(tempDirPath);
    }
    // the vulkan-shaders-gen binary is not needed at runtime
    const vulkanShadersGenBinary = path.join(resultDirPath, getPlatform() === "win"
        ? "vulkan-shaders-gen.exe"
        : "vulkan-shaders-gen");
    await fs.remove(vulkanShadersGenBinary);
}
async function resolvePrebuiltBinaryPath(prebuiltBinaryDirectoryPath) {
    const binaryPath = path.join(prebuiltBinaryDirectoryPath, "llama-addon.node");
    const buildMetadataFilePath = path.join(prebuiltBinaryDirectoryPath, buildMetadataFileName);
    const [binaryExists, buildMetadataExists] = await Promise.all([
        fs.pathExists(binaryPath),
        fs.pathExists(buildMetadataFilePath)
    ]);
    if (binaryExists && buildMetadataExists)
        return binaryPath;
    return null;
}
function getPrebuiltBinariesPackageDirectoryForBuildOptions(buildOptions) {
    async function getBinariesPathFromModules(moduleImport) {
        try {
            const [binariesModule, currentModuleVersion] = await Promise.all([
                moduleImport(),
                getModuleVersion()
            ]);
            const { binsDir, packageVersion } = binariesModule?.getBinsDir?.() ?? {};
            if (binsDir == null || packageVersion !== currentModuleVersion)
                return null;
            return binsDir;
        }
        catch (err) {
            return null;
        }
    }
    async function getBinariesPathFromModulesWithExtModule(moduleImport, extModuleImport) {
        const [moduleBinsDir, extModuleBinsDir] = await Promise.all([
            getBinariesPathFromModules(moduleImport),
            getBinariesPathFromModules(extModuleImport)
        ]);
        if (moduleBinsDir == null)
            return null;
        else if (extModuleBinsDir == null)
            return moduleBinsDir;
        return {
            binsDir: moduleBinsDir,
            extBinsDir: extModuleBinsDir
        };
    }
    /* eslint-disable import/no-unresolved */
    if (buildOptions.platform === "mac") {
        if (buildOptions.arch === "arm64" && buildOptions.gpu === "metal")
            // @ts-ignore
            return getBinariesPathFromModules(() => import("@node-llama-cpp/mac-arm64-metal"));
        else if (buildOptions.arch === "x64" && buildOptions.gpu === false)
            // @ts-ignore
            return getBinariesPathFromModules(() => import("@node-llama-cpp/mac-x64"));
    }
    else if (buildOptions.platform === "linux") {
        if (buildOptions.arch === "x64") {
            if (buildOptions.gpu === "cuda")
                return getBinariesPathFromModulesWithExtModule(
                // @ts-ignore
                () => import("@node-llama-cpp/linux-x64-cuda"),
                // @ts-ignore
                () => import("@node-llama-cpp/linux-x64-cuda-ext"));
            else if (buildOptions.gpu === "vulkan")
                // @ts-ignore
                return getBinariesPathFromModules(() => import("@node-llama-cpp/linux-x64-vulkan"));
            else if (buildOptions.gpu === false)
                // @ts-ignore
                return getBinariesPathFromModules(() => import("@node-llama-cpp/linux-x64"));
        }
        else if (buildOptions.arch === "arm64")
            // @ts-ignore
            return getBinariesPathFromModules(() => import("@node-llama-cpp/linux-arm64"));
        else if (buildOptions.arch === "arm")
            // @ts-ignore
            return getBinariesPathFromModules(() => import("@node-llama-cpp/linux-armv7l"));
    }
    else if (buildOptions.platform === "win") {
        if (buildOptions.arch === "x64") {
            if (buildOptions.gpu === "cuda")
                return getBinariesPathFromModulesWithExtModule(
                // @ts-ignore
                () => import("@node-llama-cpp/win-x64-cuda"),
                // @ts-ignore
                () => import("@node-llama-cpp/win-x64-cuda-ext"));
            else if (buildOptions.gpu === "vulkan")
                // @ts-ignore
                return getBinariesPathFromModules(() => import("@node-llama-cpp/win-x64-vulkan"));
            else if (buildOptions.gpu === false)
                // @ts-ignore
                return getBinariesPathFromModules(() => import("@node-llama-cpp/win-x64"));
        }
        else if (buildOptions.arch === "arm64")
            // @ts-ignore
            return getBinariesPathFromModules(() => import("@node-llama-cpp/win-arm64"));
    }
    /* eslint-enable import/no-unresolved */
    return null;
}
async function getCmakePathArgs() {
    if (await hasBuiltinCmake())
        return [];
    const cmakePath = await getCmakePath();
    if (cmakePath == null)
        return [];
    return ["--cmake-path", cmakePath];
}
async function getToolchainFileForArch(targetArch, windowsLlvmSupport = false) {
    let toolchainPrefix = "";
    if (process.platform === "win32" && process.arch === "arm64") {
        // a toolchain is needed to cross-compile to arm64 on Windows, and to compile on arm64 on Windows
    }
    else if (process.platform === "win32" && process.arch === "x64" && targetArch === "x64" && windowsLlvmSupport) {
        toolchainPrefix = "llvm.";
    }
    else if (process.arch === targetArch)
        return null;
    const platform = process.platform;
    const hostArch = process.arch;
    const toolchainFilename = `${toolchainPrefix}${platform}.host-${hostArch}.target-${targetArch}.cmake`;
    const filePath = path.join(llamaToolchainsDirectory, toolchainFilename);
    if (await fs.pathExists(filePath))
        return path.resolve(filePath);
    return null;
}
function getCmakeGeneratorArgs(targetPlatform, targetArch, windowsLlvmSupport) {
    if (targetPlatform === "win" && targetArch === "arm64")
        return ["--generator", "Ninja Multi-Config"];
    else if (windowsLlvmSupport && targetPlatform === "win" && process.arch === "x64" && targetArch === "x64")
        return ["--generator", "Ninja Multi-Config"];
    return [];
}
function getParallelBuildThreadsToUse(platform, gpu, ciMode = false) {
    const cpuCount = os.cpus().length;
    if (ciMode && platform === "win" && gpu === "cuda" && cpuCount === 4)
        return 3; // workaround for `compiler is out of heap space` error on GitHub Actions on Windows when building with CUDA
    if (cpuCount <= 4)
        return cpuCount;
    if (platform === "mac" && process.arch === "arm64")
        return cpuCount - 1;
    return cpuCount - 2;
}
function reduceParallelBuildThreads(originalParallelBuildThreads) {
    return Math.max(1, Math.round(originalParallelBuildThreads / 2));
}
function isCmakeValueOff(value) {
    return value === "OFF" || value === "0";
}
function areWindowsBuildToolsCapableForLlvmBuild(detectedBuildTools) {
    return detectedBuildTools.hasLlvm && detectedBuildTools.hasNinja && detectedBuildTools.hasLibExe;
}
//# sourceMappingURL=compileLLamaCpp.js.map
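// Usage sketch (illustrative, not part of the commit). The buildOptions shape below
// is inferred only from the fields the function reads above (platform, arch, gpu,
// customCmakeOptions, progressLogs); the real BuildOptions type in ../types.js may
// require more. On failure, compileLlamaCpp retries itself with entries appended to
// ignoreWorkarounds (e.g. "reduceParallelBuildThreads", then "singleBuildThread").
import { compileLlamaCpp, getLocalBuildBinaryPath } from "./compileLLamaCpp.js";
import { getBuildFolderNameForBuildOptions } from "./getBuildFolderNameForBuildOptions.js";

const buildOptions = {
    platform: "linux",
    arch: "x64",
    gpu: false,
    customCmakeOptions: new Map(),
    progressLogs: true
};
await compileLlamaCpp(buildOptions, {
    ensureLlamaCppRepoIsCloned: true,
    downloadCmakeIfNeeded: true
});
const folderName = await getBuildFolderNameForBuildOptions(buildOptions);
console.log(await getLocalBuildBinaryPath(folderName.withCustomCmakeOptions));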
1
node_modules/node-llama-cpp/dist/bindings/utils/compileLLamaCpp.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
18
node_modules/node-llama-cpp/dist/bindings/utils/detectAvailableComputeLayers.d.ts
generated
vendored
Normal file
@@ -0,0 +1,18 @@
import { BinaryPlatform } from "./getPlatform.js";
export declare function detectAvailableComputeLayers({ platform }?: {
    platform?: BinaryPlatform;
}): Promise<{
    cuda: {
        hasNvidiaDriver: boolean;
        hasCudaRuntime: boolean;
    };
    vulkan: boolean;
    metal: boolean;
}>;
export declare function getCudaNvccPaths({ platform }?: {
    platform?: BinaryPlatform;
}): Promise<{
    nvccPath: string;
    cudaHomePath: string;
}[]>;
export declare function getWindowsProgramFilesPaths(): Promise<string[]>;
311
node_modules/node-llama-cpp/dist/bindings/utils/detectAvailableComputeLayers.js
generated
vendored
Normal file
@@ -0,0 +1,311 @@
import process from "process";
import path from "path";
import fs from "fs-extra";
import semver from "semver";
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
import { getPlatform } from "./getPlatform.js";
import { hasFileInPath } from "./hasFileInPath.js";
import { asyncSome } from "./asyncSome.js";
import { asyncEvery } from "./asyncEvery.js";
export async function detectAvailableComputeLayers({ platform = getPlatform() } = {}) {
    const [cuda, vulkan, metal] = await Promise.all([
        detectCudaSupport({ platform }),
        detectVulkanSupport({ platform }),
        detectMetalSupport({ platform })
    ]);
    return {
        cuda,
        vulkan,
        metal
    };
}
async function detectCudaSupport({ platform }) {
    if (platform === "win") {
        const librarySearchPaths = (await getCudaInstallationPaths({ platform }))
            .flatMap((cudaInstallationPath) => [cudaInstallationPath, path.join(cudaInstallationPath, "bin")]);
        const windir = getWindir();
        const [hasNvidiaDriver, hasCudaRuntime] = await Promise.all([
            asyncSome([
                hasFileInPath("nvml.dll"),
                fs.pathExists(path.join(windir, "System32", "nvml.dll"))
            ]),
            asyncEvery([
                asyncSome([
                    hasFileInPath("cudart64_110.dll", librarySearchPaths),
                    hasFileInPath("cudart64_11.dll", librarySearchPaths),
                    hasFileInPath("cudart64_12.dll", librarySearchPaths),
                    hasFileInPath("cudart64_13.dll", librarySearchPaths) // for when the next version comes out
                ]),
                asyncSome([
                    hasFileInPath("cublas64_11.dll", librarySearchPaths),
                    hasFileInPath("cublas64_12.dll", librarySearchPaths),
                    hasFileInPath("cublas64_13.dll", librarySearchPaths) // for when the next version comes out
                ]),
                asyncSome([
                    hasFileInPath("cublasLt64_11.dll", librarySearchPaths),
                    hasFileInPath("cublasLt64_12.dll", librarySearchPaths),
                    hasFileInPath("cublasLt64_13.dll", librarySearchPaths) // for when the next version comes out
                ])
            ])
        ]);
        return {
            hasNvidiaDriver,
            hasCudaRuntime
        };
    }
    else if (platform === "linux") {
        const cudaLibraryPaths = await getLinuxCudaLibraryPaths();
        const librarySearchPaths = [
            process.env.LD_LIBRARY_PATH,
            process.env.CUDA_PATH,
            "/usr/lib",
            "/usr/lib64",
            "/usr/lib/x86_64-linux-gnu",
            "/usr/lib/aarch64-linux-gnu",
            "/usr/lib/armv7l-linux-gnu",
            ...cudaLibraryPaths
        ];
        const [hasNvidiaDriver, hasCudaRuntime] = await Promise.all([
            asyncSome([
                hasFileInPath("libnvidia-ml.so", librarySearchPaths),
                hasFileInPath("libnvidia-ml.so.1", librarySearchPaths)
            ]),
            asyncEvery([
                asyncSome([
                    hasFileInPath("libcuda.so", librarySearchPaths),
                    hasFileInPath("libcuda.so.1", librarySearchPaths)
                ]),
                asyncSome([
                    hasFileInPath("libcudart.so", librarySearchPaths),
                    hasFileInPath("libcudart.so.11", librarySearchPaths),
                    hasFileInPath("libcudart.so.12", librarySearchPaths),
                    hasFileInPath("libcudart.so.13", librarySearchPaths) // for when the next version comes out
                ]),
                asyncSome([
                    hasFileInPath("libcublas.so", librarySearchPaths),
                    hasFileInPath("libcublas.so.11", librarySearchPaths),
                    hasFileInPath("libcublas.so.12", librarySearchPaths),
                    hasFileInPath("libcublas.so.13", librarySearchPaths) // for when the next version comes out
                ]),
                asyncSome([
                    hasFileInPath("libcublasLt.so", librarySearchPaths),
                    hasFileInPath("libcublasLt.so.11", librarySearchPaths),
                    hasFileInPath("libcublasLt.so.12", librarySearchPaths),
                    hasFileInPath("libcublasLt.so.13", librarySearchPaths) // for when the next version comes out
                ])
            ])
        ]);
        return {
            hasNvidiaDriver,
            hasCudaRuntime
        };
    }
    return {
        hasNvidiaDriver: false,
        hasCudaRuntime: false
    };
}
async function detectVulkanSupport({ platform }) {
    if (platform === "win") {
        const windir = getWindir();
        return await asyncSome([
            hasFileInPath("vulkan-1.dll"),
            fs.pathExists(path.join(windir, "System32", "vulkan-1.dll")),
            fs.pathExists(path.join(windir, "SysWOW64", "vulkan-1.dll"))
        ]);
    }
    else if (platform === "linux") {
        const librarySearchPaths = [
            process.env.LD_LIBRARY_PATH,
            "/usr/lib",
            "/usr/lib64",
            "/usr/lib/x86_64-linux-gnu",
            "/usr/lib/aarch64-linux-gnu",
            "/usr/lib/armv7l-linux-gnu",
            (process.env.PREFIX != null && process.env.PREFIX?.toLowerCase()?.includes?.("termux"))
                ? `${process.env.PREFIX}/usr/lib`
                : undefined
        ];
        return await asyncSome([
            hasFileInPath("libvulkan.so", librarySearchPaths),
            hasFileInPath("libvulkan.so.1", librarySearchPaths)
        ]);
    }
    else if (platform === "mac") {
        return await asyncSome([
            hasFileInPath("libvulkan.dylib"),
            hasFileInPath("libvulkan.dylib.1")
        ]);
    }
    return false;
}
async function detectMetalSupport({ platform }) {
    return platform === "mac";
}
async function getLinuxCudaLibraryPaths() {
    const res = [];
    try {
        for (const cudaInstallationPath of await getCudaInstallationPaths({ platform: "linux" })) {
            const cudaTargetsFolder = `${cudaInstallationPath}/targets`;
            if (!(await fs.pathExists(cudaTargetsFolder)))
                continue;
            for (const cudaTargetFolderName of await fs.readdir(cudaTargetsFolder)) {
                res.push(`${cudaTargetsFolder}/${cudaTargetFolderName}/lib`, `${cudaTargetsFolder}/${cudaTargetFolderName}/lib/stubs`);
            }
        }
    }
    catch (err) {
        console.error(getConsoleLogPrefix() + 'Failed to search "/usr/local/" for CUDA library paths', err);
    }
    return res;
}
async function getCudaInstallationPaths({ platform }) {
    if (platform === "win") {
        try {
            const programFilesPaths = await getWindowsProgramFilesPaths();
            const potentialCudaInstallationsContainerPaths = programFilesPaths
                .map((programFilesPath) => `${programFilesPath}/NVIDIA GPU Computing Toolkit/CUDA`);
            const cudaInstallationsContainerPaths = (await Promise.all(potentialCudaInstallationsContainerPaths.map(async (potentialCudaInstallationsContainerPath) => {
                if (await fs.pathExists(potentialCudaInstallationsContainerPath))
                    return potentialCudaInstallationsContainerPath;
                return null;
            }))).filter((path) => path != null);
            const potentialCudaInstallations = (await Promise.all(cudaInstallationsContainerPaths.map(async (cudaInstallationsContainerPath) => {
                const cudaFolderPrefix = "v";
                return (await fs.pathExists(cudaInstallationsContainerPath)
                    ? await fs.readdir(cudaInstallationsContainerPath)
                    : [])
                    .filter((installationFolderName) => installationFolderName.toLowerCase()
                    .startsWith(cudaFolderPrefix))
                    .sort((a, b) => {
                    const aVersion = a.slice(cudaFolderPrefix.length);
                    const bVersion = b.slice(cudaFolderPrefix.length);
                    try {
                        const aVersionValid = semver.valid(semver.coerce(aVersion));
                        const bVersionValid = semver.valid(semver.coerce(bVersion));
                        if (aVersionValid && bVersionValid)
                            return semver.compare(aVersionValid, bVersionValid);
                        else if (aVersionValid)
                            return -1;
                        else if (bVersionValid)
                            return 1;
                        else
                            return 0;
                    }
                    catch (err) {
                        return 0;
                    }
                })
                    .reverse()
                    .map((installationFolderName) => `${cudaInstallationsContainerPath}/${installationFolderName}`);
            }))).flat();
            if (process.env.CUDA_PATH != null && process.env.CUDA_PATH !== "")
                potentialCudaInstallations.unshift(process.env.CUDA_PATH);
            return (await Promise.all(potentialCudaInstallations.map(async (cudaFolder) => {
                if (await fs.pathExists(cudaFolder))
                    return cudaFolder;
                return null;
            }))).filter((cudaFolder) => cudaFolder != null);
        }
        catch (err) {
            console.error(getConsoleLogPrefix() + 'Failed to search "Program Files" for CUDA installations', err);
        }
        return [];
    }
    else if (platform === "linux") {
        const res = [];
        try {
            const usrLocal = "/usr/local";
            const cudaFolderPrefix = "cuda-";
            const potentialCudaFolders = (await fs.pathExists(usrLocal)
                ? await fs.readdir(usrLocal)
                : [])
                .filter((usrLocalFolderName) => usrLocalFolderName.toLowerCase().startsWith(cudaFolderPrefix))
                .sort((a, b) => {
                const aVersion = a.slice(cudaFolderPrefix.length);
                const bVersion = b.slice(cudaFolderPrefix.length);
                try {
                    const aVersionValid = semver.valid(semver.coerce(aVersion));
                    const bVersionValid = semver.valid(semver.coerce(bVersion));
                    if (aVersionValid && bVersionValid)
                        return semver.compare(aVersionValid, bVersionValid);
                    else if (aVersionValid)
                        return -1;
                    else if (bVersionValid)
                        return 1;
                    else
                        return 0;
                }
                catch (err) {
                    return 0;
                }
            })
                .reverse()
                .map((usrLocalFolderName) => `${usrLocal}/${usrLocalFolderName}`);
            potentialCudaFolders.unshift(`${usrLocal}/cuda`);
            if (process.env.CUDA_PATH != null && process.env.CUDA_PATH !== "")
                potentialCudaFolders.unshift(process.env.CUDA_PATH);
            for (const cudaFolder of potentialCudaFolders) {
                const cudaTargetsFolder = `${cudaFolder}/targets`;
                if (!(await fs.pathExists(cudaTargetsFolder)))
                    continue;
                res.push(cudaFolder);
            }
        }
        catch (err) {
            console.error(getConsoleLogPrefix() + 'Failed to search "/usr/local/" for CUDA installations', err);
        }
        return res;
    }
    return [];
}
export async function getCudaNvccPaths({ platform = getPlatform() } = {}) {
    const cudaInstallationPaths = await getCudaInstallationPaths({ platform });
    const nvccPotentialPaths = cudaInstallationPaths
        .map((cudaInstallationPath) => {
        if (platform === "win")
            return {
                nvccPath: path.join(cudaInstallationPath, "bin", "nvcc.exe"),
                cudaHomePath: cudaInstallationPath
            };
        return {
            nvccPath: path.join(cudaInstallationPath, "bin", "nvcc"),
            cudaHomePath: cudaInstallationPath
        };
    });
    try {
        const resolvedPaths = await Promise.all(nvccPotentialPaths.map(async ({ nvccPath, cudaHomePath }) => {
            if (await fs.pathExists(nvccPath))
                return { nvccPath, cudaHomePath };
            return null;
        }));
        return resolvedPaths.filter((resolvedPath) => resolvedPath != null);
    }
    catch (err) {
        console.error(getConsoleLogPrefix() + `Failed to search for "nvcc${platform === "win" ? ".exe" : ""}" in CUDA installation paths`, err);
    }
    return [];
}
function getWindir() {
    return process.env.windir || process.env.WINDIR || process.env.SystemRoot || process.env.systemroot || process.env.SYSTEMROOT ||
        "C:\\Windows";
}
export async function getWindowsProgramFilesPaths() {
    const potentialPaths = await Promise.all([
        process.env["ProgramFiles(Arm)"],
        process.env.ProgramFiles,
        process.env["ProgramFiles(x86)"],
        `${process.env.SystemDrive ?? "C:"}\\Program Files (Arm)`,
        `${process.env.SystemDrive ?? "C:"}\\Program Files`,
        `${process.env.SystemDrive ?? "C:"}\\Program Files (x86)`
    ]
        .map(async (programFilesPath) => {
        if (programFilesPath == null)
            return null;
        if (await fs.pathExists(programFilesPath))
            return programFilesPath;
        return null;
    }));
    return Array.from(new Set(potentialPaths.filter((potentialPath) => potentialPath != null)));
}
//# sourceMappingURL=detectAvailableComputeLayers.js.map
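// Usage sketch (illustrative, not part of the commit): detection is purely
// filesystem-based - it searches for driver/runtime libraries (nvml, libcuda,
// cudart, cublas, Vulkan loaders) instead of initializing any GPU API, so it
// is cheap to call at startup.
import { detectAvailableComputeLayers } from "./detectAvailableComputeLayers.js";

const layers = await detectAvailableComputeLayers();
if (layers.cuda.hasNvidiaDriver && layers.cuda.hasCudaRuntime)
    console.log("CUDA driver and runtime libraries were found");
console.log("Vulkan:", layers.vulkan, "| Metal:", layers.metal);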
1
node_modules/node-llama-cpp/dist/bindings/utils/detectAvailableComputeLayers.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
14
node_modules/node-llama-cpp/dist/bindings/utils/detectBuildTools.d.ts
generated
vendored
Normal file
@@ -0,0 +1,14 @@
/**
 * On platforms other than Windows, this function will return an empty array
 * @returns Visual Studio edition installation paths - the paths are ordered from the most recent version to the oldest
 */
export declare function getWindowsVisualStudioEditionPaths(): Promise<{
    vsEditionPaths: string[];
    programFilesPaths: string[];
}>;
export declare function detectWindowsBuildTools(targetArch?: typeof process.arch): Promise<{
    hasCmake: boolean;
    hasNinja: boolean;
    hasLlvm: boolean;
    hasLibExe: boolean;
}>;
149
node_modules/node-llama-cpp/dist/bindings/utils/detectBuildTools.js
generated
vendored
Normal file
@@ -0,0 +1,149 @@
import path from "path";
import fs from "fs-extra";
import { getWindowsProgramFilesPaths } from "./detectAvailableComputeLayers.js";
import { asyncSome } from "./asyncSome.js";
import { asyncEvery } from "./asyncEvery.js";
import { getPlatform } from "./getPlatform.js";
/**
 * On platforms other than Windows, this function will return empty arrays
 * @returns Visual Studio edition installation paths - the paths are ordered from the most recent version to the oldest
 */
export async function getWindowsVisualStudioEditionPaths() {
    const platform = getPlatform();
    if (platform !== "win")
        return {
            vsEditionPaths: [],
            programFilesPaths: []
        };
    const programFilesPaths = await getWindowsProgramFilesPaths();
    const potentialVisualStudioPaths = programFilesPaths
        .map((programFilesPath) => `${programFilesPath}/Microsoft Visual Studio`);
    const versionPaths = (await Promise.all(potentialVisualStudioPaths.map(async (vsPath) => {
        if (await fs.pathExists(vsPath)) {
            const versions = await fs.readdir(vsPath, { withFileTypes: true });
            return versions
                .filter((dirent) => dirent.isDirectory())
                .map((dirent) => dirent.name)
                .sort((a, b) => {
                const aNumber = parseInt(a);
                const bNumber = parseInt(b);
                if (Number.isFinite(aNumber) && Number.isFinite(bNumber))
                    return bNumber - aNumber;
                else if (Number.isFinite(aNumber))
                    return -1;
                else if (Number.isFinite(bNumber))
                    return 1;
                return 0;
            })
                .map((version) => path.join(vsPath, version));
        }
        return [];
    }))).flat();
    const vsEditionPaths = (await Promise.all(versionPaths.map(async (versionPath) => {
        const editions = await fs.readdir(versionPath, { withFileTypes: true });
        return editions
            .filter((dirent) => dirent.isDirectory())
            .map((edition) => path.join(versionPath, edition.name));
    }))).flat();
    return {
        vsEditionPaths,
        programFilesPaths
    };
}
export async function detectWindowsBuildTools(targetArch = process.arch) {
    try {
        const currentArch = process.arch;
        const { vsEditionPaths, programFilesPaths } = await getWindowsVisualStudioEditionPaths();
        if (vsEditionPaths.length === 0 && programFilesPaths.length === 0)
            return {
                hasCmake: false,
                hasNinja: false,
                hasLlvm: false,
                hasLibExe: false
            };
        const programDataPaths = [
            process.env["ProgramData"]
        ].filter((programDataPath) => programDataPath != null);
        const msvcPaths = (await Promise.all(vsEditionPaths.map(async (editionPath) => {
            const msvcVersionsPath = path.join(editionPath, "VC", "Tools", "MSVC");
            if (await fs.pathExists(msvcVersionsPath)) {
                const msvcVersions = await fs.readdir(msvcVersionsPath);
                return msvcVersions
                    .sort((a, b) => {
                    const aNumber = parseInt(a);
                    const bNumber = parseInt(b);
                    if (Number.isFinite(aNumber) && Number.isFinite(bNumber))
                        return bNumber - aNumber;
                    else if (Number.isFinite(aNumber))
                        return -1;
                    else if (Number.isFinite(bNumber))
                        return 1;
                    return 0;
                })
                    .map((msvcVersion) => path.join(msvcVersionsPath, msvcVersion));
            }
            return [];
        }))).flat();
        const potentialCmakePaths = [
            ...programFilesPaths.map((programFilesPath) => path.join(programFilesPath, "CMake", "bin", "cmake.exe")),
            ...vsEditionPaths.map((editionPath) => (path.join(editionPath, "Common7", "IDE", "CommonExtensions", "Microsoft", "CMake", "CMake", "bin", "cmake.exe")))
        ];
        const potentialNinjaPaths = [
            ...programDataPaths.map((programDataPath) => path.join(programDataPath, "chocolatey", "bin", "ninja.exe")),
            ...vsEditionPaths.map((editionPath) => (path.join(editionPath, "Common7", "IDE", "CommonExtensions", "Microsoft", "CMake", "Ninja", "ninja.exe")))
        ];
        const potentialLlvmPaths = [
            ...programFilesPaths.map((programFilesPath) => path.join(programFilesPath, "LLVM", "bin")),
            ...vsEditionPaths.map((editionPath) => {
                if (currentArch === "x64")
                    return path.join(editionPath, "VC", "Tools", "Llvm", "x64", "bin");
                else if (currentArch === "arm64")
                    return path.join(editionPath, "VC", "Tools", "Llvm", "ARM64", "bin");
                return path.join(editionPath, "VC", "Tools", "Llvm", "bin");
            })
        ];
        const potentialLibExePaths = msvcPaths.map((msvcPath) => {
            const hostArchDirName = currentArch === "x64"
                ? "Hostx64"
                : currentArch === "arm64"
                    ? "Hostarm64"
                    : "_";
            const targetArchDirName = targetArch === "x64"
                ? "x64"
                : targetArch === "arm64"
                    ? "arm64"
                    : "_";
            return path.join(msvcPath, "bin", hostArchDirName, targetArchDirName, "lib.exe");
        });
        const [hasCmake, hasNinja, hasLibExe, hasLlvm] = await Promise.all([
            asyncSome(potentialCmakePaths.map((cmakePath) => fs.pathExists(cmakePath))),
            asyncSome(potentialNinjaPaths.map((ninjaPath) => fs.pathExists(ninjaPath))),
            asyncSome(potentialLibExePaths.map((libExePath) => fs.pathExists(libExePath))),
            asyncSome(potentialLlvmPaths.map((llvmPath) => isLlvmPathValid(llvmPath)))
        ]);
        return {
            hasCmake,
            hasNinja,
            hasLlvm,
            hasLibExe
        };
    }
    catch (err) {
        return {
            hasCmake: false,
            hasNinja: false,
            hasLlvm: false,
            hasLibExe: false
        };
    }
}
async function isLlvmPathValid(llvmPath) {
    if (!(await fs.pathExists(llvmPath)))
        return false;
    return await asyncEvery([
        fs.pathExists(path.join(llvmPath, "clang.exe")),
        fs.pathExists(path.join(llvmPath, "clang++.exe")),
        fs.pathExists(path.join(llvmPath, "llvm-rc.exe"))
    ]);
}
//# sourceMappingURL=detectBuildTools.js.map
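A minimal usage sketch for the detection above (illustrative only: these are internal dist modules rather than the package's public API, and the relative import path assumes a sibling file):

    // Sketch: decide whether this Windows machine can build llama.cpp from source.
    // On non-Windows platforms every flag comes back false, because
    // getWindowsVisualStudioEditionPaths() returns empty arrays there.
    import { detectWindowsBuildTools } from "./detectBuildTools.js";

    const tools = await detectWindowsBuildTools(); // target arch defaults to process.arch
    if (tools.hasCmake && tools.hasNinja && tools.hasLlvm && tools.hasLibExe)
        console.log("All required Windows build tools were found");
    else
        console.log("Missing build tools:", tools);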
1
node_modules/node-llama-cpp/dist/bindings/utils/detectBuildTools.js.map
generated
vendored
Normal file
File diff suppressed because one or more lines are too long
4
node_modules/node-llama-cpp/dist/bindings/utils/detectGlibc.d.ts
generated
vendored
Normal file
@@ -0,0 +1,4 @@
import { BinaryPlatform } from "./getPlatform.js";
export declare function detectGlibc({ platform }: {
    platform: BinaryPlatform;
}): Promise<boolean>;
46
node_modules/node-llama-cpp/dist/bindings/utils/detectGlibc.js
generated
vendored
Normal file
@@ -0,0 +1,46 @@
import process from "process";
import { asyncEvery } from "./asyncEvery.js";
import { asyncSome } from "./asyncSome.js";
import { hasFileInPath } from "./hasFileInPath.js";
export async function detectGlibc({ platform }) {
    if (platform === "linux") {
        const librarySearchPaths = [
            process.env.LD_LIBRARY_PATH,
            "/lib",
            "/lib64",
            "/usr/lib",
            "/usr/lib64",
            "/usr/lib/x86_64-linux-gnu",
            "/usr/lib/aarch64-linux-gnu",
            "/usr/lib/armv7l-linux-gnu"
        ];
        return await asyncEvery([
            asyncSome([
                hasFileInPath("libc.so", librarySearchPaths),
                hasFileInPath("libc.so.5", librarySearchPaths),
                hasFileInPath("libc.so.6", librarySearchPaths),
                hasFileInPath("libc.so.7", librarySearchPaths) // for when the next version comes out
            ]),
            asyncSome([
                hasFileInPath("ld-linux.so", librarySearchPaths),
                hasFileInPath("ld-linux.so.1", librarySearchPaths),
                hasFileInPath("ld-linux.so.2", librarySearchPaths),
                hasFileInPath("ld-linux.so.3", librarySearchPaths), // for when the next version comes out
                hasFileInPath("ld-linux-x86-64.so", librarySearchPaths),
                hasFileInPath("ld-linux-x86-64.so.1", librarySearchPaths),
                hasFileInPath("ld-linux-x86-64.so.2", librarySearchPaths),
                hasFileInPath("ld-linux-x86-64.so.3", librarySearchPaths), // for when the next version comes out
                hasFileInPath("ld-linux-aarch64.so", librarySearchPaths),
                hasFileInPath("ld-linux-aarch64.so.1", librarySearchPaths),
                hasFileInPath("ld-linux-aarch64.so.2", librarySearchPaths),
                hasFileInPath("ld-linux-aarch64.so.3", librarySearchPaths), // for when the next version comes out
                hasFileInPath("ld-linux-armv7l.so", librarySearchPaths),
                hasFileInPath("ld-linux-armv7l.so.1", librarySearchPaths),
                hasFileInPath("ld-linux-armv7l.so.2", librarySearchPaths),
                hasFileInPath("ld-linux-armv7l.so.3", librarySearchPaths) // for when the next version comes out
            ])
        ]);
    }
    return false;
}
//# sourceMappingURL=detectGlibc.js.map
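Since detectGlibc only probes well-known loader locations (plus LD_LIBRARY_PATH), calling it is a one-liner; a sketch with illustrative import paths for these internal modules:

    // Sketch: report whether the current Linux system exposes glibc.
    // A `false` result on Linux typically indicates a musl-based distro such as Alpine.
    import { detectGlibc } from "./detectGlibc.js";
    import { getPlatform } from "./getPlatform.js";

    const hasGlibc = await detectGlibc({ platform: getPlatform() });
    console.log(hasGlibc ? "glibc detected" : "glibc not found");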
1
node_modules/node-llama-cpp/dist/bindings/utils/detectGlibc.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"detectGlibc.js","sourceRoot":"","sources":["../../../src/bindings/utils/detectGlibc.ts"],"names":[],"mappings":"AAAA,OAAO,OAAO,MAAM,SAAS,CAAC;AAE9B,OAAO,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAC3C,OAAO,EAAC,SAAS,EAAC,MAAM,gBAAgB,CAAC;AACzC,OAAO,EAAC,aAAa,EAAC,MAAM,oBAAoB,CAAC;AAEjD,MAAM,CAAC,KAAK,UAAU,WAAW,CAAC,EAC9B,QAAQ,EAGX;IACG,IAAI,QAAQ,KAAK,OAAO,EAAE,CAAC;QACvB,MAAM,kBAAkB,GAAG;YACvB,OAAO,CAAC,GAAG,CAAC,eAAe;YAC3B,MAAM;YACN,QAAQ;YACR,UAAU;YACV,YAAY;YACZ,2BAA2B;YAC3B,4BAA4B;YAC5B,2BAA2B;SAC9B,CAAC;QAEF,OAAO,MAAM,UAAU,CAAC;YACpB,SAAS,CAAC;gBACN,aAAa,CAAC,SAAS,EAAE,kBAAkB,CAAC;gBAC5C,aAAa,CAAC,WAAW,EAAE,kBAAkB,CAAC;gBAC9C,aAAa,CAAC,WAAW,EAAE,kBAAkB,CAAC;gBAC9C,aAAa,CAAC,WAAW,EAAE,kBAAkB,CAAC,CAAC,sCAAsC;aACxF,CAAC;YACF,SAAS,CAAC;gBACN,aAAa,CAAC,aAAa,EAAE,kBAAkB,CAAC;gBAChD,aAAa,CAAC,eAAe,EAAE,kBAAkB,CAAC;gBAClD,aAAa,CAAC,eAAe,EAAE,kBAAkB,CAAC;gBAClD,aAAa,CAAC,eAAe,EAAE,kBAAkB,CAAC,EAAE,sCAAsC;gBAC1F,aAAa,CAAC,oBAAoB,EAAE,kBAAkB,CAAC;gBACvD,aAAa,CAAC,sBAAsB,EAAE,kBAAkB,CAAC;gBACzD,aAAa,CAAC,sBAAsB,EAAE,kBAAkB,CAAC;gBACzD,aAAa,CAAC,sBAAsB,EAAE,kBAAkB,CAAC,EAAE,sCAAsC;gBACjG,aAAa,CAAC,qBAAqB,EAAE,kBAAkB,CAAC;gBACxD,aAAa,CAAC,uBAAuB,EAAE,kBAAkB,CAAC;gBAC1D,aAAa,CAAC,uBAAuB,EAAE,kBAAkB,CAAC;gBAC1D,aAAa,CAAC,uBAAuB,EAAE,kBAAkB,CAAC,EAAE,sCAAsC;gBAClG,aAAa,CAAC,oBAAoB,EAAE,kBAAkB,CAAC;gBACvD,aAAa,CAAC,sBAAsB,EAAE,kBAAkB,CAAC;gBACzD,aAAa,CAAC,sBAAsB,EAAE,kBAAkB,CAAC;gBACzD,aAAa,CAAC,sBAAsB,EAAE,kBAAkB,CAAC,CAAC,sCAAsC;aACnG,CAAC;SACL,CAAC,CAAC;IACP,CAAC;IAED,OAAO,KAAK,CAAC;AACjB,CAAC"}
9
node_modules/node-llama-cpp/dist/bindings/utils/getBestComputeLayersAvailable.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
import process from "process";
import { BuildGpu } from "../types.js";
import { BinaryPlatform } from "./getPlatform.js";
export declare function getBestComputeLayersAvailable(): Promise<(false | "metal" | "cuda" | "vulkan")[]>;
export declare function detectBestComputeLayersAvailable({ platform, arch, hasCudaWithStaticBinaryBuild }?: {
    platform?: BinaryPlatform;
    arch?: typeof process.arch;
    hasCudaWithStaticBinaryBuild?: boolean;
}): Promise<BuildGpu[]>;
29
node_modules/node-llama-cpp/dist/bindings/utils/getBestComputeLayersAvailable.js
generated
vendored
Normal file
@@ -0,0 +1,29 @@
import process from "process";
import { getPlatform } from "./getPlatform.js";
import { detectAvailableComputeLayers } from "./detectAvailableComputeLayers.js";
let bestComputeLayersAvailablePromise = null;
export async function getBestComputeLayersAvailable() {
    if (bestComputeLayersAvailablePromise != null) {
        try {
            return await bestComputeLayersAvailablePromise;
        }
        catch (err) { }
    }
    bestComputeLayersAvailablePromise = detectBestComputeLayersAvailable();
    return await bestComputeLayersAvailablePromise;
}
export async function detectBestComputeLayersAvailable({ platform = getPlatform(), arch = process.arch, hasCudaWithStaticBinaryBuild = false } = {}) {
    if (platform === "mac" && arch === "arm64")
        return ["metal"];
    const res = [];
    const availableComputeLayers = await detectAvailableComputeLayers({
        platform
    });
    if (availableComputeLayers.cuda.hasNvidiaDriver && (availableComputeLayers.cuda.hasCudaRuntime || hasCudaWithStaticBinaryBuild))
        res.push("cuda");
    if (availableComputeLayers.vulkan)
        res.push("vulkan");
    res.push(false);
    return res;
}
//# sourceMappingURL=getBestComputeLayersAvailable.js.map
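Note that getBestComputeLayersAvailable caches its detection promise, so only the first successful call probes the machine; later calls reuse the result. A usage sketch (illustrative import path; the list is ordered best-first and ends with `false`, the CPU-only fallback, except on Apple Silicon where only `["metal"]` is returned):

    // Sketch: list the compute layers worth trying, best first.
    import { getBestComputeLayersAvailable } from "./getBestComputeLayersAvailable.js";

    const layers = await getBestComputeLayersAvailable();
    console.log(layers); // e.g. ["cuda", "vulkan", false] on a machine with an NVIDIA GPU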
1
node_modules/node-llama-cpp/dist/bindings/utils/getBestComputeLayersAvailable.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getBestComputeLayersAvailable.js","sourceRoot":"","sources":["../../../src/bindings/utils/getBestComputeLayersAvailable.ts"],"names":[],"mappings":"AAAA,OAAO,OAAO,MAAM,SAAS,CAAC;AAE9B,OAAO,EAAiB,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAC7D,OAAO,EAAC,4BAA4B,EAAC,MAAM,mCAAmC,CAAC;AAE/E,IAAI,iCAAiC,GAA+D,IAAI,CAAC;AACzG,MAAM,CAAC,KAAK,UAAU,6BAA6B;IAC/C,IAAI,iCAAiC,IAAI,IAAI,EAAE,CAAC;QAC5C,IAAI,CAAC;YACD,OAAO,MAAM,iCAAiC,CAAC;QACnD,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC,CAAA,CAAC;IACpB,CAAC;IAED,iCAAiC,GAAG,gCAAgC,EAAE,CAAC;IACvE,OAAO,MAAM,iCAAiC,CAAC;AACnD,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,gCAAgC,CAAC,EACnD,QAAQ,GAAG,WAAW,EAAE,EACxB,IAAI,GAAG,OAAO,CAAC,IAAI,EACnB,4BAA4B,GAAG,KAAK,KAKpC,EAAE;IACF,IAAI,QAAQ,KAAK,KAAK,IAAI,IAAI,KAAK,OAAO;QACtC,OAAO,CAAC,OAAO,CAAC,CAAC;IAErB,MAAM,GAAG,GAAe,EAAE,CAAC;IAC3B,MAAM,sBAAsB,GAAG,MAAM,4BAA4B,CAAC;QAC9D,QAAQ;KACX,CAAC,CAAC;IAEH,IAAI,sBAAsB,CAAC,IAAI,CAAC,eAAe,IAAI,CAAC,sBAAsB,CAAC,IAAI,CAAC,cAAc,IAAI,4BAA4B,CAAC;QAC3H,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;IAErB,IAAI,sBAAsB,CAAC,MAAM;QAC7B,GAAG,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;IAEvB,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IAEhB,OAAO,GAAG,CAAC;AACf,CAAC"}
6
node_modules/node-llama-cpp/dist/bindings/utils/getBuildFolderNameForBuildOptions.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
import { BuildOptions } from "../types.js";
export declare function getBuildFolderNameForBuildOptions(buildOptions: BuildOptions): Promise<{
    withoutCustomCmakeOptions: string;
    withCustomCmakeOptions: string;
    binVariant: string;
}>;
105
node_modules/node-llama-cpp/dist/bindings/utils/getBuildFolderNameForBuildOptions.js
generated
vendored
Normal file
@@ -0,0 +1,105 @@
import { hashString } from "../../utils/hashString.js";
import { builtinLlamaCppGitHubRepo, builtinLlamaCppRelease } from "../../config.js";
export async function getBuildFolderNameForBuildOptions(buildOptions) {
    const nameParts = [buildOptions.platform, buildOptions.arch];
    const binParts = [];
    if (buildOptions.gpu !== false) {
        nameParts.push(makeStringSafeForPathName(buildOptions.gpu));
        binParts.push(makeStringSafeForPathName(buildOptions.gpu.toLowerCase()));
    }
    if (buildOptions.llamaCpp.repo !== builtinLlamaCppGitHubRepo || buildOptions.llamaCpp.release !== builtinLlamaCppRelease) {
        const releaseFolderNamePart = await getFolderNamePartForRelease(buildOptions.llamaCpp.repo, buildOptions.llamaCpp.release);
        nameParts.push("release-" + releaseFolderNamePart);
        binParts.push(releaseFolderNamePart.replaceAll(" ", "_"));
    }
    else if (buildOptions.llamaCpp.release !== "latest")
        binParts.push(buildOptions.llamaCpp.release);
    if (buildOptions.customCmakeOptions.size === 0) {
        const name = nameParts.join("-");
        return {
            withoutCustomCmakeOptions: name,
            withCustomCmakeOptions: name,
            binVariant: binParts.join(".")
        };
    }
    const cmakeOptionKeys = [...buildOptions.customCmakeOptions.keys()];
    cmakeOptionKeys.sort();
    const cmakeOptionStringsArray = [];
    for (const key of cmakeOptionKeys) {
        if (key === "")
            continue;
        cmakeOptionStringsArray.push(`${encodeURIComponent(key)}=${encodeURIComponent(buildOptions.customCmakeOptions.get(key))}`);
    }
    const nameWithoutCustomCmakeOptions = nameParts.join("-");
    if (cmakeOptionStringsArray.length === 0) {
        return {
            withoutCustomCmakeOptions: nameWithoutCustomCmakeOptions,
            withCustomCmakeOptions: nameWithoutCustomCmakeOptions,
            binVariant: binParts.join(".")
        };
    }
    const cmakeOptionsHash = await hashString(cmakeOptionStringsArray.join(";"));
    nameParts.push(cmakeOptionsHash);
    binParts.push(cmakeOptionsHash.slice(0, 8));
    const nameWithCustomCmakeOptions = nameParts.join("-");
    return {
        withoutCustomCmakeOptions: nameWithoutCustomCmakeOptions,
        withCustomCmakeOptions: nameWithCustomCmakeOptions,
        binVariant: binParts.join(".")
    };
}
async function getFolderNamePartForRelease(repo, release) {
    const resParts = [];
    let shouldHash = false;
    if (repo !== builtinLlamaCppGitHubRepo) {
        const [owner, name] = repo.split("/");
        if (containsUnsafeCharacters(String(owner)) || containsUnsafeCharacters(String(name))) {
            shouldHash = true;
            resParts.push(encodeURIComponent(String(owner)) + " " + encodeURIComponent(String(name)));
        }
        else
            resParts.push(String(owner) + " " + String(name));
    }
    if (containsUnsafeCharacters(release)) {
        shouldHash = true;
        resParts.push(encodeURIComponent(release));
    }
    else
        resParts.push(release);
    const res = resParts.join(" ");
    if (shouldHash)
        return await hashString(res);
    return res;
}
function makeStringSafeForPathName(str) {
    let res = "";
    for (const char of str) {
        if (isCharacterSafe(char))
            res += char;
        else
            res += "_" + char.codePointAt(0).toString(32) + "_";
    }
    return res;
}
function containsUnsafeCharacters(str) {
    for (const char of str) {
        if (!isCharacterSafe(char))
            return true;
    }
    return false;
}
function isCharacterSafe(char) {
    const unicodeNumber = char.codePointAt(0);
    if (unicodeNumber == null)
        return false;
    if (unicodeNumber >= "a".codePointAt(0) && unicodeNumber <= "z".codePointAt(0))
        return true;
    else if (unicodeNumber >= "A".codePointAt(0) && unicodeNumber <= "Z".codePointAt(0))
        return true;
    else if (unicodeNumber >= "0".codePointAt(0) && unicodeNumber <= "9".codePointAt(0))
        return true;
    else if (char === "-" || char === "_" || char === ".")
        return true;
    return false;
}
//# sourceMappingURL=getBuildFolderNameForBuildOptions.js.map
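A sketch of the naming scheme above (the build-options object below is hypothetical and includes only the fields the function reads; note that custom CMake options contribute a hash rather than appearing verbatim in the folder name):

    // Sketch: derive deterministic build-folder names for a build configuration.
    import { getBuildFolderNameForBuildOptions } from "./getBuildFolderNameForBuildOptions.js";

    const names = await getBuildFolderNameForBuildOptions({
        platform: "linux",
        arch: "x64",
        gpu: "cuda",
        llamaCpp: { repo: "ggerganov/llama.cpp", release: "latest" }, // hypothetical values
        customCmakeOptions: new Map([["GGML_CUDA_F16", "ON"]]) // hypothetical option
    });
    console.log(names.withoutCustomCmakeOptions); // e.g. "linux-x64-cuda..."
    console.log(names.withCustomCmakeOptions); // same, with a hash of the CMake options appended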
1
node_modules/node-llama-cpp/dist/bindings/utils/getBuildFolderNameForBuildOptions.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getBuildFolderNameForBuildOptions.js","sourceRoot":"","sources":["../../../src/bindings/utils/getBuildFolderNameForBuildOptions.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,UAAU,EAAC,MAAM,2BAA2B,CAAC;AAErD,OAAO,EAAC,yBAAyB,EAAE,sBAAsB,EAAC,MAAM,iBAAiB,CAAC;AAElF,MAAM,CAAC,KAAK,UAAU,iCAAiC,CAAC,YAA0B;IAC9E,MAAM,SAAS,GAAa,CAAC,YAAY,CAAC,QAAQ,EAAE,YAAY,CAAC,IAAI,CAAC,CAAC;IACvE,MAAM,QAAQ,GAAa,EAAE,CAAC;IAE9B,IAAI,YAAY,CAAC,GAAG,KAAK,KAAK,EAAE,CAAC;QAC7B,SAAS,CAAC,IAAI,CAAC,yBAAyB,CAAC,YAAY,CAAC,GAAG,CAAC,CAAC,CAAC;QAC5D,QAAQ,CAAC,IAAI,CAAC,yBAAyB,CAAC,YAAY,CAAC,GAAG,CAAC,WAAW,EAAE,CAAC,CAAC,CAAC;IAC7E,CAAC;IAED,IAAI,YAAY,CAAC,QAAQ,CAAC,IAAI,KAAK,yBAAyB,IAAI,YAAY,CAAC,QAAQ,CAAC,OAAO,KAAK,sBAAsB,EAAE,CAAC;QACvH,MAAM,qBAAqB,GAAG,MAAM,2BAA2B,CAAC,YAAY,CAAC,QAAQ,CAAC,IAAI,EAAE,YAAY,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC;QAC3H,SAAS,CAAC,IAAI,CAAC,UAAU,GAAG,qBAAqB,CAAC,CAAC;QACnD,QAAQ,CAAC,IAAI,CAAC,qBAAqB,CAAC,UAAU,CAAC,GAAG,EAAE,GAAG,CAAC,CAAC,CAAC;IAC9D,CAAC;SAAM,IAAI,YAAY,CAAC,QAAQ,CAAC,OAAO,KAAK,QAAQ;QACjD,QAAQ,CAAC,IAAI,CAAC,YAAY,CAAC,QAAQ,CAAC,OAAO,CAAC,CAAC;IAEjD,IAAI,YAAY,CAAC,kBAAkB,CAAC,IAAI,KAAK,CAAC,EAAE,CAAC;QAC7C,MAAM,IAAI,GAAG,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;QACjC,OAAO;YACH,yBAAyB,EAAE,IAAI;YAC/B,sBAAsB,EAAE,IAAI;YAC5B,UAAU,EAAE,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC;SACjC,CAAC;IACN,CAAC;IAED,MAAM,eAAe,GAAG,CAAC,GAAG,YAAY,CAAC,kBAAkB,CAAC,IAAI,EAAE,CAAC,CAAC;IACpE,eAAe,CAAC,IAAI,EAAE,CAAC;IAEvB,MAAM,uBAAuB,GAAa,EAAE,CAAC;IAC7C,KAAK,MAAM,GAAG,IAAI,eAAe,EAAE,CAAC;QAChC,IAAI,GAAG,KAAK,EAAE;YACV,SAAS;QAEb,uBAAuB,CAAC,IAAI,CAAC,GAAG,kBAAkB,CAAC,GAAG,CAAC,IAAI,kBAAkB,CAAC,YAAY,CAAC,kBAAkB,CAAC,GAAG,CAAC,GAAG,CAAE,CAAC,EAAE,CAAC,CAAC;IAChI,CAAC;IAED,MAAM,6BAA6B,GAAG,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;IAC1D,IAAI,uBAAuB,CAAC,MAAM,KAAK,CAAC,EAAE,CAAC;QACvC,OAAO;YACH,yBAAyB,EAAE,6BAA6B;YACxD,sBAAsB,EAAE,6BAA6B;YACrD,UAAU,EAAE,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC;SACjC,CAAC;IACN,CAAC;IAED,MAAM,gBAAgB,GAAG,MAAM,UAAU,CAAC,uBAAuB,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC,CAAC;IAE7E,SAAS,CAAC,IAAI,CAAC,gBAAgB,CAAC,CAAC;IACjC,QAAQ,CAAC,IAAI,CAAC,gBAAgB,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;IAC5C,MAAM,0BAA0B,GAAG,SAAS,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;IAEvD,OAAO;QACH,yBAAyB,EAAE,6BAA6B;QACxD,sBAAsB,EAAE,0BAA0B;QAClD,UAAU,EAAE,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC;KACjC,CAAC;AACN,CAAC;AAED,KAAK,UAAU,2BAA2B,CAAC,IAAY,EAAE,OAAe;IACpE,MAAM,QAAQ,GAAa,EAAE,CAAC;IAC9B,IAAI,UAAU,GAAG,KAAK,CAAC;IAEvB,IAAI,IAAI,KAAK,yBAAyB,EAAE,CAAC;QACrC,MAAM,CAAC,KAAK,EAAE,IAAI,CAAC,GAAG,IAAI,CAAC,KAAK,CAAC,GAAG,CAAC,CAAC;QAEtC,IAAI,wBAAwB,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,IAAI,wBAAwB,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,EAAE,CAAC;YACpF,UAAU,GAAG,IAAI,CAAC;YAClB,QAAQ,CAAC,IAAI,CAAC,kBAAkB,CAAC,MAAM,CAAC,KAAK,CAAC,CAAC,GAAG,GAAG,GAAG,kBAAkB,CAAC,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC,CAAC;QAC9F,CAAC;;YACG,QAAQ,CAAC,IAAI,CAAC,MAAM,CAAC,KAAK,CAAC,GAAG,GAAG,GAAG,MAAM,CAAC,IAAI,CAAC,CAAC,CAAC;IAC1D,CAAC;IAED,IAAI,wBAAwB,CAAC,OAAO,CAAC,EAAE,CAAC;QACpC,UAAU,GAAG,IAAI,CAAC;QAClB,QAAQ,CAAC,IAAI,CAAC,kBAAkB,CAAC,OAAO,CAAC,CAAC,CAAC;IAC/C,CAAC;;QACG,QAAQ,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;IAE3B,MAAM,GAAG,GAAG,QAAQ,CAAC,IAAI,CAAC,GAAG,CAAC,CAAC;IAE/B,IAAI,UAAU;QACV,OAAO,MAAM,UAAU,CAAC,GAAG,CAAC,CAAC;IAEjC,OAAO,GAAG,CAAC;AACf,CAAC;AAED,SAAS,yBAAyB,CAAC,GAAW;IAC1C,IAAI,GAAG,GAAG,EAAE,CAAC;IAEb,KAAK,MAAM,IAAI,IAAI,GAAG,EAAE,CAAC;QACrB,IAAI,eAAe,CAAC,IAAI,CAAC;YACrB,GAAG,IAAI,IAAI,CAAC;;YAEZ,GAAG,IAAI,GAAG,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAE,CAAC,QAAQ,CAAC,EAAE,CAAC,GAAG,GAAG,CAAC;IAC7D,CAAC;IAED,OAAO,GAAG,CAAC;AACf,CAAC;AAED,SAAS,wBAAwB,CAAC,GAAW;IACzC,KAAK,MAAM,IAAI,IAAI,GAAG,EAAE,CAAC
;QACrB,IAAI,CAAC,eAAe,CAAC,IAAI,CAAC;YACtB,OAAO,IAAI,CAAC;IACpB,CAAC;IAED,OAAO,KAAK,CAAC;AACjB,CAAC;AACD,SAAS,eAAe,CAAC,IAAY;IACjC,MAAM,aAAa,GAAG,IAAI,CAAC,WAAW,CAAC,CAAC,CAAC,CAAC;IAE1C,IAAI,aAAa,IAAI,IAAI;QACrB,OAAO,KAAK,CAAC;IAEjB,IAAI,aAAa,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAE,IAAI,aAAa,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAE;QAC5E,OAAO,IAAI,CAAC;SACX,IAAI,aAAa,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAE,IAAI,aAAa,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAE;QACjF,OAAO,IAAI,CAAC;SACX,IAAI,aAAa,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAE,IAAI,aAAa,IAAI,GAAG,CAAC,WAAW,CAAC,CAAC,CAAE;QACjF,OAAO,IAAI,CAAC;SACX,IAAI,IAAI,KAAK,GAAG,IAAI,IAAI,KAAK,GAAG,IAAI,IAAI,KAAK,GAAG;QACjD,OAAO,IAAI,CAAC;IAEhB,OAAO,KAAK,CAAC;AACjB,CAAC"}
1
node_modules/node-llama-cpp/dist/bindings/utils/getCanUsePrebuiltBinaries.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
export declare function getCanUsePrebuiltBinaries(): Promise<boolean>;
8
node_modules/node-llama-cpp/dist/bindings/utils/getCanUsePrebuiltBinaries.js
generated
vendored
Normal file
@@ -0,0 +1,8 @@
import { builtinLlamaCppGitHubRepo, builtinLlamaCppRelease } from "../../config.js";
import { getClonedLlamaCppRepoReleaseInfo } from "./cloneLlamaCppRepo.js";
export async function getCanUsePrebuiltBinaries() {
    const clonedLlamaCppRepoReleaseInfo = await getClonedLlamaCppRepoReleaseInfo();
    return clonedLlamaCppRepoReleaseInfo == null || (clonedLlamaCppRepoReleaseInfo.tag === builtinLlamaCppRelease &&
        clonedLlamaCppRepoReleaseInfo.llamaCppGithubRepo === builtinLlamaCppGitHubRepo);
}
//# sourceMappingURL=getCanUsePrebuiltBinaries.js.map
1
node_modules/node-llama-cpp/dist/bindings/utils/getCanUsePrebuiltBinaries.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getCanUsePrebuiltBinaries.js","sourceRoot":"","sources":["../../../src/bindings/utils/getCanUsePrebuiltBinaries.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,yBAAyB,EAAE,sBAAsB,EAAC,MAAM,iBAAiB,CAAC;AAClF,OAAO,EAAC,gCAAgC,EAAC,MAAM,wBAAwB,CAAC;AAExE,MAAM,CAAC,KAAK,UAAU,yBAAyB;IAC3C,MAAM,6BAA6B,GAAG,MAAM,gCAAgC,EAAE,CAAC;IAE/E,OAAO,6BAA6B,IAAI,IAAI,IAAI,CAC5C,6BAA6B,CAAC,GAAG,KAAK,sBAAsB;QAC5D,6BAA6B,CAAC,kBAAkB,KAAK,yBAAyB,CACjF,CAAC;AACN,CAAC"}
2
node_modules/node-llama-cpp/dist/bindings/utils/getExampleUsageCodeOfGetLlama.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
import { LlamaOptions } from "../getLlama.js";
export declare function getExampleUsageCodeOfGetLlama(getLlamaOptions: LlamaOptions | "lastBuild" | undefined, prefix?: string, wrapWithSeparators?: boolean): string;
21
node_modules/node-llama-cpp/dist/bindings/utils/getExampleUsageCodeOfGetLlama.js
generated
vendored
Normal file
@@ -0,0 +1,21 @@
import chalk from "chalk";
import stripAnsi from "strip-ansi";
import { prettyPrintObject } from "../../utils/prettyPrintObject.js";
import { getLlamaFunctionName } from "../getLlama.js";
export function getExampleUsageCodeOfGetLlama(getLlamaOptions, prefix = "", wrapWithSeparators = true) {
    let res = prefix + [
        chalk.magenta.italic("import "), chalk.whiteBright("{"), chalk.yellow(getLlamaFunctionName), chalk.whiteBright("} "),
        chalk.magenta.italic("from "), chalk.green("\"node-llama-cpp\""), chalk.whiteBright(";"),
        "\n\n",
        chalk.magenta.italic("const "), chalk.whiteBright("llama "), chalk.whiteBright("= "), chalk.magenta.italic("await "), chalk.yellow(getLlamaFunctionName), chalk.whiteBright("("),
        getLlamaOptions === undefined ? "" : prettyPrintObject(getLlamaOptions),
        chalk.whiteBright(")"), chalk.whiteBright(";")
    ].join(prefix);
    if (wrapWithSeparators) {
        const longestLineLength = res.split("\n")
            .reduce((max, line) => Math.max(max, stripAnsi(line).length), 0);
        res = chalk.blue("-".repeat(longestLineLength)) + "\n" + res + "\n" + chalk.blue("-".repeat(longestLineLength));
    }
    return res;
}
//# sourceMappingURL=getExampleUsageCodeOfGetLlama.js.map
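A sketch of how this formatter is typically invoked (illustrative; the output is ANSI-colored and, by default, wrapped in separator lines sized to the longest line):

    // Sketch: print a colorized `getLlama` usage snippet to the terminal.
    import { getExampleUsageCodeOfGetLlama } from "./getExampleUsageCodeOfGetLlama.js";

    console.log(getExampleUsageCodeOfGetLlama({ gpu: "vulkan" })); // options value is illustrative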
1
node_modules/node-llama-cpp/dist/bindings/utils/getExampleUsageCodeOfGetLlama.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getExampleUsageCodeOfGetLlama.js","sourceRoot":"","sources":["../../../src/bindings/utils/getExampleUsageCodeOfGetLlama.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,MAAM,OAAO,CAAC;AAC1B,OAAO,SAAS,MAAM,YAAY,CAAC;AACnC,OAAO,EAAC,iBAAiB,EAAC,MAAM,kCAAkC,CAAC;AACnE,OAAO,EAAC,oBAAoB,EAAe,MAAM,gBAAgB,CAAC;AAElE,MAAM,UAAU,6BAA6B,CAAC,eAAuD,EAAE,SAAiB,EAAE,EAAE,qBAA8B,IAAI;IAC1J,IAAI,GAAG,GAAG,MAAM,GAAG;QACf,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,SAAS,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,oBAAoB,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC;QACpH,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,OAAO,CAAC,EAAE,KAAK,CAAC,KAAK,CAAC,oBAAoB,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,GAAG,CAAC;QACxF,MAAM;QACN,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,QAAQ,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,IAAI,CAAC,EAAE,KAAK,CAAC,OAAO,CAAC,MAAM,CAAC,QAAQ,CAAC,EAAE,KAAK,CAAC,MAAM,CAAC,oBAAoB,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,GAAG,CAAC;QAChL,eAAe,KAAK,SAAS,CAAC,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,iBAAiB,CAAC,eAAe,CAAC;QACvE,KAAK,CAAC,WAAW,CAAC,GAAG,CAAC,EAAE,KAAK,CAAC,WAAW,CAAC,GAAG,CAAC;KACjD,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;IAEf,IAAI,kBAAkB,EAAE,CAAC;QACrB,MAAM,iBAAiB,GAAG,GAAG,CAAC,KAAK,CAAC,IAAI,CAAC;aACpC,MAAM,CAAC,CAAC,GAAG,EAAE,IAAI,EAAE,EAAE,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,EAAE,SAAS,CAAC,IAAI,CAAC,CAAC,MAAM,CAAC,EAAE,CAAC,CAAC,CAAC;QACrE,GAAG,GAAG,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,iBAAiB,CAAC,CAAC,GAAG,IAAI,GAAG,GAAG,GAAG,IAAI,GAAG,KAAK,CAAC,IAAI,CAAC,GAAG,CAAC,MAAM,CAAC,iBAAiB,CAAC,CAAC,CAAC;IACpH,CAAC;IAED,OAAO,GAAG,CAAC;AACf,CAAC"}
12
node_modules/node-llama-cpp/dist/bindings/utils/getGpuTypesToUseForOption.d.ts
generated
vendored
Normal file
@@ -0,0 +1,12 @@
import process from "process";
import { BuildGpu } from "../types.js";
import { LlamaOptions } from "../getLlama.js";
import { BinaryPlatform } from "./getPlatform.js";
export declare function getGpuTypesToUseForOption(gpu: Required<LlamaOptions>["gpu"], { platform, arch }?: {
    platform?: BinaryPlatform;
    arch?: typeof process.arch;
}): Promise<BuildGpu[]>;
export declare function resolveValidGpuOptionForPlatform(gpu: BuildGpu | "auto", { platform, arch }: {
    platform: BinaryPlatform;
    arch: typeof process.arch;
}): false | "metal" | "cuda" | "vulkan" | "auto";
39
node_modules/node-llama-cpp/dist/bindings/utils/getGpuTypesToUseForOption.js
generated
vendored
Normal file
@@ -0,0 +1,39 @@
import process from "process";
import { buildGpuOptions } from "../types.js";
import { getPlatform } from "./getPlatform.js";
import { getBestComputeLayersAvailable } from "./getBestComputeLayersAvailable.js";
export async function getGpuTypesToUseForOption(gpu, { platform = getPlatform(), arch = process.arch } = {}) {
    const resolvedGpuOption = typeof gpu === "object"
        ? gpu.type
        : gpu;
    function withExcludedGpuTypesRemoved(gpuTypes) {
        const resolvedExcludeTypes = typeof gpu === "object"
            ? new Set(gpu.exclude ?? [])
            : new Set();
        return gpuTypes.filter((gpuType) => !resolvedExcludeTypes.has(gpuType));
    }
    const resolvedGpu = resolveValidGpuOptionForPlatform(resolvedGpuOption, {
        platform,
        arch
    });
    if (resolvedGpu === "auto") {
        if (arch === process.arch)
            return withExcludedGpuTypesRemoved(await getBestComputeLayersAvailable());
        return withExcludedGpuTypesRemoved([false]);
    }
    return withExcludedGpuTypesRemoved([resolvedGpu]);
}
export function resolveValidGpuOptionForPlatform(gpu, { platform, arch }) {
    if (gpu == null)
        return "auto";
    else if (platform === "mac") {
        if (arch !== "x64" && gpu === "cuda")
            return "auto";
    }
    else if (gpu === "metal")
        return "auto";
    if (buildGpuOptions.includes(gpu))
        return gpu;
    return "auto";
}
//# sourceMappingURL=getGpuTypesToUseForOption.js.map
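The `gpu` argument accepts either a plain value or an object form with an exclusion list; a short sketch (import path illustrative for this internal module):

    // Sketch: resolve the ordered list of GPU types to try for a given `gpu` option.
    import { getGpuTypesToUseForOption } from "./getGpuTypesToUseForOption.js";

    console.log(await getGpuTypesToUseForOption("auto"));
    // Object form: auto-detect, but never try Vulkan
    console.log(await getGpuTypesToUseForOption({ type: "auto", exclude: ["vulkan"] }));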
1
node_modules/node-llama-cpp/dist/bindings/utils/getGpuTypesToUseForOption.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getGpuTypesToUseForOption.js","sourceRoot":"","sources":["../../../src/bindings/utils/getGpuTypesToUseForOption.ts"],"names":[],"mappings":"AAAA,OAAO,OAAO,MAAM,SAAS,CAAC;AAC9B,OAAO,EAAW,eAAe,EAAC,MAAM,aAAa,CAAC;AAEtD,OAAO,EAAiB,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAC7D,OAAO,EAAC,6BAA6B,EAAC,MAAM,oCAAoC,CAAC;AAEjF,MAAM,CAAC,KAAK,UAAU,yBAAyB,CAAC,GAAkC,EAAE,EAChF,QAAQ,GAAG,WAAW,EAAE,EACxB,IAAI,GAAG,OAAO,CAAC,IAAI,KAInB,EAAE;IACF,MAAM,iBAAiB,GAAG,OAAO,GAAG,KAAK,QAAQ;QAC7C,CAAC,CAAC,GAAG,CAAC,IAAI;QACV,CAAC,CAAC,GAAG,CAAC;IAEV,SAAS,2BAA2B,CAAC,QAAoB;QACrD,MAAM,oBAAoB,GAAG,OAAO,GAAG,KAAK,QAAQ;YAChD,CAAC,CAAC,IAAI,GAAG,CAAC,GAAG,CAAC,OAAO,IAAI,EAAE,CAAC;YAC5B,CAAC,CAAC,IAAI,GAAG,EAAE,CAAC;QAEhB,OAAO,QAAQ,CAAC,MAAM,CAAC,CAAC,OAAO,EAAE,EAAE,CAAC,CAAC,oBAAoB,CAAC,GAAG,CAAC,OAAO,CAAC,CAAC,CAAC;IAC5E,CAAC;IAED,MAAM,WAAW,GAAG,gCAAgC,CAAC,iBAAiB,EAAE;QACpE,QAAQ;QACR,IAAI;KACP,CAAC,CAAC;IAEH,IAAI,WAAW,KAAK,MAAM,EAAE,CAAC;QACzB,IAAI,IAAI,KAAK,OAAO,CAAC,IAAI;YACrB,OAAO,2BAA2B,CAAC,MAAM,6BAA6B,EAAE,CAAC,CAAC;QAE9E,OAAO,2BAA2B,CAAC,CAAC,KAAK,CAAC,CAAC,CAAC;IAChD,CAAC;IAED,OAAO,2BAA2B,CAAC,CAAC,WAAW,CAAC,CAAC,CAAC;AACtD,CAAC;AAED,MAAM,UAAU,gCAAgC,CAAC,GAAsB,EAAE,EACrE,QAAQ,EACR,IAAI,EAIP;IACG,IAAI,GAAG,IAAI,IAAI;QACX,OAAO,MAAM,CAAC;SACb,IAAI,QAAQ,KAAK,KAAK,EAAE,CAAC;QAC1B,IAAI,IAAI,KAAK,KAAK,IAAI,GAAG,KAAK,MAAM;YAChC,OAAO,MAAM,CAAC;IACtB,CAAC;SAAM,IAAI,GAAG,KAAK,OAAO;QACtB,OAAO,MAAM,CAAC;IAElB,IAAI,eAAe,CAAC,QAAQ,CAAC,GAAuC,CAAC;QACjE,OAAO,GAAG,CAAC;IAEf,OAAO,MAAM,CAAC;AAClB,CAAC"}
9
node_modules/node-llama-cpp/dist/bindings/utils/getLinuxDistroInfo.d.ts
generated
vendored
Normal file
@@ -0,0 +1,9 @@
export type LinuxDistroInfo = Awaited<ReturnType<typeof getLinuxDistroInfo>>;
export declare function getLinuxDistroInfo(): Promise<{
    name: string;
    id: string;
    version: string;
    versionCodename: string;
    prettyName: string;
}>;
export declare function isDistroAlpineLinux(linuxDistroInfo: LinuxDistroInfo): Promise<boolean>;
46
node_modules/node-llama-cpp/dist/bindings/utils/getLinuxDistroInfo.js
generated
vendored
Normal file
@@ -0,0 +1,46 @@
import fs from "fs-extra";
const osReleasePaths = [
    "/etc/os-release",
    "/usr/lib/os-release"
];
export async function getLinuxDistroInfo() {
    const osReleaseInfo = await getOsReleaseInfo();
    return {
        name: osReleaseInfo.get("name") ?? "",
        id: osReleaseInfo.get("id") ?? "",
        version: osReleaseInfo.get("version_id") ?? osReleaseInfo.get("version") ?? "",
        versionCodename: osReleaseInfo.get("version_codename") ?? "",
        prettyName: osReleaseInfo.get("pretty_name") ?? ""
    };
}
export async function isDistroAlpineLinux(linuxDistroInfo) {
    return linuxDistroInfo.id === "alpine" || linuxDistroInfo.name.toLowerCase().startsWith("alpine") ||
        linuxDistroInfo.prettyName.toLowerCase().startsWith("alpine");
}
async function getOsReleaseInfo() {
    for (const osReleasePath of osReleasePaths) {
        try {
            if (!(await fs.pathExists(osReleasePath)))
                continue;
            const osReleaseFile = await fs.readFile(osReleasePath, "utf8");
            const res = new Map();
            for (const line of osReleaseFile.split("\n")) {
                const equalsSignIndex = line.indexOf("=");
                // ignore lines with no key
                if (equalsSignIndex < 1)
                    continue;
                const key = line.slice(0, equalsSignIndex).toLowerCase();
                let value = line.slice(equalsSignIndex + 1);
                if (value.startsWith('"') && value.endsWith('"'))
                    value = value.slice(1, -1);
                res.set(key, value);
            }
            return res;
        }
        catch (err) {
            continue;
        }
    }
    return new Map();
}
//# sourceMappingURL=getLinuxDistroInfo.js.map
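A sketch of reading the parsed distro information (fields come back as empty strings when no os-release file is readable, e.g. on non-Linux machines; import path illustrative):

    // Sketch: print the current distro and whether it is Alpine (musl-based).
    import { getLinuxDistroInfo, isDistroAlpineLinux } from "./getLinuxDistroInfo.js";

    const distroInfo = await getLinuxDistroInfo();
    console.log(distroInfo.prettyName); // e.g. "Ubuntu 24.04.1 LTS"
    console.log(await isDistroAlpineLinux(distroInfo)); // true on Alpine Linux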
1
node_modules/node-llama-cpp/dist/bindings/utils/getLinuxDistroInfo.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getLinuxDistroInfo.js","sourceRoot":"","sources":["../../../src/bindings/utils/getLinuxDistroInfo.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,UAAU,CAAC;AAE1B,MAAM,cAAc,GAAG;IACnB,iBAAiB;IACjB,qBAAqB;CACf,CAAC;AAGX,MAAM,CAAC,KAAK,UAAU,kBAAkB;IACpC,MAAM,aAAa,GAAG,MAAM,gBAAgB,EAAE,CAAC;IAE/C,OAAO;QACH,IAAI,EAAE,aAAa,CAAC,GAAG,CAAC,MAAM,CAAC,IAAI,EAAE;QACrC,EAAE,EAAE,aAAa,CAAC,GAAG,CAAC,IAAI,CAAC,IAAI,EAAE;QACjC,OAAO,EAAE,aAAa,CAAC,GAAG,CAAC,YAAY,CAAC,IAAI,aAAa,CAAC,GAAG,CAAC,SAAS,CAAC,IAAI,EAAE;QAC9E,eAAe,EAAE,aAAa,CAAC,GAAG,CAAC,kBAAkB,CAAC,IAAI,EAAE;QAC5D,UAAU,EAAE,aAAa,CAAC,GAAG,CAAC,aAAa,CAAC,IAAI,EAAE;KACrD,CAAC;AACN,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,mBAAmB,CAAC,eAAgC;IACtE,OAAO,eAAe,CAAC,EAAE,KAAK,QAAQ,IAAI,eAAe,CAAC,IAAI,CAAC,WAAW,EAAE,CAAC,UAAU,CAAC,QAAQ,CAAC;QAC7F,eAAe,CAAC,UAAU,CAAC,WAAW,EAAE,CAAC,UAAU,CAAC,QAAQ,CAAC,CAAC;AACtE,CAAC;AAED,KAAK,UAAU,gBAAgB;IAC3B,KAAK,MAAM,aAAa,IAAI,cAAc,EAAE,CAAC;QACzC,IAAI,CAAC;YACD,IAAI,CAAC,CAAC,MAAM,EAAE,CAAC,UAAU,CAAC,aAAa,CAAC,CAAC;gBACrC,SAAS;YAEb,MAAM,aAAa,GAAG,MAAM,EAAE,CAAC,QAAQ,CAAC,aAAa,EAAE,MAAM,CAAC,CAAC;YAE/D,MAAM,GAAG,GAAG,IAAI,GAAG,EAAkB,CAAC;YACtC,KAAK,MAAM,IAAI,IAAI,aAAa,CAAC,KAAK,CAAC,IAAI,CAAC,EAAE,CAAC;gBAC3C,MAAM,eAAe,GAAG,IAAI,CAAC,OAAO,CAAC,GAAG,CAAC,CAAC;gBAE1C,2BAA2B;gBAC3B,IAAI,eAAe,GAAG,CAAC;oBACnB,SAAS;gBAEb,MAAM,GAAG,GAAG,IAAI,CAAC,KAAK,CAAC,CAAC,EAAE,eAAe,CAAC,CAAC,WAAW,EAAE,CAAC;gBACzD,IAAI,KAAK,GAAG,IAAI,CAAC,KAAK,CAAC,eAAe,GAAG,CAAC,CAAC,CAAC;gBAE5C,IAAI,KAAK,CAAC,UAAU,CAAC,GAAG,CAAC,IAAI,KAAK,CAAC,QAAQ,CAAC,GAAG,CAAC;oBAC5C,KAAK,GAAG,KAAK,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC,CAAC;gBAE/B,GAAG,CAAC,GAAG,CAAC,GAAG,EAAE,KAAK,CAAC,CAAC;YACxB,CAAC;YAED,OAAO,GAAG,CAAC;QACf,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACX,SAAS;QACb,CAAC;IACL,CAAC;IAED,OAAO,IAAI,GAAG,EAAkB,CAAC;AACrC,CAAC"}
13
node_modules/node-llama-cpp/dist/bindings/utils/getLlamaGpuTypes.d.ts
generated
vendored
Normal file
@@ -0,0 +1,13 @@
import { LlamaGpuType } from "../types.js";
/**
 * Get the list of GPU types that can be used with `getLlama` on the current machine.
 *
 * When passing `"supported"`, only the GPU types that have the
 * necessary libraries and drivers installed on the current machine will be returned.
 * All of these GPU types have prebuilt binaries for the current platform and architecture.
 *
 * When passing `"allValid"`, all GPU types that are compatible with the current OS and architecture will be returned.
 * Some of these GPU types may not have prebuilt binaries for the current platform and architecture,
 * as some of them are inadvisable for the current machine (like CUDA on an x64 Mac machine).
 */
export declare function getLlamaGpuTypes(include: "supported" | "allValid"): Promise<LlamaGpuType[]>;
34
node_modules/node-llama-cpp/dist/bindings/utils/getLlamaGpuTypes.js
generated
vendored
Normal file
@@ -0,0 +1,34 @@
import process from "process";
import { getGpuTypesToUseForOption } from "./getGpuTypesToUseForOption.js";
import { getPlatform } from "./getPlatform.js";
/**
 * Get the list of GPU types that can be used with `getLlama` on the current machine.
 *
 * When passing `"supported"`, only the GPU types that have the
 * necessary libraries and drivers installed on the current machine will be returned.
 * All of these GPU types have prebuilt binaries for the current platform and architecture.
 *
 * When passing `"allValid"`, all GPU types that are compatible with the current OS and architecture will be returned.
 * Some of these GPU types may not have prebuilt binaries for the current platform and architecture,
 * as some of them are inadvisable for the current machine (like CUDA on an x64 Mac machine).
 */
export async function getLlamaGpuTypes(include) {
    const platform = getPlatform();
    const arch = process.arch;
    if (include === "supported") {
        const gpuTypes = new Set(await getGpuTypesToUseForOption("auto"));
        if (platform === "win" && arch !== "x64")
            gpuTypes.delete("vulkan"); // no Vulkan prebuilt binary yet due to incomplete support for arm64
        return [...gpuTypes];
    }
    const res = [];
    // Metal is not properly supported by llama.cpp on x64 Mac machines
    if (platform === "mac" && arch === "arm64")
        res.push("metal");
    else
        res.push("cuda");
    res.push("vulkan");
    res.push(false);
    return res;
}
//# sourceMappingURL=getLlamaGpuTypes.js.map
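A sketch contrasting the two modes documented above (import path illustrative; the example outputs depend on the machine):

    // Sketch: compare the GPU types usable right now with all valid ones for this OS/arch.
    import { getLlamaGpuTypes } from "./getLlamaGpuTypes.js";

    console.log(await getLlamaGpuTypes("supported")); // e.g. ["cuda", false]
    console.log(await getLlamaGpuTypes("allValid")); // e.g. ["cuda", "vulkan", false]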
1
node_modules/node-llama-cpp/dist/bindings/utils/getLlamaGpuTypes.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getLlamaGpuTypes.js","sourceRoot":"","sources":["../../../src/bindings/utils/getLlamaGpuTypes.ts"],"names":[],"mappings":"AAAA,OAAO,OAAO,MAAM,SAAS,CAAC;AAE9B,OAAO,EAAC,yBAAyB,EAAC,MAAM,gCAAgC,CAAC;AACzE,OAAO,EAAC,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAE7C;;;;;;;;;;GAUG;AACH,MAAM,CAAC,KAAK,UAAU,gBAAgB,CAAC,OAAiC;IACpE,MAAM,QAAQ,GAAG,WAAW,EAAE,CAAC;IAC/B,MAAM,IAAI,GAAG,OAAO,CAAC,IAAI,CAAC;IAE1B,IAAI,OAAO,KAAK,WAAW,EAAE,CAAC;QAC1B,MAAM,QAAQ,GAAG,IAAI,GAAG,CAAC,MAAM,yBAAyB,CAAC,MAAM,CAAC,CAAC,CAAC;QAElE,IAAI,QAAQ,KAAK,KAAK,IAAI,IAAI,KAAK,KAAK;YACpC,QAAQ,CAAC,MAAM,CAAC,QAAQ,CAAC,CAAC,CAAC,oEAAoE;QAEnG,OAAO,CAAC,GAAG,QAAQ,CAAC,CAAC;IACzB,CAAC;IAED,MAAM,GAAG,GAAmB,EAAE,CAAC;IAE/B,mEAAmE;IACnE,IAAI,QAAQ,KAAK,KAAK,IAAI,IAAI,KAAK,OAAO;QACtC,GAAG,CAAC,IAAI,CAAC,OAAO,CAAC,CAAC;;QAElB,GAAG,CAAC,IAAI,CAAC,MAAM,CAAC,CAAC;IAErB,GAAG,CAAC,IAAI,CAAC,QAAQ,CAAC,CAAC;IACnB,GAAG,CAAC,IAAI,CAAC,KAAK,CAAC,CAAC;IAEhB,OAAO,GAAG,CAAC;AACf,CAAC"}
5
node_modules/node-llama-cpp/dist/bindings/utils/getLlamaWithoutBackend.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
import { Llama } from "../Llama.js";
/**
 * This is used to access various methods in the addon side without actually using a backend
 */
export declare function getLlamaWithoutBackend(): Promise<Llama>;
40
node_modules/node-llama-cpp/dist/bindings/utils/getLlamaWithoutBackend.js
generated
vendored
Normal file
@@ -0,0 +1,40 @@
import { withLock } from "lifecycle-utils";
import { getLlamaForOptions } from "../getLlama.js";
import { LlamaLogLevel } from "../types.js";
let sharedLlamaWithoutBackend = null;
/**
 * This is used to access various methods in the addon side without actually using a backend
 */
export async function getLlamaWithoutBackend() {
    if (sharedLlamaWithoutBackend != null)
        return sharedLlamaWithoutBackend;
    return await withLock([getLlamaWithoutBackend, "loadAddon"], async () => {
        if (sharedLlamaWithoutBackend != null)
            return sharedLlamaWithoutBackend;
        try {
            sharedLlamaWithoutBackend = await getLlamaForOptions({
                gpu: false,
                progressLogs: false,
                logLevel: LlamaLogLevel.error,
                build: "never",
                usePrebuiltBinaries: true,
                vramPadding: 0
            }, {
                skipLlamaInit: true
            });
        }
        catch (err) {
            sharedLlamaWithoutBackend = await getLlamaForOptions({
                progressLogs: false,
                logLevel: LlamaLogLevel.error,
                build: "never",
                usePrebuiltBinaries: true,
                vramPadding: 0
            }, {
                skipLlamaInit: true
            });
        }
        return sharedLlamaWithoutBackend;
    });
}
//# sourceMappingURL=getLlamaWithoutBackend.js.map
1
node_modules/node-llama-cpp/dist/bindings/utils/getLlamaWithoutBackend.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getLlamaWithoutBackend.js","sourceRoot":"","sources":["../../../src/bindings/utils/getLlamaWithoutBackend.ts"],"names":[],"mappings":"AAAA,OAAO,EAAC,QAAQ,EAAC,MAAM,iBAAiB,CAAC;AACzC,OAAO,EAAC,kBAAkB,EAAC,MAAM,gBAAgB,CAAC;AAClD,OAAO,EAAC,aAAa,EAAC,MAAM,aAAa,CAAC;AAG1C,IAAI,yBAAyB,GAAiB,IAAI,CAAC;AAEnD;;GAEG;AACH,MAAM,CAAC,KAAK,UAAU,sBAAsB;IACxC,IAAI,yBAAyB,IAAI,IAAI;QACjC,OAAO,yBAAyB,CAAC;IAErC,OAAO,MAAM,QAAQ,CAAC,CAAC,sBAAsB,EAAE,WAAW,CAAC,EAAE,KAAK,IAAI,EAAE;QACpE,IAAI,yBAAyB,IAAI,IAAI;YACjC,OAAO,yBAAyB,CAAC;QAErC,IAAI,CAAC;YACD,yBAAyB,GAAG,MAAM,kBAAkB,CAAC;gBACjD,GAAG,EAAE,KAAK;gBACV,YAAY,EAAE,KAAK;gBACnB,QAAQ,EAAE,aAAa,CAAC,KAAK;gBAC7B,KAAK,EAAE,OAAO;gBACd,mBAAmB,EAAE,IAAI;gBACzB,WAAW,EAAE,CAAC;aACjB,EAAE;gBACC,aAAa,EAAE,IAAI;aACtB,CAAC,CAAC;QACP,CAAC;QAAC,OAAO,GAAG,EAAE,CAAC;YACX,yBAAyB,GAAG,MAAM,kBAAkB,CAAC;gBACjD,YAAY,EAAE,KAAK;gBACnB,QAAQ,EAAE,aAAa,CAAC,KAAK;gBAC7B,KAAK,EAAE,OAAO;gBACd,mBAAmB,EAAE,IAAI;gBACzB,WAAW,EAAE,CAAC;aACjB,EAAE;gBACC,aAAa,EAAE,IAAI;aACtB,CAAC,CAAC;QACP,CAAC;QAED,OAAO,yBAAyB,CAAC;IACrC,CAAC,CAAC,CAAC;AACP,CAAC"}
2
node_modules/node-llama-cpp/dist/bindings/utils/getPlatform.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export declare function getPlatform(): "aix" | "freebsd" | "haiku" | "linux" | "openbsd" | "sunos" | "netbsd" | "win" | "mac";
export type BinaryPlatform = ReturnType<typeof getPlatform>;
15
node_modules/node-llama-cpp/dist/bindings/utils/getPlatform.js
generated
vendored
Normal file
@@ -0,0 +1,15 @@
import process from "process";
export function getPlatform() {
    switch (process.platform) {
        case "win32":
        case "cygwin":
            return "win";
        case "linux":
        case "android":
            return "linux";
        case "darwin":
            return "mac";
    }
    return process.platform;
}
//# sourceMappingURL=getPlatform.js.map
1
node_modules/node-llama-cpp/dist/bindings/utils/getPlatform.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getPlatform.js","sourceRoot":"","sources":["../../../src/bindings/utils/getPlatform.ts"],"names":[],"mappings":"AAAA,OAAO,OAAO,MAAM,SAAS,CAAC;AAE9B,MAAM,UAAU,WAAW;IACvB,QAAQ,OAAO,CAAC,QAAQ,EAAE,CAAC;QACvB,KAAK,OAAO,CAAC;QACb,KAAK,QAAQ;YACT,OAAO,KAAK,CAAC;QAEjB,KAAK,OAAO,CAAC;QACb,KAAK,SAAS;YACV,OAAO,OAAO,CAAC;QAEnB,KAAK,QAAQ;YACT,OAAO,KAAK,CAAC;IACrB,CAAC;IAED,OAAO,OAAO,CAAC,QAAQ,CAAC;AAC5B,CAAC"}
5
node_modules/node-llama-cpp/dist/bindings/utils/getPlatformInfo.d.ts
generated
vendored
Normal file
@@ -0,0 +1,5 @@
export declare function getPlatformInfo(): Promise<{
    name: string;
    version: string;
}>;
export type BinaryPlatformInfo = Awaited<ReturnType<typeof getPlatformInfo>>;
28
node_modules/node-llama-cpp/dist/bindings/utils/getPlatformInfo.js
generated
vendored
Normal file
@@ -0,0 +1,28 @@
import os from "os";
import { getPlatform } from "./getPlatform.js";
import { getLinuxDistroInfo } from "./getLinuxDistroInfo.js";
export async function getPlatformInfo() {
    const currentPlatform = getPlatform();
    if (currentPlatform === "mac")
        return {
            name: "macOS",
            version: os.release()
        };
    else if (currentPlatform === "linux") {
        const linuxDistroInfo = await getLinuxDistroInfo();
        return {
            name: linuxDistroInfo.name,
            version: linuxDistroInfo.version
        };
    }
    else if (currentPlatform === "win")
        return {
            name: "Windows",
            version: os.release()
        };
    return {
        name: "Unknown",
        version: os.release()
    };
}
//# sourceMappingURL=getPlatformInfo.js.map
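A sketch of the combined platform report (note that on macOS and Windows the version comes from os.release(), i.e. the kernel release rather than the marketing version; import path illustrative):

    // Sketch: print a human-readable platform name and version.
    import { getPlatformInfo } from "./getPlatformInfo.js";

    const { name, version } = await getPlatformInfo();
    console.log(`${name} ${version}`); // e.g. "Ubuntu 24.04" or "Windows 10.0.22631"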
1
node_modules/node-llama-cpp/dist/bindings/utils/getPlatformInfo.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"getPlatformInfo.js","sourceRoot":"","sources":["../../../src/bindings/utils/getPlatformInfo.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,IAAI,CAAC;AACpB,OAAO,EAAC,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAC7C,OAAO,EAAC,kBAAkB,EAAC,MAAM,yBAAyB,CAAC;AAE3D,MAAM,CAAC,KAAK,UAAU,eAAe;IACjC,MAAM,eAAe,GAAG,WAAW,EAAE,CAAC;IAEtC,IAAI,eAAe,KAAK,KAAK;QACzB,OAAO;YACH,IAAI,EAAE,OAAO;YACb,OAAO,EAAE,EAAE,CAAC,OAAO,EAAE;SACxB,CAAC;SACD,IAAI,eAAe,KAAK,OAAO,EAAE,CAAC;QACnC,MAAM,eAAe,GAAG,MAAM,kBAAkB,EAAE,CAAC;QAEnD,OAAO;YACH,IAAI,EAAE,eAAe,CAAC,IAAI;YAC1B,OAAO,EAAE,eAAe,CAAC,OAAO;SACnC,CAAC;IACN,CAAC;SAAM,IAAI,eAAe,KAAK,KAAK;QAChC,OAAO;YACH,IAAI,EAAE,SAAS;YACf,OAAO,EAAE,EAAE,CAAC,OAAO,EAAE;SACxB,CAAC;IAEN,OAAO;QACH,IAAI,EAAE,SAAS;QACf,OAAO,EAAE,EAAE,CAAC,OAAO,EAAE;KACxB,CAAC;AACN,CAAC"}
3
node_modules/node-llama-cpp/dist/bindings/utils/hasBuildingFromSourceDependenciesInstalled.d.ts
generated
vendored
Normal file
@@ -0,0 +1,3 @@
export declare function hasBuildingFromSourceDependenciesInstalled(): Promise<boolean>;
export declare function hasGit(): Promise<boolean>;
export declare function hasNpm(): Promise<boolean>;
27
node_modules/node-llama-cpp/dist/bindings/utils/hasBuildingFromSourceDependenciesInstalled.js
generated
vendored
Normal file
@@ -0,0 +1,27 @@
import which from "which";
import { asyncEvery } from "./asyncEvery.js";
export async function hasBuildingFromSourceDependenciesInstalled() {
    return await asyncEvery([
        hasGit(),
        hasNpm()
    ]);
}
export async function hasGit() {
    try {
        const resolvedPath = await which("git");
        return resolvedPath !== "";
    }
    catch (err) {
        return false;
    }
}
export async function hasNpm() {
    try {
        const resolvedPath = await which("npm");
        return resolvedPath !== "";
    }
    catch (err) {
        return false;
    }
}
//# sourceMappingURL=hasBuildingFromSourceDependenciesInstalled.js.map
1
node_modules/node-llama-cpp/dist/bindings/utils/hasBuildingFromSourceDependenciesInstalled.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"hasBuildingFromSourceDependenciesInstalled.js","sourceRoot":"","sources":["../../../src/bindings/utils/hasBuildingFromSourceDependenciesInstalled.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,MAAM,OAAO,CAAC;AAC1B,OAAO,EAAC,UAAU,EAAC,MAAM,iBAAiB,CAAC;AAE3C,MAAM,CAAC,KAAK,UAAU,0CAA0C;IAC5D,OAAO,MAAM,UAAU,CAAC;QACpB,MAAM,EAAE;QACR,MAAM,EAAE;KACX,CAAC,CAAC;AACP,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,MAAM;IACxB,IAAI,CAAC;QACD,MAAM,YAAY,GAAG,MAAM,KAAK,CAAC,KAAK,CAAC,CAAC;QACxC,OAAO,YAAY,KAAK,EAAE,CAAC;IAC/B,CAAC;IAAC,OAAO,GAAG,EAAE,CAAC;QACX,OAAO,KAAK,CAAC;IACjB,CAAC;AACL,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,MAAM;IACxB,IAAI,CAAC;QACD,MAAM,YAAY,GAAG,MAAM,KAAK,CAAC,KAAK,CAAC,CAAC;QACxC,OAAO,YAAY,KAAK,EAAE,CAAC;IAC/B,CAAC;IAAC,OAAO,GAAG,EAAE,CAAC;QACX,OAAO,KAAK,CAAC;IACjB,CAAC;AACL,CAAC"}
2
node_modules/node-llama-cpp/dist/bindings/utils/hasFileInPath.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
export declare function hasFileInPath(fileToSearch: string, additionalSearchPaths?: (string | null | undefined)[]): Promise<boolean>;
export declare function resolveFileLocationInPath(fileToSearch: string, additionalSearchPaths?: (string | null | undefined)[]): Promise<string[]>;
34
node_modules/node-llama-cpp/dist/bindings/utils/hasFileInPath.js
generated
vendored
Normal file
@@ -0,0 +1,34 @@
import path from "path";
import fs from "fs-extra";
import { asyncSome } from "./asyncSome.js";
export async function hasFileInPath(fileToSearch, additionalSearchPaths = []) {
    const searchPaths = resolveSearchPaths(additionalSearchPaths);
    return await asyncSome(searchPaths.map(async (searchPath) => {
        return fs.pathExists(path.join(searchPath, fileToSearch));
    }));
}
export async function resolveFileLocationInPath(fileToSearch, additionalSearchPaths = []) {
    const searchPaths = resolveSearchPaths(additionalSearchPaths);
    const foundPaths = await Promise.all(searchPaths.map(async (searchPath) => {
        const filePath = path.join(searchPath, fileToSearch);
        if (await fs.pathExists(filePath))
            return filePath;
        return null;
    }));
    return foundPaths.filter((filePath) => filePath != null);
}
function resolveSearchPaths(additionalSearchPaths) {
    return [
        // Windows checks the cwd before the path
        ...(process.platform === "win32"
            ? [process.cwd()]
            : []),
        ...((process.env.PATH || "").split(path.delimiter)),
        ...(additionalSearchPaths.flatMap((searchPath) => (searchPath || "").split(path.delimiter)))
    ]
        .map((pathPart) => ((pathPart.length >= 2 && pathPart.startsWith('"') && pathPart.endsWith('"'))
        ? pathPart.slice(1, -1)
        : pathPart))
        .filter((pathPart) => pathPart.length > 0);
}
//# sourceMappingURL=hasFileInPath.js.map
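A sketch of the PATH search above (the file name is illustrative; since names are matched verbatim, the executable suffix must be included on Windows):

    // Sketch: look an executable up through the PATH, plus optional extra directories.
    import { hasFileInPath, resolveFileLocationInPath } from "./hasFileInPath.js";

    const exeName = process.platform === "win32" ? "cmake.exe" : "cmake";
    if (await hasFileInPath(exeName))
        console.log("found at:", await resolveFileLocationInPath(exeName));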
1
node_modules/node-llama-cpp/dist/bindings/utils/hasFileInPath.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"hasFileInPath.js","sourceRoot":"","sources":["../../../src/bindings/utils/hasFileInPath.ts"],"names":[],"mappings":"AAAA,OAAO,IAAI,MAAM,MAAM,CAAC;AACxB,OAAO,EAAE,MAAM,UAAU,CAAC;AAC1B,OAAO,EAAC,SAAS,EAAC,MAAM,gBAAgB,CAAC;AAEzC,MAAM,CAAC,KAAK,UAAU,aAAa,CAAC,YAAoB,EAAE,wBAAuD,EAAE;IAC/G,MAAM,WAAW,GAAG,kBAAkB,CAAC,qBAAqB,CAAC,CAAC;IAE9D,OAAO,MAAM,SAAS,CAClB,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,UAAU,EAAE,EAAE;QACjC,OAAO,EAAE,CAAC,UAAU,CAAC,IAAI,CAAC,IAAI,CAAC,UAAU,EAAE,YAAY,CAAC,CAAC,CAAC;IAC9D,CAAC,CAAC,CACL,CAAC;AACN,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,yBAAyB,CAAC,YAAoB,EAAE,wBAAuD,EAAE;IAC3H,MAAM,WAAW,GAAG,kBAAkB,CAAC,qBAAqB,CAAC,CAAC;IAE9D,MAAM,UAAU,GAAG,MAAM,OAAO,CAAC,GAAG,CAChC,WAAW,CAAC,GAAG,CAAC,KAAK,EAAE,UAAU,EAAE,EAAE;QACjC,MAAM,QAAQ,GAAG,IAAI,CAAC,IAAI,CAAC,UAAU,EAAE,YAAY,CAAC,CAAC;QACrD,IAAI,MAAM,EAAE,CAAC,UAAU,CAAC,QAAQ,CAAC;YAC7B,OAAO,QAAQ,CAAC;QAEpB,OAAO,IAAI,CAAC;IAChB,CAAC,CAAC,CACL,CAAC;IAEF,OAAO,UAAU,CAAC,MAAM,CAAC,CAAC,QAAQ,EAAsB,EAAE,CAAC,QAAQ,IAAI,IAAI,CAAC,CAAC;AACjF,CAAC;AAGD,SAAS,kBAAkB,CAAC,qBAAoD;IAC5E,OAAO;QACH,yCAAyC;QACzC,GAAG,CACC,OAAO,CAAC,QAAQ,KAAK,OAAO;YACxB,CAAC,CAAC,CAAC,OAAO,CAAC,GAAG,EAAE,CAAC;YACjB,CAAC,CAAC,EAAE,CACX;QACD,GAAG,CAAC,CAAC,OAAO,CAAC,GAAG,CAAC,IAAI,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC;QACnD,GAAG,CAAC,qBAAqB,CAAC,OAAO,CAAC,CAAC,UAAU,EAAE,EAAE,CAAC,CAAC,UAAU,IAAI,EAAE,CAAC,CAAC,KAAK,CAAC,IAAI,CAAC,SAAS,CAAC,CAAC,CAAC;KAC/F;SACI,GAAG,CAAC,CAAC,QAAQ,EAAE,EAAE,CAAC,CACf,CAAC,QAAQ,CAAC,MAAM,IAAI,CAAC,IAAI,QAAQ,CAAC,UAAU,CAAC,GAAG,CAAC,IAAI,QAAQ,CAAC,QAAQ,CAAC,GAAG,CAAC,CAAC;QACxE,CAAC,CAAC,QAAQ,CAAC,KAAK,CAAC,CAAC,EAAE,CAAC,CAAC,CAAC;QACvB,CAAC,CAAC,QAAQ,CACjB,CAAC;SACD,MAAM,CAAC,CAAC,QAAQ,EAAE,EAAE,CAAC,QAAQ,CAAC,MAAM,GAAG,CAAC,CAAC,CAAC;AACnD,CAAC"}
6
node_modules/node-llama-cpp/dist/bindings/utils/lastBuildInfo.d.ts
generated
vendored
Normal file
@@ -0,0 +1,6 @@
type LastBuildInfo = {
    folderName: string;
};
export declare function getLastBuildInfo(): Promise<LastBuildInfo | null>;
export declare function setLastBuildInfo(buildInfo: LastBuildInfo): Promise<void>;
export {};
17
node_modules/node-llama-cpp/dist/bindings/utils/lastBuildInfo.js
generated
vendored
Normal file
@@ -0,0 +1,17 @@
import fs from "fs-extra";
import { lastBuildInfoJsonPath } from "../../config.js";
export async function getLastBuildInfo() {
    try {
        const buildInfo = await fs.readJson(lastBuildInfoJsonPath);
        return buildInfo;
    }
    catch (err) {
        return null;
    }
}
export async function setLastBuildInfo(buildInfo) {
    await fs.writeJson(lastBuildInfoJsonPath, buildInfo, {
        spaces: 4
    });
}
//# sourceMappingURL=lastBuildInfo.js.map
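A round-trip sketch of the two helpers above (hypothetical caller and folder name; the JSON path is resolved internally from lastBuildInfoJsonPath, so callers never pass it):

// usage sketch — hypothetical caller placed next to lastBuildInfo.js
import {getLastBuildInfo, setLastBuildInfo} from "./lastBuildInfo.js";

// persist the folder of the binary that was just built ("win-x64-cuda" is made up)
await setLastBuildInfo({folderName: "win-x64-cuda"});

// read it back; returns null when the JSON file is missing or unreadable,
// since getLastBuildInfo swallows read errors
const lastBuild = await getLastBuildInfo();
console.log(lastBuild?.folderName ?? "no previous build");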
1
node_modules/node-llama-cpp/dist/bindings/utils/lastBuildInfo.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"lastBuildInfo.js","sourceRoot":"","sources":["../../../src/bindings/utils/lastBuildInfo.ts"],"names":[],"mappings":"AAAA,OAAO,EAAE,MAAM,UAAU,CAAC;AAC1B,OAAO,EAAC,qBAAqB,EAAC,MAAM,iBAAiB,CAAC;AAMtD,MAAM,CAAC,KAAK,UAAU,gBAAgB;IAClC,IAAI,CAAC;QACD,MAAM,SAAS,GAAkB,MAAM,EAAE,CAAC,QAAQ,CAAC,qBAAqB,CAAC,CAAC;QAE1E,OAAO,SAAS,CAAC;IACrB,CAAC;IAAC,OAAO,GAAG,EAAE,CAAC;QACX,OAAO,IAAI,CAAC;IAChB,CAAC;AACL,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,gBAAgB,CAAC,SAAwB;IAC3D,MAAM,EAAE,CAAC,SAAS,CAAC,qBAAqB,EAAE,SAAS,EAAE;QACjD,MAAM,EAAE,CAAC;KACZ,CAAC,CAAC;AACP,CAAC"}
2
node_modules/node-llama-cpp/dist/bindings/utils/logBinaryUsageExampleToConsole.d.ts
generated
vendored
Normal file
@@ -0,0 +1,2 @@
import { BuildOptions } from "../types.js";
export declare function logBinaryUsageExampleToConsole(buildOptions: BuildOptions, specifyGpuType: boolean, showLatestBuildUsageExample?: boolean): void;
22
node_modules/node-llama-cpp/dist/bindings/utils/logBinaryUsageExampleToConsole.js
generated
vendored
Normal file
@@ -0,0 +1,22 @@
import { removeUndefinedFields } from "../../utils/removeNullFields.js";
import { getExampleUsageCodeOfGetLlama } from "./getExampleUsageCodeOfGetLlama.js";
export function logBinaryUsageExampleToConsole(buildOptions, specifyGpuType, showLatestBuildUsageExample = true) {
    console.log("To use the binary you've just built, use this code:");
    const llamaOptions = removeUndefinedFields({
        gpu: specifyGpuType
            ? buildOptions.gpu
            : undefined,
        cmakeOptions: buildOptions.customCmakeOptions.size === 0
            ? undefined
            : Object.fromEntries([...buildOptions.customCmakeOptions.entries()].sort(([keyA], [keyB]) => keyA.localeCompare(keyB)))
    });
    console.log(getExampleUsageCodeOfGetLlama(Object.keys(llamaOptions).length === 0
        ? undefined
        : llamaOptions));
    if (showLatestBuildUsageExample) {
        console.log();
        console.log("To always use the latest binary you build using a CLI command, use this code:");
        console.log(getExampleUsageCodeOfGetLlama("lastBuild"));
    }
}
//# sourceMappingURL=logBinaryUsageExampleToConsole.js.map
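A sketch of invoking the logger above (the buildOptions object is a hypothetical stand-in for BuildOptions; the only fields this function reads are gpu and customCmakeOptions, and the latter must be a Map):

// usage sketch — hypothetical caller placed next to logBinaryUsageExampleToConsole.js
import {logBinaryUsageExampleToConsole} from "./logBinaryUsageExampleToConsole.js";

const buildOptions = {
    gpu: "cuda",                                      // echoed only because specifyGpuType is true below
    customCmakeOptions: new Map([["GGML_CUDA", "1"]]) // printed as cmakeOptions, sorted by key
};

// logs a getLlama(...) snippet for this build, then a "lastBuild" variant
logBinaryUsageExampleToConsole(buildOptions, true);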
1
node_modules/node-llama-cpp/dist/bindings/utils/logBinaryUsageExampleToConsole.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"logBinaryUsageExampleToConsole.js","sourceRoot":"","sources":["../../../src/bindings/utils/logBinaryUsageExampleToConsole.ts"],"names":[],"mappings":"AACA,OAAO,EAAC,qBAAqB,EAAC,MAAM,iCAAiC,CAAC;AAEtE,OAAO,EAAC,6BAA6B,EAAC,MAAM,oCAAoC,CAAC;AAEjF,MAAM,UAAU,8BAA8B,CAC1C,YAA0B,EAAE,cAAuB,EAAE,8BAAuC,IAAI;IAEhG,OAAO,CAAC,GAAG,CAAC,qDAAqD,CAAC,CAAC;IACnE,MAAM,YAAY,GAAiB,qBAAqB,CAAC;QACrD,GAAG,EAAE,cAAc;YACf,CAAC,CAAC,YAAY,CAAC,GAAG;YAClB,CAAC,CAAC,SAAS;QACf,YAAY,EAAE,YAAY,CAAC,kBAAkB,CAAC,IAAI,KAAK,CAAC;YACpD,CAAC,CAAC,SAAS;YACX,CAAC,CAAC,MAAM,CAAC,WAAW,CAChB,CAAC,GAAG,YAAY,CAAC,kBAAkB,CAAC,OAAO,EAAE,CAAC,CAAC,IAAI,CAAC,CAAC,CAAC,IAAI,CAAC,EAAE,CAAC,IAAI,CAAC,EAAE,EAAE,CAAC,IAAI,CAAC,aAAa,CAAC,IAAI,CAAC,CAAC,CACpG;KACR,CAAC,CAAC;IACH,OAAO,CAAC,GAAG,CACP,6BAA6B,CACzB,MAAM,CAAC,IAAI,CAAC,YAAY,CAAC,CAAC,MAAM,KAAK,CAAC;QAClC,CAAC,CAAC,SAAS;QACX,CAAC,CAAC,YAAY,CACrB,CACJ,CAAC;IAEF,IAAI,2BAA2B,EAAE,CAAC;QAC9B,OAAO,CAAC,GAAG,EAAE,CAAC;QACd,OAAO,CAAC,GAAG,CAAC,+EAA+E,CAAC,CAAC;QAC7F,OAAO,CAAC,GAAG,CAAC,6BAA6B,CAAC,WAAW,CAAC,CAAC,CAAC;IAC5D,CAAC;AACL,CAAC"}
14
node_modules/node-llama-cpp/dist/bindings/utils/logDistroInstallInstruction.d.ts
generated
vendored
Normal file
@@ -0,0 +1,14 @@
type DistroPackages = {
    linuxPackages?: {
        apt?: string[];
        apk?: string[];
    };
    macOsPackages?: {
        brew?: string[];
    };
};
export declare function logDistroInstallInstruction(prefixText: string, distroPackages: DistroPackages, { forceLogPrefix }?: {
    forceLogPrefix?: boolean;
}): Promise<void>;
export declare function getDistroInstallInstruction({ linuxPackages, macOsPackages }: DistroPackages): Promise<string | null>;
export {};
48
node_modules/node-llama-cpp/dist/bindings/utils/logDistroInstallInstruction.js
generated
vendored
Normal file
@@ -0,0 +1,48 @@
import which from "which";
import chalk from "chalk";
import { getConsoleLogPrefix } from "../../utils/getConsoleLogPrefix.js";
import { getPlatform } from "./getPlatform.js";
export async function logDistroInstallInstruction(prefixText, distroPackages, { forceLogPrefix = false } = {}) {
    const instruction = await getDistroInstallInstruction(distroPackages);
    if (instruction == null)
        return;
    console.info(getConsoleLogPrefix(forceLogPrefix) + chalk.yellow(prefixText + instruction));
}
export async function getDistroInstallInstruction({ linuxPackages, macOsPackages }) {
    const platform = getPlatform();
    if (platform === "linux") {
        if (linuxPackages == null)
            return null;
        if (linuxPackages.apt != null && linuxPackages.apt.length > 0) {
            const [sudoPath, aptPath] = await Promise.all([
                which("sudo", { nothrow: true }),
                which("apt", { nothrow: true })
            ]);
            if (aptPath != null) {
                const aptCommand = (sudoPath != null ? "sudo " : "") + "apt";
                return 'you can run "' + aptCommand + " update && " + aptCommand + " install -y " + linuxPackages.apt.join(" ") + '"';
            }
        }
        if (linuxPackages.apk != null && linuxPackages.apk.length > 0) {
            const [sudoPath, apkPath] = await Promise.all([
                which("sudo", { nothrow: true }),
                which("apk", { nothrow: true })
            ]);
            if (apkPath != null)
                return 'you can run "' + (sudoPath != null ? "sudo " : "") + "apk add " + linuxPackages.apk.join(" ") + '"';
        }
        return null;
    }
    else if (platform === "mac") {
        if (macOsPackages == null)
            return null;
        if (macOsPackages.brew != null && macOsPackages.brew.length > 0) {
            const brewPath = await which("brew", { nothrow: true });
            if (brewPath != null)
                return 'you can run "brew install ' + macOsPackages.brew.join(" ") + '"';
        }
        return null;
    }
    return null;
}
//# sourceMappingURL=logDistroInstallInstruction.js.map
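A sketch of calling the instruction helper above (hypothetical package names; on an apt-based distro with sudo and apt on PATH, this logs a yellow hint ending in something like: you can run "sudo apt update && sudo apt install -y cmake ninja-build"):

// usage sketch — hypothetical caller placed next to logDistroInstallInstruction.js
import {logDistroInstallInstruction} from "./logDistroInstallInstruction.js";

// silently does nothing when no listed package manager is found on this platform
await logDistroInstallInstruction("To install cmake, ", {
    linuxPackages: {
        apt: ["cmake", "ninja-build"],
        apk: ["cmake", "ninja"]
    },
    macOsPackages: {
        brew: ["cmake", "ninja"]
    }
});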
1
node_modules/node-llama-cpp/dist/bindings/utils/logDistroInstallInstruction.js.map
generated
vendored
Normal file
@@ -0,0 +1 @@
{"version":3,"file":"logDistroInstallInstruction.js","sourceRoot":"","sources":["../../../src/bindings/utils/logDistroInstallInstruction.ts"],"names":[],"mappings":"AAAA,OAAO,KAAK,MAAM,OAAO,CAAC;AAC1B,OAAO,KAAK,MAAM,OAAO,CAAC;AAC1B,OAAO,EAAC,mBAAmB,EAAC,MAAM,oCAAoC,CAAC;AACvE,OAAO,EAAC,WAAW,EAAC,MAAM,kBAAkB,CAAC;AAY7C,MAAM,CAAC,KAAK,UAAU,2BAA2B,CAAC,UAAkB,EAAE,cAA8B,EAAE,EAClG,cAAc,GAAG,KAAK,KAGtB,EAAE;IACF,MAAM,WAAW,GAAG,MAAM,2BAA2B,CAAC,cAAc,CAAC,CAAC;IAEtE,IAAI,WAAW,IAAI,IAAI;QACnB,OAAO;IAEX,OAAO,CAAC,IAAI,CAAC,mBAAmB,CAAC,cAAc,CAAC,GAAG,KAAK,CAAC,MAAM,CAAC,UAAU,GAAG,WAAW,CAAC,CAAC,CAAC;AAC/F,CAAC;AAED,MAAM,CAAC,KAAK,UAAU,2BAA2B,CAAC,EAC9C,aAAa,EACb,aAAa,EACA;IACb,MAAM,QAAQ,GAAG,WAAW,EAAE,CAAC;IAE/B,IAAI,QAAQ,KAAK,OAAO,EAAE,CAAC;QACvB,IAAI,aAAa,IAAI,IAAI;YACrB,OAAO,IAAI,CAAC;QAEhB,IAAI,aAAa,CAAC,GAAG,IAAI,IAAI,IAAI,aAAa,CAAC,GAAG,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YAC5D,MAAM,CACF,QAAQ,EACR,OAAO,CACV,GAAG,MAAM,OAAO,CAAC,GAAG,CAAC;gBAClB,KAAK,CAAC,MAAM,EAAE,EAAC,OAAO,EAAE,IAAI,EAAC,CAAC;gBAC9B,KAAK,CAAC,KAAK,EAAE,EAAC,OAAO,EAAE,IAAI,EAAC,CAAC;aAChC,CAAC,CAAC;YAEH,IAAI,OAAO,IAAI,IAAI,EAAE,CAAC;gBAClB,MAAM,UAAU,GAAG,CAAC,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,KAAK,CAAC;gBAE7D,OAAO,eAAe,GAAG,UAAU,GAAG,aAAa,GAAG,UAAU,GAAG,cAAc,GAAG,aAAa,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;YAC1H,CAAC;QACL,CAAC;QAED,IAAI,aAAa,CAAC,GAAG,IAAI,IAAI,IAAI,aAAa,CAAC,GAAG,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YAC5D,MAAM,CACF,QAAQ,EACR,OAAO,CACV,GAAG,MAAM,OAAO,CAAC,GAAG,CAAC;gBAClB,KAAK,CAAC,MAAM,EAAE,EAAC,OAAO,EAAE,IAAI,EAAC,CAAC;gBAC9B,KAAK,CAAC,KAAK,EAAE,EAAC,OAAO,EAAE,IAAI,EAAC,CAAC;aAChC,CAAC,CAAC;YAEH,IAAI,OAAO,IAAI,IAAI;gBACf,OAAO,eAAe,GAAG,CAAC,QAAQ,IAAI,IAAI,CAAC,CAAC,CAAC,OAAO,CAAC,CAAC,CAAC,EAAE,CAAC,GAAG,UAAU,GAAG,aAAa,CAAC,GAAG,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;QACpH,CAAC;QAED,OAAO,IAAI,CAAC;IAChB,CAAC;SAAM,IAAI,QAAQ,KAAK,KAAK,EAAE,CAAC;QAC5B,IAAI,aAAa,IAAI,IAAI;YACrB,OAAO,IAAI,CAAC;QAEhB,IAAI,aAAa,CAAC,IAAI,IAAI,IAAI,IAAI,aAAa,CAAC,IAAI,CAAC,MAAM,GAAG,CAAC,EAAE,CAAC;YAC9D,MAAM,QAAQ,GAAG,MAAM,KAAK,CAAC,MAAM,EAAE,EAAC,OAAO,EAAE,IAAI,EAAC,CAAC,CAAC;YAEtD,IAAI,QAAQ,IAAI,IAAI;gBAChB,OAAO,4BAA4B,GAAG,aAAa,CAAC,IAAI,CAAC,IAAI,CAAC,GAAG,CAAC,GAAG,GAAG,CAAC;QACjF,CAAC;QAED,OAAO,IAAI,CAAC;IAChB,CAAC;IAED,OAAO,IAAI,CAAC;AAChB,CAAC"}
1
node_modules/node-llama-cpp/dist/bindings/utils/resolveActualBindingBinaryPath.d.ts
generated
vendored
Normal file
@@ -0,0 +1 @@
export declare function resolveActualBindingBinaryPath(binaryPath: string): Promise<string>;
Some files were not shown because too many files have changed in this diff.