First upload, version 0.0.1
This commit is contained in:
38
node_modules/node-llama-cpp/dist/cli/commands/CompleteCommand.d.ts
generated
vendored
Normal file
38
node_modules/node-llama-cpp/dist/cli/commands/CompleteCommand.d.ts
generated
vendored
Normal file
@@ -0,0 +1,38 @@
|
||||
import { CommandModule } from "yargs";
|
||||
import { BuildGpu, LlamaNuma } from "../../bindings/types.js";
|
||||
type CompleteCommand = {
|
||||
modelPath?: string;
|
||||
header?: string[];
|
||||
gpu?: BuildGpu | "auto";
|
||||
systemInfo: boolean;
|
||||
text?: string;
|
||||
textFile?: string;
|
||||
contextSize?: number;
|
||||
batchSize?: number;
|
||||
flashAttention?: boolean;
|
||||
swaFullCache?: boolean;
|
||||
threads?: number;
|
||||
temperature: number;
|
||||
minP: number;
|
||||
topK: number;
|
||||
topP: number;
|
||||
seed?: number;
|
||||
gpuLayers?: number;
|
||||
repeatPenalty: number;
|
||||
lastTokensRepeatPenalty: number;
|
||||
penalizeRepeatingNewLine: boolean;
|
||||
repeatFrequencyPenalty?: number;
|
||||
repeatPresencePenalty?: number;
|
||||
maxTokens: number;
|
||||
tokenPredictionDraftModel?: string;
|
||||
tokenPredictionModelContextSize?: number;
|
||||
debug: boolean;
|
||||
numa?: LlamaNuma;
|
||||
meter: boolean;
|
||||
timing: boolean;
|
||||
noMmap: boolean;
|
||||
noDirectIo: boolean;
|
||||
printTimings: boolean;
|
||||
};
|
||||
export declare const CompleteCommand: CommandModule<object, CompleteCommand>;
|
||||
export {};
|
||||
Reference in New Issue
Block a user