First upload version 0.0.1

Neyra
2026-02-05 15:27:49 +08:00
commit 8e9b7201ed
4182 changed files with 593136 additions and 0 deletions


@@ -0,0 +1,37 @@
import { Token, Tokenizer } from "../types.js";
import { LlamaText } from "../utils/LlamaText.js";
import type { LlamaModel } from "./LlamaModel/LlamaModel.js";
/**
* @see [Using Token Bias](https://node-llama-cpp.withcat.ai/guide/token-bias) tutorial
*/
export declare class TokenBias {
constructor(tokenizer: Tokenizer);
/**
* Adjust the bias of the given token(s).
*
* If a text is provided, the bias will be applied to each individual token in the text.
*
* Setting a bias to `"never"` will prevent the token from being generated, unless it is required to comply with a grammar.
*
* Setting the bias of the EOS or EOT tokens to `"never"` has no effect and will be ignored.
* @param input - The token(s) to apply the bias to
* @param bias - The probability bias to apply to the token(s).
*
* Setting to a positive number increases the probability of the token(s) being generated.
*
* Setting to a negative number decreases the probability of the token(s) being generated.
*
* Setting to `0` has no effect.
*
* For example, setting to `0.5` will increase the probability of the token(s) being generated by 50%.
* Setting to `-0.5` will decrease the probability of the token(s) being generated by 50%.
*
* Setting to `"never"` will prevent the token from being generated, unless it is required to comply with a grammar.
*
 * Try experimenting with values between `-0.9` and `0.9` to see what works for your use case.
*/
set(input: Token | Token[] | string | LlamaText, bias: "never" | number | {
logit: number;
}): this;
static for(modelOrTokenizer: LlamaModel | Tokenizer): TokenBias;
}
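
A minimal usage sketch for context (not part of this file): it assumes the public "node-llama-cpp" entry point exposes getLlama, LlamaChatSession and TokenBias, and the model path and prompt text are illustrative placeholders to adjust for your own setup.

import {getLlama, LlamaChatSession, TokenBias} from "node-llama-cpp";

// Load a local GGUF model and open a chat session (path is illustrative).
const llama = await getLlama();
const model = await llama.loadModel({modelPath: "path/to/model.gguf"});
const context = await model.createContext();
const session = new LlamaChatSession({contextSequence: context.getSequence()});

// Build a bias: discourage one phrase, forbid another entirely.
// set() returns `this`, so calls can be chained.
const customBias = TokenBias.for(model)
    .set("confetti", -0.9)
    .set("party", "never");

// Apply the bias to a single prompt.
const answer = await session.prompt("Describe a celebration", {tokenBias: customBias});
console.log(answer);

Passing a string to set() biases every token the text tokenizes to, which is why a negative bias on a whole word discourages it without needing to know its exact token ids.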