Skip to content

LlamaCpp#

LlamaCppTokenizer #

LlamaCppTokenizer(llama)

Tokenizer for llama.cpp loaded GGUF models.

Source code in sibila/llamacpp.py
def __init__(self, 
             llama: Llama):
    """Capture tokenizer state from a loaded llama.cpp model.

    Args:
        llama: Llama instance wrapping a loaded GGUF model.
    """
    self._llama = llama

    def _vocab_text(token_id: int) -> str:
        # Fetch the raw vocab entry for a token id and decode it to str.
        return llama_token_get_text(self._llama.model, token_id).decode("utf-8")

    self.vocab_size = self._llama.n_vocab()

    self.bos_token_id = self._llama.token_bos()
    self.bos_token = _vocab_text(self.bos_token_id)

    self.eos_token_id = self._llama.token_eos()
    self.eos_token = _vocab_text(self.eos_token_id)

    # Padding token is not provided by llama.cpp here.
    self.pad_token_id = None
    self.pad_token = None

    self.unk_token_id = None # ? fill by taking a look at id 0?
    self.unk_token = None

encode #

encode(text)

Encode text into model tokens. Inverse of decode().

Parameters:

Name Type Description Default
text str

Text to be encoded.

required

Returns:

Type Description
list[int]

A list of ints with the encoded tokens.

Source code in sibila/llamacpp.py
def encode(self, 
           text: str) -> list[int]:
    """Encode text into model tokens. Inverse of decode().

    Args:
        text: Text to be encoded.

    Returns:
        A list of ints with the encoded tokens.
    """

    # The tokenizer consumes UTF-8 bytes; unencodable characters are dropped.
    raw_bytes = text.encode("utf-8", errors="ignore")
    return self._llama.tokenize(raw_bytes, add_bos=False, special=True)

decode #

decode(token_ids, skip_special=True)

Decode model tokens to text. Inverse of encode().

Used instead of llama-cpp-python's decoding to fix an error: the first character after a bos token is removed only if it is a space.

Parameters:

Name Type Description Default
token_ids list[int]

List of model tokens.

required
skip_special bool

Don't decode special tokens like bos and eos. Defaults to True.

True

Returns:

Type Description
str

Decoded text.

Source code in sibila/llamacpp.py
def decode(self,
           token_ids: list[int],
           skip_special: bool = True) -> str:
    """Decode model tokens to text. Inverse of encode().

    Used instead of llama-cpp-python's decoding to fix an error:
    remove the first character after a bos only if it's a space.

    Args:
        token_ids: List of model tokens.
        skip_special: Don't decode special tokens like bos and eos. Defaults to True.

    Returns:
        Decoded text.
    """

    # Nothing to decode.
    if not len(token_ids):
        return ""

    output = b""
    # Scratch buffer reused for each token piece.
    # NOTE(review): assumes no single token's text exceeds 32 bytes — confirm
    # against llama_token_to_piece's behavior when the buffer is too small.
    size = 32
    buffer = (ctypes.c_char * size)()

    if not skip_special:
        # Emit bos/eos as their literal token text instead of skipping them.
        special_toks = {self.bos_token_id: self.bos_token.encode("utf-8"), # type: ignore[union-attr]
                        self.eos_token_id: self.eos_token.encode("utf-8")} # type: ignore[union-attr]

        for token in token_ids:
            if token == self.bos_token_id:
                output += special_toks[token]
            elif token == self.eos_token_id:
                output += special_toks[token]
            else:
                # llama_token_to_piece writes the token's bytes into buffer
                # and returns the count written; only that prefix is valid.
                n = llama_cpp.llama_token_to_piece(
                    self._llama.model, llama_cpp.llama_token(token), buffer, size
                )
                output += bytes(buffer[:n]) # type: ignore[arg-type]

    else: # skip special
        for token in token_ids:
            if token != self.bos_token_id and token != self.eos_token_id:
                n = llama_cpp.llama_token_to_piece(
                    self._llama.model, llama_cpp.llama_token(token), buffer, size
                )
                output += bytes(buffer[:n]) # type: ignore[arg-type]


    # "User code is responsible for removing the leading whitespace of the first non-BOS token when decoding multiple tokens."
    # NOTE(review): the `<= 32` test trims any leading byte up to and including
    # space, i.e. control characters too — confirm that is intended.
    if (# token_ids[0] != self.bos_token_id and # we also try cutting if first is bos to approximate HF tokenizer
       len(output) and output[0] <= 32 # 32 = ord(' ')
       ):
        output = output[1:]

    return output.decode("utf-8", errors="ignore")

token_len #

token_len(text)

Returns token length for given text.

Parameters:

Name Type Description Default
text str

Text to be measured.

required

Returns:

Type Description
int

Token length for given text.

Source code in sibila/model.py
def token_len(self, 
              text: str) -> int:
    """Return the number of tokens the given text encodes to.

    Args:
        text: Text to be measured.

    Returns:
        Token length for given text.
    """

    return len(self.encode(text))

OpenAI#

OpenAITokenizer #

OpenAITokenizer(model)

Tokenizer for OpenAI models.

Source code in sibila/openai.py
def __init__(self, 
             model: str
             ):
    """Build a tiktoken-backed tokenizer for an OpenAI model name.

    Args:
        model: Model name, forwarded to tiktoken.encoding_for_model().

    Raises:
        Exception: If the tiktoken package is not installed.
    """

    if not has_tiktoken:
        raise Exception("Please install tiktoken by running: pip install tiktoken")

    self._tok = tiktoken.encoding_for_model(model)
    self.vocab_size = self._tok.n_vocab

    # No special-token metadata is available for this tokenizer.
    for prefix in ("bos", "eos", "pad", "unk"):
        setattr(self, prefix + "_token_id", None)
        setattr(self, prefix + "_token", None)

encode #

encode(text)

Encode text into model tokens. Inverse of decode().

Parameters:

Name Type Description Default
text str

Text to be encoded.

required

Returns:

Type Description
list[int]

A list of ints with the encoded tokens.

Source code in sibila/openai.py
def encode(self, 
           text: str) -> list[int]:
    """Encode text into model tokens. Inverse of decode().

    Args:
        text: Text to be encoded.

    Returns:
        A list of ints with the encoded tokens.
    """

    # Delegate straight to the tiktoken encoding.
    token_ids = self._tok.encode(text)
    return token_ids

decode #

decode(token_ids, skip_special=True)

Decode model tokens to text. Inverse of encode().

Parameters:

Name Type Description Default
token_ids list[int]

List of model tokens.

required
skip_special bool

Don't decode special tokens like bos and eos. Defaults to True.

True

Returns:

Type Description
str

Decoded text.

Source code in sibila/openai.py
def decode(self, 
           token_ids: list[int],
           skip_special: bool = True) -> str:
    """Decode model tokens to text. Inverse of encode().

    Args:
        token_ids: List of model tokens.
        skip_special: Don't decode special tokens like bos and eos. Defaults to True.
            Only True is supported by this tokenizer.

    Returns:
        Decoded text.

    Raises:
        ValueError: If skip_special is False, which is not supported here.
    """
    # A bare `assert` would be stripped under `python -O`, silently accepting
    # the unsupported flag — raise explicitly instead.
    if not skip_special:
        raise ValueError("OpenAITokenizer only supports skip_special=True")

    return self._tok.decode(token_ids)

token_len #

token_len(text)

Returns token length for given text.

Parameters:

Name Type Description Default
text str

Text to be measured.

required

Returns:

Type Description
int

Token length for given text.

Source code in sibila/model.py
def token_len(self, 
              text: str) -> int:
    """Return how many tokens the given text requires.

    Args:
        text: Text to be measured.

    Returns:
        Token length for given text.
    """

    encoded = self.encode(text)
    return len(encoded)