>>102639764
//this is a hack since we don't have a proper tokenizer, but we can estimate roughly 1 token per 3 characters
let chars_per_token = 3.0;
//try to detect attempts at coding, which tokenize poorly. This usually shows up as a high average word length.
let avgwordlen = (1.0 + truncated_context.length) / (1.0 + countWords(truncated_context));
if (avgwordlen >= 7.8)
{
	chars_per_token = 2.7;
}
if (current_memory == null || current_memory.trim() == "")
{
	//if there is no memory, we can be a lot more lenient with the character counts since the backend will truncate the excess anyway
	chars_per_token = 4.8;
}
if (is_using_kcpp_with_added_memory()) //overflow is easily handled here
{
	chars_per_token = 6.0;
}
chars_per_token = chars_per_token * (localsettings.token_count_multiplier * 0.01);
let max_allowed_characters = Math.max(1, Math.floor((maxctxlen - maxgenamt) * chars_per_token) - 12);
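countWords isn't shown here, so for reference, a minimal sketch of what it presumably does, assuming it just counts whitespace-separated words (a guess, not the actual implementation):

function countWords(str)
{
	//split on runs of whitespace and drop empty entries so "" counts as 0 words
	return str.trim().split(/\s+/).filter(function(w){ return w.length > 0; }).length;
}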
I cannot emphasize enough how jank this shit is
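For what it's worth, here's what the formula actually works out to with some made-up numbers (maxctxlen = 4096, maxgenamt = 512, token_count_multiplier = 100, neither memory branch taken):

chars_per_token = 3.0 * (100 * 0.01) = 3.0
max_allowed_characters = max(1, floor((4096 - 512) * 3.0) - 12) = 10752 - 12 = 10740

so roughly 10.7k characters of context get sent against a 3584-token budget.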