#define LOG_TAG "LatinIME: jni: LanguageModel"

#include "org_futo_inputmethod_latin_xlm_LanguageModel.h"

#include <algorithm> // for std::sort(), std::partial_sort()
#include <cctype>    // for isspace(), islower(), tolower()
#include <cmath>     // for expf(), logf(), isnan()
#include <cstring>   // for strchr()
#include <memory>    // for std::unique_ptr
#include <string>
#include <vector>

#include "jni.h"
#include "jni_common.h"
#include "ggml/LanguageModel.h"
#include "defines.h"
#include "suggest/core/layout/proximity_info.h"
#include "jni_utils.h"

#define EPS 0.0001

#if false
#define TIME_START(name) const int64_t start_##name = ggml_time_us();
#define TIME_END(name) const int64_t end_##name = ggml_time_us(); \
                       const int64_t time_taken_##name = (end_##name - start_##name) / 1000L; \
                       AKLOGI("%s: Time taken by %s: %d ms\n", __func__, #name, (int)time_taken_##name);
#else
#define TIME_START(name)
#define TIME_END(name)
#endif

#define RETURNVAL_AUTOCORRECT "autocorrect"
#define RETURNVAL_UNCERTAIN "uncertain"
#define RETURNVAL_CLUELESS "clueless"
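
// Returns a copy of `s` with leading and trailing whitespace removed. Used to
// normalize context strings and banned words before tokenization.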
static std::string trim(const std::string &s) {
    auto start = s.begin();
    while (start != s.end() && std::isspace(static_cast<unsigned char>(*start))) {
        start++;
    }

    // Empty or all-whitespace input: decrementing s.end() below would be UB
    if (start == s.end()) return "";

    auto end = s.end();
    do {
        end--;
    } while (std::distance(start, end) > 0 && std::isspace(static_cast<unsigned char>(*end)));

    return {start, end + 1};
}
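
// Helpers for sorting (probability, payload) pairs with the most probable
// first. The `partial` overload only orders the first `partial` elements,
// which is cheaper when we just need the top-N out of a large vocabulary.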
template<typename T>
bool sortProbabilityPairDescending(const std::pair<float, T>& a, const std::pair<float, T>& b) {
    return a.first > b.first;
}

template<typename T>
static inline void sortProbabilityPairVectorDescending(std::vector<std::pair<float, T>> &vec) {
    std::sort(vec.begin(), vec.end(), sortProbabilityPairDescending<T>);
}

template<typename T>
static inline void sortProbabilityPairVectorDescending(std::vector<std::pair<float, T>> &vec, size_t partial) {
    if(partial > vec.size()) partial = vec.size();
    std::partial_sort(vec.begin(), vec.begin() + partial, vec.end(), sortProbabilityPairDescending<T>);
}
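
// A candidate beam during sampling: the tokens generated so far, plus the
// llama.cpp sequence id that owns the beam's KV cache entries.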
typedef struct potential_sequence_data {
    token_sequence tokens;
    llama_seq_id seq_id{};
} potential_sequence_data;

// P = P(tokens[0]) * P(tokens[1]) * [...]
typedef std::pair<float, potential_sequence_data> potential_sequence;

typedef struct banned_sequence {
    token_sequence sequence;
    int hash;
} banned_sequence;
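
// Cheap additive hash over a token sequence, used to prefilter banned-word
// checks; append_sequence_hash() extends a hash by one token without
// rescanning, so compute_sequence_hash({a, b, c}) ==
// append_sequence_hash(compute_sequence_hash({a, b}), c). The sum is
// order-insensitive, so equal hashes are only a hint: MatchesBanned() still
// compares the sequences element by element.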
int compute_sequence_hash(const token_sequence &seq) {
    int hash = 0;
    for(llama_token t : seq) {
        hash = (hash + t) % 999999999;
    }
    return hash;
}

int append_sequence_hash(int hash, llama_token t) {
    return (hash + t) % 999999999;
}
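
// In-place numerically stable softmax. Subtracting offset = max + log(sum of
// exp(x - max)) before exponentiating avoids overflow in expf() for large
// logits; afterwards input[] sums to 1 and can be read as probabilities.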
static void softmax(float * input, size_t input_len) {
    float m = -INFINITY;
    for (size_t i = 0; i < input_len; i++) {
        if (input[i] > m) {
            m = input[i];
        }
    }

    float sum = 0.0f;
    for (size_t i = 0; i < input_len; i++) {
        sum += expf(input[i] - m);
    }

    float offset = m + logf(sum);
    for (size_t i = 0; i < input_len; i++) {
        input[i] = expf(input[i] - offset);
    }
}
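
// One tapped key position: the tap coordinates normalized to [0, 1] by the
// keyboard dimensions, plus the NUM_TOKEN_MIX most likely letter tokens for
// that tap and their weights. The decoder either feeds (x, y) through a
// learned encoder or blends the letter-token embeddings by weight (see
// DecodePromptAndMixes below).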
#define NUM_TOKEN_MIX 4

struct TokenMix {
    float x;
    float y;
    struct {
        float weight;
        llama_token token;
    } mixes[NUM_TOKEN_MIX];
};
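
// Result of decoding a prompt: which logits row to sample from, and how many
// KV cache positions the decoded prompt now occupies.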
struct DecodeResult {
    int logits_head;
    int size;
};

enum WordCapitalizeMode {
    IgnoredCapitals, // partialWord = "t" or partialWord = "test"
    FirstCapital,    // partialWord = "T" or partialWord = "Test"
    AllCapitals      // partialWord = "TE" or partialWord = "TEST"
};

bool isFirstCharLowercase(const char* str) {
    if (str == nullptr || str[0] == '\0')
        return false;
    return islower(static_cast<unsigned char>(str[0])) != 0;
}

bool hasLowercase(const char* str) {
    if (str == nullptr)
        return false;

    for (; *str != '\0'; ++str) {
        if (islower(static_cast<unsigned char>(*str)))
            return true;
    }
    return false;
}
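
// Case-insensitive comparison that ignores apostrophes, dashes and spaces;
// used for the exact-match rule when ranking corrections.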
bool isExactMatch(const std::string &a, const std::string &b){
    auto preprocess = [](const std::string &str) -> std::string {
        std::string result;
        for(char c : str) {
            if(c != '\'' && c != '-' && c != ' ') {
                result += (char)tolower(c);
            }
        }
        return result;
    };

    return preprocess(a) == preprocess(b);
}
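
// Per-model state shared by all JNI calls: the loaded model, the ids of the
// special control tokens it was trained with, and precomputed lists of tokens
// to ban in various sampling situations.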
struct LanguageModelState {
    std::unique_ptr<LanguageModel> model;

    struct {
        int SPACE = 0;

        int XBU = 0;
        int XBC = 0;
        int XEC = 0;

        int XC0_SWIPE_MODE = 0;

        int DASH = 0;
        int STAR = 0;

        int LETTERS_TO_IDS[26] = { 0 };

        std::vector<int> banned_start_of_word_tokens;
        std::vector<int> banned_tokens_for_first_capital;
        std::vector<int> banned_tokens_for_all_capitals;
        std::vector<int> banned_tokens_word_separators; // probabilities add to space token
        std::vector<int> general_banned_tokens;
    } specialTokens;
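
    // Loads the model from `paths` and resolves the special-token ids and ban
    // lists. The autocorrect tokens (<XBU>/<XBC>/<XEC>, and <CHAR_A> with the
    // remaining letters assumed to occupy the following 25 ids) are only
    // resolved when the model advertises FEATURE_AUTOCORRECT; otherwise they
    // are set to -1 and PredictCorrection() becomes a no-op.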
    bool Initialize(const std::string &paths){
        model = std::unique_ptr<LanguageModel>(LlamaAdapter::createLanguageModel(paths));

        if(!model) {
            AKLOGE("GGMLDict: Could not load model");
            return false;
        }

        specialTokens.SPACE = model->tokenToId("▁");
        specialTokens.DASH = model->tokenToId("-");
        specialTokens.STAR = model->tokenToId("*");

        if(model->adapter->hasFeature(FEATURE_AUTOCORRECT)) {
            specialTokens.XBU = model->tokenToId("<XBU>");
            specialTokens.XBC = model->tokenToId("<XBC>");
            specialTokens.XEC = model->tokenToId("<XEC>");

            specialTokens.LETTERS_TO_IDS[0] = model->tokenToId("<CHAR_A>");

            ASSERT(specialTokens.XBU != 0);
            ASSERT(specialTokens.XBC != 0);
            ASSERT(specialTokens.XEC != 0);
            ASSERT(specialTokens.LETTERS_TO_IDS[0] != 0);

            for(int i = 1; i < 26; i++) {
                specialTokens.LETTERS_TO_IDS[i] = specialTokens.LETTERS_TO_IDS[0] + i;
            }

            if(model->adapter->hasFeature(FEATURE_SWIPE_TYPING)) {
                specialTokens.XC0_SWIPE_MODE = model->tokenToId("<XC0>");
                ASSERT(specialTokens.XC0_SWIPE_MODE != 0);
            }
        } else {
            specialTokens.XBU = -1;
            specialTokens.XBC = -1;
            specialTokens.XEC = -1;
        }

        specialTokens.banned_tokens_word_separators = { };
        specialTokens.general_banned_tokens = { model->tokenToId("-▁") };

        //int permitted_period_token = model->tokenToId(".");

        const char *blacklist_symbols = ".!@#$%^&*()_=?/,\\][{};:\"><+`~|\r\n\t\x0b\x0c";
        for(int i = 0; i < model->getVocabSize(); i++) {
            //if(i == permitted_period_token) continue;

            const char *token = model->getToken(i);

            bool has_symbol = false;
            for(char c : std::string(token)){
                if(strchr(blacklist_symbols, c) != nullptr) {
                    has_symbol = true;
                    break;
                }
            }

            if(has_symbol) {
                specialTokens.banned_tokens_word_separators.emplace_back(i);
            }
        }

        size_t n_vocab = llama_n_vocab(model->model());
        for(int i=0; i < (int)n_vocab; i++) {
            const char *text = model->adapter->getToken(i);
            if(isFirstCharLowercase(text)) {
                specialTokens.banned_tokens_for_first_capital.push_back(i);
                specialTokens.banned_tokens_for_all_capitals.push_back(i);
            } else if(hasLowercase(text)) {
                specialTokens.banned_tokens_for_all_capitals.push_back(i);
            }

            if(text[0] == '\'' || text[0] == '-') {
                specialTokens.banned_start_of_word_tokens.push_back(i);
            }
        }

        return true;
    }
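
    // Turns raw logits into probabilities and applies sampling constraints:
    // mass from banned word-separator tokens is folded into the space token,
    // word-initial and capitalization bans zero out disallowed tokens, and a
    // dash is never sampled directly after a dash. Returns false if any logit
    // is NaN so the caller can bail out.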
    bool transform_logits(float *logits, size_t n_vocab, bool is_first_token, bool allow_correction_token, WordCapitalizeMode capitals, llama_token prev_token){
        for(size_t i = 0; i < n_vocab; i++) {
            if(isnan(logits[i])){
                return false;
            }
        }

        softmax(logits, n_vocab);

        for(int x : specialTokens.banned_tokens_word_separators) {
            if(allow_correction_token && x == specialTokens.XEC) continue;

            logits[specialTokens.SPACE] += std::max(0.0f, logits[x]);
            logits[x] = -999.0f;
        }

        if(is_first_token) {
            logits[specialTokens.SPACE] = -999.0f;

            for(int i : specialTokens.banned_start_of_word_tokens) {
                logits[i] = -999.0f;
            }
        }

        for(int i : specialTokens.general_banned_tokens) {
            logits[i] = -999.0f;
        }

        if(prev_token == specialTokens.DASH) {
            logits[specialTokens.DASH] = -999.0f;
        }

        if(capitals == WordCapitalizeMode::FirstCapital && is_first_token) {
            for(int i : specialTokens.banned_tokens_for_first_capital) {
                logits[i] = -999.0f;
            }
        } else if(capitals == WordCapitalizeMode::AllCapitals) {
            // Note: In case the word is something like "AMD's" we may not wish to ban lowercase completely
            for(int i : specialTokens.banned_tokens_for_all_capitals) {
                logits[i] = -999.0f;
            }
        }
        return true;
    }
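
    // Tap mixes from the previous call. When the user merely appends letters,
    // the leading mixes are unchanged, so their KV cache entries can be
    // reused instead of re-decoded (see GetCachedMixAmount below).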
    std::vector<TokenMix> past_mixes = { };

    int GetCachedMixAmount(const std::vector<TokenMix> &mixes) {
        TIME_START(GetcachedMixAmount)
        size_t i;
        for(i = 0; i < std::min(past_mixes.size(), mixes.size()); i++) {
            if(std::abs(past_mixes[i].x - mixes[i].x) >= EPS) break;
            if(std::abs(past_mixes[i].y - mixes[i].y) >= EPS) break;
        }

        TIME_END(GetcachedMixAmount)

        return (int)i;
    }
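
    // Decodes the context prompt and the tap-mix embeddings into the KV
    // cache. The prompt is fast-forwarded past whatever prefix is already
    // cached; mixes are decoded one embedding at a time (see TODO below), and
    // when mixes are present an XBC token is forced at the end so sampling
    // starts at the corrected word. Returns which logits row to sample from
    // and how many cache positions are now occupied.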
    DecodeResult DecodePromptAndMixes(const token_sequence &prompt, const std::vector<TokenMix> &mixes) {
        TIME_START(PromptDecode)
        llama_context *ctx = model->context();
        llama_batch batch = model->adapter->batch;
        LlamaAdapter *llamaAdapter = model->adapter.get();

        size_t n_embd = llama_n_embd(llama_get_model(ctx));
        size_t n_vocab = llama_n_vocab(llama_get_model(ctx));

        auto prompt_ff = transformer_context_fastforward(model->transformerContext, prompt, !mixes.empty());

        int n_batch = llamaAdapter->n_batch;

        int head = -1;
        if(!prompt_ff.first.empty()) {
            // Decode the non-cached suffix of the prompt in n_batch-sized chunks
            for (size_t b = 0; b < (prompt_ff.first.size() + n_batch - 1) / n_batch; b++) {
                batch.n_tokens = std::min((int)n_batch, (int)(prompt_ff.first.size() - b*n_batch));
                for (int i = 0; i < batch.n_tokens; i++) {
                    batch.token[i] = prompt_ff.first[n_batch*b + i];
                    batch.pos[i] = (llama_pos)(prompt_ff.second + n_batch*b + i);
                    batch.seq_id[i][0] = 0;
                    batch.n_seq_id[i] = 1;
                    batch.logits[i] = false;
                }

                // Logits are only needed for the last prompt token, and only
                // when there are no mixes to decode afterwards
                batch.logits[batch.n_tokens - 1] = (int8_t)(mixes.empty());
                if(mixes.empty()) head = batch.n_tokens - 1;

                llama_kv_cache_seq_rm(ctx, 0, (llama_pos)prompt_ff.second, -1);

                if (llama_decode(ctx, batch) != 0) {
                    AKLOGE("llama_decode() failed");
                    return {};
                }
            }
        } else {
            //AKLOGI("No need to recompute prompt, proceeding to mixes");
        }

        transformer_context_apply(model->transformerContext, prompt_ff);
        TIME_END(PromptDecode)

        TIME_START(EmbedMixing)
        size_t size = prompt.size();

        std::vector<float> embeds;

        bool useEncoder = !llamaAdapter->encoder_weight.empty();
        //AKLOGI("DecodePromptAndMixes: useEncoder=%d", useEncoder);

        for(auto &mix : mixes) {
            int num_added = 0;

            std::vector<float> mix_f(n_embd, 0.0f);

            if(useEncoder) {
                num_added = 1;

                // Project the (x, y) tap position through the learned encoder
                for(size_t i=0; i<n_embd; i++) {
                    mix_f[i] = llamaAdapter->encoder_bias[i]
                            + llamaAdapter->encoder_weight[i*2]*mix.x
                            + llamaAdapter->encoder_weight[i*2 + 1]*mix.y;
                }

                //AKLOGI("DEBUG: pos %.4f %.4f got this: [%.4f %.4f %.4f %.4f %.4f %.4f %.4f ...",
                //       mix.x, mix.y,
                //       mix_f[0], mix_f[1], mix_f[2], mix_f[3], mix_f[4], mix_f[5], mix_f[6]);
            } else {
                // No encoder: blend the embeddings of the nearby letter tokens
                // by their weights
                for (auto &t: mix.mixes) {
                    if (t.weight < EPS) continue;
                    if (t.token < 0 || t.token >= (int)n_vocab) continue;

                    float *src = llamaAdapter->embeddings.data() + (t.token * n_embd);
                    float weight = t.weight;

                    for (size_t i = 0; i < n_embd; i++) {
                        mix_f[i] += src[i] * weight;
                    }

                    num_added++;
                }
            }

            if(num_added == 0){
                AKLOGE("Somehow a token mix had 0 weight for everything");
                ASSERT(false);
            }

            embeds.insert(embeds.end(), mix_f.begin(), mix_f.end());
            size++;
        }
        TIME_END(EmbedMixing)

        TIME_START(CachedMixAmount)
        int n_tokens = int32_t(mixes.size());
        int n_past = GetCachedMixAmount(mixes);
        past_mixes = mixes;

        if(!prompt_ff.first.empty()) n_past = 0; // We have to recompute embeds completely if prompt changed
        llama_kv_cache_seq_rm(ctx, 0, (llama_pos)prompt.size() + n_past, -1);
        TIME_END(CachedMixAmount)

        if(!embeds.empty()) {
            TIME_START(DecodeEmbeds)
            // TODO: This is only processing one embd at a time, increasing n_tokens doesn't seem to work
            for(int h = n_past; h < n_tokens; h++ ) {
                llama_batch embd_batch = {
                    1,

                    nullptr,
                    embeds.data() + h*n_embd,
                    batch.pos,
                    batch.n_seq_id,
                    batch.seq_id,
                    batch.logits,

                    batch.all_pos_0,
                    batch.all_pos_1,
                    batch.all_seq_id
                };

                batch.pos[0] = (llama_pos)(prompt.size() + h);
                batch.seq_id[0][0] = 0;
                batch.n_seq_id[0] = 1;
                batch.logits[0] = false;

                if (llama_decode(ctx, embd_batch) != 0) {
                    AKLOGE("llama_decode() with embeds failed");
                    return {};
                }
            }
            TIME_END(DecodeEmbeds)

            TIME_START(DecodeXBC)

            // We always force an XBC token after
            size += 1;
            batch.n_tokens = 1;
            batch.token[0] = specialTokens.XBC;
            batch.seq_id[0][0] = 0;
            batch.n_seq_id[0] = 1;
            batch.logits[0] = true;
            batch.pos[0] = (llama_pos)(prompt.size() + n_tokens);
            head = 0;

            if (llama_decode(ctx, batch) != 0) {
                AKLOGE("llama_decode() for XBC failed");
                return {};
            }

            TIME_END(DecodeXBC)

            ASSERT(size == prompt.size() + n_tokens + 1);
            ASSERT(size == prompt.size() + (embeds.size() / n_embd) + 1);
        } else {
            ASSERT(size == prompt.size());
            //ASSERT(head == prompt_ff.first.size() - 1);
        }

        //AKLOGI("-- Decode");
        //AKLOGI("First we processed the prompt (%d):", prompt_ff.first.size());
        //for(auto t : prompt) {
        //    AKLOGI(" - [%s]", model->getToken(t));
        //}
        //AKLOGI("Then %d embeds (cached %d)", embeds.size(), n_past);
        //AKLOGI("The final size is %d and head is %d", size, head);

        TIME_START(FinishRm)

        // Drop any stale cache entries past the end of what we just decoded
        llama_kv_cache_seq_rm(ctx, 0, (llama_pos)size, -1);

        TIME_END(FinishRm)
        return {
            head,
            (int)size
        };
    }
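
    // Returns true if appending `next` to `prior` reproduces a banned
    // sequence. A banned sequence ending in the STAR token acts as a wildcard
    // banning every continuation of its prefix. The additive hash is only a
    // cheap prefilter; candidates are always verified element by element.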
    bool MatchesBanned(const token_sequence &prior, int prior_hash, llama_token next, const std::vector<banned_sequence> &banned_sequences) const {
        int new_hash = append_sequence_hash(prior_hash, next);
        for(const auto &banned_sequence : banned_sequences) {
            if(banned_sequence.sequence.back() == specialTokens.STAR && (prior.size() >= banned_sequence.sequence.size() - 1)) {
                bool matches = true;
                for(size_t i = 0; i < banned_sequence.sequence.size() - 1; i++) {
                    if(prior[i] != banned_sequence.sequence[i]) {
                        matches = false;
                        break;
                    }
                }

                if(matches){
                    auto priorTxt = model->decode(prior);
                    auto nextTxt = model->decode({next});
                    auto bannedTxt = model->decode(banned_sequence.sequence);
                    //AKLOGI("Tokens [%s] + [%s] matches banned wildcard [%s]", priorTxt.c_str(), nextTxt.c_str(), bannedTxt.c_str());
                    return true;
                }
            } else if((banned_sequence.sequence.size() == prior.size() + 1) && (banned_sequence.hash == new_hash)) {
                if(banned_sequence.sequence.back() == next) {
                    bool matches = true;
                    for(size_t i = 0; i < prior.size(); i++) {
                        if(prior[i] != banned_sequence.sequence[i]) {
                            matches = false;
                            break;
                        }
                    }

                    if(matches) {
                        auto priorTxt = model->decode(prior);
                        auto nextTxt = model->decode({next});
                        auto bannedTxt = model->decode(banned_sequence.sequence);
                        //AKLOGI("Tokens [%s] + [%s] matches banned [%s]", priorTxt.c_str(), nextTxt.c_str(), bannedTxt.c_str());
                        return true;
                    }
                }
            }
        }

        return false;
    }
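
    // Beam search over the decoded context. Starting from the top n_results
    // first tokens, each beam is extended (for at most 10 steps) until it
    // ends a word (a token ending in "▁") or emits the XEC end-of-correction
    // token. Each live beam owns a llama.cpp sequence id so its KV cache
    // entries can be copied and removed independently. Returns (cumulative
    // probability, token sequence) pairs.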
    std::vector<std::pair<float, token_sequence>> Sample(DecodeResult decodeResult, int n_results, WordCapitalizeMode capitals, const std::vector<banned_sequence> &banned_sequences) {
        llama_context *ctx = model->context();
        llama_batch batch = model->adapter->batch;

        size_t n_vocab = llama_n_vocab(llama_get_model(ctx));

        std::vector<potential_sequence> sequences;

        bool allow_correction_token = decodeResult.logits_head == 0;

        float *logits = llama_get_logits_ith(ctx, decodeResult.logits_head);
        //AKLOGI("Value of [the ] before transform: %f", logits[561]);

        bool is_bugged = logits[561] == 0.0f;

        if(!transform_logits(logits, n_vocab, true, allow_correction_token, capitals, 0)) {
            AKLOGE("logits have NaN!");
            return { };
        }

        // TODO: This should really not be here
        is_bugged = is_bugged && logits[561] < -990.0f && logits[561] > -1100.0f;
        if(is_bugged) {
            AKLOGE("Detected bug!!!! Trying to mitigate. Let's just reset cache and exit");
            llama_kv_cache_seq_rm(ctx, -1, -1, -1);
            model->transformerContext.active_context = { };
            return { };
        }

        //AKLOGI("Value of [the ] after transform: %f", logits[561]);

        std::vector<std::pair<float, int>> index_value;
        index_value.clear();
        for (size_t i = 0; i < n_vocab; i++) {
            index_value.emplace_back(logits[i], i);
        }

        // Zero out banned candidates among the top 2*n_results, then re-sort
        sortProbabilityPairVectorDescending(index_value, n_results * 2);
        const token_sequence blank = {};
        for(int i = 0; i < n_results * 2; i++) {
            if(MatchesBanned(blank, 0, index_value[i].second, banned_sequences)) {
                index_value[i].first = 0.0f;
            }
        }
        sortProbabilityPairVectorDescending(index_value, n_results);

        sequences.reserve(n_results);
        for (int i = 0; i < n_results; i++) {
            sequences.emplace_back(
                index_value[i].first,
                potential_sequence_data {
                    {index_value[i].second},
                    i
                }
            );
        }

        // TODO: This should really not be here
        is_bugged = true;
        for(const auto &seq : sequences) {
            if(seq.second.tokens.front() > 48 || seq.first != sequences[0].first) {
                is_bugged = false;
                break;
            }
        }
        if(is_bugged) {
            AKLOGE("Detected bug2!!!! Trying to mitigate. Let's just reset cache and exit");
            llama_kv_cache_seq_rm(ctx, -1, -1, -1);
            model->transformerContext.active_context = { };
            return { };
        }

        // Give each beam its own copy of the shared prompt cache
        for (auto &sequence: sequences) {
            if (sequence.second.seq_id == 0) continue;

            llama_kv_cache_seq_cp(ctx, 0, sequence.second.seq_id, 0, decodeResult.size);
        }

        std::vector<potential_sequence> next_sequences;

        std::vector<std::pair<float, token_sequence>> outputs;

        for(int tok=0; tok<10; tok++) {
            next_sequences.clear();
            for (auto sequence: std::move(sequences)) {
                int next_token = sequence.second.tokens[sequence.second.tokens.size() - 1];

                // Check if this is the end of correction
                if (next_token == specialTokens.XEC) {
                    token_sequence resulting_tokens = std::move(sequence.second.tokens);
                    resulting_tokens.resize(resulting_tokens.size() - 1);
                    outputs.emplace_back(sequence.first, resulting_tokens);
                    continue;
                }

                // Check if this is the end of a word (the token ends with the
                // UTF-8 bytes of "▁": 0xe2 0x96 0x81)
                std::string token = model->getToken(next_token);
                if (token.size() >= 3 && (token[token.size() - 1] == '\x81') &&
                    (token[token.size() - 2] == '\x96') && token[token.size() - 3] == '\xe2') {
                    outputs.emplace_back(sequence.first, std::move(sequence.second.tokens));
                    continue;
                }

                next_sequences.emplace_back(sequence);
            }

            sequences = next_sequences;
            next_sequences.clear();

            size_t remaining_count = n_results - outputs.size();
            batch.n_tokens = 0;

            //for(int i=0; i<batch.n_tokens; i++) batch.logits[i] = false;
            for (auto &sequence: sequences) {
                batch.token[batch.n_tokens] = sequence.second.tokens[sequence.second.tokens.size() - 1];
                batch.pos[batch.n_tokens] = (llama_pos)(decodeResult.size + (sequence.second.tokens.size() - 1));
                batch.seq_id[batch.n_tokens][0] = sequence.second.seq_id;
                batch.n_seq_id[batch.n_tokens] = 1;
                batch.logits[batch.n_tokens] = true;

                batch.n_tokens += 1;
            }

            ASSERT(batch.n_tokens == (int)remaining_count); // usually 3

            if (batch.n_tokens == 0) {
                break;
            }

            llama_decode(ctx, batch);

            for (int seq = 0; seq < (int)remaining_count; seq++) {
                const potential_sequence &parent_seq = sequences[seq];
                auto hash = compute_sequence_hash(parent_seq.second.tokens);

                llama_token prev_token = 0;
                if(!parent_seq.second.tokens.empty()) prev_token = parent_seq.second.tokens.back();

                logits = llama_get_logits_ith(ctx, seq);
                if(!transform_logits(logits, n_vocab, false, allow_correction_token, capitals, prev_token)) {
                    AKLOGE("Logits have NaN!");
                    return { };
                }

                index_value.clear();
                for (size_t i = 0; i < n_vocab; i++) {
                    index_value.emplace_back(logits[i], i);
                }

                sortProbabilityPairVectorDescending(index_value, remaining_count * 2);
                for(size_t i = 0; i < remaining_count * 2; i++) {
                    if(MatchesBanned(parent_seq.second.tokens, hash, index_value[i].second, banned_sequences)) {
                        index_value[i].first = 0.0f;
                    }
                }
                sortProbabilityPairVectorDescending(index_value, remaining_count);

                for (size_t i = 0; i < remaining_count; i++) {
                    token_sequence new_sequence = parent_seq.second.tokens;
                    new_sequence.push_back(index_value[i].second);

                    if (index_value[i].first > 1.0f || index_value[i].first < 0.0f) {
                        AKLOGE("Expected index_value to be probability [%.2f]",
                               index_value[i].first);
                    }

                    if (sequences[i].first > 1.0f || sequences[i].first < 0.0f) {
                        AKLOGE("Expected sequences value to be probability [%.2f]",
                               sequences[i].first);
                    }

                    next_sequences.emplace_back(
                        index_value[i].first * sequences[i].first,
                        potential_sequence_data{
                            new_sequence,
                            parent_seq.second.seq_id
                        }
                    );
                }
            }

            sortProbabilityPairVectorDescending(next_sequences, remaining_count);
            next_sequences.resize(remaining_count);
            sequences.clear();

            // In some cases we may have picked a sequence from the same parent sequence
            // We must re-assign the seq_id
            int seq_id_use_count[n_results];
            for (int i = 0; i < n_results; i++) seq_id_use_count[i] = 0;

            for (auto &seq: next_sequences) seq_id_use_count[seq.second.seq_id] += 1;

            for (auto &seq: next_sequences) {
                if (seq_id_use_count[seq.second.seq_id] > 1) {
                    int old_seq_id = seq.second.seq_id;

                    int new_seq_id = -1;
                    for (int i = 0; i < n_results; i++) {
                        if (seq_id_use_count[i] == 0) {
                            new_seq_id = i;
                            break;
                        }
                    }

                    if (new_seq_id == -1) {
                        AKLOGE("Couldn't find an empty sequence id to use. This should never happen.");
                        return {};
                    }

                    seq_id_use_count[old_seq_id]--;
                    seq_id_use_count[new_seq_id]++;

                    llama_kv_cache_seq_cp(
                        ctx,
                        old_seq_id,
                        new_seq_id,
                        0, // could start from prompt.size()
                        (llama_pos)(decodeResult.size + (seq.second.tokens.size() - 1))
                    );

                    seq.second.seq_id = new_seq_id;
                }
            }

            sequences = next_sequences;
        }

        // Clean up the per-beam cache copies, keeping only sequence 0
        for (int i = 1; i < n_results; i++) {
            llama_kv_cache_seq_rm(ctx, i, 0, -1);
        }

        return outputs;
    }
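
    // Predicts the most likely next words given the preceding text. Each
    // banned word is tokenized both with and without a trailing space so that
    // either surface form is rejected during sampling.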
    std::vector<std::pair<float, std::string>> PredictNextWord(const std::string &context, const std::vector<std::string> &banned_words) {
        std::vector<banned_sequence> banned_sequences;
        for(const std::string &bw : banned_words) {
            auto tokenized = model->tokenize(trim(bw) + " ");
            banned_sequences.push_back({ tokenized, compute_sequence_hash(tokenized) });

            auto tokenized2 = model->tokenize(trim(bw));
            banned_sequences.push_back({ tokenized2, compute_sequence_hash(tokenized2) });
        }

        token_sequence next_context = model->tokenize(trim(context) + " ");
        next_context.insert(next_context.begin(), 1); // BOS

        auto decoding_result = DecodePromptAndMixes(next_context, { });
        auto results = Sample(decoding_result, 3, WordCapitalizeMode::IgnoredCapitals, banned_sequences);

        std::vector<std::pair<float, std::string>> str_results;
        str_results.reserve(results.size());
        for(const auto& result : results) {
            str_results.emplace_back(result.first, model->decode(result.second));
        }

        return str_results;
    }
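
    // Predicts a correction for the word currently being typed. The prompt is
    // laid out as [BOS] context <XBU> (<XC0> when swipe-typing) followed by
    // the tap-mix embeddings; DecodePromptAndMixes() then forces <XBC> and
    // Sample() generates the corrected word up to <XEC>. Returns an empty
    // list if the model has no autocorrect feature (XBU == -1).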
    std::vector<std::pair<float, std::string>> PredictCorrection(const std::string &context, const std::vector<TokenMix> &mixes, bool swipe_mode, WordCapitalizeMode capitals, const std::vector<std::string> &banned_words) {
        if(specialTokens.XBU == -1) return { };

        std::vector<banned_sequence> banned_sequences;
        for(const std::string &bw : banned_words) {
            auto tokenized = model->tokenize(trim(bw) + " ");
            banned_sequences.push_back({ tokenized, compute_sequence_hash(tokenized) });

            auto tokenized2 = model->tokenize(trim(bw));
            banned_sequences.push_back({ tokenized2, compute_sequence_hash(tokenized2) });
        }

        token_sequence next_context;
        if(!context.empty()) {
            next_context = model->tokenize(trim(context) + " ");
        }

        next_context.insert(next_context.begin(), 1); // BOS
        next_context.push_back(specialTokens.XBU);

        if(swipe_mode) {
            next_context.push_back(specialTokens.XC0_SWIPE_MODE);
        }

        auto decoding_result = DecodePromptAndMixes(next_context, mixes);
        auto results = Sample(decoding_result, 3, capitals, banned_sequences);

        std::vector<std::pair<float, std::string>> str_results;
        str_results.reserve(results.size());
        for(const auto& result : results) {
            str_results.emplace_back(result.first, model->decode(result.second));
        }

        return str_results;
    }
};
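
// A dictionary suggestion being re-scored by the language model: its original
// integer score, the score normalized to [0, 1], and its tokenization.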
struct SuggestionItemToRescore {
    int index;

    int originalScore;
    float transformedScore;

    std::string word;
    token_sequence tokens;
};

namespace latinime {
static jlong xlm_LanguageModel_open(JNIEnv *env, jclass clazz, jstring modelDir) {
    GGML_UNUSED(clazz);

    AKLOGI("open LM");
    const jsize sourceDirUtf8Length = env->GetStringUTFLength(modelDir);
    if (sourceDirUtf8Length <= 0) {
        AKLOGE("DICT: Can't get sourceDir string");
        return 0;
    }
    char sourceDirChars[sourceDirUtf8Length + 1];
    env->GetStringUTFRegion(modelDir, 0, env->GetStringLength(modelDir), sourceDirChars);
    sourceDirChars[sourceDirUtf8Length] = '\0';

    auto *state = new LanguageModelState();

    if(!state->Initialize(sourceDirChars)) {
        delete state;
        return 0;
    }

    return reinterpret_cast<jlong>(state);
}

static void xlm_LanguageModel_close(JNIEnv *env, jclass clazz, jlong statePtr) {
    GGML_UNUSED(env);
    GGML_UNUSED(clazz);

    AKLOGI("LanguageModel_close called!");
    auto *state = reinterpret_cast<LanguageModelState *>(statePtr);
    if(state == nullptr) return;
    delete state;
}
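
// Rescores dictionary suggestions with the language model: each incoming
// score is normalized to roughly [0, 1], multiplied by the model's
// probability for the word's first token (scaled, and divided by the token
// count), then mapped back to the original score range.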
// (JLjava/lang/String;[Ljava/lang/String;[I[I)V
// TODO: This will also need caching to not make things extremely slow by recomputing every time
static void xlm_LanguageModel_rescoreSuggestions(JNIEnv *env, jclass clazz,
        jlong dict,
        jstring context,
        jobjectArray inWords,
        jintArray inScores,

        jintArray outScores
) {
    GGML_UNUSED(clazz);
    auto *state = reinterpret_cast<LanguageModelState *>(dict);

    std::string contextString = jstring2string(env, context);

    jsize inputSize = env->GetArrayLength(inScores);
    int scores[inputSize];
    env->GetIntArrayRegion(inScores, 0, inputSize, scores);

    float maxScore = -INFINITY;
    float minScore = INFINITY;
    for(int score : scores) {
        auto scoref = (float)score;

        if(scoref > maxScore) maxScore = scoref;
        if(scoref < minScore) minScore = scoref;
    }

    minScore -= (maxScore - minScore) * 0.33f;

    std::vector<SuggestionItemToRescore> words;
    jsize numWords = env->GetArrayLength(inWords);

    for(jsize i=0; i<numWords; i++) {
        auto jstr = (jstring)env->GetObjectArrayElement(inWords, i);
        SuggestionItemToRescore item = {
            (int) i,
            scores[i],
            ((float)scores[i] - minScore) / (maxScore - minScore),
            jstring2string(env, jstr),
            {}
        };

        item.tokens = state->model->tokenize(trim(item.word) + " ");
        words.push_back(item);
    }

    // TODO: Transform here
    llama_context *ctx = state->model->context();
    size_t n_vocab = llama_n_vocab(llama_get_model(ctx));

    token_sequence next_context = state->model->tokenize(trim(contextString) + " ");
    next_context.insert(next_context.begin(), 1); // BOS

    auto decoding_result = state->DecodePromptAndMixes(next_context, { });
    float *logits = llama_get_logits_ith(ctx, decoding_result.logits_head);

    softmax(logits, n_vocab);

    AKLOGI("Iter");
    for(auto &entry : words) {
        float pseudoScore = logits[entry.tokens[0]] / (float)entry.tokens.size();
        AKLOGI("Word [%s], %d tokens, prob[0] = %.8f", entry.word.c_str(), (int)entry.tokens.size(), pseudoScore);
        entry.transformedScore *= pseudoScore * 1000.0f;
    }
    // TODO: Transform here

    // Output scores
    jint *outArray = env->GetIntArrayElements(outScores, nullptr);

    for(const auto &entry : words) {
        outArray[entry.index] = (jint)(entry.transformedScore * (maxScore - minScore) + minScore);
    }

    env->ReleaseIntArrayElements(outScores, outArray, 0);
}
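
// Main JNI suggestion entry point. With no partial word it predicts the next
// word; otherwise it builds per-tap TokenMix data from the compose
// coordinates and asks the model for corrections. The last element of
// outPredictions carries the confidence verdict ("autocorrect", "uncertain"
// or "clueless").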
static void xlm_LanguageModel_getSuggestions(JNIEnv *env, jclass clazz,
        // inputs
        jlong dict,
        jlong proximityInfo,
        jstring context,
        jstring partialWord,
        jint inputMode,
        jintArray inComposeX,
        jintArray inComposeY,
        jfloat autocorrectThreshold,
        jobjectArray bannedWordsArray,

        // outputs
        jobjectArray outPredictions,
        jfloatArray outProbabilities
) {
    GGML_UNUSED(clazz);

    auto *state = reinterpret_cast<LanguageModelState *>(dict);
    auto *pInfo = reinterpret_cast<ProximityInfo *>(proximityInfo);

    size_t inputSize = env->GetArrayLength(inComposeX);

    std::string contextString;
    if(context != nullptr) {
        contextString = jstring2string(env, context);
    }

    std::string partialWordString;
    if(partialWord != nullptr){
        partialWordString = jstring2string(env, partialWord);
    }

    if(partialWordString.size() < inputSize) inputSize = partialWordString.size();

    WordCapitalizeMode capitals = WordCapitalizeMode::IgnoredCapitals;

    if(!partialWordString.empty() && !isFirstCharLowercase(partialWordString.c_str())) {
        if(partialWordString.size() > 1 && !hasLowercase(partialWordString.c_str())) {
            capitals = WordCapitalizeMode::AllCapitals;
        } else {
            capitals = WordCapitalizeMode::FirstCapital;
        }
    }

    std::vector<std::string> bannedWords;
    size_t numBannedWords = env->GetArrayLength(bannedWordsArray);
    for(size_t i=0; i<numBannedWords; i++) {
        bannedWords.push_back(jstring2string(
            env,
            (jstring)env->GetObjectArrayElement(bannedWordsArray, (jsize) i)
        ));
    }

    TIME_START(GettingMixes)
    int xCoordinates[inputSize];
    int yCoordinates[inputSize];
    env->GetIntArrayRegion(inComposeX, 0, (jsize)inputSize, xCoordinates);
    env->GetIntArrayRegion(inComposeY, 0, (jsize)inputSize, yCoordinates);

    std::vector<TokenMix> mixes;
    for(size_t i=0; i<inputSize; i++) {
        char wc = partialWordString[i];
        if (!(wc >= 'a' && wc <= 'z') && !(wc >= 'A' && wc <= 'Z')) {
            //AKLOGI("%d | Char %c skipped due to not within range", i, wc);
            continue;
        }
        if (xCoordinates[i] == -1 || yCoordinates[i] == -1) {
            //AKLOGI("%d | Char %c skipped due to -1", i, wc);
            continue;
        }

        std::vector<float> proportions = pInfo->decomposeTapPosition(xCoordinates[i], yCoordinates[i]);
        for(float &f : proportions) {
            if(f < 0.05f) f = 0.0f;
        }

        std::vector<std::pair<float, int>> index_value;
        index_value.clear();
        for (size_t k = 0; k < proportions.size(); k++) {
            index_value.emplace_back(proportions[k], k);
        }

        sortProbabilityPairVectorDescending(index_value, NUM_TOKEN_MIX);

        // Zero out non-letter keys among the top candidates, re-sorting until
        // the top NUM_TOKEN_MIX entries are all letters (or all symbols)
        bool needs_resorting = false;
        int num_symbols = 0;
        for(int s=0; s<4; s++) {
            num_symbols = 0;
            needs_resorting = false;
            for (int j = 0; j < NUM_TOKEN_MIX; j++) {
                char c = (char) (pInfo->getKeyCodePoint(index_value[j].second));

                if (c >= 'a' && c <= 'z') {
                } else if (c >= 'A' && c <= 'Z') {
                } else if(index_value[j].first > 0.0f) {
                    index_value[j].first = 0.0f;
                    needs_resorting = true;
                    num_symbols++;
                }
            }
            if(num_symbols == NUM_TOKEN_MIX) break;
            if(!needs_resorting) break;
            sortProbabilityPairVectorDescending(index_value, NUM_TOKEN_MIX);
        }
        if(num_symbols == NUM_TOKEN_MIX) {
            //AKLOGI("%d | Char %c skipped due to num_symbols == NUM_TOKEN_MIX", i, wc);
            continue;
        } // Skip the symbol character

        float total_sum = 0.0f;
        for(int j=0; j<NUM_TOKEN_MIX; j++) {
            total_sum += index_value[j].first;
        }

        if(total_sum == 0.0f) {
            continue;
        }

        // Normalize the remaining weights so they sum to 1
        for(int j=0; j<NUM_TOKEN_MIX; j++) {
            index_value[j].first /= total_sum;
        }

        TokenMix results {};
        results.x = ((float)xCoordinates[i]) / ((float)pInfo->getKeyboardWidth());
        results.y = ((float)yCoordinates[i]) / ((float)pInfo->getKeyboardHeight());

        //AKLOGI("%d | Char %c, pos %.6f %.6f, nearest is %c at %.2f, then %c at %.2f, finally %c at %.2f", i, partialWordString[i],
        //       results.x, results.y,
        //       (char)(pInfo->getKeyCodePoint(index_value[0].second)), (float)(index_value[0].first),
        //       (char)(pInfo->getKeyCodePoint(index_value[1].second)), (float)(index_value[1].first),
        //       (char)(pInfo->getKeyCodePoint(index_value[2].second)), (float)(index_value[2].first)
        //       );

        for(int j=0; j<NUM_TOKEN_MIX; j++) {
            char c = (char) (pInfo->getKeyCodePoint(index_value[j].second));
            float w = index_value[j].first;

            results.mixes[j].weight = w;
            if(c >= 'a' && c <= 'z') {
                results.mixes[j].token = (state->specialTokens.LETTERS_TO_IDS[c - 'a']);
            } else if(c >= 'A' && c <= 'Z') {
                results.mixes[j].token = (state->specialTokens.LETTERS_TO_IDS[c - 'A']);
            } else {
                //AKLOGI("ignoring character in partial word [%c]", c);
                results.mixes[j].weight = 0.0f;
            }
        }

        mixes.push_back(results);
    }

    TIME_END(GettingMixes)

    //AKLOGI("LanguageModel context [%s]", contextString.c_str());

    std::vector<std::pair<float, std::string>> results;
    if(partialWordString.empty()) {
        results = state->PredictNextWord(contextString, bannedWords);

        //for(const auto &result : results) {
        //    AKLOGI("LanguageModel suggestion %.2f [%s]", result.first, result.second.c_str());
        //}
    } else {
        bool swipeMode = inputMode == 1;
        results = state->PredictCorrection(contextString, mixes, swipeMode, capitals, bannedWords);

        //for(const auto &result : results) {
        //    AKLOGI("LanguageModel correction %.2f [%s] -> [%s]", result.first, partialWordString.c_str(), result.second.c_str());
        //}

        // Exact match rule: if any suggestion matches what was typed, demote
        // every suggestion that doesn't
        bool hasExactMatch = false;
        for(const auto &result : results) {
            if(isExactMatch(result.second, partialWordString)) {
                hasExactMatch = true;
            }
        }
        if(hasExactMatch){
            for(auto &result : results) {
                if(!isExactMatch(result.second, partialWordString)) {
                    result.first -= 1.0f;
                }
            }
        }
    }

    // Probability check
    sortProbabilityPairVectorDescending(results);

    const char *result_probability_mode;
    if(results.size() < 2) {
        // Not sure what to do here
        result_probability_mode = RETURNVAL_UNCERTAIN;
    } else if(results[0].first > autocorrectThreshold * results[1].first) {
        result_probability_mode = RETURNVAL_AUTOCORRECT;
    } else if(results[0].first > (autocorrectThreshold * 0.1f) * results[1].first) {
        result_probability_mode = RETURNVAL_UNCERTAIN;
    } else {
        result_probability_mode = RETURNVAL_CLUELESS;
        // TODO: If we end up here, we could try sampling differently / etc
    }

    // No way it's correct if it's way shorter! (unless we're swipe typing)
    if(!results.empty() && !partialWordString.empty() && (results[0].second.size() * 2 < partialWordString.size()) && inputMode != 1) {
        result_probability_mode = RETURNVAL_CLUELESS;
    }

    // Output
    size_t size = env->GetArrayLength(outPredictions);

    jstring result_str = string2jstring(env, result_probability_mode);
    env->SetObjectArrayElement(outPredictions, (jsize)(size - 1), result_str);
    env->DeleteLocalRef(result_str);

    jfloat *probsArray = env->GetFloatArrayElements(outProbabilities, nullptr);

    // Output predictions for next word
    for (int i = 0; i < (int)results.size(); i++) {
        jstring jstr = string2jstring(env, results[i].second.c_str());
        env->SetObjectArrayElement(outPredictions, i, jstr);
        probsArray[i] = results[i].first;
        env->DeleteLocalRef(jstr);
    }

    env->ReleaseFloatArrayElements(outProbabilities, probsArray, 0);
}

static const JNINativeMethod sMethods[] = {
    {
        const_cast<char *>("openNative"),
        const_cast<char *>("(Ljava/lang/String;)J"),
        reinterpret_cast<void *>(xlm_LanguageModel_open)
    },
    {
        const_cast<char *>("closeNative"),
        const_cast<char *>("(J)V"),
        reinterpret_cast<void *>(xlm_LanguageModel_close)
    },
    {
        const_cast<char *>("getSuggestionsNative"),
        const_cast<char *>("(JJLjava/lang/String;Ljava/lang/String;I[I[IF[Ljava/lang/String;[Ljava/lang/String;[F)V"),
        reinterpret_cast<void *>(xlm_LanguageModel_getSuggestions)
    },
    {
        const_cast<char *>("rescoreSuggestionsNative"),
        const_cast<char *>("(JLjava/lang/String;[Ljava/lang/String;[I[I)V"),
        reinterpret_cast<void *>(xlm_LanguageModel_rescoreSuggestions)
    }
};
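
// Forwards llama.cpp log messages to the Android log.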
static void llama_log_callback(ggml_log_level level, const char * text, void * user_data) {
    GGML_UNUSED(user_data);

    switch(level) {
        case GGML_LOG_LEVEL_ERROR:
            AKLOGE("llama err: %s", text);
            break;
        case GGML_LOG_LEVEL_WARN:
            AKLOGI("llama warn: %s", text);
            break;
        case GGML_LOG_LEVEL_INFO:
            AKLOGI("llama info: %s", text);
            break;
    }
}

int register_LanguageModel(JNIEnv *env) {
    llama_backend_init(true /* numa??? */);
    llama_log_set(llama_log_callback, nullptr);

    const char *const kClassPathName = "org/futo/inputmethod/latin/xlm/LanguageModel";
    return registerNativeMethods(env, kClassPathName, sMethods, NELEMS(sMethods));
}
} // namespace latinime