// mirror of https://github.com/ikawrakow/ik_llama.cpp.git
// synced 2026-01-27 09:39:53 +00:00
//
// Upstream commit message:
//   * Merging mainline - WIP
//   * Merging mainline - WIP  AVX2 and CUDA appear to work. CUDA performance
//     seems slightly (~1-2%) lower as it is so often the case with
//     llama.cpp/ggml after some "improvements" have been made.
//   * Merging mainline - fix Metal
//   * Remove check
//   ---------
//   Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
//
// 21 lines, 582 B, C++
#pragma once

#include <cstdint>
#include <unordered_map>
#include <unordered_set>
#include <utility>  // std::pair, used by unicode_ranges_flags below
#include <vector>
// An inclusive codepoint range [first, last] associated with an NFD codepoint.
// NOTE(review): presumably every codepoint in the range shares the same NFD
// (canonical decomposition) base codepoint `nfd` -- confirm against the table
// generator that emits unicode_ranges_nfd.
struct range_nfd {
    uint32_t first; // first codepoint of the range (inclusive)
    uint32_t last;  // last codepoint of the range (inclusive)
    uint32_t nfd;   // NFD codepoint for this range
};
// Exclusive upper bound of the Unicode codepoint space: valid codepoints are
// U+0000 .. U+10FFFF, so there are 0x110000 of them. constexpr (rather than
// just const) makes the value usable in constant expressions and array sizes.
static constexpr uint32_t MAX_CODEPOINTS = 0x110000;
extern const std::vector<std::pair<uint32_t, uint16_t>> unicode_ranges_flags;
|
|
extern const std::unordered_set<uint32_t> unicode_set_whitespace;
|
|
extern const std::unordered_map<uint32_t, uint32_t> unicode_map_lowercase;
|
|
extern const std::unordered_map<uint32_t, uint32_t> unicode_map_uppercase;
|
|
extern const std::vector<range_nfd> unicode_ranges_nfd;
|