Mirror of https://github.com/ikawrakow/ik_llama.cpp.git (synced 2026-01-26 17:20:01 +00:00)
* Merging mainline - WIP
* Merging mainline - WIP. AVX2 and CUDA appear to work. CUDA performance seems slightly (~1-2%) lower, as is so often the case with llama.cpp/ggml after some "improvements" have been made.
* Merging mainline - fix Metal
* Remove check

---------

Co-authored-by: Iwan Kawrakow <iwan.kawrakow@gmail.com>
21 lines · 431 B · Python
import json, subprocess, sys, os

# Expect at least one CLI argument: the regex pattern to convert.
assert len(sys.argv) >= 2

# First argument is the pattern; any remaining arguments are forwarded
# to json_schema_to_grammar.py unchanged.
[_, pattern, *rest] = sys.argv

# Wrap the regex in a minimal JSON schema ({"type": "string", "pattern": ...})
# and pipe it to json_schema_to_grammar.py, which lives next to this script.
# "-" tells the converter to read the schema from stdin; --raw-pattern makes
# it emit a grammar for the bare pattern, without surrounding JSON-string quotes.
print(subprocess.check_output(
    [
        "python",
        os.path.join(
            os.path.dirname(os.path.realpath(__file__)),
            "json_schema_to_grammar.py"),
        *rest,
        "-",
        "--raw-pattern",
    ],
    text=True,
    input=json.dumps({
        "type": "string",
        "pattern": pattern,
    }, indent=2)))
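For illustration, here is a minimal sketch of the payload the script feeds to the converter for a concrete pattern. The pattern [0-9]+ is a made-up example, not something taken from this repository; the schema shape is exactly what the script above builds.

import json

# Hypothetical example pattern (not from the repo), standing in for sys.argv[1].
pattern = "[0-9]+"

# The script pipes exactly this JSON schema into json_schema_to_grammar.py.
print(json.dumps({
    "type": "string",
    "pattern": pattern,
}, indent=2))

From a shell, the equivalent end-to-end call would be along the lines of `python regex_to_grammar.py '[0-9]+'` (the file name here is assumed from the upstream llama.cpp examples), which prints a GBNF grammar constraining generated strings to match the regex.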